Mirror of https://github.com/QwenLM/qwen-code.git
Synced 2025-12-28 04:29:15 +00:00

Compare commits: `feat/gemin…` ... `fix/integr…`

54 Commits
| Author | SHA1 | Date |
| --- | --- | --- |
| | 7233d37bd1 | |
| | f7d04323f3 | |
| | 257c6705e1 | |
| | 27e7438b75 | |
| | 8a3ff8db12 | |
| | 26f8b67d4f | |
| | b64d636280 | |
| | 781c57b438 | |
| | c53bdde747 | |
| | 99db18069d | |
| | a0a5b831d4 | |
| | 8f74dd224c | |
| | b931d28f35 | |
| | 9f65bd3b39 | |
| | 2b3830cf83 | |
| | 2b9140940d | |
| | 4efdea0981 | |
| | 05791d4200 | |
| | add35d2904 | |
| | bc2a7efcb3 | |
| | 4f970c9987 | |
| | 251031cfc5 | |
| | 77c257d9d0 | |
| | 4311af96eb | |
| | b49c11e9a2 | |
| | 642dda0315 | |
| | bbbdeb280d | |
| | 0d43ddee2a | |
| | 50e03f2dd6 | |
| | f440ff2f7f | |
| | 9a6b0abc37 | |
| | 9cdd85c62a | |
| | 00547ba439 | |
| | fc1dac9dc7 | |
| | 338eb9038d | |
| | e0b9044833 | |
| | f33f43e2f7 | |
| | 18e9b2340b | |
| | ad427da340 | |
| | 484e0fd943 | |
| | 80bb2890df | |
| | abd9ee2a7b | |
| | b8df689e31 | |
| | e610578ecc | |
| | 235159216e | |
| | 93b30cca29 | |
| | 177fc42f04 | |
| | 2560c2d1a2 | |
| | bd6e16d41b | |
| | 2f0fa267c8 | |
| | fa6ae0a324 | |
| | 387be44866 | |
| | 51b82771da | |
| | 629cd14fad | |
**.github/workflows/e2e.yml** (vendored) — 9 changes

```diff
@@ -18,8 +18,6 @@ jobs:
           - 'sandbox:docker'
         node-version:
-          - '20.x'
-          - '22.x'
           - '24.x'
     steps:
       - name: 'Checkout'
         uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
@@ -67,10 +65,13 @@ jobs:
           OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
           OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'
           KEEP_OUTPUT: 'true'
           SANDBOX: '${{ matrix.sandbox }}'
           VERBOSE: 'true'
         run: |-
-          npm run "test:integration:${SANDBOX}"
+          if [[ "${{ matrix.sandbox }}" == "sandbox:docker" ]]; then
+            npm run test:integration:sandbox:docker
+          else
+            npm run test:integration:sandbox:none
+          fi

   e2e-test-macos:
     name: 'E2E Test - macOS'
```
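The new run step simply branches on the matrix value instead of interpolating it into the script name. To run the same suites locally, the equivalent shell logic is the following sketch (assuming the `test:integration:sandbox:*` scripts named above exist in the root `package.json`):

```bash
# Mirror the workflow's branching locally.
if [[ "${SANDBOX:-}" == "sandbox:docker" ]]; then
  npm run test:integration:sandbox:docker   # containerized sandbox
else
  npm run test:integration:sandbox:none     # no sandbox
fi
```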
**.github/workflows/release-sdk.yml** (vendored) — 54 changes

```diff
@@ -33,6 +33,10 @@ on:
         type: 'boolean'
         default: false

+concurrency:
+  group: '${{ github.workflow }}'
+  cancel-in-progress: false
+
 jobs:
   release-sdk:
     runs-on: 'ubuntu-latest'
@@ -46,6 +50,7 @@ jobs:
       packages: 'write'
       id-token: 'write'
       issues: 'write'
+      pull-requests: 'write'
     outputs:
       RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'

@@ -163,11 +168,11 @@ jobs:
           echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"

       - name: 'Update package version'
-        working-directory: 'packages/sdk-typescript'
         env:
           RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
         run: |-
-          npm version "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
+          # Use npm workspaces so the root lockfile is updated consistently.
+          npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version

       - name: 'Commit and Conditionally Push package version'
         env:
@@ -175,7 +180,7 @@ jobs:
           IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
           RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
         run: |-
-          git add packages/sdk-typescript/package.json
+          git add packages/sdk-typescript/package.json package-lock.json
           if git diff --staged --quiet; then
             echo "No version changes to commit"
           else
@@ -222,6 +227,49 @@ jobs:
             --notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
             --generate-notes

+      - name: 'Create PR to merge release branch into main'
+        if: |-
+          ${{ steps.vars.outputs.is_dry_run == 'false' }}
+        id: 'pr'
+        env:
+          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
+          RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
+          RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
+        run: |-
+          set -euo pipefail
+
+          pr_url="$(gh pr list --head "${RELEASE_BRANCH}" --base main --json url --jq '.[0].url')"
+          if [[ -z "${pr_url}" ]]; then
+            pr_url="$(gh pr create \
+              --base main \
+              --head "${RELEASE_BRANCH}" \
+              --title "chore(release): sdk-typescript ${RELEASE_TAG}" \
+              --body "Automated release PR for sdk-typescript ${RELEASE_TAG}.")"
+          fi
+
+          echo "PR_URL=${pr_url}" >> "${GITHUB_OUTPUT}"
+
+      - name: 'Wait for CI checks to complete'
+        if: |-
+          ${{ steps.vars.outputs.is_dry_run == 'false' }}
+        env:
+          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
+          PR_URL: '${{ steps.pr.outputs.PR_URL }}'
+        run: |-
+          set -euo pipefail
+          echo "Waiting for CI checks to complete..."
+          gh pr checks "${PR_URL}" --watch --interval 30
+
+      - name: 'Enable auto-merge for release PR'
+        if: |-
+          ${{ steps.vars.outputs.is_dry_run == 'false' }}
+        env:
+          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
+          PR_URL: '${{ steps.pr.outputs.PR_URL }}'
+        run: |-
+          set -euo pipefail
+          gh pr merge "${PR_URL}" --merge --auto
+
       - name: 'Create Issue on Failure'
         if: |-
           ${{ failure() }}
```
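Taken together, the three new steps implement an idempotent find-or-create-then-auto-merge release-PR flow. Condensed into one standalone script using the same `gh` calls as the workflow (a sketch assuming `gh` is authenticated and `RELEASE_BRANCH` / `RELEASE_TAG` are set):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Reuse an existing release PR if one is already open for this branch.
pr_url="$(gh pr list --head "${RELEASE_BRANCH}" --base main --json url --jq '.[0].url')"
if [[ -z "${pr_url}" ]]; then
  pr_url="$(gh pr create --base main --head "${RELEASE_BRANCH}" \
    --title "chore(release): sdk-typescript ${RELEASE_TAG}" \
    --body "Automated release PR for sdk-typescript ${RELEASE_TAG}.")"
fi

gh pr checks "${pr_url}" --watch --interval 30   # block until CI finishes
gh pr merge "${pr_url}" --merge --auto           # merge once checks pass
```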
**CONTRIBUTING.md** — 110 changes

````diff
@@ -2,27 +2,6 @@

 We would love to accept your patches and contributions to this project.

-## Before you begin
-
-### Sign our Contributor License Agreement
-
-Contributions to this project must be accompanied by a
-[Contributor License Agreement](https://cla.developers.google.com/about) (CLA).
-You (or your employer) retain the copyright to your contribution; this simply
-gives us permission to use and redistribute your contributions as part of the
-project.
-
-If you or your current employer have already signed the Google CLA (even if it
-was for a different project), you probably don't need to do it again.
-
-Visit <https://cla.developers.google.com/> to see your current agreements or to
-sign a new one.
-
-### Review our Community Guidelines
-
-This project follows [Google's Open Source Community
-Guidelines](https://opensource.google/conduct/).
-
 ## Contribution Process

 ### Code Reviews
@@ -74,12 +53,6 @@ Your PR should have a clear, descriptive title and a detailed description of the

 In the PR description, explain the "why" behind your changes and link to the relevant issue (e.g., `Fixes #123`).

-## Forking
-
-If you are forking the repository you will be able to run the Build, Test and Integration test workflows. However in order to make the integration tests run you'll need to add a [GitHub Repository Secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository) with a value of `GEMINI_API_KEY` and set that to a valid API key that you have available. Your key and secret are private to your repo; no one without access can see your key and you cannot see any secrets related to this repo.
-
-Additionally you will need to click on the `Actions` tab and enable workflows for your repository, you'll find it's the large blue button in the center of the screen.
-
 ## Development Setup and Workflow

 This section guides contributors on how to build, modify, and understand the development setup of this project.
@@ -98,8 +71,8 @@ This section guides contributors on how to build, modify, and understand the dev
 To clone the repository:

 ```bash
-git clone https://github.com/google-gemini/gemini-cli.git # Or your fork's URL
-cd gemini-cli
+git clone https://github.com/QwenLM/qwen-code.git # Or your fork's URL
+cd qwen-code
 ```

 To install dependencies defined in `package.json` as well as root dependencies:
@@ -118,9 +91,9 @@ This command typically compiles TypeScript to JavaScript, bundles assets, and pr

 ### Enabling Sandboxing

-[Sandboxing](#sandboxing) is highly recommended and requires, at a minimum, setting `GEMINI_SANDBOX=true` in your `~/.env` and ensuring a sandboxing provider (e.g. `macOS Seatbelt`, `docker`, or `podman`) is available. See [Sandboxing](#sandboxing) for details.
+[Sandboxing](#sandboxing) is highly recommended and requires, at a minimum, setting `QWEN_SANDBOX=true` in your `~/.env` and ensuring a sandboxing provider (e.g. `macOS Seatbelt`, `docker`, or `podman`) is available. See [Sandboxing](#sandboxing) for details.

-To build both the `gemini` CLI utility and the sandbox container, run `build:all` from the root directory:
+To build both the `qwen-code` CLI utility and the sandbox container, run `build:all` from the root directory:

 ```bash
 npm run build:all
@@ -130,13 +103,13 @@ To skip building the sandbox container, you can use `npm run build` instead.

 ### Running

-To start the Gemini CLI from the source code (after building), run the following command from the root directory:
+To start the Qwen Code application from the source code (after building), run the following command from the root directory:

 ```bash
 npm start
 ```

-If you'd like to run the source build outside of the gemini-cli folder, you can utilize `npm link path/to/gemini-cli/packages/cli` (see: [docs](https://docs.npmjs.com/cli/v9/commands/npm-link)) or `alias gemini="node path/to/gemini-cli/packages/cli"` to run with `gemini`
+If you'd like to run the source build outside of the qwen-code folder, you can utilize `npm link path/to/qwen-code/packages/cli` (see: [docs](https://docs.npmjs.com/cli/v9/commands/npm-link)) to run with `qwen-code`

 ### Running Tests

@@ -154,7 +127,7 @@ This will run tests located in the `packages/core` and `packages/cli` directorie

 #### Integration Tests

-The integration tests are designed to validate the end-to-end functionality of the Gemini CLI. They are not run as part of the default `npm run test` command.
+The integration tests are designed to validate the end-to-end functionality of Qwen Code. They are not run as part of the default `npm run test` command.

 To run the integration tests, use the following command:

@@ -209,19 +182,61 @@ npm run lint
 ### Coding Conventions

 - Please adhere to the coding style, patterns, and conventions used throughout the existing codebase.
 - Consult [QWEN.md](https://github.com/QwenLM/qwen-code/blob/main/QWEN.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage.
 - **Imports:** Pay special attention to import paths. The project uses ESLint to enforce restrictions on relative imports between packages.

 ### Project Structure

 - `packages/`: Contains the individual sub-packages of the project.
   - `cli/`: The command-line interface.
-  - `core/`: The core backend logic for the Gemini CLI.
+  - `core/`: The core backend logic for Qwen Code.
 - `docs/`: Contains all project documentation.
 - `scripts/`: Utility scripts for building, testing, and development tasks.

 For more detailed architecture, see `docs/architecture.md`.

+## Documentation Development
+
+This section describes how to develop and preview the documentation locally.
+
+### Prerequisites
+
+1. Ensure you have Node.js (version 18+) installed
+2. Have npm or yarn available
+
+### Setup Documentation Site Locally
+
+To work on the documentation and preview changes locally:
+
+1. Navigate to the `docs-site` directory:
+
+   ```bash
+   cd docs-site
+   ```
+
+2. Install dependencies:
+
+   ```bash
+   npm install
+   ```
+
+3. Link the documentation content from the main `docs` directory:
+
+   ```bash
+   npm run link
+   ```
+
+   This creates a symbolic link from `../docs` to `content` in the docs-site project, allowing the documentation content to be served by the Next.js site.
+
+4. Start the development server:
+
+   ```bash
+   npm run dev
+   ```
+
+5. Open [http://localhost:3000](http://localhost:3000) in your browser to see the documentation site with live updates as you make changes.
+
+Any changes made to the documentation files in the main `docs` directory will be reflected immediately in the documentation site.
+
 ## Debugging

 ### VS Code:
@@ -231,7 +246,7 @@ For more detailed architecture, see `docs/architecture.md`.
 ```bash
 npm run debug
 ```
-This command runs `node --inspect-brk dist/gemini.js` within the `packages/cli` directory, pausing execution until a debugger attaches. You can then open `chrome://inspect` in your Chrome browser to connect to the debugger.
+This command runs `node --inspect-brk dist/index.js` within the `packages/cli` directory, pausing execution until a debugger attaches. You can then open `chrome://inspect` in your Chrome browser to connect to the debugger.
 2. In VS Code, use the "Attach" launch configuration (found in `.vscode/launch.json`).

 Alternatively, you can use the "Launch Program" configuration in VS Code if you prefer to launch the currently open file directly, but 'F5' is generally recommended.
@@ -239,16 +254,16 @@ Alternatively, you can use the "Launch Program" configuration in VS Code if you
 To hit a breakpoint inside the sandbox container run:

 ```bash
-DEBUG=1 gemini
+DEBUG=1 qwen-code
 ```

-**Note:** If you have `DEBUG=true` in a project's `.env` file, it won't affect gemini-cli due to automatic exclusion. Use `.gemini/.env` files for gemini-cli specific debug settings.
+**Note:** If you have `DEBUG=true` in a project's `.env` file, it won't affect qwen-code due to automatic exclusion. Use `.qwen-code/.env` files for qwen-code specific debug settings.

 ### React DevTools

 To debug the CLI's React-based UI, you can use React DevTools. Ink, the library used for the CLI's interface, is compatible with React DevTools version 4.x.

-1. **Start the Gemini CLI in development mode:**
+1. **Start the Qwen Code application in development mode:**

    ```bash
    DEV=true npm start
@@ -270,23 +285,10 @@ To debug the CLI's React-based UI, you can use React DevTools. Ink, the library
    ```

 Your running CLI application should then connect to React DevTools.
 

 ## Sandboxing

-### macOS Seatbelt
-
-On macOS, `qwen` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=<profile>` if you also create a file `.qwen/sandbox-macos-<profile>.sb` under your project settings directory `.qwen`.
-
-### Container-based Sandboxing (All Platforms)
-
-For stronger container-based sandboxing on macOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|<command>` in your environment or `.env` file. The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading of the base image) but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox.
-
-Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. Files created within the sandbox should be automatically mapped to your user/group on host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.qwen`) and running `qwen` with `BUILD_SANDBOX=1` to trigger building of your custom sandbox.
-
-#### Proxied Networking
-
-All sandboxing methods, including macOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=<command>`, where `<command>` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox.
+> TBD

 ## Manual Publish
````
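For reference alongside the Debugging section above: the "Attach" entry in `.vscode/launch.json` generally looks like the following — a minimal sketch assuming the default Node inspector port 9229, not a copy of the repository's actual file:

```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Attach",
      "type": "node",
      "request": "attach",
      "port": 9229,
      "skipFiles": ["<node_internals>/**"]
    }
  ]
}
```

With `npm run debug` paused on `--inspect-brk`, selecting this configuration attaches VS Code to the waiting process.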
**Makefile** — 10 changes

```diff
@@ -1,9 +1,9 @@
-# Makefile for gemini-cli
+# Makefile for qwen-code

 .PHONY: help install build build-sandbox build-all test lint format preflight clean start debug release run-npx create-alias

 help:
-	@echo "Makefile for gemini-cli"
+	@echo "Makefile for qwen-code"
 	@echo ""
 	@echo "Usage:"
 	@echo "  make install          - Install npm dependencies"
@@ -14,11 +14,11 @@ help:
 	@echo "  make format           - Format the code"
 	@echo "  make preflight        - Run formatting, linting, and tests"
 	@echo "  make clean            - Remove generated files"
-	@echo "  make start            - Start the Gemini CLI"
-	@echo "  make debug            - Start the Gemini CLI in debug mode"
+	@echo "  make start            - Start the Qwen Code CLI"
+	@echo "  make debug            - Start the Qwen Code CLI in debug mode"
 	@echo ""
 	@echo "  make run-npx          - Run the CLI using npx (for testing the published package)"
-	@echo "  make create-alias     - Create a 'gemini' alias for your shell"
+	@echo "  make create-alias     - Create a 'qwen' alias for your shell"

 install:
 	npm install
```
**README.md** — 410 changes

````diff
@@ -1,382 +1,152 @@
 # Qwen Code

 <div align="center">

 

 [](https://www.npmjs.com/package/@qwen-code/qwen-code)
 [](./LICENSE)
 [](https://nodejs.org/)
 [](https://www.npmjs.com/package/@qwen-code/qwen-code)

-**AI-powered command-line workflow tool for developers**
+**An open-source AI agent that lives in your terminal.**

-[Installation](#installation) • [Quick Start](#quick-start) • [Features](#key-features) • [Documentation](./docs/) • [Contributing](./CONTRIBUTING.md)
+<a href="https://qwenlm.github.io/qwen-code-docs/zh/users/overview">中文</a> |
+<a href="https://qwenlm.github.io/qwen-code-docs/de/users/overview">Deutsch</a> |
+<a href="https://qwenlm.github.io/qwen-code-docs/fr/users/overview">français</a> |
+<a href="https://qwenlm.github.io/qwen-code-docs/ja/users/overview">日本語</a> |
+<a href="https://qwenlm.github.io/qwen-code-docs/ru/users/overview">Русский</a> |
+<a href="https://qwenlm.github.io/qwen-code-docs/pt-BR/users/overview">Português (Brasil)</a>

 </div>

-<div align="center">
-
-<a href="https://qwenlm.github.io/qwen-code-docs/de/">Deutsch</a> |
-<a href="https://qwenlm.github.io/qwen-code-docs/fr">français</a> |
-<a href="https://qwenlm.github.io/qwen-code-docs/ja/">日本語</a> |
-<a href="https://qwenlm.github.io/qwen-code-docs/ru">Русский</a> |
-<a href="https://qwenlm.github.io/qwen-code-docs/zh/">中文</a>
-
-</div>
-
-Qwen Code is a powerful command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli), specifically optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder) models. It enhances your development workflow with advanced code understanding, automated tasks, and intelligent assistance.
+Qwen Code is an open-source AI agent for the terminal, optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder). It helps you understand large codebases, automate tedious work, and ship faster.
+
+

-## 💡 Free Options Available
+## Why Qwen Code?

-Get started with Qwen Code at no cost using any of these free options:
-
-### 🔥 Qwen OAuth (Recommended)
-
-- **2,000 requests per day** with no token limits
-- **60 requests per minute** rate limit
-- Simply run `qwen` and authenticate with your qwen.ai account
-- Automatic credential management and refresh
-- Use `/auth` command to switch to Qwen OAuth if you have initialized with OpenAI compatible mode
-
-### 🌏 Regional Free Tiers
-
-- **Mainland China**: ModelScope offers **2,000 free API calls per day**
-- **International**: OpenRouter provides **up to 1,000 free API calls per day** worldwide
-
-For detailed setup instructions, see [Authorization](#authorization).
-
-> [!WARNING]
-> **Token Usage Notice**: Qwen Code may issue multiple API calls per cycle, resulting in higher token usage (similar to Claude Code). We're actively optimizing API efficiency.
-
-## Key Features
-
-- **Code Understanding & Editing** - Query and edit large codebases beyond traditional context window limits
-- **Workflow Automation** - Automate operational tasks like handling pull requests and complex rebases
-- **Enhanced Parser** - Adapted parser specifically optimized for Qwen-Coder models
-- **Vision Model Support** - Automatically detect images in your input and seamlessly switch to vision-capable models for multimodal analysis
+- **OpenAI-compatible, OAuth free tier**: use an OpenAI-compatible API, or sign in with Qwen OAuth to get 2,000 free requests/day.
+- **Open-source, co-evolving**: both the framework and the Qwen3-Coder model are open-source—and they ship and evolve together.
+- **Agentic workflow, feature-rich**: rich built-in tools (Skills, SubAgents, Plan Mode) for a full agentic workflow and a Claude Code-like experience.
+- **Terminal-first, IDE-friendly**: built for developers who live in the command line, with optional integration for VS Code and Zed.

 ## Installation

-### Prerequisites
-
-Ensure you have [Node.js version 20](https://nodejs.org/en/download) or higher installed.
+#### Prerequisites
+
+```bash
+# Node.js 20+
+curl -qL https://www.npmjs.com/install.sh | sh
+```

-### Install from npm
+#### NPM (recommended)

 ```bash
 npm install -g @qwen-code/qwen-code@latest
+qwen --version
 ```

 ### Install from source

 ```bash
 git clone https://github.com/QwenLM/qwen-code.git
 cd qwen-code
 npm install
 npm install -g .
 ```

-### Install globally with Homebrew (macOS/Linux)
+#### Homebrew (macOS, Linux)

 ```bash
 brew install qwen-code
 ```

 ## VS Code Extension

 In addition to the CLI tool, Qwen Code also provides a **VS Code extension** that brings AI-powered coding assistance directly into your editor with features like file system operations, native diffing, interactive chat, and more.

 > 📦 The extension is currently in development. For installation, features, and development guide, see the [VS Code Extension README](./packages/vscode-ide-companion/README.md).

 ## Quick Start

 ```bash
-# Start Qwen Code
+# Start Qwen Code (interactive)
 qwen

-# Example commands
-> Explain this codebase structure
-> Help me refactor this function
-> Generate unit tests for this module
+# Then, in the session:
+/help
+/auth
 ```

-### Session Management
+On first use, you'll be prompted to sign in. You can run `/auth` anytime to switch authentication methods.

-Control your token usage with configurable session limits to optimize costs and performance.
+Example prompts:

-#### Configure Session Token Limit
-
-Create or edit `.qwen/settings.json` in your home directory:
-
-```json
-{
-  "sessionTokenLimit": 32000
-}
-```
+```text
+What does this project do?
+Explain the codebase structure.
+Help me refactor this function.
+Generate unit tests for this module.
+```

-#### Session Commands
-
-- **`/compress`** - Compress conversation history to continue within token limits
-- **`/clear`** - Clear all conversation history and start fresh
-- **`/stats`** - Check current token usage and limits
-
-> 📝 **Note**: Session token limit applies to a single conversation, not cumulative API calls.
-
-### Vision Model Configuration
-
-Qwen Code includes intelligent vision model auto-switching that detects images in your input and can automatically switch to vision-capable models for multimodal analysis. **This feature is enabled by default** - when you include images in your queries, you'll see a dialog asking how you'd like to handle the vision model switch.
-
-#### Skip the Switch Dialog (Optional)
-
-If you don't want to see the interactive dialog each time, configure the default behavior in your `.qwen/settings.json`:
-
-```json
-{
-  "experimental": {
-    "vlmSwitchMode": "once"
-  }
-}
-```
-
-**Available modes:**
-
-- **`"once"`** - Switch to vision model for this query only, then revert
-- **`"session"`** - Switch to vision model for the entire session
-- **`"persist"`** - Continue with current model (no switching)
-- **Not set** - Show interactive dialog each time (default)
-
-#### Command Line Override
-
-You can also set the behavior via command line:
-
-```bash
-# Switch once per query
-qwen --vlm-switch-mode once
-
-# Switch for entire session
-qwen --vlm-switch-mode session
-
-# Never switch automatically
-qwen --vlm-switch-mode persist
-```
-
-#### Disable Vision Models (Optional)
-
-To completely disable vision model support, add to your `.qwen/settings.json`:
-
-```json
-{
-  "experimental": {
-    "visionModelPreview": false
-  }
-}
-```
-
-> 💡 **Tip**: In YOLO mode (`--yolo`), vision switching happens automatically without prompts when images are detected.
-
-### Authorization
-
-Choose your preferred authentication method based on your needs:
-
-#### 1. Qwen OAuth (🚀 Recommended - Start in 30 seconds)
-
-The easiest way to get started - completely free with generous quotas:
-
-```bash
-# Just run this command and follow the browser authentication
-qwen
-```
-
-**What happens:**
-
-1. **Instant Setup**: CLI opens your browser automatically
-2. **One-Click Login**: Authenticate with your qwen.ai account
-3. **Automatic Management**: Credentials cached locally for future use
-4. **No Configuration**: Zero setup required - just start coding!
-
-**Free Tier Benefits:**
-
-- ✅ **2,000 requests/day** (no token counting needed)
-- ✅ **60 requests/minute** rate limit
-- ✅ **Automatic credential refresh**
-- ✅ **Zero cost** for individual users
-- ℹ️ **Note**: Model fallback may occur to maintain service quality
-
-#### 2. OpenAI-Compatible API
-
-Use API keys for OpenAI or other compatible providers:
-
-**Configuration Methods:**
-
-1. **Environment Variables**
-
-   ```bash
-   export OPENAI_API_KEY="your_api_key_here"
-   export OPENAI_BASE_URL="your_api_endpoint"
-   export OPENAI_MODEL="your_model_choice"
-   ```
-
-2. **Project `.env` File**
-   Create a `.env` file in your project root:
-   ```env
-   OPENAI_API_KEY=your_api_key_here
-   OPENAI_BASE_URL=your_api_endpoint
-   OPENAI_MODEL=your_model_choice
-   ```
-
-**API Provider Options**
-
-> ⚠️ **Regional Notice:**
->
-> - **Mainland China**: Use Alibaba Cloud Bailian or ModelScope
-> - **International**: Use Alibaba Cloud ModelStudio or OpenRouter

 <details>
-<summary><b>🇨🇳 For Users in Mainland China</b></summary>
+<summary>Click to watch a demo video</summary>

-**Option 1: Alibaba Cloud Bailian** ([Apply for API Key](https://bailian.console.aliyun.com/))
-
-```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
-export OPENAI_MODEL="qwen3-coder-plus"
-```
-
-**Option 2: ModelScope (Free Tier)** ([Apply for API Key](https://modelscope.cn/docs/model-service/API-Inference/intro))
-
-- ✅ **2,000 free API calls per day**
-- ⚠️ Connect your Aliyun account to avoid authentication errors
-
-```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
-export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
-```
+<video src="https://cloud.video.taobao.com/vod/HLfyppnCHplRV9Qhz2xSqeazHeRzYtG-EYJnHAqtzkQ.mp4" controls>
+  Your browser does not support the video tag.
+</video>

 </details>

-<details>
-<summary><b>🌍 For International Users</b></summary>
+## Authentication

-**Option 1: Alibaba Cloud ModelStudio** ([Apply for API Key](https://modelstudio.console.alibabacloud.com/))
+Qwen Code supports two authentication methods:

-```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
-export OPENAI_MODEL="qwen3-coder-plus"
-```
+- **Qwen OAuth (recommended & free)**: sign in with your `qwen.ai` account in a browser.
+- **OpenAI-compatible API**: use `OPENAI_API_KEY` (and optionally a custom base URL / model).

-**Option 2: OpenRouter (Free Tier Available)** ([Apply for API Key](https://openrouter.ai/))
+#### Qwen OAuth (recommended)

-```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://openrouter.ai/api/v1"
-export OPENAI_MODEL="qwen/qwen3-coder:free"
-```
+Start `qwen`, then run:

-</details>
+```bash
+/auth
+```
+
+Choose **Qwen OAuth** and complete the browser flow. Your credentials are cached locally so you usually won't need to log in again.
+
+#### OpenAI-compatible API (API key)
+
+Environment variables (recommended for CI / headless environments):
+
+```bash
+export OPENAI_API_KEY="your-api-key-here"
+export OPENAI_BASE_URL="https://api.openai.com/v1" # optional
+export OPENAI_MODEL="gpt-4o" # optional
+```
+
+For details (including `.qwen/.env` loading and security notes), see the [authentication guide](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/auth/).

-## Usage Examples
+## Usage

-### 🔍 Explore Codebases
+As an open-source terminal agent, you can use Qwen Code in four primary ways:
+
+1. Interactive mode (terminal UI)
+2. Headless mode (scripts, CI)
+3. IDE integration (VS Code, Zed)
+4. TypeScript SDK
+
+#### Interactive mode

-```bash
-cd your-project/
-qwen
-
-# Architecture analysis
-> Describe the main pieces of this system's architecture
-> What are the key dependencies and how do they interact?
-> Find all API endpoints and their authentication methods
-```
+```bash
+cd your-project/
+qwen
+```

-### 💻 Code Development
+Run `qwen` in your project folder to launch the interactive terminal UI. Use `@` to reference local files (for example `@src/main.ts`).

-```bash
-# Refactoring
-> Refactor this function to improve readability and performance
-> Convert this class to use dependency injection
-> Split this large module into smaller, focused components
-
-# Code generation
-> Create a REST API endpoint for user management
-> Generate unit tests for the authentication module
-> Add error handling to all database operations
-```
+#### Headless mode

-### 🔄 Automate Workflows
+```bash
+cd your-project/
+qwen -p "your question"
+```

-```bash
-# Git automation
-> Analyze git commits from the last 7 days, grouped by feature
-> Create a changelog from recent commits
-> Find all TODO comments and create GitHub issues
-
-# File operations
-> Convert all images in this directory to PNG format
-> Rename all test files to follow the *.test.ts pattern
-> Find and remove all console.log statements
-```
+Use `-p` to run Qwen Code without the interactive UI—ideal for scripts, automation, and CI/CD. Learn more: [Headless mode](https://qwenlm.github.io/qwen-code-docs/en/users/features/headless).

-### 🐛 Debugging & Analysis
+#### IDE integration

-```bash
-# Performance analysis
-> Identify performance bottlenecks in this React component
-> Find all N+1 query problems in the codebase
-
-# Security audit
-> Check for potential SQL injection vulnerabilities
-> Find all hardcoded credentials or API keys
-```
+Use Qwen Code inside your editor (VS Code and Zed):
+
+- [Use in VS Code](https://qwenlm.github.io/qwen-code-docs/en/users/integration-vscode/)
+- [Use in Zed](https://qwenlm.github.io/qwen-code-docs/en/users/integration-zed/)

-## Popular Tasks
+#### TypeScript SDK

-### 📚 Understand New Codebases
-
-```text
-> What are the core business logic components?
-> What security mechanisms are in place?
-> How does the data flow through the system?
-> What are the main design patterns used?
-> Generate a dependency graph for this module
-```
-
-### 🔨 Code Refactoring & Optimization
-
-```text
-> What parts of this module can be optimized?
-> Help me refactor this class to follow SOLID principles
-> Add proper error handling and logging
-> Convert callbacks to async/await pattern
-> Implement caching for expensive operations
-```
-
-### 📝 Documentation & Testing
-
-```text
-> Generate comprehensive JSDoc comments for all public APIs
-> Write unit tests with edge cases for this component
-> Create API documentation in OpenAPI format
-> Add inline comments explaining complex algorithms
-> Generate a README for this module
-```
-
-### 🚀 Development Acceleration
-
-```text
-> Set up a new Express server with authentication
-> Create a React component with TypeScript and tests
-> Implement a rate limiter middleware
-> Add database migrations for new schema
-> Configure CI/CD pipeline for this project
-```
+Build on top of Qwen Code with the TypeScript SDK:
+
+- [Use the Qwen Code SDK](./packages/sdk-typescript/README.md)

 ## Commands & Shortcuts

@@ -386,6 +156,7 @@ qwen
 - `/clear` - Clear conversation history
 - `/compress` - Compress history to save tokens
 - `/stats` - Show current session information
+- `/bug` - Submit a bug report
 - `/exit` or `/quit` - Exit Qwen Code

 ### Keyboard Shortcuts

@@ -394,6 +165,19 @@ qwen
 - `Ctrl+D` - Exit (on empty line)
 - `Up/Down` - Navigate command history

+> Learn more about [Commands](https://qwenlm.github.io/qwen-code-docs/en/users/features/commands/)
+>
+> **Tip**: In YOLO mode (`--yolo`), vision switching happens automatically without prompts when images are detected. Learn more about [Approval Mode](https://qwenlm.github.io/qwen-code-docs/en/users/features/approval-mode/)
+
+## Configuration
+
+Qwen Code can be configured via `settings.json`, environment variables, and CLI flags.
+
+- **User settings**: `~/.qwen/settings.json`
+- **Project settings**: `.qwen/settings.json`
+
+See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/settings/) for available options and precedence.
+
 ## Benchmark Results

 ### Terminal-Bench Performance

@@ -403,24 +187,18 @@ qwen
 | Qwen Code | Qwen3-Coder-480A35 | 37.5% |
 | Qwen Code | Qwen3-Coder-30BA3B | 31.3% |

-## Development & Contributing
+## Ecosystem

-See [CONTRIBUTING.md](./CONTRIBUTING.md) to learn how to contribute to the project.
+Looking for a graphical interface?

-For detailed authentication setup, see the [authentication guide](./docs/cli/authentication.md).
+- [**Gemini CLI Desktop**](https://github.com/Piebald-AI/gemini-cli-desktop) A cross-platform desktop/web/mobile UI for Qwen Code

 ## Troubleshooting

-If you encounter issues, check the [troubleshooting guide](docs/troubleshooting.md).
+If you encounter issues, check the [troubleshooting guide](https://qwenlm.github.io/qwen-code-docs/en/users/support/troubleshooting/).
+
+To report a bug from within the CLI, run `/bug` and include a short title and repro steps.

 ## Acknowledgments

 This project is based on [Google Gemini CLI](https://github.com/google-gemini/gemini-cli). We acknowledge and appreciate the excellent work of the Gemini CLI team. Our main contribution focuses on parser-level adaptations to better support Qwen-Coder models.

 ## License

 [LICENSE](./LICENSE)

 ## Star History

 [](https://www.star-history.com/#QwenLM/qwen-code&Date)
````
```diff
@@ -43,6 +43,7 @@ Qwen Code uses JSON settings files for persistent configuration. There are four
 In addition to a project settings file, a project's `.qwen` directory can contain other project-specific files related to Qwen Code's operation, such as:

 - [Custom sandbox profiles](../features/sandbox) (e.g. `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
+- [Agent Skills](../features/skills) (experimental) under `.qwen/skills/` (each Skill is a directory containing a `SKILL.md`).

 ### Available settings in `settings.json`

@@ -380,6 +381,8 @@ Arguments passed directly when running the CLI can override other configurations
 | `--telemetry-otlp-protocol` | | Sets the OTLP protocol for telemetry (`grpc` or `http`). | | Defaults to `grpc`. See [telemetry](../../developers/development/telemetry) for more information. |
 | `--telemetry-log-prompts` | | Enables logging of prompts for telemetry. | | See [telemetry](../../developers/development/telemetry) for more information. |
 | `--checkpointing` | | Enables [checkpointing](../features/checkpointing). | | |
 | `--experimental-acp` | | Enables ACP mode (Agent Control Protocol). Useful for IDE/editor integrations like [Zed](../integration-zed). | | Experimental. |
+| `--experimental-skills` | | Enables experimental [Agent Skills](../features/skills) (registers the `skill` tool and loads Skills from `.qwen/skills/` and `~/.qwen/skills/`). | | Experimental. |
 | `--extensions` | `-e` | Specifies a list of extensions to use for the session. | Extension names | If not provided, all available extensions are used. Use the special term `qwen -e none` to disable all extensions. Example: `qwen -e my-extension -e my-other-extension` |
 | `--list-extensions` | `-l` | Lists all available extensions and exits. | | |
 | `--proxy` | | Sets the proxy for the CLI. | Proxy URL | Example: `--proxy http://localhost:7890`. |
```
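The flags in this table compose on one command line. For example, combining only options documented above:

```bash
# Enable experimental Skills, disable all extensions for the session,
# and route traffic through a local proxy.
qwen --experimental-skills -e none --proxy http://localhost:7890
```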
```diff
@@ -1,6 +1,7 @@
 export default {
   commands: 'Commands',
   'sub-agents': 'SubAgents',
+  skills: 'Skills (Experimental)',
   headless: 'Headless Mode',
   checkpointing: {
     display: 'hidden',
```
```diff
@@ -189,19 +189,20 @@ qwen -p "Write code" --output-format stream-json --include-partial-messages | jq

 Key command-line options for headless usage:

-| Option | Description | Example |
-| ---------------------------- | --------------------------------------------------- | ------------------------------------------------------------------------ |
-| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
-| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
-| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
-| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
-| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
-| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
-| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
-| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
-| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
-| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
-| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
+| Option | Description | Example |
+| ---------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------ |
+| `--prompt`, `-p` | Run in headless mode | `qwen -p "query"` |
+| `--output-format`, `-o` | Specify output format (text, json, stream-json) | `qwen -p "query" --output-format json` |
+| `--input-format` | Specify input format (text, stream-json) | `qwen --input-format text --output-format stream-json` |
+| `--include-partial-messages` | Include partial messages in stream-json output | `qwen -p "query" --output-format stream-json --include-partial-messages` |
+| `--debug`, `-d` | Enable debug mode | `qwen -p "query" --debug` |
+| `--all-files`, `-a` | Include all files in context | `qwen -p "query" --all-files` |
+| `--include-directories` | Include additional directories | `qwen -p "query" --include-directories src,docs` |
+| `--yolo`, `-y` | Auto-approve all actions | `qwen -p "query" --yolo` |
+| `--approval-mode` | Set approval mode | `qwen -p "query" --approval-mode auto_edit` |
+| `--continue` | Resume the most recent session for this project | `qwen --continue -p "Pick up where we left off"` |
+| `--resume [sessionId]` | Resume a specific session (or choose interactively) | `qwen --resume 123e... -p "Finish the refactor"` |
+| `--experimental-skills` | Enable experimental Skills (registers the `skill` tool) | `qwen --experimental-skills -p "What Skills are available?"` |

 For complete details on all available configuration options, settings files, and environment variables, see the [Configuration Guide](../configuration/settings).
```
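As a worked example of the options above, a small headless pipeline might look like this — a sketch assuming `jq` is installed; the exact field layout of `--output-format json` is not shown here, so inspect the output before scripting against specific keys:

```bash
# Ask a question non-interactively and capture structured output.
qwen -p "Summarize this repository's structure" --output-format json > answer.json

# Pretty-print the captured JSON for inspection.
jq . answer.json
```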
**docs/users/features/skills.md** (new file, +282 lines)

# Agent Skills (Experimental)

> Create, manage, and share Skills to extend Qwen Code’s capabilities.

This guide shows you how to create, use, and manage Agent Skills in **Qwen Code**. Skills are modular capabilities that extend the model’s effectiveness through organized folders containing instructions (and optionally scripts/resources).

> [!note]
>
> Skills are currently **experimental** and must be enabled with `--experimental-skills`.

## Prerequisites

- Qwen Code (recent version)
- Run with the experimental flag enabled:

  ```bash
  qwen --experimental-skills
  ```

- Basic familiarity with Qwen Code ([Quickstart](../quickstart.md))

## What are Agent Skills?

Agent Skills package expertise into discoverable capabilities. Each Skill consists of a `SKILL.md` file with instructions that the model can load when relevant, plus optional supporting files like scripts and templates.

### How Skills are invoked

Skills are **model-invoked** — the model autonomously decides when to use them based on your request and the Skill’s description. This is different from slash commands, which are **user-invoked** (you explicitly type `/command`).

### Benefits

- Extend Qwen Code for your workflows
- Share expertise across your team via git
- Reduce repetitive prompting
- Compose multiple Skills for complex tasks

## Create a Skill

Skills are stored as directories containing a `SKILL.md` file.

### Personal Skills

Personal Skills are available across all your projects. Store them in `~/.qwen/skills/`:

```bash
mkdir -p ~/.qwen/skills/my-skill-name
```

Use personal Skills for:

- Your individual workflows and preferences
- Experimental Skills you’re developing
- Personal productivity helpers

### Project Skills

Project Skills are shared with your team. Store them in `.qwen/skills/` within your project:

```bash
mkdir -p .qwen/skills/my-skill-name
```

Use project Skills for:

- Team workflows and conventions
- Project-specific expertise
- Shared utilities and scripts

Project Skills can be checked into git and automatically become available to teammates.

## Write `SKILL.md`

Create a `SKILL.md` file with YAML frontmatter and Markdown content:

```yaml
---
name: your-skill-name
description: Brief description of what this Skill does and when to use it
---

# Your Skill Name

## Instructions
Provide clear, step-by-step guidance for Qwen Code.

## Examples
Show concrete examples of using this Skill.
```

### Field requirements

Qwen Code currently validates that:

- `name` is a non-empty string
- `description` is a non-empty string

Recommended conventions (not strictly enforced yet):

- Use lowercase letters, numbers, and hyphens in `name`
- Make `description` specific: include both **what** the Skill does and **when** to use it (key words users will naturally mention)

## Add supporting files

Create additional files alongside `SKILL.md`:

```text
my-skill/
├── SKILL.md (required)
├── reference.md (optional documentation)
├── examples.md (optional examples)
├── scripts/
│   └── helper.py (optional utility)
└── templates/
    └── template.txt (optional template)
```

Reference these files from `SKILL.md`:

````markdown
For advanced usage, see [reference.md](reference.md).

Run the helper script:

```bash
python scripts/helper.py input.txt
```
````

## View available Skills

When `--experimental-skills` is enabled, Qwen Code discovers Skills from:

- Personal Skills: `~/.qwen/skills/`
- Project Skills: `.qwen/skills/`

To view available Skills, ask Qwen Code directly:

```text
What Skills are available?
```

Or inspect the filesystem:

```bash
# List personal Skills
ls ~/.qwen/skills/

# List project Skills (if in a project directory)
ls .qwen/skills/

# View a specific Skill’s content
cat ~/.qwen/skills/my-skill/SKILL.md
```

## Test a Skill

After creating a Skill, test it by asking questions that match your description.

Example: if your description mentions “PDF files”:

```text
Can you help me extract text from this PDF?
```

The model autonomously decides to use your Skill if it matches the request — you don’t need to explicitly invoke it.
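For instance, a hypothetical `~/.qwen/skills/pdf-extractor/SKILL.md` that would match the request above might look like this — illustrative only; the name, description, and `scripts/extract.py` helper are made up for the example:

```markdown
---
name: pdf-extractor
description: Extract text and tables from PDF files. Use when the user asks to read, parse, or pull content out of a PDF.
---

# PDF Extractor

## Instructions

1. Confirm the path to the PDF the user wants processed.
2. Run `python scripts/extract.py <path>` and summarize the output.

## Examples

"Extract the tables from report.pdf" → run the script and return the tables as Markdown.
```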
## Debug a Skill

If Qwen Code doesn’t use your Skill, check these common issues:

### Make the description specific

Too vague:

```yaml
description: Helps with documents
```

Specific:

```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDFs, forms, or document extraction.
```

### Verify file path

- Personal Skills: `~/.qwen/skills/<skill-name>/SKILL.md`
- Project Skills: `.qwen/skills/<skill-name>/SKILL.md`

```bash
# Personal
ls ~/.qwen/skills/my-skill/SKILL.md

# Project
ls .qwen/skills/my-skill/SKILL.md
```

### Check YAML syntax

Invalid YAML prevents the Skill metadata from loading correctly.

```bash
cat SKILL.md | head -n 15
```

Ensure:

- Opening `---` on line 1
- Closing `---` before Markdown content
- Valid YAML syntax (no tabs, correct indentation)
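One quick way to isolate the frontmatter for inspection — a sketch assuming the first two `---` lines delimit it:

```bash
# Print only the YAML frontmatter (everything between the first two '---' lines).
awk '/^---$/{n++; next} n==1' SKILL.md
```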
### View errors

Run Qwen Code with debug mode to see Skill loading errors:

```bash
qwen --experimental-skills --debug
```

## Share Skills with your team

You can share Skills through project repositories:

1. Add the Skill under `.qwen/skills/`
2. Commit and push
3. Teammates pull the changes and run with `--experimental-skills`

```bash
git add .qwen/skills/
git commit -m "Add team Skill for PDF processing"
git push
```

## Update a Skill

Edit `SKILL.md` directly:

```bash
# Personal Skill
code ~/.qwen/skills/my-skill/SKILL.md

# Project Skill
code .qwen/skills/my-skill/SKILL.md
```

Changes take effect the next time you start Qwen Code. If Qwen Code is already running, restart it to load the updates.

## Remove a Skill

Delete the Skill directory:

```bash
# Personal
rm -rf ~/.qwen/skills/my-skill

# Project
rm -rf .qwen/skills/my-skill
git commit -m "Remove unused Skill"
```

## Best practices

### Keep Skills focused

One Skill should address one capability:

- Focused: “PDF form filling”, “Excel analysis”, “Git commit messages”
- Too broad: “Document processing” (split into smaller Skills)

### Write clear descriptions

Help the model discover when to use Skills by including specific triggers:

```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or .xlsx data.
```

### Test with your team

- Does the Skill activate when expected?
- Are the instructions clear?
- Are there missing examples or edge cases?
```diff
@@ -1,4 +1,6 @@
 # Qwen Code overview
+[](https://npm-compare.com/@qwen-code/qwen-code)
+[](https://www.npmjs.com/package/@qwen-code/qwen-code)

 > Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.

@@ -46,7 +48,7 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart

 > [!note]
 >
-> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. You can search for **Qwen Code** in the VS Code Marketplace and download it.
+> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. Download and install the [Qwen Code Companion](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) now.

 ## What Qwen Code does for you
```
```diff
@@ -5,8 +5,6 @@
  */

 import { describe, it, expect } from 'vitest';
-import { existsSync } from 'node:fs';
-import * as path from 'node:path';
 import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';

 describe('file-system', () => {
@@ -202,8 +200,8 @@ describe('file-system', () => {
     const readAttempt = toolLogs.find(
       (log) => log.toolRequest.name === 'read_file',
     );
-    const writeAttempt = toolLogs.find(
-      (log) => log.toolRequest.name === 'write_file',
+    const editAttempt = toolLogs.find(
+      (log) => log.toolRequest.name === 'edit_file',
     );
     const successfulReplace = toolLogs.find(
       (log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +224,15 @@ describe('file-system', () => {

     // CRITICAL: Verify that no matter what the model did, it never successfully
     // wrote or replaced anything.
-    if (writeAttempt) {
+    if (editAttempt) {
       console.error(
-        'A write_file attempt was made when no file should be written.',
+        'A edit_file attempt was made when no file should be written.',
       );
       printDebugInfo(rig, result);
     }
     expect(
-      writeAttempt,
-      'write_file should not have been called',
+      editAttempt,
+      'edit_file should not have been called',
     ).toBeUndefined();

     if (successfulReplace) {
@@ -245,12 +243,5 @@ describe('file-system', () => {
       successfulReplace,
       'A successful replace should not have occurred',
     ).toBeUndefined();
-
-    // Final verification: ensure the file was not created.
-    const filePath = path.join(rig.testDir!, fileName);
-    const fileExists = existsSync(filePath);
-    expect(fileExists, 'The non-existent file should not be created').toBe(
-      false,
-    );
   });
 });
```
@@ -952,7 +952,8 @@ describe('Permission Control (E2E)', () => {
|
||||
TEST_TIMEOUT,
|
||||
);
|
||||
|
||||
it(
|
||||
// FIXME: This test is flaky and sometimes fails with no tool calls.
|
||||
it.skip(
|
||||
'should allow read-only tools without restrictions',
|
||||
async () => {
|
||||
// Create test files for the model to read
|
||||
|
||||
143
package-lock.json
generated
@@ -134,6 +134,36 @@
        "node": ">=6.0.0"
      }
    },
    "node_modules/@anthropic-ai/sdk": {
      "version": "0.36.3",
      "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.36.3.tgz",
      "integrity": "sha512-+c0mMLxL/17yFZ4P5+U6bTWiCSFZUKJddrv01ud2aFBWnTPLdRncYV76D3q1tqfnL7aCnhRtykFnoCFzvr4U3Q==",
      "license": "MIT",
      "dependencies": {
        "@types/node": "^18.11.18",
        "@types/node-fetch": "^2.6.4",
        "abort-controller": "^3.0.0",
        "agentkeepalive": "^4.2.1",
        "form-data-encoder": "1.7.2",
        "formdata-node": "^4.3.2",
        "node-fetch": "^2.6.7"
      }
    },
    "node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
      "version": "18.19.130",
      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
      "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
      "license": "MIT",
      "dependencies": {
        "undici-types": "~5.26.4"
      }
    },
    "node_modules/@anthropic-ai/sdk/node_modules/undici-types": {
      "version": "5.26.5",
      "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
      "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
      "license": "MIT"
    },
    "node_modules/@asamuzakjp/css-color": {
      "version": "3.2.0",
      "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz",
@@ -3822,6 +3852,16 @@
        "undici-types": "~6.21.0"
      }
    },
    "node_modules/@types/node-fetch": {
      "version": "2.6.13",
      "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
      "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
      "license": "MIT",
      "dependencies": {
        "@types/node": "*",
        "form-data": "^4.0.4"
      }
    },
    "node_modules/@types/normalize-package-data": {
      "version": "2.4.4",
      "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz",
@@ -4820,7 +4860,6 @@
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
      "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "event-target-shim": "^5.0.0"
@@ -4907,6 +4946,18 @@
        "node": ">= 14"
      }
    },
    "node_modules/agentkeepalive": {
      "version": "4.6.0",
      "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
      "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
      "license": "MIT",
      "dependencies": {
        "humanize-ms": "^1.2.1"
      },
      "engines": {
        "node": ">= 8.0.0"
      }
    },
    "node_modules/ajv": {
      "version": "6.12.6",
      "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@@ -5478,7 +5529,6 @@
      "version": "0.4.0",
      "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
      "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
      "dev": true,
      "license": "MIT"
    },
    "node_modules/atomically": {
@@ -6437,7 +6487,6 @@
      "version": "1.0.8",
      "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
      "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "delayed-stream": "~1.0.0"
@@ -7063,7 +7112,6 @@
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
      "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=0.4.0"
@@ -7576,7 +7624,6 @@
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
      "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "es-errors": "^1.3.0",
@@ -8106,7 +8153,6 @@
      "version": "5.0.1",
      "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
      "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
      "dev": true,
      "license": "MIT",
      "engines": {
        "node": ">=6"
@@ -8652,7 +8698,6 @@
      "version": "4.0.4",
      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
      "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "asynckit": "^0.4.0",
@@ -8665,11 +8710,16 @@
        "node": ">= 6"
      }
    },
    "node_modules/form-data-encoder": {
      "version": "1.7.2",
      "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
      "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
      "license": "MIT"
    },
    "node_modules/form-data/node_modules/mime-types": {
      "version": "2.1.35",
      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
      "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "mime-db": "1.52.0"
@@ -8678,6 +8728,28 @@
        "node": ">= 0.6"
      }
    },
    "node_modules/formdata-node": {
      "version": "4.4.1",
      "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
      "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
      "license": "MIT",
      "dependencies": {
        "node-domexception": "1.0.0",
        "web-streams-polyfill": "4.0.0-beta.3"
      },
      "engines": {
        "node": ">= 12.20"
      }
    },
    "node_modules/formdata-node/node_modules/web-streams-polyfill": {
      "version": "4.0.0-beta.3",
      "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
      "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
      "license": "MIT",
      "engines": {
        "node": ">= 14"
      }
    },
    "node_modules/formdata-polyfill": {
      "version": "4.0.10",
      "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
@@ -9262,7 +9334,6 @@
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
      "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "has-symbols": "^1.0.3"
@@ -9441,6 +9512,15 @@
        "node": ">=16.17.0"
      }
    },
    "node_modules/humanize-ms": {
      "version": "1.2.1",
      "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
      "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
      "license": "MIT",
      "dependencies": {
        "ms": "^2.0.0"
      }
    },
    "node_modules/husky": {
      "version": "9.1.7",
      "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz",
@@ -11940,6 +12020,48 @@
        "node": ">=10.5.0"
      }
    },
    "node_modules/node-fetch": {
      "version": "2.7.0",
      "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
      "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
      "license": "MIT",
      "dependencies": {
        "whatwg-url": "^5.0.0"
      },
      "engines": {
        "node": "4.x || >=6.0.0"
      },
      "peerDependencies": {
        "encoding": "^0.1.0"
      },
      "peerDependenciesMeta": {
        "encoding": {
          "optional": true
        }
      }
    },
    "node_modules/node-fetch/node_modules/tr46": {
      "version": "0.0.3",
      "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
      "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
      "license": "MIT"
    },
    "node_modules/node-fetch/node_modules/webidl-conversions": {
      "version": "3.0.1",
      "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
      "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
      "license": "BSD-2-Clause"
    },
    "node_modules/node-fetch/node_modules/whatwg-url": {
      "version": "5.0.0",
      "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
      "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
      "license": "MIT",
      "dependencies": {
        "tr46": "~0.0.3",
        "webidl-conversions": "^3.0.0"
      }
    },
    "node_modules/node-pty": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/node-pty/-/node-pty-1.0.0.tgz",
@@ -17834,6 +17956,7 @@
      "version": "0.6.0",
      "hasInstallScript": true,
      "dependencies": {
        "@anthropic-ai/sdk": "^0.36.1",
        "@google/genai": "1.30.0",
        "@modelcontextprotocol/sdk": "^1.25.1",
        "@opentelemetry/api": "^1.9.0",
@@ -18470,7 +18593,7 @@
    },
    "packages/sdk-typescript": {
      "name": "@qwen-code/sdk",
-      "version": "0.6.0",
+      "version": "0.1.0",
      "license": "Apache-2.0",
      "dependencies": {
        "@modelcontextprotocol/sdk": "^1.25.1",

@@ -26,6 +26,20 @@ export function validateAuthMethod(authMethod: string): string | null {
    return null;
  }

  if (authMethod === AuthType.USE_ANTHROPIC) {
    const hasApiKey = process.env['ANTHROPIC_API_KEY'];
    if (!hasApiKey) {
      return 'ANTHROPIC_API_KEY environment variable not found.';
    }

    const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
    if (!hasBaseUrl) {
      return 'ANTHROPIC_BASE_URL environment variable not found.';
    }

    return null;
  }

  if (authMethod === AuthType.USE_GEMINI) {
    const hasApiKey = process.env['GEMINI_API_KEY'];
    if (!hasApiKey) {
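Behaviorally, the new Anthropic branch fails closed unless both environment variables are present. A minimal usage sketch (the import paths below are assumptions for illustration, not taken from the repo layout):

```ts
// Hypothetical exercise of the validator added above; import paths are assumed.
import { validateAuthMethod } from './config/auth.js';
import { AuthType } from '@qwen-code/qwen-code-core';

process.env['ANTHROPIC_API_KEY'] = 'test-key';
delete process.env['ANTHROPIC_BASE_URL'];

// The API key alone is not enough; the base URL check still trips:
console.log(validateAuthMethod(AuthType.USE_ANTHROPIC));
// -> 'ANTHROPIC_BASE_URL environment variable not found.'

process.env['ANTHROPIC_BASE_URL'] = 'https://example.invalid';
console.log(validateAuthMethod(AuthType.USE_ANTHROPIC));
// -> null (both checks pass)
```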
@@ -2114,7 +2114,14 @@ describe('loadCliConfig model selection', () => {
  });

  it('always prefers model from argvs', async () => {
-    process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
+    process.argv = [
+      'node',
+      'script.js',
+      '--auth-type',
+      'openai',
+      '--model',
+      'qwen3-coder-plus',
+    ];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
@@ -2134,7 +2141,14 @@ describe('loadCliConfig model selection', () => {
  });

  it('selects the model from argvs if provided', async () => {
-    process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
+    process.argv = [
+      'node',
+      'script.js',
+      '--auth-type',
+      'openai',
+      '--model',
+      'qwen3-coder-plus',
+    ];
    const argv = await parseArguments({} as Settings);
    const config = await loadCliConfig(
      {
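For reference, the argv arrays above correspond to an invocation like `qwen --auth-type openai --model qwen3-coder-plus` (the `qwen` binary name is assumed here): the explicit `--model` flag is expected to win over any environment or settings default.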
@@ -112,6 +112,7 @@ export interface CliArgs {
  allowedMcpServerNames: string[] | undefined;
  allowedTools: string[] | undefined;
  experimentalAcp: boolean | undefined;
  experimentalSkills: boolean | undefined;
  extensions: string[] | undefined;
  listExtensions: boolean | undefined;
  openaiLogging: boolean | undefined;
@@ -307,6 +308,11 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
      type: 'boolean',
      description: 'Starts the agent in ACP mode',
    })
    .option('experimental-skills', {
      type: 'boolean',
      description: 'Enable experimental Skills feature',
      default: false,
    })
    .option('channel', {
      type: 'string',
      choices: ['VSCode', 'ACP', 'SDK', 'CI'],
@@ -462,6 +468,7 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
      type: 'string',
      choices: [
        AuthType.USE_OPENAI,
        AuthType.USE_ANTHROPIC,
        AuthType.QWEN_OAUTH,
        AuthType.USE_GEMINI,
        AuthType.USE_VERTEX_AI,
@@ -870,11 +877,30 @@ export async function loadCliConfig(
    );
  }

  const selectedAuthType =
    (argv.authType as AuthType | undefined) ||
    settings.security?.auth?.selectedType;

  const apiKey =
    (selectedAuthType === AuthType.USE_OPENAI
      ? argv.openaiApiKey ||
        process.env['OPENAI_API_KEY'] ||
        settings.security?.auth?.apiKey
      : '') || '';
  const baseUrl =
    (selectedAuthType === AuthType.USE_OPENAI
      ? argv.openaiBaseUrl ||
        process.env['OPENAI_BASE_URL'] ||
        settings.security?.auth?.baseUrl
      : '') || '';
  const resolvedModel =
    argv.model ||
-    process.env['OPENAI_MODEL'] ||
-    process.env['QWEN_MODEL'] ||
-    settings.model?.name;
+    (selectedAuthType === AuthType.USE_OPENAI
+      ? process.env['OPENAI_MODEL'] ||
+        process.env['QWEN_MODEL'] ||
+        settings.model?.name
+      : '') ||
+    '';

  const sandboxConfig = await loadSandboxConfig(settings, argv);
  const screenReader =
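Net effect of this hunk: the model resolves from the `--model` flag first; then, only when the selected auth type is `USE_OPENAI`, from the `OPENAI_MODEL` and `QWEN_MODEL` environment variables and `settings.model?.name`; any other auth type falls back to an empty string rather than inheriting those OpenAI-specific defaults.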
@@ -956,27 +982,20 @@ export async function loadCliConfig(
    maxSessionTurns:
      argv.maxSessionTurns ?? settings.model?.maxSessionTurns ?? -1,
    experimentalZedIntegration: argv.experimentalAcp || false,
    experimentalSkills: argv.experimentalSkills || false,
    listExtensions: argv.listExtensions || false,
    extensions: allExtensions,
    blockedMcpServers,
    noBrowser: !!process.env['NO_BROWSER'],
-    authType:
-      (argv.authType as AuthType | undefined) ||
-      settings.security?.auth?.selectedType,
+    authType: selectedAuthType,
    inputFormat,
    outputFormat,
    includePartialMessages,
    generationConfig: {
      ...(settings.model?.generationConfig || {}),
      model: resolvedModel,
-      apiKey:
-        argv.openaiApiKey ||
-        process.env['OPENAI_API_KEY'] ||
-        settings.security?.auth?.apiKey,
-      baseUrl:
-        argv.openaiBaseUrl ||
-        process.env['OPENAI_BASE_URL'] ||
-        settings.security?.auth?.baseUrl,
+      apiKey,
+      baseUrl,
      enableOpenAILogging:
        (typeof argv.openaiLogging === 'undefined'
          ? settings.model?.enableOpenAILogging
@@ -461,6 +461,7 @@ describe('gemini.tsx main function kitty protocol', () => {
      allowedMcpServerNames: undefined,
      allowedTools: undefined,
      experimentalAcp: undefined,
      experimentalSkills: undefined,
      extensions: undefined,
      listExtensions: undefined,
      openaiLogging: undefined,
@@ -228,6 +228,7 @@ export const useAuthCommand = (
      ![
        AuthType.QWEN_OAUTH,
        AuthType.USE_OPENAI,
        AuthType.USE_ANTHROPIC,
        AuthType.USE_GEMINI,
        AuthType.USE_VERTEX_AI,
      ].includes(defaultAuthType as AuthType)
@@ -240,6 +241,7 @@ export const useAuthCommand = (
        validValues: [
          AuthType.QWEN_OAUTH,
          AuthType.USE_OPENAI,
          AuthType.USE_ANTHROPIC,
          AuthType.USE_GEMINI,
          AuthType.USE_VERTEX_AI,
        ].join(', '),
@@ -526,10 +526,15 @@ export const useGeminiStream = (
        return currentThoughtBuffer;
      }

-      const newThoughtBuffer = currentThoughtBuffer + thoughtText;
+      let newThoughtBuffer = currentThoughtBuffer + thoughtText;
+
+      const pendingType = pendingHistoryItemRef.current?.type;
+      const isPendingThought =
+        pendingType === 'gemini_thought' ||
+        pendingType === 'gemini_thought_content';

      // If we're not already showing a thought, start a new one
-      if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
+      if (!isPendingThought) {
        // If there's a pending non-thought item, finalize it first
        if (pendingHistoryItemRef.current) {
          addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -537,11 +542,37 @@ export const useGeminiStream = (
        setPendingHistoryItem({ type: 'gemini_thought', text: '' });
      }

-      // Update the existing thought message with accumulated content
-      setPendingHistoryItem({
-        type: 'gemini_thought',
-        text: newThoughtBuffer,
-      });
+      // Split large thought messages for better rendering performance (same rationale
+      // as regular content streaming). This helps avoid terminal flicker caused by
+      // constantly re-rendering an ever-growing "pending" block.
+      const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
+      const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
+        isPendingThought && pendingType === 'gemini_thought_content'
+          ? 'gemini_thought_content'
+          : 'gemini_thought';
+
+      if (splitPoint === newThoughtBuffer.length) {
+        // Update the existing thought message with accumulated content
+        setPendingHistoryItem({
+          type: nextPendingType,
+          text: newThoughtBuffer,
+        });
+      } else {
+        const beforeText = newThoughtBuffer.substring(0, splitPoint);
+        const afterText = newThoughtBuffer.substring(splitPoint);
+        addItem(
+          {
+            type: nextPendingType,
+            text: beforeText,
+          },
+          userMessageTimestamp,
+        );
+        setPendingHistoryItem({
+          type: 'gemini_thought_content',
+          text: afterText,
+        });
+        newThoughtBuffer = afterText;
+      }

      // Also update the thought state for the loading indicator
      mergeThought(eventValue);
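The split mirrors how regular streamed content is chunked: once the accumulated thought text contains a safe boundary, everything before it is committed to history as a static item and only the tail keeps re-rendering as the pending block. A minimal standalone sketch of that invariant (assuming only that `findLastSafeSplitPoint` returns `text.length` when no safe boundary exists):

```ts
// Hypothetical illustration of the buffer-splitting invariant used above.
type Split = { committed: string; pending: string };

function splitThoughtBuffer(
  buffer: string,
  findLastSafeSplitPoint: (text: string) => number,
): Split {
  const splitPoint = findLastSafeSplitPoint(buffer);
  if (splitPoint === buffer.length) {
    // No safe boundary yet: the whole buffer stays pending and keeps re-rendering.
    return { committed: '', pending: buffer };
  }
  // Everything before the boundary becomes a static history item;
  // only the tail remains pending, so the re-rendered region stays small.
  return {
    committed: buffer.slice(0, splitPoint),
    pending: buffer.slice(splitPoint),
  };
}
```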
@@ -60,6 +60,11 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
  return id ? { id, label: id } : null;
}

export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
  const id = process.env['ANTHROPIC_MODEL']?.trim();
  return id ? { id, label: id } : null;
}

export function getAvailableModelsForAuthType(
  authType: AuthType,
): AvailableModel[] {
@@ -70,6 +75,10 @@ export function getAvailableModelsForAuthType(
      const openAIModel = getOpenAIAvailableModelFromEnv();
      return openAIModel ? [openAIModel] : [];
    }
    case AuthType.USE_ANTHROPIC: {
      const anthropicModel = getAnthropicAvailableModelFromEnv();
      return anthropicModel ? [anthropicModel] : [];
    }
    default:
      // For other auth types, return empty array for now
      // This can be expanded later according to the design doc
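With this branch in place, the Anthropic auth type sources its model list from a single environment variable: when `ANTHROPIC_MODEL` is set, the picker shows one entry whose id and label are its trimmed value; when it is unset, the list stays empty.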
@@ -20,6 +20,11 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
    getToolRegistry: () => ({
      getTool: (name: string) => tools[name],
    }),
    getContentGenerator: () => ({
      // Default to showing full thinking content during resume unless explicitly
      // summarized; tests don't care about summarized thinking behavior.
      useSummarizedThinking: () => false,
    }),
  }) as unknown as Config;

describe('resumeHistoryUtils', () => {
@@ -204,7 +204,11 @@ function convertToHistoryItems(
    const parts = record.message?.parts as Part[] | undefined;

    // Extract thought content
-    const thoughtText = extractThoughtTextFromParts(parts);
+    const thoughtText = !config
+      .getContentGenerator()
+      .useSummarizedThinking()
+      ? extractThoughtTextFromParts(parts)
+      : '';

    // Extract text content (non-function-call, non-thought)
    const text = extractTextFromParts(parts);
@@ -153,7 +153,8 @@ export async function getExtendedSystemInfo(

  // Get base URL if using OpenAI auth
  const baseUrl =
-    baseInfo.selectedAuthType === AuthType.USE_OPENAI
+    baseInfo.selectedAuthType === AuthType.USE_OPENAI ||
+    baseInfo.selectedAuthType === AuthType.USE_ANTHROPIC
      ? context.services.config?.getContentGeneratorConfig()?.baseUrl
      : undefined;

@@ -19,6 +19,9 @@ describe('validateNonInterActiveAuth', () => {
  let originalEnvVertexAi: string | undefined;
  let originalEnvGcp: string | undefined;
  let originalEnvOpenAiApiKey: string | undefined;
  let originalEnvQwenOauth: string | undefined;
  let originalEnvGoogleApiKey: string | undefined;
  let originalEnvAnthropicApiKey: string | undefined;
  let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
  let processExitSpy: ReturnType<typeof vi.spyOn<[code?: number], never>>;
  let refreshAuthMock: ReturnType<typeof vi.fn>;
@@ -29,10 +32,16 @@ describe('validateNonInterActiveAuth', () => {
    originalEnvVertexAi = process.env['GOOGLE_GENAI_USE_VERTEXAI'];
    originalEnvGcp = process.env['GOOGLE_GENAI_USE_GCA'];
    originalEnvOpenAiApiKey = process.env['OPENAI_API_KEY'];
    originalEnvQwenOauth = process.env['QWEN_OAUTH'];
    originalEnvGoogleApiKey = process.env['GOOGLE_API_KEY'];
    originalEnvAnthropicApiKey = process.env['ANTHROPIC_API_KEY'];
    delete process.env['GEMINI_API_KEY'];
    delete process.env['GOOGLE_GENAI_USE_VERTEXAI'];
    delete process.env['GOOGLE_GENAI_USE_GCA'];
    delete process.env['OPENAI_API_KEY'];
    delete process.env['QWEN_OAUTH'];
    delete process.env['GOOGLE_API_KEY'];
    delete process.env['ANTHROPIC_API_KEY'];
    consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
    processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
      throw new Error(`process.exit(${code}) called`);
@@ -80,6 +89,21 @@ describe('validateNonInterActiveAuth', () => {
    } else {
      delete process.env['OPENAI_API_KEY'];
    }
    if (originalEnvQwenOauth !== undefined) {
      process.env['QWEN_OAUTH'] = originalEnvQwenOauth;
    } else {
      delete process.env['QWEN_OAUTH'];
    }
    if (originalEnvGoogleApiKey !== undefined) {
      process.env['GOOGLE_API_KEY'] = originalEnvGoogleApiKey;
    } else {
      delete process.env['GOOGLE_API_KEY'];
    }
    if (originalEnvAnthropicApiKey !== undefined) {
      process.env['ANTHROPIC_API_KEY'] = originalEnvAnthropicApiKey;
    } else {
      delete process.env['ANTHROPIC_API_KEY'];
    }
    vi.restoreAllMocks();
  });

@@ -27,6 +27,9 @@ function getAuthTypeFromEnv(): AuthType | undefined {
  if (process.env['GOOGLE_API_KEY']) {
    return AuthType.USE_VERTEX_AI;
  }
  if (process.env['ANTHROPIC_API_KEY']) {
    return AuthType.USE_ANTHROPIC;
  }

  return undefined;
}

@@ -23,6 +23,7 @@
    "scripts/postinstall.js"
  ],
  "dependencies": {
    "@anthropic-ai/sdk": "^0.36.1",
    "@google/genai": "1.30.0",
    "@modelcontextprotocol/sdk": "^1.25.1",
    "@opentelemetry/api": "^1.9.0",
@@ -16,7 +16,6 @@ import {
  QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
-import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
  AuthType,
  createContentGeneratorConfig,
@@ -273,7 +272,7 @@ describe('Server Config (config.ts)', () => {
        authType,
        {
          model: MODEL,
-          baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
+          baseUrl: undefined,
        },
      );
      // Verify that contentGeneratorConfig is updated
@@ -54,6 +54,7 @@ import { canUseRipgrep } from '../utils/ripgrepUtils.js';
import { RipGrepTool } from '../tools/ripGrep.js';
import { ShellTool } from '../tools/shell.js';
import { SmartEditTool } from '../tools/smart-edit.js';
import { SkillTool } from '../tools/skill.js';
import { TaskTool } from '../tools/task.js';
import { TodoWriteTool } from '../tools/todoWrite.js';
import { ToolRegistry } from '../tools/tool-registry.js';
@@ -65,6 +66,7 @@ import { WriteFileTool } from '../tools/write-file.js';
import { ideContextStore } from '../ide/ideContext.js';
import { InputFormat, OutputFormat } from '../output/types.js';
import { PromptRegistry } from '../prompts/prompt-registry.js';
import { SkillManager } from '../skills/skill-manager.js';
import { SubagentManager } from '../subagents/subagent-manager.js';
import type { SubagentConfig } from '../subagents/types.js';
import {
@@ -94,7 +96,6 @@ import {
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
-import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
  SessionService,
@@ -305,6 +306,7 @@ export interface ConfigParameters {
  extensionContextFilePaths?: string[];
  maxSessionTurns?: number;
  sessionTokenLimit?: number;
  experimentalSkills?: boolean;
  experimentalZedIntegration?: boolean;
  listExtensions?: boolean;
  extensions?: GeminiCLIExtension[];
@@ -389,6 +391,7 @@ export class Config {
  private toolRegistry!: ToolRegistry;
  private promptRegistry!: PromptRegistry;
  private subagentManager!: SubagentManager;
  private skillManager!: SkillManager;
  private fileSystemService: FileSystemService;
  private contentGeneratorConfig!: ContentGeneratorConfig;
  private contentGenerator!: ContentGenerator;
@@ -458,6 +461,7 @@ export class Config {
    | undefined;
  private readonly cliVersion?: string;
  private readonly experimentalZedIntegration: boolean = false;
  private readonly experimentalSkills: boolean = false;
  private readonly chatRecordingEnabled: boolean;
  private readonly loadMemoryFromIncludeDirectories: boolean = false;
  private readonly webSearch?: {
@@ -557,6 +561,7 @@ export class Config {
    this.sessionTokenLimit = params.sessionTokenLimit ?? -1;
    this.experimentalZedIntegration =
      params.experimentalZedIntegration ?? false;
    this.experimentalSkills = params.experimentalSkills ?? false;
    this.listExtensions = params.listExtensions ?? false;
    this._extensions = params.extensions ?? [];
    this._blockedMcpServers = params.blockedMcpServers ?? [];
@@ -568,7 +573,7 @@ export class Config {
    this._generationConfig = {
      model: params.model,
      ...(params.generationConfig || {}),
-      baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
+      baseUrl: params.generationConfig?.baseUrl,
    };
    this.contentGeneratorConfig = this
      ._generationConfig as ContentGeneratorConfig;
@@ -644,6 +649,7 @@ export class Config {
    }
    this.promptRegistry = new PromptRegistry();
    this.subagentManager = new SubagentManager(this);
    this.skillManager = new SkillManager(this);

    // Load session subagents if they were provided before initialization
    if (this.sessionSubagents.length > 0) {
@@ -1066,6 +1072,10 @@ export class Config {
    return this.experimentalZedIntegration;
  }

  getExperimentalSkills(): boolean {
    return this.experimentalSkills;
  }

  getListExtensions(): boolean {
    return this.listExtensions;
  }
@@ -1296,6 +1306,10 @@ export class Config {
    return this.subagentManager;
  }

  getSkillManager(): SkillManager {
    return this.skillManager;
  }

  async createToolRegistry(
    sendSdkMcpMessage?: SendSdkMcpMessage,
  ): Promise<ToolRegistry> {
@@ -1338,6 +1352,9 @@ export class Config {
    };

    registerCoreTool(TaskTool, this);
    if (this.getExperimentalSkills()) {
      registerCoreTool(SkillTool, this);
    }
    registerCoreTool(LSTool, this);
    registerCoreTool(ReadFileTool, this);

@@ -126,6 +126,10 @@ export class Storage {
    return path.join(this.getExtensionsDir(), 'qwen-extension.json');
  }

  getUserSkillsDir(): string {
    return path.join(Storage.getGlobalQwenDir(), 'skills');
  }

  getHistoryFilePath(): string {
    return path.join(this.getProjectTempDir(), 'shell_history');
  }

@@ -0,0 +1,500 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type {
  CountTokensParameters,
  GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';

// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
  calculateTokens: vi.fn(),
  dispose: vi.fn(),
};

vi.mock('../../utils/request-tokenizer/index.js', () => ({
  getDefaultTokenizer: vi.fn(() => mockTokenizer),
  DefaultRequestTokenizer: vi.fn(() => mockTokenizer),
  disposeDefaultTokenizer: vi.fn(),
}));

type AnthropicCreateArgs = [unknown, { signal?: AbortSignal }?];

const anthropicMockState: {
  constructorOptions?: Record<string, unknown>;
  lastCreateArgs?: AnthropicCreateArgs;
  createImpl: ReturnType<typeof vi.fn>;
} = {
  constructorOptions: undefined,
  lastCreateArgs: undefined,
  createImpl: vi.fn(),
};

vi.mock('@anthropic-ai/sdk', () => {
  class AnthropicMock {
    messages: { create: (...args: AnthropicCreateArgs) => unknown };

    constructor(options: Record<string, unknown>) {
      anthropicMockState.constructorOptions = options;
      this.messages = {
        create: (...args: AnthropicCreateArgs) => {
          anthropicMockState.lastCreateArgs = args;
          return anthropicMockState.createImpl(...args);
        },
      };
    }
  }

  return {
    default: AnthropicMock,
    __anthropicState: anthropicMockState,
  };
});

// Now import the modules that depend on the mocked modules.
import type { Config } from '../../config/config.js';

const importGenerator = async (): Promise<{
  AnthropicContentGenerator: typeof import('./anthropicContentGenerator.js').AnthropicContentGenerator;
}> => import('./anthropicContentGenerator.js');

const importConverter = async (): Promise<{
  AnthropicContentConverter: typeof import('./converter.js').AnthropicContentConverter;
}> => import('./converter.js');

describe('AnthropicContentGenerator', () => {
  let mockConfig: Config;
  let anthropicState: {
    constructorOptions?: Record<string, unknown>;
    lastCreateArgs?: AnthropicCreateArgs;
    createImpl: ReturnType<typeof vi.fn>;
  };

  beforeEach(async () => {
    vi.clearAllMocks();
    vi.resetModules();

    mockTokenizer.calculateTokens.mockResolvedValue({
      totalTokens: 50,
      breakdown: {
        textTokens: 50,
        imageTokens: 0,
        audioTokens: 0,
        otherTokens: 0,
      },
      processingTime: 1,
    });
    anthropicState = anthropicMockState;

    anthropicState.createImpl.mockReset();
    anthropicState.lastCreateArgs = undefined;
    anthropicState.constructorOptions = undefined;

    mockConfig = {
      getCliVersion: vi.fn().mockReturnValue('1.2.3'),
    } as unknown as Config;
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  it('passes a QwenCode User-Agent header to the Anthropic SDK', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
    expect(headers['User-Agent']).toContain(
      `(${process.platform}; ${process.arch})`,
    );
  });

  it('adds the effort beta header when reasoning.effort is set', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
        reasoning: { effort: 'medium' },
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
  });

  it('does not add the effort beta header when reasoning.effort is not set', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).not.toContain('effort-2025-11-24');
  });

  it('omits the anthropic beta header when reasoning is disabled', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
        reasoning: false,
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).toBeUndefined();
  });

  describe('generateContent', () => {
    it('builds request with config sampling params (config overrides request) and thinking budget', async () => {
      const { AnthropicContentConverter } = await importConverter();
      const { AnthropicContentGenerator } = await importGenerator();

      const convertResponseSpy = vi
        .spyOn(
          AnthropicContentConverter.prototype,
          'convertAnthropicResponseToGemini',
        )
        .mockReturnValue(
          (() => {
            const r = new GenerateContentResponse();
            r.responseId = 'gemini-1';
            return r;
          })(),
        );

      anthropicState.createImpl.mockResolvedValue({
        id: 'anthropic-1',
        model: 'claude-test',
        content: [{ type: 'text', text: 'hi' }],
      });

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          baseUrl: 'https://example.invalid',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {
            temperature: 0.7,
            max_tokens: 1000,
            top_p: 0.9,
            top_k: 20,
          },
          schemaCompliance: 'auto',
          reasoning: { effort: 'high', budget_tokens: 1000 },
        },
        mockConfig,
      );

      const abortController = new AbortController();
      const request: GenerateContentParameters = {
        model: 'models/ignored',
        contents: 'Hello',
        config: {
          temperature: 0.1,
          maxOutputTokens: 200,
          topP: 0.5,
          topK: 5,
          abortSignal: abortController.signal,
        },
      };

      const result = await generator.generateContent(request);
      expect(result.responseId).toBe('gemini-1');

      expect(anthropicState.lastCreateArgs).toBeDefined();
      const [anthropicRequest, options] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;

      expect(options?.signal).toBe(abortController.signal);

      expect(anthropicRequest).toEqual(
        expect.objectContaining({
          model: 'claude-test',
          max_tokens: 1000,
          temperature: 0.7,
          top_p: 0.9,
          top_k: 20,
          thinking: { type: 'enabled', budget_tokens: 1000 },
          output_config: { effort: 'high' },
        }),
      );

      expect(convertResponseSpy).toHaveBeenCalledTimes(1);
    });

    it('omits thinking when request.config.thinkingConfig.includeThoughts is false', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      anthropicState.createImpl.mockResolvedValue({
        id: 'anthropic-1',
        model: 'claude-test',
        content: [{ type: 'text', text: 'hi' }],
      });

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: { max_tokens: 500 },
          schemaCompliance: 'auto',
          reasoning: { effort: 'high' },
        },
        mockConfig,
      );

      await generator.generateContent({
        model: 'models/ignored',
        contents: 'Hello',
        config: { thinkingConfig: { includeThoughts: false } },
      } as unknown as GenerateContentParameters);

      const [anthropicRequest] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;
      expect(anthropicRequest).toEqual(
        expect.not.objectContaining({ thinking: expect.anything() }),
      );
    });
  });

  describe('countTokens', () => {
    it('counts tokens using the request tokenizer', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {},
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const request: CountTokensParameters = {
        contents: [{ role: 'user', parts: [{ text: 'Hello world' }] }],
        model: 'claude-test',
      };

      const result = await generator.countTokens(request);
      expect(mockTokenizer.calculateTokens).toHaveBeenCalledWith(request, {
        textEncoding: 'cl100k_base',
      });
      expect(result.totalTokens).toBe(50);
    });

    it('falls back to character approximation when tokenizer throws', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      mockTokenizer.calculateTokens.mockRejectedValueOnce(new Error('boom'));
      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {},
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const request: CountTokensParameters = {
        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
        model: 'claude-test',
      };

      const content = JSON.stringify(request.contents);
      const expected = Math.ceil(content.length / 4);
      const result = await generator.countTokens(request);
      expect(result.totalTokens).toBe(expected);
    });
  });

  describe('generateContentStream', () => {
    it('requests stream=true and converts streamed events into Gemini chunks', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      anthropicState.createImpl.mockResolvedValue(
        (async function* () {
          yield {
            type: 'message_start',
            message: {
              id: 'msg-1',
              model: 'claude-test',
              usage: { cache_read_input_tokens: 2, input_tokens: 3 },
            },
          };

          yield {
            type: 'content_block_start',
            index: 0,
            content_block: { type: 'text' },
          };
          yield {
            type: 'content_block_delta',
            index: 0,
            delta: { type: 'text_delta', text: 'Hello' },
          };
          yield { type: 'content_block_stop', index: 0 };

          yield {
            type: 'content_block_start',
            index: 1,
            content_block: { type: 'thinking', signature: '' },
          };
          yield {
            type: 'content_block_delta',
            index: 1,
            delta: { type: 'thinking_delta', thinking: 'Think' },
          };
          yield {
            type: 'content_block_delta',
            index: 1,
            delta: { type: 'signature_delta', signature: 'abc' },
          };
          yield { type: 'content_block_stop', index: 1 };

          yield {
            type: 'content_block_start',
            index: 2,
            content_block: {
              type: 'tool_use',
              id: 't1',
              name: 'tool',
              input: {},
            },
          };
          yield {
            type: 'content_block_delta',
            index: 2,
            delta: { type: 'input_json_delta', partial_json: '{"x":' },
          };
          yield {
            type: 'content_block_delta',
            index: 2,
            delta: { type: 'input_json_delta', partial_json: '1}' },
          };
          yield { type: 'content_block_stop', index: 2 };

          yield {
            type: 'message_delta',
            delta: { stop_reason: 'end_turn' },
            usage: {
              output_tokens: 5,
              input_tokens: 7,
              cache_read_input_tokens: 2,
            },
          };
          yield { type: 'message_stop' };
        })(),
      );

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: { max_tokens: 123 },
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const stream = await generator.generateContentStream({
        model: 'models/ignored',
        contents: 'Hello',
      } as unknown as GenerateContentParameters);

      const chunks: GenerateContentResponse[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      const [anthropicRequest] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;
      expect(anthropicRequest).toEqual(
        expect.objectContaining({ stream: true }),
      );

      // Text chunk.
      expect(chunks[0]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        text: 'Hello',
      });

      // Thinking chunk.
      expect(chunks[1]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        text: 'Think',
        thought: true,
      });

      // Signature chunk.
      expect(chunks[2]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        thought: true,
        thoughtSignature: 'abc',
      });

      // Tool call chunk.
      expect(chunks[3]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        functionCall: { id: 't1', name: 'tool', args: { x: 1 } },
      });

      // Usage/finish chunks exist; check the last one.
      const last = chunks[chunks.length - 1]!;
      expect(last.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
      expect(last.usageMetadata).toEqual({
        cachedContentTokenCount: 2,
        promptTokenCount: 9, // cached(2) + input(7)
        candidatesTokenCount: 5,
        totalTokenCount: 14,
      });
    });
  });
});

@@ -0,0 +1,502 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import Anthropic from '@anthropic-ai/sdk';
|
||||
import type {
|
||||
CountTokensParameters,
|
||||
CountTokensResponse,
|
||||
EmbedContentParameters,
|
||||
EmbedContentResponse,
|
||||
GenerateContentParameters,
|
||||
GenerateContentResponseUsageMetadata,
|
||||
Part,
|
||||
} from '@google/genai';
|
||||
import { GenerateContentResponse } from '@google/genai';
|
||||
import type { Config } from '../../config/config.js';
|
||||
import type {
|
||||
ContentGenerator,
|
||||
ContentGeneratorConfig,
|
||||
} from '../contentGenerator.js';
|
||||
type Message = Anthropic.Message;
|
||||
type MessageCreateParamsNonStreaming =
|
||||
Anthropic.MessageCreateParamsNonStreaming;
|
||||
type MessageCreateParamsStreaming = Anthropic.MessageCreateParamsStreaming;
|
||||
type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
|
||||
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
|
||||
import { safeJsonParse } from '../../utils/safeJsonParse.js';
|
||||
import { AnthropicContentConverter } from './converter.js';
|
||||
|
||||
type StreamingBlockState = {
|
||||
type: string;
|
||||
id?: string;
|
||||
name?: string;
|
||||
inputJson: string;
|
||||
signature: string;
|
||||
};
|
||||
|
||||
type MessageCreateParamsWithThinking = MessageCreateParamsNonStreaming & {
|
||||
thinking?: { type: 'enabled'; budget_tokens: number };
|
||||
// Anthropic beta feature: output_config.effort (requires beta header effort-2025-11-24)
|
||||
// This is not yet represented in the official SDK types we depend on.
|
||||
output_config?: { effort: 'low' | 'medium' | 'high' };
|
||||
};
|
||||
|
||||
export class AnthropicContentGenerator implements ContentGenerator {
|
||||
private client: Anthropic;
|
||||
private converter: AnthropicContentConverter;
|
||||
|
||||
constructor(
|
||||
private contentGeneratorConfig: ContentGeneratorConfig,
|
||||
private readonly cliConfig: Config,
|
||||
) {
|
||||
const defaultHeaders = this.buildHeaders();
|
||||
const baseURL = contentGeneratorConfig.baseUrl;
|
||||
|
||||
this.client = new Anthropic({
|
||||
apiKey: contentGeneratorConfig.apiKey,
|
||||
baseURL,
|
||||
timeout: contentGeneratorConfig.timeout,
|
||||
maxRetries: contentGeneratorConfig.maxRetries,
|
||||
defaultHeaders,
|
||||
});
|
||||
|
||||
this.converter = new AnthropicContentConverter(
|
||||
contentGeneratorConfig.model,
|
||||
contentGeneratorConfig.schemaCompliance,
|
||||
);
|
||||
}
|
||||
|
||||
async generateContent(
|
||||
request: GenerateContentParameters,
|
||||
): Promise<GenerateContentResponse> {
|
||||
const anthropicRequest = await this.buildRequest(request);
|
||||
const response = (await this.client.messages.create(anthropicRequest, {
|
||||
signal: request.config?.abortSignal,
|
||||
})) as Message;
|
||||
|
||||
return this.converter.convertAnthropicResponseToGemini(response);
|
||||
}
|
||||
|
||||
async generateContentStream(
|
||||
request: GenerateContentParameters,
|
||||
): Promise<AsyncGenerator<GenerateContentResponse>> {
|
||||
const anthropicRequest = await this.buildRequest(request);
|
||||
const streamingRequest: MessageCreateParamsStreaming & {
|
||||
thinking?: { type: 'enabled'; budget_tokens: number };
|
||||
} = {
|
||||
...anthropicRequest,
|
||||
stream: true,
|
||||
};
|
||||
|
||||
const stream = (await this.client.messages.create(
|
||||
streamingRequest as MessageCreateParamsStreaming,
|
||||
{
|
||||
signal: request.config?.abortSignal,
|
||||
},
|
||||
)) as AsyncIterable<RawMessageStreamEvent>;
|
||||
|
||||
return this.processStream(stream);
|
||||
}
|
||||
|
||||
async countTokens(
|
||||
request: CountTokensParameters,
|
||||
): Promise<CountTokensResponse> {
|
||||
try {
|
||||
const tokenizer = getDefaultTokenizer();
|
||||
const result = await tokenizer.calculateTokens(request, {
|
||||
textEncoding: 'cl100k_base',
|
||||
});
|
||||
|
||||
return {
|
||||
totalTokens: result.totalTokens,
|
||||
};
|
||||
} catch (error) {
|
||||
console.warn(
|
||||
'Failed to calculate tokens with tokenizer, ' +
|
||||
'falling back to simple method:',
|
||||
error,
|
||||
);
|
||||
|
||||
const content = JSON.stringify(request.contents);
|
||||
const totalTokens = Math.ceil(content.length / 4);
|
||||
return {
|
||||
totalTokens,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
async embedContent(
|
||||
_request: EmbedContentParameters,
|
||||
): Promise<EmbedContentResponse> {
|
||||
throw new Error('Anthropic does not support embeddings.');
|
||||
}
|
||||
|
||||
useSummarizedThinking(): boolean {
|
||||
return false;
|
||||
}
|
||||
|
||||
private buildHeaders(): Record<string, string> {
|
||||
const version = this.cliConfig.getCliVersion() || 'unknown';
|
||||
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
|
||||
|
||||
const betas: string[] = [];
|
||||
const reasoning = this.contentGeneratorConfig.reasoning;
|
||||
|
||||
// Interleaved thinking is used when we send the `thinking` field.
|
||||
if (reasoning !== false) {
|
||||
betas.push('interleaved-thinking-2025-05-14');
|
||||
}
|
||||
|
||||
// Effort (beta) is enabled when reasoning.effort is set.
|
||||
if (reasoning !== false && reasoning?.effort !== undefined) {
|
||||
betas.push('effort-2025-11-24');
|
||||
}
|
||||
|
||||
const headers: Record<string, string> = {
|
||||
'User-Agent': userAgent,
|
||||
};
|
||||
|
||||
if (betas.length) {
|
||||
headers['anthropic-beta'] = betas.join(',');
|
||||
}
|
||||
|
||||
return headers;
|
||||
}
|
||||
|
||||
private async buildRequest(
|
||||
request: GenerateContentParameters,
|
||||
): Promise<MessageCreateParamsWithThinking> {
|
||||
const { system, messages } =
|
||||
this.converter.convertGeminiRequestToAnthropic(request);
|
||||
|
||||
const tools = request.config?.tools
|
||||
? await this.converter.convertGeminiToolsToAnthropic(request.config.tools)
|
||||
: undefined;
|
||||
|
||||
const sampling = this.buildSamplingParameters(request);
|
||||
const thinking = this.buildThinkingConfig(request);
|
||||
const outputConfig = this.buildOutputConfig();
|
||||
|
||||
return {
|
||||
model: this.contentGeneratorConfig.model,
|
||||
system,
|
||||
messages,
|
||||
tools,
|
||||
...sampling,
|
||||
...(thinking ? { thinking } : {}),
|
||||
...(outputConfig ? { output_config: outputConfig } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
private buildSamplingParameters(request: GenerateContentParameters): {
|
||||
max_tokens: number;
|
||||
temperature?: number;
|
||||
top_p?: number;
|
||||
top_k?: number;
|
||||
} {
|
||||
const configSamplingParams = this.contentGeneratorConfig.samplingParams;
|
||||
const requestConfig = request.config || {};
|
||||
|
||||
const getParam = <T>(
|
||||
configKey: keyof NonNullable<typeof configSamplingParams>,
|
||||
requestKey?: keyof NonNullable<typeof requestConfig>,
|
||||
): T | undefined => {
|
||||
const configValue = configSamplingParams?.[configKey] as T | undefined;
|
||||
const requestValue = requestKey
|
||||
? (requestConfig[requestKey] as T | undefined)
|
||||
: undefined;
|
||||
return configValue !== undefined ? configValue : requestValue;
|
||||
};
|
||||
|
||||
const maxTokens =
|
||||
getParam<number>('max_tokens', 'maxOutputTokens') ?? 10_000;
|
||||
|
||||
return {
|
||||
max_tokens: maxTokens,
|
||||
temperature: getParam<number>('temperature', 'temperature') ?? 1,
|
||||
top_p: getParam<number>('top_p', 'topP'),
|
||||
top_k: getParam<number>('top_k', 'topK'),
|
||||
};
|
||||
}
|
||||
|
||||
private buildThinkingConfig(
|
||||
request: GenerateContentParameters,
|
||||
): { type: 'enabled'; budget_tokens: number } | undefined {
|
||||
if (request.config?.thinkingConfig?.includeThoughts === false) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const reasoning = this.contentGeneratorConfig.reasoning;
|
||||
|
||||
if (reasoning === false) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
if (reasoning?.budget_tokens !== undefined) {
|
||||
return {
|
||||
type: 'enabled',
|
||||
budget_tokens: reasoning.budget_tokens,
|
||||
};
|
||||
}
|
||||
|
||||
const effort = reasoning?.effort;
|
||||
// When using interleaved thinking with tools, this budget token limit is the entire context window(200k tokens).
|
||||
const budgetTokens =
|
||||
effort === 'low' ? 16_000 : effort === 'high' ? 64_000 : 32_000;
|
||||
|
||||
return {
|
||||
type: 'enabled',
|
||||
budget_tokens: budgetTokens,
|
||||
};
|
||||
}

  private buildOutputConfig():
    | { effort: 'low' | 'medium' | 'high' }
    | undefined {
    const reasoning = this.contentGeneratorConfig.reasoning;
    if (reasoning === false || reasoning === undefined) {
      return undefined;
    }

    if (reasoning.effort === undefined) {
      return undefined;
    }

    return { effort: reasoning.effort };
  }

  private async *processStream(
    stream: AsyncIterable<RawMessageStreamEvent>,
  ): AsyncGenerator<GenerateContentResponse> {
    let messageId: string | undefined;
    let model = this.contentGeneratorConfig.model;
    let cachedTokens = 0;
    let promptTokens = 0;
    let completionTokens = 0;
    let finishReason: string | undefined;

    const blocks = new Map<number, StreamingBlockState>();
    const collectedResponses: GenerateContentResponse[] = [];

    for await (const event of stream) {
      switch (event.type) {
        case 'message_start': {
          messageId = event.message.id ?? messageId;
          model = event.message.model ?? model;
          cachedTokens =
            event.message.usage?.cache_read_input_tokens ?? cachedTokens;
          promptTokens = event.message.usage?.input_tokens ?? promptTokens;
          break;
        }
        case 'content_block_start': {
          const index = event.index ?? 0;
          const type = String(event.content_block.type || 'text');
          const initialInput =
            type === 'tool_use' && 'input' in event.content_block
              ? JSON.stringify(event.content_block.input)
              : '';
          blocks.set(index, {
            type,
            id:
              'id' in event.content_block ? event.content_block.id : undefined,
            name:
              'name' in event.content_block
                ? event.content_block.name
                : undefined,
            inputJson: initialInput !== '{}' ? initialInput : '',
            signature:
              type === 'thinking' &&
              'signature' in event.content_block &&
              typeof event.content_block.signature === 'string'
                ? event.content_block.signature
                : '',
          });
          break;
        }
        case 'content_block_delta': {
          const index = event.index ?? 0;
          const deltaType = (event.delta as { type?: string }).type || '';
          const blockState = blocks.get(index);

          if (deltaType === 'text_delta') {
            const text = 'text' in event.delta ? event.delta.text : '';
            if (text) {
              const chunk = this.buildGeminiChunk({ text }, messageId, model);
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'thinking_delta') {
            const thinking =
              (event.delta as { thinking?: string }).thinking || '';
            if (thinking) {
              const chunk = this.buildGeminiChunk(
                { text: thinking, thought: true },
                messageId,
                model,
              );
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'signature_delta' && blockState) {
            const signature =
              (event.delta as { signature?: string }).signature || '';
            if (signature) {
              blockState.signature += signature;
              const chunk = this.buildGeminiChunk(
                { thought: true, thoughtSignature: signature },
                messageId,
                model,
              );
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'input_json_delta' && blockState) {
            const jsonDelta =
              (event.delta as { partial_json?: string }).partial_json || '';
            if (jsonDelta) {
              blockState.inputJson += jsonDelta;
            }
          }
          break;
        }
        case 'content_block_stop': {
          const index = event.index ?? 0;
          const blockState = blocks.get(index);
          if (blockState?.type === 'tool_use') {
            const args = safeJsonParse(blockState.inputJson || '{}', {});
            const chunk = this.buildGeminiChunk(
              {
                functionCall: {
                  id: blockState.id,
                  name: blockState.name,
                  args,
                },
              },
              messageId,
              model,
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          blocks.delete(index);
          break;
        }
        case 'message_delta': {
          const stopReasonValue = event.delta.stop_reason;
          if (stopReasonValue) {
            finishReason = stopReasonValue;
          }

          // Some Anthropic-compatible providers may include additional usage fields
          // (e.g. `input_tokens`, `cache_read_input_tokens`) even though the official
          // Anthropic SDK types only expose `output_tokens` here.
          const usageUnknown = event.usage as unknown;
          const usageRecord =
            usageUnknown && typeof usageUnknown === 'object'
              ? (usageUnknown as Record<string, unknown>)
              : undefined;

          if (event.usage?.output_tokens !== undefined) {
            completionTokens = event.usage.output_tokens;
          }
          if (usageRecord?.['input_tokens'] !== undefined) {
            const inputTokens = usageRecord['input_tokens'];
            if (typeof inputTokens === 'number') {
              promptTokens = inputTokens;
            }
          }
          if (usageRecord?.['cache_read_input_tokens'] !== undefined) {
            const cacheRead = usageRecord['cache_read_input_tokens'];
            if (typeof cacheRead === 'number') {
              cachedTokens = cacheRead;
            }
          }

          if (finishReason || event.usage) {
            const chunk = this.buildGeminiChunk(
              undefined,
              messageId,
              model,
              finishReason,
              {
                cachedContentTokenCount: cachedTokens,
                promptTokenCount: cachedTokens + promptTokens,
                candidatesTokenCount: completionTokens,
                totalTokenCount: cachedTokens + promptTokens + completionTokens,
              },
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          break;
        }
        case 'message_stop': {
          if (promptTokens || completionTokens) {
            const chunk = this.buildGeminiChunk(
              undefined,
              messageId,
              model,
              finishReason,
              {
                cachedContentTokenCount: cachedTokens,
                promptTokenCount: cachedTokens + promptTokens,
                candidatesTokenCount: completionTokens,
                totalTokenCount: cachedTokens + promptTokens + completionTokens,
              },
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          break;
        }
        default:
          break;
      }
    }
  }
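
  // Token accounting above assumes Anthropic-style usage reporting, where
  // `input_tokens` excludes cache reads; cached tokens are therefore added
  // back into promptTokenCount and totalTokenCount when mapping to Gemini's
  // usage metadata.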

  private buildGeminiChunk(
    part?: {
      text?: string;
      thought?: boolean;
      thoughtSignature?: string;
      functionCall?: unknown;
    },
    responseId?: string,
    model?: string,
    finishReason?: string,
    usageMetadata?: GenerateContentResponseUsageMetadata,
  ): GenerateContentResponse {
    const response = new GenerateContentResponse();
    response.responseId = responseId;
    response.createTime = Date.now().toString();
    response.modelVersion = model || this.contentGeneratorConfig.model;
    response.promptFeedback = { safetyRatings: [] };

    const candidateParts = part ? [part as unknown as Part] : [];
    const mappedFinishReason =
      finishReason !== undefined
        ? this.converter.mapAnthropicFinishReasonToGemini(finishReason)
        : undefined;
    response.candidates = [
      {
        content: {
          parts: candidateParts,
          role: 'model' as const,
        },
        index: 0,
        safetyRatings: [],
        ...(mappedFinishReason ? { finishReason: mappedFinishReason } : {}),
      },
    ];

    if (usageMetadata) {
      response.usageMetadata = usageMetadata;
    }

    return response;
  }
}
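
// A minimal consumption sketch (hypothetical caller; `generator`, `request`,
// and the render/schedule helpers stand in for real wiring):
//   for await (const chunk of generator.generateContentStream(request, id)) {
//     const part = chunk.candidates?.[0]?.content?.parts?.[0];
//     if (part?.thought) renderThought(part.text);
//     else if (part?.functionCall) scheduleToolCall(part.functionCall);
//     else if (part?.text) renderText(part.text);
//   }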

@@ -0,0 +1,377 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { CallableTool, Content, Tool } from '@google/genai';
import { FinishReason } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';

// Mock schema conversion so we can force edge cases (e.g. a missing `type`).
vi.mock('../../utils/schemaConverter.js', () => ({
  convertSchema: vi.fn((schema: unknown) => schema),
}));

import { convertSchema } from '../../utils/schemaConverter.js';
import { AnthropicContentConverter } from './converter.js';

describe('AnthropicContentConverter', () => {
  let converter: AnthropicContentConverter;

  beforeEach(() => {
    vi.clearAllMocks();
    converter = new AnthropicContentConverter('test-model', 'auto');
  });

  describe('convertGeminiRequestToAnthropic', () => {
    it('extracts systemInstruction text from string', () => {
      const { system } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'hi',
        config: { systemInstruction: 'sys' },
      });

      expect(system).toBe('sys');
    });

    it('extracts systemInstruction text from parts and joins with newlines', () => {
      const { system } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'hi',
        config: {
          systemInstruction: {
            role: 'system',
            parts: [{ text: 'a' }, { text: 'b' }],
          } as unknown as Content,
        },
      });

      expect(system).toBe('a\nb');
    });

    it('converts a plain string content into a user message', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'Hello',
      });

      expect(messages).toEqual([
        { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
      ]);
    });

    it('converts user content parts into a user message with text blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [{ text: 'Hello' }, { text: 'World' }],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Hello' },
            { type: 'text', text: 'World' },
          ],
        },
      ]);
    });

    it('converts assistant thought parts into Anthropic thinking blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'model',
            parts: [
              { text: 'internal', thought: true, thoughtSignature: 'sig' },
              { text: 'visible' },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'assistant',
          content: [
            { type: 'thinking', thinking: 'internal', signature: 'sig' },
            { type: 'text', text: 'visible' },
          ],
        },
      ]);
    });

    it('converts functionCall parts from model role into tool_use blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'model',
            parts: [
              { text: 'preface' },
              {
                functionCall: {
                  id: 'call-1',
                  name: 'tool_name',
                  args: { a: 1 },
                },
              },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'assistant',
          content: [
            { type: 'text', text: 'preface' },
            {
              type: 'tool_use',
              id: 'call-1',
              name: 'tool_name',
              input: { a: 1 },
            },
          ],
        },
      ]);
    });

    it('converts functionResponse parts into user tool_result messages', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [
              {
                functionResponse: {
                  id: 'call-1',
                  name: 'tool_name',
                  response: { output: 'ok' },
                },
              },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: 'call-1',
              content: 'ok',
            },
          ],
        },
      ]);
    });

    it('extracts function response error field when present', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [
              {
                functionResponse: {
                  id: 'call-1',
                  name: 'tool_name',
                  response: { error: 'boom' },
                },
              },
            ],
          },
        ],
      });

      expect(messages[0]).toEqual({
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: 'call-1',
            content: 'boom',
          },
        ],
      });
    });
  });

  describe('convertGeminiToolsToAnthropic', () => {
    it('converts Tool.functionDeclarations to Anthropic tools and runs schema conversion', async () => {
      const tools = [
        {
          functionDeclarations: [
            {
              name: 'get_weather',
              description: 'Get weather',
              parametersJsonSchema: {
                type: 'object',
                properties: { location: { type: 'string' } },
                required: ['location'],
              },
            },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        name: 'get_weather',
        description: 'Get weather',
        input_schema: {
          type: 'object',
          properties: { location: { type: 'string' } },
          required: ['location'],
        },
      });

      expect(vi.mocked(convertSchema)).toHaveBeenCalledTimes(1);
    });

    it('resolves CallableTool.tool() and converts its functionDeclarations', async () => {
      const callable = [
        {
          tool: async () =>
            ({
              functionDeclarations: [
                {
                  name: 'dynamic_tool',
                  description: 'resolved tool',
                  parametersJsonSchema: { type: 'object', properties: {} },
                },
              ],
            }) as unknown as Tool,
        },
      ] as CallableTool[];

      const result = await converter.convertGeminiToolsToAnthropic(callable);

      expect(result).toHaveLength(1);
      expect(result[0].name).toBe('dynamic_tool');
    });

    it('defaults missing parameters to an empty object schema', async () => {
      const tools = [
        {
          functionDeclarations: [
            { name: 'no_params', description: 'no params' },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        name: 'no_params',
        description: 'no params',
        input_schema: { type: 'object', properties: {} },
      });
    });

    it('forces input_schema.type to "object" when schema conversion yields no type', async () => {
      vi.mocked(convertSchema).mockImplementationOnce(() => ({
        properties: {},
      }));
      const tools = [
        {
          functionDeclarations: [
            {
              name: 'edge',
              description: 'edge',
              parametersJsonSchema: { type: 'object', properties: {} },
            },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);
      expect(result[0]?.input_schema?.type).toBe('object');
    });
  });

  describe('convertAnthropicResponseToGemini', () => {
    it('converts text, tool_use, thinking, and redacted_thinking blocks', () => {
      const response = converter.convertAnthropicResponseToGemini({
        id: 'msg-1',
        model: 'claude-test',
        stop_reason: 'end_turn',
        content: [
          { type: 'thinking', thinking: 'thought', signature: 'sig' },
          { type: 'text', text: 'hello' },
          { type: 'tool_use', id: 't1', name: 'tool', input: { x: 1 } },
          { type: 'redacted_thinking' },
        ],
        usage: { input_tokens: 3, output_tokens: 5 },
      } as unknown as Anthropic.Message);

      expect(response.responseId).toBe('msg-1');
      expect(response.modelVersion).toBe('claude-test');
      expect(response.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
      expect(response.usageMetadata).toEqual({
        promptTokenCount: 3,
        candidatesTokenCount: 5,
        totalTokenCount: 8,
      });

      const parts = response.candidates?.[0]?.content?.parts || [];
      expect(parts).toEqual([
        { text: 'thought', thought: true, thoughtSignature: 'sig' },
        { text: 'hello' },
        { functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
        { text: '', thought: true },
      ]);
    });

    it('handles tool_use input that is a JSON string', () => {
      const response = converter.convertAnthropicResponseToGemini({
        id: 'msg-1',
        model: 'claude-test',
        stop_reason: null,
        content: [
          { type: 'tool_use', id: 't1', name: 'tool', input: '{"x":1}' },
        ],
      } as unknown as Anthropic.Message);

      const parts = response.candidates?.[0]?.content?.parts || [];
      expect(parts).toEqual([
        { functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
      ]);
    });
  });

  describe('mapAnthropicFinishReasonToGemini', () => {
    it('maps known reasons', () => {
      expect(converter.mapAnthropicFinishReasonToGemini('end_turn')).toBe(
        FinishReason.STOP,
      );
      expect(converter.mapAnthropicFinishReasonToGemini('max_tokens')).toBe(
        FinishReason.MAX_TOKENS,
      );
      expect(converter.mapAnthropicFinishReasonToGemini('content_filter')).toBe(
        FinishReason.SAFETY,
      );
    });

    it('returns undefined for null/empty', () => {
      expect(converter.mapAnthropicFinishReasonToGemini(null)).toBeUndefined();
      expect(converter.mapAnthropicFinishReasonToGemini('')).toBeUndefined();
    });
  });
});

448
packages/core/src/core/anthropicContentGenerator/converter.ts
Normal file
@@ -0,0 +1,448 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Candidate,
  CallableTool,
  Content,
  ContentListUnion,
  ContentUnion,
  FunctionCall,
  FunctionResponse,
  GenerateContentParameters,
  Part,
  PartUnion,
  Tool,
  ToolListUnion,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import {
  convertSchema,
  type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';

type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;

type ThoughtPart = { text: string; signature?: string };

interface ParsedParts {
  thoughtParts: ThoughtPart[];
  contentParts: string[];
  functionCalls: FunctionCall[];
  functionResponses: FunctionResponse[];
}

export class AnthropicContentConverter {
  private model: string;
  private schemaCompliance: SchemaComplianceMode;

  constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
    this.model = model;
    this.schemaCompliance = schemaCompliance;
  }

  convertGeminiRequestToAnthropic(request: GenerateContentParameters): {
    system?: string;
    messages: AnthropicMessageParam[];
  } {
    const messages: AnthropicMessageParam[] = [];

    const system = this.extractTextFromContentUnion(
      request.config?.systemInstruction,
    );

    this.processContents(request.contents, messages);

    return {
      system: system || undefined,
      messages,
    };
  }

  async convertGeminiToolsToAnthropic(
    geminiTools: ToolListUnion,
  ): Promise<AnthropicToolParam[]> {
    const tools: AnthropicToolParam[] = [];

    for (const tool of geminiTools) {
      let actualTool: Tool;

      if ('tool' in tool) {
        actualTool = await (tool as CallableTool).tool();
      } else {
        actualTool = tool as Tool;
      }

      if (!actualTool.functionDeclarations) {
        continue;
      }

      for (const func of actualTool.functionDeclarations) {
        if (!func.name) continue;

        let inputSchema: Record<string, unknown> | undefined;
        if (func.parametersJsonSchema) {
          inputSchema = {
            ...(func.parametersJsonSchema as Record<string, unknown>),
          };
        } else if (func.parameters) {
          inputSchema = func.parameters as Record<string, unknown>;
        }

        if (!inputSchema) {
          inputSchema = { type: 'object', properties: {} };
        }

        inputSchema = convertSchema(inputSchema, this.schemaCompliance);
        if (typeof inputSchema['type'] !== 'string') {
          inputSchema['type'] = 'object';
        }

        tools.push({
          name: func.name,
          description: func.description,
          input_schema: inputSchema as Anthropic.Tool.InputSchema,
        });
      }
    }

    return tools;
  }

  convertAnthropicResponseToGemini(
    response: Anthropic.Message,
  ): GenerateContentResponse {
    const geminiResponse = new GenerateContentResponse();
    const parts: Part[] = [];

    for (const block of response.content || []) {
      const blockType = String((block as { type?: string })['type'] || '');
      if (blockType === 'text') {
        const text =
          typeof (block as { text?: string }).text === 'string'
            ? (block as { text?: string }).text
            : '';
        if (text) {
          parts.push({ text });
        }
      } else if (blockType === 'tool_use') {
        const toolUse = block as {
          id?: string;
          name?: string;
          input?: unknown;
        };
        parts.push({
          functionCall: {
            id: typeof toolUse.id === 'string' ? toolUse.id : undefined,
            name: typeof toolUse.name === 'string' ? toolUse.name : undefined,
            args: this.safeInputToArgs(toolUse.input),
          },
        });
      } else if (blockType === 'thinking') {
        const thinking =
          typeof (block as { thinking?: string }).thinking === 'string'
            ? (block as { thinking?: string }).thinking
            : '';
        const signature =
          typeof (block as { signature?: string }).signature === 'string'
            ? (block as { signature?: string }).signature
            : '';
        if (thinking || signature) {
          const thoughtPart: Part = {
            text: thinking,
            thought: true,
            thoughtSignature: signature,
          };
          parts.push(thoughtPart);
        }
      } else if (blockType === 'redacted_thinking') {
        parts.push({ text: '', thought: true });
      }
    }

    const candidate: Candidate = {
      content: {
        parts,
        role: 'model' as const,
      },
      index: 0,
      safetyRatings: [],
    };

    const finishReason = this.mapAnthropicFinishReasonToGemini(
      response.stop_reason,
    );
    if (finishReason) {
      candidate.finishReason = finishReason;
    }

    geminiResponse.candidates = [candidate];
    geminiResponse.responseId = response.id;
    geminiResponse.createTime = Date.now().toString();
    geminiResponse.modelVersion = response.model || this.model;
    geminiResponse.promptFeedback = { safetyRatings: [] };

    if (response.usage) {
      const promptTokens = response.usage.input_tokens || 0;
      const completionTokens = response.usage.output_tokens || 0;
      geminiResponse.usageMetadata = {
        promptTokenCount: promptTokens,
        candidatesTokenCount: completionTokens,
        totalTokenCount: promptTokens + completionTokens,
      };
    }

    return geminiResponse;
  }

  private processContents(
    contents: ContentListUnion,
    messages: AnthropicMessageParam[],
  ): void {
    if (Array.isArray(contents)) {
      for (const content of contents) {
        this.processContent(content, messages);
      }
    } else if (contents) {
      this.processContent(contents, messages);
    }
  }

  private processContent(
    content: ContentUnion | PartUnion,
    messages: AnthropicMessageParam[],
  ): void {
    if (typeof content === 'string') {
      messages.push({
        role: 'user',
        content: [{ type: 'text', text: content }],
      });
      return;
    }

    if (!this.isContentObject(content)) return;

    const parsed = this.parseParts(content.parts || []);

    if (parsed.functionResponses.length > 0) {
      for (const response of parsed.functionResponses) {
        messages.push({
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: response.id || '',
              content: this.extractFunctionResponseContent(response.response),
            },
          ],
        });
      }
      return;
    }

    if (content.role === 'model' && parsed.functionCalls.length > 0) {
      const thinkingBlocks: AnthropicContentBlockParam[] =
        parsed.thoughtParts.map((part) => {
          const thinkingBlock: unknown = {
            type: 'thinking',
            thinking: part.text,
          };
          if (part.signature) {
            (thinkingBlock as { signature?: string }).signature =
              part.signature;
          }
          return thinkingBlock as AnthropicContentBlockParam;
        });
      const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
        (call, index) => ({
          type: 'tool_use',
          id: call.id || `tool_${index}`,
          name: call.name || '',
          input: (call.args as Record<string, unknown>) || {},
        }),
      );

      const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
        (text) => ({
          type: 'text' as const,
          text,
        }),
      );

      messages.push({
        role: 'assistant',
        content: [...thinkingBlocks, ...textBlocks, ...toolUses],
      });
      return;
    }

    const role = content.role === 'model' ? 'assistant' : 'user';
    const thinkingBlocks: AnthropicContentBlockParam[] =
      role === 'assistant'
        ? parsed.thoughtParts.map((part) => {
            const thinkingBlock: unknown = {
              type: 'thinking',
              thinking: part.text,
            };
            if (part.signature) {
              (thinkingBlock as { signature?: string }).signature =
                part.signature;
            }
            return thinkingBlock as AnthropicContentBlockParam;
          })
        : [];
    const textBlocks: AnthropicContentBlockParam[] = [
      ...thinkingBlocks,
      ...parsed.contentParts.map((text) => ({
        type: 'text' as const,
        text,
      })),
    ];
    if (textBlocks.length > 0) {
      messages.push({ role, content: textBlocks });
    }
  }

  private parseParts(parts: Part[]): ParsedParts {
    const thoughtParts: ThoughtPart[] = [];
    const contentParts: string[] = [];
    const functionCalls: FunctionCall[] = [];
    const functionResponses: FunctionResponse[] = [];

    for (const part of parts) {
      if (typeof part === 'string') {
        contentParts.push(part);
      } else if (
        'text' in part &&
        part.text &&
        !('thought' in part && part.thought)
      ) {
        contentParts.push(part.text);
      } else if ('text' in part && 'thought' in part && part.thought) {
        thoughtParts.push({
          text: part.text || '',
          signature:
            'thoughtSignature' in part &&
            typeof part.thoughtSignature === 'string'
              ? part.thoughtSignature
              : undefined,
        });
      } else if ('functionCall' in part && part.functionCall) {
        functionCalls.push(part.functionCall);
      } else if ('functionResponse' in part && part.functionResponse) {
        functionResponses.push(part.functionResponse);
      }
    }

    return {
      thoughtParts,
      contentParts,
      functionCalls,
      functionResponses,
    };
  }

  private extractTextFromContentUnion(contentUnion: unknown): string {
    if (typeof contentUnion === 'string') {
      return contentUnion;
    }

    if (Array.isArray(contentUnion)) {
      return contentUnion
        .map((item) => this.extractTextFromContentUnion(item))
        .filter(Boolean)
        .join('\n');
    }

    if (typeof contentUnion === 'object' && contentUnion !== null) {
      if ('parts' in contentUnion) {
        const content = contentUnion as Content;
        return (
          content.parts
            ?.map((part: Part) => {
              if (typeof part === 'string') return part;
              if ('text' in part) return part.text || '';
              return '';
            })
            .filter(Boolean)
            .join('\n') || ''
        );
      }
    }

    return '';
  }

  private extractFunctionResponseContent(response: unknown): string {
    if (response === null || response === undefined) {
      return '';
    }

    if (typeof response === 'string') {
      return response;
    }

    if (typeof response === 'object') {
      const responseObject = response as Record<string, unknown>;
      const output = responseObject['output'];
      if (typeof output === 'string') {
        return output;
      }

      const error = responseObject['error'];
      if (typeof error === 'string') {
        return error;
      }
    }

    try {
      const serialized = JSON.stringify(response);
      return serialized ?? String(response);
    } catch {
      return String(response);
    }
  }

  private safeInputToArgs(input: unknown): Record<string, unknown> {
    if (input && typeof input === 'object') {
      return input as Record<string, unknown>;
    }
    if (typeof input === 'string') {
      return safeJsonParse(input, {});
    }
    return {};
  }

  mapAnthropicFinishReasonToGemini(
    reason?: string | null,
  ): FinishReason | undefined {
    if (!reason) return undefined;
    const mapping: Record<string, FinishReason> = {
      end_turn: FinishReason.STOP,
      stop_sequence: FinishReason.STOP,
      tool_use: FinishReason.STOP,
      max_tokens: FinishReason.MAX_TOKENS,
      content_filter: FinishReason.SAFETY,
    };
    return mapping[reason] || FinishReason.FINISH_REASON_UNSPECIFIED;
  }

  private isContentObject(
    content: unknown,
  ): content is { role: string; parts: Part[] } {
    return (
      typeof content === 'object' &&
      content !== null &&
      'role' in content &&
      'parts' in content &&
      Array.isArray((content as Record<string, unknown>)['parts'])
    );
  }
}
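
// Usage sketch (hypothetical wiring): the converter is symmetric, so a Gemini
// request can be sent through the Anthropic SDK and the reply mapped back:
//   const converter = new AnthropicContentConverter('some-model', 'auto');
//   const { system, messages } = converter.convertGeminiRequestToAnthropic(req);
//   const reply = await anthropic.messages.create({ model, system, messages, max_tokens });
//   const geminiResponse = converter.convertAnthropicResponseToGemini(reply);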

21
packages/core/src/core/anthropicContentGenerator/index.ts
Normal file
@@ -0,0 +1,21 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { AnthropicContentGenerator } from './anthropicContentGenerator.js';

export { AnthropicContentGenerator } from './anthropicContentGenerator.js';

export function createAnthropicContentGenerator(
  contentGeneratorConfig: ContentGeneratorConfig,
  cliConfig: Config,
): ContentGenerator {
  return new AnthropicContentGenerator(contentGeneratorConfig, cliConfig);
}
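
// Factory usage sketch (hypothetical values; in the real flow this is reached
// via createContentGenerator once authType resolves to 'anthropic'):
//   const generator = createAnthropicContentGenerator(
//     { authType: AuthType.USE_ANTHROPIC, apiKey, baseUrl, model },
//     cliConfig,
//   );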

@@ -8,7 +8,7 @@ import { describe, it, expect, vi } from 'vitest';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './geminiContentGenerator/loggingContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';

vi.mock('@google/genai');

@@ -14,6 +14,7 @@ import type {
} from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';

/**
 * Interface abstracting the core functionalities for generating content and counting tokens.
@@ -37,10 +38,11 @@ export interface ContentGenerator {
}

export enum AuthType {
  USE_GEMINI = 'gemini-api-key',
  USE_VERTEX_AI = 'vertex-ai',
  USE_OPENAI = 'openai',
  QWEN_OAUTH = 'qwen-oauth',
  USE_GEMINI = 'gemini',
  USE_VERTEX_AI = 'vertex-ai',
  USE_ANTHROPIC = 'anthropic',
}

export type ContentGeneratorConfig = {
@@ -63,9 +65,12 @@ export type ContentGeneratorConfig = {
    temperature?: number;
    max_tokens?: number;
  };
  reasoning?: {
    effort?: 'low' | 'medium' | 'high';
  };
  reasoning?:
    | false
    | {
        effort?: 'low' | 'medium' | 'high';
        budget_tokens?: number;
      };
  proxy?: string | undefined;
  userAgent?: string;
  // Schema compliance mode for tool definitions
@@ -77,7 +82,7 @@ export function createContentGeneratorConfig(
  authType: AuthType | undefined,
  generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
  const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
  let newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
    ...(generationConfig || {}),
    authType,
    proxy: config?.getProxy(),
@@ -94,8 +99,16 @@ export function createContentGeneratorConfig(
  }

  if (authType === AuthType.USE_OPENAI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['OPENAI_API_KEY'],
      baseUrl:
        newContentGeneratorConfig.baseUrl || process.env['OPENAI_BASE_URL'],
      model: newContentGeneratorConfig.model || process.env['OPENAI_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('OpenAI API key is required');
      throw new Error('OPENAI_API_KEY environment variable not found.');
    }

    return {
@@ -104,10 +117,62 @@ export function createContentGeneratorConfig(
    } as ContentGeneratorConfig;
  }

  return {
    ...newContentGeneratorConfig,
    model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
  } as ContentGeneratorConfig;
  if (authType === AuthType.USE_ANTHROPIC) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey:
        newContentGeneratorConfig.apiKey || process.env['ANTHROPIC_API_KEY'],
      baseUrl:
        newContentGeneratorConfig.baseUrl || process.env['ANTHROPIC_BASE_URL'],
      model: newContentGeneratorConfig.model || process.env['ANTHROPIC_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('ANTHROPIC_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.baseUrl) {
      throw new Error('ANTHROPIC_BASE_URL environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('ANTHROPIC_MODEL environment variable not found.');
    }
  }
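
  // Environment sketch this branch expects (hypothetical values):
  //   export ANTHROPIC_API_KEY=sk-ant-...
  //   export ANTHROPIC_BASE_URL=https://api.anthropic.com
  //   export ANTHROPIC_MODEL=claude-sonnet-4-5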

  if (authType === AuthType.USE_GEMINI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['GEMINI_API_KEY'],
      model: newContentGeneratorConfig.model || process.env['GEMINI_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('GEMINI_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('GEMINI_MODEL environment variable not found.');
    }
  }

  if (authType === AuthType.USE_VERTEX_AI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['GOOGLE_API_KEY'],
      model: newContentGeneratorConfig.model || process.env['GOOGLE_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('GOOGLE_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('GOOGLE_MODEL environment variable not found.');
    }
  }

  return newContentGeneratorConfig as ContentGeneratorConfig;
}

export async function createContentGenerator(
@@ -115,19 +180,9 @@ export async function createContentGenerator(
  gcConfig: Config,
  isInitialAuth?: boolean,
): Promise<ContentGenerator> {
  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    const { createGeminiContentGenerator } = await import(
      './geminiContentGenerator/index.js'
    );
    return createGeminiContentGenerator(config, gcConfig);
  }

  if (config.authType === AuthType.USE_OPENAI) {
    if (!config.apiKey) {
      throw new Error('OpenAI API key is required');
      throw new Error('OPENAI_API_KEY environment variable not found.');
    }

    // Import OpenAIContentGenerator dynamically to avoid circular dependencies
@@ -136,7 +191,8 @@ export async function createContentGenerator(
    );

    // Always use OpenAIContentGenerator; logging is controlled by the enableOpenAILogging flag
    return createOpenAIContentGenerator(config, gcConfig);
    const generator = createOpenAIContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  }

  if (config.authType === AuthType.QWEN_OAUTH) {
@@ -157,7 +213,8 @@ export async function createContentGenerator(
    );

    // Create the content generator with dynamic token management
    return new QwenContentGenerator(qwenClient, config, gcConfig);
    const generator = new QwenContentGenerator(qwenClient, config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  } catch (error) {
    throw new Error(
      `${error instanceof Error ? error.message : String(error)}`,
@@ -165,6 +222,30 @@ export async function createContentGenerator(
    }
  }

  if (config.authType === AuthType.USE_ANTHROPIC) {
    if (!config.apiKey) {
      throw new Error('ANTHROPIC_API_KEY environment variable not found.');
    }

    const { createAnthropicContentGenerator } = await import(
      './anthropicContentGenerator/index.js'
    );

    const generator = createAnthropicContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  }

  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    const { createGeminiContentGenerator } = await import(
      './geminiContentGenerator/index.js'
    );
    const generator = createGeminiContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  }

  throw new Error(
    `Error creating contentGenerator: Unsupported authType: ${config.authType}`,
  );

@@ -720,66 +720,6 @@ describe('GeminiChat', () => {
    );
  });

  it('should handle summarized thinking by conditionally including thoughts in history', async () => {
    // Case 1: useSummarizedThinking is true -> thoughts NOT in history
    vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
      true,
    );
    const stream1 = (async function* () {
      yield {
        candidates: [
          {
            content: {
              role: 'model',
              parts: [{ thought: true, text: 'T1' }, { text: 'A1' }],
            },
            finishReason: 'STOP',
          },
        ],
      } as unknown as GenerateContentResponse;
    })();
    vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
      stream1,
    );

    const res1 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
    for await (const _ of res1);

    const history1 = chat.getHistory();
    expect(history1[1].parts).toEqual([{ text: 'A1' }]);

    // Case 2: useSummarizedThinking is false -> thoughts ARE in history
    chat.clearHistory();
    vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
      false,
    );
    const stream2 = (async function* () {
      yield {
        candidates: [
          {
            content: {
              role: 'model',
              parts: [{ thought: true, text: 'T2' }, { text: 'A2' }],
            },
            finishReason: 'STOP',
          },
        ],
      } as unknown as GenerateContentResponse;
    })();
    vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
      stream2,
    );

    const res2 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p2');
    for await (const _ of res2);

    const history2 = chat.getHistory();
    expect(history2[1].parts).toEqual([
      { text: 'T2', thought: true },
      { text: 'A2' },
    ]);
  });

  it('should keep parts with thoughtSignature when consolidating history', async () => {
    const stream = (async function* () {
      yield {

@@ -559,14 +559,25 @@ export class GeminiChat {
      yield chunk; // Yield every chunk to the UI immediately.
    }

    let thoughtText = '';
    // Only include thoughts if not using summarized thinking.
    if (!this.config.getContentGenerator().useSummarizedThinking()) {
      thoughtText = allModelParts
        .filter((part) => part.thought)
        .map((part) => part.text)
        .join('')
        .trim();
    let thoughtContentPart: Part | undefined;
    const thoughtText = allModelParts
      .filter((part) => part.thought)
      .map((part) => part.text)
      .join('')
      .trim();

    if (thoughtText !== '') {
      thoughtContentPart = {
        text: thoughtText,
        thought: true,
      };

      const thoughtSignature = allModelParts.filter(
        (part) => part.thoughtSignature && part.thought,
      )?.[0]?.thoughtSignature;
      if (thoughtContentPart && thoughtSignature) {
        thoughtContentPart.thoughtSignature = thoughtSignature;
      }
    }

    const contentParts = allModelParts.filter((part) => !part.thought);
@@ -592,11 +603,11 @@ export class GeminiChat {
      .trim();

    // Record assistant turn with raw Content and metadata
    if (thoughtText || contentText || hasToolCall || usageMetadata) {
    if (thoughtContentPart || contentText || hasToolCall || usageMetadata) {
      this.chatRecordingService?.recordAssistantTurn({
        model,
        message: [
          ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
          ...(thoughtContentPart ? [thoughtContentPart] : []),
          ...(contentText ? [{ text: contentText }] : []),
          ...(hasToolCall
            ? contentParts
@@ -632,7 +643,7 @@ export class GeminiChat {
    this.history.push({
      role: 'model',
      parts: [
        ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
        ...(thoughtContentPart ? [thoughtContentPart] : []),
        ...consolidatedHistoryParts,
      ],
    });

@@ -39,7 +39,7 @@ export class GeminiContentGenerator implements ContentGenerator {
    this.contentGeneratorConfig = contentGeneratorConfig;
  }

  private buildSamplingParameters(
  private buildGenerateContentConfig(
    request: GenerateContentParameters,
  ): GenerateContentConfig {
    const configSamplingParams = this.contentGeneratorConfig?.samplingParams;
@@ -84,17 +84,7 @@ export class GeminiContentGenerator implements ContentGenerator {
        'frequencyPenalty',
      ),
      thinkingConfig: getParameterValue(
        this.contentGeneratorConfig?.reasoning
          ? {
              includeThoughts: true,
              thinkingLevel: (this.contentGeneratorConfig.reasoning.effort ===
              'low'
                ? 'LOW'
                : this.contentGeneratorConfig.reasoning.effort === 'high'
                  ? 'HIGH'
                  : 'THINKING_LEVEL_UNSPECIFIED') as ThinkingLevel,
            }
          : undefined,
        this.buildThinkingConfig(),
        'thinkingConfig',
        {
          includeThoughts: true,
@@ -104,13 +94,40 @@ export class GeminiContentGenerator implements ContentGenerator {
    };
  }

  private buildThinkingConfig():
    | { includeThoughts: boolean; thinkingLevel?: ThinkingLevel }
    | undefined {
    const reasoning = this.contentGeneratorConfig?.reasoning;

    if (reasoning === false) {
      return { includeThoughts: false };
    }

    if (reasoning) {
      const thinkingLevel = (
        reasoning.effort === 'low'
          ? 'LOW'
          : reasoning.effort === 'high'
            ? 'HIGH'
            : 'THINKING_LEVEL_UNSPECIFIED'
      ) as ThinkingLevel;

      return {
        includeThoughts: true,
        thinkingLevel,
      };
    }

    return undefined;
  }
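
  // Note the asymmetry with the Anthropic generator above: here reasoning ===
  // false still sends an explicit { includeThoughts: false }, while an unset
  // reasoning leaves thinkingConfig to the request/default resolution.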

  async generateContent(
    request: GenerateContentParameters,
    _userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const finalRequest = {
      ...request,
      config: this.buildSamplingParameters(request),
      config: this.buildGenerateContentConfig(request),
    };
    return this.googleGenAI.models.generateContent(finalRequest);
  }
@@ -121,7 +138,7 @@ export class GeminiContentGenerator implements ContentGenerator {
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const finalRequest = {
      ...request,
      config: this.buildSamplingParameters(request),
      config: this.buildGenerateContentConfig(request),
    };
    return this.googleGenAI.models.generateContentStream(finalRequest);
  }

@@ -7,7 +7,6 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { createGeminiContentGenerator } from './index.js';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import type { Config } from '../../config/config.js';
import { AuthType } from '../contentGenerator.js';

@@ -15,10 +14,6 @@ vi.mock('./geminiContentGenerator.js', () => ({
  GeminiContentGenerator: vi.fn().mockImplementation(() => ({})),
}));

vi.mock('./loggingContentGenerator.js', () => ({
  LoggingContentGenerator: vi.fn().mockImplementation((wrapped) => wrapped),
}));

describe('createGeminiContentGenerator', () => {
  let mockConfig: Config;

@@ -31,7 +26,7 @@ describe('createGeminiContentGenerator', () => {
    } as unknown as Config;
  });

  it('should create a GeminiContentGenerator wrapped in LoggingContentGenerator', () => {
  it('should create a GeminiContentGenerator', () => {
    const config = {
      model: 'gemini-1.5-flash',
      apiKey: 'test-key',
@@ -41,7 +36,6 @@ describe('createGeminiContentGenerator', () => {
    const generator = createGeminiContentGenerator(config, mockConfig);

    expect(GeminiContentGenerator).toHaveBeenCalled();
    expect(LoggingContentGenerator).toHaveBeenCalled();
    expect(generator).toBeDefined();
  });
});

@@ -11,10 +11,8 @@ import type {
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { InstallationManager } from '../../utils/installationManager.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';

export { GeminiContentGenerator } from './geminiContentGenerator.js';
export { LoggingContentGenerator } from './loggingContentGenerator.js';

/**
 * Create a Gemini content generator.
@@ -51,5 +49,5 @@ export function createGeminiContentGenerator(
    config,
  );

  return new LoggingContentGenerator(geminiContentGenerator, gcConfig);
  return geminiContentGenerator;
}

7
packages/core/src/core/loggingContentGenerator/index.ts
Normal file
@@ -0,0 +1,7 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

export { LoggingContentGenerator } from './loggingContentGenerator.js';

@@ -0,0 +1,371 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type {
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { LoggingContentGenerator } from './index.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import {
  logApiRequest,
  logApiResponse,
  logApiError,
} from '../../telemetry/loggers.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type OpenAI from 'openai';

vi.mock('../../telemetry/loggers.js', () => ({
  logApiRequest: vi.fn(),
  logApiResponse: vi.fn(),
  logApiError: vi.fn(),
}));

vi.mock('../../utils/openaiLogger.js', () => ({
  OpenAILogger: vi.fn().mockImplementation(() => ({
    logInteraction: vi.fn().mockResolvedValue(undefined),
  })),
}));

const convertGeminiRequestToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiRequestToOpenAI')
  .mockReturnValue([{ role: 'user', content: 'converted' }]);
const convertGeminiToolsToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiToolsToOpenAI')
  .mockResolvedValue([{ type: 'function', function: { name: 'tool' } }]);
const convertGeminiResponseToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiResponseToOpenAI')
  .mockReturnValue({
    id: 'openai-response',
    object: 'chat.completion',
    created: 123456789,
    model: 'test-model',
    choices: [],
  } as OpenAI.Chat.ChatCompletion);

const createConfig = (overrides: Record<string, unknown> = {}): Config =>
  ({
    getContentGeneratorConfig: () => ({
      authType: 'openai',
      enableOpenAILogging: false,
      ...overrides,
    }),
  }) as Config;

const createWrappedGenerator = (
  generateContent: ContentGenerator['generateContent'],
  generateContentStream: ContentGenerator['generateContentStream'],
): ContentGenerator =>
  ({
    generateContent,
    generateContentStream,
    countTokens: vi.fn(),
    embedContent: vi.fn(),
    useSummarizedThinking: vi.fn().mockReturnValue(false),
  }) as ContentGenerator;

const createResponse = (
  responseId: string,
  modelVersion: string,
  parts: Array<Record<string, unknown>>,
  usageMetadata?: GenerateContentResponseUsageMetadata,
  finishReason?: string,
): GenerateContentResponse => {
  const response = new GenerateContentResponse();
  response.responseId = responseId;
  response.modelVersion = modelVersion;
  response.usageMetadata = usageMetadata;
  response.candidates = [
    {
      content: {
        role: 'model',
        parts: parts as never[],
      },
      finishReason: finishReason as never,
      index: 0,
      safetyRatings: [],
    },
  ];
  return response;
};

describe('LoggingContentGenerator', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  afterEach(() => {
    convertGeminiRequestToOpenAISpy.mockClear();
    convertGeminiToolsToOpenAISpy.mockClear();
    convertGeminiResponseToOpenAISpy.mockClear();
  });

  it('logs request/response, normalizes thought parts, and logs OpenAI interaction', async () => {
    const wrapped = createWrappedGenerator(
      vi.fn().mockResolvedValue(
        createResponse(
          'resp-1',
          'model-v2',
          [{ text: 'ok' }],
          {
            promptTokenCount: 3,
            candidatesTokenCount: 5,
            totalTokenCount: 8,
          },
          'STOP',
        ),
      ),
      vi.fn(),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({
        enableOpenAILogging: true,
        openAILoggingDir: 'logs',
        schemaCompliance: 'openapi_30',
      }),
    );

    const request = {
      model: 'test-model',
      contents: [
        {
          role: 'user',
          parts: [
            { text: 'Hello', thought: 'internal' },
            {
              functionCall: { id: 'call-1', name: 'tool', args: '{}' },
              thought: 'strip-me',
            },
            null,
          ],
        },
      ],
      config: {
        temperature: 0.3,
        topP: 0.9,
        maxOutputTokens: 256,
        presencePenalty: 0.2,
        frequencyPenalty: 0.1,
        tools: [
          {
            functionDeclarations: [
              { name: 'tool', description: 'desc', parameters: {} },
            ],
          },
        ],
      },
    } as unknown as GenerateContentParameters;

    const response = await generator.generateContent(request, 'prompt-1');

    expect(response.responseId).toBe('resp-1');
    expect(logApiRequest).toHaveBeenCalledTimes(1);
    const [, requestEvent] = vi.mocked(logApiRequest).mock.calls[0];
    const loggedContents = JSON.parse(requestEvent.request_text || '[]');
    expect(loggedContents[0].parts[0]).toEqual({
      text: 'Hello\n[Thought: internal]',
    });
    expect(loggedContents[0].parts[1]).toEqual({
      functionCall: { id: 'call-1', name: 'tool', args: '{}' },
    });

    expect(logApiResponse).toHaveBeenCalledTimes(1);
    const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
    expect(responseEvent.response_id).toBe('resp-1');
    expect(responseEvent.model).toBe('model-v2');
    expect(responseEvent.prompt_id).toBe('prompt-1');
    expect(responseEvent.input_token_count).toBe(3);

    expect(convertGeminiRequestToOpenAISpy).toHaveBeenCalledTimes(1);
    expect(convertGeminiToolsToOpenAISpy).toHaveBeenCalledTimes(1);
    expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);

    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
    const [openaiRequest, openaiResponse, openaiError] =
      openaiLoggerInstance.logInteraction.mock.calls[0];
    expect(openaiRequest).toEqual(
      expect.objectContaining({
        model: 'test-model',
        messages: [{ role: 'user', content: 'converted' }],
        tools: [{ type: 'function', function: { name: 'tool' } }],
        temperature: 0.3,
        top_p: 0.9,
        max_tokens: 256,
        presence_penalty: 0.2,
        frequency_penalty: 0.1,
      }),
    );
    expect(openaiResponse).toEqual({
      id: 'openai-response',
      object: 'chat.completion',
      created: 123456789,
      model: 'test-model',
      choices: [],
    });
    expect(openaiError).toBeUndefined();
  });

  it('logs errors with status code and request id, then rethrows', async () => {
    const error = Object.assign(new Error('boom'), {
      code: 429,
      request_id: 'req-99',
      type: 'rate_limit',
    });
    const wrapped = createWrappedGenerator(
      vi.fn().mockRejectedValue(error),
      vi.fn(),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    await expect(
      generator.generateContent(request, 'prompt-2'),
    ).rejects.toThrow('boom');

    expect(logApiError).toHaveBeenCalledTimes(1);
    const [, errorEvent] = vi.mocked(logApiError).mock.calls[0];
    expect(errorEvent.response_id).toBe('req-99');
    expect(errorEvent.status_code).toBe(429);
    expect(errorEvent.error_type).toBe('rate_limit');
    expect(errorEvent.prompt_id).toBe('prompt-2');

    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    const [, , loggedError] = openaiLoggerInstance.logInteraction.mock.calls[0];
    expect(loggedError).toBeInstanceOf(Error);
    expect((loggedError as Error).message).toBe('boom');
  });

  it('logs streaming responses and consolidates tool calls', async () => {
    const usage1 = {
      promptTokenCount: 1,
    } as GenerateContentResponseUsageMetadata;
    const usage2 = {
      promptTokenCount: 2,
      candidatesTokenCount: 4,
      totalTokenCount: 6,
    } as GenerateContentResponseUsageMetadata;

    const response1 = createResponse(
      'resp-1',
      'model-stream',
      [
        { text: 'Hello' },
        { functionCall: { id: 'call-1', name: 'tool', args: '{}' } },
      ],
      usage1,
    );
    const response2 = createResponse(
      'resp-2',
      'model-stream',
      [
        { text: ' world' },
        { functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
        { functionResponse: { name: 'tool', response: { output: 'ok' } } },
      ],
      usage2,
      'STOP',
    );

    const wrapped = createWrappedGenerator(
      vi.fn(),
      vi.fn().mockResolvedValue(
        (async function* () {
          yield response1;
          yield response2;
        })(),
      ),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    const stream = await generator.generateContentStream(request, 'prompt-3');
    const seen: GenerateContentResponse[] = [];
    for await (const item of stream) {
      seen.push(item);
    }
    expect(seen).toHaveLength(2);

    expect(logApiResponse).toHaveBeenCalledTimes(1);
    const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
    expect(responseEvent.response_id).toBe('resp-1');
    expect(responseEvent.input_token_count).toBe(2);

    expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
    const [consolidatedResponse] =
      convertGeminiResponseToOpenAISpy.mock.calls[0];
    const consolidatedParts =
      consolidatedResponse.candidates?.[0]?.content?.parts || [];
    expect(consolidatedParts).toEqual([
      { text: 'Hello' },
      { functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
      { text: ' world' },
      { functionResponse: { name: 'tool', response: { output: 'ok' } } },
    ]);
    expect(consolidatedResponse.usageMetadata).toBe(usage2);
    expect(consolidatedResponse.responseId).toBe('resp-2');
    expect(consolidatedResponse.candidates?.[0]?.finishReason).toBe('STOP');
  });

  it('logs stream errors and skips response logging', async () => {
    const response1 = createResponse('resp-1', 'model-stream', [
      { text: 'partial' },
    ]);
    const streamError = new Error('stream-fail');
    const wrapped = createWrappedGenerator(
      vi.fn(),
      vi.fn().mockResolvedValue(
        (async function* () {
          yield response1;
          throw streamError;
        })(),
      ),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
);
|
||||
|
||||
const request = {
|
||||
model: 'test-model',
|
||||
contents: 'Hello',
|
||||
} as unknown as GenerateContentParameters;
|
||||
|
||||
const stream = await generator.generateContentStream(request, 'prompt-4');
|
||||
await expect(async () => {
|
||||
for await (const _item of stream) {
|
||||
// Consume stream to trigger error.
|
||||
}
|
||||
}).rejects.toThrow('stream-fail');
|
||||
|
||||
expect(logApiResponse).not.toHaveBeenCalled();
|
||||
expect(logApiError).toHaveBeenCalledTimes(1);
|
||||
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
|
||||
?.value as { logInteraction: ReturnType<typeof vi.fn> };
|
||||
expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
@@ -4,20 +4,22 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Content,
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
import {
  GenerateContentResponse,
  ContentListUnion,
  ContentUnion,
  Part,
  PartUnion,
  type Content,
  type CountTokensParameters,
  type CountTokensResponse,
  type EmbedContentParameters,
  type EmbedContentResponse,
  type GenerateContentParameters,
  type GenerateContentResponseUsageMetadata,
  type ContentListUnion,
  type ContentUnion,
  type Part,
  type PartUnion,
  type FinishReason,
} from '@google/genai';
import type OpenAI from 'openai';
import {
  ApiRequestEvent,
  ApiResponseEvent,
@@ -31,6 +33,8 @@ import {
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';

interface StructuredError {
  status: number;
@@ -40,10 +44,19 @@ interface StructuredError {
 * A decorator that wraps a ContentGenerator to add logging to API calls.
 */
export class LoggingContentGenerator implements ContentGenerator {
  private openaiLogger?: OpenAILogger;
  private schemaCompliance?: 'auto' | 'openapi_30';

  constructor(
    private readonly wrapped: ContentGenerator,
    private readonly config: Config,
  ) {}
  ) {
    const generatorConfig = this.config.getContentGeneratorConfig();
    if (generatorConfig?.enableOpenAILogging) {
      this.openaiLogger = new OpenAILogger(generatorConfig.openAILoggingDir);
      this.schemaCompliance = generatorConfig.schemaCompliance;
    }
  }

  getWrapped(): ContentGenerator {
    return this.wrapped;
@@ -91,21 +104,31 @@ export class LoggingContentGenerator implements ContentGenerator {
    prompt_id: string,
  ): void {
    const errorMessage = error instanceof Error ? error.message : String(error);
    const errorType = error instanceof Error ? error.name : 'unknown';
    const errorType =
      (error as { type?: string })?.type ||
      (error instanceof Error ? error.name : 'unknown');
    const errorResponseId =
      (error as { requestID?: string; request_id?: string })?.requestID ||
      (error as { requestID?: string; request_id?: string })?.request_id ||
      responseId;
    const errorStatus =
      (error as { code?: string | number; status?: number })?.code ??
      (error as { status?: number })?.status ??
      (isStructuredError(error)
        ? (error as StructuredError).status
        : undefined);

    logApiError(
      this.config,
      new ApiErrorEvent(
        responseId,
        errorResponseId,
        model,
        errorMessage,
        durationMs,
        prompt_id,
        this.config.getContentGeneratorConfig()?.authType,
        errorType,
        isStructuredError(error)
          ? (error as StructuredError).status
          : undefined,
        errorStatus,
      ),
    );
  }
@@ -116,6 +139,7 @@ export class LoggingContentGenerator implements ContentGenerator {
  ): Promise<GenerateContentResponse> {
    const startTime = Date.now();
    this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
    const openaiRequest = await this.buildOpenAIRequestForLogging(req);
    try {
      const response = await this.wrapped.generateContent(req, userPromptId);
      const durationMs = Date.now() - startTime;
@@ -127,10 +151,12 @@ export class LoggingContentGenerator implements ContentGenerator {
        response.usageMetadata,
        JSON.stringify(response),
      );
      await this.logOpenAIInteraction(openaiRequest, response);
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }
  }
@@ -141,6 +167,7 @@ export class LoggingContentGenerator implements ContentGenerator {
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const startTime = Date.now();
    this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
    const openaiRequest = await this.buildOpenAIRequestForLogging(req);

    let stream: AsyncGenerator<GenerateContentResponse>;
    try {
@@ -148,6 +175,7 @@ export class LoggingContentGenerator implements ContentGenerator {
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }

@@ -156,6 +184,7 @@ export class LoggingContentGenerator implements ContentGenerator {
      startTime,
      userPromptId,
      req.model,
      openaiRequest,
    );
  }

@@ -164,6 +193,7 @@ export class LoggingContentGenerator implements ContentGenerator {
    startTime: number,
    userPromptId: string,
    model: string,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): AsyncGenerator<GenerateContentResponse> {
    const responses: GenerateContentResponse[] = [];

@@ -186,6 +216,9 @@ export class LoggingContentGenerator implements ContentGenerator {
        lastUsageMetadata,
        JSON.stringify(responses),
      );
      const consolidatedResponse =
        this.consolidateGeminiResponsesForLogging(responses);
      await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(
@@ -195,10 +228,182 @@ export class LoggingContentGenerator implements ContentGenerator {
        responses[0]?.modelVersion || model,
        userPromptId,
      );
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }
  }

  private async buildOpenAIRequestForLogging(
    request: GenerateContentParameters,
  ): Promise<OpenAI.Chat.ChatCompletionCreateParams | undefined> {
    if (!this.openaiLogger) {
      return undefined;
    }

    const converter = new OpenAIContentConverter(
      request.model,
      this.schemaCompliance,
    );
    const messages = converter.convertGeminiRequestToOpenAI(request, {
      cleanOrphanToolCalls: false,
    });

    const openaiRequest: OpenAI.Chat.ChatCompletionCreateParams = {
      model: request.model,
      messages,
    };

    if (request.config?.tools) {
      openaiRequest.tools = await converter.convertGeminiToolsToOpenAI(
        request.config.tools,
      );
    }

    if (request.config?.temperature !== undefined) {
      openaiRequest.temperature = request.config.temperature;
    }
    if (request.config?.topP !== undefined) {
      openaiRequest.top_p = request.config.topP;
    }
    if (request.config?.maxOutputTokens !== undefined) {
      openaiRequest.max_tokens = request.config.maxOutputTokens;
    }
    if (request.config?.presencePenalty !== undefined) {
      openaiRequest.presence_penalty = request.config.presencePenalty;
    }
    if (request.config?.frequencyPenalty !== undefined) {
      openaiRequest.frequency_penalty = request.config.frequencyPenalty;
    }

    return openaiRequest;
  }

  private async logOpenAIInteraction(
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams | undefined,
    response?: GenerateContentResponse,
    error?: unknown,
  ): Promise<void> {
    if (!this.openaiLogger || !openaiRequest) {
      return;
    }

    const openaiResponse = response
      ? this.convertGeminiResponseToOpenAIForLogging(response, openaiRequest)
      : undefined;

    await this.openaiLogger.logInteraction(
      openaiRequest,
      openaiResponse,
      error instanceof Error
        ? error
        : error
          ? new Error(String(error))
          : undefined,
    );
  }

  private convertGeminiResponseToOpenAIForLogging(
    response: GenerateContentResponse,
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
  ): OpenAI.Chat.ChatCompletion {
    const converter = new OpenAIContentConverter(
      openaiRequest.model,
      this.schemaCompliance,
    );

    return converter.convertGeminiResponseToOpenAI(response);
  }

  private consolidateGeminiResponsesForLogging(
    responses: GenerateContentResponse[],
  ): GenerateContentResponse | undefined {
    if (responses.length === 0) {
      return undefined;
    }

    const consolidated = new GenerateContentResponse();
    const combinedParts: Part[] = [];
    const functionCallIndex = new Map<string, number>();
    let finishReason: FinishReason | undefined;
    let usageMetadata: GenerateContentResponseUsageMetadata | undefined;

    for (const response of responses) {
      if (response.usageMetadata) {
        usageMetadata = response.usageMetadata;
      }

      const candidate = response.candidates?.[0];
      if (candidate?.finishReason) {
        finishReason = candidate.finishReason;
      }

      const parts = candidate?.content?.parts ?? [];
      for (const part of parts as Part[]) {
        if (typeof part === 'string') {
          combinedParts.push({ text: part });
          continue;
        }

        if ('text' in part) {
          if (part.text) {
            combinedParts.push({
              text: part.text,
              ...(part.thought ? { thought: true } : {}),
              ...(part.thoughtSignature
                ? { thoughtSignature: part.thoughtSignature }
                : {}),
            });
          }
          continue;
        }

        if ('functionCall' in part && part.functionCall) {
          const callKey =
            part.functionCall.id || part.functionCall.name || 'tool_call';
          const existingIndex = functionCallIndex.get(callKey);
          const functionPart = { functionCall: part.functionCall };
          if (existingIndex !== undefined) {
            combinedParts[existingIndex] = functionPart;
          } else {
            functionCallIndex.set(callKey, combinedParts.length);
            combinedParts.push(functionPart);
          }
          continue;
        }

        if ('functionResponse' in part && part.functionResponse) {
          combinedParts.push({ functionResponse: part.functionResponse });
          continue;
        }

        combinedParts.push(part);
      }
    }

    const lastResponse = responses[responses.length - 1];
    const lastCandidate = lastResponse.candidates?.[0];

    consolidated.responseId = lastResponse.responseId;
    consolidated.createTime = lastResponse.createTime;
    consolidated.modelVersion = lastResponse.modelVersion;
    consolidated.promptFeedback = lastResponse.promptFeedback;
    consolidated.usageMetadata = usageMetadata;

    consolidated.candidates = [
      {
        content: {
          role: lastCandidate?.content?.role || 'model',
          parts: combinedParts,
        },
        ...(finishReason ? { finishReason } : {}),
        index: 0,
        safetyRatings: lastCandidate?.safetyRatings || [],
      },
    ];

    return consolidated;
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    return this.wrapped.countTokens(req);
  }
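The consolidation above replaces earlier streamed versions of a function call with the latest one, keyed by call id, while text parts are appended in arrival order. A standalone sketch of that keyed-replacement idea, using illustrative types that stand in for the @google/genai Part shape:

// Sketch only: later function-call parts overwrite earlier ones with the
// same id (newest arguments win); text parts keep their arrival order.
type SketchPart =
  | { text: string }
  | { functionCall: { id: string; name: string; args: string } };

function consolidateParts(chunks: SketchPart[][]): SketchPart[] {
  const out: SketchPart[] = [];
  const callIndex = new Map<string, number>();
  for (const parts of chunks) {
    for (const part of parts) {
      if ('functionCall' in part) {
        const key = part.functionCall.id;
        const existing = callIndex.get(key);
        if (existing !== undefined) {
          out[existing] = part; // replace in place: newest arguments win
        } else {
          callIndex.set(key, out.length);
          out.push(part);
        }
      } else {
        out.push(part);
      }
    }
  }
  return out;
}

Fed the two streamed chunks from the test above, this yields the 'Hello' text, then the call with args '{"x":1}' occupying the call's original slot, matching the expected consolidated parts.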
@@ -236,8 +236,9 @@ export class OpenAIContentConverter {
   */
  convertGeminiRequestToOpenAI(
    request: GenerateContentParameters,
    options: { cleanOrphanToolCalls: boolean } = { cleanOrphanToolCalls: true },
  ): OpenAI.Chat.ChatCompletionMessageParam[] {
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
    let messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];

    // Handle system instruction from config
    this.addSystemInstructionMessage(request, messages);
@@ -246,11 +247,89 @@ export class OpenAIContentConverter {
    this.processContents(request.contents, messages);

    // Clean up orphaned tool calls and merge consecutive assistant messages
    const cleanedMessages = this.cleanOrphanedToolCalls(messages);
    const mergedMessages =
      this.mergeConsecutiveAssistantMessages(cleanedMessages);
    if (options.cleanOrphanToolCalls) {
      messages = this.cleanOrphanedToolCalls(messages);
    }
    messages = this.mergeConsecutiveAssistantMessages(messages);

    return mergedMessages;
    return messages;
  }

  /**
   * Convert Gemini response to OpenAI completion format (for logging).
   */
  convertGeminiResponseToOpenAI(
    response: GenerateContentResponse,
  ): OpenAI.Chat.ChatCompletion {
    const candidate = response.candidates?.[0];
    const parts = (candidate?.content?.parts || []) as Part[];
    const parsedParts = this.parseParts(parts);

    const message: ExtendedCompletionMessage = {
      role: 'assistant',
      content: parsedParts.contentParts.join('') || null,
      refusal: null,
    };

    const reasoningContent = parsedParts.thoughtParts.join('');
    if (reasoningContent) {
      message.reasoning_content = reasoningContent;
    }

    if (parsedParts.functionCalls.length > 0) {
      message.tool_calls = parsedParts.functionCalls.map((call, index) => ({
        id: call.id || `call_${index}`,
        type: 'function' as const,
        function: {
          name: call.name || '',
          arguments: JSON.stringify(call.args || {}),
        },
      }));
    }

    const finishReason = this.mapGeminiFinishReasonToOpenAI(
      candidate?.finishReason,
    );

    const usageMetadata = response.usageMetadata;
    const usage: OpenAI.CompletionUsage = {
      prompt_tokens: usageMetadata?.promptTokenCount || 0,
      completion_tokens: usageMetadata?.candidatesTokenCount || 0,
      total_tokens: usageMetadata?.totalTokenCount || 0,
    };

    if (usageMetadata?.cachedContentTokenCount !== undefined) {
      (
        usage as OpenAI.CompletionUsage & {
          prompt_tokens_details?: { cached_tokens?: number };
        }
      ).prompt_tokens_details = {
        cached_tokens: usageMetadata.cachedContentTokenCount,
      };
    }

    const createdMs = response.createTime
      ? Number(response.createTime)
      : Date.now();
    const createdSeconds = Number.isFinite(createdMs)
      ? Math.floor(createdMs / 1000)
      : Math.floor(Date.now() / 1000);

    return {
      id: response.responseId || `gemini-${Date.now()}`,
      object: 'chat.completion',
      created: createdSeconds,
      model: response.modelVersion || this.model,
      choices: [
        {
          index: 0,
          message,
          finish_reason: finishReason,
          logprobs: null,
        },
      ],
      usage,
    };
  }

  /**
@@ -836,84 +915,6 @@ export class OpenAIContentConverter {
    return response;
  }

  /**
   * Convert Gemini response format to OpenAI chat completion format for logging
   */
  convertGeminiResponseToOpenAI(
    response: GenerateContentResponse,
  ): OpenAI.Chat.ChatCompletion {
    const candidate = response.candidates?.[0];
    const content = candidate?.content;

    let messageContent: string | null = null;
    const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];

    if (content?.parts) {
      const textParts: string[] = [];

      for (const part of content.parts) {
        if ('text' in part && part.text) {
          textParts.push(part.text);
        } else if ('functionCall' in part && part.functionCall) {
          toolCalls.push({
            id: part.functionCall.id || `call_${toolCalls.length}`,
            type: 'function' as const,
            function: {
              name: part.functionCall.name || '',
              arguments: JSON.stringify(part.functionCall.args || {}),
            },
          });
        }
      }

      messageContent = textParts.join('').trimEnd();
    }

    const choice: OpenAI.Chat.ChatCompletion.Choice = {
      index: 0,
      message: {
        role: 'assistant',
        content: messageContent,
        refusal: null,
      },
      finish_reason: this.mapGeminiFinishReasonToOpenAI(
        candidate?.finishReason,
      ) as OpenAI.Chat.ChatCompletion.Choice['finish_reason'],
      logprobs: null,
    };

    if (toolCalls.length > 0) {
      choice.message.tool_calls = toolCalls;
    }

    const openaiResponse: OpenAI.Chat.ChatCompletion = {
      id: response.responseId || `chatcmpl-${Date.now()}`,
      object: 'chat.completion',
      created: response.createTime
        ? Number(response.createTime)
        : Math.floor(Date.now() / 1000),
      model: this.model,
      choices: [choice],
    };

    // Add usage metadata if available
    if (response.usageMetadata) {
      openaiResponse.usage = {
        prompt_tokens: response.usageMetadata.promptTokenCount || 0,
        completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
        total_tokens: response.usageMetadata.totalTokenCount || 0,
      };

      if (response.usageMetadata.cachedContentTokenCount) {
        openaiResponse.usage.prompt_tokens_details = {
          cached_tokens: response.usageMetadata.cachedContentTokenCount,
        };
      }
    }

    return openaiResponse;
  }

  /**
   * Map OpenAI finish reasons to Gemini finish reasons
   */
@@ -931,29 +932,24 @@ export class OpenAIContentConverter {
    return mapping[openaiReason] || FinishReason.FINISH_REASON_UNSPECIFIED;
  }

  /**
   * Map Gemini finish reasons to OpenAI finish reasons
   */
  private mapGeminiFinishReasonToOpenAI(geminiReason?: unknown): string {
    if (!geminiReason) return 'stop';
  private mapGeminiFinishReasonToOpenAI(
    geminiReason?: FinishReason,
  ): 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' {
    if (!geminiReason) {
      return 'stop';
    }

    switch (geminiReason) {
      case 'STOP':
      case 1: // FinishReason.STOP
      case FinishReason.STOP:
        return 'stop';
      case 'MAX_TOKENS':
      case 2: // FinishReason.MAX_TOKENS
      case FinishReason.MAX_TOKENS:
        return 'length';
      case 'SAFETY':
      case 3: // FinishReason.SAFETY
      case FinishReason.SAFETY:
        return 'content_filter';
      case 'RECITATION':
      case 4: // FinishReason.RECITATION
        return 'content_filter';
      case 'OTHER':
      case 5: // FinishReason.OTHER
        return 'stop';
      default:
        if (geminiReason === ('RECITATION' as FinishReason)) {
          return 'content_filter';
        }
        return 'stop';
    }
  }
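A minimal usage sketch of the converter method added above. The model name and the empty response are illustrative; the constructor arguments follow the LoggingContentGenerator call sites in this diff:

// Sketch: turn a Gemini response into an OpenAI-style chat.completion
// record, e.g. for writing a request/response pair to the OpenAI log.
import { GenerateContentResponse } from '@google/genai';
import { OpenAIContentConverter } from './converter.js';

const converter = new OpenAIContentConverter('my-model'); // schemaCompliance argument omitted
const geminiResponse = new GenerateContentResponse(); // illustrative empty response
const completion = converter.convertGeminiResponseToOpenAI(geminiResponse);
console.log(completion.object); // 'chat.completion'
console.log(completion.choices[0].finish_reason); // 'stop' when no finishReason is present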
@@ -7,7 +7,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { GenerateContentParameters } from '@google/genai';
import { EnhancedErrorHandler } from './errorHandler.js';
import type { RequestContext } from './telemetryService.js';
import type { RequestContext } from './errorHandler.js';

describe('EnhancedErrorHandler', () => {
  let errorHandler: EnhancedErrorHandler;
@@ -5,7 +5,15 @@
 */

import type { GenerateContentParameters } from '@google/genai';
import type { RequestContext } from './telemetryService.js';

export interface RequestContext {
  userPromptId: string;
  model: string;
  authType: string;
  startTime: number;
  duration: number;
  isStreaming: boolean;
}

export interface ErrorHandler {
  handle(
@@ -91,11 +91,4 @@ export function determineProvider(
  return new DefaultOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
}

// Services
export {
  type TelemetryService,
  type RequestContext,
  DefaultTelemetryService,
} from './telemetryService.js';

export { type ErrorHandler, EnhancedErrorHandler } from './errorHandler.js';
@@ -11,7 +11,6 @@ import type {
} from '@google/genai';
import type { PipelineConfig } from './pipeline.js';
import { ContentGenerationPipeline } from './pipeline.js';
import { DefaultTelemetryService } from './telemetryService.js';
import { EnhancedErrorHandler } from './errorHandler.js';
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
@@ -29,11 +28,6 @@ export class OpenAIContentGenerator implements ContentGenerator {
      cliConfig,
      provider,
      contentGeneratorConfig,
      telemetryService: new DefaultTelemetryService(
        cliConfig,
        contentGeneratorConfig.enableOpenAILogging,
        contentGeneratorConfig.openAILoggingDir,
      ),
      errorHandler: new EnhancedErrorHandler(
        (error: unknown, request: GenerateContentParameters) =>
          this.shouldSuppressErrorLogging(error, request),
@@ -15,7 +15,6 @@ import { OpenAIContentConverter } from './converter.js';
import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig, AuthType } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import type { TelemetryService } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';

// Mock dependencies
@@ -28,7 +27,6 @@ describe('ContentGenerationPipeline', () => {
  let mockProvider: OpenAICompatibleProvider;
  let mockClient: OpenAI;
  let mockConverter: OpenAIContentConverter;
  let mockTelemetryService: TelemetryService;
  let mockErrorHandler: ErrorHandler;
  let mockContentGeneratorConfig: ContentGeneratorConfig;
  let mockCliConfig: Config;
@@ -63,13 +61,6 @@ describe('ContentGenerationPipeline', () => {
      getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
    };

    // Mock telemetry service
    mockTelemetryService = {
      logSuccess: vi.fn().mockResolvedValue(undefined),
      logError: vi.fn().mockResolvedValue(undefined),
      logStreamingSuccess: vi.fn().mockResolvedValue(undefined),
    };

    // Mock error handler
    mockErrorHandler = {
      handle: vi.fn().mockImplementation((error: unknown) => {
@@ -99,7 +90,6 @@ describe('ContentGenerationPipeline', () => {
      cliConfig: mockCliConfig,
      provider: mockProvider,
      contentGeneratorConfig: mockContentGeneratorConfig,
      telemetryService: mockTelemetryService,
      errorHandler: mockErrorHandler,
    };

@@ -172,17 +162,6 @@ describe('ContentGenerationPipeline', () => {
    expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
      mockOpenAIResponse,
    );
    expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: false,
      }),
      mockGeminiResponse,
      expect.any(Object),
      mockOpenAIResponse,
    );
  });

  it('should handle tools in request', async () => {
@@ -268,16 +247,6 @@ describe('ContentGenerationPipeline', () => {
      'API Error',
    );

    expect(mockTelemetryService.logError).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: false,
      }),
      testError,
      expect.any(Object),
    );
    expect(mockErrorHandler.handle).toHaveBeenCalledWith(
      testError,
      expect.any(Object),
@@ -376,17 +345,6 @@ describe('ContentGenerationPipeline', () => {
        signal: undefined,
      }),
    );
    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      [mockGeminiResponse1, mockGeminiResponse2],
      expect.any(Object),
      [mockChunk1, mockChunk2],
    );
  });

  it('should filter empty responses', async () => {
@@ -490,16 +448,6 @@ describe('ContentGenerationPipeline', () => {

    expect(results).toHaveLength(0); // No results due to error
    expect(mockConverter.resetStreamingToolCalls).toHaveBeenCalledTimes(2); // Once at start, once on error
    expect(mockTelemetryService.logError).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      testError,
      expect.any(Object),
    );
    expect(mockErrorHandler.handle).toHaveBeenCalledWith(
      testError,
      expect.any(Object),
@@ -650,18 +598,6 @@ describe('ContentGenerationPipeline', () => {
      candidatesTokenCount: 20,
      totalTokenCount: 30,
    });

    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      results,
      expect.any(Object),
      [mockChunk1, mockChunk2, mockChunk3],
    );
  });

  it('should handle ideal case where last chunk has both finishReason and usageMetadata', async () => {
@@ -853,18 +789,6 @@ describe('ContentGenerationPipeline', () => {
      candidatesTokenCount: 20,
      totalTokenCount: 30,
    });

    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      results,
      expect.any(Object),
      [mockChunk1, mockChunk2, mockChunk3],
    );
  });

  it('should handle providers that send finishReason and valid usage in same chunk', async () => {
@@ -1118,19 +1042,6 @@ describe('ContentGenerationPipeline', () => {
    await pipeline.execute(request, userPromptId);

    // Assert
    expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: false,
        startTime: expect.any(Number),
        duration: expect.any(Number),
      }),
      expect.any(Object),
      expect.any(Object),
      expect.any(Object),
    );
  });

  it('should create context with correct properties for streaming request', async () => {
@@ -1173,19 +1084,6 @@ describe('ContentGenerationPipeline', () => {
    }

    // Assert
    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
        startTime: expect.any(Number),
        duration: expect.any(Number),
      }),
      expect.any(Array),
      expect.any(Object),
      expect.any(Array),
    );
  });

  it('should collect all OpenAI chunks for logging even when Gemini responses are filtered', async () => {
@@ -1329,22 +1227,6 @@ describe('ContentGenerationPipeline', () => {
    // Should only yield the final response (empty ones are filtered)
    expect(responses).toHaveLength(1);
    expect(responses[0]).toBe(finalGeminiResponse);

    // Verify telemetry was called with ALL OpenAI chunks, including the filtered ones
    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        model: 'test-model',
        duration: expect.any(Number),
        userPromptId: 'test-prompt-id',
        authType: 'openai',
      }),
      [finalGeminiResponse], // Only the non-empty Gemini response
      expect.objectContaining({
        model: 'test-model',
        messages: [{ role: 'user', content: 'test' }],
      }),
      [partialToolCallChunk1, partialToolCallChunk2, finishChunk], // ALL OpenAI chunks
    );
  });
});
});
@@ -13,14 +13,12 @@ import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import { OpenAIContentConverter } from './converter.js';
import type { TelemetryService, RequestContext } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';
import type { ErrorHandler, RequestContext } from './errorHandler.js';

export interface PipelineConfig {
  cliConfig: Config;
  provider: OpenAICompatibleProvider;
  contentGeneratorConfig: ContentGeneratorConfig;
  telemetryService: TelemetryService;
  errorHandler: ErrorHandler;
}

@@ -46,7 +44,7 @@ export class ContentGenerationPipeline {
      request,
      userPromptId,
      false,
      async (openaiRequest, context) => {
      async (openaiRequest) => {
        const openaiResponse = (await this.client.chat.completions.create(
          openaiRequest,
          {
@@ -57,14 +55,6 @@ export class ContentGenerationPipeline {
        const geminiResponse =
          this.converter.convertOpenAIResponseToGemini(openaiResponse);

        // Log success
        await this.config.telemetryService.logSuccess(
          context,
          geminiResponse,
          openaiRequest,
          openaiResponse,
        );

        return geminiResponse;
      },
    );
@@ -88,12 +78,7 @@ export class ContentGenerationPipeline {
        )) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;

        // Stage 2: Process stream with conversion and logging
        return this.processStreamWithLogging(
          stream,
          context,
          openaiRequest,
          request,
        );
        return this.processStreamWithLogging(stream, context, request);
      },
    );
  }
@@ -110,11 +95,9 @@ export class ContentGenerationPipeline {
  private async *processStreamWithLogging(
    stream: AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
    context: RequestContext,
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
    request: GenerateContentParameters,
  ): AsyncGenerator<GenerateContentResponse> {
    const collectedGeminiResponses: GenerateContentResponse[] = [];
    const collectedOpenAIChunks: OpenAI.Chat.ChatCompletionChunk[] = [];

    // Reset streaming tool calls to prevent data pollution from previous streams
    this.converter.resetStreamingToolCalls();
@@ -125,9 +108,6 @@ export class ContentGenerationPipeline {
    try {
      // Stage 2a: Convert and yield each chunk while preserving original
      for await (const chunk of stream) {
        // Always collect OpenAI chunks for logging, regardless of Gemini conversion result
        collectedOpenAIChunks.push(chunk);

        const response = this.converter.convertOpenAIChunkToGemini(chunk);

        // Stage 2b: Filter empty responses to avoid downstream issues
@@ -164,15 +144,8 @@ export class ContentGenerationPipeline {
        yield pendingFinishResponse;
      }

      // Stage 2e: Stream completed successfully - perform logging with original OpenAI chunks
      // Stage 2e: Stream completed successfully
      context.duration = Date.now() - context.startTime;

      await this.config.telemetryService.logStreamingSuccess(
        context,
        collectedGeminiResponses,
        openaiRequest,
        collectedOpenAIChunks,
      );
    } catch (error) {
      // Clear streaming tool calls on error to prevent data pollution
      this.converter.resetStreamingToolCalls();
@@ -258,7 +231,7 @@ export class ContentGenerationPipeline {
    const baseRequest: OpenAI.Chat.ChatCompletionCreateParams = {
      model: this.contentGeneratorConfig.model,
      messages,
      ...this.buildSamplingParameters(request),
      ...this.buildGenerateContentConfig(request),
    };

    // Add streaming options if present
@@ -280,7 +253,7 @@ export class ContentGenerationPipeline {
    return this.config.provider.buildRequest(baseRequest, userPromptId);
  }

  private buildSamplingParameters(
  private buildGenerateContentConfig(
    request: GenerateContentParameters,
  ): Record<string, unknown> {
    const defaultSamplingParams =
@@ -316,7 +289,7 @@ export class ContentGenerationPipeline {
      return value !== undefined ? { [key]: value } : {};
    };

    const params = {
    const params: Record<string, unknown> = {
      // Parameters with request fallback but no defaults
      ...addParameterIfDefined('temperature', 'temperature', 'temperature'),
      ...addParameterIfDefined('top_p', 'top_p', 'topP'),
@@ -337,11 +310,24 @@ export class ContentGenerationPipeline {
        'frequency_penalty',
        'frequencyPenalty',
      ),
      ...this.buildReasoningConfig(),
    };

    return params;
  }

  private buildReasoningConfig(): Record<string, unknown> {
    const reasoning = this.contentGeneratorConfig.reasoning;

    if (reasoning === false) {
      return {};
    }

    return {
      reasoning_effort: reasoning?.effort ?? 'medium',
    };
  }

  /**
   * Common error handling wrapper for execute methods
   */
@@ -369,13 +355,7 @@ export class ContentGenerationPipeline {
      return result;
    } catch (error) {
      // Use shared error handling logic
      return await this.handleError(
        error,
        context,
        request,
        userPromptId,
        isStreaming,
      );
      return await this.handleError(error, context, request);
    }
  }

@@ -387,37 +367,8 @@ export class ContentGenerationPipeline {
    error: unknown,
    context: RequestContext,
    request: GenerateContentParameters,
    userPromptId?: string,
    isStreaming?: boolean,
  ): Promise<never> {
    context.duration = Date.now() - context.startTime;

    // Build request for logging (may fail, but we still want to log the error)
    let openaiRequest: OpenAI.Chat.ChatCompletionCreateParams;
    try {
      if (userPromptId !== undefined && isStreaming !== undefined) {
        openaiRequest = await this.buildRequest(
          request,
          userPromptId,
          isStreaming,
        );
      } else {
        // For processStreamWithLogging, we don't have userPromptId/isStreaming,
        // so create a minimal request
        openaiRequest = {
          model: this.contentGeneratorConfig.model,
          messages: [],
        };
      }
    } catch (_buildError) {
      // If we can't build the request, create a minimal one for logging
      openaiRequest = {
        model: this.contentGeneratorConfig.model,
        messages: [],
      };
    }

    await this.config.telemetryService.logError(context, error, openaiRequest);
    this.config.errorHandler.handle(error, context, request);
  }

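The new buildReasoningConfig above lets reasoning be disabled outright (reasoning === false) or tuned per config, defaulting the effort to 'medium'. A condensed sketch of the effective mapping, with an assumed config shape matching the code in the diff:

// Sketch of the mapping from the content-generator reasoning setting to the
// OpenAI-compatible request parameter; values here are illustrative.
type ReasoningConfig = false | { effort?: 'low' | 'medium' | 'high' } | undefined;

function reasoningParams(reasoning: ReasoningConfig): Record<string, unknown> {
  if (reasoning === false) return {}; // explicitly disabled: omit the field entirely
  return { reasoning_effort: reasoning?.effort ?? 'medium' };
}

reasoningParams(false);              // {}
reasoningParams(undefined);          // { reasoning_effort: 'medium' }
reasoningParams({ effort: 'high' }); // { reasoning_effort: 'high' }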
@@ -39,7 +39,8 @@ export class DashScopeOpenAICompatibleProvider
    return (
      authType === AuthType.QWEN_OAUTH ||
      baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' ||
      baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
      baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1' ||
      !baseUrl
    );
  }

@@ -144,9 +145,7 @@ export class DashScopeOpenAICompatibleProvider

  getDefaultGenerationConfig(): GenerateContentConfig {
    return {
      temperature: 0.7,
      topP: 0.8,
      topK: 20,
      temperature: 0.3,
    };
  }
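With the `!baseUrl` clause added above, DashScope becomes the default provider when no base URL is configured at all. A condensed sketch of the predicate; the string 'qwen-oauth' stands in for the AuthType.QWEN_OAUTH enum value, which is an assumption here:

const DASHSCOPE_URLS = [
  'https://dashscope.aliyuncs.com/compatible-mode/v1',
  'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
];

function isDashScope(authType: string | undefined, baseUrl: string | undefined): boolean {
  return (
    authType === 'qwen-oauth' || // stands in for AuthType.QWEN_OAUTH
    !baseUrl ||                  // no base URL configured: DashScope is now the default
    DASHSCOPE_URLS.includes(baseUrl)
  );
}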
File diff suppressed because it is too large
@@ -1,275 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type { Config } from '../../config/config.js';
import { logApiError, logApiResponse } from '../../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../../telemetry/types.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type { GenerateContentResponse } from '@google/genai';
import type OpenAI from 'openai';
import type { ExtendedCompletionChunkDelta } from './converter.js';

export interface RequestContext {
  userPromptId: string;
  model: string;
  authType: string;
  startTime: number;
  duration: number;
  isStreaming: boolean;
}

export interface TelemetryService {
  logSuccess(
    context: RequestContext,
    response: GenerateContentResponse,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiResponse?: OpenAI.Chat.ChatCompletion,
  ): Promise<void>;

  logError(
    context: RequestContext,
    error: unknown,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): Promise<void>;

  logStreamingSuccess(
    context: RequestContext,
    responses: GenerateContentResponse[],
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
  ): Promise<void>;
}

export class DefaultTelemetryService implements TelemetryService {
  private logger: OpenAILogger;

  constructor(
    private config: Config,
    private enableOpenAILogging: boolean = false,
    openAILoggingDir?: string,
  ) {
    // Always create a new logger instance to ensure correct working directory
    // If no custom directory is provided, undefined will use the default path
    this.logger = new OpenAILogger(openAILoggingDir);
  }

  async logSuccess(
    context: RequestContext,
    response: GenerateContentResponse,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiResponse?: OpenAI.Chat.ChatCompletion,
  ): Promise<void> {
    // Log API response event for UI telemetry
    const responseEvent = new ApiResponseEvent(
      response.responseId || 'unknown',
      context.model,
      context.duration,
      context.userPromptId,
      context.authType,
      response.usageMetadata,
    );

    logApiResponse(this.config, responseEvent);

    // Log interaction if enabled
    if (this.enableOpenAILogging && openaiRequest && openaiResponse) {
      await this.logger.logInteraction(openaiRequest, openaiResponse);
    }
  }

  async logError(
    context: RequestContext,
    error: unknown,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): Promise<void> {
    const errorMessage = error instanceof Error ? error.message : String(error);

    // Log API error event for UI telemetry
    const errorEvent = new ApiErrorEvent(
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (error as any)?.requestID || 'unknown',
      context.model,
      errorMessage,
      context.duration,
      context.userPromptId,
      context.authType,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (error as any)?.type,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (error as any)?.code,
    );
    logApiError(this.config, errorEvent);

    // Log error interaction if enabled
    if (this.enableOpenAILogging && openaiRequest) {
      await this.logger.logInteraction(
        openaiRequest,
        undefined,
        error as Error,
      );
    }
  }

  async logStreamingSuccess(
    context: RequestContext,
    responses: GenerateContentResponse[],
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
  ): Promise<void> {
    // Get final usage metadata from the last response that has it
    const finalUsageMetadata = responses
      .slice()
      .reverse()
      .find((r) => r.usageMetadata)?.usageMetadata;

    // Log API response event for UI telemetry
    const responseEvent = new ApiResponseEvent(
      responses[responses.length - 1]?.responseId || 'unknown',
      context.model,
      context.duration,
      context.userPromptId,
      context.authType,
      finalUsageMetadata,
    );

    logApiResponse(this.config, responseEvent);

    // Log interaction if enabled - combine chunks only when needed
    if (
      this.enableOpenAILogging &&
      openaiRequest &&
      openaiChunks &&
      openaiChunks.length > 0
    ) {
      const combinedResponse = this.combineOpenAIChunksForLogging(openaiChunks);
      await this.logger.logInteraction(openaiRequest, combinedResponse);
    }
  }

  /**
   * Combine OpenAI chunks for logging purposes
   * This method consolidates all OpenAI stream chunks into a single ChatCompletion response
   * for telemetry and logging purposes, avoiding unnecessary format conversions
   */
  private combineOpenAIChunksForLogging(
    chunks: OpenAI.Chat.ChatCompletionChunk[],
  ): OpenAI.Chat.ChatCompletion {
    if (chunks.length === 0) {
      throw new Error('No chunks to combine');
    }

    const firstChunk = chunks[0];

    // Combine all content from chunks
    let combinedContent = '';
    const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
    let finishReason:
      | 'stop'
      | 'length'
      | 'tool_calls'
      | 'content_filter'
      | 'function_call'
      | null = null;
    let combinedReasoning = '';
    let usage:
      | {
          prompt_tokens: number;
          completion_tokens: number;
          total_tokens: number;
        }
      | undefined;

    for (const chunk of chunks) {
      const choice = chunk.choices?.[0];
      if (choice) {
        // Combine reasoning content
        const reasoningContent = (choice.delta as ExtendedCompletionChunkDelta)
          ?.reasoning_content;
        if (reasoningContent) {
          combinedReasoning += reasoningContent;
        }
        // Combine text content
        if (choice.delta?.content) {
          combinedContent += choice.delta.content;
        }

        // Collect tool calls
        if (choice.delta?.tool_calls) {
          for (const toolCall of choice.delta.tool_calls) {
            if (toolCall.index !== undefined) {
              if (!toolCalls[toolCall.index]) {
                toolCalls[toolCall.index] = {
                  id: toolCall.id || '',
                  type: toolCall.type || 'function',
                  function: { name: '', arguments: '' },
                };
              }

              if (toolCall.function?.name) {
                toolCalls[toolCall.index].function.name +=
                  toolCall.function.name;
              }
              if (toolCall.function?.arguments) {
                toolCalls[toolCall.index].function.arguments +=
                  toolCall.function.arguments;
              }
            }
          }
        }

        // Get finish reason from the last chunk
        if (choice.finish_reason) {
          finishReason = choice.finish_reason;
        }
      }

      // Get usage from the last chunk that has it
      if (chunk.usage) {
        usage = chunk.usage;
      }
    }

    // Create the combined ChatCompletion response
    const message: OpenAI.Chat.ChatCompletionMessage = {
      role: 'assistant',
      content: combinedContent || null,
      refusal: null,
    };
    if (combinedReasoning) {
      // Attach reasoning content if any thought tokens were streamed
      (message as { reasoning_content?: string }).reasoning_content =
        combinedReasoning;
    }

    // Add tool calls if any
    if (toolCalls.length > 0) {
      message.tool_calls = toolCalls.filter((tc) => tc.id); // Filter out empty tool calls
    }

    const combinedResponse: OpenAI.Chat.ChatCompletion = {
      id: firstChunk.id,
      object: 'chat.completion',
      created: firstChunk.created,
      model: firstChunk.model,
      choices: [
        {
          index: 0,
          message,
          finish_reason: finishReason || 'stop',
          logprobs: null,
        },
      ],
      usage: usage || {
        prompt_tokens: 0,
        completion_tokens: 0,
        total_tokens: 0,
      },
      system_fingerprint: firstChunk.system_fingerprint,
    };

    return combinedResponse;
  }
}
@@ -20,11 +20,26 @@ async function getProcessInfo(pid: number): Promise<{
  command: string;
}> {
  // Only used for Unix systems (macOS and Linux)
  const { stdout } = await execAsync(`ps -p ${pid} -o ppid=,comm=`);
  const [ppidStr, ...commandParts] = stdout.trim().split(/\s+/);
  const parentPid = parseInt(ppidStr, 10);
  const command = commandParts.join(' ');
  return { parentPid, name: path.basename(command), command };
  try {
    const command = `ps -o ppid=,command= -p ${pid}`;
    const { stdout } = await execAsync(command);
    const trimmedStdout = stdout.trim();
    if (!trimmedStdout) {
      return { parentPid: 0, name: '', command: '' };
    }
    const parts = trimmedStdout.split(/\s+/);
    const ppidString = parts[0];
    const parentPid = parseInt(ppidString, 10);
    const fullCommand = trimmedStdout.substring(ppidString.length).trim();
    const processName = path.basename(fullCommand.split(' ')[0]);
    return {
      parentPid: isNaN(parentPid) ? 1 : parentPid,
      name: processName,
      command: fullCommand,
    };
  } catch (_e) {
    return { parentPid: 0, name: '', command: '' };
  }
}
/**
 * Finds the IDE process info on Unix-like systems.
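The rewritten getProcessInfo treats only the first whitespace-delimited field as the PPID and keeps the rest of the line as the command, so commands containing spaces survive intact. A self-contained sketch of that parse; the sample ps output is invented for illustration:

// Parse one line of `ps -o ppid=,command= -p <pid>` output.
function parsePsLine(stdout: string): { parentPid: number; command: string } {
  const trimmed = stdout.trim();
  if (!trimmed) return { parentPid: 0, command: '' };
  const ppidString = trimmed.split(/\s+/)[0];
  const parentPid = parseInt(ppidString, 10);
  return {
    parentPid: Number.isNaN(parentPid) ? 1 : parentPid,
    command: trimmed.substring(ppidString.length).trim(),
  };
}

parsePsLine('  812 /Applications/Visual Studio Code.app/Contents/MacOS/Electron');
// => { parentPid: 812, command: '/Applications/Visual Studio Code.app/Contents/MacOS/Electron' }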
@@ -80,6 +80,9 @@ export * from './tools/tool-registry.js';
// Export subagents (Phase 1)
export * from './subagents/index.js';

// Export skills
export * from './skills/index.js';

// Export prompt logic
export * from './prompts/mcp-prompts.js';

@@ -101,6 +104,7 @@ export * from './tools/mcp-client-manager.js';
export * from './tools/mcp-tool.js';
export * from './tools/sdk-control-client-transport.js';
export * from './tools/task.js';
export * from './tools/skill.js';
export * from './tools/todoWrite.js';
export * from './tools/exitPlanMode.js';
27 packages/core/src/mcp/constants.ts Normal file
@@ -0,0 +1,27 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * OAuth client name used for MCP dynamic client registration.
 * This name must match the allowlist on MCP servers like Figma.
 */
export const MCP_OAUTH_CLIENT_NAME = 'Gemini CLI MCP Client';

/**
 * OAuth client name for service account impersonation provider.
 */
export const MCP_SA_IMPERSONATION_CLIENT_NAME =
  'Gemini CLI (Service Account Impersonation)';

/**
 * Port for OAuth redirect callback server.
 */
export const OAUTH_REDIRECT_PORT = 7777;

/**
 * Path for OAuth redirect callback.
 */
export const OAUTH_REDIRECT_PATH = '/oauth/callback';
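These constants centralize values that were previously duplicated across the OAuth providers. The default redirect URI used throughout the flow is derived from them:

import { OAUTH_REDIRECT_PORT, OAUTH_REDIRECT_PATH } from './constants.js';

// Default redirect URI, as derived in MCPOAuthProvider when no explicit
// redirectUri is configured.
const defaultRedirectUri = `http://localhost:${OAUTH_REDIRECT_PORT}${OAUTH_REDIRECT_PATH}`;
// => 'http://localhost:7777/oauth/callback'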
@@ -13,6 +13,7 @@ import type {
} from '@modelcontextprotocol/sdk/shared/auth.js';
import { GoogleAuth } from 'google-auth-library';
import type { MCPServerConfig } from '../config/config.js';
import { MCP_OAUTH_CLIENT_NAME } from './constants.js';

const ALLOWED_HOSTS = [/^.+\.googleapis\.com$/, /^(.*\.)?luci\.app$/];

@@ -22,7 +23,7 @@ export class GoogleCredentialProvider implements OAuthClientProvider {
  // Properties required by OAuthClientProvider, with no-op values
  readonly redirectUrl = '';
  readonly clientMetadata: OAuthClientMetadata = {
    client_name: 'Gemini CLI (Google ADC)',
    client_name: MCP_OAUTH_CLIENT_NAME,
    redirect_uris: [],
    grant_types: [],
    response_types: [],
@@ -13,6 +13,11 @@ import type { OAuthToken } from './token-storage/types.js';
import { MCPOAuthTokenStorage } from './oauth-token-storage.js';
import { getErrorMessage } from '../utils/errors.js';
import { OAuthUtils } from './oauth-utils.js';
import {
  MCP_OAUTH_CLIENT_NAME,
  OAUTH_REDIRECT_PORT,
  OAUTH_REDIRECT_PATH,
} from './constants.js';

export const OAUTH_DISPLAY_MESSAGE_EVENT = 'oauth-display-message' as const;

@@ -89,8 +94,6 @@ interface PKCEParams {
  state: string;
}

const REDIRECT_PORT = 7777;
const REDIRECT_PATH = '/oauth/callback';
const HTTP_OK = 200;

/**
@@ -115,10 +118,11 @@ export class MCPOAuthProvider {
    config: MCPOAuthConfig,
  ): Promise<OAuthClientRegistrationResponse> {
    const redirectUri =
      config.redirectUri || `http://localhost:${REDIRECT_PORT}${REDIRECT_PATH}`;
      config.redirectUri ||
      `http://localhost:${OAUTH_REDIRECT_PORT}${OAUTH_REDIRECT_PATH}`;

    const registrationRequest: OAuthClientRegistrationRequest = {
      client_name: 'Gemini CLI (Google ADC)',
      client_name: MCP_OAUTH_CLIENT_NAME,
      redirect_uris: [redirectUri],
      grant_types: ['authorization_code', 'refresh_token'],
      response_types: ['code'],
@@ -192,9 +196,12 @@ export class MCPOAuthProvider {
    const server = http.createServer(
      async (req: http.IncomingMessage, res: http.ServerResponse) => {
        try {
          const url = new URL(req.url!, `http://localhost:${REDIRECT_PORT}`);
          const url = new URL(
            req.url!,
            `http://localhost:${OAUTH_REDIRECT_PORT}`,
          );

          if (url.pathname !== REDIRECT_PATH) {
          if (url.pathname !== OAUTH_REDIRECT_PATH) {
            res.writeHead(404);
            res.end('Not found');
            return;
@@ -257,8 +264,10 @@ export class MCPOAuthProvider {
    );

    server.on('error', reject);
    server.listen(REDIRECT_PORT, () => {
      console.log(`OAuth callback server listening on port ${REDIRECT_PORT}`);
    server.listen(OAUTH_REDIRECT_PORT, () => {
      console.log(
        `OAuth callback server listening on port ${OAUTH_REDIRECT_PORT}`,
      );
    });

    // Timeout after 5 minutes
@@ -286,7 +295,8 @@ export class MCPOAuthProvider {
    mcpServerUrl?: string,
  ): string {
    const redirectUri =
      config.redirectUri || `http://localhost:${REDIRECT_PORT}${REDIRECT_PATH}`;
      config.redirectUri ||
      `http://localhost:${OAUTH_REDIRECT_PORT}${OAUTH_REDIRECT_PATH}`;

    const params = new URLSearchParams({
      client_id: config.clientId!,
@@ -343,7 +353,8 @@ export class MCPOAuthProvider {
    mcpServerUrl?: string,
  ): Promise<OAuthTokenResponse> {
    const redirectUri =
      config.redirectUri || `http://localhost:${REDIRECT_PORT}${REDIRECT_PATH}`;
      config.redirectUri ||
      `http://localhost:${OAUTH_REDIRECT_PORT}${OAUTH_REDIRECT_PATH}`;

    const params = new URLSearchParams({
      grant_type: 'authorization_code',
@@ -13,6 +13,7 @@ import type {
|
||||
import { GoogleAuth } from 'google-auth-library';
|
||||
import type { MCPServerConfig } from '../config/config.js';
|
||||
import type { OAuthClientProvider } from '@modelcontextprotocol/sdk/client/auth.js';
|
||||
import { MCP_SA_IMPERSONATION_CLIENT_NAME } from './constants.js';
|
||||
|
||||
const fiveMinBufferMs = 5 * 60 * 1000;
|
||||
|
||||
@@ -32,7 +33,7 @@ export class ServiceAccountImpersonationProvider
|
||||
// Properties required by OAuthClientProvider, with no-op values
|
||||
readonly redirectUrl = '';
|
||||
readonly clientMetadata: OAuthClientMetadata = {
|
||||
client_name: 'Gemini CLI (Service Account Impersonation)',
|
||||
client_name: MCP_SA_IMPERSONATION_CLIENT_NAME,
|
||||
redirect_uris: [],
|
||||
grant_types: [],
|
||||
response_types: [],
|
||||
|
||||
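
These hunks swap per-file literals for values imported from a shared './constants.js'. That module is only partially visible here (its tail opens this section), so what follows is a hypothetical sketch of the rest; only OAUTH_REDIRECT_PORT's value can be inferred, from the removed REDIRECT_PORT literal, and the client-name strings are not shown anywhere in this diff:

// Hypothetical remainder of the shared './constants.js' module.
// OAUTH_REDIRECT_PATH appears at the top of this section.
export const OAUTH_REDIRECT_PORT = 7777; // inferred from the removed REDIRECT_PORT
export declare const MCP_OAUTH_CLIENT_NAME: string; // value not shown in this diff
export declare const MCP_SA_IMPERSONATION_CLIENT_NAME: string; // value not shown in this diff
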
31
packages/core/src/skills/index.ts
Normal file
@@ -0,0 +1,31 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @fileoverview Skills feature implementation
 *
 * This module provides the foundation for the skills feature, which allows
 * users to define reusable skill configurations that can be loaded by the
 * model via a dedicated Skills tool.
 *
 * Skills are stored as directories in `.qwen/skills/` (project-level) or
 * `~/.qwen/skills/` (user-level), with each directory containing a SKILL.md
 * file with YAML frontmatter for metadata.
 */

// Core types and interfaces
export type {
  SkillConfig,
  SkillLevel,
  SkillValidationResult,
  ListSkillsOptions,
  SkillErrorCode,
} from './types.js';

export { SkillError } from './types.js';

// Main management class
export { SkillManager } from './skill-manager.js';
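
As the fileoverview above describes, a skill is a directory under `.qwen/skills/` (project) or `~/.qwen/skills/` (user) whose SKILL.md carries YAML frontmatter followed by a markdown body. A minimal illustrative SKILL.md, using only the frontmatter fields defined in types.ts below; the skill name and body are invented for the example:

---
name: pdf-report
description: Summarizes PDF files into short written reports
allowedTools:
  - read_file
  - write_file
---

You are a careful assistant. Read the referenced PDF, extract the key
points, and write a one-page summary report.
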
463
packages/core/src/skills/skill-manager.test.ts
Normal file
@@ -0,0 +1,463 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import { SkillManager } from './skill-manager.js';
import { type SkillConfig, SkillError } from './types.js';
import type { Config } from '../config/config.js';
import { makeFakeConfig } from '../test-utils/config.js';

// Mock file system operations
vi.mock('fs/promises');
vi.mock('os');

// Mock yaml parser - use vi.hoisted for proper hoisting
const mockParseYaml = vi.hoisted(() => vi.fn());

vi.mock('../utils/yaml-parser.js', () => ({
  parse: mockParseYaml,
  stringify: vi.fn(),
}));

describe('SkillManager', () => {
  let manager: SkillManager;
  let mockConfig: Config;

  beforeEach(() => {
    // Create mock Config object using test utility
    mockConfig = makeFakeConfig({});

    // Mock the project root method
    vi.spyOn(mockConfig, 'getProjectRoot').mockReturnValue('/test/project');

    // Mock os.homedir
    vi.mocked(os.homedir).mockReturnValue('/home/user');

    // Reset and setup mocks
    vi.clearAllMocks();

    // Setup yaml parser mocks with sophisticated behavior
    mockParseYaml.mockImplementation((yamlString: string) => {
      // Handle different test cases based on YAML content
      if (yamlString.includes('allowedTools:')) {
        return {
          name: 'test-skill',
          description: 'A test skill',
          allowedTools: ['read_file', 'write_file'],
        };
      }
      if (yamlString.includes('name: skill1')) {
        return { name: 'skill1', description: 'First skill' };
      }
      if (yamlString.includes('name: skill2')) {
        return { name: 'skill2', description: 'Second skill' };
      }
      if (yamlString.includes('name: skill3')) {
        return { name: 'skill3', description: 'Third skill' };
      }
      if (!yamlString.includes('name:')) {
        return { description: 'A test skill' }; // Missing name case
      }
      if (!yamlString.includes('description:')) {
        return { name: 'test-skill' }; // Missing description case
      }
      // Default case
      return {
        name: 'test-skill',
        description: 'A test skill',
      };
    });

    manager = new SkillManager(mockConfig);
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  const validSkillConfig: SkillConfig = {
    name: 'test-skill',
    description: 'A test skill',
    level: 'project',
    filePath: '/test/project/.qwen/skills/test-skill/SKILL.md',
    body: 'You are a helpful assistant with this skill.',
  };

  const validMarkdown = `---
name: test-skill
description: A test skill
---

You are a helpful assistant with this skill.
`;

  describe('parseSkillContent', () => {
    it('should parse valid markdown content', () => {
      const config = manager.parseSkillContent(
        validMarkdown,
        validSkillConfig.filePath,
        'project',
      );

      expect(config.name).toBe('test-skill');
      expect(config.description).toBe('A test skill');
      expect(config.body).toBe('You are a helpful assistant with this skill.');
      expect(config.level).toBe('project');
      expect(config.filePath).toBe(validSkillConfig.filePath);
    });

    it('should parse content with allowedTools', () => {
      const markdownWithTools = `---
name: test-skill
description: A test skill
allowedTools:
  - read_file
  - write_file
---

You are a helpful assistant with this skill.
`;

      const config = manager.parseSkillContent(
        markdownWithTools,
        validSkillConfig.filePath,
        'project',
      );

      expect(config.allowedTools).toEqual(['read_file', 'write_file']);
    });

    it('should determine level from file path', () => {
      const projectPath = '/test/project/.qwen/skills/test-skill/SKILL.md';
      const userPath = '/home/user/.qwen/skills/test-skill/SKILL.md';

      const projectConfig = manager.parseSkillContent(
        validMarkdown,
        projectPath,
        'project',
      );
      const userConfig = manager.parseSkillContent(
        validMarkdown,
        userPath,
        'user',
      );

      expect(projectConfig.level).toBe('project');
      expect(userConfig.level).toBe('user');
    });

    it('should throw error for invalid frontmatter format', () => {
      const invalidMarkdown = `No frontmatter here
Just content`;

      expect(() =>
        manager.parseSkillContent(
          invalidMarkdown,
          validSkillConfig.filePath,
          'project',
        ),
      ).toThrow(SkillError);
    });

    it('should throw error for missing name', () => {
      const markdownWithoutName = `---
description: A test skill
---

You are a helpful assistant.
`;

      expect(() =>
        manager.parseSkillContent(
          markdownWithoutName,
          validSkillConfig.filePath,
          'project',
        ),
      ).toThrow(SkillError);
    });

    it('should throw error for missing description', () => {
      const markdownWithoutDescription = `---
name: test-skill
---

You are a helpful assistant.
`;

      expect(() =>
        manager.parseSkillContent(
          markdownWithoutDescription,
          validSkillConfig.filePath,
          'project',
        ),
      ).toThrow(SkillError);
    });
  });

  describe('validateConfig', () => {
    it('should validate valid configuration', () => {
      const result = manager.validateConfig(validSkillConfig);

      expect(result.isValid).toBe(true);
      expect(result.errors).toHaveLength(0);
    });

    it('should report error for missing name', () => {
      const invalidConfig = { ...validSkillConfig, name: '' };
      const result = manager.validateConfig(invalidConfig);

      expect(result.isValid).toBe(false);
      expect(result.errors).toContain('"name" cannot be empty');
    });

    it('should report error for missing description', () => {
      const invalidConfig = { ...validSkillConfig, description: '' };
      const result = manager.validateConfig(invalidConfig);

      expect(result.isValid).toBe(false);
      expect(result.errors).toContain('"description" cannot be empty');
    });

    it('should report error for invalid allowedTools type', () => {
      const invalidConfig = {
        ...validSkillConfig,
        allowedTools: 'not-an-array' as unknown as string[],
      };
      const result = manager.validateConfig(invalidConfig);

      expect(result.isValid).toBe(false);
      expect(result.errors).toContain('"allowedTools" must be an array');
    });

    it('should warn for empty body', () => {
      const configWithEmptyBody = { ...validSkillConfig, body: '' };
      const result = manager.validateConfig(configWithEmptyBody);

      expect(result.isValid).toBe(true); // Still valid
      expect(result.warnings).toContain('Skill body is empty');
    });
  });

  describe('loadSkill', () => {
    it('should load skill from project level first', async () => {
      vi.mocked(fs.readdir).mockResolvedValue([
        { name: 'test-skill', isDirectory: () => true, isFile: () => false },
      ] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(fs.readFile).mockResolvedValue(validMarkdown);

      const config = await manager.loadSkill('test-skill');

      expect(config).toBeDefined();
      expect(config!.name).toBe('test-skill');
    });

    it('should fall back to user level if project level fails', async () => {
      vi.mocked(fs.readdir)
        .mockRejectedValueOnce(new Error('Project dir not found')) // project level fails
        .mockResolvedValueOnce([
          { name: 'test-skill', isDirectory: () => true, isFile: () => false },
        ] as unknown as Awaited<ReturnType<typeof fs.readdir>>); // user level succeeds
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(fs.readFile).mockResolvedValue(validMarkdown);

      const config = await manager.loadSkill('test-skill');

      expect(config).toBeDefined();
      expect(config!.name).toBe('test-skill');
    });

    it('should return null if not found at either level', async () => {
      vi.mocked(fs.readdir).mockRejectedValue(new Error('Directory not found'));

      const config = await manager.loadSkill('nonexistent');

      expect(config).toBeNull();
    });
  });

  describe('loadSkillForRuntime', () => {
    it('should load skill for runtime', async () => {
      vi.mocked(fs.readdir).mockResolvedValueOnce([
        { name: 'test-skill', isDirectory: () => true, isFile: () => false },
      ] as unknown as Awaited<ReturnType<typeof fs.readdir>>);

      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(fs.readFile).mockResolvedValue(validMarkdown); // SKILL.md

      const config = await manager.loadSkillForRuntime('test-skill');

      expect(config).toBeDefined();
      expect(config!.name).toBe('test-skill');
    });

    it('should return null if skill not found', async () => {
      vi.mocked(fs.readdir).mockRejectedValue(new Error('Directory not found'));

      const config = await manager.loadSkillForRuntime('nonexistent');

      expect(config).toBeNull();
    });
  });

  describe('listSkills', () => {
    beforeEach(() => {
      // Mock directory listing for skills directories (with Dirent objects)
      vi.mocked(fs.readdir)
        .mockResolvedValueOnce([
          { name: 'skill1', isDirectory: () => true, isFile: () => false },
          { name: 'skill2', isDirectory: () => true, isFile: () => false },
          {
            name: 'not-a-dir.txt',
            isDirectory: () => false,
            isFile: () => true,
          },
        ] as unknown as Awaited<ReturnType<typeof fs.readdir>>)
        .mockResolvedValueOnce([
          { name: 'skill3', isDirectory: () => true, isFile: () => false },
          { name: 'skill1', isDirectory: () => true, isFile: () => false },
        ] as unknown as Awaited<ReturnType<typeof fs.readdir>>);

      vi.mocked(fs.access).mockResolvedValue(undefined);

      // Mock file reading for valid skills
      vi.mocked(fs.readFile).mockImplementation((filePath) => {
        const pathStr = String(filePath);
        if (pathStr.includes('skill1')) {
          return Promise.resolve(`---
name: skill1
description: First skill
---
Skill 1 content`);
        } else if (pathStr.includes('skill2')) {
          return Promise.resolve(`---
name: skill2
description: Second skill
---
Skill 2 content`);
        } else if (pathStr.includes('skill3')) {
          return Promise.resolve(`---
name: skill3
description: Third skill
---
Skill 3 content`);
        }
        return Promise.reject(new Error('File not found'));
      });
    });

    it('should list skills from both levels', async () => {
      const skills = await manager.listSkills();

      expect(skills).toHaveLength(3); // skill1 (project takes precedence), skill2, skill3
      expect(skills.map((s) => s.name).sort()).toEqual([
        'skill1',
        'skill2',
        'skill3',
      ]);
    });

    it('should prioritize project level over user level', async () => {
      const skills = await manager.listSkills();
      const skill1 = skills.find((s) => s.name === 'skill1');

      expect(skill1!.level).toBe('project');
    });

    it('should filter by level', async () => {
      const projectSkills = await manager.listSkills({
        level: 'project',
      });

      expect(projectSkills).toHaveLength(2); // skill1, skill2
      expect(projectSkills.every((s) => s.level === 'project')).toBe(true);
    });

    it('should handle empty directories', async () => {
      vi.mocked(fs.readdir).mockReset();
      vi.mocked(fs.readdir).mockResolvedValue(
        [] as unknown as Awaited<ReturnType<typeof fs.readdir>>,
      );

      const skills = await manager.listSkills({ force: true });

      expect(skills).toHaveLength(0);
    });

    it('should handle directory read errors', async () => {
      vi.mocked(fs.readdir).mockReset();
      vi.mocked(fs.readdir).mockRejectedValue(new Error('Directory not found'));

      const skills = await manager.listSkills({ force: true });

      expect(skills).toHaveLength(0);
    });
  });

  describe('getSkillsBaseDir', () => {
    it('should return project-level base dir', () => {
      const baseDir = manager.getSkillsBaseDir('project');

      expect(baseDir).toBe(path.join('/test/project', '.qwen', 'skills'));
    });

    it('should return user-level base dir', () => {
      const baseDir = manager.getSkillsBaseDir('user');

      expect(baseDir).toBe(path.join('/home/user', '.qwen', 'skills'));
    });
  });

  describe('change listeners', () => {
    it('should notify listeners when cache is refreshed', async () => {
      const listener = vi.fn();
      manager.addChangeListener(listener);

      vi.mocked(fs.readdir).mockResolvedValue(
        [] as unknown as Awaited<ReturnType<typeof fs.readdir>>,
      );

      await manager.refreshCache();

      expect(listener).toHaveBeenCalled();
    });

    it('should remove listener when cleanup function is called', async () => {
      const listener = vi.fn();
      const removeListener = manager.addChangeListener(listener);

      removeListener();

      vi.mocked(fs.readdir).mockResolvedValue(
        [] as unknown as Awaited<ReturnType<typeof fs.readdir>>,
      );

      await manager.refreshCache();

      expect(listener).not.toHaveBeenCalled();
    });
  });

  describe('parse errors', () => {
    it('should track parse errors', async () => {
      vi.mocked(fs.readdir).mockResolvedValue([
        { name: 'bad-skill', isDirectory: () => true, isFile: () => false },
      ] as unknown as Awaited<ReturnType<typeof fs.readdir>>);
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(fs.readFile).mockResolvedValue(
        'invalid content without frontmatter',
      );

      await manager.listSkills({ force: true });

      const errors = manager.getParseErrors();
      expect(errors.size).toBeGreaterThan(0);
    });
  });
});
452
packages/core/src/skills/skill-manager.ts
Normal file
@@ -0,0 +1,452 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import { parse as parseYaml } from '../utils/yaml-parser.js';
import type {
  SkillConfig,
  SkillLevel,
  ListSkillsOptions,
  SkillValidationResult,
} from './types.js';
import { SkillError, SkillErrorCode } from './types.js';
import type { Config } from '../config/config.js';

const QWEN_CONFIG_DIR = '.qwen';
const SKILLS_CONFIG_DIR = 'skills';
const SKILL_MANIFEST_FILE = 'SKILL.md';

/**
 * Manages skill configurations stored as directories containing SKILL.md files.
 * Provides discovery, parsing, validation, and caching for skills.
 */
export class SkillManager {
  private skillsCache: Map<SkillLevel, SkillConfig[]> | null = null;
  private readonly changeListeners: Set<() => void> = new Set();
  private parseErrors: Map<string, SkillError> = new Map();

  constructor(private readonly config: Config) {}

  /**
   * Adds a listener that will be called when skills change.
   * @returns A function to remove the listener.
   */
  addChangeListener(listener: () => void): () => void {
    this.changeListeners.add(listener);
    return () => {
      this.changeListeners.delete(listener);
    };
  }

  /**
   * Notifies all registered change listeners.
   */
  private notifyChangeListeners(): void {
    for (const listener of this.changeListeners) {
      try {
        listener();
      } catch (error) {
        console.warn('Skill change listener threw an error:', error);
      }
    }
  }

  /**
   * Gets any parse errors that occurred during skill loading.
   * @returns Map of skill paths to their parse errors.
   */
  getParseErrors(): Map<string, SkillError> {
    return new Map(this.parseErrors);
  }

  /**
   * Lists all available skills.
   *
   * @param options - Filtering options
   * @returns Array of skill configurations
   */
  async listSkills(options: ListSkillsOptions = {}): Promise<SkillConfig[]> {
    const skills: SkillConfig[] = [];
    const seenNames = new Set<string>();

    const levelsToCheck: SkillLevel[] = options.level
      ? [options.level]
      : ['project', 'user'];

    // Check if we should use cache or force refresh
    const shouldUseCache = !options.force && this.skillsCache !== null;

    // Initialize cache if it doesn't exist or we're forcing a refresh
    if (!shouldUseCache) {
      await this.refreshCache();
    }

    // Collect skills from each level (project takes precedence over user)
    for (const level of levelsToCheck) {
      const levelSkills = this.skillsCache?.get(level) || [];

      for (const skill of levelSkills) {
        // Skip if we've already seen this name (precedence: project > user)
        if (seenNames.has(skill.name)) {
          continue;
        }

        skills.push(skill);
        seenNames.add(skill.name);
      }
    }

    // Sort by name for consistent ordering
    skills.sort((a, b) => a.name.localeCompare(b.name));

    return skills;
  }

  /**
   * Loads a skill configuration by name.
   * If level is specified, only searches that level.
   * If level is omitted, searches project-level first, then user-level.
   *
   * @param name - Name of the skill to load
   * @param level - Optional level to limit search to
   * @returns SkillConfig or null if not found
   */
  async loadSkill(
    name: string,
    level?: SkillLevel,
  ): Promise<SkillConfig | null> {
    if (level) {
      return this.findSkillByNameAtLevel(name, level);
    }

    // Try project level first
    const projectSkill = await this.findSkillByNameAtLevel(name, 'project');
    if (projectSkill) {
      return projectSkill;
    }

    // Try user level
    return this.findSkillByNameAtLevel(name, 'user');
  }

  /**
   * Loads a skill with its full content, ready for runtime use.
   * This includes loading additional files from the skill directory.
   *
   * @param name - Name of the skill to load
   * @param level - Optional level to limit search to
   * @returns SkillConfig or null if not found
   */
  async loadSkillForRuntime(
    name: string,
    level?: SkillLevel,
  ): Promise<SkillConfig | null> {
    const skill = await this.loadSkill(name, level);
    if (!skill) {
      return null;
    }

    return skill;
  }

  /**
   * Validates a skill configuration.
   *
   * @param config - Configuration to validate
   * @returns Validation result
   */
  validateConfig(config: Partial<SkillConfig>): SkillValidationResult {
    const errors: string[] = [];
    const warnings: string[] = [];

    // Check required fields
    if (typeof config.name !== 'string') {
      errors.push('Missing or invalid "name" field');
    } else if (config.name.trim() === '') {
      errors.push('"name" cannot be empty');
    }

    if (typeof config.description !== 'string') {
      errors.push('Missing or invalid "description" field');
    } else if (config.description.trim() === '') {
      errors.push('"description" cannot be empty');
    }

    // Validate allowedTools if present
    if (config.allowedTools !== undefined) {
      if (!Array.isArray(config.allowedTools)) {
        errors.push('"allowedTools" must be an array');
      } else {
        for (const tool of config.allowedTools) {
          if (typeof tool !== 'string') {
            errors.push('"allowedTools" must contain only strings');
            break;
          }
        }
      }
    }

    // Warn if body is empty
    if (!config.body || config.body.trim() === '') {
      warnings.push('Skill body is empty');
    }

    return {
      isValid: errors.length === 0,
      errors,
      warnings,
    };
  }

  /**
   * Refreshes the skills cache by loading all skills from disk.
   */
  async refreshCache(): Promise<void> {
    const skillsCache = new Map<SkillLevel, SkillConfig[]>();
    this.parseErrors.clear();

    const levels: SkillLevel[] = ['project', 'user'];

    for (const level of levels) {
      const levelSkills = await this.listSkillsAtLevel(level);
      skillsCache.set(level, levelSkills);
    }

    this.skillsCache = skillsCache;
    this.notifyChangeListeners();
  }

  /**
   * Parses a SKILL.md file and returns the configuration.
   *
   * @param filePath - Path to the SKILL.md file
   * @param level - Storage level
   * @returns SkillConfig
   * @throws SkillError if parsing fails
   */
  parseSkillFile(filePath: string, level: SkillLevel): Promise<SkillConfig> {
    return this.parseSkillFileInternal(filePath, level);
  }

  /**
   * Internal implementation of skill file parsing.
   */
  private async parseSkillFileInternal(
    filePath: string,
    level: SkillLevel,
  ): Promise<SkillConfig> {
    let content: string;

    try {
      content = await fs.readFile(filePath, 'utf8');
    } catch (error) {
      const skillError = new SkillError(
        `Failed to read skill file: ${error instanceof Error ? error.message : 'Unknown error'}`,
        SkillErrorCode.FILE_ERROR,
      );
      this.parseErrors.set(filePath, skillError);
      throw skillError;
    }

    return this.parseSkillContent(content, filePath, level);
  }

  /**
   * Parses skill content from a string.
   *
   * @param content - File content
   * @param filePath - File path for error reporting
   * @param level - Storage level
   * @returns SkillConfig
   * @throws SkillError if parsing fails
   */
  parseSkillContent(
    content: string,
    filePath: string,
    level: SkillLevel,
  ): SkillConfig {
    try {
      // Split frontmatter and content
      const frontmatterRegex = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/;
      const match = content.match(frontmatterRegex);

      if (!match) {
        throw new Error('Invalid format: missing YAML frontmatter');
      }

      const [, frontmatterYaml, body] = match;

      // Parse YAML frontmatter
      const frontmatter = parseYaml(frontmatterYaml) as Record<string, unknown>;

      // Extract required fields
      const nameRaw = frontmatter['name'];
      const descriptionRaw = frontmatter['description'];

      if (nameRaw == null || nameRaw === '') {
        throw new Error('Missing "name" in frontmatter');
      }

      if (descriptionRaw == null || descriptionRaw === '') {
        throw new Error('Missing "description" in frontmatter');
      }

      // Convert to strings
      const name = String(nameRaw);
      const description = String(descriptionRaw);

      // Extract optional fields
      const allowedToolsRaw = frontmatter['allowedTools'] as
        | unknown[]
        | undefined;
      let allowedTools: string[] | undefined;

      if (allowedToolsRaw !== undefined) {
        if (Array.isArray(allowedToolsRaw)) {
          allowedTools = allowedToolsRaw.map(String);
        } else {
          throw new Error('"allowedTools" must be an array');
        }
      }

      const config: SkillConfig = {
        name,
        description,
        allowedTools,
        level,
        filePath,
        body: body.trim(),
      };

      // Validate the parsed configuration
      const validation = this.validateConfig(config);
      if (!validation.isValid) {
        throw new Error(`Validation failed: ${validation.errors.join(', ')}`);
      }

      return config;
    } catch (error) {
      const skillError = new SkillError(
        `Failed to parse skill file: ${error instanceof Error ? error.message : 'Unknown error'}`,
        SkillErrorCode.PARSE_ERROR,
      );
      this.parseErrors.set(filePath, skillError);
      throw skillError;
    }
  }

  /**
   * Gets the base directory for skills at a specific level.
   *
   * @param level - Storage level
   * @returns Absolute directory path
   */
  getSkillsBaseDir(level: SkillLevel): string {
    const baseDir =
      level === 'project'
        ? path.join(
            this.config.getProjectRoot(),
            QWEN_CONFIG_DIR,
            SKILLS_CONFIG_DIR,
          )
        : path.join(os.homedir(), QWEN_CONFIG_DIR, SKILLS_CONFIG_DIR);

    return baseDir;
  }

  /**
   * Lists skills at a specific level.
   *
   * @param level - Storage level to scan
   * @returns Array of skill configurations
   */
  private async listSkillsAtLevel(level: SkillLevel): Promise<SkillConfig[]> {
    const projectRoot = this.config.getProjectRoot();
    const homeDir = os.homedir();
    const isHomeDirectory = path.resolve(projectRoot) === path.resolve(homeDir);

    // If project level is requested but project root is same as home directory,
    // return empty array to avoid conflicts between project and global skills
    if (level === 'project' && isHomeDirectory) {
      return [];
    }

    const baseDir = this.getSkillsBaseDir(level);

    try {
      const entries = await fs.readdir(baseDir, { withFileTypes: true });
      const skills: SkillConfig[] = [];

      for (const entry of entries) {
        // Only process directories (each skill is a directory)
        if (!entry.isDirectory()) continue;

        const skillDir = path.join(baseDir, entry.name);
        const skillManifest = path.join(skillDir, SKILL_MANIFEST_FILE);

        try {
          // Check if SKILL.md exists
          await fs.access(skillManifest);

          const config = await this.parseSkillFileInternal(
            skillManifest,
            level,
          );
          skills.push(config);
        } catch (error) {
          // Skip directories without valid SKILL.md
          if (error instanceof SkillError) {
            // Parse error was already recorded
            console.warn(
              `Failed to parse skill at ${skillDir}: ${error.message}`,
            );
          }
          continue;
        }
      }

      return skills;
    } catch (_error) {
      // Directory doesn't exist or can't be read
      return [];
    }
  }

  /**
   * Finds a skill by name at a specific level.
   *
   * @param name - Name of the skill to find
   * @param level - Storage level to search
   * @returns SkillConfig or null if not found
   */
  private async findSkillByNameAtLevel(
    name: string,
    level: SkillLevel,
  ): Promise<SkillConfig | null> {
    await this.ensureLevelCache(level);

    const levelSkills = this.skillsCache?.get(level) || [];

    // Find the skill with matching name
    return levelSkills.find((skill) => skill.name === name) || null;
  }

  /**
   * Ensures the cache is populated for a specific level without loading other levels.
   */
  private async ensureLevelCache(level: SkillLevel): Promise<void> {
    if (!this.skillsCache) {
      this.skillsCache = new Map<SkillLevel, SkillConfig[]>();
    }

    if (!this.skillsCache.has(level)) {
      const levelSkills = await this.listSkillsAtLevel(level);
      this.skillsCache.set(level, levelSkills);
    }
  }
}
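
A minimal usage sketch of the SkillManager API defined above, assuming a Config instance is already available; the skill name is illustrative:

import { SkillManager } from './skill-manager.js';
import type { Config } from '../config/config.js';

async function showSkills(config: Config): Promise<void> {
  const manager = new SkillManager(config);

  // First call scans .qwen/skills/ and ~/.qwen/skills/; later calls hit the cache.
  const skills = await manager.listSkills();
  console.log(skills.map((s) => `${s.name} (${s.level})`).join('\n'));

  // force: true bypasses the cache and re-reads skill directories from disk.
  await manager.listSkills({ force: true });

  // Project-level skills shadow user-level skills with the same name.
  const skill = await manager.loadSkill('pdf-report'); // illustrative name
  if (skill) {
    console.log(skill.body);
  }
}
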
105
packages/core/src/skills/types.ts
Normal file
@@ -0,0 +1,105 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * Represents the storage level for a skill configuration.
 * - 'project': Stored in `.qwen/skills/` within the project directory
 * - 'user': Stored in `~/.qwen/skills/` in the user's home directory
 */
export type SkillLevel = 'project' | 'user';

/**
 * Core configuration for a skill as stored in SKILL.md files.
 * Each skill directory contains a SKILL.md file with YAML frontmatter
 * containing metadata, followed by markdown content describing the skill.
 */
export interface SkillConfig {
  /** Unique name identifier for the skill */
  name: string;

  /** Human-readable description of what this skill provides */
  description: string;

  /**
   * Optional list of tool names that this skill is allowed to use.
   * For v1, this is informational only (no gating).
   */
  allowedTools?: string[];

  /**
   * Storage level - determines where the configuration file is stored
   */
  level: SkillLevel;

  /**
   * Absolute path to the skill directory containing SKILL.md
   */
  filePath: string;

  /**
   * The markdown body content from SKILL.md (after the frontmatter)
   */
  body: string;
}

/**
 * Runtime configuration for a skill when it's being actively used.
 * Extends SkillConfig with additional runtime-specific fields.
 */
export type SkillRuntimeConfig = SkillConfig;

/**
 * Result of a validation operation on a skill configuration.
 */
export interface SkillValidationResult {
  /** Whether the configuration is valid */
  isValid: boolean;

  /** Array of error messages if validation failed */
  errors: string[];

  /** Array of warning messages (non-blocking issues) */
  warnings: string[];
}

/**
 * Options for listing skills.
 */
export interface ListSkillsOptions {
  /** Filter by storage level */
  level?: SkillLevel;

  /** Force refresh from disk, bypassing cache. Defaults to false. */
  force?: boolean;
}

/**
 * Error thrown when a skill operation fails.
 */
export class SkillError extends Error {
  constructor(
    message: string,
    readonly code: SkillErrorCode,
    readonly skillName?: string,
  ) {
    super(message);
    this.name = 'SkillError';
  }
}

/**
 * Error codes for skill operations.
 */
export const SkillErrorCode = {
  NOT_FOUND: 'NOT_FOUND',
  INVALID_CONFIG: 'INVALID_CONFIG',
  INVALID_NAME: 'INVALID_NAME',
  FILE_ERROR: 'FILE_ERROR',
  PARSE_ERROR: 'PARSE_ERROR',
} as const;

export type SkillErrorCode =
  (typeof SkillErrorCode)[keyof typeof SkillErrorCode];
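
SkillErrorCode above is deliberately declared twice: once as a const object (a value) and once as the union of its values (a type), so the same name serves both roles. A short illustration, with an invented skill name:

import { SkillError, SkillErrorCode } from './types.js';

// As a type: narrows to 'NOT_FOUND' | 'INVALID_CONFIG' | 'INVALID_NAME' | 'FILE_ERROR' | 'PARSE_ERROR'.
function describeSkillError(code: SkillErrorCode): string {
  return code === SkillErrorCode.PARSE_ERROR
    ? 'SKILL.md could not be parsed'
    : `skill operation failed: ${code}`;
}

// As a value: ordinary property access at runtime.
const err = new SkillError('skill "pdf-report" not found', SkillErrorCode.NOT_FOUND, 'pdf-report');
console.log(describeSkillError(err.code));
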
@@ -33,6 +33,7 @@ export const EVENT_MALFORMED_JSON_RESPONSE =
export const EVENT_FILE_OPERATION = 'qwen-code.file_operation';
export const EVENT_MODEL_SLASH_COMMAND = 'qwen-code.slash_command.model';
export const EVENT_SUBAGENT_EXECUTION = 'qwen-code.subagent_execution';
+export const EVENT_SKILL_LAUNCH = 'qwen-code.skill_launch';
export const EVENT_AUTH = 'qwen-code.auth';

// Performance Events

@@ -44,6 +44,7 @@ export {
  logRipgrepFallback,
  logNextSpeakerCheck,
  logAuth,
+  logSkillLaunch,
} from './loggers.js';
export type { SlashCommandEvent, ChatCompressionEvent } from './types.js';
export {
@@ -63,6 +64,7 @@ export {
  RipgrepFallbackEvent,
  NextSpeakerCheckEvent,
  AuthEvent,
+  SkillLaunchEvent,
} from './types.js';
export { makeSlashCommandEvent, makeChatCompressionEvent } from './types.js';
export type { TelemetryEvent } from './types.js';

@@ -200,6 +200,8 @@ describe('loggers', () => {
        mcp_tools: undefined,
        mcp_tools_count: undefined,
        output_format: 'json',
+        skills: undefined,
+        subagents: undefined,
      },
    });
  });
@@ -262,7 +264,7 @@ describe('loggers', () => {
        'event.timestamp': '2025-01-01T00:00:00.000Z',
        prompt_length: 11,
        prompt_id: 'prompt-id-9',
-        auth_type: 'gemini-api-key',
+        auth_type: 'gemini',
      },
    });
  });
@@ -331,7 +333,7 @@ describe('loggers', () => {
        total_token_count: 0,
        response_text: 'test-response',
        prompt_id: 'prompt-id-1',
-        auth_type: 'gemini-api-key',
+        auth_type: 'gemini',
      },
    });

@@ -37,6 +37,7 @@ import {
  EVENT_MALFORMED_JSON_RESPONSE,
  EVENT_INVALID_CHUNK,
  EVENT_AUTH,
+  EVENT_SKILL_LAUNCH,
} from './constants.js';
import {
  recordApiErrorMetrics,
@@ -84,6 +85,7 @@ import type {
  MalformedJsonResponseEvent,
  InvalidChunkEvent,
  AuthEvent,
+  SkillLaunchEvent,
} from './types.js';
import type { UiEvent } from './uiTelemetry.js';
import { uiTelemetryService } from './uiTelemetry.js';
@@ -123,6 +125,8 @@ export function logStartSession(
    mcp_tools: event.mcp_tools,
    mcp_tools_count: event.mcp_tools_count,
    output_format: event.output_format,
+    skills: event.skills,
+    subagents: event.subagents,
  };

  const logger = logs.getLogger(SERVICE_NAME);
@@ -865,3 +869,21 @@ export function logAuth(config: Config, event: AuthEvent): void {
  };
  logger.emit(logRecord);
}
+
+export function logSkillLaunch(config: Config, event: SkillLaunchEvent): void {
+  if (!isTelemetrySdkInitialized()) return;
+
+  const attributes: LogAttributes = {
+    ...getCommonAttributes(config),
+    ...event,
+    'event.name': EVENT_SKILL_LAUNCH,
+    'event.timestamp': new Date().toISOString(),
+  };
+
+  const logger = logs.getLogger(SERVICE_NAME);
+  const logRecord: LogRecord = {
+    body: `Skill launch: ${event.skill_name}. Success: ${event.success}.`,
+    attributes,
+  };
+  logger.emit(logRecord);
+}
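
Tying the additions above together: a call site constructs a SkillLaunchEvent (defined in the types.ts hunks below) and hands it to logSkillLaunch. A minimal sketch; the real call site lives in the Skill tool and is not part of the hunks shown here:

import { logSkillLaunch, SkillLaunchEvent } from '../telemetry/index.js';

// Emits an OTel log record whose attributes include skill_name, success,
// and 'event.name' = 'qwen-code.skill_launch'.
logSkillLaunch(config, new SkillLaunchEvent('pdf-report', true));
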
@@ -38,6 +38,7 @@ import type {
  ModelSlashCommandEvent,
  ExtensionDisableEvent,
  AuthEvent,
+  SkillLaunchEvent,
  RipgrepFallbackEvent,
  EndSessionEvent,
} from '../types.js';
@@ -391,6 +392,8 @@ export class QwenLogger {
        telemetry_enabled: event.telemetry_enabled,
        telemetry_log_user_prompts_enabled:
          event.telemetry_log_user_prompts_enabled,
+        skills: event.skills,
+        subagents: event.subagents,
      },
    });

@@ -827,6 +830,18 @@ export class QwenLogger {
    this.flushIfNeeded();
  }

+  logSkillLaunchEvent(event: SkillLaunchEvent): void {
+    const rumEvent = this.createActionEvent('misc', 'skill_launch', {
+      properties: {
+        skill_name: event.skill_name,
+        success: event.success ? 1 : 0,
+      },
+    });
+
+    this.enqueueLogEvent(rumEvent);
+    this.flushIfNeeded();
+  }
+
  logChatCompressionEvent(event: ChatCompressionEvent): void {
    const rumEvent = this.createActionEvent('misc', 'chat_compression', {
      properties: {

@@ -18,6 +18,9 @@ import {
import type { FileOperation } from './metrics.js';
export { ToolCallDecision };
import type { OutputFormat } from '../output/types.js';
+import { ToolNames } from '../tools/tool-names.js';
+import type { SkillTool } from '../tools/skill.js';
+import type { TaskTool } from '../tools/task.js';

export interface BaseTelemetryEvent {
  'event.name': string;
@@ -47,6 +50,8 @@ export class StartSessionEvent implements BaseTelemetryEvent {
  mcp_tools_count?: number;
  mcp_tools?: string;
  output_format: OutputFormat;
+  skills?: string;
+  subagents?: string;

  constructor(config: Config) {
    const generatorConfig = config.getContentGeneratorConfig();
@@ -79,6 +84,7 @@ export class StartSessionEvent implements BaseTelemetryEvent {
      config.getFileFilteringRespectGitIgnore();
    this.mcp_servers_count = mcpServers ? Object.keys(mcpServers).length : 0;
    this.output_format = config.getOutputFormat();
+
    if (toolRegistry) {
      const mcpTools = toolRegistry
        .getAllTools()
@@ -87,6 +93,22 @@ export class StartSessionEvent implements BaseTelemetryEvent {
      this.mcp_tools = mcpTools
        .map((tool) => (tool as DiscoveredMCPTool).name)
        .join(',');
+
+      const skillTool = toolRegistry.getTool(ToolNames.SKILL) as
+        | SkillTool
+        | undefined;
+      const skillNames = skillTool?.getAvailableSkillNames?.();
+      if (skillNames && skillNames.length > 0) {
+        this.skills = skillNames.join(',');
+      }
+
+      const taskTool = toolRegistry.getTool(ToolNames.TASK) as
+        | TaskTool
+        | undefined;
+      const subagentNames = taskTool?.getAvailableSubagentNames?.();
+      if (subagentNames && subagentNames.length > 0) {
+        this.subagents = subagentNames.join(',');
+      }
    }
  }
}
@@ -721,6 +743,20 @@ export class AuthEvent implements BaseTelemetryEvent {
  }
}

+export class SkillLaunchEvent implements BaseTelemetryEvent {
+  'event.name': 'skill_launch';
+  'event.timestamp': string;
+  skill_name: string;
+  success: boolean;
+
+  constructor(skill_name: string, success: boolean) {
+    this['event.name'] = 'skill_launch';
+    this['event.timestamp'] = new Date().toISOString();
+    this.skill_name = skill_name;
+    this.success = success;
+  }
+}
+
export type TelemetryEvent =
  | StartSessionEvent
  | EndSessionEvent
@@ -749,7 +785,8 @@ export type TelemetryEvent =
  | ExtensionUninstallEvent
  | ToolOutputTruncatedEvent
  | ModelSlashCommandEvent
-  | AuthEvent;
+  | AuthEvent
+  | SkillLaunchEvent;

export class ExtensionDisableEvent implements BaseTelemetryEvent {
  'event.name': 'extension_disable';
@@ -31,6 +31,8 @@ describe('LSTool', () => {
      tempSecondaryDir,
    ]);

+    const userSkillsBase = path.join(os.homedir(), '.qwen', 'skills');
+
    mockConfig = {
      getTargetDir: () => tempRootDir,
      getWorkspaceContext: () => mockWorkspaceContext,
@@ -39,6 +41,9 @@ describe('LSTool', () => {
        respectGitIgnore: true,
        respectQwenIgnore: true,
      }),
+      storage: {
+        getUserSkillsDir: () => userSkillsBase,
+      },
    } as unknown as Config;

    lsTool = new LSTool(mockConfig);
@@ -288,7 +293,7 @@ describe('LSTool', () => {
      };
      const invocation = lsTool.build(params);
      const description = invocation.getDescription();
-      const expected = path.relative(tempRootDir, params.path);
+      const expected = path.resolve(params.path);
      expect(description).toBe(expected);
    });
  });

@@ -9,6 +9,7 @@ import path from 'node:path';
import type { ToolInvocation, ToolResult } from './tools.js';
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
import { makeRelative, shortenPath } from '../utils/paths.js';
+import { isSubpath } from '../utils/paths.js';
import type { Config } from '../config/config.js';
import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/constants.js';
import { ToolErrorType } from './tool-error.js';
@@ -311,8 +312,14 @@ export class LSTool extends BaseDeclarativeTool<LSToolParams, ToolResult> {
      return `Path must be absolute: ${params.path}`;
    }

+    const userSkillsBase = this.config.storage.getUserSkillsDir();
+    const isUnderUserSkills = isSubpath(userSkillsBase, params.path);
+
    const workspaceContext = this.config.getWorkspaceContext();
-    if (!workspaceContext.isPathWithinWorkspace(params.path)) {
+    if (
+      !workspaceContext.isPathWithinWorkspace(params.path) &&
+      !isUnderUserSkills
+    ) {
      const directories = workspaceContext.getDirectories();
      return `Path must be within one of the workspace directories: ${directories.join(
        ', ',

@@ -40,6 +40,7 @@ describe('ReadFileTool', () => {
    getWorkspaceContext: () => createMockWorkspaceContext(tempRootDir),
    storage: {
      getProjectTempDir: () => path.join(tempRootDir, '.temp'),
+      getUserSkillsDir: () => path.join(os.homedir(), '.qwen', 'skills'),
    },
    getTruncateToolOutputThreshold: () => 2500,
    getTruncateToolOutputLines: () => 500,

@@ -20,6 +20,7 @@ import { FileOperation } from '../telemetry/metrics.js';
import { getProgrammingLanguage } from '../telemetry/telemetry-utils.js';
import { logFileOperation } from '../telemetry/loggers.js';
import { FileOperationEvent } from '../telemetry/types.js';
+import { isSubpath } from '../utils/paths.js';

/**
 * Parameters for the ReadFile tool
@@ -183,15 +184,20 @@ export class ReadFileTool extends BaseDeclarativeTool<

    const workspaceContext = this.config.getWorkspaceContext();
    const projectTempDir = this.config.storage.getProjectTempDir();
+    const userSkillsDir = this.config.storage.getUserSkillsDir();
    const resolvedFilePath = path.resolve(filePath);
-    const resolvedProjectTempDir = path.resolve(projectTempDir);
-    const isWithinTempDir =
-      resolvedFilePath.startsWith(resolvedProjectTempDir + path.sep) ||
-      resolvedFilePath === resolvedProjectTempDir;
+    const isWithinTempDir = isSubpath(projectTempDir, resolvedFilePath);
+    const isWithinUserSkills = isSubpath(userSkillsDir, resolvedFilePath);

-    if (!workspaceContext.isPathWithinWorkspace(filePath) && !isWithinTempDir) {
+    if (
+      !workspaceContext.isPathWithinWorkspace(filePath) &&
+      !isWithinTempDir &&
+      !isWithinUserSkills
+    ) {
      const directories = workspaceContext.getDirectories();
-      return `File path must be within one of the workspace directories: ${directories.join(', ')} or within the project temp directory: ${projectTempDir}`;
+      return `File path must be within one of the workspace directories: ${directories.join(
+        ', ',
+      )} or within the project temp directory: ${projectTempDir}`;
    }
    if (params.offset !== undefined && params.offset < 0) {
      return 'Offset must be a non-negative number';
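
Both tools above now delegate their containment checks to isSubpath from ../utils/paths.js, taking the parent directory first and the candidate path second. Its implementation is not part of this diff; a minimal sketch of the expected semantics:

import * as path from 'node:path';

// Sketch only: true when `child` resolves to `parent` itself or to a path inside it.
function isSubpath(parent: string, child: string): boolean {
  const rel = path.relative(path.resolve(parent), path.resolve(child));
  return rel === '' || (!rel.startsWith('..') && !path.isAbsolute(rel));
}
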
442
packages/core/src/tools/skill.test.ts
Normal file
@@ -0,0 +1,442 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { SkillTool, type SkillParams } from './skill.js';
import type { PartListUnion } from '@google/genai';
import type { ToolResultDisplay } from './tools.js';
import type { Config } from '../config/config.js';
import { SkillManager } from '../skills/skill-manager.js';
import type { SkillConfig } from '../skills/types.js';
import { partToString } from '../utils/partUtils.js';

// Type for accessing protected methods in tests
type SkillToolWithProtectedMethods = SkillTool & {
  createInvocation: (params: SkillParams) => {
    execute: (
      signal?: AbortSignal,
      updateOutput?: (output: ToolResultDisplay) => void,
    ) => Promise<{
      llmContent: PartListUnion;
      returnDisplay: ToolResultDisplay;
    }>;
    getDescription: () => string;
    shouldConfirmExecute: () => Promise<boolean>;
  };
};

// Mock dependencies
vi.mock('../skills/skill-manager.js');
vi.mock('../telemetry/index.js', () => ({
  logSkillLaunch: vi.fn(),
  SkillLaunchEvent: class {
    constructor(
      public skill_name: string,
      public success: boolean,
    ) {}
  },
}));

const MockedSkillManager = vi.mocked(SkillManager);

describe('SkillTool', () => {
  let config: Config;
  let skillTool: SkillTool;
  let mockSkillManager: SkillManager;
  let changeListeners: Array<() => void>;

  const mockSkills: SkillConfig[] = [
    {
      name: 'code-review',
      description: 'Specialized skill for reviewing code quality',
      level: 'project',
      filePath: '/project/.qwen/skills/code-review/SKILL.md',
      body: 'Review code for quality and best practices.',
    },
    {
      name: 'testing',
      description: 'Skill for writing and running tests',
      level: 'user',
      filePath: '/home/user/.qwen/skills/testing/SKILL.md',
      body: 'Help write comprehensive tests.',
      allowedTools: ['read_file', 'write_file', 'shell'],
    },
  ];

  beforeEach(async () => {
    // Setup fake timers
    vi.useFakeTimers();

    // Create mock config
    config = {
      getProjectRoot: vi.fn().mockReturnValue('/test/project'),
      getSessionId: vi.fn().mockReturnValue('test-session-id'),
      getSkillManager: vi.fn(),
      getGeminiClient: vi.fn().mockReturnValue(undefined),
    } as unknown as Config;

    changeListeners = [];

    // Setup SkillManager mock
    mockSkillManager = {
      listSkills: vi.fn().mockResolvedValue(mockSkills),
      loadSkill: vi.fn(),
      loadSkillForRuntime: vi.fn(),
      addChangeListener: vi.fn((listener: () => void) => {
        changeListeners.push(listener);
        return () => {
          const index = changeListeners.indexOf(listener);
          if (index >= 0) {
            changeListeners.splice(index, 1);
          }
        };
      }),
      getParseErrors: vi.fn().mockReturnValue(new Map()),
    } as unknown as SkillManager;

    MockedSkillManager.mockImplementation(() => mockSkillManager);

    // Make config return the mock SkillManager
    vi.mocked(config.getSkillManager).mockReturnValue(mockSkillManager);

    // Create SkillTool instance
    skillTool = new SkillTool(config);

    // Allow async initialization to complete
    await vi.runAllTimersAsync();
  });

  afterEach(() => {
    vi.useRealTimers();
    vi.clearAllMocks();
  });

  describe('initialization', () => {
    it('should initialize with correct name and properties', () => {
      expect(skillTool.name).toBe('skill');
      expect(skillTool.displayName).toBe('Skill');
      expect(skillTool.kind).toBe('read');
    });

    it('should load available skills during initialization', () => {
      expect(mockSkillManager.listSkills).toHaveBeenCalled();
    });

    it('should subscribe to skill manager changes', () => {
      expect(mockSkillManager.addChangeListener).toHaveBeenCalledTimes(1);
    });

    it('should update description with available skills', () => {
      expect(skillTool.description).toContain('code-review');
      expect(skillTool.description).toContain(
        'Specialized skill for reviewing code quality',
      );
      expect(skillTool.description).toContain('testing');
      expect(skillTool.description).toContain(
        'Skill for writing and running tests',
      );
    });

    it('should handle empty skills list gracefully', async () => {
      vi.mocked(mockSkillManager.listSkills).mockResolvedValue([]);

      const emptySkillTool = new SkillTool(config);
      await vi.runAllTimersAsync();

      expect(emptySkillTool.description).toContain(
        'No skills are currently configured',
      );
    });

    it('should handle skill loading errors gracefully', async () => {
      vi.mocked(mockSkillManager.listSkills).mockRejectedValue(
        new Error('Loading failed'),
      );

      const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});

      new SkillTool(config);
      await vi.runAllTimersAsync();

      expect(consoleSpy).toHaveBeenCalledWith(
        'Failed to load skills for Skills tool:',
        expect.any(Error),
      );
      consoleSpy.mockRestore();
    });
  });

  describe('schema generation', () => {
    it('should expose static schema without dynamic enums', () => {
      const schema = skillTool.schema;
      const properties = schema.parametersJsonSchema as {
        properties: {
          skill: {
            type: string;
            description: string;
            enum?: string[];
          };
        };
      };
      expect(properties.properties.skill.type).toBe('string');
      expect(properties.properties.skill.description).toBe(
        'The skill name (no arguments). E.g., "pdf" or "xlsx"',
      );
      expect(properties.properties.skill.enum).toBeUndefined();
    });

    it('should keep schema static even when no skills available', async () => {
      vi.mocked(mockSkillManager.listSkills).mockResolvedValue([]);

      const emptySkillTool = new SkillTool(config);
      await vi.runAllTimersAsync();

      const schema = emptySkillTool.schema;
      const properties = schema.parametersJsonSchema as {
        properties: {
          skill: {
            type: string;
            description: string;
            enum?: string[];
          };
        };
      };
      expect(properties.properties.skill.type).toBe('string');
      expect(properties.properties.skill.description).toBe(
        'The skill name (no arguments). E.g., "pdf" or "xlsx"',
      );
      expect(properties.properties.skill.enum).toBeUndefined();
    });
  });

  describe('validateToolParams', () => {
    it('should validate valid parameters', () => {
      const result = skillTool.validateToolParams({ skill: 'code-review' });
      expect(result).toBeNull();
    });

    it('should reject empty skill', () => {
      const result = skillTool.validateToolParams({ skill: '' });
      expect(result).toBe('Parameter "skill" must be a non-empty string.');
    });

    it('should reject non-existent skill', () => {
      const result = skillTool.validateToolParams({
        skill: 'non-existent',
      });
      expect(result).toBe(
        'Skill "non-existent" not found. Available skills: code-review, testing',
      );
    });

    it('should show appropriate message when no skills available', async () => {
      vi.mocked(mockSkillManager.listSkills).mockResolvedValue([]);

      const emptySkillTool = new SkillTool(config);
      await vi.runAllTimersAsync();

      const result = emptySkillTool.validateToolParams({
        skill: 'non-existent',
      });
      expect(result).toBe(
        'Skill "non-existent" not found. No skills are currently available.',
      );
    });
  });

  describe('refreshSkills', () => {
    it('should refresh when change listener fires', async () => {
      const newSkills: SkillConfig[] = [
        {
          name: 'new-skill',
          description: 'A brand new skill',
          level: 'project',
          filePath: '/project/.qwen/skills/new-skill/SKILL.md',
          body: 'New skill content.',
        },
      ];

      vi.mocked(mockSkillManager.listSkills).mockResolvedValueOnce(newSkills);

      const listener = changeListeners[0];
      expect(listener).toBeDefined();

      listener?.();
      await vi.runAllTimersAsync();

      expect(skillTool.description).toContain('new-skill');
      expect(skillTool.description).toContain('A brand new skill');
    });

    it('should refresh available skills and update description', async () => {
      const newSkills: SkillConfig[] = [
        {
          name: 'test-skill',
          description: 'A test skill',
          level: 'project',
          filePath: '/project/.qwen/skills/test-skill/SKILL.md',
          body: 'Test content.',
        },
      ];

      vi.mocked(mockSkillManager.listSkills).mockResolvedValue(newSkills);

      await skillTool.refreshSkills();

      expect(skillTool.description).toContain('test-skill');
      expect(skillTool.description).toContain('A test skill');
    });
  });

  describe('SkillToolInvocation', () => {
    const mockRuntimeConfig: SkillConfig = {
      ...mockSkills[0],
    };

    beforeEach(() => {
      vi.mocked(mockSkillManager.loadSkillForRuntime).mockResolvedValue(
        mockRuntimeConfig,
      );
    });

    it('should execute skill load successfully', async () => {
      const params: SkillParams = {
        skill: 'code-review',
      };

      const invocation = (
        skillTool as SkillToolWithProtectedMethods
      ).createInvocation(params);
      const result = await invocation.execute();

      expect(mockSkillManager.loadSkillForRuntime).toHaveBeenCalledWith(
        'code-review',
      );

      const llmText = partToString(result.llmContent);
      expect(llmText).toContain(
        'Base directory for this skill: /project/.qwen/skills/code-review',
      );
      expect(llmText.trim()).toContain(
        'Review code for quality and best practices.',
      );

      expect(result.returnDisplay).toBe('Launching skill: code-review');
    });

    it('should include allowedTools in result when present', async () => {
      const skillWithTools: SkillConfig = {
        ...mockSkills[1],
      };
      vi.mocked(mockSkillManager.loadSkillForRuntime).mockResolvedValue(
        skillWithTools,
      );

      const params: SkillParams = {
        skill: 'testing',
      };

      const invocation = (
        skillTool as SkillToolWithProtectedMethods
      ).createInvocation(params);
      const result = await invocation.execute();

      const llmText = partToString(result.llmContent);
      expect(llmText).toContain('testing');
      // Base description is omitted from llmContent; ensure body is present.
      expect(llmText).toContain('Help write comprehensive tests.');

      expect(result.returnDisplay).toBe('Launching skill: testing');
    });

    it('should handle skill not found error', async () => {
      vi.mocked(mockSkillManager.loadSkillForRuntime).mockResolvedValue(null);

      const params: SkillParams = {
        skill: 'non-existent',
      };

      const invocation = (
        skillTool as SkillToolWithProtectedMethods
      ).createInvocation(params);
      const result = await invocation.execute();

      const llmText = partToString(result.llmContent);
      expect(llmText).toContain('Skill "non-existent" not found');
    });

    it('should handle execution errors gracefully', async () => {
      vi.mocked(mockSkillManager.loadSkillForRuntime).mockRejectedValue(
        new Error('Loading failed'),
      );

      const consoleSpy = vi
        .spyOn(console, 'error')
        .mockImplementation(() => {});

      const params: SkillParams = {
        skill: 'code-review',
      };

      const invocation = (
        skillTool as SkillToolWithProtectedMethods
      ).createInvocation(params);
      const result = await invocation.execute();
|
||||
|
||||
const llmText = partToString(result.llmContent);
|
||||
expect(llmText).toContain('Failed to load skill');
|
||||
expect(llmText).toContain('Loading failed');
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('should not require confirmation', async () => {
|
||||
const params: SkillParams = {
|
||||
skill: 'code-review',
|
||||
};
|
||||
|
||||
const invocation = (
|
||||
skillTool as SkillToolWithProtectedMethods
|
||||
).createInvocation(params);
|
||||
const shouldConfirm = await invocation.shouldConfirmExecute();
|
||||
|
||||
expect(shouldConfirm).toBe(false);
|
||||
});
|
||||
|
||||
it('should provide correct description', () => {
|
||||
const params: SkillParams = {
|
||||
skill: 'code-review',
|
||||
};
|
||||
|
||||
const invocation = (
|
||||
skillTool as SkillToolWithProtectedMethods
|
||||
).createInvocation(params);
|
||||
const description = invocation.getDescription();
|
||||
|
||||
expect(description).toBe('Launching skill: "code-review"');
|
||||
});
|
||||
|
||||
it('should handle skill without additional files', async () => {
|
||||
vi.mocked(mockSkillManager.loadSkillForRuntime).mockResolvedValue(
|
||||
mockSkills[0],
|
||||
);
|
||||
|
||||
const params: SkillParams = {
|
||||
skill: 'code-review',
|
||||
};
|
||||
|
||||
const invocation = (
|
||||
skillTool as SkillToolWithProtectedMethods
|
||||
).createInvocation(params);
|
||||
const result = await invocation.execute();
|
||||
|
||||
const llmText = partToString(result.llmContent);
|
||||
expect(llmText).not.toContain('## Additional Files');
|
||||
|
||||
expect(result.returnDisplay).toBe('Launching skill: code-review');
|
||||
});
|
||||
});
|
||||
});
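A note on the fake-timer choreography in these tests: constructing a SkillTool schedules an asynchronous skill refresh, which is why tests that create a fresh instance flush pending async work before asserting. A minimal sketch of that pattern, assuming vi.useFakeTimers() is enabled in the suite's setup as the calls above imply:

// Sketch: construct, flush the constructor's async refresh, then assert.
const tool = new SkillTool(config);
await vi.runAllTimersAsync(); // lets refreshSkills() settle
expect(tool.description).toContain('<available_skills>');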
264 packages/core/src/tools/skill.ts Normal file
@@ -0,0 +1,264 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
import { ToolNames, ToolDisplayNames } from './tool-names.js';
import type { ToolResult, ToolResultDisplay } from './tools.js';
import type { Config } from '../config/config.js';
import type { SkillManager } from '../skills/skill-manager.js';
import type { SkillConfig } from '../skills/types.js';
import { logSkillLaunch, SkillLaunchEvent } from '../telemetry/index.js';
import path from 'path';

export interface SkillParams {
  skill: string;
}

/**
 * Skill tool that enables the model to access skill definitions.
 * The tool dynamically loads available skills and includes them in its description
 * for the model to choose from.
 */
export class SkillTool extends BaseDeclarativeTool<SkillParams, ToolResult> {
  static readonly Name: string = ToolNames.SKILL;

  private skillManager: SkillManager;
  private availableSkills: SkillConfig[] = [];

  constructor(private readonly config: Config) {
    // Initialize with a basic schema first
    const initialSchema = {
      type: 'object',
      properties: {
        skill: {
          type: 'string',
          description: 'The skill name (no arguments). E.g., "pdf" or "xlsx"',
        },
      },
      required: ['skill'],
      additionalProperties: false,
      $schema: 'http://json-schema.org/draft-07/schema#',
    };

    super(
      SkillTool.Name,
      ToolDisplayNames.SKILL,
      'Execute a skill within the main conversation. Loading available skills...', // Initial description
      Kind.Read,
      initialSchema,
      true, // isOutputMarkdown
      false, // canUpdateOutput
    );

    this.skillManager = config.getSkillManager();
    this.skillManager.addChangeListener(() => {
      void this.refreshSkills();
    });

    // Initialize the tool asynchronously
    this.refreshSkills();
  }

  /**
   * Asynchronously initializes the tool by loading available skills
   * and updating the description and schema.
   */
  async refreshSkills(): Promise<void> {
    try {
      this.availableSkills = await this.skillManager.listSkills();
      this.updateDescriptionAndSchema();
    } catch (error) {
      console.warn('Failed to load skills for Skills tool:', error);
      this.availableSkills = [];
      this.updateDescriptionAndSchema();
    } finally {
      // Update the client with the new tools
      const geminiClient = this.config.getGeminiClient();
      if (geminiClient && geminiClient.isInitialized()) {
        await geminiClient.setTools();
      }
    }
  }

  /**
   * Updates the tool's description and schema based on available skills.
   */
  private updateDescriptionAndSchema(): void {
    let skillDescriptions = '';
    if (this.availableSkills.length === 0) {
      skillDescriptions =
        'No skills are currently configured. Skills can be created by adding directories with SKILL.md files to .qwen/skills/ or ~/.qwen/skills/.';
    } else {
      skillDescriptions = this.availableSkills
        .map(
          (skill) => `<skill>
<name>
${skill.name}
</name>
<description>
${skill.description} (${skill.level})
</description>
<location>
${skill.level}
</location>
</skill>`,
        )
        .join('\n');
    }

    const baseDescription = `Execute a skill within the main conversation

<skills_instructions>
When users ask you to perform tasks, check if any of the available skills below can help complete the task more effectively. Skills provide specialized capabilities and domain knowledge.

How to invoke:
- Use this tool with the skill name only (no arguments)
- Examples:
  - \`skill: "pdf"\` - invoke the pdf skill
  - \`skill: "xlsx"\` - invoke the xlsx skill
  - \`skill: "ms-office-suite:pdf"\` - invoke using fully qualified name

Important:
- When a skill is relevant, you must invoke this tool IMMEDIATELY as your first action
- NEVER just announce or mention a skill in your text response without actually calling this tool
- This is a BLOCKING REQUIREMENT: invoke the relevant Skill tool BEFORE generating any other response about the task
- Only use skills listed in <available_skills> below
- Do not invoke a skill that is already running
- Do not use this tool for built-in CLI commands (like /help, /clear, etc.)
</skills_instructions>

<available_skills>
${skillDescriptions}
</available_skills>
`;
    // Update description using object property assignment
    (this as { description: string }).description = baseDescription;
  }

  override validateToolParams(params: SkillParams): string | null {
    // Validate required fields
    if (
      !params.skill ||
      typeof params.skill !== 'string' ||
      params.skill.trim() === ''
    ) {
      return 'Parameter "skill" must be a non-empty string.';
    }

    // Validate that the skill exists
    const skillExists = this.availableSkills.some(
      (skill) => skill.name === params.skill,
    );

    if (!skillExists) {
      const availableNames = this.availableSkills.map((s) => s.name);
      if (availableNames.length === 0) {
        return `Skill "${params.skill}" not found. No skills are currently available.`;
      }
      return `Skill "${params.skill}" not found. Available skills: ${availableNames.join(', ')}`;
    }

    return null;
  }

  protected createInvocation(params: SkillParams) {
    return new SkillToolInvocation(this.config, this.skillManager, params);
  }

  getAvailableSkillNames(): string[] {
    return this.availableSkills.map((skill) => skill.name);
  }
}

class SkillToolInvocation extends BaseToolInvocation<SkillParams, ToolResult> {
  constructor(
    private readonly config: Config,
    private readonly skillManager: SkillManager,
    params: SkillParams,
  ) {
    super(params);
  }

  getDescription(): string {
    return `Launching skill: "${this.params.skill}"`;
  }

  override async shouldConfirmExecute(): Promise<false> {
    // Skill loading is a read-only operation, no confirmation needed
    return false;
  }

  async execute(
    _signal?: AbortSignal,
    _updateOutput?: (output: ToolResultDisplay) => void,
  ): Promise<ToolResult> {
    try {
      // Load the skill with runtime config (includes additional files)
      const skill = await this.skillManager.loadSkillForRuntime(
        this.params.skill,
      );

      if (!skill) {
        // Log failed skill launch
        logSkillLaunch(
          this.config,
          new SkillLaunchEvent(this.params.skill, false),
        );

        // Get parse errors if any
        const parseErrors = this.skillManager.getParseErrors();
        const errorMessages: string[] = [];

        for (const [filePath, error] of parseErrors) {
          if (filePath.includes(this.params.skill)) {
            errorMessages.push(`Parse error at ${filePath}: ${error.message}`);
          }
        }

        const errorDetail =
          errorMessages.length > 0
            ? `\nErrors:\n${errorMessages.join('\n')}`
            : '';

        return {
          llmContent: `Skill "${this.params.skill}" not found.${errorDetail}`,
          returnDisplay: `Skill "${this.params.skill}" not found.${errorDetail}`,
        };
      }

      // Log successful skill launch
      logSkillLaunch(
        this.config,
        new SkillLaunchEvent(this.params.skill, true),
      );

      const baseDir = path.dirname(skill.filePath);

      // Build markdown content for LLM (show base dir, then body)
      const llmContent = `Base directory for this skill: ${baseDir}\n\n${skill.body}\n`;

      return {
        llmContent: [{ text: llmContent }],
        returnDisplay: `Launching skill: ${skill.name}`,
      };
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      console.error(`[SkillsTool] Error launching skill: ${errorMessage}`);

      // Log failed skill launch
      logSkillLaunch(
        this.config,
        new SkillLaunchEvent(this.params.skill, false),
      );

      return {
        llmContent: `Failed to load skill "${this.params.skill}": ${errorMessage}`,
        returnDisplay: `Failed to load skill "${this.params.skill}": ${errorMessage}`,
      };
    }
  }
}
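Read as a whole, the tool's lifecycle is: construct with a Config, refresh the cached skill list (at construction and whenever the SkillManager fires a change event), validate a requested name against that cache, then load the skill body at execution time. A minimal sketch of driving the public surface directly, assuming a config already wired with a SkillManager as in the tests above:

// Sketch only; createInvocation() is protected and is normally called by
// the tool framework when the model requests the tool.
const skillTool = new SkillTool(config); // also schedules an async refresh
await skillTool.refreshSkills();

console.log(skillTool.getAvailableSkillNames()); // e.g. ['code-review', 'testing']

// validateToolParams returns null on success, or an error string.
const error = skillTool.validateToolParams({ skill: 'code-review' });
console.log(error); // null when 'code-review' is in the cached list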
@@ -252,6 +252,10 @@ assistant: "I'm going to use the Task tool to launch the with the greeting-respo
  protected createInvocation(params: TaskParams) {
    return new TaskToolInvocation(this.config, this.subagentManager, params);
  }

  getAvailableSubagentNames(): string[] {
    return this.availableSubagents.map((subagent) => subagent.name);
  }
}

class TaskToolInvocation extends BaseToolInvocation<TaskParams, ToolResult> {
@@ -20,6 +20,7 @@ export const ToolNames = {
  TODO_WRITE: 'todo_write',
  MEMORY: 'save_memory',
  TASK: 'task',
  SKILL: 'skill',
  EXIT_PLAN_MODE: 'exit_plan_mode',
  WEB_FETCH: 'web_fetch',
  WEB_SEARCH: 'web_search',
@@ -42,6 +43,7 @@ export const ToolDisplayNames = {
  TODO_WRITE: 'TodoWrite',
  MEMORY: 'SaveMemory',
  TASK: 'Task',
  SKILL: 'Skill',
  EXIT_PLAN_MODE: 'ExitPlanMode',
  WEB_FETCH: 'WebFetch',
  WEB_SEARCH: 'WebSearch',
@@ -120,6 +120,10 @@ export function makeRelative(
  const resolvedTargetPath = path.resolve(targetPath);
  const resolvedRootDirectory = path.resolve(rootDirectory);

  if (!isSubpath(resolvedRootDirectory, resolvedTargetPath)) {
    return resolvedTargetPath;
  }

  const relativePath = path.relative(resolvedRootDirectory, resolvedTargetPath);

  // If the paths are the same, path.relative returns '', return '.' instead
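The guard added above changes makeRelative so that a target outside the root directory comes back as its absolute path rather than a '../..'-style relative path. A self-contained sketch of that behavior; isSubpath is reimplemented here for illustration and may differ from the project's real helper:

import path from 'path';

// Illustrative stand-in for the project's isSubpath helper.
function isSubpath(root: string, target: string): boolean {
  const rel = path.relative(root, target);
  return rel === '' || (!rel.startsWith('..') && !path.isAbsolute(rel));
}

function makeRelative(targetPath: string, rootDirectory: string): string {
  const resolvedTargetPath = path.resolve(targetPath);
  const resolvedRootDirectory = path.resolve(rootDirectory);

  if (!isSubpath(resolvedRootDirectory, resolvedTargetPath)) {
    return resolvedTargetPath; // outside the root: keep the absolute path
  }

  // '' means the paths are identical, hence '.'
  return path.relative(resolvedRootDirectory, resolvedTargetPath) || '.';
}

console.log(makeRelative('/repo/src/a.ts', '/repo')); // 'src/a.ts'
console.log(makeRelative('/etc/hosts', '/repo')); // '/etc/hosts'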
@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/sdk",
  "version": "0.6.0",
  "version": "0.1.0",
  "description": "TypeScript SDK for programmatic access to qwen-code CLI",
  "main": "./dist/index.cjs",
  "module": "./dist/index.mjs",
@@ -150,48 +150,49 @@ export function parseExecutableSpec(executableSpec?: string): {
  }

  // Check for runtime prefix (e.g., 'bun:/path/to/cli.js')
  // Use whitelist mechanism: only treat as runtime spec if prefix matches supported runtimes
  const supportedRuntimes = ['node', 'bun', 'tsx', 'deno'];
  const runtimeMatch = executableSpec.match(/^([^:]+):(.+)$/);

  if (runtimeMatch) {
    const [, runtime, filePath] = runtimeMatch;
    if (!runtime || !filePath) {
      throw new Error(`Invalid runtime specification: '${executableSpec}'`);

    // Only process as runtime specification if it matches a supported runtime
    if (runtime && supportedRuntimes.includes(runtime)) {
      if (!filePath) {
        throw new Error(`Invalid runtime specification: '${executableSpec}'`);
      }

      if (!validateRuntimeAvailability(runtime)) {
        throw new Error(
          `Runtime '${runtime}' is not available on this system. Please install it first.`,
        );
      }

      const resolvedPath = path.resolve(filePath);

      if (!fs.existsSync(resolvedPath)) {
        throw new Error(
          `Executable file not found at '${resolvedPath}' for runtime '${runtime}'. ` +
            'Please check the file path and ensure the file exists.',
        );
      }

      if (!validateFileExtensionForRuntime(resolvedPath, runtime)) {
        const ext = path.extname(resolvedPath);
        throw new Error(
          `File extension '${ext}' is not compatible with runtime '${runtime}'. ` +
            `Expected extensions for ${runtime}: ${getExpectedExtensions(runtime).join(', ')}`,
        );
      }

      return {
        runtime,
        executablePath: resolvedPath,
        isExplicitRuntime: true,
      };
    }

    const supportedRuntimes = ['node', 'bun', 'tsx', 'deno'];
    if (!supportedRuntimes.includes(runtime)) {
      throw new Error(
        `Unsupported runtime '${runtime}'. Supported runtimes: ${supportedRuntimes.join(', ')}`,
      );
    }

    if (!validateRuntimeAvailability(runtime)) {
      throw new Error(
        `Runtime '${runtime}' is not available on this system. Please install it first.`,
      );
    }

    const resolvedPath = path.resolve(filePath);

    if (!fs.existsSync(resolvedPath)) {
      throw new Error(
        `Executable file not found at '${resolvedPath}' for runtime '${runtime}'. ` +
          'Please check the file path and ensure the file exists.',
      );
    }

    if (!validateFileExtensionForRuntime(resolvedPath, runtime)) {
      const ext = path.extname(resolvedPath);
      throw new Error(
        `File extension '${ext}' is not compatible with runtime '${runtime}'. ` +
          `Expected extensions for ${runtime}: ${getExpectedExtensions(runtime).join(', ')}`,
      );
    }

    return {
      runtime,
      executablePath: resolvedPath,
      isExplicitRuntime: true,
    };
    // If not a supported runtime, fall through to treat as file path (e.g., Windows paths like 'D:\path\to\cli.js')
  }

  // Check if it's a command name (no path separators) or a file path
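The effect of the whitelist is easiest to see in isolation: a colon-separated prefix is treated as a runtime only when it is one of the four supported names, so Windows drive letters fall through to the command/file-path branch. A standalone sketch of just that classification step; the names here are illustrative, not the SDK's actual exports:

const SUPPORTED_RUNTIMES = ['node', 'bun', 'tsx', 'deno'];

function classifySpec(spec: string): { runtime?: string; rest: string } {
  const match = spec.match(/^([^:]+):(.+)$/);
  if (match && SUPPORTED_RUNTIMES.includes(match[1])) {
    return { runtime: match[1], rest: match[2] }; // explicit runtime prefix
  }
  return { rest: spec }; // command name or file path, including 'D:\\...'
}

console.log(classifySpec('bun:/path/to/cli.js')); // { runtime: 'bun', rest: '/path/to/cli.js' }
console.log(classifySpec('D:\\workspace\\cli.js')); // { rest: 'D:\\workspace\\cli.js' }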
@@ -125,12 +125,43 @@ describe('CLI Path Utilities', () => {
      });
    });

    it('should throw for invalid runtime prefix format', () => {
    it('should treat non-whitelisted runtime prefixes as command names', () => {
      // With whitelist approach, 'invalid:format' is not recognized as a runtime spec
      // so it's treated as a command name, which fails validation due to the colon
      expect(() => parseExecutableSpec('invalid:format')).toThrow(
        'Unsupported runtime',
        'Invalid command name',
      );
    });

    it('should treat Windows drive letters as file paths, not runtime specs', () => {
      mockFs.existsSync.mockReturnValue(true);

      // Test various Windows drive letters
      const windowsPaths = [
        'C:\\path\\to\\cli.js',
        'D:\\path\\to\\cli.js',
        'E:\\Users\\dev\\qwen\\cli.js',
      ];

      for (const winPath of windowsPaths) {
        const result = parseExecutableSpec(winPath);

        expect(result.isExplicitRuntime).toBe(false);
        expect(result.runtime).toBeUndefined();
        expect(result.executablePath).toBe(path.resolve(winPath));
      }
    });

    it('should handle Windows paths with forward slashes', () => {
      mockFs.existsSync.mockReturnValue(true);

      const result = parseExecutableSpec('C:/path/to/cli.js');

      expect(result.isExplicitRuntime).toBe(false);
      expect(result.runtime).toBeUndefined();
      expect(result.executablePath).toBe(path.resolve('C:/path/to/cli.js'));
    });

    it('should throw when runtime-prefixed file does not exist', () => {
      mockFs.existsSync.mockReturnValue(false);
@@ -453,6 +484,41 @@ describe('CLI Path Utilities', () => {
        originalInput: `bun:${bundlePath}`,
      });
    });

    it('should handle Windows paths with drive letters', () => {
      const windowsPath = 'D:\\path\\to\\cli.js';
      const result = prepareSpawnInfo(windowsPath);

      expect(result).toEqual({
        command: process.execPath,
        args: [path.resolve(windowsPath)],
        type: 'node',
        originalInput: windowsPath,
      });
    });

    it('should handle Windows paths with TypeScript files', () => {
      const windowsPath = 'C:\\Users\\dev\\qwen\\index.ts';
      const result = prepareSpawnInfo(windowsPath);

      expect(result).toEqual({
        command: 'tsx',
        args: [path.resolve(windowsPath)],
        type: 'tsx',
        originalInput: windowsPath,
      });
    });

    it('should not confuse Windows drive letters with runtime prefixes', () => {
      // Ensure 'D:' is not treated as a runtime specification
      const windowsPath = 'D:\\workspace\\project\\cli.js';
      const result = prepareSpawnInfo(windowsPath);

      // Should use node runtime based on .js extension, not treat 'D' as runtime
      expect(result.type).toBe('node');
      expect(result.command).toBe(process.execPath);
      expect(result.args).toEqual([path.resolve(windowsPath)]);
    });
  });

  describe('error cases', () => {
@@ -472,21 +538,39 @@ describe('CLI Path Utilities', () => {
      );
    });

    it('should provide helpful error for invalid runtime specification', () => {
    it('should treat non-whitelisted runtime prefixes as command names', () => {
      // With whitelist approach, 'invalid:spec' is not recognized as a runtime spec
      // so it's treated as a command name, which fails validation due to the colon
      expect(() => prepareSpawnInfo('invalid:spec')).toThrow(
        'Unsupported runtime',
        'Invalid command name',
      );
    });

    it('should handle Windows paths correctly even when file is missing', () => {
      mockFs.existsSync.mockReturnValue(false);

      expect(() => prepareSpawnInfo('D:\\missing\\cli.js')).toThrow(
        'Executable file not found at',
      );
      // Should not throw 'Invalid command name' error (which would happen if 'D:' was treated as invalid command)
      expect(() => prepareSpawnInfo('D:\\missing\\cli.js')).not.toThrow(
        'Invalid command name',
      );
    });
  });

  describe('comprehensive validation', () => {
    describe('runtime validation', () => {
      it('should reject unsupported runtimes', () => {
        expect(() =>
          parseExecutableSpec('unsupported:/path/to/file.js'),
        ).toThrow(
          "Unsupported runtime 'unsupported'. Supported runtimes: node, bun, tsx, deno",
        );
      it('should treat unsupported runtime prefixes as file paths', () => {
        mockFs.existsSync.mockReturnValue(true);

        // With whitelist approach, 'unsupported:' is not recognized as a runtime spec
        // so 'unsupported:/path/to/file.js' is treated as a file path
        const result = parseExecutableSpec('unsupported:/path/to/file.js');

        // Should be treated as a file path, not a runtime specification
        expect(result.isExplicitRuntime).toBe(false);
        expect(result.runtime).toBeUndefined();
      });

      it('should validate runtime availability for explicit runtime specs', () => {