Mirror of https://github.com/QwenLM/qwen-code.git
Synced 2025-12-25 11:09:13 +00:00

Compare commits: v0.0.5-nig...update-sec
211 Commits
| SHA1 |
|---|
| 5f08128d07 |
| 93f5e59710 |
| 7b378e826c |
| 5e70b34041 |
| df1479f864 |
| 14e6d3c01e |
| da0b8b5534 |
| e1d502991d |
| 7e01554b9c |
| 36c65658ff |
| a925ac56fa |
| 5d4a9452d8 |
| 3e082ae89a |
| 51207043d0 |
| 2403061bab |
| 1ffcb51052 |
| c33d162ff2 |
| bbfe94cfe2 |
| 03c7b1836f |
| f2ba6dbb8a |
| 2d0884b04d |
| fc70439355 |
| 0265b67b90 |
| 1f91b9ece1 |
| c58106079e |
| 6516d0d136 |
| 5369af61d2 |
| 6a4005cace |
| 290ccdbe21 |
| b5514fd052 |
| bc92da04e9 |
| 0bc45aeefe |
| 7856f52afb |
| e986476fe0 |
| cfc1aebee6 |
| ef1c8a4bfe |
| 484292b2ac |
| f9659184d4 |
| 6d5bb1b57c |
| fb9f2d292c |
| 16ea8560b7 |
| 2655af079a |
| 807844fb57 |
| 2202d26ac7 |
| 58f66ccfc6 |
| 65c622c0ac |
| a3ec2f52c9 |
| c96852dc56 |
| 028a82ebeb |
| 6b67cd1b57 |
| 96a9b683b2 |
| dcc86699cf |
| 964509f587 |
| a37423bf7f |
| bfcb3e7f1d |
| 1a581ed191 |
| 5c94913643 |
| e221b077e5 |
| 0f58b3fd32 |
| 32d06b2fc1 |
| e3a5806ae2 |
| a45adbdc76 |
| 41500814b0 |
| 786832913b |
| 4807434d9f |
| c09abb817f |
| b7663950f2 |
| 8158e82165 |
| f8d3571e31 |
| 6f399c078a |
| 854c452580 |
| f503be14e9 |
| 5d2a678cb2 |
| ce632725b0 |
| ea7dcf8347 |
| ffc2d27ca3 |
| ea96293e16 |
| cd7e60e008 |
| 59bde4a612 |
| 02f7e48c51 |
| aeb6602266 |
| 805114aef8 |
| 91035ad7b0 |
| 12a9bc3ed9 |
| 2141b39c3d |
| 268627469b |
| 6a72cd064b |
| aebe3ace3c |
| c402784d97 |
| bed6ab1cce |
| 1b08a6c063 |
| 82fa7a0660 |
| 2e9236fab4 |
| dadf05809c |
| 29c3825604 |
| faf6a5497a |
| dd85aaa951 |
| aacae1de43 |
| 8d993156e7 |
| 57003ca68c |
| 47de37eb0a |
| dc7b4fda64 |
| 3dcca31796 |
| c194a6ac3b |
| d421fa9e64 |
| 2778c7d851 |
| b465145229 |
| f2d6748432 |
| 08f1431946 |
| 43d5aaa798 |
| 5c8268b6f4 |
| d0cda58f1f |
| c7a1de4983 |
| 49001a0f83 |
| 11ecf6fc86 |
| 99ba2f6424 |
| 93f8fe3671 |
| e7b468e122 |
| dca040908a |
| 2180dd13dc |
| 11808ef7ed |
| 8da6d23688 |
| 37b83e05a7 |
| 5caf23d627 |
| d1bfba1abb |
| b9fe4fc263 |
| e506b40c27 |
| 83a04c4755 |
| 94b7b402c5 |
| a8984a9b30 |
| acd48a1259 |
| 70478b92a9 |
| 2cdaf912ba |
| 072d8ba289 |
| 03ed37d0dc |
| bedcbb9feb |
| 820169ba2e |
| 15a1f1af9d |
| 387706607d |
| dccca91fc9 |
| a6a386f72a |
| 67d16992cf |
| 9382334a5e |
| c795168e9c |
| 24c5a15d7a |
| c725e258c6 |
| d42e3f1e7f |
| 7748e56153 |
| e126d2fcd9 |
| a5a3da01f6 |
| dc9f17bb4a |
| f21ff09389 |
| 6c3fb18ef6 |
| a3a432e3cf |
| 6f7beb414c |
| 61e382444a |
| 32809a7be7 |
| 37a3f1e6b6 |
| 574015edd9 |
| f9a05401c1 |
| 9a6422f331 |
| ae86c7ba05 |
| 65be9cab47 |
| 23c014e29c |
| 3ef2c6d198 |
| c77a22d4c6 |
| d06e17fbd9 |
| 0c6f788406 |
| 325bb89137 |
| ac1bb5ee42 |
| 498edb57ab |
| 7bc8766542 |
| c1fe688956 |
| 21965f986c |
| 32b1ef3779 |
| bcce1e7b84 |
| bc23009f61 |
| b447c329db |
| fd434626c5 |
| 8985e489a5 |
| 0ce89392b8 |
| d5a1b717c2 |
| 091804c750 |
| d64c3d6af8 |
| 327f915610 |
| 008051e42d |
| 293bb82019 |
| 80079cd2a5 |
| 7356764a48 |
| 871e0dfab8 |
| 83c4dddb7e |
| 1c1aa047ff |
| b08679c906 |
| b6c2c64f9b |
| cfe3753d4c |
| 9aef0a8e6c |
| a5ea113a8e |
| 379765da23 |
| f7e559223d |
| 0170791800 |
| e275441651 |
| f2e006179d |
| bd85070411 |
| 9ed351260c |
| ab0d9df658 |
| bce6eb5014 |
| 9ca48c00a6 |
| 0b5cc96362 |
| b497791c59 |
| 36e1e57252 |
| a9f04eba2c |
.github/workflows/build-and-publish-image.yml (vendored, new file, 65 lines)

@@ -0,0 +1,65 @@
name: Build and Publish Docker Image

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch:
    inputs:
      publish:
        description: 'Publish to GHCR (only works on main branch)'
        type: boolean
        default: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-to-ghcr:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha,prefix=sha-,format=short

      - name: Log in to the Container registry
        if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image
        id: build-and-push
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v') || github.event.inputs.publish == 'true') }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            CLI_VERSION_ARG=${{ github.sha }}
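For local testing, the multi-arch build above can be approximated with Docker Buildx. This is a minimal sketch, assuming Docker with the buildx plugin and QEMU binfmt handlers are installed; the image name and tag are placeholders, not something this workflow publishes:

```bash
# Sketch only: approximate the CI build locally (image name/tag are placeholders).
docker buildx create --use 2>/dev/null || true
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg CLI_VERSION_ARG="$(git rev-parse HEAD)" \
  -t ghcr.io/qwenlm/qwen-code:dev \
  .
```

Without `--push` (or `--load` for a single platform), the result stays in the builder cache, which is usually enough to confirm the Dockerfile builds on both architectures.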
@@ -24,7 +24,7 @@ jobs:
ISSUE_TITLE: ${{ github.event.issue.title }}
ISSUE_BODY: ${{ github.event.issue.body }}
with:
version: 0.0.4
version: 0.0.7
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
settings_json: |
{

@@ -42,7 +42,7 @@ jobs:
ISSUES_TO_TRIAGE: ${{ steps.find_issues.outputs.issues_to_triage }}
REPOSITORY: ${{ github.repository }}
with:
version: 0.0.4
version: 0.0.7
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
.github/workflows/qwen-code-pr-review.yml (vendored, 12 changed lines)

@@ -1,7 +1,7 @@
name: 🧐 Qwen Pull Request Review

on:
pull_request:
pull_request_target:
types: [opened]
pull_request_review_comment:
types: [created]

@@ -18,7 +18,11 @@ jobs:
review-pr:
if: >
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'pull_request' && github.event.action == 'opened') ||
(github.event_name == 'pull_request_target' &&
github.event.action == 'opened' &&
(github.event.pull_request.author_association == 'OWNER' ||
github.event.pull_request.author_association == 'MEMBER' ||
github.event.pull_request.author_association == 'COLLABORATOR')) ||
(github.event_name == 'issue_comment' &&
github.event.issue.pull_request &&
contains(github.event.comment.body, '@qwen /review') &&

@@ -49,9 +53,9 @@
token: ${{ secrets.GITHUB_TOKEN }}
fetch-depth: 0

- name: Get PR details (pull_request & workflow_dispatch)
- name: Get PR details (pull_request_target & workflow_dispatch)
id: get_pr
if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
if: github.event_name == 'pull_request_target' || github.event_name == 'workflow_dispatch'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
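The `issue_comment` branch of the `if:` condition above means an eligible user can request a review from an existing PR conversation by posting the trigger phrase. A minimal sketch with the GitHub CLI; the PR number is a placeholder:

```bash
# Sketch only: ask the workflow to review PR #123 by posting the trigger comment.
gh pr comment 123 --body "@qwen /review"
```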
.npmrc (2 changed lines)

@@ -1 +1 @@
@google:registry=https://wombat-dressing-room.appspot.com
registry=https://registry.npmjs.org
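The `.npmrc` change points installs at the public npm registry instead of Google's wombat-dressing-room proxy. A quick way to confirm which registry npm resolves when run from the repo root (assuming npm is installed):

```bash
# Should print https://registry.npmjs.org/
npm config get registry
```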
CHANGELOG.md (new file, 26 lines)

@@ -0,0 +1,26 @@
# Changelog

## 0.0.7

- Fix MCP tools
- Fix Web Fetch tool
- Fix Web Search tool by replacing the Google/Gemini web search with the Tavily API
- Fix: tolerate occasional tool call parameters returned by the LLM that are invalid JSON
- Fix: prevent concurrent query submissions in some rare cases
- Fix: incorrect qwen logger exit handler setup
- Fix: separate static QR code and dynamic spin components
- Sync gemini-cli to v0.1.18

## 0.0.6

- Add usage statistics logging for Qwen integration
- Make `/init` command respect the configured context filename and align docs with QWEN.md
- Fix EPERM error when running `qwen --sandbox` on macOS
- Fix terminal flicker when waiting for login
- Fix `glm-4.5` model request error

## 0.0.5

- Support Qwen OAuth login and provide up to 2000 free requests per day
- Sync gemini-cli to v0.1.17
- Add `systemPromptMappings` configuration feature
Dockerfile (55 changed lines)

@@ -1,3 +1,31 @@
# Build stage
FROM docker.io/library/node:20-slim AS builder

# Install build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    make \
    g++ \
    git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Set up npm global package folder
RUN mkdir -p /usr/local/share/npm-global
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin

# Copy source code
COPY . /home/node/app
WORKDIR /home/node/app

# Install dependencies and build packages
RUN npm ci \
    && npm run build --workspaces \
    && npm pack -w @qwen-code/qwen-code --pack-destination ./packages/cli/dist \
    && npm pack -w @qwen-code/qwen-code-core --pack-destination ./packages/core/dist

# Runtime stage
FROM docker.io/library/node:20-slim

ARG SANDBOX_NAME="qwen-code-sandbox"

@@ -5,11 +33,9 @@ ARG CLI_VERSION_ARG
ENV SANDBOX="$SANDBOX_NAME"
ENV CLI_VERSION=$CLI_VERSION_ARG

# install minimal set of packages, then clean up
# Install runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    make \
    g++ \
    man-db \
    curl \
    dnsutils \

@@ -29,22 +55,19 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# set up npm global package folder under /usr/local/share
# give it to non-root user node, already set up in base image
RUN mkdir -p /usr/local/share/npm-global \
    && chown -R node:node /usr/local/share/npm-global
# Set up npm global package folder
RUN mkdir -p /usr/local/share/npm-global
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin

# switch to non-root user node
USER node
# Copy built packages from builder stage
COPY --from=builder /home/node/app/packages/cli/dist/*.tgz /tmp/
COPY --from=builder /home/node/app/packages/core/dist/*.tgz /tmp/

# install qwen-code and clean up
COPY packages/cli/dist/qwen-code-*.tgz /usr/local/share/npm-global/qwen-code.tgz
COPY packages/core/dist/qwen-code-qwen-code-core-*.tgz /usr/local/share/npm-global/qwen-code-core.tgz
RUN npm install -g /usr/local/share/npm-global/qwen-code.tgz /usr/local/share/npm-global/qwen-code-core.tgz \
# Install built packages globally
RUN npm install -g /tmp/*.tgz \
    && npm cache clean --force \
    && rm -f /usr/local/share/npm-global/qwen-{code,code-core}.tgz
    && rm -rf /tmp/*.tgz

# default entrypoint when none specified
CMD ["qwen"]
# Default entrypoint when none specified
CMD ["qwen"]
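The Dockerfile is now a two-stage build: the `builder` stage compiles and packs the workspaces, and the runtime stage installs only the resulting tarballs. A minimal local sketch, assuming Docker is installed; the tag `qwen-code-sandbox:local` is a placeholder, and the trailing `qwen --version` check assumes the standard version flag:

```bash
# Sketch only: build the image and check the installed CLI inside it.
docker build --build-arg CLI_VERSION_ARG="$(git rev-parse --short HEAD)" \
  -t qwen-code-sandbox:local .
docker run --rm -it qwen-code-sandbox:local qwen --version
```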
LICENSE (1 changed line)

@@ -188,6 +188,7 @@
identification within third-party archives.

Copyright 2025 Google LLC
Copyright 2025 Qwen

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Makefile (2 changed lines)

@@ -53,7 +53,7 @@ debug:


run-npx:
npx https://github.com/google-gemini/gemini-cli
npx https://github.com/QwenLM/qwen-code

create-alias:
scripts/create_alias.sh
README.md (77 changed lines)

@@ -15,12 +15,43 @@

</div>

<div align="center">

<!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://readme-i18n.com/de/QwenLM/qwen-code">Deutsch</a> |
<a href="https://readme-i18n.com/es/QwenLM/qwen-code">Español</a> |
<a href="https://readme-i18n.com/fr/QwenLM/qwen-code">français</a> |
<a href="https://readme-i18n.com/ja/QwenLM/qwen-code">日本語</a> |
<a href="https://readme-i18n.com/ko/QwenLM/qwen-code">한국어</a> |
<a href="https://readme-i18n.com/pt/QwenLM/qwen-code">Português</a> |
<a href="https://readme-i18n.com/ru/QwenLM/qwen-code">Русский</a> |
<a href="https://readme-i18n.com/zh/QwenLM/qwen-code">中文</a>

</div>

Qwen Code is a powerful command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli) ([details](./README.gemini.md)), specifically optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder) models. It enhances your development workflow with advanced code understanding, automated tasks, and intelligent assistance.

## 💡 Free Options Available

Get started with Qwen Code at no cost using any of these free options:

### 🔥 Qwen OAuth (Recommended)

- **2,000 requests per day** with no token limits
- **60 requests per minute** rate limit
- Simply run `qwen` and authenticate with your qwen.ai account
- Automatic credential management and refresh
- Use the `/auth` command to switch to Qwen OAuth if you initialized with OpenAI-compatible mode

### 🌏 Regional Free Tiers

- **Mainland China**: ModelScope offers **2,000 free API calls per day**
- **International**: OpenRouter provides **up to 1,000 free API calls per day** worldwide

For detailed setup instructions, see [Authorization](#authorization).

> [!WARNING]
> **Token Usage Notice**: Qwen Code may issue multiple API calls per cycle, resulting in higher token usage (similar to Claude Code). We're actively optimizing API efficiency.
>
> 💡 **Free Option**: ModelScope provides **2,000 free API calls per day** for users in mainland China. OpenRouter offers up to **1,000 free API calls per day** worldwide. For setup instructions, see [API Configuration](#api-configuration).

## Key Features

@@ -84,15 +115,43 @@ Create or edit `.qwen/settings.json` in your home directory:

- **`/compress`** - Compress conversation history to continue within token limits
- **`/clear`** - Clear all conversation history and start fresh
- **`/status`** - Check current token usage and limits
- **`/stats`** - Check current token usage and limits

> 📝 **Note**: Session token limit applies to a single conversation, not cumulative API calls.

### API Configuration
### Authorization

Qwen Code supports multiple API providers. You can configure your API key through environment variables or a `.env` file in your project root.
Choose your preferred authentication method based on your needs:

#### Configuration Methods
#### 1. Qwen OAuth (🚀 Recommended - Start in 30 seconds)

The easiest way to get started - completely free with generous quotas:

```bash
# Just run this command and follow the browser authentication
qwen
```

**What happens:**

1. **Instant Setup**: CLI opens your browser automatically
2. **One-Click Login**: Authenticate with your qwen.ai account
3. **Automatic Management**: Credentials cached locally for future use
4. **No Configuration**: Zero setup required - just start coding!

**Free Tier Benefits:**

- ✅ **2,000 requests/day** (no token counting needed)
- ✅ **60 requests/minute** rate limit
- ✅ **Automatic credential refresh**
- ✅ **Zero cost** for individual users
- ℹ️ **Note**: Model fallback may occur to maintain service quality

#### 2. OpenAI-Compatible API

Use API keys for OpenAI or other compatible providers:

**Configuration Methods:**

1. **Environment Variables**

@@ -110,7 +169,7 @@ Qwen Code supports multiple API providers. You can configure your API key throug
OPENAI_MODEL=your_model_choice
```

#### API Provider Options
**API Provider Options**

> ⚠️ **Regional Notice:**
>

@@ -265,7 +324,7 @@ qwen
- `/help` - Display available commands
- `/clear` - Clear conversation history
- `/compress` - Compress history to save tokens
- `/status` - Show current session information
- `/stats` - Show current session information
- `/exit` or `/quit` - Exit Qwen Code

### Keyboard Shortcuts

@@ -287,6 +346,8 @@ qwen

See [CONTRIBUTING.md](./CONTRIBUTING.md) to learn how to contribute to the project.

For detailed authentication setup, see the [authentication guide](./docs/cli/authentication.md).

## Troubleshooting

If you encounter issues, check the [troubleshooting guide](docs/troubleshooting.md).
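The regional free tiers mentioned above pair naturally with the OpenAI-compatible setup. Below is a hedged sketch for OpenRouter; the base URL and model slug are assumptions taken from OpenRouter's own documentation rather than from this README, so verify them before use:

```bash
# Sketch only: point Qwen Code at OpenRouter (base URL and model slug are assumptions).
mkdir -p .qwen
cat >> .qwen/.env <<'EOF'
OPENAI_API_KEY="your-openrouter-api-key"
OPENAI_BASE_URL="https://openrouter.ai/api/v1"
OPENAI_MODEL="qwen/qwen3-coder:free"
EOF
qwen
```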
@@ -56,7 +56,7 @@ find initiatives that interest you.
Gemini CLI is an open-source project, and we welcome contributions from the community! Whether you're a developer, a designer, or just an enthusiastic user you can find our [Community Guidelines here](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) to learn how to get started. There are many ways to get involved:

- **Roadmap:** Please review and find areas in our [roadmap](https://github.com/google-gemini/gemini-cli/issues/4191) that you would like to contribute to. Contributions based on this will be easiest to integrate with.
- **Report Bugs:** If you find an issue, please create a bug(https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml) with as much detail as possible. If you believe it is a critical breaking issue preventing direct CLI usage, please tag it as `priorty/p0`.
- **Report Bugs:** If you find an issue, please create a [bug](https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml) with as much detail as possible. If you believe it is a critical breaking issue preventing direct CLI usage, please tag it as `priority/p0`.
- **Suggest Features:** Have a great idea? We'd love to hear it! Open a [feature request](https://github.com/google-gemini/gemini-cli/issues/new?template=feature_request.yml).
- **Contribute Code:** Check out our [CONTRIBUTING.md](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) file for guidelines on how to submit pull requests. We have a list of "good first issues" for new contributors.
- **Write Documentation:** Help us improve our documentation, tutorials, and examples.
@@ -1,8 +1,5 @@
# Reporting Security Issues

To report a security issue, please use [https://g.co/vulnz](https://g.co/vulnz).
We use g.co/vulnz for our intake, and do coordination and disclosure here on
GitHub (including using GitHub Security Advisory). The Google Security Team will
respond within 5 working days of your report on g.co/vulnz.
Please report any security issue or crash report to [ASRC](https://security.alibaba.com/) (Alibaba Security Response Center), where the issue will be triaged appropriately.

[GitHub Security Advisory]: https://github.com/google-gemini/gemini-cli/security/advisories
Thank you for helping keep our project secure.
@@ -1,106 +1,93 @@
# Authentication Setup

The Gemini CLI requires you to authenticate with Google's AI services. On initial startup you'll need to configure **one** of the following authentication methods:
Qwen Code supports two main authentication methods to access AI models. Choose the method that best fits your use case:

1. **Login with Google (Gemini Code Assist):**
   - Use this option to log in with your google account.
   - During initial startup, Gemini CLI will direct you to a webpage for authentication. Once authenticated, your credentials will be cached locally so the web login can be skipped on subsequent runs.
   - Note that the web login must be done in a browser that can communicate with the machine Gemini CLI is being run from. (Specifically, the browser will be redirected to a localhost url that Gemini CLI will be listening on).
   - <a id="workspace-gca">Users may have to specify a GOOGLE_CLOUD_PROJECT if:</a>
     1. You have a Google Workspace account. Google Workspace is a paid service for businesses and organizations that provides a suite of productivity tools, including a custom email domain (e.g. your-name@your-company.com), enhanced security features, and administrative controls. These accounts are often managed by an employer or school.
     1. You have received a Gemini Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts)
     1. You have been assigned a license to a current Gemini Code Assist standard or enterprise subscription.
     1. You are using the product outside the [supported regions](https://developers.google.com/gemini-code-assist/resources/available-locations) for free individual usage.
     1. You are a Google account holder under the age of 18
   - If you fall into one of these categories, you must first configure a Google Cloud Project ID to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam).
1. **Qwen OAuth (Recommended):**
   - Use this option to log in with your qwen.ai account.
   - During initial startup, Qwen Code will direct you to the qwen.ai authentication page. Once authenticated, your credentials will be cached locally so the web login can be skipped on subsequent runs.
   - **Requirements:**
     - Valid qwen.ai account
     - Internet connection for initial authentication
   - **Benefits:**
     - Seamless access to Qwen models
     - Automatic credential refresh
     - No manual API key management required

You can temporarily set the environment variable in your current shell session using the following command:
   **Getting Started:**

```bash
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
```
   ```bash
   # Start Qwen Code and follow the OAuth flow
   qwen
   ```

- For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
   The CLI will automatically open your browser and guide you through the authentication process.

```bash
echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc
source ~/.bashrc
```
   **For users who authenticate using their qwen.ai account:**

2. **<a id="gemini-api-key"></a>Gemini API key:**
   - Obtain your API key from Google AI Studio: [https://aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)
   - Set the `GEMINI_API_KEY` environment variable. In the following methods, replace `YOUR_GEMINI_API_KEY` with the API key you obtained from Google AI Studio:
   - You can temporarily set the environment variable in your current shell session using the following command:
     ```bash
     export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"
     ```
   - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files).
   **Quota:**
   - 60 requests per minute
   - 2,000 requests per day
   - Token usage is not applicable

   - Alternatively you can export the API key from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
   **Cost:** Free

   ```bash
   echo 'export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"' >> ~/.bashrc
   source ~/.bashrc
   ```
   **Notes:** A specific quota for different models is not specified; model fallback may occur to preserve shared experience quality.

   :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it.
2. **<a id="openai-api"></a>OpenAI-Compatible API:**
   - Use API keys for OpenAI or other compatible providers.
   - This method allows you to use various AI models through API keys.

3. **Vertex AI:**
   - Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser)
   - Set the `GOOGLE_API_KEY` environment variable. In the following methods, replace `YOUR_GOOGLE_API_KEY` with your Vertex AI API key:
     - You can temporarily set these environment variables in your current shell session using the following command:
       ```bash
       export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
       ```
     - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file:
       ```bash
       echo 'export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"' >> ~/.bashrc
       source ~/.bashrc
       ```
   - To use Application Default Credentials (ADC), use the following command:
     - Ensure you have a Google Cloud project and have enabled the Vertex AI API.
       ```bash
       gcloud auth application-default login
       ```
       For more information, see [Set up Application Default Credentials for Google Cloud](https://cloud.google.com/docs/authentication/provide-credentials-adc).
   - Set the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables. In the following methods, replace `YOUR_PROJECT_ID` and `YOUR_PROJECT_LOCATION` with the relevant values for your project:
     - You can temporarily set these environment variables in your current shell session using the following commands:
       ```bash
       export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
       export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1
       ```
     - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files)
   **Configuration Methods:**

   - Alternatively you can export the environment variables from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file:
   a) **Environment Variables:**

   ```bash
   echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc
   echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc
   source ~/.bashrc
   ```
   ```bash
   export OPENAI_API_KEY="your_api_key_here"
   export OPENAI_BASE_URL="your_api_endpoint" # Optional
   export OPENAI_MODEL="your_model_choice" # Optional
   ```

   :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it.
   b) **Project `.env` File:**
   Create a `.env` file in your project root:

4. **Cloud Shell:**
   - This option is only available when running in a Google Cloud Shell environment.
   - It automatically uses the credentials of the logged-in user in the Cloud Shell environment.
   - This is the default authentication method when running in Cloud Shell and no other method is configured.
   ```env
   OPENAI_API_KEY=your_api_key_here
   OPENAI_BASE_URL=your_api_endpoint
   OPENAI_MODEL=your_model_choice
   ```

   :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it.
   **Supported Providers:**
   - OpenAI (https://platform.openai.com/api-keys)
   - Alibaba Cloud Bailian
   - ModelScope
   - OpenRouter
   - Azure OpenAI
   - Any OpenAI-compatible API

## Switching Authentication Methods

To switch between authentication methods during a session, use the `/auth` command in the CLI interface:

```bash
# Within the CLI, type:
/auth
```

This will allow you to reconfigure your authentication method without restarting the application.

### Persisting Environment Variables with `.env` Files

You can create a **`.gemini/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.gemini/.env` is recommended to keep Gemini variables isolated from other tools.
You can create a **`.qwen/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.qwen/.env` is recommended to keep Qwen Code variables isolated from other tools.

**Important:** Some environment variables (like `DEBUG` and `DEBUG_MODE`) are automatically excluded from project `.env` files to prevent interference with gemini-cli behavior. Use `.gemini/.env` files for gemini-cli specific variables.
**Important:** Some environment variables (like `DEBUG` and `DEBUG_MODE`) are automatically excluded from project `.env` files to prevent interference with qwen-code behavior. Use `.qwen/.env` files for qwen-code specific variables.

Gemini CLI automatically loads environment variables from the **first** `.env` file it finds, using the following search order:
Qwen Code automatically loads environment variables from the **first** `.env` file it finds, using the following search order:

1. Starting in the **current directory** and moving upward toward `/`, for each directory it checks:
   1. `.gemini/.env`
   1. `.qwen/.env`
   2. `.env`
2. If no file is found, it falls back to your **home directory**:
   - `~/.gemini/.env`
   - `~/.qwen/.env`
   - `~/.env`

> **Important:** The search stops at the **first** file encountered—variables are **not merged** across multiple files.

@@ -110,37 +97,47 @@ Gemini CLI automatically loads environment variables from the **first** `.env` f
**Project-specific overrides** (take precedence when you are inside the project):

```bash
mkdir -p .gemini
echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .gemini/.env
mkdir -p .qwen
cat >> .qwen/.env <<'EOF'
OPENAI_API_KEY="your-api-key"
OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
EOF
```

**User-wide settings** (available in every directory):

```bash
mkdir -p ~/.gemini
cat >> ~/.gemini/.env <<'EOF'
GOOGLE_CLOUD_PROJECT="your-project-id"
GEMINI_API_KEY="your-gemini-api-key"
mkdir -p ~/.qwen
cat >> ~/.qwen/.env <<'EOF'
OPENAI_API_KEY="your-api-key"
OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
OPENAI_MODEL="qwen3-coder-plus"
EOF
```

## Non-Interactive Mode / Headless Environments

When running the Gemini CLI in a non-interactive environment, you cannot use the interactive login flow.
When running Qwen Code in a non-interactive environment, you cannot use the OAuth login flow.
Instead, you must configure authentication using environment variables.

The CLI will automatically detect if it is running in a non-interactive terminal and will use one of the
following authentication methods if available:
The CLI will automatically detect if it is running in a non-interactive terminal and will use the
OpenAI-compatible API method if configured:

1. **Gemini API Key:**
   - Set the `GEMINI_API_KEY` environment variable.
   - The CLI will use this key to authenticate with the Gemini API.
1. **OpenAI-Compatible API:**
   - Set the `OPENAI_API_KEY` environment variable.
   - Optionally set `OPENAI_BASE_URL` and `OPENAI_MODEL` for custom endpoints.
   - The CLI will use these credentials to authenticate with the API provider.

2. **Vertex AI:**
   - Set the `GOOGLE_GENAI_USE_VERTEXAI=true` environment variable.
   - **Using an API Key:** Set the `GOOGLE_API_KEY` environment variable.
   - **Using Application Default Credentials (ADC):**
     - Run `gcloud auth application-default login` in your environment to configure ADC.
     - Ensure the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables are set.
**Example for headless environments:**

If none of these environment variables are set in a non-interactive session, the CLI will exit with an error.
```bash
export OPENAI_API_KEY="your-api-key"
export OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"

# Run Qwen Code
qwen
```

If no API key is set in a non-interactive session, the CLI will exit with an error prompting you to configure authentication.
@@ -27,6 +27,9 @@ Slash commands provide meta-level control over the CLI itself.
    - **Usage:** `/chat resume <tag>`
  - **`list`**
    - **Description:** Lists available tags for chat state resumption.
  - **`delete`**
    - **Description:** Deletes a saved conversation checkpoint.
    - **Usage:** `/chat delete <tag>`

- **`/clear`**
  - **Description:** Clear the terminal screen, including the visible session history and scrollback within the CLI. The underlying session data (for history recall) might be preserved depending on the exact implementation, but the visual display is cleared.

@@ -46,7 +49,18 @@ Slash commands provide meta-level control over the CLI itself.
    - **Usage:** `/directory add <path1>,<path2>`
    - **Note:** Disabled in restrictive sandbox profiles. If you're using that, use `--include-directories` when starting the session instead.
  - **`show`**:
    - **Description:** Display all directories added by `/direcotry add` and `--include-directories`.
    - **Description:** Display all directories added by `/directory add` and `--include-directories`.
    - **Usage:** `/directory show`

- **`/directory`** (or **`/dir`**)
  - **Description:** Manage workspace directories for multi-directory support.
  - **Sub-commands:**
    - **`add`**:
      - **Description:** Add a directory to the workspace. The path can be absolute or relative to the current working directory. Moreover, the reference from home directory is supported as well.
      - **Usage:** `/directory add <path1>,<path2>`
      - **Note:** Disabled in restrictive sandbox profiles. If you're using that, use `--include-directories` when starting the session instead.
    - **`show`**:
      - **Description:** Display all directories added by `/directory add` and `--include-directories`.
      - **Usage:** `/directory show`

- **`/editor`**

@@ -70,15 +84,15 @@ Slash commands provide meta-level control over the CLI itself.
  - **Keyboard Shortcut:** Press **Ctrl+T** at any time to toggle between showing and hiding tool descriptions.

- **`/memory`**
  - **Description:** Manage the AI's instructional context (hierarchical memory loaded from `GEMINI.md` files).
  - **Description:** Manage the AI's instructional context (hierarchical memory loaded from `QWEN.md` files by default; configurable via `contextFileName`).
  - **Sub-commands:**
    - **`add`**:
      - **Description:** Adds the following text to the AI's memory. Usage: `/memory add <text to remember>`
    - **`show`**:
      - **Description:** Display the full, concatenated content of the current hierarchical memory that has been loaded from all `GEMINI.md` files. This lets you inspect the instructional context being provided to the Gemini model.
      - **Description:** Display the full, concatenated content of the current hierarchical memory that has been loaded from all context files (e.g., `QWEN.md`). This lets you inspect the instructional context being provided to the model.
    - **`refresh`**:
      - **Description:** Reload the hierarchical instructional memory from all `GEMINI.md` files found in the configured locations (global, project/ancestors, and sub-directories). This command updates the model with the latest `GEMINI.md` content.
      - **Note:** For more details on how `GEMINI.md` files contribute to hierarchical memory, see the [CLI Configuration documentation](./configuration.md#4-geminimd-files-hierarchical-instructional-context).
      - **Description:** Reload the hierarchical instructional memory from all context files (default: `QWEN.md`) found in the configured locations (global, project/ancestors, and sub-directories). This updates the model with the latest context content.
      - **Note:** For more details on how context files contribute to hierarchical memory, see the [CLI Configuration documentation](./configuration.md#context-files-hierarchical-instructional-context).

- **`/restore`**
  - **Description:** Restores the project files to the state they were in just before a tool was executed. This is particularly useful for undoing file edits made by a tool. If run without a tool call ID, it will list available checkpoints to restore from.

@@ -123,7 +137,7 @@ Slash commands provide meta-level control over the CLI itself.
  - **Status indicator:** When enabled, shows `[NORMAL]` or `[INSERT]` in the footer

- **`/init`**
  - **Description:** To help users easily create a `GEMINI.md` file, this command analyzes the current directory and generates a tailored context file, making it simpler for them to provide project-specific instructions to the Gemini agent.
  - **Description:** Analyzes the current directory and creates a `QWEN.md` context file by default (or the filename specified by `contextFileName`). If a non-empty file already exists, no changes are made. The command seeds an empty file and prompts the model to populate it with project-specific instructions.

### Custom Commands

@@ -253,7 +267,7 @@ Please generate a Conventional Commit message based on the following git diff:

```diff
!{git diff --staged}
````
```

"""

@@ -274,7 +288,7 @@ First, ensure the user commands directory exists, then create a `refactor` subdi

```bash
mkdir -p ~/.gemini/commands/refactor
touch ~/.gemini/commands/refactor/pure.toml
````
```

**2. Add the content to the file:**
@@ -38,8 +38,8 @@ In addition to a project settings file, a project's `.gemini` directory can cont
### Available settings in `settings.json`:

- **`contextFileName`** (string or array of strings):
  - **Description:** Specifies the filename for context files (e.g., `GEMINI.md`, `AGENTS.md`). Can be a single filename or a list of accepted filenames.
  - **Default:** `GEMINI.md`
  - **Description:** Specifies the filename for context files (e.g., `QWEN.md`, `AGENTS.md`). Can be a single filename or a list of accepted filenames.
  - **Default:** `QWEN.md`
  - **Example:** `"contextFileName": "AGENTS.md"`

- **`bugCommand`** (object):

@@ -248,6 +248,31 @@ In addition to a project settings file, a project's `.gemini` directory can cont
    "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"]
    ```

- **`includeDirectories`** (array of strings):
  - **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. This allows you to work with files across multiple directories as if they were one. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag.
  - **Default:** `[]`
  - **Example:**
    ```json
    "includeDirectories": [
      "/path/to/another/project",
      "../shared-library",
      "~/common-utils"
    ]
    ```

- **`loadMemoryFromIncludeDirectories`** (boolean):
  - **Description:** Controls the behavior of the `/memory refresh` command. If set to `true`, `QWEN.md` files should be loaded from all directories that are added. If set to `false`, `QWEN.md` should only be loaded from the current directory.
  - **Default:** `false`
  - **Example:**
    ```json
    "loadMemoryFromIncludeDirectories": true
    ```

- **`tavilyApiKey`** (string):
  - **Description:** API key for Tavily web search service. Required to enable the `web_search` tool functionality. If not configured, the web search tool will be disabled and skipped.
  - **Default:** `undefined` (web search disabled)
  - **Example:** `"tavilyApiKey": "tvly-your-api-key-here"`

### Example `settings.json`:

```json

@@ -256,6 +281,7 @@ In addition to a project settings file, a project's `.gemini` directory can cont
  "sandbox": "docker",
  "toolDiscoveryCommand": "bin/get_tools",
  "toolCallCommand": "bin/call_tool",
  "tavilyApiKey": "$TAVILY_API_KEY",
  "mcpServers": {
    "mainServer": {
      "command": "bin/mcp_server.py"

@@ -280,7 +306,9 @@ In addition to a project settings file, a project's `.gemini` directory can cont
      "tokenBudget": 100
    }
  },
  "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"]
  "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"],
  "includeDirectories": ["path/to/dir1", "~/path/to/dir2", "../path/to/dir3"],
  "loadMemoryFromIncludeDirectories": true
}
```

@@ -351,6 +379,11 @@ The CLI automatically loads environment variables from an `.env` file. The loadi
- **`CODE_ASSIST_ENDPOINT`**:
  - Specifies the endpoint for the code assist server.
  - This is useful for development and testing.
- **`TAVILY_API_KEY`**:
  - Your API key for the Tavily web search service.
  - Required to enable the `web_search` tool functionality.
  - If not configured, the web search tool will be disabled and skipped.
  - Example: `export TAVILY_API_KEY="tvly-your-api-key-here"`

## Command-Line Arguments

@@ -408,6 +441,9 @@ Arguments passed directly when running the CLI can override other configurations
  - Displays the version of the CLI.
- **`--openai-logging`**:
  - Enables logging of OpenAI API calls for debugging and analysis. This flag overrides the `enableOpenAILogging` setting in `settings.json`.
- **`--tavily-api-key <api_key>`**:
  - Sets the Tavily API key for web search functionality for this session.
  - Example: `gemini --tavily-api-key tvly-your-api-key-here`

## Context Files (Hierarchical Instructional Context)

@@ -415,7 +451,7 @@ While not strictly configuration for the CLI's _behavior_, context files (defaul

- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Gemini model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically.

### Example Context File Content (e.g., `GEMINI.md`)
### Example Context File Content (e.g., `QWEN.md`)

Here's a conceptual example of what a context file at the root of a TypeScript project might contain:

@@ -450,9 +486,9 @@ Here's a conceptual example of what a context file at the root of a TypeScript p

This example demonstrates how you can provide general project context, specific coding conventions, and even notes about particular files or components. The more relevant and precise your context files are, the better the AI can assist you. Project-specific context files are highly encouraged to establish conventions and context.

- **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `GEMINI.md`) from several locations. Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is:
- **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `QWEN.md`) from several locations. Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is:
  1. **Global Context File:**
     - Location: `~/.gemini/<contextFileName>` (e.g., `~/.gemini/GEMINI.md` in your user home directory).
     - Location: `~/.qwen/<contextFileName>` (e.g., `~/.qwen/QWEN.md` in your user home directory).
     - Scope: Provides default instructions for all your projects.
  2. **Project Root & Ancestors Context Files:**
     - Location: The CLI searches for the configured context file in the current working directory and then in each parent directory up to either the project root (identified by a `.git` folder) or your home directory.

@@ -523,3 +559,5 @@ You can opt out of usage statistics collection at any time by setting the `usage
  "usageStatisticsEnabled": false
}
```

Note: When usage statistics are enabled, events are sent to an Alibaba Cloud RUM collection endpoint.
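Several of the settings above also have command-line equivalents. Below is a hedged sketch of starting a session that combines them; the exact argument format for `--include-directories` is assumed here (comma-separated, mirroring the `/directory add` usage), so check `qwen --help` if it differs:

```bash
# Sketch only: extra workspace directories plus a session-scoped Tavily key.
export TAVILY_API_KEY="tvly-your-api-key-here"
qwen --include-directories ../shared-library,~/common-utils --tavily-api-key "$TAVILY_API_KEY"
```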
@@ -1,28 +1,28 @@
# Gemini CLI
# Qwen Code CLI

Within Gemini CLI, `packages/cli` is the frontend for users to send and receive prompts with the Gemini AI model and its associated tools. For a general overview of Gemini CLI, see the [main documentation page](../index.md).
Within Qwen Code, `packages/cli` is the frontend for users to send and receive prompts with Qwen and other AI models and their associated tools. For a general overview of Qwen Code, see the [main documentation page](../index.md).

## Navigating this section

- **[Authentication](./authentication.md):** A guide to setting up authentication with Google's AI services.
- **[Commands](./commands.md):** A reference for Gemini CLI commands (e.g., `/help`, `/tools`, `/theme`).
- **[Configuration](./configuration.md):** A guide to tailoring Gemini CLI behavior using configuration files.
- **[Authentication](./authentication.md):** A guide to setting up authentication with Qwen OAuth and OpenAI-compatible providers.
- **[Commands](./commands.md):** A reference for Qwen Code CLI commands (e.g., `/help`, `/tools`, `/theme`).
- **[Configuration](./configuration.md):** A guide to tailoring Qwen Code CLI behavior using configuration files.
- **[Token Caching](./token-caching.md):** Optimize API costs through token caching.
- **[Themes](./themes.md)**: A guide to customizing the CLI's appearance with different themes.
- **[Tutorials](tutorials.md)**: A tutorial showing how to use Gemini CLI to automate a development task.
- **[Tutorials](tutorials.md)**: A tutorial showing how to use Qwen Code to automate a development task.

## Non-interactive mode

Gemini CLI can be run in a non-interactive mode, which is useful for scripting and automation. In this mode, you pipe input to the CLI, it executes the command, and then it exits.
Qwen Code can be run in a non-interactive mode, which is useful for scripting and automation. In this mode, you pipe input to the CLI, it executes the command, and then it exits.

The following example pipes a command to Gemini CLI from your terminal:
The following example pipes a command to Qwen Code from your terminal:

```bash
echo "What is fine tuning?" | gemini
echo "What is fine tuning?" | qwen
```

Gemini CLI executes the command and prints the output to your terminal. Note that you can achieve the same behavior by using the `--prompt` or `-p` flag. For example:
Qwen Code executes the command and prints the output to your terminal. Note that you can achieve the same behavior by using the `--prompt` or `-p` flag. For example:

```bash
gemini -p "What is fine tuning?"
qwen -p "What is fine tuning?"
```
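Non-interactive mode also accepts longer piped input, which is handy in scripts and CI. A small sketch that only relies on the documented piping behavior; the log file name is a placeholder:

```bash
# Sketch only: summarize a build log without opening the interactive UI.
{ echo "Summarize the errors in this log:"; cat build.log; } | qwen
```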
@@ -5,14 +5,14 @@ Gemini CLI's core package (`packages/core`) is the backend portion of Gemini CLI
## Navigating this section

- **[Core tools API](./tools-api.md):** Information on how tools are defined, registered, and used by the core.
- **[Memory Import Processor](./memport.md):** Documentation for the modular GEMINI.md import feature using @file.md syntax.
- **[Memory Import Processor](./memport.md):** Documentation for the modular QWEN.md import feature using @file.md syntax.

## Role of the core

While the `packages/cli` portion of Gemini CLI provides the user interface, `packages/core` is responsible for:

- **Gemini API interaction:** Securely communicating with the Google Gemini API, sending user prompts, and receiving model responses.
- **Prompt engineering:** Constructing effective prompts for the Gemini model, potentially incorporating conversation history, tool definitions, and instructional context from `GEMINI.md` files.
- **Prompt engineering:** Constructing effective prompts for the model, potentially incorporating conversation history, tool definitions, and instructional context from context files (e.g., `QWEN.md`).
- **Tool management & orchestration:**
  - Registering available tools (e.g., file system tools, shell command execution).
  - Interpreting tool use requests from the Gemini model.

@@ -48,8 +48,8 @@ The file discovery service is responsible for finding files in the project that

## Memory discovery service

The memory discovery service is responsible for finding and loading the `GEMINI.md` files that provide context to the model. It searches for these files in a hierarchical manner, starting from the current working directory and moving up to the project root and the user's home directory. It also searches in subdirectories.
The memory discovery service is responsible for finding and loading the context files (default: `QWEN.md`) that provide context to the model. It searches for these files in a hierarchical manner, starting from the current working directory and moving up to the project root and the user's home directory. It also searches in subdirectories.

This allows you to have global, project-level, and component-level context files, which are all combined to provide the model with the most relevant information.

You can use the [`/memory` command](../cli/commands.md) to `show`, `add`, and `refresh` the content of loaded `GEMINI.md` files.
You can use the [`/memory` command](../cli/commands.md) to `show`, `add`, and `refresh` the content of loaded context files.
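The hierarchy the memory discovery service describes (global, then project, then component) is easy to try out. A minimal sketch, assuming the default `QWEN.md` context filename:

```bash
# Sketch only: one global and one project-level context file.
mkdir -p ~/.qwen
echo "Always answer concisely." > ~/.qwen/QWEN.md
echo "This repo is a TypeScript monorepo; prefer npm workspace commands." > QWEN.md
qwen   # inside the CLI, run /memory show to inspect the combined context
```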
@@ -1,17 +1,17 @@
# Memory Import Processor

The Memory Import Processor is a feature that allows you to modularize your GEMINI.md files by importing content from other files using the `@file.md` syntax.
The Memory Import Processor is a feature that allows you to modularize your context files (e.g., `QWEN.md`) by importing content from other files using the `@file.md` syntax.

## Overview

This feature enables you to break down large GEMINI.md files into smaller, more manageable components that can be reused across different contexts. The import processor supports both relative and absolute paths, with built-in safety features to prevent circular imports and ensure file access security.
This feature enables you to break down large context files (e.g., `QWEN.md`) into smaller, more manageable components that can be reused across different contexts. The import processor supports both relative and absolute paths, with built-in safety features to prevent circular imports and ensure file access security.

## Syntax

Use the `@` symbol followed by the path to the file you want to import:

```markdown
# Main GEMINI.md file
# Main QWEN.md file

This is the main content.

@@ -39,7 +39,7 @@ More content here.
### Basic Import

```markdown
# My GEMINI.md
# My QWEN.md

Welcome to my project!

@@ -110,13 +110,13 @@ The import processor uses the `marked` library to detect code blocks and inline

## Import Tree Structure

The processor returns an import tree that shows the hierarchy of imported files, similar to Claude's `/memory` feature. This helps users debug problems with their GEMINI.md files by showing which files were read and their import relationships.
The processor returns an import tree that shows the hierarchy of imported files. This helps users debug problems with their context files by showing which files were read and their import relationships.

Example tree structure:

```
Memory Files
 L project: GEMINI.md
Memory Files
 L project: QWEN.md
  L a.md
   L b.md
    L c.md

@@ -138,7 +138,7 @@ Note: The import tree is mainly for clarity during development and has limited r

### `processImports(content, basePath, debugMode?, importState?)`

Processes import statements in GEMINI.md content.
Processes import statements in context file content.

**Parameters:**
@@ -15,9 +15,11 @@ The Gemini CLI core (`packages/core`) features a robust system for defining, reg
|
||||
- `execute()`: The core method that performs the tool's action and returns a `ToolResult`.
|
||||
|
||||
- **`ToolResult` (`tools.ts`):** An interface defining the structure of a tool's execution outcome:
|
||||
- `llmContent`: The factual string content to be included in the history sent back to the LLM for context.
|
||||
- `llmContent`: The factual content to be included in the history sent back to the LLM for context. This can be a simple string or a `PartListUnion` (an array of `Part` objects and strings) for rich content.
|
||||
- `returnDisplay`: A user-friendly string (often Markdown) or a special object (like `FileDiff`) for display in the CLI.
|
||||
|
||||
- **Returning Rich Content:** Tools are not limited to returning simple text. The `llmContent` can be a `PartListUnion`, which is an array that can contain a mix of `Part` objects (for images, audio, etc.) and `string`s. This allows a single tool execution to return multiple pieces of rich content.
|
||||
|
||||
- **Tool Registry (`tool-registry.ts`):** A class (`ToolRegistry`) responsible for:
|
||||
- **Registering Tools:** Holding a collection of all available built-in tools (e.g., `ReadFileTool`, `ShellTool`).
|
||||
- **Discovering Tools:** It can also discover tools dynamically:
|
||||
|
||||
@@ -28,7 +28,7 @@ The `gemini-extension.json` file contains the configuration for the extension. T
|
||||
"command": "node my-server.js"
|
||||
}
|
||||
},
|
||||
"contextFileName": "GEMINI.md",
|
||||
"contextFileName": "QWEN.md",
|
||||
"excludeTools": ["run_shell_command"]
|
||||
}
|
||||
```
|
||||
@@ -36,7 +36,7 @@ The `gemini-extension.json` file contains the configuration for the extension. T
|
||||
- `name`: The name of the extension. This is used to uniquely identify the extension and for conflict resolution when extension commands have the same name as user or project commands.
|
||||
- `version`: The version of the extension.
|
||||
- `mcpServers`: A map of MCP servers to configure. The key is the name of the server, and the value is the server configuration. These servers will be loaded on startup just like MCP servers configured in a [`settings.json` file](./cli/configuration.md). If both an extension and a `settings.json` file configure an MCP server with the same name, the server defined in the `settings.json` file takes precedence.
|
||||
- `contextFileName`: The name of the file that contains the context for the extension. This will be used to load the context from the workspace. If this property is not used but a `GEMINI.md` file is present in your extension directory, then that file will be loaded.
|
||||
- `contextFileName`: The name of the file that contains the context for the extension. This will be used to load the context from the workspace. If this property is not used but a `QWEN.md` file is present in your extension directory, then that file will be loaded.
|
||||
- `excludeTools`: An array of tool names to exclude from the model. You can also specify command-specific restrictions for tools that support it, like the `run_shell_command` tool. For example, `"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf` command.
|
||||
|
||||
When Gemini CLI starts, it loads all the extensions and merges their configurations. If there are any conflicts, the workspace configuration takes precedence.
|
||||
|
||||
@@ -28,7 +28,7 @@ This documentation is organized into the following sections:
|
||||
- **[Multi-File Read Tool](./tools/multi-file.md):** Documentation for the `read_many_files` tool.
|
||||
- **[Shell Tool](./tools/shell.md):** Documentation for the `run_shell_command` tool.
|
||||
- **[Web Fetch Tool](./tools/web-fetch.md):** Documentation for the `web_fetch` tool.
|
||||
- **[Web Search Tool](./tools/web-search.md):** Documentation for the `google_web_search` tool.
|
||||
- **[Web Search Tool](./tools/web-search.md):** Documentation for the `web_search` tool.
|
||||
- **[Memory Tool](./tools/memory.md):** Documentation for the `save_memory` tool.
|
||||
- **[Contributing & Development Guide](../CONTRIBUTING.md):** Information for contributors and developers, including setup, building, testing, and coding conventions.
|
||||
- **[NPM Workspaces and Publishing](./npm.md):** Details on how the project's packages are managed and published.
|
||||
|
||||
@@ -169,6 +169,7 @@ Use the `/mcp auth` command to manage OAuth authentication:
|
||||
- **`scopes`** (string[]): Required OAuth scopes
|
||||
- **`redirectUri`** (string): Custom redirect URI (defaults to `http://localhost:7777/oauth/callback`)
|
||||
- **`tokenParamName`** (string): Query parameter name for tokens in SSE URLs
|
||||
- **`audiences`** (string[]): Audiences the token is valid for
|
||||
|
||||
#### Token Management
|
||||
|
||||
@@ -571,6 +572,56 @@ The MCP integration tracks several states:
|
||||
|
||||
This comprehensive integration makes MCP servers a powerful way to extend the Gemini CLI's capabilities while maintaining security, reliability, and ease of use.
|
||||
|
||||
## Returning Rich Content from Tools
|
||||
|
||||
MCP tools are not limited to returning simple text. You can return rich, multi-part content, including text, images, audio, and other binary data in a single tool response. This allows you to build powerful tools that can provide diverse information to the model in a single turn.
|
||||
|
||||
All data returned from the tool is processed and sent to the model as context for its next generation, enabling it to reason about or summarize the provided information.
|
||||
|
||||
### How It Works
|
||||
|
||||
To return rich content, your tool's response must adhere to the MCP specification for a [`CallToolResult`](https://modelcontextprotocol.io/specification/2025-06-18/server/tools#tool-result). The `content` field of the result should be an array of `ContentBlock` objects. The Gemini CLI will correctly process this array, separating text from binary data and packaging it for the model.
|
||||
|
||||
You can mix and match different content block types in the `content` array. The supported block types include:
|
||||
|
||||
- `text`
|
||||
- `image`
|
||||
- `audio`
|
||||
- `resource` (embedded content)
|
||||
- `resource_link`
|
||||
|
||||
### Example: Returning Text and an Image
|
||||
|
||||
Here is an example of a valid JSON response from an MCP tool that returns both a text description and an image:
|
||||
|
||||
```json
|
||||
{
|
||||
"content": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": "Here is the logo you requested."
|
||||
},
|
||||
{
|
||||
"type": "image",
|
||||
"data": "BASE64_ENCODED_IMAGE_DATA_HERE",
|
||||
"mimeType": "image/png"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"text": "The logo was created in 2025."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
When the Gemini CLI receives this response, it will:
|
||||
|
||||
1. Extract all the text and combine it into a single `functionResponse` part for the model.
|
||||
2. Present the image data as a separate `inlineData` part.
|
||||
3. Provide a clean, user-friendly summary in the CLI, indicating that both text and an image were received.
|
||||
|
||||
This enables you to build sophisticated tools that can provide rich, multi-modal context to the Gemini model.
|
||||
|
||||
## MCP Prompts as Slash Commands
|
||||
|
||||
In addition to tools, MCP servers can expose predefined prompts that can be executed as slash commands within the Gemini CLI. This allows you to create shortcuts for common or complex queries that can be easily invoked by name.
|
||||
|
||||
@@ -4,7 +4,7 @@ This document describes the `save_memory` tool for the Gemini CLI.
|
||||
|
||||
## Description
|
||||
|
||||
Use `save_memory` to save and recall information across your Gemini CLI sessions. With `save_memory`, you can direct the CLI to remember key details across sessions, providing personalized and directed assistance.
|
||||
Use `save_memory` to save and recall information across your Qwen Code sessions. With `save_memory`, you can direct the CLI to remember key details across sessions, providing personalized and directed assistance.
|
||||
|
||||
### Arguments
|
||||
|
||||
@@ -14,9 +14,9 @@ Use `save_memory` to save and recall information across your Gemini CLI sessions
|
||||
|
||||
## How to use `save_memory` with the Gemini CLI
|
||||
|
||||
The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.gemini/GEMINI.md`). This file can be configured to have a different name.
|
||||
The tool appends the provided `fact` to your context file in the user's home directory (`~/.qwen/QWEN.md` by default). This filename can be configured via `contextFileName`.
|
||||
|
||||
Once added, the facts are stored under a `## Gemini Added Memories` section. This file is loaded as context in subsequent sessions, allowing the CLI to recall the saved information.
|
||||
Once added, the facts are stored under a `## Qwen Added Memories` section. This file is loaded as context in subsequent sessions, allowing the CLI to recall the saved information.
|
||||
|
||||
Usage:
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ Read the main README, all Markdown files in the `docs` directory, and a specific
|
||||
read_many_files(paths=["README.md", "docs/**/*.md", "assets/logo.png"], exclude=["docs/OLD_README.md"])
|
||||
```
|
||||
|
||||
Read all JavaScript files but explicitly including test files and all JPEGs in an `images` folder:
|
||||
Read all JavaScript files but explicitly include test files and all JPEGs in an `images` folder:
|
||||
|
||||
```
|
||||
read_many_files(paths=["**/*.js"], include=["**/*.test.js", "images/**/*.jpg"], useDefaultExcludes=False)
|
||||
|
||||
@@ -137,6 +137,5 @@ To block all shell commands, add the `run_shell_command` wildcard to `excludeToo
|
||||
|
||||
## Security Note for `excludeTools`
|
||||
|
||||
Command-specific restrictions in
|
||||
`excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands
|
||||
Command-specific restrictions in `excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands
|
||||
that can be executed.
|
||||
|
||||
@@ -4,24 +4,25 @@ This document describes the `web_fetch` tool for the Gemini CLI.
|
||||
|
||||
## Description
|
||||
|
||||
Use `web_fetch` to summarize, compare, or extract information from web pages. The `web_fetch` tool processes content from one or more URLs (up to 20) embedded in a prompt. `web_fetch` takes a natural language prompt and returns a generated response.
|
||||
Use `web_fetch` to fetch content from a specified URL and process it using an AI model. The tool takes a URL and a prompt as input, fetches the URL content, converts HTML to markdown, and processes the content with the prompt using a small, fast model.
|
||||
|
||||
### Arguments
|
||||
|
||||
`web_fetch` takes one argument:
|
||||
`web_fetch` takes two arguments:
|
||||
|
||||
- `prompt` (string, required): A comprehensive prompt that includes the URL(s) (up to 20) to fetch and specific instructions on how to process their content. For example: `"Summarize https://example.com/article and extract key points from https://another.com/data"`. The prompt must contain at least one URL starting with `http://` or `https://`.
|
||||
- `url` (string, required): The URL to fetch content from. Must be a fully-formed valid URL starting with `http://` or `https://`.
|
||||
- `prompt` (string, required): The prompt describing what information you want to extract from the page content.
|
||||
|
||||
## How to use `web_fetch` with the Gemini CLI
|
||||
|
||||
To use `web_fetch` with the Gemini CLI, provide a natural language prompt that contains URLs. The tool will ask for confirmation before fetching any URLs. Once confirmed, the tool will process URLs through Gemini API's `urlContext`.
|
||||
To use `web_fetch` with the Gemini CLI, provide a URL and a prompt describing what you want to extract from that URL. The tool will ask for confirmation before fetching the URL. Once confirmed, the tool will fetch the content directly and process it using an AI model.
|
||||
|
||||
If the Gemini API cannot access the URL, the tool will fall back to fetching content directly from the local machine. The tool will format the response, including source attribution and citations where possible. The tool will then provide the response to the user.
|
||||
The tool automatically converts HTML to text, handles GitHub blob URLs (converting them to raw URLs), and upgrades HTTP URLs to HTTPS for security.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
web_fetch(prompt="Your prompt, including a URL such as https://google.com.")
|
||||
web_fetch(url="https://example.com", prompt="Summarize the main points of this article")
|
||||
```
|
||||
|
||||
## `web_fetch` examples
|
||||
@@ -29,16 +30,25 @@ web_fetch(prompt="Your prompt, including a URL such as https://google.com.")
|
||||
Summarize a single article:
|
||||
|
||||
```
|
||||
web_fetch(prompt="Can you summarize the main points of https://example.com/news/latest")
|
||||
web_fetch(url="https://example.com/news/latest", prompt="Can you summarize the main points of this article?")
|
||||
```
|
||||
|
||||
Compare two articles:
|
||||
Extract specific information:
|
||||
|
||||
```
|
||||
web_fetch(prompt="What are the differences in the conclusions of these two papers: https://arxiv.org/abs/2401.0001 and https://arxiv.org/abs/2401.0002?")
|
||||
web_fetch(url="https://arxiv.org/abs/2401.0001", prompt="What are the key findings and methodology described in this paper?")
|
||||
```
|
||||
|
||||
Analyze GitHub documentation:
|
||||
|
||||
```
|
||||
web_fetch(url="https://github.com/google/gemini-react/blob/main/README.md", prompt="What are the installation steps and main features?")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **URL processing:** `web_fetch` relies on the Gemini API's ability to access and process the given URLs.
|
||||
- **Single URL processing:** `web_fetch` processes one URL at a time. To analyze multiple URLs, make separate calls to the tool.
|
||||
- **URL format:** The tool automatically upgrades HTTP URLs to HTTPS and converts GitHub blob URLs to raw format for better content access.
|
||||
- **Content processing:** The tool fetches content directly and processes it using an AI model, converting HTML to readable text format.
|
||||
- **Output quality:** The quality of the output will depend on the clarity of the instructions in the prompt.
|
||||
- **MCP tools:** If an MCP-provided web fetch tool is available (starting with "mcp\_\_"), prefer using that tool as it may have fewer restrictions.
|
||||
|
||||
@@ -1,36 +1,43 @@
|
||||
# Web Search Tool (`google_web_search`)
|
||||
# Web Search Tool (`web_search`)
|
||||
|
||||
This document describes the `google_web_search` tool.
|
||||
This document describes the `web_search` tool.
|
||||
|
||||
## Description
|
||||
|
||||
Use `google_web_search` to perform a web search using Google Search via the Gemini API. The `google_web_search` tool returns a summary of web results with sources.
|
||||
Use `web_search` to perform a web search using the Tavily API. The tool returns a concise answer with sources when possible.
|
||||
|
||||
### Arguments
|
||||
|
||||
`google_web_search` takes one argument:
|
||||
`web_search` takes one argument:
|
||||
|
||||
- `query` (string, required): The search query.
|
||||
|
||||
## How to use `google_web_search` with the Gemini CLI
|
||||
## How to use `web_search`
|
||||
|
||||
The `google_web_search` tool sends a query to the Gemini API, which then performs a web search. `google_web_search` will return a generated response based on the search results, including citations and sources.
|
||||
`web_search` calls the Tavily API directly. You must configure the `TAVILY_API_KEY` through one of the following methods:
|
||||
|
||||
1. **Settings file**: Add `"tavilyApiKey": "your-key-here"` to your `settings.json`
|
||||
2. **Environment variable**: Set `TAVILY_API_KEY` in your environment or `.env` file
|
||||
3. **Command line**: Use `--tavily-api-key your-key-here` when running the CLI
|
||||
|
||||
If the key is not configured, the tool will be disabled and skipped.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
google_web_search(query="Your query goes here.")
|
||||
web_search(query="Your query goes here.")
|
||||
```
|
||||
|
||||
## `google_web_search` examples
|
||||
## `web_search` examples
|
||||
|
||||
Get information on a topic:
|
||||
|
||||
```
|
||||
google_web_search(query="latest advancements in AI-powered code generation")
|
||||
web_search(query="latest advancements in AI-powered code generation")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **Response returned:** The `google_web_search` tool returns a processed summary, not a raw list of search results.
|
||||
- **Citations:** The response includes citations to the sources used to generate the summary.
|
||||
- **Response returned:** The `web_search` tool returns a concise answer when available, with a list of source links.
|
||||
- **Citations:** Source links are appended as a numbered list.
|
||||
- **API key:** Configure `TAVILY_API_KEY` via settings.json, environment variables, .env files, or command line arguments. If not configured, the tool is not registered.
|
||||
|
||||
@@ -1,28 +1,38 @@
|
||||
# Troubleshooting Guide
|
||||
# Troubleshooting guide
|
||||
|
||||
This guide provides solutions to common issues and debugging tips.
|
||||
This guide provides solutions to common issues and debugging tips, including topics on:
|
||||
|
||||
## Authentication
|
||||
- Authentication or login errors
|
||||
- Frequently asked questions (FAQs)
|
||||
- Debugging tips
|
||||
- Existing GitHub Issues similar to yours or creating new Issues
|
||||
|
||||
## Authentication or login errors
|
||||
|
||||
- **Error: `Failed to login. Message: Request contains an invalid argument`**
|
||||
- Users with Google Workspace accounts, or users with Google Cloud accounts
|
||||
- Users with Google Workspace accounts or Google Cloud accounts
|
||||
associated with their Gmail accounts may not be able to activate the free
|
||||
tier of the Google Code Assist plan.
|
||||
- For Google Cloud accounts, you can work around this by setting
|
||||
`GOOGLE_CLOUD_PROJECT` to your project ID.
|
||||
- You can also grab an API key from [AI Studio](https://aistudio.google.com/app/apikey), which also includes a
|
||||
- Alternatively, you can obtain the Gemini API key from
|
||||
[Google AI Studio](http://aistudio.google.com/app/apikey), which also includes a
|
||||
separate free tier.
|
||||
|
||||
## Frequently asked questions (FAQs)
|
||||
|
||||
- **Q: How do I update Gemini CLI to the latest version?**
|
||||
- A: If installed globally via npm, update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`. If run from source, pull the latest changes from the repository and rebuild using `npm run build`.
|
||||
- A: If you installed it globally via `npm`, update it using the command `npm install -g @google/gemini-cli@latest`. If you compiled it from source, pull the latest changes from the repository, and then rebuild using the command `npm run build`.
|
||||
|
||||
- **Q: Where are Gemini CLI configuration files stored?**
|
||||
- A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.gemini/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details.
|
||||
- **Q: Where are the Gemini CLI configuration or settings files stored?**
|
||||
- A: The Gemini CLI configuration is stored in two `settings.json` files:
|
||||
1. In your home directory: `~/.gemini/settings.json`.
|
||||
2. In your project's root directory: `./.gemini/settings.json`.
|
||||
|
||||
Refer to [Gemini CLI Configuration](./cli/configuration.md) for more details.
|
||||
|
||||
- **Q: Why don't I see cached token counts in my stats output?**
|
||||
- A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Vertex AI) but not for OAuth users (Google Personal/Enterprise accounts) at this time, as the Code Assist API does not support cached content creation. You can still view your total token usage with the `/stats` command.
|
||||
- A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Google Cloud Vertex AI) but not for OAuth users (such as Google Personal/Enterprise accounts like Google Gmail or Google Workspace, respectively). This is because the Gemini Code Assist API does not support cached content creation. You can still view your total token usage using the `/stats` command in Gemini CLI.
|
||||
|
||||
## Common error messages and solutions
|
||||
|
||||
@@ -31,26 +41,27 @@ This guide provides solutions to common issues and debugging tips.
|
||||
- **Solution:**
|
||||
Either stop the other process that is using the port or configure the MCP server to use a different port.
|
||||
|
||||
- **Error: Command not found (when attempting to run Gemini CLI).**
|
||||
- **Cause:** Gemini CLI is not correctly installed or not in your system's PATH.
|
||||
- **Error: Command not found (when attempting to run Gemini CLI with `gemini`).**
|
||||
- **Cause:** Gemini CLI is not correctly installed or it is not in your system's `PATH`.
|
||||
- **Solution:**
|
||||
1. Ensure Gemini CLI installation was successful.
|
||||
2. If installed globally, check that your npm global binary directory is in your PATH.
|
||||
3. If running from source, ensure you are using the correct command to invoke it (e.g., `node packages/cli/dist/index.js ...`).
|
||||
The update depends on how you installed Gemini CLI:
|
||||
- If you installed `gemini` globally, check that your `npm` global binary directory is in your `PATH`. You can update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`.
|
||||
- If you are running `gemini` from source, ensure you are using the correct command to invoke it (e.g., `node packages/cli/dist/index.js ...`). To update Gemini CLI, pull the latest changes from the repository, and then rebuild using the command `npm run build`.
|
||||
|
||||
- **Error: `MODULE_NOT_FOUND` or import errors.**
|
||||
- **Cause:** Dependencies are not installed correctly, or the project hasn't been built.
|
||||
- **Solution:**
|
||||
1. Run `npm install` to ensure all dependencies are present.
|
||||
2. Run `npm run build` to compile the project.
|
||||
3. Verify that the build completed successfully with `npm run start`.
|
||||
|
||||
- **Error: "Operation not permitted", "Permission denied", or similar.**
|
||||
- **Cause:** If sandboxing is enabled, then the application is likely attempting an operation restricted by your sandbox, such as writing outside the project directory or system temp directory.
|
||||
- **Solution:** See [Sandboxing](./cli/configuration.md#sandboxing) for more information, including how to customize your sandbox configuration.
|
||||
- **Cause:** When sandboxing is enabled, Gemini CLI may attempt operations that are restricted by your sandbox configuration, such as writing outside the project directory or system temp directory.
|
||||
- **Solution:** Refer to the [Configuration: Sandboxing](./cli/configuration.md#sandboxing) documentation for more information, including how to customize your sandbox configuration.
|
||||
|
||||
- **CLI is not interactive in "CI" environments**
|
||||
- **Issue:** The CLI does not enter interactive mode (no prompt appears) if an environment variable starting with `CI_` (e.g., `CI_TOKEN`) is set. This is because the `is-in-ci` package, used by the underlying UI framework, detects these variables and assumes a non-interactive CI environment.
|
||||
- **Cause:** The `is-in-ci` package checks for the presence of `CI`, `CONTINUOUS_INTEGRATION`, or any environment variable with a `CI_` prefix. When any of these are found, it signals that the environment is non-interactive, which prevents the CLI from starting in its interactive mode.
|
||||
- **Gemini CLI is not running in interactive mode in "CI" environments**
|
||||
- **Issue:** The Gemini CLI does not enter interactive mode (no prompt appears) if an environment variable starting with `CI_` (e.g., `CI_TOKEN`) is set. This is because the `is-in-ci` package, used by the underlying UI framework, detects these variables and assumes a non-interactive CI environment.
|
||||
- **Cause:** The `is-in-ci` package checks for the presence of `CI`, `CONTINUOUS_INTEGRATION`, or any environment variable with a `CI_` prefix. When any of these are found, it signals that the environment is non-interactive, which prevents the Gemini CLI from starting in its interactive mode.
|
||||
- **Solution:** If the `CI_` prefixed variable is not needed for the CLI to function, you can temporarily unset it for the command. e.g., `env -u CI_TOKEN gemini`
|
||||
|
||||
- **DEBUG mode not working from project .env file**
|
||||
@@ -72,9 +83,11 @@ This guide provides solutions to common issues and debugging tips.
|
||||
- **Tool issues:**
|
||||
- If a specific tool is failing, try to isolate the issue by running the simplest possible version of the command or operation the tool performs.
|
||||
- For `run_shell_command`, check that the command works directly in your shell first.
|
||||
- For file system tools, double-check paths and permissions.
|
||||
- For _file system tools_, verify that paths are correct and check the permissions.
|
||||
|
||||
- **Pre-flight checks:**
|
||||
- Always run `npm run preflight` before committing code. This can catch many common issues related to formatting, linting, and type errors.
|
||||
|
||||
If you encounter an issue not covered here, consider searching the project's issue tracker on GitHub or reporting a new issue with detailed information.
|
||||
## Existing GitHub Issues similar to yours or creating new Issues
|
||||
|
||||
If you encounter an issue that was not covered here in this _Troubleshooting guide_, consider searching the Gemini CLI [Issue tracker on GitHub](https://github.com/google-gemini/gemini-cli/issues). If you can't find an issue similar to yours, consider creating a new GitHub Issue with a detailed description. Pull requests are also welcome!
|
||||
|
||||
@@ -151,24 +151,6 @@ export default tseslint.config(
|
||||
'default-case': 'error',
|
||||
},
|
||||
},
|
||||
{
|
||||
files: ['./**/*.{tsx,ts,js}'],
|
||||
plugins: {
|
||||
'license-header': licenseHeader,
|
||||
},
|
||||
rules: {
|
||||
'license-header/header': [
|
||||
'error',
|
||||
[
|
||||
'/**',
|
||||
' * @license',
|
||||
' * Copyright 2025 Google LLC',
|
||||
' * SPDX-License-Identifier: Apache-2.0',
|
||||
' */',
|
||||
],
|
||||
],
|
||||
},
|
||||
},
|
||||
// extra settings for scripts that we run directly with node
|
||||
{
|
||||
files: ['./scripts/**/*.js', 'esbuild.config.js'],
|
||||
|
||||
@@ -9,6 +9,11 @@ import { strict as assert } from 'assert';
|
||||
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';
|
||||
|
||||
test('should be able to search the web', async () => {
|
||||
// Skip if Tavily key is not configured
|
||||
if (!process.env.TAVILY_API_KEY) {
|
||||
console.warn('Skipping web search test: TAVILY_API_KEY not set');
|
||||
return;
|
||||
}
|
||||
const rig = new TestRig();
|
||||
await rig.setup('should be able to search the web');
|
||||
|
||||
@@ -27,7 +32,7 @@ test('should be able to search the web', async () => {
|
||||
throw error; // Re-throw if not a network error
|
||||
}
|
||||
|
||||
const foundToolCall = await rig.waitForToolCall('google_web_search');
|
||||
const foundToolCall = await rig.waitForToolCall('web_search');
|
||||
|
||||
// Add debugging information
|
||||
if (!foundToolCall) {
|
||||
@@ -35,12 +40,11 @@ test('should be able to search the web', async () => {
|
||||
|
||||
// Check if the tool call failed due to network issues
|
||||
const failedSearchCalls = allTools.filter(
|
||||
(t) =>
|
||||
t.toolRequest.name === 'google_web_search' && !t.toolRequest.success,
|
||||
(t) => t.toolRequest.name === 'web_search' && !t.toolRequest.success,
|
||||
);
|
||||
if (failedSearchCalls.length > 0) {
|
||||
console.warn(
|
||||
'google_web_search tool was called but failed, possibly due to network issues',
|
||||
'web_search tool was called but failed, possibly due to network issues',
|
||||
);
|
||||
console.warn(
|
||||
'Failed calls:',
|
||||
@@ -50,20 +54,20 @@ test('should be able to search the web', async () => {
|
||||
}
|
||||
}
|
||||
|
||||
assert.ok(foundToolCall, 'Expected to find a call to google_web_search');
|
||||
assert.ok(foundToolCall, 'Expected to find a call to web_search');
|
||||
|
||||
// Validate model output - will throw if no output, warn if missing expected content
|
||||
const hasExpectedContent = validateModelOutput(
|
||||
result,
|
||||
['weather', 'london'],
|
||||
'Google web search test',
|
||||
'Web search test',
|
||||
);
|
||||
|
||||
// If content was missing, log the search queries used
|
||||
if (!hasExpectedContent) {
|
||||
const searchCalls = rig
|
||||
.readToolLogs()
|
||||
.filter((t) => t.toolRequest.name === 'google_web_search');
|
||||
.filter((t) => t.toolRequest.name === 'web_search');
|
||||
if (searchCalls.length > 0) {
|
||||
console.warn(
|
||||
'Search queries used:',
|
||||
2709
package-lock.json
generated
2709
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.5-nightly.3",
|
||||
"version": "0.0.7",
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
},
|
||||
@@ -13,7 +13,7 @@
|
||||
"url": "git+https://github.com/QwenLM/qwen-code.git"
|
||||
},
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.5-nightly.3"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.7"
|
||||
},
|
||||
"scripts": {
|
||||
"start": "node scripts/start.js",
|
||||
@@ -62,6 +62,7 @@
|
||||
"@types/mime-types": "^3.0.1",
|
||||
"@types/minimatch": "^5.1.2",
|
||||
"@types/mock-fs": "^4.13.4",
|
||||
"@types/qrcode-terminal": "^0.12.2",
|
||||
"@types/shell-quote": "^1.7.5",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@vitest/coverage-v8": "^3.1.1",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.5-nightly.3",
|
||||
"version": "0.0.7",
|
||||
"description": "Qwen Code",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -25,12 +25,12 @@
|
||||
"dist"
|
||||
],
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.5-nightly.3"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.7"
|
||||
},
|
||||
"dependencies": {
|
||||
"@qwen-code/qwen-code-core": "file:../core",
|
||||
"@google/genai": "1.9.0",
|
||||
"@iarna/toml": "^2.2.5",
|
||||
"@qwen-code/qwen-code-core": "file:../core",
|
||||
"@types/update-notifier": "^6.0.8",
|
||||
"command-exists": "^1.2.9",
|
||||
"diff": "^7.0.0",
|
||||
@@ -46,16 +46,17 @@
|
||||
"lowlight": "^3.3.0",
|
||||
"mime-types": "^3.0.1",
|
||||
"open": "^10.1.2",
|
||||
"qrcode-terminal": "^0.12.0",
|
||||
"react": "^19.1.0",
|
||||
"read-package-up": "^11.0.0",
|
||||
"shell-quote": "^1.8.3",
|
||||
"string-width": "^7.1.0",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"strip-json-comments": "^3.1.1",
|
||||
"tiktoken": "^1.0.21",
|
||||
"update-notifier": "^7.3.1",
|
||||
"yargs": "^17.7.2",
|
||||
"zod": "^3.23.8",
|
||||
"tiktoken": "^1.0.21"
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/runtime": "^7.27.6",
|
||||
@@ -74,7 +75,8 @@
|
||||
"pretty-format": "^30.0.2",
|
||||
"react-dom": "^19.1.0",
|
||||
"typescript": "^5.3.3",
|
||||
"vitest": "^3.1.1"
|
||||
"vitest": "^3.1.1",
|
||||
"@qwen-code/qwen-code-test-utils": "file:../test-utils"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
|
||||
@@ -45,6 +45,12 @@ export const validateAuthMethod = (authMethod: string): string | null => {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (authMethod === AuthType.QWEN_OAUTH) {
|
||||
// Qwen OAuth doesn't require any environment variables for basic setup
|
||||
// The OAuth flow will handle authentication
|
||||
return null;
|
||||
}
|
||||
|
||||
return 'Invalid auth method selected.';
|
||||
};
|
||||
|
||||
|
||||
@@ -6,7 +6,9 @@
|
||||
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import * as os from 'os';
|
||||
import { loadCliConfig, parseArguments, CliArgs } from './config.js';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import { loadCliConfig, parseArguments } from './config.js';
|
||||
import { Settings } from './settings.js';
|
||||
import { Extension } from './extension.js';
|
||||
import * as ServerConfig from '@qwen-code/qwen-code-core';
|
||||
@@ -44,7 +46,7 @@ vi.mock('@qwen-code/qwen-code-core', async () => {
|
||||
},
|
||||
loadEnvironment: vi.fn(),
|
||||
loadServerHierarchicalMemory: vi.fn(
|
||||
(cwd, debug, fileService, extensionPaths, _maxDirs) =>
|
||||
(cwd, dirs, debug, fileService, extensionPaths, _maxDirs) =>
|
||||
Promise.resolve({
|
||||
memoryContent: extensionPaths?.join(',') || '',
|
||||
fileCount: extensionPaths?.length || 0,
|
||||
@@ -499,6 +501,7 @@ describe('Hierarchical Memory Loading (config.ts) - Placeholder Suite', () => {
|
||||
await loadCliConfig(settings, extensions, 'session-id', argv);
|
||||
expect(ServerConfig.loadServerHierarchicalMemory).toHaveBeenCalledWith(
|
||||
expect.any(String),
|
||||
[],
|
||||
false,
|
||||
expect.any(Object),
|
||||
[
|
||||
@@ -1078,14 +1081,86 @@ describe('loadCliConfig ideModeFeature', () => {
|
||||
const config = await loadCliConfig(settings, [], 'test-session', argv);
|
||||
expect(config.getIdeModeFeature()).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
it('should be false when settings.ideModeFeature is true, but SANDBOX is set', async () => {
|
||||
process.argv = ['node', 'script.js'];
|
||||
vi.mock('fs', async () => {
|
||||
const actualFs = await vi.importActual<typeof fs>('fs');
|
||||
const MOCK_CWD1 = process.cwd();
|
||||
const MOCK_CWD2 = path.resolve(path.sep, 'home', 'user', 'project');
|
||||
|
||||
const mockPaths = new Set([
|
||||
MOCK_CWD1,
|
||||
MOCK_CWD2,
|
||||
path.resolve(path.sep, 'cli', 'path1'),
|
||||
path.resolve(path.sep, 'settings', 'path1'),
|
||||
path.join(os.homedir(), 'settings', 'path2'),
|
||||
path.join(MOCK_CWD2, 'cli', 'path2'),
|
||||
path.join(MOCK_CWD2, 'settings', 'path3'),
|
||||
]);
|
||||
|
||||
return {
|
||||
...actualFs,
|
||||
existsSync: vi.fn((p) => mockPaths.has(p.toString())),
|
||||
statSync: vi.fn((p) => {
|
||||
if (mockPaths.has(p.toString())) {
|
||||
return { isDirectory: () => true };
|
||||
}
|
||||
// Fallback for other paths if needed, though the test should be specific.
|
||||
return actualFs.statSync(p);
|
||||
}),
|
||||
realpathSync: vi.fn((p) => p),
|
||||
};
|
||||
});
|
||||
|
||||
describe('loadCliConfig with includeDirectories', () => {
|
||||
const originalArgv = process.argv;
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
vi.mocked(os.homedir).mockReturnValue('/mock/home/user');
|
||||
process.env.GEMINI_API_KEY = 'test-api-key';
|
||||
vi.spyOn(process, 'cwd').mockReturnValue(
|
||||
path.resolve(path.sep, 'home', 'user', 'project'),
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.argv = originalArgv;
|
||||
process.env = originalEnv;
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('should combine and resolve paths from settings and CLI arguments', async () => {
|
||||
const mockCwd = path.resolve(path.sep, 'home', 'user', 'project');
|
||||
process.argv = [
|
||||
'node',
|
||||
'script.js',
|
||||
'--include-directories',
|
||||
`${path.resolve(path.sep, 'cli', 'path1')},${path.join(mockCwd, 'cli', 'path2')}`,
|
||||
];
|
||||
const argv = await parseArguments();
|
||||
process.env.TERM_PROGRAM = 'vscode';
|
||||
process.env.SANDBOX = 'true';
|
||||
const settings: Settings = { ideModeFeature: true };
|
||||
const settings: Settings = {
|
||||
includeDirectories: [
|
||||
path.resolve(path.sep, 'settings', 'path1'),
|
||||
path.join(os.homedir(), 'settings', 'path2'),
|
||||
path.join(mockCwd, 'settings', 'path3'),
|
||||
],
|
||||
};
|
||||
const config = await loadCliConfig(settings, [], 'test-session', argv);
|
||||
expect(config.getIdeModeFeature()).toBe(false);
|
||||
const expected = [
|
||||
mockCwd,
|
||||
path.resolve(path.sep, 'cli', 'path1'),
|
||||
path.join(mockCwd, 'cli', 'path2'),
|
||||
path.resolve(path.sep, 'settings', 'path1'),
|
||||
path.join(os.homedir(), 'settings', 'path2'),
|
||||
path.join(mockCwd, 'settings', 'path3'),
|
||||
];
|
||||
expect(config.getWorkspaceContext().getDirectories()).toEqual(
|
||||
expect.arrayContaining(expected),
|
||||
);
|
||||
expect(config.getWorkspaceContext().getDirectories()).toHaveLength(
|
||||
expected.length,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -22,13 +22,13 @@ import {
|
||||
FileDiscoveryService,
|
||||
TelemetryTarget,
|
||||
FileFilteringOptions,
|
||||
IdeClient,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import { Settings } from './settings.js';
|
||||
|
||||
import { Extension, annotateActiveExtensions } from './extension.js';
|
||||
import { getCliVersion } from '../utils/version.js';
|
||||
import { loadSandboxConfig } from './sandboxConfig.js';
|
||||
import { resolvePath } from '../utils/resolvePath.js';
|
||||
|
||||
// Simple console logger for now - replace with actual logger if available
|
||||
const logger = {
|
||||
@@ -68,6 +68,8 @@ export interface CliArgs {
|
||||
openaiBaseUrl: string | undefined;
|
||||
proxy: string | undefined;
|
||||
includeDirectories: string[] | undefined;
|
||||
loadMemoryFromIncludeDirectories: boolean | undefined;
|
||||
tavilyApiKey: string | undefined;
|
||||
}
|
||||
|
||||
export async function parseArguments(): Promise<CliArgs> {
|
||||
@@ -214,6 +216,10 @@ export async function parseArguments(): Promise<CliArgs> {
|
||||
type: 'string',
|
||||
description: 'OpenAI base URL (for custom endpoints)',
|
||||
})
|
||||
.option('tavily-api-key', {
|
||||
type: 'string',
|
||||
description: 'Tavily API key for web search functionality',
|
||||
})
|
||||
.option('proxy', {
|
||||
type: 'string',
|
||||
description:
|
||||
@@ -228,6 +234,12 @@ export async function parseArguments(): Promise<CliArgs> {
|
||||
// Handle comma-separated values
|
||||
dirs.flatMap((dir) => dir.split(',').map((d) => d.trim())),
|
||||
})
|
||||
.option('load-memory-from-include-directories', {
|
||||
type: 'boolean',
|
||||
description:
|
||||
'If true, when refreshing memory, QWEN.md files should be loaded from all directories that are added. If false, QWEN.md files should only be loaded from the primary working directory.',
|
||||
default: false,
|
||||
})
|
||||
.version(await getCliVersion()) // This will enable the --version flag based on package.json
|
||||
.alias('v', 'version')
|
||||
.help()
|
||||
@@ -255,6 +267,7 @@ export async function parseArguments(): Promise<CliArgs> {
|
||||
// TODO: Consider if App.tsx should get memory via a server call or if Config should refresh itself.
|
||||
export async function loadHierarchicalGeminiMemory(
|
||||
currentWorkingDirectory: string,
|
||||
includeDirectoriesToReadGemini: readonly string[] = [],
|
||||
debugMode: boolean,
|
||||
fileService: FileDiscoveryService,
|
||||
settings: Settings,
|
||||
@@ -280,6 +293,7 @@ export async function loadHierarchicalGeminiMemory(
|
||||
// Directly call the server function with the corrected path.
|
||||
return loadServerHierarchicalMemory(
|
||||
effectiveCwd,
|
||||
includeDirectoriesToReadGemini,
|
||||
debugMode,
|
||||
fileService,
|
||||
extensionContextFilePaths,
|
||||
@@ -302,13 +316,10 @@ export async function loadCliConfig(
|
||||
) ||
|
||||
false;
|
||||
const memoryImportFormat = settings.memoryImportFormat || 'tree';
|
||||
|
||||
const ideMode = settings.ideMode ?? false;
|
||||
|
||||
const ideModeFeature =
|
||||
(argv.ideModeFeature ?? settings.ideModeFeature ?? false) &&
|
||||
!process.env.SANDBOX;
|
||||
|
||||
const ideClient = IdeClient.getInstance(ideMode && ideModeFeature);
|
||||
argv.ideModeFeature ?? settings.ideModeFeature ?? false;
|
||||
|
||||
const allExtensions = annotateActiveExtensions(
|
||||
extensions,
|
||||
@@ -328,6 +339,11 @@ export async function loadCliConfig(
|
||||
process.env.OPENAI_BASE_URL = argv.openaiBaseUrl;
|
||||
}
|
||||
|
||||
// Handle Tavily API key from command line
|
||||
if (argv.tavilyApiKey) {
|
||||
process.env.TAVILY_API_KEY = argv.tavilyApiKey;
|
||||
}
|
||||
|
||||
// Set the context filename in the server's memoryTool module BEFORE loading memory
|
||||
// TODO(b/343434939): This is a bit of a hack. The contextFileName should ideally be passed
|
||||
// directly to the Config constructor in core, and have core handle setGeminiMdFilename.
|
||||
@@ -350,9 +366,14 @@ export async function loadCliConfig(
|
||||
...settings.fileFiltering,
|
||||
};
|
||||
|
||||
const includeDirectories = (settings.includeDirectories || [])
|
||||
.map(resolvePath)
|
||||
.concat((argv.includeDirectories || []).map(resolvePath));
|
||||
|
||||
// Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version
|
||||
const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
|
||||
process.cwd(),
|
||||
settings.loadMemoryFromIncludeDirectories ? includeDirectories : [],
|
||||
debugMode,
|
||||
fileService,
|
||||
settings,
|
||||
@@ -412,13 +433,18 @@ export async function loadCliConfig(
|
||||
}
|
||||
|
||||
const sandboxConfig = await loadSandboxConfig(settings, argv);
|
||||
const cliVersion = await getCliVersion();
|
||||
|
||||
return new Config({
|
||||
sessionId,
|
||||
embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
|
||||
sandbox: sandboxConfig,
|
||||
targetDir: process.cwd(),
|
||||
includeDirectories: argv.includeDirectories,
|
||||
includeDirectories,
|
||||
loadMemoryFromIncludeDirectories:
|
||||
argv.loadMemoryFromIncludeDirectories ||
|
||||
settings.loadMemoryFromIncludeDirectories ||
|
||||
false,
|
||||
debugMode,
|
||||
question: argv.promptInteractive || argv.prompt || '',
|
||||
fullContext: argv.allFiles || argv.all_files || false,
|
||||
@@ -469,7 +495,7 @@ export async function loadCliConfig(
|
||||
model: argv.model || settings.model || DEFAULT_GEMINI_MODEL,
|
||||
extensionContextFilePaths,
|
||||
maxSessionTurns: settings.maxSessionTurns ?? -1,
|
||||
sessionTokenLimit: settings.sessionTokenLimit ?? 32000,
|
||||
sessionTokenLimit: settings.sessionTokenLimit ?? -1,
|
||||
maxFolderItems: settings.maxFolderItems ?? 20,
|
||||
experimentalAcp: argv.experimentalAcp || false,
|
||||
listExtensions: argv.listExtensions || false,
|
||||
@@ -479,7 +505,6 @@ export async function loadCliConfig(
|
||||
summarizeToolOutput: settings.summarizeToolOutput,
|
||||
ideMode,
|
||||
ideModeFeature,
|
||||
ideClient,
|
||||
enableOpenAILogging:
|
||||
(typeof argv.openaiLogging === 'undefined'
|
||||
? settings.enableOpenAILogging
|
||||
@@ -497,6 +522,9 @@ export async function loadCliConfig(
|
||||
},
|
||||
],
|
||||
contentGenerator: settings.contentGenerator,
|
||||
cliVersion,
|
||||
tavilyApiKey:
|
||||
argv.tavilyApiKey || settings.tavilyApiKey || process.env.TAVILY_API_KEY,
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -112,6 +112,7 @@ describe('Settings Loading and Merging', () => {
|
||||
expect(settings.merged).toEqual({
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
expect(settings.errors.length).toBe(0);
|
||||
});
|
||||
@@ -145,6 +146,7 @@ describe('Settings Loading and Merging', () => {
|
||||
...systemSettingsContent,
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
});
|
||||
|
||||
@@ -178,6 +180,7 @@ describe('Settings Loading and Merging', () => {
|
||||
...userSettingsContent,
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
});
|
||||
|
||||
@@ -209,6 +212,7 @@ describe('Settings Loading and Merging', () => {
|
||||
...workspaceSettingsContent,
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
});
|
||||
|
||||
@@ -246,6 +250,7 @@ describe('Settings Loading and Merging', () => {
|
||||
contextFileName: 'WORKSPACE_CONTEXT.md',
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
});
|
||||
|
||||
@@ -295,6 +300,7 @@ describe('Settings Loading and Merging', () => {
|
||||
allowMCPServers: ['server1', 'server2'],
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
});
|
||||
|
||||
@@ -616,6 +622,40 @@ describe('Settings Loading and Merging', () => {
|
||||
expect(settings.merged.mcpServers).toEqual({});
|
||||
});
|
||||
|
||||
it('should merge includeDirectories from all scopes', () => {
|
||||
(mockFsExistsSync as Mock).mockReturnValue(true);
|
||||
const systemSettingsContent = {
|
||||
includeDirectories: ['/system/dir'],
|
||||
};
|
||||
const userSettingsContent = {
|
||||
includeDirectories: ['/user/dir1', '/user/dir2'],
|
||||
};
|
||||
const workspaceSettingsContent = {
|
||||
includeDirectories: ['/workspace/dir'],
|
||||
};
|
||||
|
||||
(fs.readFileSync as Mock).mockImplementation(
|
||||
(p: fs.PathOrFileDescriptor) => {
|
||||
if (p === getSystemSettingsPath())
|
||||
return JSON.stringify(systemSettingsContent);
|
||||
if (p === USER_SETTINGS_PATH)
|
||||
return JSON.stringify(userSettingsContent);
|
||||
if (p === MOCK_WORKSPACE_SETTINGS_PATH)
|
||||
return JSON.stringify(workspaceSettingsContent);
|
||||
return '{}';
|
||||
},
|
||||
);
|
||||
|
||||
const settings = loadSettings(MOCK_WORKSPACE_DIR);
|
||||
|
||||
expect(settings.merged.includeDirectories).toEqual([
|
||||
'/system/dir',
|
||||
'/user/dir1',
|
||||
'/user/dir2',
|
||||
'/workspace/dir',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle JSON parsing errors gracefully', () => {
|
||||
(mockFsExistsSync as Mock).mockReturnValue(true); // Both files "exist"
|
||||
const invalidJsonContent = 'invalid json';
|
||||
@@ -654,6 +694,7 @@ describe('Settings Loading and Merging', () => {
|
||||
expect(settings.merged).toEqual({
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
|
||||
// Check that error objects are populated in settings.errors
|
||||
@@ -1090,6 +1131,7 @@ describe('Settings Loading and Merging', () => {
|
||||
...systemSettingsContent,
|
||||
customThemes: {},
|
||||
mcpServers: {},
|
||||
includeDirectories: [],
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -132,6 +132,7 @@ export interface Settings {
|
||||
// Environment variables to exclude from project .env files
|
||||
excludedProjectEnvVars?: string[];
|
||||
dnsResolutionOrder?: DnsResolutionOrder;
|
||||
|
||||
sampling_params?: Record<string, unknown>;
|
||||
systemPromptMappings?: Array<{
|
||||
baseUrls: string[];
|
||||
@@ -142,6 +143,13 @@ export interface Settings {
|
||||
timeout?: number;
|
||||
maxRetries?: number;
|
||||
};
|
||||
|
||||
includeDirectories?: string[];
|
||||
|
||||
loadMemoryFromIncludeDirectories?: boolean;
|
||||
|
||||
// Web search API keys
|
||||
tavilyApiKey?: string;
|
||||
}
|
||||
|
||||
export interface SettingsError {
|
||||
@@ -197,6 +205,11 @@ export class LoadedSettings {
|
||||
...(workspace.mcpServers || {}),
|
||||
...(system.mcpServers || {}),
|
||||
},
|
||||
includeDirectories: [
|
||||
...(system.includeDirectories || []),
|
||||
...(user.includeDirectories || []),
|
||||
...(workspace.includeDirectories || []),
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
@@ -387,7 +400,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
|
||||
const settingsErrors: SettingsError[] = [];
|
||||
const systemSettingsPath = getSystemSettingsPath();
|
||||
|
||||
// FIX: Resolve paths to their canonical representation to handle symlinks
|
||||
// Resolve paths to their canonical representation to handle symlinks
|
||||
const resolvedWorkspaceDir = path.resolve(workspaceDir);
|
||||
const resolvedHomeDir = path.resolve(homedir());
|
||||
|
||||
@@ -442,7 +455,6 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
|
||||
});
|
||||
}
|
||||
|
||||
// This comparison is now much more reliable.
|
||||
if (realWorkspaceDir !== realHomeDir) {
|
||||
// Load workspace settings
|
||||
try {
|
||||
|
||||
@@ -70,6 +70,7 @@ describe('runNonInteractive', () => {
|
||||
getIdeMode: vi.fn().mockReturnValue(false),
|
||||
getFullContext: vi.fn().mockReturnValue(false),
|
||||
getContentGeneratorConfig: vi.fn().mockReturnValue({}),
|
||||
getDebugMode: vi.fn().mockReturnValue(false),
|
||||
} as unknown as Config;
|
||||
});
|
||||
|
||||
|
||||
@@ -17,28 +17,37 @@ import {
|
||||
import { Content, Part, FunctionCall } from '@google/genai';
|
||||
|
||||
import { parseAndFormatApiError } from './ui/utils/errorParsing.js';
|
||||
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';
|
||||
|
||||
export async function runNonInteractive(
|
||||
config: Config,
|
||||
input: string,
|
||||
prompt_id: string,
|
||||
): Promise<void> {
|
||||
await config.initialize();
|
||||
// Handle EPIPE errors when the output is piped to a command that closes early.
|
||||
process.stdout.on('error', (err: NodeJS.ErrnoException) => {
|
||||
if (err.code === 'EPIPE') {
|
||||
// Exit gracefully if the pipe is closed.
|
||||
process.exit(0);
|
||||
}
|
||||
const consolePatcher = new ConsolePatcher({
|
||||
stderr: true,
|
||||
debugMode: config.getDebugMode(),
|
||||
});
|
||||
|
||||
const geminiClient = config.getGeminiClient();
|
||||
const toolRegistry: ToolRegistry = await config.getToolRegistry();
|
||||
|
||||
const abortController = new AbortController();
|
||||
let currentMessages: Content[] = [{ role: 'user', parts: [{ text: input }] }];
|
||||
let turnCount = 0;
|
||||
try {
|
||||
await config.initialize();
|
||||
consolePatcher.patch();
|
||||
// Handle EPIPE errors when the output is piped to a command that closes early.
|
||||
process.stdout.on('error', (err: NodeJS.ErrnoException) => {
|
||||
if (err.code === 'EPIPE') {
|
||||
// Exit gracefully if the pipe is closed.
|
||||
process.exit(0);
|
||||
}
|
||||
});
|
||||
|
||||
const geminiClient = config.getGeminiClient();
|
||||
const toolRegistry: ToolRegistry = await config.getToolRegistry();
|
||||
|
||||
const abortController = new AbortController();
|
||||
let currentMessages: Content[] = [
|
||||
{ role: 'user', parts: [{ text: input }] },
|
||||
];
|
||||
let turnCount = 0;
|
||||
while (true) {
|
||||
turnCount++;
|
||||
if (
|
||||
@@ -133,6 +142,7 @@ export async function runNonInteractive(
|
||||
);
|
||||
process.exit(1);
|
||||
} finally {
|
||||
consolePatcher.cleanup();
|
||||
if (isTelemetrySdkInitialized()) {
|
||||
await shutdownTelemetry();
|
||||
}
|
||||
|
||||
@@ -148,7 +148,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
getUserTier: vi.fn(),
|
||||
})),
|
||||
getCheckpointingEnabled: vi.fn(() => opts.checkpointing ?? true),
|
||||
getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']),
|
||||
getAllGeminiMdFilenames: vi.fn(() => ['QWEN.md']),
|
||||
setFlashFallbackHandler: vi.fn(),
|
||||
getSessionId: vi.fn(() => 'test-session-id'),
|
||||
getUserTier: vi.fn().mockResolvedValue(undefined),
|
||||
@@ -169,7 +169,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
...actualCore,
|
||||
Config: ConfigClassMock,
|
||||
MCPServerConfig: actualCore.MCPServerConfig,
|
||||
getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']),
|
||||
getAllGeminiMdFilenames: vi.fn(() => ['QWEN.md']),
|
||||
ideContext: ideContextMock,
|
||||
isGitRepository: vi.fn(),
|
||||
};
|
||||
@@ -577,7 +577,7 @@ describe('App UI', () => {
|
||||
},
|
||||
});
|
||||
mockConfig.getGeminiMdFileCount.mockReturnValue(1);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['QWEN.md']);
|
||||
|
||||
const { lastFrame, unmount } = render(
|
||||
<App
|
||||
@@ -589,13 +589,13 @@ describe('App UI', () => {
|
||||
currentUnmount = unmount;
|
||||
await Promise.resolve();
|
||||
expect(lastFrame()).toContain(
|
||||
'Using: 1 open file (ctrl+e to view) | 1 GEMINI.md file',
|
||||
'Using: 1 open file (ctrl+e to view) | 1 QWEN.md file',
|
||||
);
|
||||
});
|
||||
|
||||
it('should display default "GEMINI.md" in footer when contextFileName is not set and count is 1', async () => {
|
||||
it('should display default "QWEN.md" in footer when contextFileName is not set and count is 1', async () => {
|
||||
mockConfig.getGeminiMdFileCount.mockReturnValue(1);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['QWEN.md']);
|
||||
// For this test, ensure showMemoryUsage is false or debugMode is false if it relies on that
|
||||
mockConfig.getDebugMode.mockReturnValue(false);
|
||||
mockConfig.getShowMemoryUsage.mockReturnValue(false);
|
||||
@@ -609,15 +609,12 @@ describe('App UI', () => {
|
||||
);
|
||||
currentUnmount = unmount;
|
||||
await Promise.resolve(); // Wait for any async updates
|
||||
expect(lastFrame()).toContain('Using: 1 GEMINI.md file');
|
||||
expect(lastFrame()).toContain('Using: 1 QWEN.md file');
|
||||
});
|
||||
|
||||
it('should display default "GEMINI.md" with plural when contextFileName is not set and count is > 1', async () => {
|
||||
it('should display default "QWEN.md" with plural when contextFileName is not set and count is > 1', async () => {
|
||||
mockConfig.getGeminiMdFileCount.mockReturnValue(2);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue([
|
||||
'GEMINI.md',
|
||||
'GEMINI.md',
|
||||
]);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['QWEN.md', 'QWEN.md']);
|
||||
mockConfig.getDebugMode.mockReturnValue(false);
|
||||
mockConfig.getShowMemoryUsage.mockReturnValue(false);
|
||||
|
||||
@@ -630,7 +627,7 @@ describe('App UI', () => {
|
||||
);
|
||||
currentUnmount = unmount;
|
||||
await Promise.resolve();
|
||||
expect(lastFrame()).toContain('Using: 2 GEMINI.md files');
|
||||
expect(lastFrame()).toContain('Using: 2 QWEN.md files');
|
||||
});
|
||||
|
||||
it('should display custom contextFileName in footer when set and count is 1', async () => {
|
||||
@@ -727,12 +724,9 @@ describe('App UI', () => {
|
||||
expect(lastFrame()).not.toContain('ANY_FILE.MD');
|
||||
});
|
||||
|
||||
it('should display GEMINI.md and MCP server count when both are present', async () => {
|
||||
it('should display QWEN.md and MCP server count when both are present', async () => {
|
||||
mockConfig.getGeminiMdFileCount.mockReturnValue(2);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue([
|
||||
'GEMINI.md',
|
||||
'GEMINI.md',
|
||||
]);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue(['QWEN.md', 'QWEN.md']);
|
||||
mockConfig.getMcpServers.mockReturnValue({
|
||||
server1: {} as MCPServerConfig,
|
||||
});
|
||||
@@ -751,7 +745,7 @@ describe('App UI', () => {
|
||||
expect(lastFrame()).toContain('1 MCP server');
|
||||
});
|
||||
|
||||
it('should display only MCP server count when GEMINI.md count is 0', async () => {
|
||||
it('should display only MCP server count when QWEN.md count is 0', async () => {
|
||||
mockConfig.getGeminiMdFileCount.mockReturnValue(0);
|
||||
mockConfig.getAllGeminiMdFilenames.mockReturnValue([]);
|
||||
mockConfig.getMcpServers.mockReturnValue({
|
||||
|
||||
@@ -22,6 +22,7 @@ import { useGeminiStream } from './hooks/useGeminiStream.js';
|
||||
import { useLoadingIndicator } from './hooks/useLoadingIndicator.js';
|
||||
import { useThemeCommand } from './hooks/useThemeCommand.js';
|
||||
import { useAuthCommand } from './hooks/useAuthCommand.js';
|
||||
import { useQwenAuth } from './hooks/useQwenAuth.js';
|
||||
import { useEditorSettings } from './hooks/useEditorSettings.js';
|
||||
import { useSlashCommandProcessor } from './hooks/slashCommandProcessor.js';
|
||||
import { useAutoAcceptIndicator } from './hooks/useAutoAcceptIndicator.js';
|
||||
@@ -35,6 +36,7 @@ import { Footer } from './components/Footer.js';
|
||||
import { ThemeDialog } from './components/ThemeDialog.js';
|
||||
import { AuthDialog } from './components/AuthDialog.js';
|
||||
import { AuthInProgress } from './components/AuthInProgress.js';
|
||||
import { QwenOAuthProgress } from './components/QwenOAuthProgress.js';
|
||||
import { EditorSettingsDialog } from './components/EditorSettingsDialog.js';
|
||||
import { ShellConfirmationDialog } from './components/ShellConfirmationDialog.js';
|
||||
import { Colors } from './colors.js';
|
||||
@@ -231,6 +233,15 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
cancelAuthentication,
|
||||
} = useAuthCommand(settings, setAuthError, config);
|
||||
|
||||
const {
|
||||
isQwenAuthenticating,
|
||||
deviceAuth,
|
||||
isQwenAuth,
|
||||
cancelQwenAuth,
|
||||
authStatus,
|
||||
authMessage,
|
||||
} = useQwenAuth(settings, isAuthenticating);
|
||||
|
||||
useEffect(() => {
|
||||
if (settings.merged.selectedAuthType && !settings.merged.useExternalAuth) {
|
||||
const error = validateAuthMethod(settings.merged.selectedAuthType);
|
||||
@@ -254,6 +265,27 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
}
|
||||
}, [config, isAuthenticating]);
|
||||
|
||||
// Handle Qwen OAuth timeout
|
||||
useEffect(() => {
|
||||
if (isQwenAuth && authStatus === 'timeout') {
|
||||
setAuthError(
|
||||
authMessage ||
|
||||
'Qwen OAuth authentication timed out. Please try again or select a different authentication method.',
|
||||
);
|
||||
cancelQwenAuth();
|
||||
cancelAuthentication();
|
||||
openAuthDialog();
|
||||
}
|
||||
}, [
|
||||
isQwenAuth,
|
||||
authStatus,
|
||||
authMessage,
|
||||
cancelQwenAuth,
|
||||
cancelAuthentication,
|
||||
openAuthDialog,
|
||||
setAuthError,
|
||||
]);
|
||||
|
||||
const {
|
||||
isEditorDialogOpen,
|
||||
openEditorDialog,
|
||||
@@ -276,6 +308,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
try {
|
||||
const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
|
||||
process.cwd(),
|
||||
settings.merged.loadMemoryFromIncludeDirectories
|
||||
? config.getWorkspaceContext().getDirectories()
|
||||
: [],
|
||||
config.getDebugMode(),
|
||||
config.getFileService(),
|
||||
settings.merged,
|
||||
@@ -480,6 +515,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
openPrivacyNotice,
|
||||
toggleVimEnabled,
|
||||
setIsProcessing,
|
||||
setGeminiMdFileCount,
|
||||
);
|
||||
|
||||
const {
|
||||
@@ -501,6 +537,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
performMemoryRefresh,
|
||||
modelSwitchedFromQuotaError,
|
||||
setModelSwitchedFromQuotaError,
|
||||
refreshStatic,
|
||||
);
|
||||
|
||||
// Input handling
|
||||
@@ -599,7 +636,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
if (config) {
|
||||
setGeminiMdFileCount(config.getGeminiMdFileCount());
|
||||
}
|
||||
}, [config]);
|
||||
}, [config, config.getGeminiMdFileCount]);
|
||||
|
||||
const logger = useLogger();
|
||||
const [userMessages, setUserMessages] = useState<string[]>([]);
|
||||
@@ -868,13 +905,35 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
|
||||
</Box>
|
||||
) : isAuthenticating ? (
|
||||
<>
|
||||
<AuthInProgress
|
||||
onTimeout={() => {
|
||||
setAuthError('Authentication timed out. Please try again.');
|
||||
cancelAuthentication();
|
||||
openAuthDialog();
|
||||
}}
|
||||
/>
|
||||
{isQwenAuth && isQwenAuthenticating ? (
|
||||
<QwenOAuthProgress
|
||||
deviceAuth={deviceAuth || undefined}
|
||||
authStatus={authStatus}
|
||||
authMessage={authMessage}
|
||||
onTimeout={() => {
|
||||
setAuthError(
|
||||
'Qwen OAuth authentication timed out. Please try again.',
|
||||
);
|
||||
cancelQwenAuth();
|
||||
cancelAuthentication();
|
||||
openAuthDialog();
|
||||
}}
|
||||
onCancel={() => {
|
||||
setAuthError('Qwen OAuth authentication cancelled.');
|
||||
cancelQwenAuth();
|
||||
cancelAuthentication();
|
||||
openAuthDialog();
|
||||
}}
|
||||
/>
|
||||
) : (
|
||||
<AuthInProgress
|
||||
onTimeout={() => {
|
||||
setAuthError('Authentication timed out. Please try again.');
|
||||
cancelAuthentication();
|
||||
openAuthDialog();
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
{showErrorDetails && (
|
||||
<OverflowProvider>
|
||||
<Box flexDirection="column">
|
||||
|
||||
@@ -40,11 +40,24 @@ describe('directoryCommand', () => {
|
||||
getGeminiClient: vi.fn().mockReturnValue({
|
||||
addDirectoryContext: vi.fn(),
|
||||
}),
|
||||
getWorkingDir: () => '/test/dir',
|
||||
shouldLoadMemoryFromIncludeDirectories: () => false,
|
||||
getDebugMode: () => false,
|
||||
getFileService: () => ({}),
|
||||
getExtensionContextFilePaths: () => [],
|
||||
getFileFilteringOptions: () => ({ ignore: [], include: [] }),
|
||||
setUserMemory: vi.fn(),
|
||||
setGeminiMdFileCount: vi.fn(),
|
||||
} as unknown as Config;
|
||||
|
||||
mockContext = {
|
||||
services: {
|
||||
config: mockConfig,
|
||||
settings: {
|
||||
merged: {
|
||||
memoryDiscoveryMaxDirs: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
ui: {
|
||||
addItem: vi.fn(),
|
||||
|
||||
@@ -8,6 +8,7 @@ import { SlashCommand, CommandContext, CommandKind } from './types.js';
|
||||
import { MessageType } from '../types.js';
|
||||
import * as os from 'os';
|
||||
import * as path from 'path';
|
||||
import { loadServerHierarchicalMemory } from '@qwen-code/qwen-code-core';
|
||||
|
||||
export function expandHomeDir(p: string): string {
|
||||
if (!p) {
|
||||
@@ -16,7 +17,7 @@ export function expandHomeDir(p: string): string {
|
||||
let expandedPath = p;
|
||||
if (p.toLowerCase().startsWith('%userprofile%')) {
|
||||
expandedPath = os.homedir() + p.substring('%userprofile%'.length);
|
||||
} else if (p.startsWith('~')) {
|
||||
} else if (p === '~' || p.startsWith('~/')) {
|
||||
expandedPath = os.homedir() + p.substring(1);
|
||||
}
|
||||
return path.normalize(expandedPath);
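For illustration, a minimal sketch (not part of the change set) of how the stricter tilde check behaves; the inputs and the import path are hypothetical:

// Only a bare '~' or a '~/' prefix is expanded; '~other' is left alone.
import * as os from 'os';
import * as path from 'path';
import { strict as assert } from 'assert';
import { expandHomeDir } from './directoryCommand.js'; // assumed export location

assert.equal(expandHomeDir('~'), path.normalize(os.homedir()));
assert.equal(expandHomeDir('~/notes'), path.normalize(os.homedir() + '/notes'));
assert.equal(expandHomeDir('~other'), path.normalize('~other'));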
|
||||
@@ -90,6 +91,37 @@ export const directoryCommand: SlashCommand = {
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
if (config.shouldLoadMemoryFromIncludeDirectories()) {
|
||||
const { memoryContent, fileCount } =
|
||||
await loadServerHierarchicalMemory(
|
||||
config.getWorkingDir(),
|
||||
[
|
||||
...config.getWorkspaceContext().getDirectories(),
|
||||
...pathsToAdd,
|
||||
],
|
||||
config.getDebugMode(),
|
||||
config.getFileService(),
|
||||
config.getExtensionContextFilePaths(),
|
||||
context.services.settings.merged.memoryImportFormat || 'tree', // Use setting or default to 'tree'
|
||||
config.getFileFilteringOptions(),
|
||||
context.services.settings.merged.memoryDiscoveryMaxDirs,
|
||||
);
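// Keep the Config and the UI footer in sync with the re-discovered memory files.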
|
||||
config.setUserMemory(memoryContent);
|
||||
config.setGeminiMdFileCount(fileCount);
|
||||
context.ui.setGeminiMdFileCount(fileCount);
|
||||
}
|
||||
addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Successfully added GEMINI.md files from the following directories (if any were found):\n- ${added.join('\n- ')}`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
} catch (error) {
|
||||
errors.push(`Error refreshing memory: ${(error as Error).message}`);
|
||||
}
|
||||
|
||||
if (added.length > 0) {
|
||||
const gemini = config.getGeminiClient();
|
||||
if (gemini) {
|
||||
|
||||
@@ -42,9 +42,15 @@ describe('ideCommand', () => {
|
||||
mockConfig = {
|
||||
getIdeModeFeature: vi.fn(),
|
||||
getIdeMode: vi.fn(),
|
||||
getIdeClient: vi.fn(),
|
||||
getIdeClient: vi.fn(() => ({
|
||||
reconnect: vi.fn(),
|
||||
disconnect: vi.fn(),
|
||||
getCurrentIde: vi.fn(),
|
||||
getDetectedIdeDisplayName: vi.fn(),
|
||||
getConnectionStatus: vi.fn(),
|
||||
})),
|
||||
setIdeModeAndSyncConnection: vi.fn(),
|
||||
setIdeMode: vi.fn(),
|
||||
setIdeClientDisconnected: vi.fn(),
|
||||
} as unknown as Config;
|
||||
|
||||
platformSpy = vi.spyOn(process, 'platform', 'get');
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
Config,
|
||||
DetectedIde,
|
||||
IDEConnectionStatus,
|
||||
IdeClient,
|
||||
getIdeDisplayName,
|
||||
getIdeInstaller,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
@@ -19,6 +20,35 @@ import {
|
||||
} from './types.js';
|
||||
import { SettingScope } from '../../config/settings.js';
|
||||
|
||||
function getIdeStatusMessage(ideClient: IdeClient): {
|
||||
messageType: 'info' | 'error';
|
||||
content: string;
|
||||
} {
|
||||
const connection = ideClient.getConnectionStatus();
|
||||
switch (connection.status) {
|
||||
case IDEConnectionStatus.Connected:
|
||||
return {
|
||||
messageType: 'info',
|
||||
content: `🟢 Connected to ${ideClient.getDetectedIdeDisplayName()}`,
|
||||
};
|
||||
case IDEConnectionStatus.Connecting:
|
||||
return {
|
||||
messageType: 'info',
|
||||
content: `🟡 Connecting...`,
|
||||
};
|
||||
default: {
|
||||
let content = `🔴 Disconnected`;
|
||||
if (connection?.details) {
|
||||
content += `: ${connection.details}`;
|
||||
}
|
||||
return {
|
||||
messageType: 'error',
|
||||
content,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
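// Illustrative sketch (not part of the diff): each subcommand can now reuse the
// helper instead of duplicating the switch, e.g. the status action reduces to:
//
//   const { messageType, content } = getIdeStatusMessage(ideClient);
//   return { type: 'message', messageType, content } as const;
//
// and enable/disable report the freshly synced connection state the same way.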
|
||||
|
||||
export const ideCommand = (config: Config | null): SlashCommand | null => {
|
||||
if (!config || !config.getIdeModeFeature()) {
|
||||
return null;
|
||||
@@ -54,33 +84,13 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
|
||||
name: 'status',
|
||||
description: 'check status of IDE integration',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: (_context: CommandContext): SlashCommandActionReturn => {
|
||||
const connection = ideClient.getConnectionStatus();
|
||||
switch (connection.status) {
|
||||
case IDEConnectionStatus.Connected:
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: `🟢 Connected to ${ideClient.getDetectedIdeDisplayName()}`,
|
||||
} as const;
|
||||
case IDEConnectionStatus.Connecting:
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: `🟡 Connecting...`,
|
||||
} as const;
|
||||
default: {
|
||||
let content = `🔴 Disconnected`;
|
||||
if (connection?.details) {
|
||||
content += `: ${connection.details}`;
|
||||
}
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content,
|
||||
} as const;
|
||||
}
|
||||
}
|
||||
action: (): SlashCommandActionReturn => {
|
||||
const { messageType, content } = getIdeStatusMessage(ideClient);
|
||||
return {
|
||||
type: 'message',
|
||||
messageType,
|
||||
content,
|
||||
} as const;
|
||||
},
|
||||
};
|
||||
|
||||
@@ -110,6 +120,10 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
|
||||
);
|
||||
|
||||
const result = await installer.install();
|
||||
if (result.success) {
|
||||
config.setIdeMode(true);
|
||||
context.services.settings.setValue(SettingScope.User, 'ideMode', true);
|
||||
}
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: result.success ? 'info' : 'error',
|
||||
@@ -126,8 +140,15 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context: CommandContext) => {
|
||||
context.services.settings.setValue(SettingScope.User, 'ideMode', true);
|
||||
config.setIdeMode(true);
|
||||
config.setIdeClientConnected();
|
||||
await config.setIdeModeAndSyncConnection(true);
|
||||
const { messageType, content } = getIdeStatusMessage(ideClient);
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: messageType,
|
||||
text: content,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
},
|
||||
};
|
||||
|
||||
@@ -137,8 +158,15 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context: CommandContext) => {
|
||||
context.services.settings.setValue(SettingScope.User, 'ideMode', false);
|
||||
config.setIdeMode(false);
|
||||
config.setIdeClientDisconnected();
|
||||
await config.setIdeModeAndSyncConnection(false);
|
||||
const { messageType, content } = getIdeStatusMessage(ideClient);
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: messageType,
|
||||
text: content,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -11,16 +11,31 @@ import { initCommand } from './initCommand.js';
|
||||
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
|
||||
import { type CommandContext } from './types.js';
|
||||
|
||||
// Mock the 'fs' module
|
||||
vi.mock('fs', () => ({
|
||||
existsSync: vi.fn(),
|
||||
writeFileSync: vi.fn(),
|
||||
}));
|
||||
// Mock the 'fs' module with both named and default exports to avoid breaking default import sites
|
||||
vi.mock('fs', async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import('fs')>();
|
||||
const existsSync = vi.fn();
|
||||
const writeFileSync = vi.fn();
|
||||
const readFileSync = vi.fn();
|
||||
return {
|
||||
...actual,
|
||||
existsSync,
|
||||
writeFileSync,
|
||||
readFileSync,
|
||||
default: {
|
||||
...(actual as unknown as Record<string, unknown>),
|
||||
existsSync,
|
||||
writeFileSync,
|
||||
readFileSync,
|
||||
},
|
||||
} as unknown as typeof import('fs');
|
||||
});
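// Why both shapes are mocked (illustrative note, not part of the diff): the spies
// are shared between the named and default exports, so either import style used
// by the code under test resolves to the same mock functions:
//
//   import fs from 'fs';         // fs.existsSync is the existsSync spy above
//   import * as fsNs from 'fs';  // fsNs.existsSync is the same spy
//
// Mocking only the named exports would leave the default export undefined and
// break modules that rely on the default import.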
|
||||
|
||||
describe('initCommand', () => {
|
||||
let mockContext: CommandContext;
|
||||
const targetDir = '/test/dir';
|
||||
const geminiMdPath = path.join(targetDir, 'GEMINI.md');
|
||||
const DEFAULT_CONTEXT_FILENAME = 'QWEN.md';
|
||||
const geminiMdPath = path.join(targetDir, DEFAULT_CONTEXT_FILENAME);
|
||||
|
||||
beforeEach(() => {
|
||||
// Create a fresh mock context for each test
|
||||
@@ -38,9 +53,10 @@ describe('initCommand', () => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should inform the user if GEMINI.md already exists', async () => {
|
||||
it(`should inform the user if ${DEFAULT_CONTEXT_FILENAME} already exists and is non-empty`, async () => {
|
||||
// Arrange: Simulate that the file exists
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.spyOn(fs, 'readFileSync').mockReturnValue('# Existing content');
|
||||
|
||||
// Act: Run the command's action
|
||||
const result = await initCommand.action!(mockContext, '');
|
||||
@@ -49,14 +65,13 @@ describe('initCommand', () => {
|
||||
expect(result).toEqual({
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content:
|
||||
'A GEMINI.md file already exists in this directory. No changes were made.',
|
||||
content: `A ${DEFAULT_CONTEXT_FILENAME} file already exists in this directory. No changes were made.`,
|
||||
});
|
||||
// Assert: Ensure no file was written
|
||||
expect(fs.writeFileSync).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should create GEMINI.md and submit a prompt if it does not exist', async () => {
|
||||
it(`should create ${DEFAULT_CONTEXT_FILENAME} and submit a prompt if it does not exist`, async () => {
|
||||
// Arrange: Simulate that the file does not exist
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
|
||||
@@ -70,7 +85,7 @@ describe('initCommand', () => {
|
||||
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
|
||||
{
|
||||
type: 'info',
|
||||
text: 'Empty GEMINI.md created. Now analyzing the project to populate it.',
|
||||
text: `Empty ${DEFAULT_CONTEXT_FILENAME} created. Now analyzing the project to populate it.`,
|
||||
},
|
||||
expect.any(Number),
|
||||
);
|
||||
@@ -78,10 +93,20 @@ describe('initCommand', () => {
|
||||
// Assert: Check that the correct prompt is submitted
|
||||
expect(result.type).toBe('submit_prompt');
|
||||
expect(result.content).toContain(
|
||||
'You are an AI agent that brings the power of Gemini',
|
||||
'You are Qwen Code, an interactive CLI agent',
|
||||
);
|
||||
});
|
||||
|
||||
it(`should proceed to initialize when ${DEFAULT_CONTEXT_FILENAME} exists but is empty`, async () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.spyOn(fs, 'readFileSync').mockReturnValue(' \n ');
|
||||
|
||||
const result = await initCommand.action!(mockContext, '');
|
||||
|
||||
expect(fs.writeFileSync).toHaveBeenCalledWith(geminiMdPath, '', 'utf8');
|
||||
expect(result.type).toBe('submit_prompt');
|
||||
});
|
||||
|
||||
it('should return an error if config is not available', async () => {
|
||||
// Arrange: Create a context without config
|
||||
const noConfigContext = createMockCommandContext();
|
||||
|
||||
@@ -12,10 +12,11 @@ import {
|
||||
SlashCommandActionReturn,
|
||||
CommandKind,
|
||||
} from './types.js';
|
||||
import { getCurrentGeminiMdFilename } from '@qwen-code/qwen-code-core';
|
||||
|
||||
export const initCommand: SlashCommand = {
|
||||
name: 'init',
|
||||
description: 'Analyzes the project and creates a tailored GEMINI.md file.',
|
||||
description: 'Analyzes the project and creates a tailored QWEN.md file.',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (
|
||||
context: CommandContext,
|
||||
@@ -29,32 +30,55 @@ export const initCommand: SlashCommand = {
|
||||
};
|
||||
}
|
||||
const targetDir = context.services.config.getTargetDir();
|
||||
const geminiMdPath = path.join(targetDir, 'GEMINI.md');
|
||||
const contextFileName = getCurrentGeminiMdFilename();
|
||||
const contextFilePath = path.join(targetDir, contextFileName);
|
||||
|
||||
if (fs.existsSync(geminiMdPath)) {
|
||||
try {
|
||||
if (fs.existsSync(contextFilePath)) {
|
||||
// If file exists but is empty (or whitespace), continue to initialize; otherwise, bail out
|
||||
try {
|
||||
const existing = fs.readFileSync(contextFilePath, 'utf8');
|
||||
if (existing && existing.trim().length > 0) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content: `A ${contextFileName} file already exists in this directory. No changes were made.`,
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
// If we fail to read, conservatively proceed to (re)create the file
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure an empty context file exists before prompting the model to populate it
|
||||
try {
|
||||
fs.writeFileSync(contextFilePath, '', 'utf8');
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: 'info',
|
||||
text: `Empty ${contextFileName} created. Now analyzing the project to populate it.`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
} catch (err) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: `Failed to create ${contextFileName}: ${err instanceof Error ? err.message : String(err)}`,
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content:
|
||||
'A GEMINI.md file already exists in this directory. No changes were made.',
|
||||
messageType: 'error',
|
||||
content: `Unexpected error preparing ${contextFileName}: ${error instanceof Error ? error.message : String(error)}`,
|
||||
};
|
||||
}
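// Net behavior after this change (illustrative summary, not part of the diff):
//  - context file exists with content            -> info message, nothing written
//  - context file missing, empty, or unreadable  -> (re)create it empty and submit
//    the analysis prompt below to populate it
//  - write failure                               -> error message naming the context file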
|
||||
|
||||
// Create an empty GEMINI.md file
|
||||
fs.writeFileSync(geminiMdPath, '', 'utf8');
|
||||
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: 'info',
|
||||
text: 'Empty GEMINI.md created. Now analyzing the project to populate it.',
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
|
||||
return {
|
||||
type: 'submit_prompt',
|
||||
content: `
|
||||
You are an AI agent that brings the power of Gemini directly into the terminal. Your task is to analyze the current directory and generate a comprehensive GEMINI.md file to be used as instructional context for future interactions.
|
||||
You are Qwen Code, an interactive CLI agent. Analyze the current directory and generate a comprehensive ${contextFileName} file to be used as instructional context for future interactions.
|
||||
|
||||
**Analysis Process:**
|
||||
|
||||
@@ -70,7 +94,7 @@ You are an AI agent that brings the power of Gemini directly into the terminal.
|
||||
* **Code Project:** Look for clues like \`package.json\`, \`requirements.txt\`, \`pom.xml\`, \`go.mod\`, \`Cargo.toml\`, \`build.gradle\`, or a \`src\` directory. If you find them, this is likely a software project.
|
||||
* **Non-Code Project:** If you don't find code-related files, this might be a directory for documentation, research papers, notes, or something else.
|
||||
|
||||
**GEMINI.md Content Generation:**
|
||||
**${contextFileName} Content Generation:**
|
||||
|
||||
**For a Code Project:**
|
||||
|
||||
@@ -86,7 +110,7 @@ You are an AI agent that brings the power of Gemini directly into the terminal.
|
||||
|
||||
**Final Output:**
|
||||
|
||||
Write the complete content to the \`GEMINI.md\` file. The output must be well-formatted Markdown.
|
||||
Write the complete content to the \`${contextFileName}\` file. The output must be well-formatted Markdown.
|
||||
`,
|
||||
};
|
||||
},
|
||||
|
||||
@@ -161,6 +161,10 @@ describe('memoryCommand', () => {
|
||||
getDebugMode: () => false,
|
||||
getFileService: () => ({}) as FileDiscoveryService,
|
||||
getExtensionContextFilePaths: () => [],
|
||||
shouldLoadMemoryFromIncludeDirectories: () => false,
|
||||
getWorkspaceContext: () => ({
|
||||
getDirectories: () => [],
|
||||
}),
|
||||
getFileFilteringOptions: () => ({
|
||||
ignore: [],
|
||||
include: [],
|
||||
|
||||
@@ -89,6 +89,9 @@ export const memoryCommand: SlashCommand = {
|
||||
const { memoryContent, fileCount } =
|
||||
await loadServerHierarchicalMemory(
|
||||
config.getWorkingDir(),
|
||||
config.shouldLoadMemoryFromIncludeDirectories()
|
||||
? config.getWorkspaceContext().getDirectories()
|
||||
: [],
|
||||
config.getDebugMode(),
|
||||
config.getFileService(),
|
||||
config.getExtensionContextFilePaths(),
|
||||
|
||||
@@ -49,7 +49,7 @@ describe('setupGithubCommand', () => {
|
||||
`curl -fsSL -o "${fakeRepoRoot}/.github/workflows/gemini-issue-automated-triage.yml"`,
|
||||
`curl -fsSL -o "${fakeRepoRoot}/.github/workflows/gemini-issue-scheduled-triage.yml"`,
|
||||
`curl -fsSL -o "${fakeRepoRoot}/.github/workflows/gemini-pr-review.yml"`,
|
||||
'https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/heads/v0/examples/workflows/',
|
||||
'https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/tags/v0/examples/workflows/',
|
||||
];
|
||||
|
||||
for (const substring of expectedSubstrings) {
|
||||
|
||||
@@ -28,7 +28,7 @@ export const setupGithubCommand: SlashCommand = {
|
||||
}
|
||||
|
||||
const version = 'v0';
|
||||
const workflowBaseUrl = `https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/heads/${version}/examples/workflows/`;
|
||||
const workflowBaseUrl = `https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/tags/${version}/examples/workflows/`;
|
||||
|
||||
const workflows = [
|
||||
'gemini-cli/gemini-cli.yml',
|
||||
|
||||
@@ -59,6 +59,7 @@ export interface CommandContext {
|
||||
/** Toggles a special display mode. */
|
||||
toggleCorgiMode: () => void;
|
||||
toggleVimEnabled: () => Promise<boolean>;
|
||||
setGeminiMdFileCount: (count: number) => void;
|
||||
};
|
||||
// Session-specific data
|
||||
session: {
|
||||
|
||||
@@ -189,7 +189,7 @@ describe('AuthDialog', () => {
|
||||
);
|
||||
|
||||
// This is a bit brittle, but it's the best way to check which item is selected.
|
||||
expect(lastFrame()).toContain('● 1. OpenAI');
|
||||
expect(lastFrame()).toContain('● 2. OpenAI');
|
||||
});
|
||||
|
||||
it('should fall back to default if GEMINI_DEFAULT_AUTH_TYPE is not set', () => {
|
||||
@@ -217,8 +217,8 @@ describe('AuthDialog', () => {
|
||||
<AuthDialog onSelect={() => {}} settings={settings} />,
|
||||
);
|
||||
|
||||
// Default is OpenAI (only option available)
|
||||
expect(lastFrame()).toContain('● 1. OpenAI');
|
||||
// Default is Qwen OAuth (first option)
|
||||
expect(lastFrame()).toContain('● 1. Qwen OAuth');
|
||||
});
|
||||
|
||||
it('should show an error and fall back to default if GEMINI_DEFAULT_AUTH_TYPE is invalid', () => {
|
||||
@@ -249,8 +249,8 @@ describe('AuthDialog', () => {
|
||||
);
|
||||
|
||||
// Since the auth dialog doesn't show GEMINI_DEFAULT_AUTH_TYPE errors anymore,
|
||||
// it will just show the default OpenAI option
|
||||
expect(lastFrame()).toContain('● 1. OpenAI');
|
||||
// it will just show the default Qwen OAuth option
|
||||
expect(lastFrame()).toContain('● 1. Qwen OAuth');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -45,7 +45,10 @@ export function AuthDialog({
|
||||
initialErrorMessage || null,
|
||||
);
|
||||
const [showOpenAIKeyPrompt, setShowOpenAIKeyPrompt] = useState(false);
|
||||
const items = [{ label: 'OpenAI', value: AuthType.USE_OPENAI }];
|
||||
const items = [
|
||||
{ label: 'Qwen OAuth', value: AuthType.QWEN_OAUTH },
|
||||
{ label: 'OpenAI', value: AuthType.USE_OPENAI },
|
||||
];
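// Qwen OAuth is listed first, so it becomes the pre-selected default when no
// saved auth type or environment override applies (see initialAuthIndex below).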
|
||||
|
||||
const initialAuthIndex = Math.max(
|
||||
0,
|
||||
@@ -65,6 +68,10 @@ export function AuthDialog({
|
||||
return item.value === AuthType.USE_GEMINI;
|
||||
}
|
||||
|
||||
if (process.env.QWEN_OAUTH_TOKEN) {
|
||||
return item.value === AuthType.QWEN_OAUTH;
|
||||
}
|
||||
|
||||
return item.value === AuthType.LOGIN_WITH_GOOGLE;
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
*/
|
||||
|
||||
import { render } from 'ink-testing-library';
|
||||
import { waitFor } from '@testing-library/react';
|
||||
import { InputPrompt, InputPromptProps } from './InputPrompt.js';
|
||||
import type { TextBuffer } from './shared/text-buffer.js';
|
||||
import { Config } from '@qwen-code/qwen-code-core';
|
||||
@@ -1226,11 +1227,12 @@ describe('InputPrompt', () => {
|
||||
stdin.write('\x12');
|
||||
await wait();
|
||||
stdin.write('\x1B');
|
||||
await wait();
|
||||
|
||||
const frame = stdout.lastFrame();
|
||||
expect(frame).not.toContain('(r:)');
|
||||
expect(frame).not.toContain('echo hello');
|
||||
await waitFor(() => {
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
});
|
||||
|
||||
expect(stdout.lastFrame()).not.toContain('echo hello');
|
||||
|
||||
unmount();
|
||||
});
|
||||
@@ -1240,9 +1242,11 @@ describe('InputPrompt', () => {
|
||||
stdin.write('\x12');
|
||||
await wait();
|
||||
stdin.write('\t');
|
||||
await wait();
|
||||
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
await waitFor(() => {
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
});
|
||||
|
||||
expect(props.buffer.setText).toHaveBeenCalledWith('echo hello');
|
||||
unmount();
|
||||
});
|
||||
@@ -1253,9 +1257,11 @@ describe('InputPrompt', () => {
|
||||
await wait();
|
||||
expect(stdout.lastFrame()).toContain('(r:)');
|
||||
stdin.write('\r');
|
||||
await wait();
|
||||
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
await waitFor(() => {
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
});
|
||||
|
||||
expect(props.onSubmit).toHaveBeenCalledWith('echo hello');
|
||||
unmount();
|
||||
});
|
||||
@@ -1268,9 +1274,10 @@ describe('InputPrompt', () => {
|
||||
await wait();
|
||||
expect(stdout.lastFrame()).toContain('(r:)');
|
||||
stdin.write('\x1B');
|
||||
await wait();
|
||||
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
await waitFor(() => {
|
||||
expect(stdout.lastFrame()).not.toContain('(r:)');
|
||||
});
|
||||
expect(props.buffer.text).toBe('initial text');
|
||||
expect(props.buffer.cursor).toEqual([0, 3]);
|
||||
|
||||
|
||||
packages/cli/src/ui/components/QwenOAuthProgress.test.tsx (new file, 546 lines)
@@ -0,0 +1,546 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
// React import not needed for test files
|
||||
import { render } from 'ink-testing-library';
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { QwenOAuthProgress } from './QwenOAuthProgress.js';
|
||||
import { DeviceAuthorizationInfo } from '../hooks/useQwenAuth.js';
|
||||
|
||||
// Mock qrcode-terminal module
|
||||
vi.mock('qrcode-terminal', () => ({
|
||||
default: {
|
||||
generate: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock ink-spinner
|
||||
vi.mock('ink-spinner', () => ({
|
||||
default: ({ type }: { type: string }) => `MockSpinner(${type})`,
|
||||
}));
|
||||
|
||||
// Mock ink-link
|
||||
vi.mock('ink-link', () => ({
|
||||
default: ({ children }: { children: React.ReactNode; url: string }) =>
|
||||
children,
|
||||
}));
|
||||
|
||||
describe('QwenOAuthProgress', () => {
|
||||
const mockOnTimeout = vi.fn();
|
||||
const mockOnCancel = vi.fn();
|
||||
|
||||
const createMockDeviceAuth = (
|
||||
overrides: Partial<DeviceAuthorizationInfo> = {},
|
||||
): DeviceAuthorizationInfo => ({
|
||||
verification_uri: 'https://example.com/device',
|
||||
verification_uri_complete: 'https://example.com/device?user_code=ABC123',
|
||||
user_code: 'ABC123',
|
||||
expires_in: 300,
|
||||
...overrides,
|
||||
});
|
||||
|
||||
const mockDeviceAuth = createMockDeviceAuth();
|
||||
|
||||
const renderComponent = (
|
||||
props: Partial<{
|
||||
deviceAuth: DeviceAuthorizationInfo;
|
||||
authStatus:
|
||||
| 'idle'
|
||||
| 'polling'
|
||||
| 'success'
|
||||
| 'error'
|
||||
| 'timeout'
|
||||
| 'rate_limit';
|
||||
authMessage: string | null;
|
||||
}> = {},
|
||||
) =>
|
||||
render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
{...props}
|
||||
/>,
|
||||
);
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
describe('Loading state (no deviceAuth)', () => {
|
||||
it('should render loading state when deviceAuth is not provided', () => {
|
||||
const { lastFrame } = renderComponent();
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('MockSpinner(dots)');
|
||||
expect(output).toContain('Waiting for Qwen OAuth authentication...');
|
||||
expect(output).toContain('(Press ESC to cancel)');
|
||||
});
|
||||
|
||||
it('should render loading state with gray border', () => {
|
||||
const { lastFrame } = renderComponent();
|
||||
const output = lastFrame();
|
||||
|
||||
// Should not contain auth flow elements
|
||||
expect(output).not.toContain('Qwen OAuth Authentication');
|
||||
expect(output).not.toContain('Please visit this URL to authorize:');
|
||||
// Loading state still shows time remaining with default timeout
|
||||
expect(output).toContain('Time remaining:');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Authenticated state (with deviceAuth)', () => {
|
||||
it('should render authentication flow when deviceAuth is provided', () => {
|
||||
const { lastFrame } = renderComponent({ deviceAuth: mockDeviceAuth });
|
||||
|
||||
const output = lastFrame();
|
||||
// Initially no QR code shown until it's generated, but the status area should be visible
|
||||
expect(output).toContain('MockSpinner(dots)');
|
||||
expect(output).toContain('Waiting for authorization');
|
||||
expect(output).toContain('Time remaining: 5:00');
|
||||
expect(output).toContain('(Press ESC to cancel)');
|
||||
});
|
||||
|
||||
it('should display correct URL in Static component when QR code is generated', async () => {
|
||||
const qrcode = await import('qrcode-terminal');
|
||||
const mockGenerate = vi.mocked(qrcode.default.generate);
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
let qrCallback: any = null;
|
||||
mockGenerate.mockImplementation((url, options, callback) => {
|
||||
qrCallback = callback;
|
||||
});
|
||||
|
||||
const customAuth = createMockDeviceAuth({
|
||||
verification_uri_complete: 'https://custom.com/auth?code=XYZ789',
|
||||
});
|
||||
|
||||
const { lastFrame, rerender } = renderComponent({
|
||||
deviceAuth: customAuth,
|
||||
});
|
||||
|
||||
// Manually trigger the QR code callback
|
||||
if (qrCallback && typeof qrCallback === 'function') {
|
||||
qrCallback('Mock QR Code Data');
|
||||
}
|
||||
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={customAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
expect(lastFrame()).toContain('https://custom.com/auth?code=XYZ789');
|
||||
});
|
||||
|
||||
it('should format time correctly', () => {
|
||||
const deviceAuthWithCustomTime: DeviceAuthorizationInfo = {
|
||||
...mockDeviceAuth,
|
||||
expires_in: 125, // 2 minutes and 5 seconds
|
||||
};
|
||||
|
||||
const { lastFrame } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={deviceAuthWithCustomTime}
|
||||
/>,
|
||||
);
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Time remaining: 2:05');
|
||||
});
|
||||
|
||||
it('should format single digit seconds with leading zero', () => {
|
||||
const deviceAuthWithCustomTime: DeviceAuthorizationInfo = {
|
||||
...mockDeviceAuth,
|
||||
expires_in: 67, // 1 minute and 7 seconds
|
||||
};
|
||||
|
||||
const { lastFrame } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={deviceAuthWithCustomTime}
|
||||
/>,
|
||||
);
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Time remaining: 1:07');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Timer functionality', () => {
|
||||
it('should countdown and call onTimeout when timer expires', async () => {
|
||||
const deviceAuthWithShortTime: DeviceAuthorizationInfo = {
|
||||
...mockDeviceAuth,
|
||||
expires_in: 2, // 2 seconds
|
||||
};
|
||||
|
||||
const { rerender } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={deviceAuthWithShortTime}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Advance timer by 1 second
|
||||
vi.advanceTimersByTime(1000);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={deviceAuthWithShortTime}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Advance timer by another second to trigger timeout
|
||||
vi.advanceTimersByTime(1000);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={deviceAuthWithShortTime}
|
||||
/>,
|
||||
);
|
||||
|
||||
expect(mockOnTimeout).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should update time remaining display', async () => {
|
||||
const { lastFrame, rerender } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Initial time should be 5:00
|
||||
expect(lastFrame()).toContain('Time remaining: 5:00');
|
||||
|
||||
// Advance by 1 second
|
||||
vi.advanceTimersByTime(1000);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Should now show 4:59
|
||||
expect(lastFrame()).toContain('Time remaining: 4:59');
|
||||
});
|
||||
|
||||
it('should use default 300 second timeout when deviceAuth is null', () => {
|
||||
const { lastFrame } = render(
|
||||
<QwenOAuthProgress onTimeout={mockOnTimeout} onCancel={mockOnCancel} />,
|
||||
);
|
||||
|
||||
// Should show default 5:00 (300 seconds) timeout
|
||||
expect(lastFrame()).toContain('Time remaining: 5:00');
|
||||
|
||||
// The timer functionality is already tested in other tests,
|
||||
// this test mainly verifies the default timeout value is used
|
||||
});
|
||||
});
|
||||
|
||||
describe('Animated dots', () => {
|
||||
it('should cycle through animated dots', async () => {
|
||||
const { lastFrame, rerender } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Initial state should have no dots
|
||||
expect(lastFrame()).toContain('Waiting for authorization');
|
||||
|
||||
// Advance by 500ms to add first dot
|
||||
vi.advanceTimersByTime(500);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
expect(lastFrame()).toContain('Waiting for authorization.');
|
||||
|
||||
// Advance by another 500ms to add second dot
|
||||
vi.advanceTimersByTime(500);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
expect(lastFrame()).toContain('Waiting for authorization..');
|
||||
|
||||
// Advance by another 500ms to add third dot
|
||||
vi.advanceTimersByTime(500);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
expect(lastFrame()).toContain('Waiting for authorization...');
|
||||
|
||||
// Advance by another 500ms to reset dots
|
||||
vi.advanceTimersByTime(500);
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
expect(lastFrame()).toContain('Waiting for authorization');
|
||||
});
|
||||
});
|
||||
|
||||
describe('QR Code functionality', () => {
|
||||
it('should generate QR code when deviceAuth is provided', async () => {
|
||||
const qrcode = await import('qrcode-terminal');
|
||||
const mockGenerate = vi.mocked(qrcode.default.generate);
|
||||
|
||||
mockGenerate.mockImplementation((url, options, callback) => {
|
||||
callback!('Mock QR Code Data');
|
||||
});
|
||||
|
||||
render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
expect(mockGenerate).toHaveBeenCalledWith(
|
||||
mockDeviceAuth.verification_uri_complete,
|
||||
{ small: true },
|
||||
expect.any(Function),
|
||||
);
|
||||
});
|
||||
|
||||
it('should display QR code in Static component when available', async () => {
|
||||
const qrcode = await import('qrcode-terminal');
|
||||
const mockGenerate = vi.mocked(qrcode.default.generate);
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
let qrCallback: any = null;
|
||||
mockGenerate.mockImplementation((url, options, callback) => {
|
||||
qrCallback = callback;
|
||||
});
|
||||
|
||||
const { lastFrame, rerender } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Manually trigger the QR code callback
|
||||
if (qrCallback && typeof qrCallback === 'function') {
|
||||
qrCallback('Mock QR Code Data');
|
||||
}
|
||||
|
||||
rerender(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Or scan the QR code below:');
|
||||
expect(output).toContain('Mock QR Code Data');
|
||||
});
|
||||
|
||||
it('should handle QR code generation errors gracefully', async () => {
|
||||
const qrcode = await import('qrcode-terminal');
|
||||
const mockGenerate = vi.mocked(qrcode.default.generate);
|
||||
const consoleErrorSpy = vi
|
||||
.spyOn(console, 'error')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
mockGenerate.mockImplementation(() => {
|
||||
throw new Error('QR Code generation failed');
|
||||
});
|
||||
|
||||
const { lastFrame } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Should not crash and should not show QR code section since QR generation failed
|
||||
const output = lastFrame();
|
||||
expect(output).not.toContain('Or scan the QR code below:');
|
||||
expect(consoleErrorSpy).toHaveBeenCalledWith(
|
||||
'Failed to generate QR code:',
|
||||
expect.any(Error),
|
||||
);
|
||||
|
||||
consoleErrorSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('should not generate QR code when deviceAuth is null', async () => {
|
||||
const qrcode = await import('qrcode-terminal');
|
||||
const mockGenerate = vi.mocked(qrcode.default.generate);
|
||||
|
||||
render(
|
||||
<QwenOAuthProgress onTimeout={mockOnTimeout} onCancel={mockOnCancel} />,
|
||||
);
|
||||
|
||||
expect(mockGenerate).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('User interactions', () => {
|
||||
it('should call onCancel when ESC key is pressed', () => {
|
||||
const { stdin } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Simulate ESC key press
|
||||
stdin.write('\u001b'); // ESC character
|
||||
|
||||
expect(mockOnCancel).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should call onCancel when ESC is pressed in loading state', () => {
|
||||
const { stdin } = render(
|
||||
<QwenOAuthProgress onTimeout={mockOnTimeout} onCancel={mockOnCancel} />,
|
||||
);
|
||||
|
||||
// Simulate ESC key press
|
||||
stdin.write('\u001b'); // ESC character
|
||||
|
||||
expect(mockOnCancel).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should not call onCancel for other key presses', () => {
|
||||
const { stdin } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Simulate other key presses
|
||||
stdin.write('a');
|
||||
stdin.write('\r'); // Enter
|
||||
stdin.write(' '); // Space
|
||||
|
||||
expect(mockOnCancel).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Props changes', () => {
|
||||
it('should display initial timer value from deviceAuth', () => {
|
||||
const deviceAuthWith10Min: DeviceAuthorizationInfo = {
|
||||
...mockDeviceAuth,
|
||||
expires_in: 600, // 10 minutes
|
||||
};
|
||||
|
||||
const { lastFrame } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={deviceAuthWith10Min}
|
||||
/>,
|
||||
);
|
||||
|
||||
expect(lastFrame()).toContain('Time remaining: 10:00');
|
||||
});
|
||||
|
||||
it('should reset to loading state when deviceAuth becomes null', () => {
|
||||
const { rerender, lastFrame } = render(
|
||||
<QwenOAuthProgress
|
||||
onTimeout={mockOnTimeout}
|
||||
onCancel={mockOnCancel}
|
||||
deviceAuth={mockDeviceAuth}
|
||||
/>,
|
||||
);
|
||||
|
||||
// Initially shows waiting for authorization
|
||||
expect(lastFrame()).toContain('Waiting for authorization');
|
||||
|
||||
rerender(
|
||||
<QwenOAuthProgress onTimeout={mockOnTimeout} onCancel={mockOnCancel} />,
|
||||
);
|
||||
|
||||
expect(lastFrame()).toContain('Waiting for Qwen OAuth authentication...');
|
||||
expect(lastFrame()).not.toContain('Waiting for authorization');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Timeout state', () => {
|
||||
it('should render timeout state when authStatus is timeout', () => {
|
||||
const { lastFrame } = renderComponent({
|
||||
authStatus: 'timeout',
|
||||
authMessage: 'Custom timeout message',
|
||||
});
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Qwen OAuth Authentication Timeout');
|
||||
expect(output).toContain('Custom timeout message');
|
||||
expect(output).toContain(
|
||||
'Press any key to return to authentication type selection.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should render default timeout message when no authMessage provided', () => {
|
||||
const { lastFrame } = renderComponent({
|
||||
authStatus: 'timeout',
|
||||
});
|
||||
|
||||
const output = lastFrame();
|
||||
expect(output).toContain('Qwen OAuth Authentication Timeout');
|
||||
expect(output).toContain(
|
||||
'OAuth token expired (over 300 seconds). Please select authentication method again.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should call onCancel for any key press in timeout state', () => {
|
||||
const { stdin } = renderComponent({
|
||||
authStatus: 'timeout',
|
||||
});
|
||||
|
||||
// Simulate any key press
|
||||
stdin.write('a');
|
||||
expect(mockOnCancel).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Reset mock and try enter key
|
||||
mockOnCancel.mockClear();
|
||||
stdin.write('\r');
|
||||
expect(mockOnCancel).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
});
|
||||
packages/cli/src/ui/components/QwenOAuthProgress.tsx (new file, 267 lines)
@@ -0,0 +1,267 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import React, { useState, useEffect, useMemo } from 'react';
|
||||
import { Box, Text, useInput } from 'ink';
|
||||
import Spinner from 'ink-spinner';
|
||||
import Link from 'ink-link';
|
||||
import qrcode from 'qrcode-terminal';
|
||||
import { Colors } from '../colors.js';
|
||||
import { DeviceAuthorizationInfo } from '../hooks/useQwenAuth.js';
|
||||
|
||||
interface QwenOAuthProgressProps {
|
||||
onTimeout: () => void;
|
||||
onCancel: () => void;
|
||||
deviceAuth?: DeviceAuthorizationInfo;
|
||||
authStatus?:
|
||||
| 'idle'
|
||||
| 'polling'
|
||||
| 'success'
|
||||
| 'error'
|
||||
| 'timeout'
|
||||
| 'rate_limit';
|
||||
authMessage?: string | null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Static QR Code Display Component
|
||||
* Renders the QR code and URL once and doesn't re-render unless the URL changes
|
||||
*/
|
||||
function QrCodeDisplay({
|
||||
verificationUrl,
|
||||
qrCodeData,
|
||||
}: {
|
||||
verificationUrl: string;
|
||||
qrCodeData: string | null;
|
||||
}): React.JSX.Element | null {
|
||||
if (!qrCodeData) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor={Colors.AccentBlue}
|
||||
flexDirection="column"
|
||||
padding={1}
|
||||
width="100%"
|
||||
>
|
||||
<Text bold color={Colors.AccentBlue}>
|
||||
Qwen OAuth Authentication
|
||||
</Text>
|
||||
|
||||
<Box marginTop={1}>
|
||||
<Text>Please visit this URL to authorize:</Text>
|
||||
</Box>
|
||||
|
||||
<Link url={verificationUrl} fallback={false}>
|
||||
<Text color={Colors.AccentGreen} bold>
|
||||
{verificationUrl}
|
||||
</Text>
|
||||
</Link>
|
||||
|
||||
<Box marginTop={1}>
|
||||
<Text>Or scan the QR code below:</Text>
|
||||
</Box>
|
||||
|
||||
<Box marginTop={1}>
|
||||
<Text>{qrCodeData}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Dynamic Status Display Component
|
||||
* Shows the loading spinner, timer, and status messages
|
||||
*/
|
||||
function StatusDisplay({
|
||||
timeRemaining,
|
||||
dots,
|
||||
}: {
|
||||
timeRemaining: number;
|
||||
dots: string;
|
||||
}): React.JSX.Element {
|
||||
const formatTime = (seconds: number): string => {
|
||||
const minutes = Math.floor(seconds / 60);
|
||||
const remainingSeconds = seconds % 60;
|
||||
return `${minutes}:${remainingSeconds.toString().padStart(2, '0')}`;
|
||||
};
|
||||
|
||||
return (
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor={Colors.AccentBlue}
|
||||
flexDirection="column"
|
||||
padding={1}
|
||||
width="100%"
|
||||
>
|
||||
<Box marginTop={1}>
|
||||
<Text>
|
||||
<Spinner type="dots" /> Waiting for authorization{dots}
|
||||
</Text>
|
||||
</Box>
|
||||
|
||||
<Box marginTop={1} justifyContent="space-between">
|
||||
<Text color={Colors.Gray}>
|
||||
Time remaining: {formatTime(timeRemaining)}
|
||||
</Text>
|
||||
<Text color={Colors.AccentPurple}>(Press ESC to cancel)</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
export function QwenOAuthProgress({
|
||||
onTimeout,
|
||||
onCancel,
|
||||
deviceAuth,
|
||||
authStatus,
|
||||
authMessage,
|
||||
}: QwenOAuthProgressProps): React.JSX.Element {
|
||||
const defaultTimeout = deviceAuth?.expires_in || 300; // Default 5 minutes
|
||||
const [timeRemaining, setTimeRemaining] = useState<number>(defaultTimeout);
|
||||
const [dots, setDots] = useState<string>('');
|
||||
const [qrCodeData, setQrCodeData] = useState<string | null>(null);
|
||||
|
||||
useInput((input, key) => {
|
||||
if (authStatus === 'timeout') {
|
||||
// Any key press in timeout state should trigger cancel to return to auth dialog
|
||||
onCancel();
|
||||
} else if (key.escape) {
|
||||
onCancel();
|
||||
}
|
||||
});
|
||||
|
||||
// Generate QR code once when device auth is available
|
||||
useEffect(() => {
|
||||
if (!deviceAuth?.verification_uri_complete) {
|
||||
return;
|
||||
}
|
||||
|
||||
const generateQR = () => {
|
||||
try {
|
||||
qrcode.generate(
|
||||
deviceAuth.verification_uri_complete,
|
||||
{ small: true },
|
||||
(qrcode: string) => {
|
||||
setQrCodeData(qrcode);
|
||||
},
|
||||
);
|
||||
} catch (error) {
|
||||
console.error('Failed to generate QR code:', error);
|
||||
setQrCodeData(null);
|
||||
}
|
||||
};
|
||||
|
||||
generateQR();
|
||||
}, [deviceAuth?.verification_uri_complete]);
|
||||
|
||||
// Countdown timer
|
||||
useEffect(() => {
|
||||
const timer = setInterval(() => {
|
||||
setTimeRemaining((prev) => {
|
||||
if (prev <= 1) {
|
||||
onTimeout();
|
||||
return 0;
|
||||
}
|
||||
return prev - 1;
|
||||
});
|
||||
}, 1000);
|
||||
|
||||
return () => clearInterval(timer);
|
||||
}, [onTimeout]);
|
||||
|
||||
// Animated dots
|
||||
useEffect(() => {
|
||||
const dotsTimer = setInterval(() => {
|
||||
setDots((prev) => {
|
||||
if (prev.length >= 3) return '';
|
||||
return prev + '.';
|
||||
});
|
||||
}, 500);
|
||||
|
||||
return () => clearInterval(dotsTimer);
|
||||
}, []);
|
||||
|
||||
// Memoize the QR code display to prevent unnecessary re-renders
|
||||
const qrCodeDisplay = useMemo(() => {
|
||||
if (!deviceAuth?.verification_uri_complete) return null;
|
||||
|
||||
return (
|
||||
<QrCodeDisplay
|
||||
verificationUrl={deviceAuth.verification_uri_complete}
|
||||
qrCodeData={qrCodeData}
|
||||
/>
|
||||
);
|
||||
}, [deviceAuth?.verification_uri_complete, qrCodeData]);
|
||||
|
||||
// Handle timeout state
|
||||
if (authStatus === 'timeout') {
|
||||
return (
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor={Colors.AccentRed}
|
||||
flexDirection="column"
|
||||
padding={1}
|
||||
width="100%"
|
||||
>
|
||||
<Text bold color={Colors.AccentRed}>
|
||||
Qwen OAuth Authentication Timeout
|
||||
</Text>
|
||||
|
||||
<Box marginTop={1}>
|
||||
<Text>
|
||||
{authMessage ||
|
||||
`OAuth token expired (over ${defaultTimeout} seconds). Please select authentication method again.`}
|
||||
</Text>
|
||||
</Box>
|
||||
|
||||
<Box marginTop={1}>
|
||||
<Text color={Colors.Gray}>
|
||||
Press any key to return to authentication type selection.
|
||||
</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
// Show loading state when no device auth is available yet
|
||||
if (!deviceAuth) {
|
||||
return (
|
||||
<Box
|
||||
borderStyle="round"
|
||||
borderColor={Colors.Gray}
|
||||
flexDirection="column"
|
||||
padding={1}
|
||||
width="100%"
|
||||
>
|
||||
<Box>
|
||||
<Text>
|
||||
<Spinner type="dots" /> Waiting for Qwen OAuth authentication...
|
||||
</Text>
|
||||
</Box>
|
||||
<Box marginTop={1} justifyContent="space-between">
|
||||
<Text color={Colors.Gray}>
|
||||
Time remaining: {Math.floor(timeRemaining / 60)}:
|
||||
{(timeRemaining % 60).toString().padStart(2, '0')}
|
||||
</Text>
|
||||
<Text color={Colors.AccentPurple}>(Press ESC to cancel)</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" width="100%">
|
||||
{/* Static QR Code Display */}
|
||||
{qrCodeDisplay}
|
||||
|
||||
{/* Dynamic Status Display */}
|
||||
<StatusDisplay timeRemaining={timeRemaining} dots={dots} />
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -51,6 +51,7 @@ export const useSlashCommandProcessor = (
|
||||
openPrivacyNotice: () => void,
|
||||
toggleVimEnabled: () => Promise<boolean>,
|
||||
setIsProcessing: (isProcessing: boolean) => void,
|
||||
setGeminiMdFileCount: (count: number) => void,
|
||||
) => {
|
||||
const session = useSessionStats();
|
||||
const [commands, setCommands] = useState<readonly SlashCommand[]>([]);
|
||||
@@ -163,6 +164,7 @@ export const useSlashCommandProcessor = (
|
||||
setPendingItem: setPendingCompressionItem,
|
||||
toggleCorgiMode,
|
||||
toggleVimEnabled,
|
||||
setGeminiMdFileCount,
|
||||
},
|
||||
session: {
|
||||
stats: session.stats,
|
||||
@@ -187,6 +189,7 @@ export const useSlashCommandProcessor = (
|
||||
toggleCorgiMode,
|
||||
toggleVimEnabled,
|
||||
sessionShellAllowlist,
|
||||
setGeminiMdFileCount,
|
||||
],
|
||||
);
|
||||
|
||||
|
||||
packages/cli/src/ui/hooks/useAtCompletion.test.ts (new file, 380 lines)
@@ -0,0 +1,380 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/** @vitest-environment jsdom */
|
||||
|
||||
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
|
||||
import { renderHook, waitFor, act } from '@testing-library/react';
|
||||
import { useAtCompletion } from './useAtCompletion.js';
|
||||
import { Config, FileSearch } from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
createTmpDir,
|
||||
cleanupTmpDir,
|
||||
FileSystemStructure,
|
||||
} from '@qwen-code/qwen-code-test-utils';
|
||||
import { useState } from 'react';
|
||||
import { Suggestion } from '../components/SuggestionsDisplay.js';
|
||||
|
||||
// Test harness to capture the state from the hook's callbacks.
|
||||
function useTestHarnessForAtCompletion(
|
||||
enabled: boolean,
|
||||
pattern: string,
|
||||
config: Config | undefined,
|
||||
cwd: string,
|
||||
) {
|
||||
const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
|
||||
const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false);
|
||||
|
||||
useAtCompletion({
|
||||
enabled,
|
||||
pattern,
|
||||
config,
|
||||
cwd,
|
||||
setSuggestions,
|
||||
setIsLoadingSuggestions,
|
||||
});
|
||||
|
||||
return { suggestions, isLoadingSuggestions };
|
||||
}
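// The hook reports results through the setSuggestions/setIsLoadingSuggestions
// callbacks rather than a return value, so the harness mirrors them into local
// state that renderHook can observe. Illustrative usage (names as in the tests below):
//
//   const { result } = renderHook(() =>
//     useTestHarnessForAtCompletion(true, 'src/', mockConfig, testRootDir),
//   );
//   await waitFor(() => expect(result.current.suggestions.length).toBeGreaterThan(0));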
|
||||
|
||||
describe('useAtCompletion', () => {
|
||||
let testRootDir: string;
|
||||
let mockConfig: Config;
|
||||
|
||||
beforeEach(() => {
|
||||
mockConfig = {
|
||||
getFileFilteringOptions: vi.fn(() => ({
|
||||
respectGitIgnore: true,
|
||||
respectGeminiIgnore: true,
|
||||
})),
|
||||
} as unknown as Config;
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (testRootDir) {
|
||||
await cleanupTmpDir(testRootDir);
|
||||
}
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe('File Search Logic', () => {
|
||||
it('should perform a recursive search for an empty pattern', async () => {
|
||||
const structure: FileSystemStructure = {
|
||||
'file.txt': '',
|
||||
src: {
|
||||
'index.js': '',
|
||||
components: ['Button.tsx', 'Button with spaces.tsx'],
|
||||
},
|
||||
};
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'src/',
|
||||
'src/components/',
|
||||
'file.txt',
|
||||
'src/components/Button\\ with\\ spaces.tsx',
|
||||
'src/components/Button.tsx',
|
||||
'src/index.js',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should correctly filter the recursive list based on a pattern', async () => {
|
||||
const structure: FileSystemStructure = {
|
||||
'file.txt': '',
|
||||
src: {
|
||||
'index.js': '',
|
||||
components: {
|
||||
'Button.tsx': '',
|
||||
},
|
||||
},
|
||||
};
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForAtCompletion(true, 'src/', mockConfig, testRootDir),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'src/',
|
||||
'src/components/',
|
||||
'src/components/Button.tsx',
|
||||
'src/index.js',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should append a trailing slash to directory paths in suggestions', async () => {
|
||||
const structure: FileSystemStructure = {
|
||||
'file.txt': '',
|
||||
dir: {},
|
||||
};
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'dir/',
|
||||
'file.txt',
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('UI State and Loading Behavior', () => {
|
||||
it('should be in a loading state during initial file system crawl', async () => {
|
||||
testRootDir = await createTmpDir({});
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
|
||||
);
|
||||
|
||||
// It's initially true because the effect runs synchronously.
|
||||
expect(result.current.isLoadingSuggestions).toBe(true);
|
||||
|
||||
// Wait for the loading to complete.
|
||||
await waitFor(() => {
|
||||
expect(result.current.isLoadingSuggestions).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
it('should NOT show a loading indicator for subsequent searches that complete under 100ms', async () => {
|
||||
const structure: FileSystemStructure = { 'a.txt': '', 'b.txt': '' };
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ pattern }) =>
|
||||
useTestHarnessForAtCompletion(true, pattern, mockConfig, testRootDir),
|
||||
{ initialProps: { pattern: 'a' } },
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'a.txt',
|
||||
]);
|
||||
});
|
||||
expect(result.current.isLoadingSuggestions).toBe(false);
|
||||
|
||||
rerender({ pattern: 'b' });
|
||||
|
||||
// Wait for the final result
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'b.txt',
|
||||
]);
|
||||
});
|
||||
|
||||
expect(result.current.isLoadingSuggestions).toBe(false);
|
||||
});
|
||||
|
||||
it('should show a loading indicator and clear old suggestions for subsequent searches that take longer than 100ms', async () => {
|
||||
const structure: FileSystemStructure = { 'a.txt': '', 'b.txt': '' };
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
// Spy on the search method to introduce an artificial delay
|
||||
const originalSearch = FileSearch.prototype.search;
|
||||
vi.spyOn(FileSearch.prototype, 'search').mockImplementation(
|
||||
async function (...args) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 200));
|
||||
return originalSearch.apply(this, args);
|
||||
},
|
||||
);
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ pattern }) =>
|
||||
useTestHarnessForAtCompletion(true, pattern, mockConfig, testRootDir),
|
||||
{ initialProps: { pattern: 'a' } },
|
||||
);
|
||||
|
||||
// Wait for the initial (slow) search to complete
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'a.txt',
|
||||
]);
|
||||
});
|
||||
|
||||
// Now, rerender to trigger the second search
|
||||
rerender({ pattern: 'b' });
|
||||
|
||||
// Wait for the loading indicator to appear
|
||||
await waitFor(() => {
|
||||
expect(result.current.isLoadingSuggestions).toBe(true);
|
||||
});
|
||||
|
||||
// Suggestions should be cleared while loading
|
||||
expect(result.current.suggestions).toEqual([]);
|
||||
|
||||
// Wait for the final (slow) search to complete
|
||||
await waitFor(
|
||||
() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'b.txt',
|
||||
]);
|
||||
},
|
||||
{ timeout: 1000 },
|
||||
); // Increase timeout for the slow search
|
||||
|
||||
expect(result.current.isLoadingSuggestions).toBe(false);
|
||||
});
|
||||
|
||||
it('should abort the previous search when a new one starts', async () => {
|
||||
const structure: FileSystemStructure = { 'a.txt': '', 'b.txt': '' };
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const abortSpy = vi.spyOn(AbortController.prototype, 'abort');
|
||||
const searchSpy = vi
|
||||
.spyOn(FileSearch.prototype, 'search')
|
||||
.mockImplementation(async (...args) => {
|
||||
const delay = args[0] === 'a' ? 500 : 50;
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
return [args[0] as any];
|
||||
});
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ pattern }) =>
|
||||
useTestHarnessForAtCompletion(true, pattern, mockConfig, testRootDir),
|
||||
{ initialProps: { pattern: 'a' } },
|
||||
);
|
||||
|
||||
// Wait for the hook to be ready (initialization is complete)
|
||||
await waitFor(() => {
|
||||
expect(searchSpy).toHaveBeenCalledWith('a', expect.any(Object));
|
||||
});
|
||||
|
||||
// Now that the first search is in-flight, trigger the second one.
|
||||
act(() => {
|
||||
rerender({ pattern: 'b' });
|
||||
});
|
||||
|
||||
// The abort should have been called for the first search.
|
||||
expect(abortSpy).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Wait for the final result, which should be from the second, faster search.
|
||||
await waitFor(
|
||||
() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual(['b']);
|
||||
},
|
||||
{ timeout: 1000 },
|
||||
);
|
||||
|
||||
// The search spy should have been called for both patterns.
|
||||
expect(searchSpy).toHaveBeenCalledWith('b', expect.any(Object));
|
||||
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Filtering and Configuration', () => {
|
||||
it('should respect .gitignore files', async () => {
|
||||
const gitignoreContent = ['dist/', '*.log'].join('\n');
|
||||
const structure: FileSystemStructure = {
|
||||
'.git': {},
|
||||
'.gitignore': gitignoreContent,
|
||||
dist: {},
|
||||
'test.log': '',
|
||||
src: {},
|
||||
};
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'src/',
|
||||
'.gitignore',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should work correctly when config is undefined', async () => {
|
||||
const structure: FileSystemStructure = {
|
||||
node_modules: {},
|
||||
src: {},
|
||||
};
|
||||
testRootDir = await createTmpDir(structure);
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForAtCompletion(true, '', undefined, testRootDir),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'node_modules/',
|
||||
'src/',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should reset and re-initialize when the cwd changes', async () => {
|
||||
const structure1: FileSystemStructure = { 'file1.txt': '' };
|
||||
const rootDir1 = await createTmpDir(structure1);
|
||||
const structure2: FileSystemStructure = { 'file2.txt': '' };
|
||||
const rootDir2 = await createTmpDir(structure2);
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ cwd, pattern }) =>
|
||||
useTestHarnessForAtCompletion(true, pattern, mockConfig, cwd),
|
||||
{
|
||||
initialProps: {
|
||||
cwd: rootDir1,
|
||||
pattern: 'file',
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
// Wait for initial suggestions from the first directory
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'file1.txt',
|
||||
]);
|
||||
});
|
||||
|
||||
// Change the CWD
|
||||
act(() => {
|
||||
rerender({ cwd: rootDir2, pattern: 'file' });
|
||||
});
|
||||
|
||||
// After CWD changes, suggestions should be cleared and it should load again.
|
||||
await waitFor(() => {
|
||||
expect(result.current.isLoadingSuggestions).toBe(true);
|
||||
expect(result.current.suggestions).toEqual([]);
|
||||
});
|
||||
|
||||
// Wait for the new suggestions from the second directory
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions.map((s) => s.value)).toEqual([
|
||||
'file2.txt',
|
||||
]);
|
||||
});
|
||||
expect(result.current.isLoadingSuggestions).toBe(false);
|
||||
|
||||
await cleanupTmpDir(rootDir1);
|
||||
await cleanupTmpDir(rootDir2);
|
||||
});
|
||||
});
|
||||
});
|
||||
packages/cli/src/ui/hooks/useAtCompletion.ts (new file, 235 lines)
@@ -0,0 +1,235 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { useEffect, useReducer, useRef } from 'react';
import { Config, FileSearch, escapePath } from '@qwen-code/qwen-code-core';
import {
  Suggestion,
  MAX_SUGGESTIONS_TO_SHOW,
} from '../components/SuggestionsDisplay.js';

export enum AtCompletionStatus {
  IDLE = 'idle',
  INITIALIZING = 'initializing',
  READY = 'ready',
  SEARCHING = 'searching',
  ERROR = 'error',
}

interface AtCompletionState {
  status: AtCompletionStatus;
  suggestions: Suggestion[];
  isLoading: boolean;
  pattern: string | null;
}

type AtCompletionAction =
  | { type: 'INITIALIZE' }
  | { type: 'INITIALIZE_SUCCESS' }
  | { type: 'SEARCH'; payload: string }
  | { type: 'SEARCH_SUCCESS'; payload: Suggestion[] }
  | { type: 'SET_LOADING'; payload: boolean }
  | { type: 'ERROR' }
  | { type: 'RESET' };

const initialState: AtCompletionState = {
  status: AtCompletionStatus.IDLE,
  suggestions: [],
  isLoading: false,
  pattern: null,
};

function atCompletionReducer(
  state: AtCompletionState,
  action: AtCompletionAction,
): AtCompletionState {
  switch (action.type) {
    case 'INITIALIZE':
      return {
        ...state,
        status: AtCompletionStatus.INITIALIZING,
        isLoading: true,
      };
    case 'INITIALIZE_SUCCESS':
      return { ...state, status: AtCompletionStatus.READY, isLoading: false };
    case 'SEARCH':
      // Keep old suggestions, don't set loading immediately
      return {
        ...state,
        status: AtCompletionStatus.SEARCHING,
        pattern: action.payload,
      };
    case 'SEARCH_SUCCESS':
      return {
        ...state,
        status: AtCompletionStatus.READY,
        suggestions: action.payload,
        isLoading: false,
      };
    case 'SET_LOADING':
      // Only show loading if we are still in a searching state
      if (state.status === AtCompletionStatus.SEARCHING) {
        return { ...state, isLoading: action.payload, suggestions: [] };
      }
      return state;
    case 'ERROR':
      return {
        ...state,
        status: AtCompletionStatus.ERROR,
        isLoading: false,
        suggestions: [],
      };
    case 'RESET':
      return initialState;
    default:
      return state;
  }
}

export interface UseAtCompletionProps {
  enabled: boolean;
  pattern: string;
  config: Config | undefined;
  cwd: string;
  setSuggestions: (suggestions: Suggestion[]) => void;
  setIsLoadingSuggestions: (isLoading: boolean) => void;
}

export function useAtCompletion(props: UseAtCompletionProps): void {
  const {
    enabled,
    pattern,
    config,
    cwd,
    setSuggestions,
    setIsLoadingSuggestions,
  } = props;
  const [state, dispatch] = useReducer(atCompletionReducer, initialState);
  const fileSearch = useRef<FileSearch | null>(null);
  const searchAbortController = useRef<AbortController | null>(null);
  const slowSearchTimer = useRef<NodeJS.Timeout | null>(null);

  useEffect(() => {
    setSuggestions(state.suggestions);
  }, [state.suggestions, setSuggestions]);

  useEffect(() => {
    setIsLoadingSuggestions(state.isLoading);
  }, [state.isLoading, setIsLoadingSuggestions]);

  useEffect(() => {
    dispatch({ type: 'RESET' });
  }, [cwd, config]);

  // Reacts to user input (`pattern`) ONLY.
  useEffect(() => {
    if (!enabled) {
      // reset when first getting out of completion suggestions
      if (
        state.status === AtCompletionStatus.READY ||
        state.status === AtCompletionStatus.ERROR
      ) {
        dispatch({ type: 'RESET' });
      }
      return;
    }
    if (pattern === null) {
      dispatch({ type: 'RESET' });
      return;
    }

    if (state.status === AtCompletionStatus.IDLE) {
      dispatch({ type: 'INITIALIZE' });
    } else if (
      (state.status === AtCompletionStatus.READY ||
        state.status === AtCompletionStatus.SEARCHING) &&
      pattern !== state.pattern // Only search if the pattern has changed
    ) {
      dispatch({ type: 'SEARCH', payload: pattern });
    }
  }, [enabled, pattern, state.status, state.pattern]);

  // The "Worker" that performs async operations based on status.
  useEffect(() => {
    const initialize = async () => {
      try {
        const searcher = new FileSearch({
          projectRoot: cwd,
          ignoreDirs: [],
          useGitignore:
            config?.getFileFilteringOptions()?.respectGitIgnore ?? true,
          useGeminiignore:
            config?.getFileFilteringOptions()?.respectGeminiIgnore ?? true,
          cache: true,
          cacheTtl: 30, // 30 seconds
        });
        await searcher.initialize();
        fileSearch.current = searcher;
        dispatch({ type: 'INITIALIZE_SUCCESS' });
        if (state.pattern !== null) {
          dispatch({ type: 'SEARCH', payload: state.pattern });
        }
      } catch (_) {
        dispatch({ type: 'ERROR' });
      }
    };

    const search = async () => {
      if (!fileSearch.current || state.pattern === null) {
        return;
      }

      if (slowSearchTimer.current) {
        clearTimeout(slowSearchTimer.current);
      }

      const controller = new AbortController();
      searchAbortController.current = controller;

      slowSearchTimer.current = setTimeout(() => {
        dispatch({ type: 'SET_LOADING', payload: true });
      }, 100);

      try {
        const results = await fileSearch.current.search(state.pattern, {
          signal: controller.signal,
          maxResults: MAX_SUGGESTIONS_TO_SHOW * 3,
        });

        if (slowSearchTimer.current) {
          clearTimeout(slowSearchTimer.current);
        }

        if (controller.signal.aborted) {
          return;
        }

        const suggestions = results.map((p) => ({
          label: p,
          value: escapePath(p),
        }));
        dispatch({ type: 'SEARCH_SUCCESS', payload: suggestions });
      } catch (error) {
        if (!(error instanceof Error && error.name === 'AbortError')) {
          dispatch({ type: 'ERROR' });
        }
      }
    };

    if (state.status === AtCompletionStatus.INITIALIZING) {
      initialize();
    } else if (state.status === AtCompletionStatus.SEARCHING) {
      search();
    }

    return () => {
      searchAbortController.current?.abort();
      if (slowSearchTimer.current) {
        clearTimeout(slowSearchTimer.current);
      }
    };
  }, [state.status, state.pattern, config, cwd]);
}
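For orientation, a minimal consumer of the hook above might look like the following sketch. It is not part of this change: the AtSuggestions component, its props, and the assumption that the surrounding UI is Ink-based are illustrative only; the hook itself only reports results through the two setter callbacks, so the consumer owns the state.

import React, { useState } from 'react';
import { Box, Text } from 'ink';
import { Config } from '@qwen-code/qwen-code-core';
import { useAtCompletion } from './useAtCompletion.js';
import { Suggestion } from '../components/SuggestionsDisplay.js';

interface AtSuggestionsProps {
  // Text typed after the '@' trigger, e.g. 'src/comp'.
  pattern: string;
  config: Config | undefined;
  cwd: string;
}

// Renders file-path suggestions for the current '@' pattern.
export function AtSuggestions({ pattern, config, cwd }: AtSuggestionsProps) {
  const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
  const [isLoading, setIsLoading] = useState(false);

  useAtCompletion({
    enabled: true,
    pattern,
    config,
    cwd,
    setSuggestions,
    setIsLoadingSuggestions: setIsLoading,
  });

  if (isLoading) {
    return <Text dimColor>Searching…</Text>;
  }
  return (
    <Box flexDirection="column">
      {suggestions.map((s) => (
        <Text key={s.value}>{s.label}</Text>
      ))}
    </Box>
  );
}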
File diff suppressed because it is too large.
@@ -4,20 +4,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import { useEffect, useCallback, useMemo, useRef } from 'react';
import * as fs from 'fs/promises';
import * as path from 'path';
import { glob } from 'glob';
import {
  isNodeError,
  escapePath,
  unescapePath,
  getErrorMessage,
  Config,
  FileDiscoveryService,
  DEFAULT_FILE_FILTERING_OPTIONS,
  SHELL_SPECIAL_CHARS,
} from '@qwen-code/qwen-code-core';
import { useCallback, useMemo, useEffect } from 'react';
import { Suggestion } from '../components/SuggestionsDisplay.js';
import { CommandContext, SlashCommand } from '../commands/types.js';
import {
@@ -26,8 +13,17 @@ import {
} from '../components/shared/text-buffer.js';
import { isSlashCommand } from '../utils/commandUtils.js';
import { toCodePoints } from '../utils/textUtils.js';
import { useAtCompletion } from './useAtCompletion.js';
import { useSlashCompletion } from './useSlashCompletion.js';
import { Config } from '@qwen-code/qwen-code-core';
import { useCompletion } from './useCompletion.js';

export enum CompletionMode {
  IDLE = 'IDLE',
  AT = 'AT',
  SLASH = 'SLASH',
}

export interface UseCommandCompletionReturn {
  suggestions: Suggestion[];
  activeSuggestionIndex: number;
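The hunks that follow replace the old inline file-system crawl with a completionMode switch: the current input is classified once, and the result gates useAtCompletion and useSlashCompletion. The standalone sketch below shows only the classification idea; it is illustrative rather than the diff's code, it redeclares the enum locally, and it simplifies the real parsing, which also tracks the cursor row, backslash parity for escaped spaces, and the exact replacement range.

// Illustrative only: classify a prompt line into the completion mode the
// refactor dispatches on.
enum CompletionMode {
  IDLE = 'IDLE',
  AT = 'AT',
  SLASH = 'SLASH',
}

function classify(line: string, cursorCol: number): CompletionMode {
  if (line.trimStart().startsWith('/')) {
    return CompletionMode.SLASH;
  }
  // Walk back from the cursor; an unescaped space ends the candidate token.
  for (let i = cursorCol - 1; i >= 0; i--) {
    const ch = line[i];
    if (ch === ' ' && line[i - 1] !== '\\') {
      return CompletionMode.IDLE;
    }
    if (ch === '@') {
      return CompletionMode.AT;
    }
  }
  return CompletionMode.IDLE;
}

// Example: classify('look at @src/comp', 17) === CompletionMode.AT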
@@ -72,541 +68,109 @@ export function useCommandCompletion(
|
||||
navigateDown,
|
||||
} = useCompletion();
|
||||
|
||||
const completionStart = useRef(-1);
|
||||
const completionEnd = useRef(-1);
|
||||
|
||||
const cursorRow = buffer.cursor[0];
|
||||
const cursorCol = buffer.cursor[1];
|
||||
|
||||
// Check if cursor is after @ or / without unescaped spaces
|
||||
const commandIndex = useMemo(() => {
|
||||
const currentLine = buffer.lines[cursorRow] || '';
|
||||
if (cursorRow === 0 && isSlashCommand(currentLine.trim())) {
|
||||
return currentLine.indexOf('/');
|
||||
}
|
||||
|
||||
// For other completions like '@', we search backwards from the cursor.
|
||||
|
||||
const codePoints = toCodePoints(currentLine);
|
||||
for (let i = cursorCol - 1; i >= 0; i--) {
|
||||
const char = codePoints[i];
|
||||
|
||||
if (char === ' ') {
|
||||
// Check for unescaped spaces.
|
||||
let backslashCount = 0;
|
||||
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
|
||||
backslashCount++;
|
||||
}
|
||||
if (backslashCount % 2 === 0) {
|
||||
return -1; // Inactive on unescaped space.
|
||||
}
|
||||
} else if (char === '@') {
|
||||
// Active if we find an '@' before any unescaped space.
|
||||
return i;
|
||||
const { completionMode, query, completionStart, completionEnd } =
|
||||
useMemo(() => {
|
||||
const currentLine = buffer.lines[cursorRow] || '';
|
||||
if (cursorRow === 0 && isSlashCommand(currentLine.trim())) {
|
||||
return {
|
||||
completionMode: CompletionMode.SLASH,
|
||||
query: currentLine,
|
||||
completionStart: 0,
|
||||
completionEnd: currentLine.length,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}, [cursorRow, cursorCol, buffer.lines]);
|
||||
const codePoints = toCodePoints(currentLine);
|
||||
for (let i = cursorCol - 1; i >= 0; i--) {
|
||||
const char = codePoints[i];
|
||||
|
||||
if (char === ' ') {
|
||||
let backslashCount = 0;
|
||||
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
|
||||
backslashCount++;
|
||||
}
|
||||
if (backslashCount % 2 === 0) {
|
||||
return {
|
||||
completionMode: CompletionMode.IDLE,
|
||||
query: null,
|
||||
completionStart: -1,
|
||||
completionEnd: -1,
|
||||
};
|
||||
}
|
||||
} else if (char === '@') {
|
||||
let end = codePoints.length;
|
||||
for (let i = cursorCol; i < codePoints.length; i++) {
|
||||
if (codePoints[i] === ' ') {
|
||||
let backslashCount = 0;
|
||||
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
|
||||
backslashCount++;
|
||||
}
|
||||
|
||||
if (backslashCount % 2 === 0) {
|
||||
end = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
const pathStart = i + 1;
|
||||
const partialPath = currentLine.substring(pathStart, end);
|
||||
return {
|
||||
completionMode: CompletionMode.AT,
|
||||
query: partialPath,
|
||||
completionStart: pathStart,
|
||||
completionEnd: end,
|
||||
};
|
||||
}
|
||||
}
|
||||
return {
|
||||
completionMode: CompletionMode.IDLE,
|
||||
query: null,
|
||||
completionStart: -1,
|
||||
completionEnd: -1,
|
||||
};
|
||||
}, [cursorRow, cursorCol, buffer.lines]);
|
||||
|
||||
useAtCompletion({
|
||||
enabled: completionMode === CompletionMode.AT,
|
||||
pattern: query || '',
|
||||
config,
|
||||
cwd,
|
||||
setSuggestions,
|
||||
setIsLoadingSuggestions,
|
||||
});
|
||||
|
||||
const slashCompletionRange = useSlashCompletion({
|
||||
enabled: completionMode === CompletionMode.SLASH,
|
||||
query,
|
||||
slashCommands,
|
||||
commandContext,
|
||||
setSuggestions,
|
||||
setIsLoadingSuggestions,
|
||||
setIsPerfectMatch,
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
if (commandIndex === -1 || reverseSearchActive) {
|
||||
setTimeout(resetCompletionState, 0);
|
||||
return;
|
||||
}
|
||||
setActiveSuggestionIndex(suggestions.length > 0 ? 0 : -1);
|
||||
setVisibleStartIndex(0);
|
||||
}, [suggestions, setActiveSuggestionIndex, setVisibleStartIndex]);
|
||||
|
||||
const currentLine = buffer.lines[cursorRow] || '';
|
||||
const codePoints = toCodePoints(currentLine);
|
||||
|
||||
if (codePoints[commandIndex] === '/') {
|
||||
// Always reset perfect match at the beginning of processing.
|
||||
setIsPerfectMatch(false);
|
||||
|
||||
const fullPath = currentLine.substring(commandIndex + 1);
|
||||
const hasTrailingSpace = currentLine.endsWith(' ');
|
||||
|
||||
// Get all non-empty parts of the command.
|
||||
const rawParts = fullPath.split(/\s+/).filter((p) => p);
|
||||
|
||||
let commandPathParts = rawParts;
|
||||
let partial = '';
|
||||
|
||||
// If there's no trailing space, the last part is potentially a partial segment.
|
||||
// We tentatively separate it.
|
||||
if (!hasTrailingSpace && rawParts.length > 0) {
|
||||
partial = rawParts[rawParts.length - 1];
|
||||
commandPathParts = rawParts.slice(0, -1);
|
||||
}
|
||||
|
||||
// Traverse the Command Tree using the tentative completed path
|
||||
let currentLevel: readonly SlashCommand[] | undefined = slashCommands;
|
||||
let leafCommand: SlashCommand | null = null;
|
||||
|
||||
for (const part of commandPathParts) {
|
||||
if (!currentLevel) {
|
||||
leafCommand = null;
|
||||
currentLevel = [];
|
||||
break;
|
||||
}
|
||||
const found: SlashCommand | undefined = currentLevel.find(
|
||||
(cmd) => cmd.name === part || cmd.altNames?.includes(part),
|
||||
);
|
||||
if (found) {
|
||||
leafCommand = found;
|
||||
currentLevel = found.subCommands as
|
||||
| readonly SlashCommand[]
|
||||
| undefined;
|
||||
} else {
|
||||
leafCommand = null;
|
||||
currentLevel = [];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let exactMatchAsParent: SlashCommand | undefined;
|
||||
// Handle the Ambiguous Case
|
||||
if (!hasTrailingSpace && currentLevel) {
|
||||
exactMatchAsParent = currentLevel.find(
|
||||
(cmd) =>
|
||||
(cmd.name === partial || cmd.altNames?.includes(partial)) &&
|
||||
cmd.subCommands,
|
||||
);
|
||||
|
||||
if (exactMatchAsParent) {
|
||||
// It's a perfect match for a parent command. Override our initial guess.
|
||||
// Treat it as a completed command path.
|
||||
leafCommand = exactMatchAsParent;
|
||||
currentLevel = exactMatchAsParent.subCommands;
|
||||
partial = ''; // We now want to suggest ALL of its sub-commands.
|
||||
}
|
||||
}
|
||||
|
||||
// Check for perfect, executable match
|
||||
if (!hasTrailingSpace) {
|
||||
if (leafCommand && partial === '' && leafCommand.action) {
|
||||
// Case: /command<enter> - command has action, no sub-commands were suggested
|
||||
setIsPerfectMatch(true);
|
||||
} else if (currentLevel) {
|
||||
// Case: /command subcommand<enter>
|
||||
const perfectMatch = currentLevel.find(
|
||||
(cmd) =>
|
||||
(cmd.name === partial || cmd.altNames?.includes(partial)) &&
|
||||
cmd.action,
|
||||
);
|
||||
if (perfectMatch) {
|
||||
setIsPerfectMatch(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const depth = commandPathParts.length;
|
||||
const isArgumentCompletion =
|
||||
leafCommand?.completion &&
|
||||
(hasTrailingSpace ||
|
||||
(rawParts.length > depth && depth > 0 && partial !== ''));
|
||||
|
||||
// Set completion range
|
||||
if (hasTrailingSpace || exactMatchAsParent) {
|
||||
completionStart.current = currentLine.length;
|
||||
completionEnd.current = currentLine.length;
|
||||
} else if (partial) {
|
||||
if (isArgumentCompletion) {
|
||||
const commandSoFar = `/${commandPathParts.join(' ')}`;
|
||||
const argStartIndex =
|
||||
commandSoFar.length + (commandPathParts.length > 0 ? 1 : 0);
|
||||
completionStart.current = argStartIndex;
|
||||
} else {
|
||||
completionStart.current = currentLine.length - partial.length;
|
||||
}
|
||||
completionEnd.current = currentLine.length;
|
||||
} else {
|
||||
// e.g. /
|
||||
completionStart.current = commandIndex + 1;
|
||||
completionEnd.current = currentLine.length;
|
||||
}
|
||||
|
||||
// Provide Suggestions based on the now-corrected context
|
||||
if (isArgumentCompletion) {
|
||||
const fetchAndSetSuggestions = async () => {
|
||||
setIsLoadingSuggestions(true);
|
||||
const argString = rawParts.slice(depth).join(' ');
|
||||
const results =
|
||||
(await leafCommand!.completion!(commandContext, argString)) || [];
|
||||
const finalSuggestions = results.map((s) => ({ label: s, value: s }));
|
||||
setSuggestions(finalSuggestions);
|
||||
setShowSuggestions(finalSuggestions.length > 0);
|
||||
setActiveSuggestionIndex(finalSuggestions.length > 0 ? 0 : -1);
|
||||
setIsLoadingSuggestions(false);
|
||||
};
|
||||
fetchAndSetSuggestions();
|
||||
return;
|
||||
}
|
||||
|
||||
// Command/Sub-command Completion
|
||||
const commandsToSearch = currentLevel || [];
|
||||
if (commandsToSearch.length > 0) {
|
||||
let potentialSuggestions = commandsToSearch.filter(
|
||||
(cmd) =>
|
||||
cmd.description &&
|
||||
(cmd.name.startsWith(partial) ||
|
||||
cmd.altNames?.some((alt) => alt.startsWith(partial))),
|
||||
);
|
||||
|
||||
// If a user's input is an exact match and it is a leaf command,
|
||||
// enter should submit immediately.
|
||||
if (potentialSuggestions.length > 0 && !hasTrailingSpace) {
|
||||
const perfectMatch = potentialSuggestions.find(
|
||||
(s) => s.name === partial || s.altNames?.includes(partial),
|
||||
);
|
||||
if (perfectMatch && perfectMatch.action) {
|
||||
potentialSuggestions = [];
|
||||
}
|
||||
}
|
||||
|
||||
const finalSuggestions = potentialSuggestions.map((cmd) => ({
|
||||
label: cmd.name,
|
||||
value: cmd.name,
|
||||
description: cmd.description,
|
||||
}));
|
||||
|
||||
setSuggestions(finalSuggestions);
|
||||
setShowSuggestions(finalSuggestions.length > 0);
|
||||
setActiveSuggestionIndex(finalSuggestions.length > 0 ? 0 : -1);
|
||||
setIsLoadingSuggestions(false);
|
||||
return;
|
||||
}
|
||||
|
||||
// If we fall through, no suggestions are available.
|
||||
useEffect(() => {
|
||||
if (completionMode === CompletionMode.IDLE || reverseSearchActive) {
|
||||
resetCompletionState();
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle At Command Completion
|
||||
completionEnd.current = codePoints.length;
|
||||
for (let i = cursorCol; i < codePoints.length; i++) {
|
||||
if (codePoints[i] === ' ') {
|
||||
let backslashCount = 0;
|
||||
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
|
||||
backslashCount++;
|
||||
}
|
||||
|
||||
if (backslashCount % 2 === 0) {
|
||||
completionEnd.current = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const pathStart = commandIndex + 1;
|
||||
const partialPath = currentLine.substring(pathStart, completionEnd.current);
|
||||
const lastSlashIndex = partialPath.lastIndexOf('/');
|
||||
completionStart.current =
|
||||
lastSlashIndex === -1 ? pathStart : pathStart + lastSlashIndex + 1;
|
||||
const baseDirRelative =
|
||||
lastSlashIndex === -1
|
||||
? '.'
|
||||
: partialPath.substring(0, lastSlashIndex + 1);
|
||||
const prefix = unescapePath(
|
||||
lastSlashIndex === -1
|
||||
? partialPath
|
||||
: partialPath.substring(lastSlashIndex + 1),
|
||||
);
|
||||
|
||||
let isMounted = true;
|
||||
|
||||
const findFilesRecursively = async (
|
||||
startDir: string,
|
||||
searchPrefix: string,
|
||||
fileDiscovery: FileDiscoveryService | null,
|
||||
filterOptions: {
|
||||
respectGitIgnore?: boolean;
|
||||
respectGeminiIgnore?: boolean;
|
||||
},
|
||||
currentRelativePath = '',
|
||||
depth = 0,
|
||||
maxDepth = 10, // Limit recursion depth
|
||||
maxResults = 50, // Limit number of results
|
||||
): Promise<Suggestion[]> => {
|
||||
if (depth > maxDepth) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const lowerSearchPrefix = searchPrefix.toLowerCase();
|
||||
let foundSuggestions: Suggestion[] = [];
|
||||
try {
|
||||
const entries = await fs.readdir(startDir, { withFileTypes: true });
|
||||
for (const entry of entries) {
|
||||
if (foundSuggestions.length >= maxResults) break;
|
||||
|
||||
const entryPathRelative = path.join(currentRelativePath, entry.name);
|
||||
const entryPathFromRoot = path.relative(
|
||||
startDir,
|
||||
path.join(startDir, entry.name),
|
||||
);
|
||||
|
||||
// Conditionally ignore dotfiles
|
||||
if (!searchPrefix.startsWith('.') && entry.name.startsWith('.')) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if this entry should be ignored by filtering options
|
||||
if (
|
||||
fileDiscovery &&
|
||||
fileDiscovery.shouldIgnoreFile(entryPathFromRoot, filterOptions)
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry.name.toLowerCase().startsWith(lowerSearchPrefix)) {
|
||||
foundSuggestions.push({
|
||||
label: entryPathRelative + (entry.isDirectory() ? '/' : ''),
|
||||
value: escapePath(
|
||||
entryPathRelative + (entry.isDirectory() ? '/' : ''),
|
||||
),
|
||||
});
|
||||
}
|
||||
if (
|
||||
entry.isDirectory() &&
|
||||
entry.name !== 'node_modules' &&
|
||||
!entry.name.startsWith('.')
|
||||
) {
|
||||
if (foundSuggestions.length < maxResults) {
|
||||
foundSuggestions = foundSuggestions.concat(
|
||||
await findFilesRecursively(
|
||||
path.join(startDir, entry.name),
|
||||
searchPrefix, // Pass original searchPrefix for recursive calls
|
||||
fileDiscovery,
|
||||
filterOptions,
|
||||
entryPathRelative,
|
||||
depth + 1,
|
||||
maxDepth,
|
||||
maxResults - foundSuggestions.length,
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (_err) {
|
||||
// Ignore errors like permission denied or ENOENT during recursive search
|
||||
}
|
||||
return foundSuggestions.slice(0, maxResults);
|
||||
};
|
||||
|
||||
const findFilesWithGlob = async (
|
||||
searchPrefix: string,
|
||||
fileDiscoveryService: FileDiscoveryService,
|
||||
filterOptions: {
|
||||
respectGitIgnore?: boolean;
|
||||
respectGeminiIgnore?: boolean;
|
||||
},
|
||||
searchDir: string,
|
||||
maxResults = 50,
|
||||
): Promise<Suggestion[]> => {
|
||||
const globPattern = `**/${searchPrefix}*`;
|
||||
const files = await glob(globPattern, {
|
||||
cwd: searchDir,
|
||||
dot: searchPrefix.startsWith('.'),
|
||||
nocase: true,
|
||||
});
|
||||
|
||||
const suggestions: Suggestion[] = files
|
||||
.filter((file) => {
|
||||
if (fileDiscoveryService) {
|
||||
return !fileDiscoveryService.shouldIgnoreFile(file, filterOptions);
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.map((file: string) => {
|
||||
const absolutePath = path.resolve(searchDir, file);
|
||||
const label = path.relative(cwd, absolutePath);
|
||||
return {
|
||||
label,
|
||||
value: escapePath(label),
|
||||
};
|
||||
})
|
||||
.slice(0, maxResults);
|
||||
|
||||
return suggestions;
|
||||
};
|
||||
|
||||
const fetchSuggestions = async () => {
|
||||
setIsLoadingSuggestions(true);
|
||||
let fetchedSuggestions: Suggestion[] = [];
|
||||
|
||||
const fileDiscoveryService = config ? config.getFileService() : null;
|
||||
const enableRecursiveSearch =
|
||||
config?.getEnableRecursiveFileSearch() ?? true;
|
||||
const filterOptions =
|
||||
config?.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS;
|
||||
|
||||
try {
|
||||
// If there's no slash, or it's the root, do a recursive search from workspace directories
|
||||
for (const dir of dirs) {
|
||||
let fetchedSuggestionsPerDir: Suggestion[] = [];
|
||||
if (
|
||||
partialPath.indexOf('/') === -1 &&
|
||||
prefix &&
|
||||
enableRecursiveSearch
|
||||
) {
|
||||
if (fileDiscoveryService) {
|
||||
fetchedSuggestionsPerDir = await findFilesWithGlob(
|
||||
prefix,
|
||||
fileDiscoveryService,
|
||||
filterOptions,
|
||||
dir,
|
||||
);
|
||||
} else {
|
||||
fetchedSuggestionsPerDir = await findFilesRecursively(
|
||||
dir,
|
||||
prefix,
|
||||
null,
|
||||
filterOptions,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Original behavior: list files in the specific directory
|
||||
const lowerPrefix = prefix.toLowerCase();
|
||||
const baseDirAbsolute = path.resolve(dir, baseDirRelative);
|
||||
const entries = await fs.readdir(baseDirAbsolute, {
|
||||
withFileTypes: true,
|
||||
});
|
||||
|
||||
// Filter entries using git-aware filtering
|
||||
const filteredEntries = [];
|
||||
for (const entry of entries) {
|
||||
// Conditionally ignore dotfiles
|
||||
if (!prefix.startsWith('.') && entry.name.startsWith('.')) {
|
||||
continue;
|
||||
}
|
||||
if (!entry.name.toLowerCase().startsWith(lowerPrefix)) continue;
|
||||
|
||||
const relativePath = path.relative(
|
||||
dir,
|
||||
path.join(baseDirAbsolute, entry.name),
|
||||
);
|
||||
if (
|
||||
fileDiscoveryService &&
|
||||
fileDiscoveryService.shouldIgnoreFile(
|
||||
relativePath,
|
||||
filterOptions,
|
||||
)
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
|
||||
filteredEntries.push(entry);
|
||||
}
|
||||
|
||||
fetchedSuggestionsPerDir = filteredEntries.map((entry) => {
|
||||
const absolutePath = path.resolve(baseDirAbsolute, entry.name);
|
||||
const label =
|
||||
cwd === dir ? entry.name : path.relative(cwd, absolutePath);
|
||||
const suggestionLabel = entry.isDirectory() ? label + '/' : label;
|
||||
return {
|
||||
label: suggestionLabel,
|
||||
value: escapePath(suggestionLabel),
|
||||
};
|
||||
});
|
||||
}
|
||||
fetchedSuggestions = [
|
||||
...fetchedSuggestions,
|
||||
...fetchedSuggestionsPerDir,
|
||||
];
|
||||
}
|
||||
|
||||
// Like glob, we always return forward slashes for path separators, even on Windows.
|
||||
// But preserve backslash escaping for special characters.
|
||||
const specialCharsLookahead = `(?![${SHELL_SPECIAL_CHARS.source.slice(1, -1)}])`;
|
||||
const pathSeparatorRegex = new RegExp(
|
||||
`\\\\${specialCharsLookahead}`,
|
||||
'g',
|
||||
);
|
||||
fetchedSuggestions = fetchedSuggestions.map((suggestion) => ({
|
||||
...suggestion,
|
||||
label: suggestion.label.replace(pathSeparatorRegex, '/'),
|
||||
value: suggestion.value.replace(pathSeparatorRegex, '/'),
|
||||
}));
|
||||
|
||||
// Sort by depth, then directories first, then alphabetically
|
||||
fetchedSuggestions.sort((a, b) => {
|
||||
const depthA = (a.label.match(/\//g) || []).length;
|
||||
const depthB = (b.label.match(/\//g) || []).length;
|
||||
|
||||
if (depthA !== depthB) {
|
||||
return depthA - depthB;
|
||||
}
|
||||
|
||||
const aIsDir = a.label.endsWith('/');
|
||||
const bIsDir = b.label.endsWith('/');
|
||||
if (aIsDir && !bIsDir) return -1;
|
||||
if (!aIsDir && bIsDir) return 1;
|
||||
|
||||
// exclude extension when comparing
|
||||
const filenameA = a.label.substring(
|
||||
0,
|
||||
a.label.length - path.extname(a.label).length,
|
||||
);
|
||||
const filenameB = b.label.substring(
|
||||
0,
|
||||
b.label.length - path.extname(b.label).length,
|
||||
);
|
||||
|
||||
return (
|
||||
filenameA.localeCompare(filenameB) || a.label.localeCompare(b.label)
|
||||
);
|
||||
});
|
||||
|
||||
if (isMounted) {
|
||||
setSuggestions(fetchedSuggestions);
|
||||
setShowSuggestions(fetchedSuggestions.length > 0);
|
||||
setActiveSuggestionIndex(fetchedSuggestions.length > 0 ? 0 : -1);
|
||||
setVisibleStartIndex(0);
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
if (isNodeError(error) && error.code === 'ENOENT') {
|
||||
if (isMounted) {
|
||||
setSuggestions([]);
|
||||
setShowSuggestions(false);
|
||||
}
|
||||
} else {
|
||||
console.error(
|
||||
`Error fetching completion suggestions for ${partialPath}: ${getErrorMessage(error)}`,
|
||||
);
|
||||
if (isMounted) {
|
||||
resetCompletionState();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (isMounted) {
|
||||
setIsLoadingSuggestions(false);
|
||||
}
|
||||
};
|
||||
|
||||
const debounceTimeout = setTimeout(fetchSuggestions, 100);
|
||||
|
||||
return () => {
|
||||
isMounted = false;
|
||||
clearTimeout(debounceTimeout);
|
||||
};
|
||||
// Show suggestions if we are loading OR if there are results to display.
|
||||
setShowSuggestions(isLoadingSuggestions || suggestions.length > 0);
|
||||
}, [
|
||||
buffer.text,
|
||||
cursorRow,
|
||||
cursorCol,
|
||||
buffer.lines,
|
||||
dirs,
|
||||
cwd,
|
||||
commandIndex,
|
||||
resetCompletionState,
|
||||
slashCommands,
|
||||
commandContext,
|
||||
config,
|
||||
completionMode,
|
||||
suggestions.length,
|
||||
isLoadingSuggestions,
|
||||
reverseSearchActive,
|
||||
setSuggestions,
|
||||
resetCompletionState,
|
||||
setShowSuggestions,
|
||||
setActiveSuggestionIndex,
|
||||
setIsLoadingSuggestions,
|
||||
setIsPerfectMatch,
|
||||
setVisibleStartIndex,
|
||||
]);
|
||||
|
||||
const handleAutocomplete = useCallback(
|
||||
@@ -616,18 +180,23 @@ export function useCommandCompletion(
|
||||
}
|
||||
const suggestion = suggestions[indexToUse].value;
|
||||
|
||||
if (completionStart.current === -1 || completionEnd.current === -1) {
|
||||
let start = completionStart;
|
||||
let end = completionEnd;
|
||||
if (completionMode === CompletionMode.SLASH) {
|
||||
start = slashCompletionRange.completionStart;
|
||||
end = slashCompletionRange.completionEnd;
|
||||
}
|
||||
|
||||
if (start === -1 || end === -1) {
|
||||
return;
|
||||
}
|
||||
|
||||
const isSlash = (buffer.lines[cursorRow] || '')[commandIndex] === '/';
|
||||
let suggestionText = suggestion;
|
||||
if (isSlash) {
|
||||
// If we are inserting (not replacing), and the preceding character is not a space, add one.
|
||||
if (completionMode === CompletionMode.SLASH) {
|
||||
if (
|
||||
completionStart.current === completionEnd.current &&
|
||||
completionStart.current > commandIndex + 1 &&
|
||||
(buffer.lines[cursorRow] || '')[completionStart.current - 1] !== ' '
|
||||
start === end &&
|
||||
start > 1 &&
|
||||
(buffer.lines[cursorRow] || '')[start - 1] !== ' '
|
||||
) {
|
||||
suggestionText = ' ' + suggestionText;
|
||||
}
|
||||
@@ -636,12 +205,20 @@ export function useCommandCompletion(
|
||||
suggestionText += ' ';
|
||||
|
||||
buffer.replaceRangeByOffset(
|
||||
logicalPosToOffset(buffer.lines, cursorRow, completionStart.current),
|
||||
logicalPosToOffset(buffer.lines, cursorRow, completionEnd.current),
|
||||
logicalPosToOffset(buffer.lines, cursorRow, start),
|
||||
logicalPosToOffset(buffer.lines, cursorRow, end),
|
||||
suggestionText,
|
||||
);
|
||||
},
|
||||
[cursorRow, buffer, suggestions, commandIndex],
|
||||
[
|
||||
cursorRow,
|
||||
buffer,
|
||||
suggestions,
|
||||
completionMode,
|
||||
completionStart,
|
||||
completionEnd,
|
||||
slashCompletionRange,
|
||||
],
|
||||
);
|
||||
|
||||
return {
|
||||
|
||||
@@ -448,6 +448,7 @@ describe('useGeminiStream', () => {
|
||||
callId: 'call1',
|
||||
responseParts: [{ text: 'tool 1 response' }],
|
||||
error: undefined,
|
||||
errorType: undefined,
|
||||
resultDisplay: 'Tool 1 success display',
|
||||
},
|
||||
tool: {
|
||||
@@ -655,6 +656,7 @@ describe('useGeminiStream', () => {
|
||||
],
|
||||
resultDisplay: undefined,
|
||||
error: undefined,
|
||||
errorType: undefined,
|
||||
},
|
||||
responseSubmittedToGemini: false,
|
||||
};
|
||||
@@ -679,6 +681,7 @@ describe('useGeminiStream', () => {
|
||||
],
|
||||
resultDisplay: undefined,
|
||||
error: undefined,
|
||||
errorType: undefined,
|
||||
},
|
||||
responseSubmittedToGemini: false,
|
||||
};
|
||||
@@ -775,6 +778,7 @@ describe('useGeminiStream', () => {
|
||||
callId: 'call1',
|
||||
responseParts: toolCallResponseParts,
|
||||
error: undefined,
|
||||
errorType: undefined,
|
||||
resultDisplay: 'Tool 1 success display',
|
||||
},
|
||||
endTime: Date.now(),
|
||||
@@ -1128,6 +1132,7 @@ describe('useGeminiStream', () => {
|
||||
responseParts: [{ text: 'Memory saved' }],
|
||||
resultDisplay: 'Success: Memory saved',
|
||||
error: undefined,
|
||||
errorType: undefined,
|
||||
},
|
||||
tool: {
|
||||
name: 'save_memory',
|
||||
@@ -1649,4 +1654,313 @@ describe('useGeminiStream', () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Concurrent Execution Prevention', () => {
|
||||
it('should prevent concurrent submitQuery calls', async () => {
|
||||
let resolveFirstCall!: () => void;
|
||||
let resolveSecondCall!: () => void;
|
||||
|
||||
const firstCallPromise = new Promise<void>((resolve) => {
|
||||
resolveFirstCall = resolve;
|
||||
});
|
||||
|
||||
const secondCallPromise = new Promise<void>((resolve) => {
|
||||
resolveSecondCall = resolve;
|
||||
});
|
||||
|
||||
// Mock a long-running stream for the first call
|
||||
const firstStream = (async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'First call content',
|
||||
};
|
||||
await firstCallPromise; // Wait until we manually resolve
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})();
|
||||
|
||||
// Mock a stream for the second call (should not be used)
|
||||
const secondStream = (async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Second call content',
|
||||
};
|
||||
await secondCallPromise;
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})();
|
||||
|
||||
let callCount = 0;
|
||||
mockSendMessageStream.mockImplementation(() => {
|
||||
callCount++;
|
||||
if (callCount === 1) {
|
||||
return firstStream;
|
||||
} else {
|
||||
return secondStream;
|
||||
}
|
||||
});
|
||||
|
||||
const { result } = renderTestHook();
|
||||
|
||||
// Start first call
|
||||
const firstCallResult = act(async () => {
|
||||
await result.current.submitQuery('First query');
|
||||
});
|
||||
|
||||
// Wait a bit to ensure first call has started
|
||||
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||
|
||||
// Try to start second call while first is still running
|
||||
const secondCallResult = act(async () => {
|
||||
await result.current.submitQuery('Second query');
|
||||
});
|
||||
|
||||
// Resolve both calls
|
||||
resolveFirstCall();
|
||||
resolveSecondCall();
|
||||
|
||||
await Promise.all([firstCallResult, secondCallResult]);
|
||||
|
||||
// Verify only one call was made to sendMessageStream
|
||||
expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
|
||||
expect(mockSendMessageStream).toHaveBeenCalledWith(
|
||||
'First query',
|
||||
expect.any(AbortSignal),
|
||||
expect.any(String),
|
||||
);
|
||||
|
||||
// Verify only the first query was added to history
|
||||
const userMessages = mockAddItem.mock.calls.filter(
|
||||
(call) => call[0].type === MessageType.USER,
|
||||
);
|
||||
expect(userMessages).toHaveLength(1);
|
||||
expect(userMessages[0][0].text).toBe('First query');
|
||||
});
|
||||
|
||||
it('should allow subsequent calls after first call completes', async () => {
|
||||
// Mock streams that complete immediately
|
||||
mockSendMessageStream
|
||||
.mockReturnValueOnce(
|
||||
(async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'First response',
|
||||
};
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})(),
|
||||
)
|
||||
.mockReturnValueOnce(
|
||||
(async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Second response',
|
||||
};
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})(),
|
||||
);
|
||||
|
||||
const { result } = renderTestHook();
|
||||
|
||||
// First call
|
||||
await act(async () => {
|
||||
await result.current.submitQuery('First query');
|
||||
});
|
||||
|
||||
// Second call after first completes
|
||||
await act(async () => {
|
||||
await result.current.submitQuery('Second query');
|
||||
});
|
||||
|
||||
// Both calls should have been made
|
||||
expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
|
||||
expect(mockSendMessageStream).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
'First query',
|
||||
expect.any(AbortSignal),
|
||||
expect.any(String),
|
||||
);
|
||||
expect(mockSendMessageStream).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
'Second query',
|
||||
expect.any(AbortSignal),
|
||||
expect.any(String),
|
||||
);
|
||||
});
|
||||
|
||||
it('should reset execution flag even when query preparation fails', async () => {
|
||||
const { result } = renderTestHook();
|
||||
|
||||
// First call with empty query (should fail in preparation)
|
||||
await act(async () => {
|
||||
await result.current.submitQuery(' '); // Empty trimmed query
|
||||
});
|
||||
|
||||
// Second call should work normally
|
||||
mockSendMessageStream.mockReturnValue(
|
||||
(async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Valid response',
|
||||
};
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})(),
|
||||
);
|
||||
|
||||
await act(async () => {
|
||||
await result.current.submitQuery('Valid query');
|
||||
});
|
||||
|
||||
// The second call should have been made
|
||||
expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
|
||||
expect(mockSendMessageStream).toHaveBeenCalledWith(
|
||||
'Valid query',
|
||||
expect.any(AbortSignal),
|
||||
expect.any(String),
|
||||
);
|
||||
});
|
||||
|
||||
it('should reset execution flag when user cancels', async () => {
|
||||
let resolveCancelledStream!: () => void;
|
||||
const cancelledStreamPromise = new Promise<void>((resolve) => {
|
||||
resolveCancelledStream = resolve;
|
||||
});
|
||||
|
||||
// Mock a stream that can be cancelled
|
||||
const cancelledStream = (async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Cancelled content',
|
||||
};
|
||||
await cancelledStreamPromise;
|
||||
yield { type: ServerGeminiEventType.UserCancelled };
|
||||
})();
|
||||
|
||||
mockSendMessageStream.mockReturnValueOnce(cancelledStream);
|
||||
|
||||
const { result } = renderTestHook();
|
||||
|
||||
// Start first call
|
||||
const firstCallResult = act(async () => {
|
||||
await result.current.submitQuery('First query');
|
||||
});
|
||||
|
||||
// Wait a bit then resolve to trigger cancellation
|
||||
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||
resolveCancelledStream();
|
||||
await firstCallResult;
|
||||
|
||||
// Now try a second call - should work
|
||||
mockSendMessageStream.mockReturnValue(
|
||||
(async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Second response',
|
||||
};
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})(),
|
||||
);
|
||||
|
||||
await act(async () => {
|
||||
await result.current.submitQuery('Second query');
|
||||
});
|
||||
|
||||
// Both calls should have been made
|
||||
expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should reset execution flag when an error occurs', async () => {
|
||||
// Mock a stream that throws an error
|
||||
mockSendMessageStream.mockReturnValueOnce(
|
||||
(async function* () {
|
||||
yield { type: ServerGeminiEventType.Content, value: 'Error content' };
|
||||
throw new Error('Stream error');
|
||||
})(),
|
||||
);
|
||||
|
||||
const { result } = renderTestHook();
|
||||
|
||||
// First call that will error
|
||||
await act(async () => {
|
||||
await result.current.submitQuery('Error query');
|
||||
});
|
||||
|
||||
// Second call should work normally
|
||||
mockSendMessageStream.mockReturnValue(
|
||||
(async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Success response',
|
||||
};
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})(),
|
||||
);
|
||||
|
||||
await act(async () => {
|
||||
await result.current.submitQuery('Success query');
|
||||
});
|
||||
|
||||
// Both calls should have been attempted
|
||||
expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should handle rapid multiple concurrent calls correctly', async () => {
|
||||
let resolveStream!: () => void;
|
||||
const streamPromise = new Promise<void>((resolve) => {
|
||||
resolveStream = resolve;
|
||||
});
|
||||
|
||||
// Mock a long-running stream
|
||||
const longStream = (async function* () {
|
||||
yield {
|
||||
type: ServerGeminiEventType.Content,
|
||||
value: 'Long running content',
|
||||
};
|
||||
await streamPromise;
|
||||
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
|
||||
})();
|
||||
|
||||
mockSendMessageStream.mockReturnValue(longStream);
|
||||
|
||||
const { result } = renderTestHook();
|
||||
|
||||
// Start multiple concurrent calls
|
||||
const calls = [
|
||||
act(async () => {
|
||||
await result.current.submitQuery('Query 1');
|
||||
}),
|
||||
act(async () => {
|
||||
await result.current.submitQuery('Query 2');
|
||||
}),
|
||||
act(async () => {
|
||||
await result.current.submitQuery('Query 3');
|
||||
}),
|
||||
act(async () => {
|
||||
await result.current.submitQuery('Query 4');
|
||||
}),
|
||||
act(async () => {
|
||||
await result.current.submitQuery('Query 5');
|
||||
}),
|
||||
];
|
||||
|
||||
// Wait a bit then resolve the stream
|
||||
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||
resolveStream();
|
||||
|
||||
// Wait for all calls to complete
|
||||
await Promise.all(calls);
|
||||
|
||||
// Only the first call should have been made
|
||||
expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
|
||||
expect(mockSendMessageStream).toHaveBeenCalledWith(
|
||||
'Query 1',
|
||||
expect.any(AbortSignal),
|
||||
expect.any(String),
|
||||
);
|
||||
|
||||
// Only one user message should have been added
|
||||
const userMessages = mockAddItem.mock.calls.filter(
|
||||
(call) => call[0].type === MessageType.USER,
|
||||
);
|
||||
expect(userMessages).toHaveLength(1);
|
||||
expect(userMessages[0][0].text).toBe('Query 1');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -93,10 +93,12 @@ export const useGeminiStream = (
  performMemoryRefresh: () => Promise<void>,
  modelSwitchedFromQuotaError: boolean,
  setModelSwitchedFromQuotaError: React.Dispatch<React.SetStateAction<boolean>>,
  onEditorClose: () => void,
) => {
  const [initError, setInitError] = useState<string | null>(null);
  const abortControllerRef = useRef<AbortController | null>(null);
  const turnCancelledRef = useRef(false);
  const isSubmittingQueryRef = useRef(false);
  const [isResponding, setIsResponding] = useState<boolean>(false);
  const [thought, setThought] = useState<ThoughtSummary | null>(null);
  const [pendingHistoryItemRef, setPendingHistoryItem] =
@@ -133,6 +135,7 @@ export const useGeminiStream = (
    config,
    setPendingHistoryItem,
    getPreferredEditor,
    onEditorClose,
  );

  const pendingToolCallGroupDisplay = useMemo(
@@ -622,6 +625,11 @@ export const useGeminiStream = (
      options?: { isContinuation: boolean },
      prompt_id?: string,
    ) => {
      // Prevent concurrent executions of submitQuery
      if (isSubmittingQueryRef.current) {
        return;
      }

      if (
        (streamingState === StreamingState.Responding ||
          streamingState === StreamingState.WaitingForConfirmation) &&
@@ -629,6 +637,9 @@ export const useGeminiStream = (
      )
        return;

      // Set the flag to indicate we're now executing
      isSubmittingQueryRef.current = true;

      const userMessageTimestamp = Date.now();

      // Reset quota error flag when starting a new query (not a continuation)
@@ -653,6 +664,7 @@ export const useGeminiStream = (
      );

      if (!shouldProceed || queryToSend === null) {
        isSubmittingQueryRef.current = false;
        return;
      }

@@ -677,6 +689,7 @@ export const useGeminiStream = (
      );

      if (processingStatus === StreamProcessingStatus.UserCancelled) {
        isSubmittingQueryRef.current = false;
        return;
      }

@@ -708,6 +721,7 @@ export const useGeminiStream = (
      }
    } finally {
      setIsResponding(false);
      isSubmittingQueryRef.current = false;
    }
  },
  [
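These hunks wrap submitQuery in a ref-based re-entrancy guard: the flag is set before the query is prepared and cleared on every early return and in the finally block, so a second call that arrives while a stream is still active is dropped rather than queued. A generic sketch of the same pattern, illustrative only and not part of the codebase, assuming nothing beyond React:

import { useCallback, useRef } from 'react';

// Wraps an async action so overlapping invocations are ignored instead of
// queued, mirroring the isSubmittingQueryRef guard above.
export function useSingleFlight<T extends unknown[]>(
  action: (...args: T) => Promise<void>,
) {
  const inFlightRef = useRef(false);

  return useCallback(
    async (...args: T) => {
      if (inFlightRef.current) {
        return; // a previous call is still running; drop this one
      }
      inFlightRef.current = true;
      try {
        await action(...args);
      } finally {
        // Always clear the flag, even on error or cancellation.
        inFlightRef.current = false;
      }
    },
    [action],
  );
}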
@@ -38,7 +38,6 @@ export const WITTY_LOADING_PHRASES = [
  'Defragmenting memories... both RAM and personal...',
  'Rebooting the humor module...',
  'Caching the essentials (mostly cat memes)...',
  'Running sudo make me a sandwich...',
  'Optimizing for ludicrous speed',
  "Swapping bits... don't tell the bytes...",
  'Garbage collecting... be right back...',
@@ -66,12 +65,10 @@ export const WITTY_LOADING_PHRASES = [
  "Just a moment, I'm tuning the algorithms...",
  'Warp speed engaged...',
  'Mining for more Dilithium crystals...',
  "I'm Giving Her all she's got Captain!",
  "Don't panic...",
  'Following the white rabbit...',
  'The truth is in here... somewhere...',
  'Blowing on the cartridge...',
  'Looking for the princess in another castle...',
  'Loading... Do a barrel roll!',
  'Waiting for the respawn...',
  'Finishing the Kessel Run in less than 12 parsecs...',
packages/cli/src/ui/hooks/useQwenAuth.test.ts (new file, 437 lines)
@@ -0,0 +1,437 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import { useQwenAuth, DeviceAuthorizationInfo } from './useQwenAuth.js';
import {
  AuthType,
  qwenOAuth2Events,
  QwenOAuth2Event,
} from '@qwen-code/qwen-code-core';
import { LoadedSettings } from '../../config/settings.js';

// Mock the qwenOAuth2Events
vi.mock('@qwen-code/qwen-code-core', async () => {
  const actual = await vi.importActual('@qwen-code/qwen-code-core');
  const mockEmitter = {
    on: vi.fn().mockReturnThis(),
    off: vi.fn().mockReturnThis(),
    emit: vi.fn().mockReturnThis(),
  };
  return {
    ...actual,
    qwenOAuth2Events: mockEmitter,
    QwenOAuth2Event: {
      AuthUri: 'authUri',
      AuthProgress: 'authProgress',
    },
  };
});

const mockQwenOAuth2Events = vi.mocked(qwenOAuth2Events);

describe('useQwenAuth', () => {
  const mockDeviceAuth: DeviceAuthorizationInfo = {
    verification_uri: 'https://oauth.qwen.com/device',
    verification_uri_complete: 'https://oauth.qwen.com/device?user_code=ABC123',
    user_code: 'ABC123',
    expires_in: 1800,
  };

  const createMockSettings = (authType: AuthType): LoadedSettings =>
    ({
      merged: {
        selectedAuthType: authType,
      },
    }) as LoadedSettings;

  beforeEach(() => {
    vi.clearAllMocks();
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

it('should initialize with default state when not Qwen auth', () => {
|
||||
const settings = createMockSettings(AuthType.USE_GEMINI);
|
||||
const { result } = renderHook(() => useQwenAuth(settings, false));
|
||||
|
||||
expect(result.current).toEqual({
|
||||
isQwenAuthenticating: false,
|
||||
deviceAuth: null,
|
||||
authStatus: 'idle',
|
||||
authMessage: null,
|
||||
isQwenAuth: false,
|
||||
cancelQwenAuth: expect.any(Function),
|
||||
});
|
||||
});
|
||||
|
||||
it('should initialize with default state when Qwen auth but not authenticating', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
const { result } = renderHook(() => useQwenAuth(settings, false));
|
||||
|
||||
expect(result.current).toEqual({
|
||||
isQwenAuthenticating: false,
|
||||
deviceAuth: null,
|
||||
authStatus: 'idle',
|
||||
authMessage: null,
|
||||
isQwenAuth: true,
|
||||
cancelQwenAuth: expect.any(Function),
|
||||
});
|
||||
});
|
||||
|
||||
it('should set up event listeners when Qwen auth and authenticating', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
expect(mockQwenOAuth2Events.on).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthUri,
|
||||
expect.any(Function),
|
||||
);
|
||||
expect(mockQwenOAuth2Events.on).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
expect.any(Function),
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle device auth event', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleDeviceAuth: (deviceAuth: DeviceAuthorizationInfo) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthUri) {
|
||||
handleDeviceAuth = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
act(() => {
|
||||
handleDeviceAuth!(mockDeviceAuth);
|
||||
});
|
||||
|
||||
expect(result.current.deviceAuth).toEqual(mockDeviceAuth);
|
||||
expect(result.current.authStatus).toBe('polling');
|
||||
expect(result.current.isQwenAuthenticating).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle auth progress event - success', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleAuthProgress: (
|
||||
status: 'success' | 'error' | 'polling' | 'timeout' | 'rate_limit',
|
||||
message?: string,
|
||||
) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthProgress) {
|
||||
handleAuthProgress = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
act(() => {
|
||||
handleAuthProgress!('success', 'Authentication successful!');
|
||||
});
|
||||
|
||||
expect(result.current.authStatus).toBe('success');
|
||||
expect(result.current.authMessage).toBe('Authentication successful!');
|
||||
});
|
||||
|
||||
it('should handle auth progress event - error', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleAuthProgress: (
|
||||
status: 'success' | 'error' | 'polling' | 'timeout' | 'rate_limit',
|
||||
message?: string,
|
||||
) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthProgress) {
|
||||
handleAuthProgress = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
act(() => {
|
||||
handleAuthProgress!('error', 'Authentication failed');
|
||||
});
|
||||
|
||||
expect(result.current.authStatus).toBe('error');
|
||||
expect(result.current.authMessage).toBe('Authentication failed');
|
||||
});
|
||||
|
||||
it('should handle auth progress event - polling', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleAuthProgress: (
|
||||
status: 'success' | 'error' | 'polling' | 'timeout' | 'rate_limit',
|
||||
message?: string,
|
||||
) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthProgress) {
|
||||
handleAuthProgress = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
act(() => {
|
||||
handleAuthProgress!('polling', 'Waiting for user authorization...');
|
||||
});
|
||||
|
||||
expect(result.current.authStatus).toBe('polling');
|
||||
expect(result.current.authMessage).toBe(
|
||||
'Waiting for user authorization...',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle auth progress event - rate_limit', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleAuthProgress: (
|
||||
status: 'success' | 'error' | 'polling' | 'timeout' | 'rate_limit',
|
||||
message?: string,
|
||||
) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthProgress) {
|
||||
handleAuthProgress = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
act(() => {
|
||||
handleAuthProgress!(
|
||||
'rate_limit',
|
||||
'Too many requests. The server is rate limiting our requests. Please select a different authentication method or try again later.',
|
||||
);
|
||||
});
|
||||
|
||||
expect(result.current.authStatus).toBe('rate_limit');
|
||||
expect(result.current.authMessage).toBe(
|
||||
'Too many requests. The server is rate limiting our requests. Please select a different authentication method or try again later.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle auth progress event without message', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleAuthProgress: (
|
||||
status: 'success' | 'error' | 'polling' | 'timeout' | 'rate_limit',
|
||||
message?: string,
|
||||
) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthProgress) {
|
||||
handleAuthProgress = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
act(() => {
|
||||
handleAuthProgress!('success');
|
||||
});
|
||||
|
||||
expect(result.current.authStatus).toBe('success');
|
||||
expect(result.current.authMessage).toBe(null);
|
||||
});
|
||||
|
||||
it('should clean up event listeners when auth type changes', () => {
|
||||
const qwenSettings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
const { rerender } = renderHook(
|
||||
({ settings, isAuthenticating }) =>
|
||||
useQwenAuth(settings, isAuthenticating),
|
||||
{ initialProps: { settings: qwenSettings, isAuthenticating: true } },
|
||||
);
|
||||
|
||||
// Change to non-Qwen auth
|
||||
const geminiSettings = createMockSettings(AuthType.USE_GEMINI);
|
||||
rerender({ settings: geminiSettings, isAuthenticating: true });
|
||||
|
||||
expect(mockQwenOAuth2Events.off).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthUri,
|
||||
expect.any(Function),
|
||||
);
|
||||
expect(mockQwenOAuth2Events.off).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
expect.any(Function),
|
||||
);
|
||||
});
|
||||
|
||||
it('should clean up event listeners when authentication stops', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
const { rerender } = renderHook(
|
||||
({ isAuthenticating }) => useQwenAuth(settings, isAuthenticating),
|
||||
{ initialProps: { isAuthenticating: true } },
|
||||
);
|
||||
|
||||
// Stop authentication
|
||||
rerender({ isAuthenticating: false });
|
||||
|
||||
expect(mockQwenOAuth2Events.off).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthUri,
|
||||
expect.any(Function),
|
||||
);
|
||||
expect(mockQwenOAuth2Events.off).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
expect.any(Function),
|
||||
);
|
||||
});
|
||||
|
||||
it('should clean up event listeners on unmount', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
const { unmount } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
unmount();
|
||||
|
||||
expect(mockQwenOAuth2Events.off).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthUri,
|
||||
expect.any(Function),
|
||||
);
|
||||
expect(mockQwenOAuth2Events.off).toHaveBeenCalledWith(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
expect.any(Function),
|
||||
);
|
||||
});
|
||||
|
||||
it('should reset state when switching from Qwen auth to another auth type', () => {
|
||||
const qwenSettings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleDeviceAuth: (deviceAuth: DeviceAuthorizationInfo) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthUri) {
|
||||
handleDeviceAuth = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ settings, isAuthenticating }) =>
|
||||
useQwenAuth(settings, isAuthenticating),
|
||||
{ initialProps: { settings: qwenSettings, isAuthenticating: true } },
|
||||
);
|
||||
|
||||
// Simulate device auth
|
||||
act(() => {
|
||||
handleDeviceAuth!(mockDeviceAuth);
|
||||
});
|
||||
|
||||
expect(result.current.deviceAuth).toEqual(mockDeviceAuth);
|
||||
expect(result.current.authStatus).toBe('polling');
|
||||
|
||||
// Switch to different auth type
|
||||
const geminiSettings = createMockSettings(AuthType.USE_GEMINI);
|
||||
rerender({ settings: geminiSettings, isAuthenticating: true });
|
||||
|
||||
expect(result.current.isQwenAuthenticating).toBe(false);
|
||||
expect(result.current.deviceAuth).toBe(null);
|
||||
expect(result.current.authStatus).toBe('idle');
|
||||
expect(result.current.authMessage).toBe(null);
|
||||
});
|
||||
|
||||
it('should reset state when authentication stops', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleDeviceAuth: (deviceAuth: DeviceAuthorizationInfo) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthUri) {
|
||||
handleDeviceAuth = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result, rerender } = renderHook(
|
||||
({ isAuthenticating }) => useQwenAuth(settings, isAuthenticating),
|
||||
{ initialProps: { isAuthenticating: true } },
|
||||
);
|
||||
|
||||
// Simulate device auth
|
||||
act(() => {
|
||||
handleDeviceAuth!(mockDeviceAuth);
|
||||
});
|
||||
|
||||
expect(result.current.deviceAuth).toEqual(mockDeviceAuth);
|
||||
expect(result.current.authStatus).toBe('polling');
|
||||
|
||||
// Stop authentication
|
||||
rerender({ isAuthenticating: false });
|
||||
|
||||
expect(result.current.isQwenAuthenticating).toBe(false);
|
||||
expect(result.current.deviceAuth).toBe(null);
|
||||
expect(result.current.authStatus).toBe('idle');
|
||||
expect(result.current.authMessage).toBe(null);
|
||||
});
|
||||
|
||||
it('should handle cancelQwenAuth function', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
let handleDeviceAuth: (deviceAuth: DeviceAuthorizationInfo) => void;
|
||||
|
||||
mockQwenOAuth2Events.on.mockImplementation((event, handler) => {
|
||||
if (event === QwenOAuth2Event.AuthUri) {
|
||||
handleDeviceAuth = handler;
|
||||
}
|
||||
return mockQwenOAuth2Events;
|
||||
});
|
||||
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
// Set up some state
|
||||
act(() => {
|
||||
handleDeviceAuth!(mockDeviceAuth);
|
||||
});
|
||||
|
||||
expect(result.current.deviceAuth).toEqual(mockDeviceAuth);
|
||||
|
||||
// Cancel auth
|
||||
act(() => {
|
||||
result.current.cancelQwenAuth();
|
||||
});
|
||||
|
||||
expect(result.current.isQwenAuthenticating).toBe(false);
|
||||
expect(result.current.deviceAuth).toBe(null);
|
||||
expect(result.current.authStatus).toBe('idle');
|
||||
expect(result.current.authMessage).toBe(null);
|
||||
});
|
||||
|
||||
it('should maintain isQwenAuth flag correctly', () => {
|
||||
// Test with Qwen OAuth
|
||||
const qwenSettings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
const { result: qwenResult } = renderHook(() =>
|
||||
useQwenAuth(qwenSettings, false),
|
||||
);
|
||||
expect(qwenResult.current.isQwenAuth).toBe(true);
|
||||
|
||||
// Test with other auth types
|
||||
const geminiSettings = createMockSettings(AuthType.USE_GEMINI);
|
||||
const { result: geminiResult } = renderHook(() =>
|
||||
useQwenAuth(geminiSettings, false),
|
||||
);
|
||||
expect(geminiResult.current.isQwenAuth).toBe(false);
|
||||
|
||||
const oauthSettings = createMockSettings(AuthType.LOGIN_WITH_GOOGLE);
|
||||
const { result: oauthResult } = renderHook(() =>
|
||||
useQwenAuth(oauthSettings, false),
|
||||
);
|
||||
expect(oauthResult.current.isQwenAuth).toBe(false);
|
||||
});
|
||||
|
||||
it('should set isQwenAuthenticating to true when starting authentication with Qwen auth', () => {
|
||||
const settings = createMockSettings(AuthType.QWEN_OAUTH);
|
||||
const { result } = renderHook(() => useQwenAuth(settings, true));
|
||||
|
||||
expect(result.current.isQwenAuthenticating).toBe(true);
|
||||
expect(result.current.authStatus).toBe('idle');
|
||||
});
|
||||
});
|
||||
120
packages/cli/src/ui/hooks/useQwenAuth.ts
Normal file
120
packages/cli/src/ui/hooks/useQwenAuth.ts
Normal file
@@ -0,0 +1,120 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { useState, useCallback, useEffect } from 'react';
|
||||
import { LoadedSettings } from '../../config/settings.js';
|
||||
import {
|
||||
AuthType,
|
||||
qwenOAuth2Events,
|
||||
QwenOAuth2Event,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
|
||||
export interface DeviceAuthorizationInfo {
|
||||
verification_uri: string;
|
||||
verification_uri_complete: string;
|
||||
user_code: string;
|
||||
expires_in: number;
|
||||
}
|
||||
|
||||
interface QwenAuthState {
|
||||
isQwenAuthenticating: boolean;
|
||||
deviceAuth: DeviceAuthorizationInfo | null;
|
||||
authStatus:
|
||||
| 'idle'
|
||||
| 'polling'
|
||||
| 'success'
|
||||
| 'error'
|
||||
| 'timeout'
|
||||
| 'rate_limit';
|
||||
authMessage: string | null;
|
||||
}
|
||||
|
||||
export const useQwenAuth = (
|
||||
settings: LoadedSettings,
|
||||
isAuthenticating: boolean,
|
||||
) => {
|
||||
const [qwenAuthState, setQwenAuthState] = useState<QwenAuthState>({
|
||||
isQwenAuthenticating: false,
|
||||
deviceAuth: null,
|
||||
authStatus: 'idle',
|
||||
authMessage: null,
|
||||
});
|
||||
|
||||
const isQwenAuth = settings.merged.selectedAuthType === AuthType.QWEN_OAUTH;
|
||||
|
||||
// Set up event listeners when authentication starts
|
||||
useEffect(() => {
|
||||
if (!isQwenAuth || !isAuthenticating) {
|
||||
// Reset state when not authenticating or not Qwen auth
|
||||
setQwenAuthState({
|
||||
isQwenAuthenticating: false,
|
||||
deviceAuth: null,
|
||||
authStatus: 'idle',
|
||||
authMessage: null,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
setQwenAuthState((prev) => ({
|
||||
...prev,
|
||||
isQwenAuthenticating: true,
|
||||
authStatus: 'idle',
|
||||
}));
|
||||
|
||||
// Set up event listeners
|
||||
const handleDeviceAuth = (deviceAuth: DeviceAuthorizationInfo) => {
|
||||
setQwenAuthState((prev) => ({
|
||||
...prev,
|
||||
deviceAuth: {
|
||||
verification_uri: deviceAuth.verification_uri,
|
||||
verification_uri_complete: deviceAuth.verification_uri_complete,
|
||||
user_code: deviceAuth.user_code,
|
||||
expires_in: deviceAuth.expires_in,
|
||||
},
|
||||
authStatus: 'polling',
|
||||
}));
|
||||
};
|
||||
|
||||
const handleAuthProgress = (
|
||||
status: 'success' | 'error' | 'polling' | 'timeout' | 'rate_limit',
|
||||
message?: string,
|
||||
) => {
|
||||
setQwenAuthState((prev) => ({
|
||||
...prev,
|
||||
authStatus: status,
|
||||
authMessage: message || null,
|
||||
}));
|
||||
};
|
||||
|
||||
// Add event listeners
|
||||
qwenOAuth2Events.on(QwenOAuth2Event.AuthUri, handleDeviceAuth);
|
||||
qwenOAuth2Events.on(QwenOAuth2Event.AuthProgress, handleAuthProgress);
|
||||
|
||||
// Cleanup event listeners when component unmounts or auth finishes
|
||||
return () => {
|
||||
qwenOAuth2Events.off(QwenOAuth2Event.AuthUri, handleDeviceAuth);
|
||||
qwenOAuth2Events.off(QwenOAuth2Event.AuthProgress, handleAuthProgress);
|
||||
};
|
||||
}, [isQwenAuth, isAuthenticating]);
|
||||
|
||||
const cancelQwenAuth = useCallback(() => {
|
||||
// Emit cancel event to stop polling
|
||||
qwenOAuth2Events.emit(QwenOAuth2Event.AuthCancel);
|
||||
|
||||
setQwenAuthState({
|
||||
isQwenAuthenticating: false,
|
||||
deviceAuth: null,
|
||||
authStatus: 'idle',
|
||||
authMessage: null,
|
||||
});
|
||||
}, []);
|
||||
|
||||
return {
|
||||
...qwenAuthState,
|
||||
isQwenAuth,
|
||||
cancelQwenAuth,
|
||||
};
|
||||
};
|
||||
@@ -70,6 +70,7 @@ export function useReactToolScheduler(
|
||||
React.SetStateAction<HistoryItemWithoutId | null>
|
||||
>,
|
||||
getPreferredEditor: () => EditorType | undefined,
|
||||
onEditorClose: () => void,
|
||||
): [TrackedToolCall[], ScheduleFn, MarkToolsAsSubmittedFn] {
|
||||
const [toolCallsForDisplay, setToolCallsForDisplay] = useState<
|
||||
TrackedToolCall[]
|
||||
@@ -140,6 +141,7 @@ export function useReactToolScheduler(
|
||||
onToolCallsUpdate: toolCallsUpdateHandler,
|
||||
getPreferredEditor,
|
||||
config,
|
||||
onEditorClose,
|
||||
}),
|
||||
[
|
||||
config,
|
||||
@@ -147,6 +149,7 @@ export function useReactToolScheduler(
|
||||
allToolCallsCompleteHandler,
|
||||
toolCallsUpdateHandler,
|
||||
getPreferredEditor,
|
||||
onEditorClose,
|
||||
],
|
||||
);
|
||||
|
||||
|
||||
@@ -41,12 +41,17 @@ export function useReverseSearchCompletion(
|
||||
navigateDown,
|
||||
} = useCompletion();
|
||||
|
||||
// whenever reverseSearchActive is on, filter history
|
||||
useEffect(() => {
|
||||
if (!reverseSearchActive) {
|
||||
resetCompletionState();
|
||||
}
|
||||
}, [reverseSearchActive, resetCompletionState]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!reverseSearchActive) {
|
||||
return;
|
||||
}
|
||||
|
||||
const q = buffer.text.toLowerCase();
|
||||
const matches = shellHistory.reduce<Suggestion[]>((acc, cmd) => {
|
||||
const idx = cmd.toLowerCase().indexOf(q);
|
||||
@@ -62,7 +67,6 @@ export function useReverseSearchCompletion(
|
||||
buffer.text,
|
||||
shellHistory,
|
||||
reverseSearchActive,
|
||||
resetCompletionState,
|
||||
setActiveSuggestionIndex,
|
||||
setShowSuggestions,
|
||||
setSuggestions,
|
||||
|
||||
@@ -66,8 +66,8 @@ export function createShowMemoryAction(
|
||||
type: MessageType.INFO,
|
||||
content:
|
||||
fileCount > 0
|
||||
? 'Hierarchical memory (GEMINI.md or other context files) is loaded but content is empty.'
|
||||
: 'No hierarchical memory (GEMINI.md or other context files) is currently loaded.',
|
||||
? 'Hierarchical memory (QWEN.md or other context files) is loaded but content is empty.'
|
||||
: 'No hierarchical memory (QWEN.md or other context files) is currently loaded.',
|
||||
timestamp: new Date(),
|
||||
});
|
||||
}
|
||||
|
||||
434
packages/cli/src/ui/hooks/useSlashCompletion.test.ts
Normal file
434
packages/cli/src/ui/hooks/useSlashCompletion.test.ts
Normal file
@@ -0,0 +1,434 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/** @vitest-environment jsdom */
|
||||
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { renderHook, waitFor } from '@testing-library/react';
|
||||
import { useSlashCompletion } from './useSlashCompletion.js';
|
||||
import { CommandContext, SlashCommand } from '../commands/types.js';
|
||||
import { useState } from 'react';
|
||||
import { Suggestion } from '../components/SuggestionsDisplay.js';
|
||||
|
||||
// Test harness to capture the state from the hook's callbacks.
|
||||
function useTestHarnessForSlashCompletion(
|
||||
enabled: boolean,
|
||||
query: string | null,
|
||||
slashCommands: readonly SlashCommand[],
|
||||
commandContext: CommandContext,
|
||||
) {
|
||||
const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
|
||||
const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false);
|
||||
const [isPerfectMatch, setIsPerfectMatch] = useState(false);
|
||||
|
||||
const { completionStart, completionEnd } = useSlashCompletion({
|
||||
enabled,
|
||||
query,
|
||||
slashCommands,
|
||||
commandContext,
|
||||
setSuggestions,
|
||||
setIsLoadingSuggestions,
|
||||
setIsPerfectMatch,
|
||||
});
|
||||
|
||||
return {
|
||||
suggestions,
|
||||
isLoadingSuggestions,
|
||||
isPerfectMatch,
|
||||
completionStart,
|
||||
completionEnd,
|
||||
};
|
||||
}
|
||||
|
||||
describe('useSlashCompletion', () => {
|
||||
// A minimal mock is sufficient for these tests.
|
||||
const mockCommandContext = {} as CommandContext;
|
||||
|
||||
describe('Top-Level Commands', () => {
|
||||
it('should suggest all top-level commands for the root slash', async () => {
|
||||
const slashCommands = [
|
||||
{ name: 'help', altNames: ['?'], description: 'Show help' },
|
||||
{
|
||||
name: 'stats',
|
||||
altNames: ['usage'],
|
||||
description: 'check session stats. Usage: /stats [model|tools]',
|
||||
},
|
||||
{ name: 'clear', description: 'Clear the screen' },
|
||||
{
|
||||
name: 'memory',
|
||||
description: 'Manage memory',
|
||||
subCommands: [{ name: 'show', description: 'Show memory' }],
|
||||
},
|
||||
{ name: 'chat', description: 'Manage chat history' },
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions.length).toBe(slashCommands.length);
|
||||
expect(result.current.suggestions.map((s) => s.label)).toEqual(
|
||||
expect.arrayContaining(['help', 'clear', 'memory', 'chat', 'stats']),
|
||||
);
|
||||
});
|
||||
|
||||
it('should filter commands based on partial input', async () => {
|
||||
const slashCommands = [
|
||||
{ name: 'memory', description: 'Manage memory' },
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/mem',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toEqual([
|
||||
{ label: 'memory', value: 'memory', description: 'Manage memory' },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should suggest commands based on partial altNames', async () => {
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'stats',
|
||||
altNames: ['usage'],
|
||||
description: 'check session stats. Usage: /stats [model|tools]',
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/usag',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toEqual([
|
||||
{
|
||||
label: 'stats',
|
||||
value: 'stats',
|
||||
description: 'check session stats. Usage: /stats [model|tools]',
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it('should NOT provide suggestions for a perfectly typed command that is a leaf node', async () => {
|
||||
const slashCommands = [
|
||||
{ name: 'clear', description: 'Clear the screen', action: vi.fn() },
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/clear',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(0);
|
||||
});
|
||||
|
||||
it.each([['/?'], ['/usage']])(
|
||||
'should not suggest commands when altNames is fully typed',
|
||||
async (query) => {
|
||||
const mockSlashCommands = [
|
||||
{
|
||||
name: 'help',
|
||||
altNames: ['?'],
|
||||
description: 'Show help',
|
||||
action: vi.fn(),
|
||||
},
|
||||
{
|
||||
name: 'stats',
|
||||
altNames: ['usage'],
|
||||
description: 'check session stats. Usage: /stats [model|tools]',
|
||||
action: vi.fn(),
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
query,
|
||||
mockSlashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(0);
|
||||
},
|
||||
);
|
||||
|
||||
it('should not provide suggestions for a fully typed command that has no sub-commands or argument completion', async () => {
|
||||
const slashCommands = [
|
||||
{ name: 'clear', description: 'Clear the screen' },
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/clear ',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should not provide suggestions for an unknown command', async () => {
|
||||
const slashCommands = [
|
||||
{ name: 'help', description: 'Show help' },
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/unknown-command',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Sub-Commands', () => {
|
||||
it('should suggest sub-commands for a parent command', async () => {
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'memory',
|
||||
description: 'Manage memory',
|
||||
subCommands: [
|
||||
{ name: 'show', description: 'Show memory' },
|
||||
{ name: 'add', description: 'Add to memory' },
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/memory',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(2);
|
||||
expect(result.current.suggestions).toEqual(
|
||||
expect.arrayContaining([
|
||||
{ label: 'show', value: 'show', description: 'Show memory' },
|
||||
{ label: 'add', value: 'add', description: 'Add to memory' },
|
||||
]),
|
||||
);
|
||||
});
|
||||
|
||||
it('should suggest all sub-commands when the query ends with the parent command and a space', async () => {
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'memory',
|
||||
description: 'Manage memory',
|
||||
subCommands: [
|
||||
{ name: 'show', description: 'Show memory' },
|
||||
{ name: 'add', description: 'Add to memory' },
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/memory ',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(2);
|
||||
expect(result.current.suggestions).toEqual(
|
||||
expect.arrayContaining([
|
||||
{ label: 'show', value: 'show', description: 'Show memory' },
|
||||
{ label: 'add', value: 'add', description: 'Add to memory' },
|
||||
]),
|
||||
);
|
||||
});
|
||||
|
||||
it('should filter sub-commands by prefix', async () => {
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'memory',
|
||||
description: 'Manage memory',
|
||||
subCommands: [
|
||||
{ name: 'show', description: 'Show memory' },
|
||||
{ name: 'add', description: 'Add to memory' },
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/memory a',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toEqual([
|
||||
{ label: 'add', value: 'add', description: 'Add to memory' },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should provide no suggestions for an invalid sub-command', async () => {
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'memory',
|
||||
description: 'Manage memory',
|
||||
subCommands: [
|
||||
{ name: 'show', description: 'Show memory' },
|
||||
{ name: 'add', description: 'Add to memory' },
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/memory dothisnow',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
expect(result.current.suggestions).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Argument Completion', () => {
|
||||
it('should call the command.completion function for argument suggestions', async () => {
|
||||
const availableTags = [
|
||||
'my-chat-tag-1',
|
||||
'my-chat-tag-2',
|
||||
'another-channel',
|
||||
];
|
||||
const mockCompletionFn = vi
|
||||
.fn()
|
||||
.mockImplementation(
|
||||
async (_context: CommandContext, partialArg: string) =>
|
||||
availableTags.filter((tag) => tag.startsWith(partialArg)),
|
||||
);
|
||||
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'chat',
|
||||
description: 'Manage chat history',
|
||||
subCommands: [
|
||||
{
|
||||
name: 'resume',
|
||||
description: 'Resume a saved chat',
|
||||
completion: mockCompletionFn,
|
||||
},
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/chat resume my-ch',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockCompletionFn).toHaveBeenCalledWith(
|
||||
mockCommandContext,
|
||||
'my-ch',
|
||||
);
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions).toEqual([
|
||||
{ label: 'my-chat-tag-1', value: 'my-chat-tag-1' },
|
||||
{ label: 'my-chat-tag-2', value: 'my-chat-tag-2' },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
it('should call command.completion with an empty string when args start with a space', async () => {
|
||||
const mockCompletionFn = vi
|
||||
.fn()
|
||||
.mockResolvedValue(['my-chat-tag-1', 'my-chat-tag-2', 'my-channel']);
|
||||
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'chat',
|
||||
description: 'Manage chat history',
|
||||
subCommands: [
|
||||
{
|
||||
name: 'resume',
|
||||
description: 'Resume a saved chat',
|
||||
completion: mockCompletionFn,
|
||||
},
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/chat resume ',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, '');
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions).toHaveLength(3);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle completion function that returns null', async () => {
|
||||
const completionFn = vi.fn().mockResolvedValue(null);
|
||||
const slashCommands = [
|
||||
{
|
||||
name: 'chat',
|
||||
description: 'Manage chat history',
|
||||
subCommands: [
|
||||
{
|
||||
name: 'resume',
|
||||
description: 'Resume a saved chat',
|
||||
completion: completionFn,
|
||||
},
|
||||
],
|
||||
},
|
||||
] as unknown as SlashCommand[];
|
||||
|
||||
const { result } = renderHook(() =>
|
||||
useTestHarnessForSlashCompletion(
|
||||
true,
|
||||
'/chat resume ',
|
||||
slashCommands,
|
||||
mockCommandContext,
|
||||
),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(result.current.suggestions).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
187
packages/cli/src/ui/hooks/useSlashCompletion.ts
Normal file
187
packages/cli/src/ui/hooks/useSlashCompletion.ts
Normal file
@@ -0,0 +1,187 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { Suggestion } from '../components/SuggestionsDisplay.js';
|
||||
import { CommandContext, SlashCommand } from '../commands/types.js';
|
||||
|
||||
export interface UseSlashCompletionProps {
|
||||
enabled: boolean;
|
||||
query: string | null;
|
||||
slashCommands: readonly SlashCommand[];
|
||||
commandContext: CommandContext;
|
||||
setSuggestions: (suggestions: Suggestion[]) => void;
|
||||
setIsLoadingSuggestions: (isLoading: boolean) => void;
|
||||
setIsPerfectMatch: (isMatch: boolean) => void;
|
||||
}
|
||||
|
||||
export function useSlashCompletion(props: UseSlashCompletionProps): {
|
||||
completionStart: number;
|
||||
completionEnd: number;
|
||||
} {
|
||||
const {
|
||||
enabled,
|
||||
query,
|
||||
slashCommands,
|
||||
commandContext,
|
||||
setSuggestions,
|
||||
setIsLoadingSuggestions,
|
||||
setIsPerfectMatch,
|
||||
} = props;
|
||||
const [completionStart, setCompletionStart] = useState(-1);
|
||||
const [completionEnd, setCompletionEnd] = useState(-1);
|
||||
|
||||
useEffect(() => {
|
||||
if (!enabled || query === null) {
|
||||
return;
|
||||
}
|
||||
|
||||
const fullPath = query?.substring(1) || '';
|
||||
const hasTrailingSpace = !!query?.endsWith(' ');
|
||||
const rawParts = fullPath.split(/\s+/).filter((p) => p);
|
||||
let commandPathParts = rawParts;
|
||||
let partial = '';
|
||||
|
||||
if (!hasTrailingSpace && rawParts.length > 0) {
|
||||
partial = rawParts[rawParts.length - 1];
|
||||
commandPathParts = rawParts.slice(0, -1);
|
||||
}
|
||||
|
||||
let currentLevel: readonly SlashCommand[] | undefined = slashCommands;
|
||||
let leafCommand: SlashCommand | null = null;
|
||||
|
||||
for (const part of commandPathParts) {
|
||||
if (!currentLevel) {
|
||||
leafCommand = null;
|
||||
currentLevel = [];
|
||||
break;
|
||||
}
|
||||
const found: SlashCommand | undefined = currentLevel.find(
|
||||
(cmd) => cmd.name === part || cmd.altNames?.includes(part),
|
||||
);
|
||||
if (found) {
|
||||
leafCommand = found;
|
||||
currentLevel = found.subCommands as readonly SlashCommand[] | undefined;
|
||||
} else {
|
||||
leafCommand = null;
|
||||
currentLevel = [];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let exactMatchAsParent: SlashCommand | undefined;
|
||||
if (!hasTrailingSpace && currentLevel) {
|
||||
exactMatchAsParent = currentLevel.find(
|
||||
(cmd) =>
|
||||
(cmd.name === partial || cmd.altNames?.includes(partial)) &&
|
||||
cmd.subCommands,
|
||||
);
|
||||
|
||||
if (exactMatchAsParent) {
|
||||
leafCommand = exactMatchAsParent;
|
||||
currentLevel = exactMatchAsParent.subCommands;
|
||||
partial = '';
|
||||
}
|
||||
}
|
||||
|
||||
setIsPerfectMatch(false);
|
||||
if (!hasTrailingSpace) {
|
||||
if (leafCommand && partial === '' && leafCommand.action) {
|
||||
setIsPerfectMatch(true);
|
||||
} else if (currentLevel) {
|
||||
const perfectMatch = currentLevel.find(
|
||||
(cmd) =>
|
||||
(cmd.name === partial || cmd.altNames?.includes(partial)) &&
|
||||
cmd.action,
|
||||
);
|
||||
if (perfectMatch) {
|
||||
setIsPerfectMatch(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const depth = commandPathParts.length;
|
||||
const isArgumentCompletion =
|
||||
leafCommand?.completion &&
|
||||
(hasTrailingSpace ||
|
||||
(rawParts.length > depth && depth > 0 && partial !== ''));
|
||||
|
||||
if (hasTrailingSpace || exactMatchAsParent) {
|
||||
setCompletionStart(query.length);
|
||||
setCompletionEnd(query.length);
|
||||
} else if (partial) {
|
||||
if (isArgumentCompletion) {
|
||||
const commandSoFar = `/${commandPathParts.join(' ')}`;
|
||||
const argStartIndex =
|
||||
commandSoFar.length + (commandPathParts.length > 0 ? 1 : 0);
|
||||
setCompletionStart(argStartIndex);
|
||||
} else {
|
||||
setCompletionStart(query.length - partial.length);
|
||||
}
|
||||
setCompletionEnd(query.length);
|
||||
} else {
|
||||
setCompletionStart(1);
|
||||
setCompletionEnd(query.length);
|
||||
}
|
||||
|
||||
if (isArgumentCompletion) {
|
||||
const fetchAndSetSuggestions = async () => {
|
||||
setIsLoadingSuggestions(true);
|
||||
const argString = rawParts.slice(depth).join(' ');
|
||||
const results =
|
||||
(await leafCommand!.completion!(commandContext, argString)) || [];
|
||||
const finalSuggestions = results.map((s) => ({ label: s, value: s }));
|
||||
setSuggestions(finalSuggestions);
|
||||
setIsLoadingSuggestions(false);
|
||||
};
|
||||
fetchAndSetSuggestions();
|
||||
return;
|
||||
}
|
||||
|
||||
const commandsToSearch = currentLevel || [];
|
||||
if (commandsToSearch.length > 0) {
|
||||
let potentialSuggestions = commandsToSearch.filter(
|
||||
(cmd) =>
|
||||
cmd.description &&
|
||||
(cmd.name.startsWith(partial) ||
|
||||
cmd.altNames?.some((alt) => alt.startsWith(partial))),
|
||||
);
|
||||
|
||||
if (potentialSuggestions.length > 0 && !hasTrailingSpace) {
|
||||
const perfectMatch = potentialSuggestions.find(
|
||||
(s) => s.name === partial || s.altNames?.includes(partial),
|
||||
);
|
||||
if (perfectMatch && perfectMatch.action) {
|
||||
potentialSuggestions = [];
|
||||
}
|
||||
}
|
||||
|
||||
const finalSuggestions = potentialSuggestions.map((cmd) => ({
|
||||
label: cmd.name,
|
||||
value: cmd.name,
|
||||
description: cmd.description,
|
||||
}));
|
||||
|
||||
setSuggestions(finalSuggestions);
|
||||
return;
|
||||
}
|
||||
|
||||
setSuggestions([]);
|
||||
}, [
|
||||
enabled,
|
||||
query,
|
||||
slashCommands,
|
||||
commandContext,
|
||||
setSuggestions,
|
||||
setIsLoadingSuggestions,
|
||||
setIsPerfectMatch,
|
||||
]);
|
||||
|
||||
return {
|
||||
completionStart,
|
||||
completionEnd,
|
||||
};
|
||||
}
|
||||
@@ -1203,7 +1203,9 @@ describe('useVim hook', () => {
|
||||
});
|
||||
|
||||
// Press escape to clear pending state
|
||||
exitInsertMode(result);
|
||||
act(() => {
|
||||
result.current.handleInput({ name: 'escape' });
|
||||
});
|
||||
|
||||
// Now 'w' should just move cursor, not delete
|
||||
act(() => {
|
||||
@@ -1215,6 +1217,69 @@ describe('useVim hook', () => {
|
||||
expect(testBuffer.vimMoveWordForward).toHaveBeenCalledWith(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('NORMAL mode escape behavior', () => {
|
||||
it('should pass escape through when no pending operator is active', () => {
|
||||
mockVimContext.vimMode = 'NORMAL';
|
||||
const { result } = renderVimHook();
|
||||
|
||||
const handled = result.current.handleInput({ name: 'escape' });
|
||||
|
||||
expect(handled).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle escape and clear pending operator', () => {
|
||||
mockVimContext.vimMode = 'NORMAL';
|
||||
const { result } = renderVimHook();
|
||||
|
||||
act(() => {
|
||||
result.current.handleInput({ sequence: 'd' });
|
||||
});
|
||||
|
||||
let handled: boolean | undefined;
|
||||
act(() => {
|
||||
handled = result.current.handleInput({ name: 'escape' });
|
||||
});
|
||||
|
||||
expect(handled).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Shell command pass-through', () => {
|
||||
it('should pass through ctrl+r in INSERT mode', () => {
|
||||
mockVimContext.vimMode = 'INSERT';
|
||||
const { result } = renderVimHook();
|
||||
|
||||
const handled = result.current.handleInput({ name: 'r', ctrl: true });
|
||||
|
||||
expect(handled).toBe(false);
|
||||
});
|
||||
|
||||
it('should pass through ! in INSERT mode when buffer is empty', () => {
|
||||
mockVimContext.vimMode = 'INSERT';
|
||||
const emptyBuffer = createMockBuffer('');
|
||||
const { result } = renderVimHook(emptyBuffer);
|
||||
|
||||
const handled = result.current.handleInput({ sequence: '!' });
|
||||
|
||||
expect(handled).toBe(false);
|
||||
});
|
||||
|
||||
it('should handle ! as input in INSERT mode when buffer is not empty', () => {
|
||||
mockVimContext.vimMode = 'INSERT';
|
||||
const nonEmptyBuffer = createMockBuffer('not empty');
|
||||
const { result } = renderVimHook(nonEmptyBuffer);
|
||||
const key = { sequence: '!', name: '!' };
|
||||
|
||||
act(() => {
|
||||
result.current.handleInput(key);
|
||||
});
|
||||
|
||||
expect(nonEmptyBuffer.handleInput).toHaveBeenCalledWith(
|
||||
expect.objectContaining(key),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
// Line operations (dd, cc) are tested in text-buffer.test.ts
|
||||
|
||||
@@ -260,7 +260,8 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {
|
||||
normalizedKey.name === 'tab' ||
|
||||
(normalizedKey.name === 'return' && !normalizedKey.ctrl) ||
|
||||
normalizedKey.name === 'up' ||
|
||||
normalizedKey.name === 'down'
|
||||
normalizedKey.name === 'down' ||
|
||||
(normalizedKey.ctrl && normalizedKey.name === 'r')
|
||||
) {
|
||||
return false; // Let InputPrompt handle completion
|
||||
}
|
||||
@@ -270,6 +271,11 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {
|
||||
return false; // Let InputPrompt handle clipboard functionality
|
||||
}
|
||||
|
||||
// Let InputPrompt handle shell commands
|
||||
if (normalizedKey.sequence === '!' && buffer.text.length === 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Special handling for Enter key to allow command submission (lower priority than completion)
|
||||
if (
|
||||
normalizedKey.name === 'return' &&
|
||||
@@ -399,10 +405,14 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {
|
||||
|
||||
// Handle NORMAL mode
|
||||
if (state.mode === 'NORMAL') {
|
||||
// Handle Escape key in NORMAL mode - clear all pending states
|
||||
// If in NORMAL mode, allow escape to pass through to other handlers
|
||||
// if there's no pending operation.
|
||||
if (normalizedKey.name === 'escape') {
|
||||
dispatch({ type: 'CLEAR_PENDING_STATES' });
|
||||
return true; // Handled by vim
|
||||
if (state.pendingOperator) {
|
||||
dispatch({ type: 'CLEAR_PENDING_STATES' });
|
||||
return true; // Handled by vim
|
||||
}
|
||||
return false; // Pass through to other handlers
|
||||
}
|
||||
|
||||
// Handle count input (numbers 1-9, and 0 if count > 0)
|
||||
|
||||
@@ -8,8 +8,9 @@ import util from 'util';
|
||||
import { ConsoleMessageItem } from '../types.js';
|
||||
|
||||
interface ConsolePatcherParams {
|
||||
onNewMessage: (message: Omit<ConsoleMessageItem, 'id'>) => void;
|
||||
onNewMessage?: (message: Omit<ConsoleMessageItem, 'id'>) => void;
|
||||
debugMode: boolean;
|
||||
stderr?: boolean;
|
||||
}
|
||||
|
||||
export class ConsolePatcher {
|
||||
@@ -46,16 +47,22 @@ export class ConsolePatcher {
|
||||
originalMethod: (...args: unknown[]) => void,
|
||||
) =>
|
||||
(...args: unknown[]) => {
|
||||
if (this.params.debugMode) {
|
||||
originalMethod.apply(console, args);
|
||||
}
|
||||
if (this.params.stderr) {
|
||||
if (type !== 'debug' || this.params.debugMode) {
|
||||
this.originalConsoleError(this.formatArgs(args));
|
||||
}
|
||||
} else {
|
||||
if (this.params.debugMode) {
|
||||
originalMethod.apply(console, args);
|
||||
}
|
||||
|
||||
if (type !== 'debug' || this.params.debugMode) {
|
||||
this.params.onNewMessage({
|
||||
type,
|
||||
content: this.formatArgs(args),
|
||||
count: 1,
|
||||
});
|
||||
if (type !== 'debug' || this.params.debugMode) {
|
||||
this.params.onNewMessage?.({
|
||||
type,
|
||||
content: this.formatArgs(args),
|
||||
count: 1,
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
21
packages/cli/src/utils/resolvePath.ts
Normal file
21
packages/cli/src/utils/resolvePath.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import * as os from 'os';
|
||||
import * as path from 'path';
|
||||
|
||||
export function resolvePath(p: string): string {
|
||||
if (!p) {
|
||||
return '';
|
||||
}
|
||||
let expandedPath = p;
|
||||
if (p.toLowerCase().startsWith('%userprofile%')) {
|
||||
expandedPath = os.homedir() + p.substring('%userprofile%'.length);
|
||||
} else if (p === '~' || p.startsWith('~/')) {
|
||||
expandedPath = os.homedir() + p.substring(1);
|
||||
}
|
||||
return path.normalize(expandedPath);
|
||||
}
|
||||
@@ -9,7 +9,7 @@
|
||||
(subpath (param "TARGET_DIR"))
|
||||
(subpath (param "TMP_DIR"))
|
||||
(subpath (param "CACHE_DIR"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gemini"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.qwen"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.npm"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.cache"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gitconfig"))
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
(subpath (param "TARGET_DIR"))
|
||||
(subpath (param "TMP_DIR"))
|
||||
(subpath (param "CACHE_DIR"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gemini"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.qwen"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.npm"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.cache"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gitconfig"))
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
(subpath (param "TARGET_DIR"))
|
||||
(subpath (param "TMP_DIR"))
|
||||
(subpath (param "CACHE_DIR"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gemini"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.qwen"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.npm"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.cache"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gitconfig"))
|
||||
|
||||
@@ -67,7 +67,7 @@
|
||||
(subpath (param "TARGET_DIR"))
|
||||
(subpath (param "TMP_DIR"))
|
||||
(subpath (param "CACHE_DIR"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gemini"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.qwen"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.npm"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.cache"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gitconfig"))
|
||||
|
||||
@@ -67,7 +67,7 @@
|
||||
(subpath (param "TARGET_DIR"))
|
||||
(subpath (param "TMP_DIR"))
|
||||
(subpath (param "CACHE_DIR"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gemini"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.qwen"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.npm"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.cache"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gitconfig"))
|
||||
|
||||
@@ -67,7 +67,7 @@
|
||||
(subpath (param "TARGET_DIR"))
|
||||
(subpath (param "TMP_DIR"))
|
||||
(subpath (param "CACHE_DIR"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gemini"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.qwen"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.npm"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.cache"))
|
||||
(subpath (string-append (param "HOME_DIR") "/.gitconfig"))
|
||||
|
||||
@@ -562,6 +562,10 @@ export async function start_sandbox(
|
||||
if (process.env.OPENAI_API_KEY) {
|
||||
args.push('--env', `OPENAI_API_KEY=${process.env.OPENAI_API_KEY}`);
|
||||
}
|
||||
// copy TAVILY_API_KEY for web search tool
|
||||
if (process.env.TAVILY_API_KEY) {
|
||||
args.push('--env', `TAVILY_API_KEY=${process.env.TAVILY_API_KEY}`);
|
||||
}
|
||||
if (process.env.OPENAI_BASE_URL) {
|
||||
args.push('--env', `OPENAI_BASE_URL=${process.env.OPENAI_BASE_URL}`);
|
||||
}
|
||||
|
||||
@@ -21,6 +21,9 @@ function getAuthTypeFromEnv(): AuthType | undefined {
|
||||
if (process.env.OPENAI_API_KEY) {
|
||||
return AuthType.USE_OPENAI;
|
||||
}
|
||||
if (process.env.QWEN_OAUTH_TOKEN) {
|
||||
return AuthType.QWEN_OAUTH;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code-core",
|
||||
"version": "0.0.5-nightly.3",
|
||||
"version": "0.0.7",
|
||||
"description": "Qwen Code Core",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -33,27 +33,32 @@
|
||||
"chardet": "^2.1.0",
|
||||
"diff": "^7.0.0",
|
||||
"dotenv": "^17.1.0",
|
||||
"fdir": "^6.4.6",
|
||||
"glob": "^10.4.5",
|
||||
"google-auth-library": "^9.11.0",
|
||||
"html-to-text": "^9.0.5",
|
||||
"https-proxy-agent": "^7.0.6",
|
||||
"ignore": "^7.0.0",
|
||||
"jsonrepair": "^3.13.0",
|
||||
"marked": "^15.0.12",
|
||||
"micromatch": "^4.0.8",
|
||||
"open": "^10.1.2",
|
||||
"openai": "^5.7.0",
|
||||
"openai": "5.11.0",
|
||||
"picomatch": "^4.0.1",
|
||||
"shell-quote": "^1.8.3",
|
||||
"simple-git": "^3.28.0",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"tiktoken": "^1.0.21",
|
||||
"undici": "^7.10.0",
|
||||
"ws": "^8.18.0",
|
||||
"tiktoken": "^1.0.21"
|
||||
"ws": "^8.18.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@qwen-code/qwen-code-test-utils": "file:../test-utils",
|
||||
"@types/diff": "^7.0.2",
|
||||
"@types/dotenv": "^6.1.1",
|
||||
"@types/micromatch": "^4.0.8",
|
||||
"@types/minimatch": "^5.1.2",
|
||||
"@types/picomatch": "^4.0.1",
|
||||
"@types/ws": "^8.5.10",
|
||||
"typescript": "^5.3.3",
|
||||
"vitest": "^3.1.1"
|
||||
|
||||
@@ -18,7 +18,18 @@ import {
|
||||
} from '../core/contentGenerator.js';
|
||||
import { GeminiClient } from '../core/client.js';
|
||||
import { GitService } from '../services/gitService.js';
|
||||
import { IdeClient } from '../ide/ide-client.js';
|
||||
|
||||
vi.mock('fs', async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import('fs')>();
|
||||
return {
|
||||
...actual,
|
||||
existsSync: vi.fn().mockReturnValue(true),
|
||||
statSync: vi.fn().mockReturnValue({
|
||||
isDirectory: vi.fn().mockReturnValue(true),
|
||||
}),
|
||||
realpathSync: vi.fn((path) => path),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock('fs', async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import('fs')>();
|
||||
@@ -60,8 +71,8 @@ vi.mock('../tools/read-many-files');
|
||||
vi.mock('../tools/memoryTool', () => ({
|
||||
MemoryTool: vi.fn(),
|
||||
setGeminiMdFilename: vi.fn(),
|
||||
getCurrentGeminiMdFilename: vi.fn(() => 'GEMINI.md'), // Mock the original filename
|
||||
DEFAULT_CONTEXT_FILENAME: 'GEMINI.md',
|
||||
getCurrentGeminiMdFilename: vi.fn(() => 'QWEN.md'), // Mock the original filename
|
||||
DEFAULT_CONTEXT_FILENAME: 'QWEN.md',
|
||||
GEMINI_CONFIG_DIR: '.gemini',
|
||||
}));
|
||||
|
||||
@@ -120,7 +131,6 @@ describe('Server Config (config.ts)', () => {
|
||||
telemetry: TELEMETRY_SETTINGS,
|
||||
sessionId: SESSION_ID,
|
||||
model: MODEL,
|
||||
ideClient: IdeClient.getInstance(false),
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
|
||||
@@ -43,11 +43,13 @@ import {
|
||||
DEFAULT_GEMINI_EMBEDDING_MODEL,
|
||||
DEFAULT_GEMINI_FLASH_MODEL,
|
||||
} from './models.js';
|
||||
import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js';
|
||||
import { QwenLogger } from '../telemetry/qwen-logger/qwen-logger.js';
|
||||
import { shouldAttemptBrowserLaunch } from '../utils/browser.js';
|
||||
import { MCPOAuthConfig } from '../mcp/oauth-provider.js';
|
||||
import { IdeClient } from '../ide/ide-client.js';
|
||||
import type { Content } from '@google/genai';
|
||||
import { logIdeConnection } from '../telemetry/loggers.js';
|
||||
import { IdeConnectionEvent, IdeConnectionType } from '../telemetry/types.js';
|
||||
|
||||
// Re-export OAuth config type
|
||||
export type { MCPOAuthConfig };
|
||||
@@ -79,6 +81,12 @@ export interface TelemetrySettings {
|
||||
outfile?: string;
|
||||
}
|
||||
|
||||
export interface GitCoAuthorSettings {
|
||||
enabled?: boolean;
|
||||
name?: string;
|
||||
email?: string;
|
||||
}
|
||||
|
||||
export interface GeminiCLIExtension {
|
||||
name: string;
|
||||
version: string;
|
||||
@@ -164,6 +172,7 @@ export interface ConfigParameters {
|
||||
contextFileName?: string | string[];
|
||||
accessibility?: AccessibilitySettings;
|
||||
telemetry?: TelemetrySettings;
|
||||
gitCoAuthor?: GitCoAuthorSettings;
|
||||
usageStatisticsEnabled?: boolean;
|
||||
fileFiltering?: {
|
||||
respectGitIgnore?: boolean;
|
||||
@@ -189,7 +198,6 @@ export interface ConfigParameters {
|
||||
summarizeToolOutput?: Record<string, SummarizeToolOutputSettings>;
|
||||
ideModeFeature?: boolean;
|
||||
ideMode?: boolean;
|
||||
ideClient?: IdeClient;
|
||||
enableOpenAILogging?: boolean;
|
||||
sampling_params?: Record<string, unknown>;
|
||||
systemPromptMappings?: Array<{
|
||||
@@ -201,6 +209,10 @@ export interface ConfigParameters {
|
||||
timeout?: number;
|
||||
maxRetries?: number;
|
||||
};
|
||||
cliVersion?: string;
|
||||
loadMemoryFromIncludeDirectories?: boolean;
|
||||
// Web search providers
|
||||
tavilyApiKey?: string;
|
||||
}
|
||||
|
||||
export class Config {
|
||||
@@ -227,6 +239,7 @@ export class Config {
|
||||
private readonly showMemoryUsage: boolean;
|
||||
private readonly accessibility: AccessibilitySettings;
|
||||
private readonly telemetrySettings: TelemetrySettings;
|
||||
private readonly gitCoAuthor: GitCoAuthorSettings;
|
||||
private readonly usageStatisticsEnabled: boolean;
|
||||
private geminiClient!: GeminiClient;
|
||||
private readonly fileFiltering: {
|
||||
@@ -273,6 +286,10 @@ export class Config {
|
||||
timeout?: number;
|
||||
maxRetries?: number;
|
||||
};
|
||||
private readonly cliVersion?: string;
|
||||
private readonly loadMemoryFromIncludeDirectories: boolean = false;
|
||||
private readonly tavilyApiKey?: string;
|
||||
|
||||
constructor(params: ConfigParameters) {
|
||||
this.sessionId = params.sessionId;
|
||||
this.embeddingModel =
|
||||
@@ -304,6 +321,11 @@ export class Config {
logPrompts: params.telemetry?.logPrompts ?? true,
outfile: params.telemetry?.outfile,
};
this.gitCoAuthor = {
enabled: params.gitCoAuthor?.enabled ?? true,
name: params.gitCoAuthor?.name ?? 'Qwen-Coder',
email: params.gitCoAuthor?.email ?? 'qwen-coder@alibabacloud.com',
};
this.usageStatisticsEnabled = params.usageStatisticsEnabled ?? true;

this.fileFiltering = {
@@ -320,7 +342,7 @@ export class Config {
this.model = params.model;
this.extensionContextFilePaths = params.extensionContextFilePaths ?? [];
this.maxSessionTurns = params.maxSessionTurns ?? -1;
this.sessionTokenLimit = params.sessionTokenLimit ?? 32000;
this.sessionTokenLimit = params.sessionTokenLimit ?? -1;
this.maxFolderItems = params.maxFolderItems ?? 20;
this.experimentalAcp = params.experimentalAcp ?? false;
this.listExtensions = params.listExtensions ?? false;
@@ -330,13 +352,22 @@ export class Config {
this.summarizeToolOutput = params.summarizeToolOutput;
this.ideModeFeature = params.ideModeFeature ?? false;
this.ideMode = params.ideMode ?? false;
this.ideClient =
params.ideClient ??
IdeClient.getInstance(this.ideMode && this.ideModeFeature);
this.ideClient = IdeClient.getInstance();
if (this.ideMode && this.ideModeFeature) {
this.ideClient.connect();
logIdeConnection(this, new IdeConnectionEvent(IdeConnectionType.START));
}
this.systemPromptMappings = params.systemPromptMappings;
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
this.sampling_params = params.sampling_params;
this.contentGenerator = params.contentGenerator;
this.cliVersion = params.cliVersion;

this.loadMemoryFromIncludeDirectories =
params.loadMemoryFromIncludeDirectories ?? false;

// Web search
this.tavilyApiKey = params.tavilyApiKey;

if (params.contextFileName) {
setGeminiMdFilename(params.contextFileName);
@@ -347,7 +378,7 @@ export class Config {
}

if (this.getUsageStatisticsEnabled()) {
ClearcutLogger.getInstance(this)?.logStartSessionEvent(
QwenLogger.getInstance(this)?.logStartSessionEvent(
new StartSessionEvent(this),
);
} else {
@@ -399,6 +430,10 @@ export class Config {
return this.sessionId;
}

shouldLoadMemoryFromIncludeDirectories(): boolean {
return this.loadMemoryFromIncludeDirectories;
}

getContentGeneratorConfig(): ContentGeneratorConfig {
return this.contentGeneratorConfig;
}
@@ -571,6 +606,10 @@ export class Config {
return this.telemetrySettings.outfile;
}

getGitCoAuthor(): GitCoAuthorSettings {
return this.gitCoAuthor;
}

getGeminiClient(): GeminiClient {
return this.geminiClient;
}
@@ -662,6 +701,11 @@ export class Config {
return this.summarizeToolOutput;
}

// Web search provider configuration
getTavilyApiKey(): string | undefined {
return this.tavilyApiKey;
}

getIdeModeFeature(): boolean {
return this.ideModeFeature;
}
@@ -678,12 +722,14 @@ export class Config {
this.ideMode = value;
}

setIdeClientDisconnected(): void {
this.ideClient.setDisconnected();
}

setIdeClientConnected(): void {
this.ideClient.reconnect(this.ideMode && this.ideModeFeature);
async setIdeModeAndSyncConnection(value: boolean): Promise<void> {
this.ideMode = value;
if (value) {
await this.ideClient.connect();
logIdeConnection(this, new IdeConnectionEvent(IdeConnectionType.SESSION));
} else {
this.ideClient.disconnect();
}
}

getEnableOpenAILogging(): boolean {
@@ -702,6 +748,10 @@ export class Config {
return this.contentGenerator?.maxRetries;
}

getCliVersion(): string | undefined {
return this.cliVersion;
}

getSystemPromptMappings():
| Array<{
baseUrls?: string[];
@@ -766,7 +816,10 @@ export class Config {
registerCoreTool(ReadManyFilesTool, this);
registerCoreTool(ShellTool, this);
registerCoreTool(MemoryTool);
registerCoreTool(WebSearchTool, this);
// Conditionally register web search tool only if Tavily API key is set
if (this.getTavilyApiKey()) {
registerCoreTool(WebSearchTool, this);
}

await registry.discoverAllTools();
return registry;
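A minimal sketch of the registration change above, assuming the helpers named in the hunk (getTavilyApiKey, registerCoreTool, WebSearchTool); the standalone wrapper function is illustrative and not part of the repository:

function maybeRegisterWebSearch(config: Config): void {
  // Web search is now opt-in: without a Tavily API key the tool is never
  // registered, so the model is never offered a search function it cannot run.
  if (config.getTavilyApiKey()) {
    registerCoreTool(WebSearchTool, config);
  }
}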
@@ -7,7 +7,6 @@
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { Config } from './config.js';
import { DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_FLASH_MODEL } from './models.js';
import { IdeClient } from '../ide/ide-client.js';
import fs from 'node:fs';

vi.mock('node:fs');
@@ -26,7 +25,6 @@ describe('Flash Model Fallback Configuration', () => {
debugMode: false,
cwd: '/test',
model: DEFAULT_GEMINI_MODEL,
ideClient: IdeClient.getInstance(false),
});

// Initialize contentGeneratorConfig for testing
@@ -51,7 +49,6 @@ describe('Flash Model Fallback Configuration', () => {
debugMode: false,
cwd: '/test',
model: DEFAULT_GEMINI_MODEL,
ideClient: IdeClient.getInstance(false),
});

// Should not crash when contentGeneratorConfig is undefined
@@ -75,7 +72,6 @@ describe('Flash Model Fallback Configuration', () => {
debugMode: false,
cwd: '/test',
model: 'custom-model',
ideClient: IdeClient.getInstance(false),
});

expect(newConfig.getModel()).toBe('custom-model');

@@ -4,6 +4,10 @@
* SPDX-License-Identifier: Apache-2.0
*/

export const DEFAULT_QWEN_MODEL = 'qwen3-coder-plus';
// We do not have a fallback model for now, but note it here anyway.
export const DEFAULT_QWEN_FLASH_MODEL = 'qwen3-coder-flash';

export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-plus';
export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
export const DEFAULT_GEMINI_FLASH_LITE_MODEL = 'gemini-2.5-flash-lite';
@@ -15,6 +15,7 @@ vi.mock('openai');
// Mock logger modules
vi.mock('../../telemetry/loggers.js', () => ({
logApiResponse: vi.fn(),
logApiError: vi.fn(),
}));

vi.mock('../../utils/openaiLogger.js', () => ({
@@ -44,6 +45,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
timeout: 120000,
maxRetries: 3,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;

// Mock OpenAI client
@@ -87,7 +89,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);

try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (thrownError: unknown) {
// Should contain timeout-specific messaging and troubleshooting tips
const errorMessage =
@@ -119,7 +121,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);

try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (thrownError: unknown) {
// Should NOT contain timeout-specific messaging
const errorMessage =
@@ -128,7 +130,8 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
: String(thrownError);
expect(errorMessage).not.toMatch(/timeout after \d+s/);
expect(errorMessage).not.toMatch(/Troubleshooting tips:/);
expect(errorMessage).toMatch(/OpenAI API error:/);
// Should preserve the original error message
expect(errorMessage).toMatch(new RegExp(error.message));
}
}
});
@@ -145,7 +148,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4',
};

await expect(generator.generateContent(request)).rejects.toThrow(
await expect(
generator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow(
/Request timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
);
});
@@ -160,9 +165,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4',
};

await expect(generator.generateContent(request)).rejects.toThrow(
'OpenAI API error: Invalid API key',
);
await expect(
generator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow('Invalid API key');
});

it('should include troubleshooting tips for timeout errors', async () => {
@@ -175,7 +180,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};

try {
await generator.generateContent(request);
await generator.generateContent(request, 'test-prompt-id');
} catch (error: unknown) {
const errorMessage =
error instanceof Error ? error.message : String(error);
@@ -198,7 +203,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4',
};

await expect(generator.generateContentStream(request)).rejects.toThrow(
await expect(
generator.generateContentStream(request, 'test-prompt-id'),
).rejects.toThrow(
/Streaming setup timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
);
});
@@ -213,7 +220,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};

try {
await generator.generateContentStream(request);
await generator.generateContentStream(request, 'test-prompt-id');
} catch (error: unknown) {
const errorMessage =
error instanceof Error ? error.message : String(error);
@@ -238,6 +245,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
baseURL: '',
timeout: 120000,
maxRetries: 3,
defaultHeaders: {
'User-Agent': expect.stringMatching(/^QwenCode/),
},
});
});

@@ -247,6 +257,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
timeout: 300000, // 5 minutes
maxRetries: 5,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;

new OpenAIContentGenerator('test-key', 'gpt-4', customConfig);
@@ -256,12 +267,16 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
baseURL: '',
timeout: 300000,
maxRetries: 5,
defaultHeaders: {
'User-Agent': expect.stringMatching(/^QwenCode/),
},
});
});

it('should handle missing timeout config gracefully', () => {
const noTimeoutConfig = {
getContentGeneratorConfig: vi.fn().mockReturnValue({}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;

new OpenAIContentGenerator('test-key', 'gpt-4', noTimeoutConfig);
@@ -271,33 +286,26 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
baseURL: '',
timeout: 120000, // default
maxRetries: 3, // default
defaultHeaders: {
'User-Agent': expect.stringMatching(/^QwenCode/),
},
});
});
});

describe('token estimation on timeout', () => {
it('should estimate tokens even when request times out', async () => {
it('should surface a clear timeout error when request times out', async () => {
const timeoutError = new Error('Request timeout');
mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);

// Mock countTokens to return a value
const mockCountTokens = vi.spyOn(generator, 'countTokens');
mockCountTokens.mockResolvedValue({ totalTokens: 100 });

const request = {
contents: [{ role: 'user' as const, parts: [{ text: 'Hello world' }] }],
model: 'gpt-4',
};

try {
await generator.generateContent(request);
} catch (_error) {
// Verify that countTokens was called for estimation
expect(mockCountTokens).toHaveBeenCalledWith({
contents: request.contents,
model: 'gpt-4',
});
}
await expect(
generator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow(/Request timeout after \d+s/);
});

it('should fall back to character-based estimation if countTokens fails', async () => {
@@ -314,9 +322,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
};

// Should not throw due to token counting failure
await expect(generator.generateContent(request)).rejects.toThrow(
/Request timeout after \d+s/,
);
await expect(
generator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow(/Request timeout after \d+s/);
});
});
});
@@ -228,6 +228,7 @@ describe('Gemini Client (client.ts)', () => {
getGeminiClient: vi.fn(),
setFallbackMode: vi.fn(),
getDebugMode: vi.fn().mockReturnValue(false),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
};
const MockedConfig = vi.mocked(Config, true);
MockedConfig.mockImplementation(

@@ -797,6 +797,11 @@ export class GeminiClient {
authType?: string,
error?: unknown,
): Promise<string | null> {
// Handle different auth types
if (authType === AuthType.QWEN_OAUTH) {
return this.handleQwenOAuthError(error);
}

// Only handle fallback for OAuth users
if (authType !== AuthType.LOGIN_WITH_GOOGLE) {
return null;
@@ -835,4 +840,59 @@ export class GeminiClient {

return null;
}

/**
* Handles Qwen OAuth authentication errors and rate limiting
*/
private async handleQwenOAuthError(error?: unknown): Promise<string | null> {
if (!error) {
return null;
}

const errorMessage =
error instanceof Error
? error.message.toLowerCase()
: String(error).toLowerCase();
const errorCode =
(error as { status?: number; code?: number })?.status ||
(error as { status?: number; code?: number })?.code;

// Check if this is an authentication/authorization error
const isAuthError =
errorCode === 401 ||
errorCode === 403 ||
errorMessage.includes('unauthorized') ||
errorMessage.includes('forbidden') ||
errorMessage.includes('invalid api key') ||
errorMessage.includes('authentication') ||
errorMessage.includes('access denied') ||
(errorMessage.includes('token') && errorMessage.includes('expired'));

// Check if this is a rate limiting error
const isRateLimitError =
errorCode === 429 ||
errorMessage.includes('429') ||
errorMessage.includes('rate limit') ||
errorMessage.includes('too many requests');

if (isAuthError) {
console.warn('Qwen OAuth authentication error detected:', errorMessage);
// The QwenContentGenerator should automatically handle token refresh
// If it still fails, it likely means the refresh token is also expired
console.log(
'Note: If this persists, you may need to re-authenticate with Qwen OAuth',
);
return null;
}

if (isRateLimitError) {
console.warn('Qwen API rate limit encountered:', errorMessage);
// For rate limiting, we don't need to do anything special
// The retry mechanism will handle the backoff
return null;
}

// For other errors, don't handle them specially
return null;
}
}
@@ -17,7 +17,9 @@ import { Config } from '../config/config.js';
vi.mock('../code_assist/codeAssist.js');
vi.mock('@google/genai');

const mockConfig = {} as unknown as Config;
const mockConfig = {
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;

describe('createContentGenerator', () => {
it('should create a CodeAssistContentGenerator', async () => {
@@ -73,6 +75,7 @@ describe('createContentGeneratorConfig', () => {
getSamplingParams: vi.fn().mockReturnValue(undefined),
getContentGeneratorTimeout: vi.fn().mockReturnValue(undefined),
getContentGeneratorMaxRetries: vi.fn().mockReturnValue(undefined),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;

beforeEach(() => {

@@ -14,7 +14,7 @@ import {
GoogleGenAI,
} from '@google/genai';
import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
import { DEFAULT_GEMINI_MODEL } from '../config/models.js';
import { DEFAULT_GEMINI_MODEL, DEFAULT_QWEN_MODEL } from '../config/models.js';
import { Config } from '../config/config.js';
import { getEffectiveModel } from './modelCheck.js';
import { UserTierId } from '../code_assist/types.js';
@@ -46,6 +46,7 @@ export enum AuthType {
USE_VERTEX_AI = 'vertex-ai',
CLOUD_SHELL = 'cloud-shell',
USE_OPENAI = 'openai',
QWEN_OAUTH = 'qwen-oauth',
}

export type ContentGeneratorConfig = {
@@ -131,6 +132,17 @@ export function createContentGeneratorConfig(
return contentGeneratorConfig;
}

if (authType === AuthType.QWEN_OAUTH) {
// For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
// Set a special marker to indicate this is Qwen OAuth
contentGeneratorConfig.apiKey = 'QWEN_OAUTH_DYNAMIC_TOKEN';

// Prefer to use qwen3-coder-plus as the default Qwen model if QWEN_MODEL is not set.
contentGeneratorConfig.model = process.env.QWEN_MODEL || DEFAULT_QWEN_MODEL;

return contentGeneratorConfig;
}

return contentGeneratorConfig;
}

@@ -139,7 +151,7 @@ export async function createContentGenerator(
gcConfig: Config,
sessionId?: string,
): Promise<ContentGenerator> {
const version = process.env.CLI_VERSION || process.version;
const version = gcConfig.getCliVersion() || 'unknown';
const httpOptions = {
headers: {
'User-Agent': `GeminiCLI/${version} (${process.platform}; ${process.arch})`,
@@ -184,6 +196,32 @@ export async function createContentGenerator(
return new OpenAIContentGenerator(config.apiKey, config.model, gcConfig);
}

if (config.authType === AuthType.QWEN_OAUTH) {
if (config.apiKey !== 'QWEN_OAUTH_DYNAMIC_TOKEN') {
throw new Error('Invalid Qwen OAuth configuration');
}

// Import required classes dynamically
const { getQwenOAuthClient: getQwenOauthClient } = await import(
'../qwen/qwenOAuth2.js'
);
const { QwenContentGenerator } = await import(
'../qwen/qwenContentGenerator.js'
);

try {
// Get the Qwen OAuth client (now includes integrated token management)
const qwenClient = await getQwenOauthClient(gcConfig);

// Create the content generator with dynamic token management
return new QwenContentGenerator(qwenClient, config.model, gcConfig);
} catch (error) {
throw new Error(
`Failed to initialize Qwen: ${error instanceof Error ? error.message : String(error)}`,
);
}
}

throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
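A rough usage sketch of the Qwen OAuth path above, assuming the helpers shown in this file (createContentGeneratorConfig, createContentGenerator); the exact argument order and the sessionId variable are illustrative:

// With QWEN_OAUTH the config carries only a placeholder key; the real token is
// resolved later by QwenContentGenerator through the dynamically imported OAuth client.
const generatorConfig = createContentGeneratorConfig(config, AuthType.QWEN_OAUTH);
// generatorConfig.apiKey === 'QWEN_OAUTH_DYNAMIC_TOKEN'
// generatorConfig.model defaults to process.env.QWEN_MODEL || DEFAULT_QWEN_MODEL
const generator = await createContentGenerator(generatorConfig, config, sessionId);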
@@ -136,6 +136,7 @@ describe('CoreToolScheduler', () => {
onAllToolCallsComplete,
onToolCallsUpdate,
getPreferredEditor: () => 'vscode',
onEditorClose: vi.fn(),
});

const abortController = new AbortController();
@@ -205,6 +206,7 @@ describe('CoreToolScheduler with payload', () => {
onAllToolCallsComplete,
onToolCallsUpdate,
getPreferredEditor: () => 'vscode',
onEditorClose: vi.fn(),
});

const abortController = new AbortController();
@@ -482,6 +484,7 @@ describe('CoreToolScheduler edit cancellation', () => {
onAllToolCallsComplete,
onToolCallsUpdate,
getPreferredEditor: () => 'vscode',
onEditorClose: vi.fn(),
});

const abortController = new AbortController();
@@ -571,6 +574,7 @@ describe('CoreToolScheduler YOLO mode', () => {
onAllToolCallsComplete,
onToolCallsUpdate,
getPreferredEditor: () => 'vscode',
onEditorClose: vi.fn(),
});

const abortController = new AbortController();

@@ -224,6 +224,7 @@ interface CoreToolSchedulerOptions {
onToolCallsUpdate?: ToolCallsUpdateHandler;
getPreferredEditor: () => EditorType | undefined;
config: Config;
onEditorClose: () => void;
}

export class CoreToolScheduler {
@@ -234,6 +235,7 @@ export class CoreToolScheduler {
private onToolCallsUpdate?: ToolCallsUpdateHandler;
private getPreferredEditor: () => EditorType | undefined;
private config: Config;
private onEditorClose: () => void;

constructor(options: CoreToolSchedulerOptions) {
this.config = options.config;
@@ -242,6 +244,7 @@ export class CoreToolScheduler {
this.onAllToolCallsComplete = options.onAllToolCallsComplete;
this.onToolCallsUpdate = options.onToolCallsUpdate;
this.getPreferredEditor = options.getPreferredEditor;
this.onEditorClose = options.onEditorClose;
}

private setStatusInternal(
@@ -563,6 +566,7 @@ export class CoreToolScheduler {
modifyContext as ModifyContext<typeof waitingToolCall.request.args>,
editorType,
signal,
this.onEditorClose,
);
this.setArgsInternal(callId, updatedParams);
this.setStatusInternal(callId, 'awaiting_approval', {
@@ -158,14 +158,23 @@ export class GeminiChat {
prompt_id: string,
usageMetadata?: GenerateContentResponseUsageMetadata,
responseText?: string,
responseId?: string,
): Promise<void> {
const authType = this.config.getContentGeneratorConfig()?.authType;

// Don't log API responses for openaiContentGenerator
if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
return;
}

logApiResponse(
this.config,
new ApiResponseEvent(
responseId || `gemini-${Date.now()}`,
this.config.getModel(),
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
authType,
usageMetadata,
responseText,
),
@@ -176,18 +185,27 @@ export class GeminiChat {
durationMs: number,
error: unknown,
prompt_id: string,
responseId?: string,
): void {
const errorMessage = error instanceof Error ? error.message : String(error);
const errorType = error instanceof Error ? error.name : 'unknown';

const authType = this.config.getContentGeneratorConfig()?.authType;

// Don't log API errors for openaiContentGenerator
if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
return;
}

logApiError(
this.config,
new ApiErrorEvent(
responseId,
this.config.getModel(),
errorMessage,
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
authType,
errorType,
),
);
@@ -201,6 +219,11 @@ export class GeminiChat {
authType?: string,
error?: unknown,
): Promise<string | null> {
// Handle different auth types
if (authType === AuthType.QWEN_OAUTH) {
return this.handleQwenOAuthError(error);
}

// Only handle fallback for OAuth users
if (authType !== AuthType.LOGIN_WITH_GOOGLE) {
return null;
@@ -315,6 +338,7 @@ export class GeminiChat {
prompt_id,
response.usageMetadata,
JSON.stringify(response),
response.responseId,
);

this.sendPromise = (async () => {
@@ -558,6 +582,7 @@ export class GeminiChat {
prompt_id,
this.getFinalUsageMetadata(chunks),
JSON.stringify(chunks),
chunks[chunks.length - 1]?.responseId,
);
}
this.recordHistory(inputContent, outputContent);
@@ -674,4 +699,59 @@ export class GeminiChat {
content.parts[0].thought === true
);
}

/**
* Handles Qwen OAuth authentication errors and rate limiting
*/
private async handleQwenOAuthError(error?: unknown): Promise<string | null> {
if (!error) {
return null;
}

const errorMessage =
error instanceof Error
? error.message.toLowerCase()
: String(error).toLowerCase();
const errorCode =
(error as { status?: number; code?: number })?.status ||
(error as { status?: number; code?: number })?.code;

// Check if this is an authentication/authorization error
const isAuthError =
errorCode === 401 ||
errorCode === 403 ||
errorMessage.includes('unauthorized') ||
errorMessage.includes('forbidden') ||
errorMessage.includes('invalid api key') ||
errorMessage.includes('authentication') ||
errorMessage.includes('access denied') ||
(errorMessage.includes('token') && errorMessage.includes('expired'));

// Check if this is a rate limiting error
const isRateLimitError =
errorCode === 429 ||
errorMessage.includes('429') ||
errorMessage.includes('rate limit') ||
errorMessage.includes('too many requests');

if (isAuthError) {
console.warn('Qwen OAuth authentication error detected:', errorMessage);
// The QwenContentGenerator should automatically handle token refresh
// If it still fails, it likely means the refresh token is also expired
console.log(
'Note: If this persists, you may need to re-authenticate with Qwen OAuth',
);
return null;
}

if (isRateLimitError) {
console.warn('Qwen API rate limit encountered:', errorMessage);
// For rate limiting, we don't need to do anything special
// The retry mechanism will handle the backoff
return null;
}

// For other errors, don't handle them specially
return null;
}
}
packages/core/src/core/openaiContentGenerator.test.ts — new file, 3092 lines (diff suppressed because it is too large).
@@ -1,6 +1,6 @@
/**
* @license
* Copyright 2025 Google LLC
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/

@@ -20,12 +20,13 @@ import {
FunctionCall,
FunctionResponse,
} from '@google/genai';
import { ContentGenerator } from './contentGenerator.js';
import { AuthType, ContentGenerator } from './contentGenerator.js';
import OpenAI from 'openai';
import { logApiResponse } from '../telemetry/loggers.js';
import { ApiResponseEvent } from '../telemetry/types.js';
import { logApiError, logApiResponse } from '../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../telemetry/types.js';
import { Config } from '../config/config.js';
import { openaiLogger } from '../utils/openaiLogger.js';
import { safeJsonParse } from '../utils/safeJsonParse.js';

// OpenAI API type definitions for logging
interface OpenAIToolCall {
@@ -78,7 +79,7 @@ interface OpenAIResponseFormat {
}

export class OpenAIContentGenerator implements ContentGenerator {
private client: OpenAI;
protected client: OpenAI;
private model: string;
private config: Config;
private streamingToolCalls: Map<
@@ -114,14 +115,20 @@ export class OpenAIContentGenerator implements ContentGenerator {
timeoutConfig.maxRetries = contentGeneratorConfig.maxRetries;
}

const version = config.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;

// Check if using OpenRouter and add required headers
const isOpenRouter = baseURL.includes('openrouter.ai');
const defaultHeaders = isOpenRouter
? {
'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
'X-Title': 'Qwen Code',
}
: undefined;
const defaultHeaders = {
'User-Agent': userAgent,
...(isOpenRouter
? {
'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
'X-Title': 'Qwen Code',
}
: {}),
};

this.client = new OpenAI({
apiKey,
@@ -132,6 +139,19 @@ export class OpenAIContentGenerator implements ContentGenerator {
});
}

/**
* Hook for subclasses to customize error handling behavior
* @param error The error that occurred
* @param request The original request
* @returns true if error logging should be suppressed, false otherwise
*/
protected shouldSuppressErrorLogging(
_error: unknown,
_request: GenerateContentParameters,
): boolean {
return false; // Default behavior: never suppress error logging
}

/**
* Check if an error is a timeout error
*/
@@ -165,8 +185,49 @@ export class OpenAIContentGenerator implements ContentGenerator {
);
}

/**
* Determine if metadata should be included in the request.
* Only include the `metadata` field if the provider is QWEN_OAUTH
* or the baseUrl is 'https://dashscope.aliyuncs.com/compatible-mode/v1'.
* This is because some models/providers do not support metadata or need extra configuration.
*
* @returns true if metadata should be included, false otherwise
*/
private shouldIncludeMetadata(): boolean {
const authType = this.config.getContentGeneratorConfig?.()?.authType;
// baseUrl may be undefined; default to empty string if so
const baseUrl = this.client?.baseURL || '';

return (
authType === AuthType.QWEN_OAUTH ||
baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1'
);
}

/**
* Build metadata object for OpenAI API requests.
*
* @param userPromptId The user prompt ID to include in metadata
* @returns metadata object if shouldIncludeMetadata() returns true, undefined otherwise
*/
private buildMetadata(
userPromptId: string,
): { metadata: { sessionId?: string; promptId: string } } | undefined {
if (!this.shouldIncludeMetadata()) {
return undefined;
}

return {
metadata: {
sessionId: this.config.getSessionId?.(),
promptId: userPromptId,
},
};
}

async generateContent(
request: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
const startTime = Date.now();
const messages = this.convertToOpenAIFormat(request);
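A self-contained sketch of the metadata gating documented above; the function name and the plain-string authType are illustrative stand-ins for the private shouldIncludeMetadata/buildMetadata pair:

function buildMetadataSketch(
  authType: string | undefined,
  baseUrl: string,
  sessionId: string | undefined,
  userPromptId: string,
): { metadata: { sessionId?: string; promptId: string } } | undefined {
  // Only Qwen OAuth and the DashScope compatible-mode endpoint accept the field;
  // every other provider gets no metadata at all.
  const eligible =
    authType === 'qwen-oauth' ||
    baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1';
  if (!eligible) {
    return undefined;
  }
  return { metadata: { sessionId, promptId: userPromptId } };
}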
@@ -184,6 +245,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
model: this.model,
messages,
...samplingParams,
...(this.buildMetadata(userPromptId) || {}),
};

if (request.config?.tools) {
@@ -201,9 +263,10 @@ export class OpenAIContentGenerator implements ContentGenerator {

// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
response.responseId || 'unknown',
this.model,
durationMs,
`openai-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
response.usageMetadata,
);
@@ -229,41 +292,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
? error.message
: String(error);

// Estimate token usage even when there's an error
// This helps track costs and usage even for failed requests
let estimatedUsage;
try {
const tokenCountResult = await this.countTokens({
contents: request.contents,
model: this.model,
});
estimatedUsage = {
promptTokenCount: tokenCountResult.totalTokens,
candidatesTokenCount: 0, // No completion tokens since request failed
totalTokenCount: tokenCountResult.totalTokens,
};
} catch {
// If token counting also fails, provide a minimal estimate
const contentStr = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(contentStr.length / 4);
estimatedUsage = {
promptTokenCount: estimatedTokens,
candidatesTokenCount: 0,
totalTokenCount: estimatedTokens,
};
}

// Log API error event for UI telemetry with estimated usage
const errorEvent = new ApiResponseEvent(
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).requestID || 'unknown',
this.model,
durationMs,
`openai-${Date.now()}`, // Generate a prompt ID
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
errorMessage,
durationMs,
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).code,
);
logApiResponse(this.config, errorEvent);
logApiError(this.config, errorEvent);

// Log error interaction if enabled
if (this.config.getContentGeneratorConfig()?.enableOpenAILogging) {
@@ -275,7 +318,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
);
}

console.error('OpenAI API Error:', errorMessage);
// Allow subclasses to suppress error logging for specific scenarios
if (!this.shouldSuppressErrorLogging(error, request)) {
console.error('OpenAI API Error:', errorMessage);
}

// Provide helpful timeout-specific error message
if (isTimeoutError) {
@@ -288,12 +334,13 @@ export class OpenAIContentGenerator implements ContentGenerator {
);
}

throw new Error(`OpenAI API error: ${errorMessage}`);
throw error;
}
}

async generateContentStream(
request: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
const messages = this.convertToOpenAIFormat(request);
@@ -310,6 +357,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
...samplingParams,
stream: true,
stream_options: { include_usage: true },
...(this.buildMetadata(userPromptId) || {}),
};

if (request.config?.tools) {
@@ -318,8 +366,6 @@ export class OpenAIContentGenerator implements ContentGenerator {
);
}

// console.log('createParams', createParams);

const stream = (await this.client.chat.completions.create(
createParams,
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
@@ -347,9 +393,10 @@ export class OpenAIContentGenerator implements ContentGenerator {

// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
responses[responses.length - 1]?.responseId || 'unknown',
this.model,
durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
finalUsageMetadata,
);
@@ -378,40 +425,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
? error.message
: String(error);

// Estimate token usage even when there's an error in streaming
let estimatedUsage;
try {
const tokenCountResult = await this.countTokens({
contents: request.contents,
model: this.model,
});
estimatedUsage = {
promptTokenCount: tokenCountResult.totalTokens,
candidatesTokenCount: 0, // No completion tokens since request failed
totalTokenCount: tokenCountResult.totalTokens,
};
} catch {
// If token counting also fails, provide a minimal estimate
const contentStr = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(contentStr.length / 4);
estimatedUsage = {
promptTokenCount: estimatedTokens,
candidatesTokenCount: 0,
totalTokenCount: estimatedTokens,
};
}

// Log API error event for UI telemetry with estimated usage
const errorEvent = new ApiResponseEvent(
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).requestID || 'unknown',
this.model,
durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
errorMessage,
durationMs,
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).code,
);
logApiResponse(this.config, errorEvent);
logApiError(this.config, errorEvent);

// Log error interaction if enabled
if (this.config.getContentGeneratorConfig()?.enableOpenAILogging) {
@@ -451,42 +479,26 @@ export class OpenAIContentGenerator implements ContentGenerator {
? error.message
: String(error);

// Estimate token usage even when there's an error in streaming setup
let estimatedUsage;
try {
const tokenCountResult = await this.countTokens({
contents: request.contents,
model: this.model,
});
estimatedUsage = {
promptTokenCount: tokenCountResult.totalTokens,
candidatesTokenCount: 0, // No completion tokens since request failed
totalTokenCount: tokenCountResult.totalTokens,
};
} catch {
// If token counting also fails, provide a minimal estimate
const contentStr = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(contentStr.length / 4);
estimatedUsage = {
promptTokenCount: estimatedTokens,
candidatesTokenCount: 0,
totalTokenCount: estimatedTokens,
};
}

// Log API error event for UI telemetry with estimated usage
const errorEvent = new ApiResponseEvent(
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).requestID || 'unknown',
this.model,
durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
errorMessage,
durationMs,
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).code,
);
logApiResponse(this.config, errorEvent);
logApiError(this.config, errorEvent);

console.error('OpenAI API Streaming Error:', errorMessage);
// Allow subclasses to suppress error logging for specific scenarios
if (!this.shouldSuppressErrorLogging(error, request)) {
console.error('OpenAI API Streaming Error:', errorMessage);
}

// Provide helpful timeout-specific error message for streaming setup
if (isTimeoutError) {
@@ -499,7 +511,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
);
}

throw new Error(`OpenAI API error: ${errorMessage}`);
throw error;
}
}
@@ -551,7 +563,7 @@ export class OpenAIContentGenerator implements ContentGenerator {

// Add combined text if any
if (combinedText) {
combinedParts.push({ text: combinedText });
combinedParts.push({ text: combinedText.trimEnd() });
}

// Add function calls
@@ -728,6 +740,16 @@ export class OpenAIContentGenerator implements ContentGenerator {
return convertTypes(converted) as Record<string, unknown> | undefined;
}

/**
* Converts Gemini tools to OpenAI format for API compatibility.
* Handles both Gemini tools (using 'parameters' field) and MCP tools (using 'parametersJsonSchema' field).
*
* Gemini tools use a custom parameter format that needs conversion to OpenAI JSON Schema format.
* MCP tools already use JSON Schema format in the parametersJsonSchema field and can be used directly.
*
* @param geminiTools - Array of Gemini tools to convert
* @returns Promise resolving to array of OpenAI-compatible tools
*/
private async convertGeminiToolsToOpenAI(
geminiTools: ToolListUnion,
): Promise<OpenAI.Chat.ChatCompletionTool[]> {
@@ -748,14 +770,31 @@ export class OpenAIContentGenerator implements ContentGenerator {
if (actualTool.functionDeclarations) {
for (const func of actualTool.functionDeclarations) {
if (func.name && func.description) {
let parameters: Record<string, unknown> | undefined;

// Handle both Gemini tools (parameters) and MCP tools (parametersJsonSchema)
if (func.parametersJsonSchema) {
// MCP tool format - use parametersJsonSchema directly
if (func.parametersJsonSchema) {
// Create a shallow copy to avoid mutating the original object
const paramsCopy = {
...(func.parametersJsonSchema as Record<string, unknown>),
};
parameters = paramsCopy;
}
} else if (func.parameters) {
// Gemini tool format - convert parameters to OpenAI format
parameters = this.convertGeminiParametersToOpenAI(
func.parameters as Record<string, unknown>,
);
}

openAITools.push({
type: 'function',
function: {
name: func.name,
description: func.description,
parameters: this.convertGeminiParametersToOpenAI(
(func.parameters || {}) as Record<string, unknown>,
),
parameters,
},
});
}
@@ -1125,7 +1164,11 @@ export class OpenAIContentGenerator implements ContentGenerator {

// Handle text content
if (choice.message.content) {
parts.push({ text: choice.message.content });
if (typeof choice.message.content === 'string') {
parts.push({ text: choice.message.content.trimEnd() });
} else {
parts.push({ text: choice.message.content });
}
}

// Handle tool calls
@@ -1134,12 +1177,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
if (toolCall.function) {
let args: Record<string, unknown> = {};
if (toolCall.function.arguments) {
try {
args = JSON.parse(toolCall.function.arguments);
} catch (error) {
console.error('Failed to parse function arguments:', error);
args = {};
}
args = safeJsonParse(toolCall.function.arguments, {});
}

parts.push({
@@ -1215,7 +1253,11 @@ export class OpenAIContentGenerator implements ContentGenerator {

// Handle text content
if (choice.delta?.content) {
parts.push({ text: choice.delta.content });
if (typeof choice.delta.content === 'string') {
parts.push({ text: choice.delta.content.trimEnd() });
} else {
parts.push({ text: choice.delta.content });
}
}

// Handle tool calls - only accumulate during streaming, emit when complete
@@ -1251,19 +1293,14 @@ export class OpenAIContentGenerator implements ContentGenerator {
if (accumulatedCall.name) {
let args: Record<string, unknown> = {};
if (accumulatedCall.arguments) {
try {
args = JSON.parse(accumulatedCall.arguments);
} catch (error) {
console.error(
'Failed to parse final tool call arguments:',
error,
);
}
args = safeJsonParse(accumulatedCall.arguments, {});
}

parts.push({
functionCall: {
id: accumulatedCall.id,
id:
accumulatedCall.id ||
`call_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`,
name: accumulatedCall.name,
args,
},
@@ -1739,7 +1776,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
}
}

messageContent = textParts.join('');
messageContent = textParts.join('').trimEnd();
}

const choice: OpenAIChoice = {
Some files were not shown because too many files have changed in this diff.