mirror of
https://github.com/QwenLM/qwen-code.git
synced 2025-12-25 11:09:13 +00:00
Compare commits
1 Commits
release/v0
...
release/v0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bd3573ef68 |
@@ -46,7 +46,7 @@ jobs:
|
||||
|
||||
- name: 'Log in to the Container registry'
|
||||
if: |-
|
||||
${{ github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v') || github.event.inputs.publish == 'true') }}
|
||||
${{ github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) }}
|
||||
uses: 'docker/login-action@v3' # ratchet:exclude
|
||||
with:
|
||||
registry: '${{ env.REGISTRY }}'
|
||||
|
||||
28
.github/workflows/ci.yml
vendored
28
.github/workflows/ci.yml
vendored
@@ -95,19 +95,10 @@ jobs:
|
||||
with:
|
||||
node-version-file: '.nvmrc'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'package-lock.json'
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
|
||||
- name: 'Configure npm for rate limiting'
|
||||
run: |-
|
||||
npm config set fetch-retry-mintimeout 20000
|
||||
npm config set fetch-retry-maxtimeout 120000
|
||||
npm config set fetch-retries 5
|
||||
npm config set fetch-timeout 300000
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: |-
|
||||
npm ci --prefer-offline --no-audit --progress=false
|
||||
npm ci
|
||||
|
||||
- name: 'Run formatter check'
|
||||
run: |-
|
||||
@@ -282,24 +273,15 @@ jobs:
|
||||
with:
|
||||
node-version: '${{ matrix.node-version }}'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'package-lock.json'
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
|
||||
- name: 'Configure npm for rate limiting'
|
||||
run: |-
|
||||
npm config set fetch-retry-mintimeout 20000
|
||||
npm config set fetch-retry-maxtimeout 120000
|
||||
npm config set fetch-retries 5
|
||||
npm config set fetch-timeout 300000
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: |-
|
||||
npm ci --prefer-offline --no-audit --progress=false
|
||||
|
||||
- name: 'Build project'
|
||||
run: |-
|
||||
npm run build
|
||||
|
||||
- name: 'Install dependencies for testing'
|
||||
run: |-
|
||||
npm ci
|
||||
|
||||
- name: 'Run tests and generate reports'
|
||||
env:
|
||||
NO_COLOR: true
|
||||
|
||||
22
.github/workflows/e2e.yml
vendored
22
.github/workflows/e2e.yml
vendored
@@ -28,19 +28,10 @@ jobs:
|
||||
with:
|
||||
node-version: '${{ matrix.node-version }}'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'package-lock.json'
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
|
||||
- name: 'Configure npm for rate limiting'
|
||||
run: |-
|
||||
npm config set fetch-retry-mintimeout 20000
|
||||
npm config set fetch-retry-maxtimeout 120000
|
||||
npm config set fetch-retries 5
|
||||
npm config set fetch-timeout 300000
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: |-
|
||||
npm ci --prefer-offline --no-audit --progress=false
|
||||
npm ci
|
||||
|
||||
- name: 'Build project'
|
||||
run: |-
|
||||
@@ -83,19 +74,10 @@ jobs:
|
||||
with:
|
||||
node-version-file: '.nvmrc'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'package-lock.json'
|
||||
registry-url: 'https://registry.npmjs.org/'
|
||||
|
||||
- name: 'Configure npm for rate limiting'
|
||||
run: |-
|
||||
npm config set fetch-retry-mintimeout 20000
|
||||
npm config set fetch-retry-maxtimeout 120000
|
||||
npm config set fetch-retries 5
|
||||
npm config set fetch-timeout 300000
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: |-
|
||||
npm ci --prefer-offline --no-audit --progress=false
|
||||
npm ci
|
||||
|
||||
- name: 'Build project'
|
||||
run: |-
|
||||
|
||||
@@ -48,11 +48,14 @@ jobs:
|
||||
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
|
||||
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
|
||||
OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'
|
||||
settings_json: |-
|
||||
settings_json: |
|
||||
{
|
||||
"maxSessionTurns": 25,
|
||||
"coreTools": [
|
||||
"run_shell_command"
|
||||
"run_shell_command(echo)",
|
||||
"run_shell_command(gh label list)",
|
||||
"run_shell_command(gh issue edit)",
|
||||
"run_shell_command(gh issue list)"
|
||||
],
|
||||
"sandbox": false
|
||||
}
|
||||
@@ -65,7 +68,7 @@ jobs:
|
||||
## Steps
|
||||
|
||||
1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels.
|
||||
2. Use shell command `echo` to check the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
|
||||
2. Use right tool to review the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
|
||||
3. Ignore any existing priorities or tags on the issue. Just report your findings.
|
||||
4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case.
|
||||
6. Apply the selected labels to this issue using: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --add-label "label1,label2"`.
|
||||
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
|
||||
GITHUB_REPOSITORY: '${{ github.repository }}'
|
||||
run: |-
|
||||
run: |
|
||||
echo "🔍 Finding issues without labels..."
|
||||
NO_LABEL_ISSUES=$(gh issue list --repo ${{ github.repository }} --search "is:open is:issue no:label" --json number,title,body)
|
||||
|
||||
@@ -66,7 +66,7 @@ jobs:
|
||||
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
|
||||
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
|
||||
OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'
|
||||
settings_json: |-
|
||||
settings_json: |
|
||||
{
|
||||
"maxSessionTurns": 25,
|
||||
"coreTools": [
|
||||
@@ -88,7 +88,7 @@ jobs:
|
||||
## Steps
|
||||
|
||||
1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels.
|
||||
2. Use shell command `echo` to check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues)
|
||||
2. Use right tool to check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues)
|
||||
3. Review the issue title, body and any comments provided in the environment variables.
|
||||
4. Ignore any existing priorities or tags on the issue.
|
||||
5. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*.
|
||||
|
||||
19
.github/workflows/qwen-code-pr-review.yml
vendored
19
.github/workflows/qwen-code-pr-review.yml
vendored
@@ -16,7 +16,7 @@ on:
|
||||
|
||||
jobs:
|
||||
review-pr:
|
||||
if: |-
|
||||
if: >
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'pull_request_target' &&
|
||||
github.event.action == 'opened' &&
|
||||
@@ -59,7 +59,7 @@ jobs:
|
||||
${{ github.event_name == 'pull_request_target' || github.event_name == 'workflow_dispatch' }}
|
||||
env:
|
||||
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
|
||||
run: |-
|
||||
run: |
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
PR_NUMBER=${{ github.event.inputs.pr_number }}
|
||||
else
|
||||
@@ -82,7 +82,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
|
||||
COMMENT_BODY: '${{ github.event.comment.body }}'
|
||||
run: |-
|
||||
run: |
|
||||
PR_NUMBER=${{ github.event.issue.number }}
|
||||
echo "pr_number=$PR_NUMBER" >> "$GITHUB_OUTPUT"
|
||||
# Extract additional instructions from comment
|
||||
@@ -110,15 +110,22 @@ jobs:
|
||||
OPENAI_API_KEY: '${{ secrets.OPENAI_API_KEY }}'
|
||||
OPENAI_BASE_URL: '${{ secrets.OPENAI_BASE_URL }}'
|
||||
OPENAI_MODEL: '${{ secrets.OPENAI_MODEL }}'
|
||||
settings_json: |-
|
||||
settings_json: |
|
||||
{
|
||||
"coreTools": [
|
||||
"run_shell_command",
|
||||
"run_shell_command(echo)",
|
||||
"run_shell_command(gh pr view)",
|
||||
"run_shell_command(gh pr diff)",
|
||||
"run_shell_command(gh pr comment)",
|
||||
"run_shell_command(cat)",
|
||||
"run_shell_command(head)",
|
||||
"run_shell_command(tail)",
|
||||
"run_shell_command(grep)",
|
||||
"write_file"
|
||||
],
|
||||
"sandbox": false
|
||||
}
|
||||
prompt: |-
|
||||
prompt: |
|
||||
You are an expert code reviewer. You have access to shell commands to gather PR information and perform the review.
|
||||
|
||||
IMPORTANT: Use the available shell commands to gather information. Do not ask for information to be provided.
|
||||
|
||||
19
.vscode/launch.json
vendored
19
.vscode/launch.json
vendored
@@ -67,19 +67,6 @@
|
||||
"console": "integratedTerminal",
|
||||
"internalConsoleOptions": "neverOpen",
|
||||
"skipFiles": ["<node_internals>/**"]
|
||||
},
|
||||
{
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"name": "Launch CLI Non-Interactive",
|
||||
"runtimeExecutable": "npm",
|
||||
"runtimeArgs": ["run", "start", "--", "-p", "${input:prompt}", "-y"],
|
||||
"skipFiles": ["<node_internals>/**"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"console": "integratedTerminal",
|
||||
"env": {
|
||||
"GEMINI_SANDBOX": "false"
|
||||
}
|
||||
}
|
||||
],
|
||||
"inputs": [
|
||||
@@ -88,12 +75,6 @@
|
||||
"type": "promptString",
|
||||
"description": "Enter the path to the test file (e.g., ${workspaceFolder}/packages/cli/src/ui/components/LoadingIndicator.test.tsx)",
|
||||
"default": "${workspaceFolder}/packages/cli/src/ui/components/LoadingIndicator.test.tsx"
|
||||
},
|
||||
{
|
||||
"id": "prompt",
|
||||
"type": "promptString",
|
||||
"description": "Enter your prompt for non-interactive mode",
|
||||
"default": "Explain this code"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -209,7 +209,7 @@ npm run lint
|
||||
### Coding Conventions
|
||||
|
||||
- Please adhere to the coding style, patterns, and conventions used throughout the existing codebase.
|
||||
- Consult [QWEN.md](https://github.com/QwenLM/qwen-code/blob/main/QWEN.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage.
|
||||
- Consult [GEMINI.md](https://github.com/google-gemini/gemini-cli/blob/main/GEMINI.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage.
|
||||
- **Imports:** Pay special attention to import paths. The project uses ESLint to enforce restrictions on relative imports between packages.
|
||||
|
||||
### Project Structure
|
||||
|
||||
@@ -438,7 +438,7 @@ Arguments passed directly when running the CLI can override other configurations
|
||||
- `auto_edit`: Automatically approve edit tools (replace, write_file) while prompting for others
|
||||
- `yolo`: Automatically approve all tool calls (equivalent to `--yolo`)
|
||||
- Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of `--yolo` for the new unified approach.
|
||||
- Example: `qwen --approval-mode auto_edit`
|
||||
- Example: `gemini --approval-mode auto_edit`
|
||||
- **`--telemetry`**:
|
||||
- Enables [telemetry](../telemetry.md).
|
||||
- **`--telemetry-target`**:
|
||||
|
||||
@@ -89,7 +89,7 @@ The verbose output is formatted to clearly identify the source of the logs:
|
||||
|
||||
```
|
||||
--- TEST: <log dir>:<test-name> ---
|
||||
... output from the qwen command ...
|
||||
... output from the gemini command ...
|
||||
--- END TEST: <log dir>:<test-name> ---
|
||||
```
|
||||
|
||||
|
||||
@@ -13,39 +13,10 @@ Use `run_shell_command` to interact with the underlying system, run scripts, or
|
||||
- `command` (string, required): The exact shell command to execute.
|
||||
- `description` (string, optional): A brief description of the command's purpose, which will be shown to the user.
|
||||
- `directory` (string, optional): The directory (relative to the project root) in which to execute the command. If not provided, the command runs in the project root.
|
||||
- `is_background` (boolean, required): Whether to run the command in background. This parameter is required to ensure explicit decision-making about command execution mode. Set to true for long-running processes like development servers, watchers, or daemons that should continue running without blocking further commands. Set to false for one-time commands that should complete before proceeding.
|
||||
|
||||
## How to use `run_shell_command` with Qwen Code
|
||||
|
||||
When using `run_shell_command`, the command is executed as a subprocess. You can control whether commands run in background or foreground using the `is_background` parameter, or by explicitly adding `&` to commands. The tool returns detailed information about the execution, including:
|
||||
|
||||
### Required Background Parameter
|
||||
|
||||
The `is_background` parameter is **required** for all command executions. This design ensures that the LLM (and users) must explicitly decide whether each command should run in the background or foreground, promoting intentional and predictable command execution behavior. By making this parameter mandatory, we avoid unintended fallback to foreground execution, which could block subsequent operations when dealing with long-running processes.
|
||||
|
||||
### Background vs Foreground Execution
|
||||
|
||||
The tool intelligently handles background and foreground execution based on your explicit choice:
|
||||
|
||||
**Use background execution (`is_background: true`) for:**
|
||||
|
||||
- Long-running development servers: `npm run start`, `npm run dev`, `yarn dev`
|
||||
- Build watchers: `npm run watch`, `webpack --watch`
|
||||
- Database servers: `mongod`, `mysql`, `redis-server`
|
||||
- Web servers: `python -m http.server`, `php -S localhost:8000`
|
||||
- Any command expected to run indefinitely until manually stopped
|
||||
|
||||
**Use foreground execution (`is_background: false`) for:**
|
||||
|
||||
- One-time commands: `ls`, `cat`, `grep`
|
||||
- Build commands: `npm run build`, `make`
|
||||
- Installation commands: `npm install`, `pip install`
|
||||
- Git operations: `git commit`, `git push`
|
||||
- Test runs: `npm test`, `pytest`
|
||||
|
||||
### Execution Information
|
||||
|
||||
The tool returns detailed information about the execution, including:
|
||||
When using `run_shell_command`, the command is executed as a subprocess. `run_shell_command` can start background processes using `&`. The tool returns detailed information about the execution, including:
|
||||
|
||||
- `Command`: The command that was executed.
|
||||
- `Directory`: The directory where the command was run.
|
||||
@@ -58,48 +29,28 @@ The tool returns detailed information about the execution, including:
|
||||
|
||||
Usage:
|
||||
|
||||
```bash
|
||||
run_shell_command(command="Your commands.", description="Your description of the command.", directory="Your execution directory.", is_background=false)
|
||||
```
|
||||
|
||||
**Note:** The `is_background` parameter is required and must be explicitly specified for every command execution.
|
||||
run_shell_command(command="Your commands.", description="Your description of the command.", directory="Your execution directory.")
|
||||
```
|
||||
|
||||
## `run_shell_command` examples
|
||||
|
||||
List files in the current directory:
|
||||
|
||||
```bash
|
||||
run_shell_command(command="ls -la", is_background=false)
|
||||
```
|
||||
run_shell_command(command="ls -la")
|
||||
```
|
||||
|
||||
Run a script in a specific directory:
|
||||
|
||||
```bash
|
||||
run_shell_command(command="./my_script.sh", directory="scripts", description="Run my custom script", is_background=false)
|
||||
```
|
||||
run_shell_command(command="./my_script.sh", directory="scripts", description="Run my custom script")
|
||||
```
|
||||
|
||||
Start a background development server (recommended approach):
|
||||
Start a background server:
|
||||
|
||||
```bash
|
||||
run_shell_command(command="npm run dev", description="Start development server in background", is_background=true)
|
||||
```
|
||||
|
||||
Start a background server (alternative with explicit &):
|
||||
|
||||
```bash
|
||||
run_shell_command(command="npm run dev &", description="Start development server in background", is_background=false)
|
||||
```
|
||||
|
||||
Run a build command in foreground:
|
||||
|
||||
```bash
|
||||
run_shell_command(command="npm run build", description="Build the project", is_background=false)
|
||||
```
|
||||
|
||||
Start multiple background services:
|
||||
|
||||
```bash
|
||||
run_shell_command(command="docker-compose up", description="Start all services", is_background=true)
|
||||
run_shell_command(command="npm run dev &", description="Start development server in background")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
@@ -107,9 +58,7 @@ run_shell_command(command="docker-compose up", description="Start all services",
|
||||
- **Security:** Be cautious when executing commands, especially those constructed from user input, to prevent security vulnerabilities.
|
||||
- **Interactive commands:** Avoid commands that require interactive user input, as this can cause the tool to hang. Use non-interactive flags if available (e.g., `npm init -y`).
|
||||
- **Error handling:** Check the `Stderr`, `Error`, and `Exit Code` fields to determine if a command executed successfully.
|
||||
- **Background processes:** When `is_background=true` or when a command contains `&`, the tool will return immediately and the process will continue to run in the background. The `Background PIDs` field will contain the process ID of the background process.
|
||||
- **Background execution choices:** The `is_background` parameter is required and provides explicit control over execution mode. You can also add `&` to the command for manual background execution, but the `is_background` parameter must still be specified. The parameter provides clearer intent and automatically handles the background execution setup.
|
||||
- **Command descriptions:** When using `is_background=true`, the command description will include a `[background]` indicator to clearly show the execution mode.
|
||||
- **Background processes:** When a command is run in the background with `&`, the tool will return immediately and the process will continue to run in the background. The `Background PIDs` field will contain the process ID of the background process.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
|
||||
12
package-lock.json
generated
12
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"workspaces": [
|
||||
"packages/*"
|
||||
],
|
||||
@@ -12336,7 +12336,7 @@
|
||||
},
|
||||
"packages/cli": {
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"dependencies": {
|
||||
"@google/genai": "1.9.0",
|
||||
"@iarna/toml": "^2.2.5",
|
||||
@@ -12520,7 +12520,7 @@
|
||||
},
|
||||
"packages/core": {
|
||||
"name": "@qwen-code/qwen-code-core",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"dependencies": {
|
||||
"@google/genai": "1.13.0",
|
||||
"@modelcontextprotocol/sdk": "^1.11.0",
|
||||
@@ -12671,7 +12671,7 @@
|
||||
},
|
||||
"packages/test-utils": {
|
||||
"name": "@qwen-code/qwen-code-test-utils",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"license": "Apache-2.0",
|
||||
"devDependencies": {
|
||||
"typescript": "^5.3.3"
|
||||
@@ -12682,7 +12682,7 @@
|
||||
},
|
||||
"packages/vscode-ide-companion": {
|
||||
"name": "qwen-code-vscode-ide-companion",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"license": "LICENSE",
|
||||
"dependencies": {
|
||||
"@modelcontextprotocol/sdk": "^1.15.1",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"engines": {
|
||||
"node": ">=20.0.0"
|
||||
},
|
||||
@@ -13,7 +13,7 @@
|
||||
"url": "git+https://github.com/QwenLM/qwen-code.git"
|
||||
},
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.9"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.9-nightly.2"
|
||||
},
|
||||
"scripts": {
|
||||
"start": "node scripts/start.js",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"description": "Qwen Code",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -25,7 +25,7 @@
|
||||
"dist"
|
||||
],
|
||||
"config": {
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.9"
|
||||
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.9-nightly.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"@google/genai": "1.9.0",
|
||||
|
||||
@@ -218,7 +218,7 @@ export async function parseArguments(): Promise<CliArgs> {
|
||||
.option('proxy', {
|
||||
type: 'string',
|
||||
description:
|
||||
'Proxy for qwen client, like schema://user:password@host:port',
|
||||
'Proxy for gemini client, like schema://user:password@host:port',
|
||||
})
|
||||
.option('include-directories', {
|
||||
type: 'array',
|
||||
@@ -577,7 +577,6 @@ export async function loadCliConfig(
|
||||
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
|
||||
},
|
||||
]) as ConfigParameters['systemPromptMappings'],
|
||||
authType: settings.selectedAuthType,
|
||||
contentGenerator: settings.contentGenerator,
|
||||
cliVersion,
|
||||
tavilyApiKey:
|
||||
|
||||
@@ -88,7 +88,7 @@ export function IdeIntegrationNudge({
|
||||
<Box marginBottom={1} flexDirection="column">
|
||||
<Text>
|
||||
<Text color="yellow">{'> '}</Text>
|
||||
{`Do you want to connect ${ideName ?? 'your'} editor to Qwen Code?`}
|
||||
{`Do you want to connect ${ideName ?? 'your'} editor to Gemini CLI?`}
|
||||
</Text>
|
||||
<Text dimColor>{installText}</Text>
|
||||
</Box>
|
||||
|
||||
@@ -91,34 +91,35 @@ export const directoryCommand: SlashCommand = {
|
||||
}
|
||||
}
|
||||
|
||||
if (added.length > 0) {
|
||||
try {
|
||||
if (config.shouldLoadMemoryFromIncludeDirectories()) {
|
||||
const { memoryContent, fileCount } =
|
||||
await loadServerHierarchicalMemory(
|
||||
config.getWorkingDir(),
|
||||
[...config.getWorkspaceContext().getDirectories()],
|
||||
config.getDebugMode(),
|
||||
config.getFileService(),
|
||||
config.getExtensionContextFilePaths(),
|
||||
context.services.settings.merged.memoryImportFormat || 'tree', // Use setting or default to 'tree'
|
||||
config.getFileFilteringOptions(),
|
||||
context.services.settings.merged.memoryDiscoveryMaxDirs,
|
||||
);
|
||||
config.setUserMemory(memoryContent);
|
||||
config.setGeminiMdFileCount(fileCount);
|
||||
context.ui.setGeminiMdFileCount(fileCount);
|
||||
}
|
||||
addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Successfully added memory files from the following directories if there are:\n- ${added.join('\n- ')}`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
} catch (error) {
|
||||
errors.push(`Error refreshing memory: ${(error as Error).message}`);
|
||||
try {
|
||||
if (config.shouldLoadMemoryFromIncludeDirectories()) {
|
||||
const { memoryContent, fileCount } =
|
||||
await loadServerHierarchicalMemory(
|
||||
config.getWorkingDir(),
|
||||
[
|
||||
...config.getWorkspaceContext().getDirectories(),
|
||||
...pathsToAdd,
|
||||
],
|
||||
config.getDebugMode(),
|
||||
config.getFileService(),
|
||||
config.getExtensionContextFilePaths(),
|
||||
context.services.settings.merged.memoryImportFormat || 'tree', // Use setting or default to 'tree'
|
||||
config.getFileFilteringOptions(),
|
||||
context.services.settings.merged.memoryDiscoveryMaxDirs,
|
||||
);
|
||||
config.setUserMemory(memoryContent);
|
||||
config.setGeminiMdFileCount(fileCount);
|
||||
context.ui.setGeminiMdFileCount(fileCount);
|
||||
}
|
||||
addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Successfully added GEMINI.md files from the following directories if there are:\n- ${added.join('\n- ')}`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
} catch (error) {
|
||||
errors.push(`Error refreshing memory: ${(error as Error).message}`);
|
||||
}
|
||||
|
||||
if (added.length > 0) {
|
||||
|
||||
@@ -35,7 +35,7 @@ describe('docsCommand', () => {
|
||||
throw new Error('docsCommand must have an action.');
|
||||
}
|
||||
|
||||
const docsUrl = 'https://qwenlm.github.io/qwen-code-docs/en';
|
||||
const docsUrl = 'https://github.com/QwenLM/qwen-code';
|
||||
|
||||
await docsCommand.action(mockContext, '');
|
||||
|
||||
@@ -57,7 +57,7 @@ describe('docsCommand', () => {
|
||||
|
||||
// Simulate a sandbox environment
|
||||
process.env.SANDBOX = 'gemini-sandbox';
|
||||
const docsUrl = 'https://qwenlm.github.io/qwen-code-docs/en';
|
||||
const docsUrl = 'https://github.com/QwenLM/qwen-code';
|
||||
|
||||
await docsCommand.action(mockContext, '');
|
||||
|
||||
@@ -80,7 +80,7 @@ describe('docsCommand', () => {
|
||||
|
||||
// Simulate the specific 'sandbox-exec' environment
|
||||
process.env.SANDBOX = 'sandbox-exec';
|
||||
const docsUrl = 'https://qwenlm.github.io/qwen-code-docs/en';
|
||||
const docsUrl = 'https://github.com/QwenLM/qwen-code';
|
||||
|
||||
await docsCommand.action(mockContext, '');
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ export const docsCommand: SlashCommand = {
|
||||
description: 'open full Qwen Code documentation in your browser',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context: CommandContext): Promise<void> => {
|
||||
const docsUrl = 'https://qwenlm.github.io/qwen-code-docs/en';
|
||||
const docsUrl = 'https://github.com/QwenLM/qwen-code';
|
||||
|
||||
if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') {
|
||||
context.ui.addItem(
|
||||
|
||||
@@ -130,7 +130,7 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
|
||||
({
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: `IDE integration is not supported in your current environment. To use this feature, run Qwen Code in one of these supported IDEs: ${Object.values(
|
||||
content: `IDE integration is not supported in your current environment. To use this feature, run Gemini CLI in one of these supported IDEs: ${Object.values(
|
||||
DetectedIde,
|
||||
)
|
||||
.map((ide) => getIdeInfo(ide).displayName)
|
||||
|
||||
@@ -146,7 +146,7 @@ describe('mcpCommand', () => {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
content:
|
||||
'No MCP servers configured. Please view MCP documentation in your browser: https://qwenlm.github.io/qwen-code-docs/en/tools/mcp-server/#how-to-set-up-your-mcp-server or use the cli /docs command',
|
||||
'No MCP servers configured. Please view MCP documentation in your browser: https://goo.gle/gemini-cli-docs-mcp or use the cli /docs command',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -58,8 +58,7 @@ const getMcpStatus = async (
|
||||
const blockedMcpServers = config.getBlockedMcpServers() || [];
|
||||
|
||||
if (serverNames.length === 0 && blockedMcpServers.length === 0) {
|
||||
const docsUrl =
|
||||
'https://qwenlm.github.io/qwen-code-docs/en/tools/mcp-server/#how-to-set-up-your-mcp-server';
|
||||
const docsUrl = 'https://goo.gle/gemini-cli-docs-mcp';
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'info',
|
||||
|
||||
@@ -117,7 +117,7 @@ describe('memoryCommand', () => {
|
||||
expect(result).toEqual({
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: 'Usage: /memory add [--global|--project] <text to remember>',
|
||||
content: 'Usage: /memory add <text to remember>',
|
||||
});
|
||||
|
||||
expect(mockContext.ui.addItem).not.toHaveBeenCalled();
|
||||
@@ -132,7 +132,7 @@ describe('memoryCommand', () => {
|
||||
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Attempting to save to memory : "${fact}"`,
|
||||
text: `Attempting to save to memory: "${fact}"`,
|
||||
},
|
||||
expect.any(Number),
|
||||
);
|
||||
@@ -143,61 +143,6 @@ describe('memoryCommand', () => {
|
||||
toolArgs: { fact },
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle --global flag and add scope to tool args', () => {
|
||||
if (!addCommand.action) throw new Error('Command has no action');
|
||||
|
||||
const fact = 'remember this globally';
|
||||
const result = addCommand.action(mockContext, `--global ${fact}`);
|
||||
|
||||
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Attempting to save to memory (global): "${fact}"`,
|
||||
},
|
||||
expect.any(Number),
|
||||
);
|
||||
|
||||
expect(result).toEqual({
|
||||
type: 'tool',
|
||||
toolName: 'save_memory',
|
||||
toolArgs: { fact, scope: 'global' },
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle --project flag and add scope to tool args', () => {
|
||||
if (!addCommand.action) throw new Error('Command has no action');
|
||||
|
||||
const fact = 'remember this for project';
|
||||
const result = addCommand.action(mockContext, `--project ${fact}`);
|
||||
|
||||
expect(mockContext.ui.addItem).toHaveBeenCalledWith(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Attempting to save to memory (project): "${fact}"`,
|
||||
},
|
||||
expect.any(Number),
|
||||
);
|
||||
|
||||
expect(result).toEqual({
|
||||
type: 'tool',
|
||||
toolName: 'save_memory',
|
||||
toolArgs: { fact, scope: 'project' },
|
||||
});
|
||||
});
|
||||
|
||||
it('should return error if flag is provided but no fact follows', () => {
|
||||
if (!addCommand.action) throw new Error('Command has no action');
|
||||
|
||||
const result = addCommand.action(mockContext, '--global ');
|
||||
expect(result).toEqual({
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: 'Usage: /memory add [--global|--project] <text to remember>',
|
||||
});
|
||||
|
||||
expect(mockContext.ui.addItem).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('/memory refresh', () => {
|
||||
@@ -228,7 +173,7 @@ describe('memoryCommand', () => {
|
||||
|
||||
mockContext = createMockCommandContext({
|
||||
services: {
|
||||
config: mockConfig,
|
||||
config: Promise.resolve(mockConfig),
|
||||
settings: {
|
||||
merged: {
|
||||
memoryDiscoveryMaxDirs: 1000,
|
||||
|
||||
@@ -7,11 +7,7 @@
|
||||
import {
|
||||
getErrorMessage,
|
||||
loadServerHierarchicalMemory,
|
||||
QWEN_DIR,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import path from 'node:path';
|
||||
import os from 'os';
|
||||
import fs from 'fs/promises';
|
||||
import { MessageType } from '../types.js';
|
||||
import {
|
||||
CommandKind,
|
||||
@@ -45,136 +41,24 @@ export const memoryCommand: SlashCommand = {
|
||||
Date.now(),
|
||||
);
|
||||
},
|
||||
subCommands: [
|
||||
{
|
||||
name: '--project',
|
||||
description: 'Show project-level memory contents.',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context) => {
|
||||
try {
|
||||
const projectMemoryPath = path.join(process.cwd(), 'QWEN.md');
|
||||
const memoryContent = await fs.readFile(
|
||||
projectMemoryPath,
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
const messageContent =
|
||||
memoryContent.trim().length > 0
|
||||
? `Project memory content from ${projectMemoryPath}:\n\n---\n${memoryContent}\n---`
|
||||
: 'Project memory is currently empty.';
|
||||
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: messageContent,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
} catch (_error) {
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: 'Project memory file not found or is currently empty.',
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: '--global',
|
||||
description: 'Show global memory contents.',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: async (context) => {
|
||||
try {
|
||||
const globalMemoryPath = path.join(
|
||||
os.homedir(),
|
||||
QWEN_DIR,
|
||||
'QWEN.md',
|
||||
);
|
||||
const globalMemoryContent = await fs.readFile(
|
||||
globalMemoryPath,
|
||||
'utf-8',
|
||||
);
|
||||
|
||||
const messageContent =
|
||||
globalMemoryContent.trim().length > 0
|
||||
? `Global memory content:\n\n---\n${globalMemoryContent}\n---`
|
||||
: 'Global memory is currently empty.';
|
||||
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: messageContent,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
} catch (_error) {
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: 'Global memory file not found or is currently empty.',
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'add',
|
||||
description:
|
||||
'Add content to the memory. Use --global for global memory or --project for project memory.',
|
||||
description: 'Add content to the memory.',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: (context, args): SlashCommandActionReturn | void => {
|
||||
if (!args || args.trim() === '') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content:
|
||||
'Usage: /memory add [--global|--project] <text to remember>',
|
||||
content: 'Usage: /memory add <text to remember>',
|
||||
};
|
||||
}
|
||||
|
||||
const trimmedArgs = args.trim();
|
||||
let scope: 'global' | 'project' | undefined;
|
||||
let fact: string;
|
||||
|
||||
// Check for scope flags
|
||||
if (trimmedArgs.startsWith('--global ')) {
|
||||
scope = 'global';
|
||||
fact = trimmedArgs.substring('--global '.length).trim();
|
||||
} else if (trimmedArgs.startsWith('--project ')) {
|
||||
scope = 'project';
|
||||
fact = trimmedArgs.substring('--project '.length).trim();
|
||||
} else if (trimmedArgs === '--global' || trimmedArgs === '--project') {
|
||||
// Flag provided but no text after it
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content:
|
||||
'Usage: /memory add [--global|--project] <text to remember>',
|
||||
};
|
||||
} else {
|
||||
// No scope specified, will be handled by the tool
|
||||
fact = trimmedArgs;
|
||||
}
|
||||
|
||||
if (!fact || fact.trim() === '') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content:
|
||||
'Usage: /memory add [--global|--project] <text to remember>',
|
||||
};
|
||||
}
|
||||
|
||||
const scopeText = scope ? `(${scope})` : '';
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Attempting to save to memory ${scopeText}: "${fact}"`,
|
||||
text: `Attempting to save to memory: "${args.trim()}"`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
@@ -182,67 +66,9 @@ export const memoryCommand: SlashCommand = {
|
||||
return {
|
||||
type: 'tool',
|
||||
toolName: 'save_memory',
|
||||
toolArgs: scope ? { fact, scope } : { fact },
|
||||
toolArgs: { fact: args.trim() },
|
||||
};
|
||||
},
|
||||
subCommands: [
|
||||
{
|
||||
name: '--project',
|
||||
description: 'Add content to project-level memory.',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: (context, args): SlashCommandActionReturn | void => {
|
||||
if (!args || args.trim() === '') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: 'Usage: /memory add --project <text to remember>',
|
||||
};
|
||||
}
|
||||
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Attempting to save to project memory: "${args.trim()}"`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
|
||||
return {
|
||||
type: 'tool',
|
||||
toolName: 'save_memory',
|
||||
toolArgs: { fact: args.trim(), scope: 'project' },
|
||||
};
|
||||
},
|
||||
},
|
||||
{
|
||||
name: '--global',
|
||||
description: 'Add content to global memory.',
|
||||
kind: CommandKind.BUILT_IN,
|
||||
action: (context, args): SlashCommandActionReturn | void => {
|
||||
if (!args || args.trim() === '') {
|
||||
return {
|
||||
type: 'message',
|
||||
messageType: 'error',
|
||||
content: 'Usage: /memory add --global <text to remember>',
|
||||
};
|
||||
}
|
||||
|
||||
context.ui.addItem(
|
||||
{
|
||||
type: MessageType.INFO,
|
||||
text: `Attempting to save to global memory: "${args.trim()}"`,
|
||||
},
|
||||
Date.now(),
|
||||
);
|
||||
|
||||
return {
|
||||
type: 'tool',
|
||||
toolName: 'save_memory',
|
||||
toolArgs: { fact: args.trim(), scope: 'global' },
|
||||
};
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'refresh',
|
||||
@@ -258,7 +84,7 @@ export const memoryCommand: SlashCommand = {
|
||||
);
|
||||
|
||||
try {
|
||||
const config = context.services.config;
|
||||
const config = await context.services.config;
|
||||
if (config) {
|
||||
const { memoryContent, fileCount } =
|
||||
await loadServerHierarchicalMemory(
|
||||
|
||||
@@ -69,6 +69,10 @@ export function AuthDialog({
|
||||
return item.value === AuthType.USE_GEMINI;
|
||||
}
|
||||
|
||||
if (process.env.QWEN_OAUTH_TOKEN) {
|
||||
return item.value === AuthType.QWEN_OAUTH;
|
||||
}
|
||||
|
||||
return item.value === AuthType.LOGIN_WITH_GOOGLE;
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -27,7 +27,7 @@ const renderWithWidth = (
|
||||
describe('<ContextSummaryDisplay />', () => {
|
||||
const baseProps = {
|
||||
geminiMdFileCount: 1,
|
||||
contextFileNames: ['QWEN.md'],
|
||||
contextFileNames: ['GEMINI.md'],
|
||||
mcpServers: { 'test-server': { command: 'test' } },
|
||||
showToolDescriptions: false,
|
||||
ideContext: {
|
||||
@@ -41,7 +41,7 @@ describe('<ContextSummaryDisplay />', () => {
|
||||
const { lastFrame } = renderWithWidth(120, baseProps);
|
||||
const output = lastFrame();
|
||||
expect(output).toContain(
|
||||
'Using: 1 open file (ctrl+e to view) | 1 QWEN.md file | 1 MCP server (ctrl+t to view)',
|
||||
'Using: 1 open file (ctrl+e to view) | 1 GEMINI.md file | 1 MCP server (ctrl+t to view)',
|
||||
);
|
||||
// Check for absence of newlines
|
||||
expect(output.includes('\n')).toBe(false);
|
||||
@@ -53,7 +53,7 @@ describe('<ContextSummaryDisplay />', () => {
|
||||
const expectedLines = [
|
||||
'Using:',
|
||||
' - 1 open file (ctrl+e to view)',
|
||||
' - 1 QWEN.md file',
|
||||
' - 1 GEMINI.md file',
|
||||
' - 1 MCP server (ctrl+t to view)',
|
||||
];
|
||||
const actualLines = output.split('\n');
|
||||
|
||||
@@ -644,9 +644,8 @@ export const useGeminiStream = (
|
||||
options?: { isContinuation: boolean },
|
||||
prompt_id?: string,
|
||||
) => {
|
||||
// Prevent concurrent executions of submitQuery, but allow continuations
|
||||
// which are part of the same logical flow (tool responses)
|
||||
if (isSubmittingQueryRef.current && !options?.isContinuation) {
|
||||
// Prevent concurrent executions of submitQuery
|
||||
if (isSubmittingQueryRef.current) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -140,7 +140,7 @@ describe('getInstallationInfo', () => {
|
||||
const info = getInstallationInfo(projectRoot, false);
|
||||
|
||||
expect(mockedExecSync).toHaveBeenCalledWith(
|
||||
'brew list -1 | grep -q "^qwen-code$"',
|
||||
'brew list -1 | grep -q "^gemini-cli$"',
|
||||
{ stdio: 'ignore' },
|
||||
);
|
||||
expect(info.packageManager).toBe(PackageManager.HOMEBREW);
|
||||
@@ -162,7 +162,7 @@ describe('getInstallationInfo', () => {
|
||||
const info = getInstallationInfo(projectRoot, false);
|
||||
|
||||
expect(mockedExecSync).toHaveBeenCalledWith(
|
||||
'brew list -1 | grep -q "^qwen-code$"',
|
||||
'brew list -1 | grep -q "^gemini-cli$"',
|
||||
{ stdio: 'ignore' },
|
||||
);
|
||||
// Should fall back to default global npm
|
||||
|
||||
@@ -77,8 +77,8 @@ export function getInstallationInfo(
|
||||
// Check for Homebrew
|
||||
if (process.platform === 'darwin') {
|
||||
try {
|
||||
// We do not support homebrew for now, keep forward compatibility for future use
|
||||
childProcess.execSync('brew list -1 | grep -q "^qwen-code$"', {
|
||||
// The package name in homebrew is gemini-cli
|
||||
childProcess.execSync('brew list -1 | grep -q "^gemini-cli$"', {
|
||||
stdio: 'ignore',
|
||||
});
|
||||
return {
|
||||
@@ -88,7 +88,8 @@ export function getInstallationInfo(
|
||||
'Installed via Homebrew. Please update with "brew upgrade".',
|
||||
};
|
||||
} catch (_error) {
|
||||
// continue to the next check
|
||||
// Brew is not installed or gemini-cli is not installed via brew.
|
||||
// Continue to the next check.
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,9 @@ function getAuthTypeFromEnv(): AuthType | undefined {
|
||||
if (process.env.OPENAI_API_KEY) {
|
||||
return AuthType.USE_OPENAI;
|
||||
}
|
||||
if (process.env.QWEN_OAUTH_TOKEN) {
|
||||
return AuthType.QWEN_OAUTH;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code-core",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"description": "Qwen Code Core",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
|
||||
@@ -208,7 +208,6 @@ export interface ConfigParameters {
|
||||
modelNames: string[];
|
||||
template: string;
|
||||
}>;
|
||||
authType?: AuthType;
|
||||
contentGenerator?: {
|
||||
timeout?: number;
|
||||
maxRetries?: number;
|
||||
@@ -289,7 +288,6 @@ export class Config {
|
||||
private readonly summarizeToolOutput:
|
||||
| Record<string, SummarizeToolOutputSettings>
|
||||
| undefined;
|
||||
private authType?: AuthType;
|
||||
private readonly enableOpenAILogging: boolean;
|
||||
private readonly contentGenerator?: {
|
||||
timeout?: number;
|
||||
@@ -370,7 +368,6 @@ export class Config {
|
||||
this.ideMode = params.ideMode ?? false;
|
||||
this.ideClient = IdeClient.getInstance();
|
||||
this.systemPromptMappings = params.systemPromptMappings;
|
||||
this.authType = params.authType;
|
||||
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
|
||||
this.contentGenerator = params.contentGenerator;
|
||||
this.cliVersion = params.cliVersion;
|
||||
@@ -454,8 +451,6 @@ export class Config {
|
||||
|
||||
// Reset the session flag since we're explicitly changing auth and using default model
|
||||
this.inFallbackMode = false;
|
||||
|
||||
this.authType = authMethod;
|
||||
}
|
||||
|
||||
getSessionId(): string {
|
||||
@@ -550,7 +545,6 @@ export class Config {
|
||||
getDebugMode(): boolean {
|
||||
return this.debugMode;
|
||||
}
|
||||
|
||||
getQuestion(): string | undefined {
|
||||
return this.question;
|
||||
}
|
||||
@@ -769,10 +763,6 @@ export class Config {
|
||||
}
|
||||
}
|
||||
|
||||
getAuthType(): AuthType | undefined {
|
||||
return this.authType;
|
||||
}
|
||||
|
||||
getEnableOpenAILogging(): boolean {
|
||||
return this.enableOpenAILogging;
|
||||
}
|
||||
|
||||
@@ -3410,10 +3410,7 @@ describe('OpenAIContentGenerator', () => {
|
||||
model: 'qwen-turbo',
|
||||
};
|
||||
|
||||
await dashscopeGenerator.generateContentStream(
|
||||
request,
|
||||
'dashscope-prompt-id',
|
||||
);
|
||||
await dashscopeGenerator.generateContent(request, 'dashscope-prompt-id');
|
||||
|
||||
// Should include cache control in last message
|
||||
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
|
||||
@@ -3425,6 +3422,7 @@ describe('OpenAIContentGenerator', () => {
|
||||
expect.objectContaining({
|
||||
type: 'text',
|
||||
text: 'Hello, how are you?',
|
||||
cache_control: { type: 'ephemeral' },
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
|
||||
@@ -130,7 +130,6 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
? {
|
||||
'X-DashScope-CacheControl': 'enable',
|
||||
'X-DashScope-UserAgent': userAgent,
|
||||
'X-DashScope-AuthType': contentGeneratorConfig.authType,
|
||||
}
|
||||
: {}),
|
||||
};
|
||||
@@ -236,18 +235,8 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
private async buildCreateParams(
|
||||
request: GenerateContentParameters,
|
||||
userPromptId: string,
|
||||
streaming: boolean = false,
|
||||
): Promise<Parameters<typeof this.client.chat.completions.create>[0]> {
|
||||
let messages = this.convertToOpenAIFormat(request);
|
||||
|
||||
// Add cache control to system and last messages for DashScope providers
|
||||
// Only add cache control to system message for non-streaming requests
|
||||
if (this.isDashScopeProvider()) {
|
||||
messages = this.addDashScopeCacheControl(
|
||||
messages,
|
||||
streaming ? 'both' : 'system',
|
||||
);
|
||||
}
|
||||
const messages = this.convertToOpenAIFormat(request);
|
||||
|
||||
// Build sampling parameters with clear priority:
|
||||
// 1. Request-level parameters (highest priority)
|
||||
@@ -270,11 +259,6 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
);
|
||||
}
|
||||
|
||||
if (streaming) {
|
||||
createParams.stream = true;
|
||||
createParams.stream_options = { include_usage: true };
|
||||
}
|
||||
|
||||
return createParams;
|
||||
}
|
||||
|
||||
@@ -283,11 +267,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
userPromptId: string,
|
||||
): Promise<GenerateContentResponse> {
|
||||
const startTime = Date.now();
|
||||
const createParams = await this.buildCreateParams(
|
||||
request,
|
||||
userPromptId,
|
||||
false,
|
||||
);
|
||||
const createParams = await this.buildCreateParams(request, userPromptId);
|
||||
|
||||
try {
|
||||
const completion = (await this.client.chat.completions.create(
|
||||
@@ -378,11 +358,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
userPromptId: string,
|
||||
): Promise<AsyncGenerator<GenerateContentResponse>> {
|
||||
const startTime = Date.now();
|
||||
const createParams = await this.buildCreateParams(
|
||||
request,
|
||||
userPromptId,
|
||||
true,
|
||||
);
|
||||
const createParams = await this.buildCreateParams(request, userPromptId);
|
||||
|
||||
createParams.stream = true;
|
||||
createParams.stream_options = { include_usage: true };
|
||||
|
||||
try {
|
||||
const stream = (await this.client.chat.completions.create(
|
||||
@@ -963,13 +942,14 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
const mergedMessages =
|
||||
this.mergeConsecutiveAssistantMessages(cleanedMessages);
|
||||
|
||||
return mergedMessages;
|
||||
// Add cache control to system and last messages for DashScope providers
|
||||
return this.addCacheControlFlag(mergedMessages, 'both');
|
||||
}
|
||||
|
||||
/**
|
||||
* Add cache control flag to specified message(s) for DashScope providers
|
||||
*/
|
||||
private addDashScopeCacheControl(
|
||||
private addCacheControlFlag(
|
||||
messages: OpenAI.Chat.ChatCompletionMessageParam[],
|
||||
target: 'system' | 'last' | 'both' = 'both',
|
||||
): OpenAI.Chat.ChatCompletionMessageParam[] {
|
||||
@@ -1371,9 +1351,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
|
||||
// Handle text content
|
||||
if (choice.delta?.content) {
|
||||
if (typeof choice.delta.content === 'string') {
|
||||
parts.push({ text: choice.delta.content });
|
||||
}
|
||||
parts.push({ text: choice.delta.content });
|
||||
}
|
||||
|
||||
// Handle tool calls - only accumulate during streaming, emit when complete
|
||||
@@ -1393,36 +1371,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
accumulatedCall.id = toolCall.id;
|
||||
}
|
||||
if (toolCall.function?.name) {
|
||||
// If this is a new function name, reset the arguments
|
||||
if (accumulatedCall.name !== toolCall.function.name) {
|
||||
accumulatedCall.arguments = '';
|
||||
}
|
||||
accumulatedCall.name = toolCall.function.name;
|
||||
}
|
||||
if (toolCall.function?.arguments) {
|
||||
// Check if we already have a complete JSON object
|
||||
const currentArgs = accumulatedCall.arguments;
|
||||
const newArgs = toolCall.function.arguments;
|
||||
|
||||
// If current arguments already form a complete JSON and new arguments start a new object,
|
||||
// this indicates a new tool call with the same name
|
||||
let shouldReset = false;
|
||||
if (currentArgs && newArgs.trim().startsWith('{')) {
|
||||
try {
|
||||
JSON.parse(currentArgs);
|
||||
// If we can parse current arguments as complete JSON and new args start with {,
|
||||
// this is likely a new tool call
|
||||
shouldReset = true;
|
||||
} catch {
|
||||
// Current arguments are not complete JSON, continue accumulating
|
||||
}
|
||||
}
|
||||
|
||||
if (shouldReset) {
|
||||
accumulatedCall.arguments = newArgs;
|
||||
} else {
|
||||
accumulatedCall.arguments += newArgs;
|
||||
}
|
||||
accumulatedCall.arguments += toolCall.function.arguments;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1610,7 +1562,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
|
||||
}
|
||||
}
|
||||
|
||||
messageContent = textParts.join('').trimEnd();
|
||||
messageContent = textParts.join('');
|
||||
}
|
||||
|
||||
const choice: OpenAIChoice = {
|
||||
|
||||
@@ -20,117 +20,9 @@ import {
|
||||
FinishReason,
|
||||
} from '@google/genai';
|
||||
import { QwenContentGenerator } from './qwenContentGenerator.js';
|
||||
import { SharedTokenManager } from './sharedTokenManager.js';
|
||||
import { Config } from '../config/config.js';
|
||||
import { AuthType, ContentGeneratorConfig } from '../core/contentGenerator.js';
|
||||
|
||||
// Mock SharedTokenManager
|
||||
vi.mock('./sharedTokenManager.js', () => ({
|
||||
SharedTokenManager: class {
|
||||
private static instance: unknown = null;
|
||||
private mockCredentials: QwenCredentials | null = null;
|
||||
private shouldThrowError: boolean = false;
|
||||
private errorToThrow: Error | null = null;
|
||||
|
||||
static getInstance() {
|
||||
if (!this.instance) {
|
||||
this.instance = new this();
|
||||
}
|
||||
return this.instance;
|
||||
}
|
||||
|
||||
async getValidCredentials(
|
||||
qwenClient: IQwenOAuth2Client,
|
||||
): Promise<QwenCredentials> {
|
||||
// If we're configured to throw an error, do so
|
||||
if (this.shouldThrowError && this.errorToThrow) {
|
||||
throw this.errorToThrow;
|
||||
}
|
||||
|
||||
// Try to get credentials from the mock client first to trigger auth errors
|
||||
try {
|
||||
const { token } = await qwenClient.getAccessToken();
|
||||
if (token) {
|
||||
const credentials = qwenClient.getCredentials();
|
||||
return credentials;
|
||||
}
|
||||
} catch (error) {
|
||||
// If it's an auth error and we need to simulate refresh behavior
|
||||
const errorMessage =
|
||||
error instanceof Error
|
||||
? error.message.toLowerCase()
|
||||
: String(error).toLowerCase();
|
||||
const errorCode =
|
||||
(error as { status?: number; code?: number })?.status ||
|
||||
(error as { status?: number; code?: number })?.code;
|
||||
|
||||
const isAuthError =
|
||||
errorCode === 401 ||
|
||||
errorCode === 403 ||
|
||||
errorMessage.includes('unauthorized') ||
|
||||
errorMessage.includes('forbidden') ||
|
||||
errorMessage.includes('token expired');
|
||||
|
||||
if (isAuthError) {
|
||||
// Try to refresh the token through the client
|
||||
try {
|
||||
const refreshResult = await qwenClient.refreshAccessToken();
|
||||
if (refreshResult && !('error' in refreshResult)) {
|
||||
// Refresh succeeded, update client credentials and return them
|
||||
const updatedCredentials = qwenClient.getCredentials();
|
||||
return updatedCredentials;
|
||||
} else {
|
||||
// Refresh failed, throw appropriate error
|
||||
throw new Error(
|
||||
'Failed to obtain valid Qwen access token. Please re-authenticate.',
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
throw new Error(
|
||||
'Failed to obtain valid Qwen access token. Please re-authenticate.',
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Re-throw non-auth errors
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Return mock credentials only if they're set
|
||||
if (this.mockCredentials && this.mockCredentials.access_token) {
|
||||
return this.mockCredentials;
|
||||
}
|
||||
|
||||
// Default fallback for tests that need credentials
|
||||
return {
|
||||
access_token: 'valid-token',
|
||||
refresh_token: 'valid-refresh-token',
|
||||
resource_url: 'https://test-endpoint.com/v1',
|
||||
expiry_date: Date.now() + 3600000,
|
||||
};
|
||||
}
|
||||
|
||||
getCurrentCredentials(): QwenCredentials | null {
|
||||
return this.mockCredentials;
|
||||
}
|
||||
|
||||
clearCache(): void {
|
||||
this.mockCredentials = null;
|
||||
}
|
||||
|
||||
// Helper method for tests to set credentials
|
||||
setMockCredentials(credentials: QwenCredentials | null): void {
|
||||
this.mockCredentials = credentials;
|
||||
}
|
||||
|
||||
// Helper method for tests to simulate errors
|
||||
setMockError(error: Error | null): void {
|
||||
this.shouldThrowError = !!error;
|
||||
this.errorToThrow = error;
|
||||
}
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock the OpenAIContentGenerator parent class
|
||||
vi.mock('../core/openaiContentGenerator.js', () => ({
|
||||
OpenAIContentGenerator: class {
|
||||
@@ -344,10 +236,8 @@ describe('QwenContentGenerator', () => {
|
||||
it('should refresh token on auth error and retry', async () => {
|
||||
const authError = { status: 401, message: 'Unauthorized' };
|
||||
|
||||
// First call fails with auth error, second call succeeds
|
||||
vi.mocked(mockQwenClient.getAccessToken)
|
||||
.mockRejectedValueOnce(authError)
|
||||
.mockResolvedValueOnce({ token: 'refreshed-token' });
|
||||
// First call fails with auth error
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockRejectedValueOnce(authError);
|
||||
|
||||
// Refresh succeeds
|
||||
vi.mocked(mockQwenClient.refreshAccessToken).mockResolvedValue({
|
||||
@@ -357,15 +247,6 @@ describe('QwenContentGenerator', () => {
|
||||
resource_url: 'https://refreshed-endpoint.com',
|
||||
});
|
||||
|
||||
// Set credentials for second call
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue({
|
||||
access_token: 'refreshed-token',
|
||||
token_type: 'Bearer',
|
||||
refresh_token: 'refresh-token',
|
||||
resource_url: 'https://refreshed-endpoint.com',
|
||||
expiry_date: Date.now() + 3600000,
|
||||
});
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
@@ -380,62 +261,12 @@ describe('QwenContentGenerator', () => {
|
||||
expect(mockQwenClient.refreshAccessToken).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should refresh token on auth error and retry for content stream', async () => {
|
||||
const authError = { status: 401, message: 'Unauthorized' };
|
||||
|
||||
// Reset mocks for this test
|
||||
vi.clearAllMocks();
|
||||
|
||||
// First call fails with auth error, second call succeeds
|
||||
vi.mocked(mockQwenClient.getAccessToken)
|
||||
.mockRejectedValueOnce(authError)
|
||||
.mockResolvedValueOnce({ token: 'refreshed-stream-token' });
|
||||
|
||||
// Refresh succeeds
|
||||
vi.mocked(mockQwenClient.refreshAccessToken).mockResolvedValue({
|
||||
access_token: 'refreshed-stream-token',
|
||||
token_type: 'Bearer',
|
||||
expires_in: 3600,
|
||||
resource_url: 'https://refreshed-stream-endpoint.com',
|
||||
});
|
||||
|
||||
// Set credentials for second call
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue({
|
||||
access_token: 'refreshed-stream-token',
|
||||
token_type: 'Bearer',
|
||||
refresh_token: 'refresh-token',
|
||||
resource_url: 'https://refreshed-stream-endpoint.com',
|
||||
expiry_date: Date.now() + 3600000,
|
||||
});
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello stream' }] }],
|
||||
};
|
||||
|
||||
const stream = await qwenContentGenerator.generateContentStream(
|
||||
request,
|
||||
'test-prompt-id',
|
||||
);
|
||||
const chunks: string[] = [];
|
||||
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(chunk.text || '');
|
||||
}
|
||||
|
||||
expect(chunks).toEqual(['Stream chunk 1', 'Stream chunk 2']);
|
||||
expect(mockQwenClient.refreshAccessToken).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should handle token refresh failure', async () => {
|
||||
// Mock the SharedTokenManager to throw an error
|
||||
const mockTokenManager = SharedTokenManager.getInstance() as unknown as {
|
||||
setMockError: (error: Error | null) => void;
|
||||
};
|
||||
mockTokenManager.setMockError(
|
||||
new Error(
|
||||
'Failed to obtain valid Qwen access token. Please re-authenticate.',
|
||||
),
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockRejectedValue(
|
||||
new Error('Token expired'),
|
||||
);
|
||||
vi.mocked(mockQwenClient.refreshAccessToken).mockRejectedValue(
|
||||
new Error('Refresh failed'),
|
||||
);
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
@@ -448,9 +279,6 @@ describe('QwenContentGenerator', () => {
|
||||
).rejects.toThrow(
|
||||
'Failed to obtain valid Qwen access token. Please re-authenticate.',
|
||||
);
|
||||
|
||||
// Clean up
|
||||
mockTokenManager.setMockError(null);
|
||||
});
|
||||
|
||||
it('should update endpoint when token is refreshed', async () => {
|
||||
@@ -719,24 +547,10 @@ describe('QwenContentGenerator', () => {
|
||||
const originalGenerateContent = parentPrototype.generateContent;
|
||||
parentPrototype.generateContent = mockGenerateContent;
|
||||
|
||||
// Mock getAccessToken to fail initially, then succeed
|
||||
let getAccessTokenCallCount = 0;
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockImplementation(async () => {
|
||||
getAccessTokenCallCount++;
|
||||
if (getAccessTokenCallCount <= 2) {
|
||||
throw authError; // Fail on first two calls (initial + retry)
|
||||
}
|
||||
return { token: 'refreshed-token' }; // Succeed after refresh
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockResolvedValue({
|
||||
token: 'initial-token',
|
||||
});
|
||||
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue({
|
||||
access_token: 'refreshed-token',
|
||||
token_type: 'Bearer',
|
||||
refresh_token: 'refresh-token',
|
||||
resource_url: 'https://test-endpoint.com',
|
||||
expiry_date: Date.now() + 3600000,
|
||||
});
|
||||
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue(mockCredentials);
|
||||
vi.mocked(mockQwenClient.refreshAccessToken).mockResolvedValue({
|
||||
access_token: 'refreshed-token',
|
||||
token_type: 'Bearer',
|
||||
@@ -823,16 +637,31 @@ describe('QwenContentGenerator', () => {
|
||||
expect(qwenContentGenerator.getCurrentToken()).toBe('cached-token');
|
||||
});
|
||||
|
||||
it('should clear token on clearToken()', () => {
|
||||
// Simulate having cached token value
|
||||
it('should clear token and endpoint on clearToken()', () => {
|
||||
// Simulate having cached values
|
||||
const qwenInstance = qwenContentGenerator as unknown as {
|
||||
currentToken: string;
|
||||
currentEndpoint: string;
|
||||
refreshPromise: Promise<string>;
|
||||
};
|
||||
qwenInstance.currentToken = 'cached-token';
|
||||
qwenInstance.currentEndpoint = 'https://cached-endpoint.com';
|
||||
qwenInstance.refreshPromise = Promise.resolve('token');
|
||||
|
||||
qwenContentGenerator.clearToken();
|
||||
|
||||
expect(qwenContentGenerator.getCurrentToken()).toBeNull();
|
||||
expect(
|
||||
(qwenContentGenerator as unknown as { currentEndpoint: string | null })
|
||||
.currentEndpoint,
|
||||
).toBeNull();
|
||||
expect(
|
||||
(
|
||||
qwenContentGenerator as unknown as {
|
||||
refreshPromise: Promise<string> | null;
|
||||
}
|
||||
).refreshPromise,
|
||||
).toBeNull();
|
||||
});
|
||||
|
||||
it('should handle concurrent token refresh requests', async () => {
|
||||
@@ -845,7 +674,9 @@ describe('QwenContentGenerator', () => {
|
||||
const authError = { status: 401, message: 'Unauthorized' };
|
||||
let parentCallCount = 0;
|
||||
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockRejectedValue(authError);
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockResolvedValue({
|
||||
token: 'initial-token',
|
||||
});
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue(mockCredentials);
|
||||
|
||||
vi.mocked(mockQwenClient.refreshAccessToken).mockImplementation(
|
||||
@@ -894,7 +725,6 @@ describe('QwenContentGenerator', () => {
|
||||
|
||||
// The main test is that all requests succeed without crashing
|
||||
expect(results).toHaveLength(3);
|
||||
// With our new implementation through SharedTokenManager, refresh should still be called
|
||||
expect(refreshCallCount).toBeGreaterThanOrEqual(1);
|
||||
|
||||
// Restore original method
|
||||
@@ -966,24 +796,13 @@ describe('QwenContentGenerator', () => {
|
||||
);
|
||||
parentPrototype.generateContent = mockGenerateContent;
|
||||
|
||||
// Mock getAccessToken to fail initially, then succeed
|
||||
let getAccessTokenCallCount = 0;
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockImplementation(async () => {
|
||||
getAccessTokenCallCount++;
|
||||
if (getAccessTokenCallCount <= 2) {
|
||||
throw authError; // Fail on first two calls (initial + retry)
|
||||
}
|
||||
return { token: 'new-token' }; // Succeed after refresh
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockResolvedValue({
|
||||
token: 'initial-token',
|
||||
});
|
||||
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue({
|
||||
access_token: 'new-token',
|
||||
token_type: 'Bearer',
|
||||
refresh_token: 'refresh-token',
|
||||
resource_url: 'https://new-endpoint.com',
|
||||
expiry_date: Date.now() + 7200000,
|
||||
...mockCredentials,
|
||||
resource_url: 'custom-endpoint.com',
|
||||
});
|
||||
|
||||
vi.mocked(mockQwenClient.refreshAccessToken).mockResolvedValue({
|
||||
access_token: 'new-token',
|
||||
token_type: 'Bearer',
|
||||
@@ -1007,595 +826,4 @@ describe('QwenContentGenerator', () => {
|
||||
expect(callCount).toBe(2); // Initial call + retry
|
||||
});
|
||||
});
|
||||
|
||||
describe('SharedTokenManager Integration', () => {
|
||||
it('should use SharedTokenManager to get valid credentials', async () => {
|
||||
const mockTokenManager = {
|
||||
getValidCredentials: vi.fn().mockResolvedValue({
|
||||
access_token: 'manager-token',
|
||||
resource_url: 'https://manager-endpoint.com',
|
||||
}),
|
||||
getCurrentCredentials: vi.fn(),
|
||||
clearCache: vi.fn(),
|
||||
};
|
||||
|
||||
// Mock the SharedTokenManager.getInstance()
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
// Create new instance to pick up the mock
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
|
||||
await newGenerator.generateContent(request, 'test-prompt-id');
|
||||
|
||||
expect(mockTokenManager.getValidCredentials).toHaveBeenCalledWith(
|
||||
mockQwenClient,
|
||||
);
|
||||
|
||||
// Restore original
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
|
||||
it('should handle SharedTokenManager errors gracefully', async () => {
|
||||
const mockTokenManager = {
|
||||
getValidCredentials: vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error('Token manager error')),
|
||||
getCurrentCredentials: vi.fn(),
|
||||
clearCache: vi.fn(),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
|
||||
await expect(
|
||||
newGenerator.generateContent(request, 'test-prompt-id'),
|
||||
).rejects.toThrow('Failed to obtain valid Qwen access token');
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
|
||||
it('should handle missing access token from credentials', async () => {
|
||||
const mockTokenManager = {
|
||||
getValidCredentials: vi.fn().mockResolvedValue({
|
||||
access_token: undefined,
|
||||
resource_url: 'https://test-endpoint.com',
|
||||
}),
|
||||
getCurrentCredentials: vi.fn(),
|
||||
clearCache: vi.fn(),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
|
||||
await expect(
|
||||
newGenerator.generateContent(request, 'test-prompt-id'),
|
||||
).rejects.toThrow('Failed to obtain valid Qwen access token');
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
});
|
||||
|
||||
describe('getCurrentEndpoint Method', () => {
|
||||
it('should handle URLs with custom ports', () => {
|
||||
const endpoints = [
|
||||
{ input: 'localhost:8080', expected: 'https://localhost:8080/v1' },
|
||||
{
|
||||
input: 'http://localhost:8080',
|
||||
expected: 'http://localhost:8080/v1',
|
||||
},
|
||||
{
|
||||
input: 'https://api.example.com:443',
|
||||
expected: 'https://api.example.com:443/v1',
|
||||
},
|
||||
{
|
||||
input: 'api.example.com:9000/api',
|
||||
expected: 'https://api.example.com:9000/api/v1',
|
||||
},
|
||||
];
|
||||
|
||||
endpoints.forEach(({ input, expected }) => {
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockResolvedValue({
|
||||
token: 'test-token',
|
||||
});
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue({
|
||||
...mockCredentials,
|
||||
resource_url: input,
|
||||
});
|
||||
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
getCurrentEndpoint: (resourceUrl?: string) => string;
|
||||
};
|
||||
|
||||
expect(generator.getCurrentEndpoint(input)).toBe(expected);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle URLs with existing paths', () => {
|
||||
const endpoints = [
|
||||
{
|
||||
input: 'https://api.example.com/api',
|
||||
expected: 'https://api.example.com/api/v1',
|
||||
},
|
||||
{
|
||||
input: 'api.example.com/api/v2',
|
||||
expected: 'https://api.example.com/api/v2/v1',
|
||||
},
|
||||
{
|
||||
input: 'https://api.example.com/api/v1',
|
||||
expected: 'https://api.example.com/api/v1',
|
||||
},
|
||||
];
|
||||
|
||||
endpoints.forEach(({ input, expected }) => {
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
getCurrentEndpoint: (resourceUrl?: string) => string;
|
||||
};
|
||||
|
||||
expect(generator.getCurrentEndpoint(input)).toBe(expected);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle undefined resource URL', () => {
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
getCurrentEndpoint: (resourceUrl?: string) => string;
|
||||
};
|
||||
|
||||
expect(generator.getCurrentEndpoint(undefined)).toBe(
|
||||
'https://dashscope.aliyuncs.com/compatible-mode/v1',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle empty resource URL', () => {
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
getCurrentEndpoint: (resourceUrl?: string) => string;
|
||||
};
|
||||
|
||||
// Empty string should fall back to default endpoint
|
||||
expect(generator.getCurrentEndpoint('')).toBe(
|
||||
'https://dashscope.aliyuncs.com/compatible-mode/v1',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('isAuthError Method Enhanced', () => {
|
||||
it('should identify auth errors by numeric status codes', () => {
|
||||
const authErrors = [
|
||||
{ code: 401 },
|
||||
{ status: 403 },
|
||||
{ code: '401' }, // String status codes
|
||||
{ status: '403' },
|
||||
];
|
||||
|
||||
authErrors.forEach((error) => {
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
isAuthError: (error: unknown) => boolean;
|
||||
};
|
||||
expect(generator.isAuthError(error)).toBe(true);
|
||||
});
|
||||
|
||||
// 400 is not typically an auth error, it's bad request
|
||||
const nonAuthError = { status: 400 };
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
isAuthError: (error: unknown) => boolean;
|
||||
};
|
||||
expect(generator.isAuthError(nonAuthError)).toBe(false);
|
||||
});
|
||||
|
||||
it('should identify auth errors by message content variations', () => {
|
||||
const authMessages = [
|
||||
'UNAUTHORIZED access',
|
||||
'Access is FORBIDDEN',
|
||||
'Invalid API Key provided',
|
||||
'Invalid Access Token',
|
||||
'Token has Expired',
|
||||
'Authentication Required',
|
||||
'Access Denied by server',
|
||||
'The token has expired and needs refresh',
|
||||
'Bearer token expired',
|
||||
];
|
||||
|
||||
authMessages.forEach((message) => {
|
||||
const error = new Error(message);
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
isAuthError: (error: unknown) => boolean;
|
||||
};
|
||||
expect(generator.isAuthError(error)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it('should not identify non-auth errors', () => {
|
||||
const nonAuthErrors = [
|
||||
new Error('Network timeout'),
|
||||
new Error('Rate limit exceeded'),
|
||||
{ status: 500 },
|
||||
{ code: 429 },
|
||||
'Internal server error',
|
||||
null,
|
||||
undefined,
|
||||
'',
|
||||
{ status: 200 },
|
||||
new Error('Model not found'),
|
||||
];
|
||||
|
||||
nonAuthErrors.forEach((error) => {
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
isAuthError: (error: unknown) => boolean;
|
||||
};
|
||||
expect(generator.isAuthError(error)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle complex error objects', () => {
|
||||
const complexErrors = [
|
||||
{ error: { status: 401, message: 'Unauthorized' } },
|
||||
{ response: { status: 403 } },
|
||||
{ details: { code: 401 } },
|
||||
];
|
||||
|
||||
// These should not be identified as auth errors because the method only looks at top-level properties
|
||||
complexErrors.forEach((error) => {
|
||||
const generator = qwenContentGenerator as unknown as {
|
||||
isAuthError: (error: unknown) => boolean;
|
||||
};
|
||||
expect(generator.isAuthError(error)).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Stream Error Handling', () => {
|
||||
it('should restore credentials when stream generation fails', async () => {
|
||||
const client = (
|
||||
qwenContentGenerator as unknown as {
|
||||
client: { apiKey: string; baseURL: string };
|
||||
}
|
||||
).client;
|
||||
const originalApiKey = client.apiKey;
|
||||
const originalBaseURL = client.baseURL;
|
||||
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockResolvedValue({
|
||||
token: 'stream-token',
|
||||
});
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue({
|
||||
...mockCredentials,
|
||||
resource_url: 'https://stream-endpoint.com',
|
||||
});
|
||||
|
||||
// Mock parent method to throw error
|
||||
const parentPrototype = Object.getPrototypeOf(
|
||||
Object.getPrototypeOf(qwenContentGenerator),
|
||||
);
|
||||
const originalGenerateContentStream =
|
||||
parentPrototype.generateContentStream;
|
||||
parentPrototype.generateContentStream = vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error('Stream error'));
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Stream test' }] }],
|
||||
};
|
||||
|
||||
try {
|
||||
await qwenContentGenerator.generateContentStream(
|
||||
request,
|
||||
'test-prompt-id',
|
||||
);
|
||||
} catch (error) {
|
||||
expect(error).toBeInstanceOf(Error);
|
||||
}
|
||||
|
||||
// Credentials should be restored even on error
|
||||
expect(client.apiKey).toBe(originalApiKey);
|
||||
expect(client.baseURL).toBe(originalBaseURL);
|
||||
|
||||
// Restore original method
|
||||
parentPrototype.generateContentStream = originalGenerateContentStream;
|
||||
});
|
||||
|
||||
it('should not restore credentials in finally block for successful streams', async () => {
|
||||
const client = (
|
||||
qwenContentGenerator as unknown as {
|
||||
client: { apiKey: string; baseURL: string };
|
||||
}
|
||||
).client;
|
||||
|
||||
// Set up the mock to return stream credentials
|
||||
const streamCredentials = {
|
||||
access_token: 'stream-token',
|
||||
refresh_token: 'stream-refresh-token',
|
||||
resource_url: 'https://stream-endpoint.com',
|
||||
expiry_date: Date.now() + 3600000,
|
||||
};
|
||||
|
||||
vi.mocked(mockQwenClient.getAccessToken).mockResolvedValue({
|
||||
token: 'stream-token',
|
||||
});
|
||||
vi.mocked(mockQwenClient.getCredentials).mockReturnValue(
|
||||
streamCredentials,
|
||||
);
|
||||
|
||||
// Set the SharedTokenManager mock to return stream credentials
|
||||
const mockTokenManager = SharedTokenManager.getInstance() as unknown as {
|
||||
setMockCredentials: (credentials: QwenCredentials | null) => void;
|
||||
};
|
||||
mockTokenManager.setMockCredentials(streamCredentials);
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Stream test' }] }],
|
||||
};
|
||||
|
||||
const stream = await qwenContentGenerator.generateContentStream(
|
||||
request,
|
||||
'test-prompt-id',
|
||||
);
|
||||
|
||||
// After successful stream creation, credentials should still be set for the stream
|
||||
expect(client.apiKey).toBe('stream-token');
|
||||
expect(client.baseURL).toBe('https://stream-endpoint.com/v1');
|
||||
|
||||
// Consume the stream
|
||||
const chunks = [];
|
||||
for await (const chunk of stream) {
|
||||
chunks.push(chunk);
|
||||
}
|
||||
|
||||
expect(chunks).toHaveLength(2);
|
||||
|
||||
// Clean up
|
||||
mockTokenManager.setMockCredentials(null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Token and Endpoint Management', () => {
|
||||
it('should get current token from SharedTokenManager', () => {
|
||||
const mockTokenManager = {
|
||||
getCurrentCredentials: vi.fn().mockReturnValue({
|
||||
access_token: 'current-token',
|
||||
}),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
expect(newGenerator.getCurrentToken()).toBe('current-token');
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
|
||||
it('should return null when no credentials available', () => {
|
||||
const mockTokenManager = {
|
||||
getCurrentCredentials: vi.fn().mockReturnValue(null),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
expect(newGenerator.getCurrentToken()).toBeNull();
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
|
||||
it('should return null when credentials have no access token', () => {
|
||||
const mockTokenManager = {
|
||||
getCurrentCredentials: vi.fn().mockReturnValue({
|
||||
access_token: undefined,
|
||||
}),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
expect(newGenerator.getCurrentToken()).toBeNull();
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
|
||||
it('should clear token through SharedTokenManager', () => {
|
||||
const mockTokenManager = {
|
||||
clearCache: vi.fn(),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
newGenerator.clearToken();
|
||||
|
||||
expect(mockTokenManager.clearCache).toHaveBeenCalled();
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
});
|
||||
|
||||
describe('Constructor and Initialization', () => {
|
||||
it('should initialize with default base URL', () => {
|
||||
const generator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const client = (generator as unknown as { client: { baseURL: string } })
|
||||
.client;
|
||||
expect(client.baseURL).toBe(
|
||||
'https://dashscope.aliyuncs.com/compatible-mode/v1',
|
||||
);
|
||||
});
|
||||
|
||||
it('should get SharedTokenManager instance', () => {
|
||||
const generator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const sharedManager = (
|
||||
generator as unknown as { sharedManager: SharedTokenManager }
|
||||
).sharedManager;
|
||||
expect(sharedManager).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Edge Cases and Error Conditions', () => {
|
||||
it('should handle token retrieval with warning when SharedTokenManager fails', async () => {
|
||||
const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
|
||||
|
||||
const mockTokenManager = {
|
||||
getValidCredentials: vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error('Internal token manager error')),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
|
||||
await expect(
|
||||
newGenerator.generateContent(request, 'test-prompt-id'),
|
||||
).rejects.toThrow('Failed to obtain valid Qwen access token');
|
||||
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
'Failed to get token from shared manager:',
|
||||
expect.any(Error),
|
||||
);
|
||||
|
||||
consoleSpy.mockRestore();
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
|
||||
it('should handle all method types with token failure', async () => {
|
||||
const mockTokenManager = {
|
||||
getValidCredentials: vi
|
||||
.fn()
|
||||
.mockRejectedValue(new Error('Token error')),
|
||||
};
|
||||
|
||||
const originalGetInstance = SharedTokenManager.getInstance;
|
||||
SharedTokenManager.getInstance = vi
|
||||
.fn()
|
||||
.mockReturnValue(mockTokenManager);
|
||||
|
||||
const newGenerator = new QwenContentGenerator(
|
||||
mockQwenClient,
|
||||
{ model: 'qwen-turbo', authType: AuthType.QWEN_OAUTH },
|
||||
mockConfig,
|
||||
);
|
||||
|
||||
const generateRequest: GenerateContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
|
||||
};
|
||||
|
||||
const countRequest: CountTokensParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ role: 'user', parts: [{ text: 'Count' }] }],
|
||||
};
|
||||
|
||||
const embedRequest: EmbedContentParameters = {
|
||||
model: 'qwen-turbo',
|
||||
contents: [{ parts: [{ text: 'Embed' }] }],
|
||||
};
|
||||
|
||||
// All methods should fail with the same error
|
||||
await expect(
|
||||
newGenerator.generateContent(generateRequest, 'test-id'),
|
||||
).rejects.toThrow('Failed to obtain valid Qwen access token');
|
||||
|
||||
await expect(
|
||||
newGenerator.generateContentStream(generateRequest, 'test-id'),
|
||||
).rejects.toThrow('Failed to obtain valid Qwen access token');
|
||||
|
||||
await expect(newGenerator.countTokens(countRequest)).rejects.toThrow(
|
||||
'Failed to obtain valid Qwen access token',
|
||||
);
|
||||
|
||||
await expect(newGenerator.embedContent(embedRequest)).rejects.toThrow(
|
||||
'Failed to obtain valid Qwen access token',
|
||||
);
|
||||
|
||||
SharedTokenManager.getInstance = originalGetInstance;
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -5,8 +5,12 @@
|
||||
*/
|
||||
|
||||
import { OpenAIContentGenerator } from '../core/openaiContentGenerator.js';
|
||||
import { IQwenOAuth2Client } from './qwenOAuth2.js';
|
||||
import { SharedTokenManager } from './sharedTokenManager.js';
|
||||
import {
|
||||
IQwenOAuth2Client,
|
||||
type TokenRefreshData,
|
||||
type ErrorData,
|
||||
isErrorResponse,
|
||||
} from './qwenOAuth2.js';
|
||||
import { Config } from '../config/config.js';
|
||||
import {
|
||||
GenerateContentParameters,
|
||||
@@ -27,8 +31,11 @@ const DEFAULT_QWEN_BASE_URL =
|
||||
*/
|
||||
export class QwenContentGenerator extends OpenAIContentGenerator {
|
||||
private qwenClient: IQwenOAuth2Client;
|
||||
private sharedManager: SharedTokenManager;
|
||||
private currentToken?: string;
|
||||
|
||||
// Token management (integrated from QwenTokenManager)
|
||||
private currentToken: string | null = null;
|
||||
private currentEndpoint: string | null = null;
|
||||
private refreshPromise: Promise<string> | null = null;
|
||||
|
||||
constructor(
|
||||
qwenClient: IQwenOAuth2Client,
|
||||
@@ -38,7 +45,6 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
|
||||
// Initialize with empty API key, we'll override it dynamically
|
||||
super(contentGeneratorConfig, config);
|
||||
this.qwenClient = qwenClient;
|
||||
this.sharedManager = SharedTokenManager.getInstance();
|
||||
|
||||
// Set default base URL, will be updated dynamically
|
||||
this.client.baseURL = DEFAULT_QWEN_BASE_URL;
|
||||
@@ -47,8 +53,8 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
|
||||
/**
|
||||
* Get the current endpoint URL with proper protocol and /v1 suffix
|
||||
*/
|
||||
private getCurrentEndpoint(resourceUrl?: string): string {
|
||||
const baseEndpoint = resourceUrl || DEFAULT_QWEN_BASE_URL;
|
||||
private getCurrentEndpoint(): string {
|
||||
const baseEndpoint = this.currentEndpoint || DEFAULT_QWEN_BASE_URL;
|
||||
const suffix = '/v1';
|
||||
|
||||
// Normalize the URL: add protocol if missing, ensure /v1 suffix
|
||||
@@ -73,149 +79,237 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get valid token and endpoint using the shared token manager
|
||||
* Override to use dynamic token and endpoint
|
||||
*/
|
||||
private async getValidToken(): Promise<{ token: string; endpoint: string }> {
|
||||
try {
|
||||
// Use SharedTokenManager for consistent token/endpoint pairing and automatic refresh
|
||||
const credentials = await this.sharedManager.getValidCredentials(
|
||||
this.qwenClient,
|
||||
);
|
||||
override async generateContent(
|
||||
request: GenerateContentParameters,
|
||||
userPromptId: string,
|
||||
): Promise<GenerateContentResponse> {
|
||||
return this.withValidToken(async (token) => {
|
||||
// Temporarily update the API key and base URL
|
||||
const originalApiKey = this.client.apiKey;
|
||||
const originalBaseURL = this.client.baseURL;
|
||||
this.client.apiKey = token;
|
||||
this.client.baseURL = this.getCurrentEndpoint();
|
||||
|
||||
if (!credentials.access_token) {
|
||||
throw new Error('No access token available');
|
||||
try {
|
||||
return await super.generateContent(request, userPromptId);
|
||||
} finally {
|
||||
// Restore original values
|
||||
this.client.apiKey = originalApiKey;
|
||||
this.client.baseURL = originalBaseURL;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
token: credentials.access_token,
|
||||
endpoint: this.getCurrentEndpoint(credentials.resource_url),
|
||||
};
|
||||
} catch (error) {
|
||||
// Propagate auth errors as-is for retry logic
|
||||
if (this.isAuthError(error)) {
|
||||
/**
|
||||
* Override to use dynamic token and endpoint
|
||||
*/
|
||||
override async generateContentStream(
|
||||
request: GenerateContentParameters,
|
||||
userPromptId: string,
|
||||
): Promise<AsyncGenerator<GenerateContentResponse>> {
|
||||
return this.withValidTokenForStream(async (token) => {
|
||||
// Update the API key and base URL before streaming
|
||||
const originalApiKey = this.client.apiKey;
|
||||
const originalBaseURL = this.client.baseURL;
|
||||
this.client.apiKey = token;
|
||||
this.client.baseURL = this.getCurrentEndpoint();
|
||||
|
||||
try {
|
||||
return await super.generateContentStream(request, userPromptId);
|
||||
} catch (error) {
|
||||
// Restore original values on error
|
||||
this.client.apiKey = originalApiKey;
|
||||
this.client.baseURL = originalBaseURL;
|
||||
throw error;
|
||||
}
|
||||
console.warn('Failed to get token from shared manager:', error);
|
||||
// Note: We don't restore the values in finally for streaming because
|
||||
// the generator may continue to be used after this method returns
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to use dynamic token and endpoint
|
||||
*/
|
||||
override async countTokens(
|
||||
request: CountTokensParameters,
|
||||
): Promise<CountTokensResponse> {
|
||||
return this.withValidToken(async (token) => {
|
||||
const originalApiKey = this.client.apiKey;
|
||||
const originalBaseURL = this.client.baseURL;
|
||||
this.client.apiKey = token;
|
||||
this.client.baseURL = this.getCurrentEndpoint();
|
||||
|
||||
try {
|
||||
return await super.countTokens(request);
|
||||
} finally {
|
||||
this.client.apiKey = originalApiKey;
|
||||
this.client.baseURL = originalBaseURL;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to use dynamic token and endpoint
|
||||
*/
|
||||
override async embedContent(
|
||||
request: EmbedContentParameters,
|
||||
): Promise<EmbedContentResponse> {
|
||||
return this.withValidToken(async (token) => {
|
||||
const originalApiKey = this.client.apiKey;
|
||||
const originalBaseURL = this.client.baseURL;
|
||||
this.client.apiKey = token;
|
||||
this.client.baseURL = this.getCurrentEndpoint();
|
||||
|
||||
try {
|
||||
return await super.embedContent(request);
|
||||
} finally {
|
||||
this.client.apiKey = originalApiKey;
|
||||
this.client.baseURL = originalBaseURL;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute operation with a valid token, with retry on auth failure
|
||||
*/
|
||||
private async withValidToken<T>(
|
||||
operation: (token: string) => Promise<T>,
|
||||
): Promise<T> {
|
||||
const token = await this.getTokenWithRetry();
|
||||
|
||||
try {
|
||||
return await operation(token);
|
||||
} catch (error) {
|
||||
// Check if this is an authentication error
|
||||
if (this.isAuthError(error)) {
|
||||
// Refresh token and retry once silently
|
||||
const newToken = await this.refreshToken();
|
||||
return await operation(newToken);
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute operation with a valid token for streaming, with retry on auth failure
|
||||
*/
|
||||
private async withValidTokenForStream<T>(
|
||||
operation: (token: string) => Promise<T>,
|
||||
): Promise<T> {
|
||||
const token = await this.getTokenWithRetry();
|
||||
|
||||
try {
|
||||
return await operation(token);
|
||||
} catch (error) {
|
||||
// Check if this is an authentication error
|
||||
if (this.isAuthError(error)) {
|
||||
// Refresh token and retry once silently
|
||||
const newToken = await this.refreshToken();
|
||||
return await operation(newToken);
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get token with retry logic
|
||||
*/
|
||||
private async getTokenWithRetry(): Promise<string> {
|
||||
try {
|
||||
return await this.getValidToken();
|
||||
} catch (error) {
|
||||
console.error('Failed to get valid token:', error);
|
||||
throw new Error(
|
||||
'Failed to obtain valid Qwen access token. Please re-authenticate.',
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Token management methods (integrated from QwenTokenManager)
|
||||
|
||||
/**
|
||||
* Execute an operation with automatic credential management and retry logic.
|
||||
* This method handles:
|
||||
* - Dynamic token and endpoint retrieval
|
||||
* - Temporary client configuration updates
|
||||
* - Automatic restoration of original configuration
|
||||
* - Retry logic on authentication errors with token refresh
|
||||
*
|
||||
* @param operation - The operation to execute with updated client configuration
|
||||
* @param restoreOnCompletion - Whether to restore original config after operation completes
|
||||
* @returns The result of the operation
|
||||
* Get a valid access token, refreshing if necessary
|
||||
*/
|
||||
private async executeWithCredentialManagement<T>(
|
||||
operation: () => Promise<T>,
|
||||
restoreOnCompletion: boolean = true,
|
||||
): Promise<T> {
|
||||
// Attempt the operation with credential management and retry logic
|
||||
const attemptOperation = async (): Promise<T> => {
|
||||
const { token, endpoint } = await this.getValidToken();
|
||||
private async getValidToken(): Promise<string> {
|
||||
// If there's already a refresh in progress, wait for it
|
||||
if (this.refreshPromise) {
|
||||
return this.refreshPromise;
|
||||
}
|
||||
|
||||
// Store original configuration
|
||||
const originalApiKey = this.client.apiKey;
|
||||
const originalBaseURL = this.client.baseURL;
|
||||
|
||||
// Apply dynamic configuration
|
||||
this.client.apiKey = token;
|
||||
this.client.baseURL = endpoint;
|
||||
|
||||
try {
|
||||
const result = await operation();
|
||||
|
||||
// For streaming operations, we may need to keep the configuration active
|
||||
if (restoreOnCompletion) {
|
||||
this.client.apiKey = originalApiKey;
|
||||
this.client.baseURL = originalBaseURL;
|
||||
}
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
// Always restore on error
|
||||
this.client.apiKey = originalApiKey;
|
||||
this.client.baseURL = originalBaseURL;
|
||||
throw error;
|
||||
}
|
||||
};
|
||||
|
||||
// Execute with retry logic for auth errors
|
||||
try {
|
||||
return await attemptOperation();
|
||||
} catch (error) {
|
||||
if (this.isAuthError(error)) {
|
||||
try {
|
||||
// Use SharedTokenManager to properly refresh and persist the token
|
||||
// This ensures the refreshed token is saved to oauth_creds.json
|
||||
await this.sharedManager.getValidCredentials(this.qwenClient, true);
|
||||
// Retry the operation once with fresh credentials
|
||||
return await attemptOperation();
|
||||
} catch (_refreshError) {
|
||||
throw new Error(
|
||||
'Failed to obtain valid Qwen access token. Please re-authenticate.',
|
||||
);
|
||||
const { token } = await this.qwenClient.getAccessToken();
|
||||
if (token) {
|
||||
this.currentToken = token;
|
||||
// Also update endpoint from current credentials
|
||||
const credentials = this.qwenClient.getCredentials();
|
||||
if (credentials.resource_url) {
|
||||
this.currentEndpoint = credentials.resource_url;
|
||||
}
|
||||
return token;
|
||||
}
|
||||
throw error;
|
||||
} catch (error) {
|
||||
console.warn('Failed to get access token, attempting refresh:', error);
|
||||
}
|
||||
|
||||
// Start a new refresh operation
|
||||
this.refreshPromise = this.performTokenRefresh();
|
||||
|
||||
try {
|
||||
const newToken = await this.refreshPromise;
|
||||
return newToken;
|
||||
} finally {
|
||||
this.refreshPromise = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to use dynamic token and endpoint with automatic retry
|
||||
* Force refresh the access token
|
||||
*/
|
||||
override async generateContent(
|
||||
request: GenerateContentParameters,
|
||||
userPromptId: string,
|
||||
): Promise<GenerateContentResponse> {
|
||||
return this.executeWithCredentialManagement(() =>
|
||||
super.generateContent(request, userPromptId),
|
||||
);
|
||||
private async refreshToken(): Promise<string> {
|
||||
this.refreshPromise = this.performTokenRefresh();
|
||||
|
||||
try {
|
||||
const newToken = await this.refreshPromise;
|
||||
return newToken;
|
||||
} finally {
|
||||
this.refreshPromise = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to use dynamic token and endpoint with automatic retry.
|
||||
* Note: For streaming, the client configuration is not restored immediately
|
||||
* since the generator may continue to be used after this method returns.
|
||||
*/
|
||||
override async generateContentStream(
|
||||
request: GenerateContentParameters,
|
||||
userPromptId: string,
|
||||
): Promise<AsyncGenerator<GenerateContentResponse>> {
|
||||
return this.executeWithCredentialManagement(
|
||||
() => super.generateContentStream(request, userPromptId),
|
||||
false, // Don't restore immediately for streaming
|
||||
);
|
||||
}
|
||||
private async performTokenRefresh(): Promise<string> {
|
||||
try {
|
||||
const response = await this.qwenClient.refreshAccessToken();
|
||||
|
||||
/**
|
||||
* Override to use dynamic token and endpoint with automatic retry
|
||||
*/
|
||||
override async countTokens(
|
||||
request: CountTokensParameters,
|
||||
): Promise<CountTokensResponse> {
|
||||
return this.executeWithCredentialManagement(() =>
|
||||
super.countTokens(request),
|
||||
);
|
||||
}
|
||||
if (isErrorResponse(response)) {
|
||||
const errorData = response as ErrorData;
|
||||
throw new Error(
|
||||
`${errorData?.error || 'Unknown error'} - ${errorData?.error_description || 'No details provided'}`,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Override to use dynamic token and endpoint with automatic retry
|
||||
*/
|
||||
override async embedContent(
|
||||
request: EmbedContentParameters,
|
||||
): Promise<EmbedContentResponse> {
|
||||
return this.executeWithCredentialManagement(() =>
|
||||
super.embedContent(request),
|
||||
);
|
||||
const tokenData = response as TokenRefreshData;
|
||||
|
||||
if (!tokenData.access_token) {
|
||||
throw new Error('Failed to refresh access token: no token returned');
|
||||
}
|
||||
|
||||
this.currentToken = tokenData.access_token;
|
||||
|
||||
// Update endpoint if provided
|
||||
if (tokenData.resource_url) {
|
||||
this.currentEndpoint = tokenData.resource_url;
|
||||
}
|
||||
|
||||
return tokenData.access_token;
|
||||
} catch (error) {
|
||||
throw new Error(
|
||||
`${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -237,10 +331,9 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
|
||||
const errorCode = errorWithCode?.status || errorWithCode?.code;
|
||||
|
||||
return (
|
||||
errorCode === 400 ||
|
||||
errorCode === 401 ||
|
||||
errorCode === 403 ||
|
||||
errorCode === '401' ||
|
||||
errorCode === '403' ||
|
||||
errorMessage.includes('unauthorized') ||
|
||||
errorMessage.includes('forbidden') ||
|
||||
errorMessage.includes('invalid api key') ||
|
||||
@@ -256,22 +349,15 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
|
||||
* Get the current cached token (may be expired)
|
||||
*/
|
||||
getCurrentToken(): string | null {
|
||||
// First check internal state for backwards compatibility with tests
|
||||
if (this.currentToken) {
|
||||
return this.currentToken;
|
||||
}
|
||||
// Fall back to SharedTokenManager
|
||||
const credentials = this.sharedManager.getCurrentCredentials();
|
||||
return credentials?.access_token || null;
|
||||
return this.currentToken;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the cached token
|
||||
* Clear the cached token and endpoint
|
||||
*/
|
||||
clearToken(): void {
|
||||
// Clear internal state for backwards compatibility with tests
|
||||
this.currentToken = undefined;
|
||||
// Also clear SharedTokenManager
|
||||
this.sharedManager.clearCache();
|
||||
this.currentToken = null;
|
||||
this.currentEndpoint = null;
|
||||
this.refreshPromise = null;
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -13,11 +13,6 @@ import open from 'open';
|
||||
import { EventEmitter } from 'events';
|
||||
import { Config } from '../config/config.js';
|
||||
import { randomUUID } from 'node:crypto';
|
||||
import {
|
||||
SharedTokenManager,
|
||||
TokenManagerError,
|
||||
TokenError,
|
||||
} from './sharedTokenManager.js';
|
||||
|
||||
// OAuth Endpoints
|
||||
const QWEN_OAUTH_BASE_URL = 'https://chat.qwen.ai';
|
||||
@@ -239,11 +234,8 @@ export interface IQwenOAuth2Client {
|
||||
*/
|
||||
export class QwenOAuth2Client implements IQwenOAuth2Client {
|
||||
private credentials: QwenCredentials = {};
|
||||
private sharedManager: SharedTokenManager;
|
||||
|
||||
constructor() {
|
||||
this.sharedManager = SharedTokenManager.getInstance();
|
||||
}
|
||||
constructor(_options?: { proxy?: string }) {}
|
||||
|
||||
setCredentials(credentials: QwenCredentials): void {
|
||||
this.credentials = credentials;
|
||||
@@ -254,23 +246,17 @@ export class QwenOAuth2Client implements IQwenOAuth2Client {
|
||||
}
|
||||
|
||||
async getAccessToken(): Promise<{ token?: string }> {
|
||||
try {
|
||||
// Use shared manager to get valid credentials with cross-session synchronization
|
||||
const credentials = await this.sharedManager.getValidCredentials(this);
|
||||
return { token: credentials.access_token };
|
||||
} catch (error) {
|
||||
console.warn('Failed to get access token from shared manager:', error);
|
||||
|
||||
// Only return cached token if it's still valid, don't refresh uncoordinated
|
||||
// This prevents the cross-session token invalidation issue
|
||||
if (this.credentials.access_token && this.isTokenValid()) {
|
||||
return { token: this.credentials.access_token };
|
||||
}
|
||||
|
||||
// If we can't get valid credentials through shared manager, fail gracefully
|
||||
// All token refresh operations should go through the SharedTokenManager
|
||||
return { token: undefined };
|
||||
if (this.credentials.access_token && this.isTokenValid()) {
|
||||
return { token: this.credentials.access_token };
|
||||
}
|
||||
|
||||
if (this.credentials.refresh_token) {
|
||||
const refreshResponse = await this.refreshAccessToken();
|
||||
const tokenData = refreshResponse as TokenRefreshData;
|
||||
return { token: tokenData.access_token };
|
||||
}
|
||||
|
||||
return { token: undefined };
|
||||
}
|
||||
|
||||
async requestDeviceAuthorization(options: {
|
||||
@@ -303,7 +289,7 @@ export class QwenOAuth2Client implements IQwenOAuth2Client {
|
||||
}
|
||||
|
||||
const result = (await response.json()) as DeviceAuthorizationResponse;
|
||||
console.debug('Device authorization result:', result);
|
||||
console.log('Device authorization result:', result);
|
||||
|
||||
// Check if the response indicates success
|
||||
if (!isDeviceAuthorizationSuccess(result)) {
|
||||
@@ -437,8 +423,8 @@ export class QwenOAuth2Client implements IQwenOAuth2Client {
|
||||
|
||||
this.setCredentials(tokens);
|
||||
|
||||
// Note: File caching is now handled by SharedTokenManager
|
||||
// to prevent cross-session token invalidation issues
|
||||
// Cache the updated credentials to file
|
||||
await cacheQwenCredentials(tokens);
|
||||
|
||||
return responseData;
|
||||
}
|
||||
@@ -476,85 +462,68 @@ export const qwenOAuth2Events = new EventEmitter();
|
||||
export async function getQwenOAuthClient(
|
||||
config: Config,
|
||||
): Promise<QwenOAuth2Client> {
|
||||
const client = new QwenOAuth2Client();
|
||||
const client = new QwenOAuth2Client({
|
||||
proxy: config.getProxy(),
|
||||
});
|
||||
|
||||
// Use shared token manager to get valid credentials with cross-session synchronization
|
||||
const sharedManager = SharedTokenManager.getInstance();
|
||||
// If there are cached creds on disk, they always take precedence
|
||||
if (await loadCachedQwenCredentials(client)) {
|
||||
console.log('Loaded cached Qwen credentials.');
|
||||
|
||||
try {
|
||||
// Try to get valid credentials from shared cache first
|
||||
const credentials = await sharedManager.getValidCredentials(client);
|
||||
client.setCredentials(credentials);
|
||||
return client;
|
||||
} catch (error: unknown) {
|
||||
console.debug(
|
||||
'Shared token manager failed, attempting device flow:',
|
||||
error,
|
||||
);
|
||||
|
||||
// Handle specific token manager errors
|
||||
if (error instanceof TokenManagerError) {
|
||||
switch (error.type) {
|
||||
case TokenError.NO_REFRESH_TOKEN:
|
||||
console.debug(
|
||||
'No refresh token available, proceeding with device flow',
|
||||
);
|
||||
break;
|
||||
case TokenError.REFRESH_FAILED:
|
||||
console.debug('Token refresh failed, proceeding with device flow');
|
||||
break;
|
||||
case TokenError.NETWORK_ERROR:
|
||||
console.warn(
|
||||
'Network error during token refresh, trying device flow',
|
||||
);
|
||||
break;
|
||||
default:
|
||||
console.warn('Token manager error:', (error as Error).message);
|
||||
}
|
||||
}
|
||||
|
||||
// If shared manager fails, check if we have cached credentials for device flow
|
||||
if (await loadCachedQwenCredentials(client)) {
|
||||
// We have cached credentials but they might be expired
|
||||
// Try device flow instead of forcing refresh
|
||||
const result = await authWithQwenDeviceFlow(client, config);
|
||||
if (!result.success) {
|
||||
throw new Error('Qwen OAuth authentication failed');
|
||||
}
|
||||
try {
|
||||
await client.refreshAccessToken();
|
||||
return client;
|
||||
} catch (error: unknown) {
|
||||
// Handle refresh token errors
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
|
||||
const isInvalidToken = errorMessage.includes(
|
||||
'Refresh token expired or invalid',
|
||||
);
|
||||
const userMessage = isInvalidToken
|
||||
? 'Cached credentials are invalid. Please re-authenticate.'
|
||||
: `Token refresh failed: ${errorMessage}`;
|
||||
const throwMessage = isInvalidToken
|
||||
? 'Cached Qwen credentials are invalid. Please re-authenticate.'
|
||||
: `Qwen token refresh failed: ${errorMessage}`;
|
||||
|
||||
// Emit token refresh error event
|
||||
qwenOAuth2Events.emit(QwenOAuth2Event.AuthProgress, 'error', userMessage);
|
||||
throw new Error(throwMessage);
|
||||
}
|
||||
|
||||
// No cached credentials, use device authorization flow for authentication
|
||||
const result = await authWithQwenDeviceFlow(client, config);
|
||||
if (!result.success) {
|
||||
// Only emit timeout event if the failure reason is actually timeout
|
||||
// Other error types (401, 429, etc.) have already emitted their specific events
|
||||
if (result.reason === 'timeout') {
|
||||
qwenOAuth2Events.emit(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
'timeout',
|
||||
'Authentication timed out. Please try again or select a different authentication method.',
|
||||
);
|
||||
}
|
||||
|
||||
// Throw error with appropriate message based on failure reason
|
||||
switch (result.reason) {
|
||||
case 'timeout':
|
||||
throw new Error('Qwen OAuth authentication timed out');
|
||||
case 'cancelled':
|
||||
throw new Error('Qwen OAuth authentication was cancelled by user');
|
||||
case 'rate_limit':
|
||||
throw new Error(
|
||||
'Too many request for Qwen OAuth authentication, please try again later.',
|
||||
);
|
||||
case 'error':
|
||||
default:
|
||||
throw new Error('Qwen OAuth authentication failed');
|
||||
}
|
||||
}
|
||||
|
||||
return client;
|
||||
}
|
||||
|
||||
// Use device authorization flow for authentication (single attempt)
|
||||
const result = await authWithQwenDeviceFlow(client, config);
|
||||
if (!result.success) {
|
||||
// Only emit timeout event if the failure reason is actually timeout
|
||||
// Other error types (401, 429, etc.) have already emitted their specific events
|
||||
if (result.reason === 'timeout') {
|
||||
qwenOAuth2Events.emit(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
'timeout',
|
||||
'Authentication timed out. Please try again or select a different authentication method.',
|
||||
);
|
||||
}
|
||||
|
||||
// Throw error with appropriate message based on failure reason
|
||||
switch (result.reason) {
|
||||
case 'timeout':
|
||||
throw new Error('Qwen OAuth authentication timed out');
|
||||
case 'cancelled':
|
||||
throw new Error('Qwen OAuth authentication was cancelled by user');
|
||||
case 'rate_limit':
|
||||
throw new Error(
|
||||
'Too many request for Qwen OAuth authentication, please try again later.',
|
||||
);
|
||||
case 'error':
|
||||
default:
|
||||
throw new Error('Qwen OAuth authentication failed');
|
||||
}
|
||||
}
|
||||
|
||||
return client;
|
||||
}
|
||||
|
||||
async function authWithQwenDeviceFlow(
|
||||
@@ -611,9 +580,7 @@ async function authWithQwenDeviceFlow(
|
||||
// causing the entire Node.js process to crash.
|
||||
if (childProcess) {
|
||||
childProcess.on('error', () => {
|
||||
console.debug(
|
||||
'Failed to open browser. Visit this URL to authorize:',
|
||||
);
|
||||
console.log('Failed to open browser. Visit this URL to authorize:');
|
||||
showFallbackMessage();
|
||||
});
|
||||
}
|
||||
@@ -632,7 +599,7 @@ async function authWithQwenDeviceFlow(
|
||||
'Waiting for authorization...',
|
||||
);
|
||||
|
||||
console.debug('Waiting for authorization...\n');
|
||||
console.log('Waiting for authorization...\n');
|
||||
|
||||
// Poll for the token
|
||||
let pollInterval = 2000; // 2 seconds, can be increased if slow_down is received
|
||||
@@ -643,7 +610,7 @@ async function authWithQwenDeviceFlow(
|
||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
// Check if authentication was cancelled
|
||||
if (isCancelled) {
|
||||
console.debug('\nAuthentication cancelled by user.');
|
||||
console.log('\nAuthentication cancelled by user.');
|
||||
qwenOAuth2Events.emit(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
'error',
|
||||
@@ -653,7 +620,7 @@ async function authWithQwenDeviceFlow(
|
||||
}
|
||||
|
||||
try {
|
||||
console.debug('polling for token...');
|
||||
console.log('polling for token...');
|
||||
const tokenResponse = await client.pollDeviceToken({
|
||||
device_code: deviceAuth.device_code,
|
||||
code_verifier,
|
||||
@@ -686,7 +653,7 @@ async function authWithQwenDeviceFlow(
|
||||
'Authentication successful! Access token obtained.',
|
||||
);
|
||||
|
||||
console.debug('Authentication successful! Access token obtained.');
|
||||
console.log('Authentication successful! Access token obtained.');
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
@@ -697,8 +664,8 @@ async function authWithQwenDeviceFlow(
|
||||
// Handle slow_down error by increasing poll interval
|
||||
if (pendingData.slowDown) {
|
||||
pollInterval = Math.min(pollInterval * 1.5, 10000); // Increase by 50%, max 10 seconds
|
||||
console.debug(
|
||||
`\nServer requested to slow down, increasing poll interval to ${pollInterval}ms'`,
|
||||
console.log(
|
||||
`\nServer requested to slow down, increasing poll interval to ${pollInterval}ms`,
|
||||
);
|
||||
} else {
|
||||
pollInterval = 2000; // Reset to default interval
|
||||
@@ -739,7 +706,7 @@ async function authWithQwenDeviceFlow(
|
||||
|
||||
// Check for cancellation after waiting
|
||||
if (isCancelled) {
|
||||
console.debug('\nAuthentication cancelled by user.');
|
||||
console.log('\nAuthentication cancelled by user.');
|
||||
qwenOAuth2Events.emit(
|
||||
QwenOAuth2Event.AuthProgress,
|
||||
'error',
|
||||
@@ -867,7 +834,7 @@ export async function clearQwenCredentials(): Promise<void> {
|
||||
try {
|
||||
const filePath = getQwenCachedCredentialPath();
|
||||
await fs.unlink(filePath);
|
||||
console.debug('Cached Qwen credentials cleared successfully.');
|
||||
console.log('Cached Qwen credentials cleared successfully.');
|
||||
} catch (error: unknown) {
|
||||
// If file doesn't exist or can't be deleted, we consider it cleared
|
||||
if (error instanceof Error && 'code' in error && error.code === 'ENOENT') {
|
||||
|
||||
@@ -1,758 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
*/
|
||||
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { promises as fs, unlinkSync, type Stats } from 'node:fs';
|
||||
import * as os from 'os';
|
||||
import path from 'node:path';
|
||||
|
||||
import {
|
||||
SharedTokenManager,
|
||||
TokenManagerError,
|
||||
TokenError,
|
||||
} from './sharedTokenManager.js';
|
||||
import type {
|
||||
IQwenOAuth2Client,
|
||||
QwenCredentials,
|
||||
TokenRefreshData,
|
||||
ErrorData,
|
||||
} from './qwenOAuth2.js';
|
||||
|
||||
// Mock external dependencies
|
||||
vi.mock('node:fs', () => ({
|
||||
promises: {
|
||||
stat: vi.fn(),
|
||||
readFile: vi.fn(),
|
||||
writeFile: vi.fn(),
|
||||
mkdir: vi.fn(),
|
||||
unlink: vi.fn(),
|
||||
},
|
||||
unlinkSync: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock('node:os', () => ({
|
||||
homedir: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock('node:path', () => ({
|
||||
default: {
|
||||
join: vi.fn(),
|
||||
dirname: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
/**
|
||||
* Helper to access private properties for testing
|
||||
*/
|
||||
function getPrivateProperty<T>(obj: unknown, property: string): T {
|
||||
return (obj as Record<string, T>)[property];
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper to set private properties for testing
|
||||
*/
|
||||
function setPrivateProperty<T>(obj: unknown, property: string, value: T): void {
|
||||
(obj as Record<string, T>)[property] = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a mock QwenOAuth2Client for testing
|
||||
*/
|
||||
function createMockQwenClient(
|
||||
initialCredentials: Partial<QwenCredentials> = {},
|
||||
): IQwenOAuth2Client {
|
||||
let credentials: QwenCredentials = {
|
||||
access_token: 'mock_access_token',
|
||||
refresh_token: 'mock_refresh_token',
|
||||
token_type: 'Bearer',
|
||||
expiry_date: Date.now() + 3600000, // 1 hour from now
|
||||
resource_url: 'https://api.example.com',
|
||||
...initialCredentials,
|
||||
};
|
||||
|
||||
return {
|
||||
setCredentials: vi.fn((creds: QwenCredentials) => {
|
||||
credentials = { ...credentials, ...creds };
|
||||
}),
|
||||
getCredentials: vi.fn(() => credentials),
|
||||
getAccessToken: vi.fn(),
|
||||
requestDeviceAuthorization: vi.fn(),
|
||||
pollDeviceToken: vi.fn(),
|
||||
refreshAccessToken: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates valid mock credentials
|
||||
*/
|
||||
function createValidCredentials(
|
||||
overrides: Partial<QwenCredentials> = {},
|
||||
): QwenCredentials {
|
||||
return {
|
||||
access_token: 'valid_access_token',
|
||||
refresh_token: 'valid_refresh_token',
|
||||
token_type: 'Bearer',
|
||||
expiry_date: Date.now() + 3600000, // 1 hour from now
|
||||
resource_url: 'https://api.example.com',
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates expired mock credentials
|
||||
*/
|
||||
function createExpiredCredentials(
|
||||
overrides: Partial<QwenCredentials> = {},
|
||||
): QwenCredentials {
|
||||
return {
|
||||
access_token: 'expired_access_token',
|
||||
refresh_token: 'expired_refresh_token',
|
||||
token_type: 'Bearer',
|
||||
expiry_date: Date.now() - 3600000, // 1 hour ago
|
||||
resource_url: 'https://api.example.com',
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a successful token refresh response
|
||||
*/
|
||||
function createSuccessfulRefreshResponse(
|
||||
overrides: Partial<TokenRefreshData> = {},
|
||||
): TokenRefreshData {
|
||||
return {
|
||||
access_token: 'fresh_access_token',
|
||||
token_type: 'Bearer',
|
||||
expires_in: 3600,
|
||||
refresh_token: 'new_refresh_token',
|
||||
resource_url: 'https://api.example.com',
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an error response
|
||||
*/
|
||||
function createErrorResponse(
|
||||
error = 'invalid_grant',
|
||||
description = 'Token expired',
|
||||
): ErrorData {
|
||||
return {
|
||||
error,
|
||||
error_description: description,
|
||||
};
|
||||
}
|
||||
|
||||
describe('SharedTokenManager', () => {
|
||||
let tokenManager: SharedTokenManager;
|
||||
|
||||
// Get mocked modules
|
||||
const mockFs = vi.mocked(fs);
|
||||
const mockOs = vi.mocked(os);
|
||||
const mockPath = vi.mocked(path);
|
||||
const mockUnlinkSync = vi.mocked(unlinkSync);
|
||||
|
||||
beforeEach(() => {
|
||||
// Clean up any existing instance's listeners first
|
||||
const existingInstance = getPrivateProperty(
|
||||
SharedTokenManager,
|
||||
'instance',
|
||||
) as SharedTokenManager;
|
||||
if (existingInstance) {
|
||||
existingInstance.cleanup();
|
||||
}
|
||||
|
||||
// Reset all mocks
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Setup default mock implementations
|
||||
mockOs.homedir.mockReturnValue('/home/user');
|
||||
mockPath.join.mockImplementation((...args) => args.join('/'));
|
||||
mockPath.dirname.mockImplementation((filePath) => {
|
||||
// Handle undefined/null input gracefully
|
||||
if (!filePath || typeof filePath !== 'string') {
|
||||
return '/home/user/.qwen'; // Return the expected directory path
|
||||
}
|
||||
const parts = filePath.split('/');
|
||||
const result = parts.slice(0, -1).join('/');
|
||||
return result || '/';
|
||||
});
|
||||
|
||||
// Reset singleton instance for each test
|
||||
setPrivateProperty(SharedTokenManager, 'instance', null);
|
||||
tokenManager = SharedTokenManager.getInstance();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Clean up listeners after each test
|
||||
if (tokenManager) {
|
||||
tokenManager.cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
describe('Singleton Pattern', () => {
|
||||
it('should return the same instance when called multiple times', () => {
|
||||
const instance1 = SharedTokenManager.getInstance();
|
||||
const instance2 = SharedTokenManager.getInstance();
|
||||
|
||||
expect(instance1).toBe(instance2);
|
||||
expect(instance1).toBe(tokenManager);
|
||||
});
|
||||
|
||||
it('should create a new instance after reset', () => {
|
||||
const instance1 = SharedTokenManager.getInstance();
|
||||
|
||||
// Reset singleton for testing
|
||||
setPrivateProperty(SharedTokenManager, 'instance', null);
|
||||
const instance2 = SharedTokenManager.getInstance();
|
||||
|
||||
expect(instance1).not.toBe(instance2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getValidCredentials', () => {
|
||||
it('should return valid cached credentials without refresh', async () => {
|
||||
const mockClient = createMockQwenClient();
|
||||
const validCredentials = createValidCredentials();
|
||||
|
||||
// Mock file operations to indicate no file changes
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
|
||||
// Manually set cached credentials
|
||||
tokenManager.clearCache();
|
||||
const memoryCache = getPrivateProperty<{
|
||||
credentials: QwenCredentials | null;
|
||||
fileModTime: number;
|
||||
lastCheck: number;
|
||||
}>(tokenManager, 'memoryCache');
|
||||
memoryCache.credentials = validCredentials;
|
||||
memoryCache.fileModTime = 1000;
|
||||
memoryCache.lastCheck = Date.now();
|
||||
|
||||
const result = await tokenManager.getValidCredentials(mockClient);
|
||||
|
||||
expect(result).toEqual(validCredentials);
|
||||
expect(mockClient.refreshAccessToken).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should refresh expired credentials', async () => {
|
||||
const mockClient = createMockQwenClient(createExpiredCredentials());
|
||||
const refreshResponse = createSuccessfulRefreshResponse();
|
||||
|
||||
mockClient.refreshAccessToken = vi
|
||||
.fn()
|
||||
.mockResolvedValue(refreshResponse);
|
||||
|
||||
// Mock file operations
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
mockFs.writeFile.mockResolvedValue(undefined);
|
||||
mockFs.mkdir.mockResolvedValue(undefined);
|
||||
|
||||
const result = await tokenManager.getValidCredentials(mockClient);
|
||||
|
||||
expect(result.access_token).toBe(refreshResponse.access_token);
|
||||
expect(mockClient.refreshAccessToken).toHaveBeenCalled();
|
||||
expect(mockClient.setCredentials).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should force refresh when forceRefresh is true', async () => {
|
||||
const mockClient = createMockQwenClient(createValidCredentials());
|
||||
const refreshResponse = createSuccessfulRefreshResponse();
|
||||
|
||||
mockClient.refreshAccessToken = vi
|
||||
.fn()
|
||||
.mockResolvedValue(refreshResponse);
|
||||
|
||||
// Mock file operations
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
mockFs.writeFile.mockResolvedValue(undefined);
|
||||
mockFs.mkdir.mockResolvedValue(undefined);
|
||||
|
||||
const result = await tokenManager.getValidCredentials(mockClient, true);
|
||||
|
||||
expect(result.access_token).toBe(refreshResponse.access_token);
|
||||
expect(mockClient.refreshAccessToken).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should throw TokenManagerError when refresh token is missing', async () => {
|
||||
const mockClient = createMockQwenClient({
|
||||
access_token: 'expired_token',
|
||||
refresh_token: undefined, // No refresh token
|
||||
expiry_date: Date.now() - 3600000,
|
||||
});
|
||||
|
||||
await expect(
|
||||
tokenManager.getValidCredentials(mockClient),
|
||||
).rejects.toThrow(TokenManagerError);
|
||||
|
||||
await expect(
|
||||
tokenManager.getValidCredentials(mockClient),
|
||||
).rejects.toThrow('No refresh token available');
|
||||
});
|
||||
|
||||
it('should throw TokenManagerError when refresh fails', async () => {
|
||||
const mockClient = createMockQwenClient(createExpiredCredentials());
|
||||
const errorResponse = createErrorResponse();
|
||||
|
||||
mockClient.refreshAccessToken = vi.fn().mockResolvedValue(errorResponse);
|
||||
|
||||
// Mock file operations
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
|
||||
await expect(
|
||||
tokenManager.getValidCredentials(mockClient),
|
||||
).rejects.toThrow(TokenManagerError);
|
||||
});
|
||||
|
||||
it('should handle network errors during refresh', async () => {
|
||||
const mockClient = createMockQwenClient(createExpiredCredentials());
|
||||
const networkError = new Error('Network request failed');
|
||||
|
||||
mockClient.refreshAccessToken = vi.fn().mockRejectedValue(networkError);
|
||||
|
||||
// Mock file operations
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
|
||||
await expect(
|
||||
tokenManager.getValidCredentials(mockClient),
|
||||
).rejects.toThrow(TokenManagerError);
|
||||
});
|
||||
|
||||
it('should wait for ongoing refresh and return same result', async () => {
|
||||
const mockClient = createMockQwenClient(createExpiredCredentials());
|
||||
const refreshResponse = createSuccessfulRefreshResponse();
|
||||
|
||||
// Create a delayed refresh response
|
||||
let resolveRefresh: (value: TokenRefreshData) => void;
|
||||
const refreshPromise = new Promise<TokenRefreshData>((resolve) => {
|
||||
resolveRefresh = resolve;
|
||||
});
|
||||
|
||||
mockClient.refreshAccessToken = vi.fn().mockReturnValue(refreshPromise);
|
||||
|
||||
// Mock file operations
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
mockFs.writeFile.mockResolvedValue(undefined);
|
||||
mockFs.mkdir.mockResolvedValue(undefined);
|
||||
|
||||
// Start two concurrent refresh operations
|
||||
const promise1 = tokenManager.getValidCredentials(mockClient);
|
||||
const promise2 = tokenManager.getValidCredentials(mockClient);
|
||||
|
||||
// Resolve the refresh
|
||||
resolveRefresh!(refreshResponse);
|
||||
|
||||
const [result1, result2] = await Promise.all([promise1, promise2]);
|
||||
|
||||
expect(result1).toEqual(result2);
|
||||
expect(mockClient.refreshAccessToken).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it('should reload credentials from file when file is modified', async () => {
|
||||
const mockClient = createMockQwenClient();
|
||||
const fileCredentials = createValidCredentials({
|
||||
access_token: 'file_access_token',
|
||||
});
|
||||
|
||||
// Mock file operations to simulate file modification
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 2000 } as Stats);
|
||||
mockFs.readFile.mockResolvedValue(JSON.stringify(fileCredentials));
|
||||
|
||||
// Set initial cache state
|
||||
tokenManager.clearCache();
|
||||
const memoryCache = getPrivateProperty<{ fileModTime: number }>(
|
||||
tokenManager,
|
||||
'memoryCache',
|
||||
);
|
||||
memoryCache.fileModTime = 1000; // Older than file
|
||||
|
||||
const result = await tokenManager.getValidCredentials(mockClient);
|
||||
|
||||
expect(result.access_token).toBe('file_access_token');
|
||||
expect(mockFs.readFile).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Cache Management', () => {
|
||||
it('should clear cache', () => {
|
||||
// Set some cache data
|
||||
tokenManager.clearCache();
|
||||
const memoryCache = getPrivateProperty<{
|
||||
credentials: QwenCredentials | null;
|
||||
}>(tokenManager, 'memoryCache');
|
||||
memoryCache.credentials = createValidCredentials();
|
||||
|
||||
tokenManager.clearCache();
|
||||
|
||||
expect(tokenManager.getCurrentCredentials()).toBeNull();
|
||||
});
|
||||
|
||||
it('should return current credentials from cache', () => {
|
||||
const credentials = createValidCredentials();
|
||||
|
||||
tokenManager.clearCache();
|
||||
const memoryCache = getPrivateProperty<{
|
||||
credentials: QwenCredentials | null;
|
||||
}>(tokenManager, 'memoryCache');
|
||||
memoryCache.credentials = credentials;
|
||||
|
||||
expect(tokenManager.getCurrentCredentials()).toEqual(credentials);
|
||||
});
|
||||
|
||||
it('should return null when no credentials are cached', () => {
|
||||
tokenManager.clearCache();
|
||||
|
||||
expect(tokenManager.getCurrentCredentials()).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Refresh Status', () => {
|
||||
it('should return false when no refresh is in progress', () => {
|
||||
expect(tokenManager.isRefreshInProgress()).toBe(false);
|
||||
});
|
||||
|
||||
it('should return true when refresh is in progress', async () => {
|
||||
const mockClient = createMockQwenClient(createExpiredCredentials());
|
||||
|
||||
// Clear cache to ensure refresh is triggered
|
||||
tokenManager.clearCache();
|
||||
|
||||
// Mock stat for file check to fail (no file initially)
|
||||
mockFs.stat.mockRejectedValueOnce(
|
||||
Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
|
||||
);
|
||||
|
||||
// Create a delayed refresh response
|
||||
let resolveRefresh: (value: TokenRefreshData) => void;
|
||||
const refreshPromise = new Promise<TokenRefreshData>((resolve) => {
|
||||
resolveRefresh = resolve;
|
||||
});
|
||||
|
||||
mockClient.refreshAccessToken = vi.fn().mockReturnValue(refreshPromise);
|
||||
|
||||
// Mock file operations for lock and save
|
||||
mockFs.writeFile.mockResolvedValue(undefined);
|
||||
mockFs.mkdir.mockResolvedValue(undefined);
|
||||
mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
|
||||
|
||||
// Start refresh
|
||||
const refreshOperation = tokenManager.getValidCredentials(mockClient);
|
||||
|
||||
// Wait a tick to ensure the refresh promise is set
|
||||
await new Promise((resolve) => setImmediate(resolve));
|
||||
|
||||
expect(tokenManager.isRefreshInProgress()).toBe(true);
|
||||
|
||||
// Complete refresh
|
||||
resolveRefresh!(createSuccessfulRefreshResponse());
|
||||
await refreshOperation;
|
||||
|
||||
expect(tokenManager.isRefreshInProgress()).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Debug Info', () => {
|
||||
it('should return complete debug information', () => {
|
||||
const credentials = createValidCredentials();
|
||||
|
||||
tokenManager.clearCache();
|
||||
const memoryCache = getPrivateProperty<{
|
||||
credentials: QwenCredentials | null;
|
||||
}>(tokenManager, 'memoryCache');
|
||||
memoryCache.credentials = credentials;
|
||||
|
||||
const debugInfo = tokenManager.getDebugInfo();
|
||||
|
||||
expect(debugInfo).toHaveProperty('hasCredentials', true);
|
||||
expect(debugInfo).toHaveProperty('credentialsExpired', false);
|
||||
expect(debugInfo).toHaveProperty('isRefreshing', false);
|
||||
expect(debugInfo).toHaveProperty('cacheAge');
|
||||
expect(typeof debugInfo.cacheAge).toBe('number');
|
||||
});
|
||||
|
||||
it('should indicate expired credentials in debug info', () => {
|
||||
const expiredCredentials = createExpiredCredentials();
|
||||
|
||||
tokenManager.clearCache();
|
||||
const memoryCache = getPrivateProperty<{
|
||||
credentials: QwenCredentials | null;
|
||||
}>(tokenManager, 'memoryCache');
|
||||
memoryCache.credentials = expiredCredentials;
|
||||
|
||||
const debugInfo = tokenManager.getDebugInfo();
|
||||
|
||||
expect(debugInfo.hasCredentials).toBe(true);
|
||||
expect(debugInfo.credentialsExpired).toBe(true);
|
||||
});
|
||||
|
||||
it('should indicate no credentials in debug info', () => {
|
||||
tokenManager.clearCache();
|
||||
|
||||
const debugInfo = tokenManager.getDebugInfo();
|
||||
|
||||
expect(debugInfo.hasCredentials).toBe(false);
|
||||
expect(debugInfo.credentialsExpired).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error Handling', () => {
|
||||
it('should create TokenManagerError with correct type and message', () => {
|
||||
const error = new TokenManagerError(
|
||||
TokenError.REFRESH_FAILED,
|
||||
'Token refresh failed',
|
||||
new Error('Original error'),
|
||||
);
|
||||
|
||||
expect(error).toBeInstanceOf(Error);
|
||||
expect(error).toBeInstanceOf(TokenManagerError);
|
||||
expect(error.type).toBe(TokenError.REFRESH_FAILED);
|
||||
expect(error.message).toBe('Token refresh failed');
|
||||
expect(error.name).toBe('TokenManagerError');
|
||||
expect(error.originalError).toBeInstanceOf(Error);
|
||||
});
|
||||
|
||||
it('should handle file access errors gracefully', async () => {
|
||||
const mockClient = createMockQwenClient(createExpiredCredentials());
|
||||
|
||||
// Mock file stat to throw access error
|
||||
const accessError = new Error(
|
||||
'Permission denied',
|
||||
) as NodeJS.ErrnoException;
|
||||
accessError.code = 'EACCES';
|
||||
mockFs.stat.mockRejectedValue(accessError);
|
||||
|
||||
await expect(
|
||||
tokenManager.getValidCredentials(mockClient),
|
||||
).rejects.toThrow(TokenManagerError);
|
||||
});
|
||||
|
||||
it('should handle missing file gracefully', async () => {
|
||||
const mockClient = createMockQwenClient();
|
||||
const validCredentials = createValidCredentials();
|
||||
|
||||
// Mock file stat to throw file not found error
|
||||
const notFoundError = new Error(
|
||||
'File not found',
|
||||
) as NodeJS.ErrnoException;
|
||||
notFoundError.code = 'ENOENT';
|
||||
mockFs.stat.mockRejectedValue(notFoundError);
|
||||
|
||||
// Set valid credentials in cache
|
||||
const memoryCache = getPrivateProperty<{
|
||||
credentials: QwenCredentials | null;
|
||||
}>(tokenManager, 'memoryCache');
|
||||
memoryCache.credentials = validCredentials;
|
||||
|
||||
const result = await tokenManager.getValidCredentials(mockClient);
|
||||
|
||||
expect(result).toEqual(validCredentials);
|
||||
});
|
||||
|
||||
// When another process holds a fresh (non-stale) lock for the whole retry
// window, lock acquisition must give up and reject with a TokenManagerError.
it('should handle lock timeout scenarios', async () => {
  const mockClient = createMockQwenClient(createExpiredCredentials());

  // Configure shorter timeouts for testing
  tokenManager.setLockConfig({
    maxAttempts: 3,
    attemptInterval: 50,
  });

  // Mock stat for file check to pass (no file initially)
  mockFs.stat.mockRejectedValueOnce(
    Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
  );

  // Mock writeFile to always throw EEXIST for lock file writes (flag: 'wx')
  // but succeed for regular file writes
  const lockError = new Error('File exists') as NodeJS.ErrnoException;
  lockError.code = 'EEXIST';

  mockFs.writeFile.mockImplementation((path, data, options) => {
    if (typeof options === 'object' && options?.flag === 'wx') {
      return Promise.reject(lockError);
    }
    return Promise.resolve(undefined);
  });

  // Mock stat to return recent lock file (not stale) when checking lock age,
  // so the stale-lock cleanup path is never taken
  mockFs.stat.mockResolvedValue({ mtimeMs: Date.now() } as Stats);

  // Mock unlink to simulate lock file removal attempts
  mockFs.unlink.mockResolvedValue(undefined);

  await expect(
    tokenManager.getValidCredentials(mockClient),
  ).rejects.toThrow(TokenManagerError);
}, 500); // 500ms timeout for lock test (3 attempts × 50ms = ~150ms + buffer)
|
||||
|
||||
// A refresh response that lacks an access_token must be rejected with a
// TokenManagerError whose message mentions 'no token returned'.
it('should handle refresh response without access token', async () => {
  const mockClient = createMockQwenClient(createExpiredCredentials());
  const invalidResponse = {
    token_type: 'Bearer',
    expires_in: 3600,
    // access_token is missing, so we use undefined explicitly
    access_token: undefined,
  } as Partial<TokenRefreshData>;

  mockClient.refreshAccessToken = vi
    .fn()
    .mockResolvedValue(invalidResponse);

  // Mock stat for file check to pass (no file initially)
  mockFs.stat.mockRejectedValueOnce(
    Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
  );

  // Mock file operations for lock acquisition
  mockFs.writeFile.mockResolvedValue(undefined);
  mockFs.mkdir.mockResolvedValue(undefined);

  // Clear cache to force refresh
  tokenManager.clearCache();

  // First call: error type; second call: error message detail
  await expect(
    tokenManager.getValidCredentials(mockClient),
  ).rejects.toThrow(TokenManagerError);

  await expect(
    tokenManager.getValidCredentials(mockClient),
  ).rejects.toThrow('no token returned');
});
|
||||
});
|
||||
|
||||
// Exercises the on-disk credential load/save paths: unreadable files,
// corrupt JSON, and directory creation with restrictive permissions.
describe('File System Operations', () => {
  // A failing readFile during cache reload must not propagate; the manager
  // should fall back to refreshing and return the fresh credentials.
  it('should handle file reload failures gracefully', async () => {
    const mockClient = createMockQwenClient();

    // Mock successful refresh for when cache is cleared
    mockClient.refreshAccessToken = vi
      .fn()
      .mockResolvedValue(createSuccessfulRefreshResponse());

    // Mock file operations
    mockFs.stat
      .mockResolvedValueOnce({ mtimeMs: 2000 } as Stats) // For checkAndReloadIfNeeded
      .mockResolvedValue({ mtimeMs: 1000 } as Stats); // For later operations
    mockFs.readFile.mockRejectedValue(new Error('Read failed'));
    mockFs.writeFile.mockResolvedValue(undefined);
    mockFs.mkdir.mockResolvedValue(undefined);

    // Set initial cache state to trigger reload (file mtime 2000 > cached 1000)
    tokenManager.clearCache();
    const memoryCache = getPrivateProperty<{ fileModTime: number }>(
      tokenManager,
      'memoryCache',
    );
    memoryCache.fileModTime = 1000;

    // Should not throw error, should refresh and get new credentials
    const result = await tokenManager.getValidCredentials(mockClient);

    expect(result).toBeDefined();
    // 'fresh_access_token' presumably comes from createSuccessfulRefreshResponse()
    expect(result.access_token).toBe('fresh_access_token');
  });

  // Corrupt JSON in the credentials file must be tolerated the same way:
  // drop the cached value and refresh instead of throwing a parse error.
  it('should handle invalid JSON in credentials file', async () => {
    const mockClient = createMockQwenClient();

    // Mock successful refresh for when cache is cleared
    mockClient.refreshAccessToken = vi
      .fn()
      .mockResolvedValue(createSuccessfulRefreshResponse());

    // Mock file operations with invalid JSON
    mockFs.stat
      .mockResolvedValueOnce({ mtimeMs: 2000 } as Stats) // For checkAndReloadIfNeeded
      .mockResolvedValue({ mtimeMs: 1000 } as Stats); // For later operations
    mockFs.readFile.mockResolvedValue('invalid json content');
    mockFs.writeFile.mockResolvedValue(undefined);
    mockFs.mkdir.mockResolvedValue(undefined);

    // Set initial cache state to trigger reload
    tokenManager.clearCache();
    const memoryCache = getPrivateProperty<{ fileModTime: number }>(
      tokenManager,
      'memoryCache',
    );
    memoryCache.fileModTime = 1000;

    // Should handle JSON parse error gracefully, then refresh and get new credentials
    const result = await tokenManager.getValidCredentials(mockClient);

    expect(result).toBeDefined();
    expect(result.access_token).toBe('fresh_access_token');
  });

  // Saving credentials must create the parent directory with owner-only
  // permissions (0o700) before writing the file.
  it('should handle directory creation during save', async () => {
    const mockClient = createMockQwenClient(createExpiredCredentials());
    const refreshResponse = createSuccessfulRefreshResponse();

    mockClient.refreshAccessToken = vi
      .fn()
      .mockResolvedValue(refreshResponse);

    // Mock file operations
    mockFs.stat.mockResolvedValue({ mtimeMs: 1000 } as Stats);
    mockFs.writeFile.mockResolvedValue(undefined);
    mockFs.mkdir.mockResolvedValue(undefined);

    await tokenManager.getValidCredentials(mockClient);

    expect(mockFs.mkdir).toHaveBeenCalledWith(expect.any(String), {
      recursive: true,
      mode: 0o700,
    });
    expect(mockFs.writeFile).toHaveBeenCalled();
  });
});
|
||||
|
||||
// Exercises the cross-process lock-file lifecycle: removal on process exit
// and reclamation of stale locks left behind by dead processes.
describe('Lock File Management', () => {
  it('should clean up lock file during process cleanup', () => {
    // Create a new instance to trigger cleanup handler registration
    SharedTokenManager.getInstance();

    // Access the private cleanup method for testing: the manager registers
    // it as the most recent 'exit' listener
    const cleanupHandlers = process.listeners('exit');
    const cleanup = cleanupHandlers[cleanupHandlers.length - 1] as () => void;

    // Should not throw when lock file doesn't exist
    expect(() => cleanup()).not.toThrow();
    expect(mockUnlinkSync).toHaveBeenCalled();
  });

  it('should handle stale lock cleanup', async () => {
    const mockClient = createMockQwenClient(createExpiredCredentials());
    const refreshResponse = createSuccessfulRefreshResponse();

    mockClient.refreshAccessToken = vi
      .fn()
      .mockResolvedValue(refreshResponse);

    // First writeFile call throws EEXIST (lock exists)
    // Second writeFile call succeeds (after stale lock cleanup)
    const lockError = new Error('File exists') as NodeJS.ErrnoException;
    lockError.code = 'EEXIST';
    mockFs.writeFile
      .mockRejectedValueOnce(lockError)
      .mockResolvedValue(undefined);

    // Mock stat to return stale lock (old timestamp, beyond the 10s timeout)
    mockFs.stat
      .mockResolvedValueOnce({ mtimeMs: Date.now() - 20000 } as Stats) // Stale lock
      .mockResolvedValueOnce({ mtimeMs: 1000 } as Stats); // Credentials file

    // Mock unlink to succeed
    mockFs.unlink.mockResolvedValue(undefined);
    mockFs.mkdir.mockResolvedValue(undefined);

    const result = await tokenManager.getValidCredentials(mockClient);

    expect(result.access_token).toBe(refreshResponse.access_token);
    expect(mockFs.unlink).toHaveBeenCalled(); // Stale lock removed
  });
});
|
||||
});
|
||||
@@ -1,662 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import path from 'node:path';
|
||||
import { promises as fs, unlinkSync } from 'node:fs';
|
||||
import * as os from 'os';
|
||||
import { randomUUID } from 'node:crypto';
|
||||
|
||||
import {
|
||||
IQwenOAuth2Client,
|
||||
type QwenCredentials,
|
||||
type TokenRefreshData,
|
||||
type ErrorData,
|
||||
isErrorResponse,
|
||||
} from './qwenOAuth2.js';
|
||||
|
||||
// File System Configuration: credentials and lock file live under ~/.qwen
const QWEN_DIR = '.qwen';
const QWEN_CREDENTIAL_FILENAME = 'oauth_creds.json';
const QWEN_LOCK_FILENAME = 'oauth_creds.lock';

// Token and Cache Configuration
const TOKEN_REFRESH_BUFFER_MS = 30 * 1000; // 30 seconds — refresh this long before actual expiry
const LOCK_TIMEOUT_MS = 10000; // 10 seconds lock timeout — locks older than this are considered stale
const CACHE_CHECK_INTERVAL_MS = 1000; // 1 second cache check interval — throttles on-disk mtime polls

// Lock acquisition configuration (can be overridden for testing)
interface LockConfig {
  // How many times to attempt creating the lock file before giving up
  maxAttempts: number;
  // Delay in milliseconds between consecutive lock attempts
  attemptInterval: number;
}

// 50 attempts × 200ms ≈ 10s total wait, matching LOCK_TIMEOUT_MS
const DEFAULT_LOCK_CONFIG: LockConfig = {
  maxAttempts: 50,
  attemptInterval: 200,
};
|
||||
|
||||
/**
 * Token manager error types for better error classification
 */
export enum TokenError {
  REFRESH_FAILED = 'REFRESH_FAILED', // refresh endpoint failed or returned no token
  NO_REFRESH_TOKEN = 'NO_REFRESH_TOKEN', // client has no refresh_token to use
  LOCK_TIMEOUT = 'LOCK_TIMEOUT', // could not acquire the cross-process lock in time
  FILE_ACCESS_ERROR = 'FILE_ACCESS_ERROR', // credentials file/dir could not be read or written
  NETWORK_ERROR = 'NETWORK_ERROR', // fetch/network/timeout failure during refresh
}
|
||||
|
||||
/**
|
||||
* Custom error class for token manager operations
|
||||
*/
|
||||
export class TokenManagerError extends Error {
|
||||
constructor(
|
||||
public type: TokenError,
|
||||
message: string,
|
||||
public originalError?: unknown,
|
||||
) {
|
||||
super(message);
|
||||
this.name = 'TokenManagerError';
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Interface for the memory cache state
 */
interface MemoryCache {
  // Last loaded credentials; may be expired, null when nothing is cached
  credentials: QwenCredentials | null;
  // mtimeMs of the credentials file when it was last loaded (0 = unknown/missing)
  fileModTime: number;
  // Timestamp of the last on-disk freshness check, used to throttle stat() calls
  lastCheck: number;
}
|
||||
|
||||
/**
|
||||
* Validates that the given data is a valid QwenCredentials object
|
||||
*
|
||||
* @param data - The data to validate
|
||||
* @returns The validated credentials object
|
||||
* @throws Error if the data is invalid
|
||||
*/
|
||||
function validateCredentials(data: unknown): QwenCredentials {
|
||||
if (!data || typeof data !== 'object') {
|
||||
throw new Error('Invalid credentials format');
|
||||
}
|
||||
|
||||
const creds = data as Partial<QwenCredentials>;
|
||||
const requiredFields = [
|
||||
'access_token',
|
||||
'refresh_token',
|
||||
'token_type',
|
||||
] as const;
|
||||
|
||||
// Check required string fields
|
||||
for (const field of requiredFields) {
|
||||
if (!creds[field] || typeof creds[field] !== 'string') {
|
||||
throw new Error(`Invalid credentials: missing ${field}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Check expiry_date
|
||||
if (!creds.expiry_date || typeof creds.expiry_date !== 'number') {
|
||||
throw new Error('Invalid credentials: missing expiry_date');
|
||||
}
|
||||
|
||||
return creds as QwenCredentials;
|
||||
}
|
||||
|
||||
/**
|
||||
* Manages OAuth tokens across multiple processes using file-based caching and locking
|
||||
*/
|
||||
export class SharedTokenManager {
|
||||
private static instance: SharedTokenManager | null = null;
|
||||
|
||||
/**
|
||||
* In-memory cache for credentials and file state tracking
|
||||
*/
|
||||
private memoryCache: MemoryCache = {
|
||||
credentials: null,
|
||||
fileModTime: 0,
|
||||
lastCheck: 0,
|
||||
};
|
||||
|
||||
/**
|
||||
* Promise tracking any ongoing token refresh operation
|
||||
*/
|
||||
private refreshPromise: Promise<QwenCredentials> | null = null;
|
||||
|
||||
/**
|
||||
* Whether cleanup handlers have been registered
|
||||
*/
|
||||
private cleanupHandlersRegistered = false;
|
||||
|
||||
/**
|
||||
* Reference to cleanup functions for proper removal
|
||||
*/
|
||||
private cleanupFunction: (() => void) | null = null;
|
||||
|
||||
/**
|
||||
* Lock configuration for testing purposes
|
||||
*/
|
||||
private lockConfig: LockConfig = DEFAULT_LOCK_CONFIG;
|
||||
|
||||
/**
|
||||
* Private constructor for singleton pattern
|
||||
*/
|
||||
private constructor() {
|
||||
this.registerCleanupHandlers();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the singleton instance
|
||||
* @returns The shared token manager instance
|
||||
*/
|
||||
static getInstance(): SharedTokenManager {
|
||||
if (!SharedTokenManager.instance) {
|
||||
SharedTokenManager.instance = new SharedTokenManager();
|
||||
}
|
||||
return SharedTokenManager.instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up handlers to clean up lock files when the process exits
|
||||
*/
|
||||
private registerCleanupHandlers(): void {
|
||||
if (this.cleanupHandlersRegistered) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.cleanupFunction = () => {
|
||||
try {
|
||||
const lockPath = this.getLockFilePath();
|
||||
// Use synchronous unlink for process exit handlers
|
||||
unlinkSync(lockPath);
|
||||
} catch (_error) {
|
||||
// Ignore cleanup errors - lock file might not exist or already be cleaned up
|
||||
}
|
||||
};
|
||||
|
||||
process.on('exit', this.cleanupFunction);
|
||||
process.on('SIGINT', this.cleanupFunction);
|
||||
process.on('SIGTERM', this.cleanupFunction);
|
||||
process.on('uncaughtException', this.cleanupFunction);
|
||||
process.on('unhandledRejection', this.cleanupFunction);
|
||||
|
||||
this.cleanupHandlersRegistered = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get valid OAuth credentials, refreshing them if necessary
|
||||
*
|
||||
* @param qwenClient - The OAuth2 client instance
|
||||
* @param forceRefresh - If true, refresh token even if current one is still valid
|
||||
* @returns Promise resolving to valid credentials
|
||||
* @throws TokenManagerError if unable to obtain valid credentials
|
||||
*/
|
||||
async getValidCredentials(
|
||||
qwenClient: IQwenOAuth2Client,
|
||||
forceRefresh = false,
|
||||
): Promise<QwenCredentials> {
|
||||
try {
|
||||
// Check if credentials file has been updated by other sessions
|
||||
await this.checkAndReloadIfNeeded();
|
||||
|
||||
// Return valid cached credentials if available (unless force refresh is requested)
|
||||
if (
|
||||
!forceRefresh &&
|
||||
this.memoryCache.credentials &&
|
||||
this.isTokenValid(this.memoryCache.credentials)
|
||||
) {
|
||||
return this.memoryCache.credentials;
|
||||
}
|
||||
|
||||
// If refresh is already in progress, wait for it to complete
|
||||
if (this.refreshPromise) {
|
||||
return this.refreshPromise;
|
||||
}
|
||||
|
||||
// Start new refresh operation with distributed locking
|
||||
this.refreshPromise = this.performTokenRefresh(qwenClient, forceRefresh);
|
||||
|
||||
try {
|
||||
const credentials = await this.refreshPromise;
|
||||
return credentials;
|
||||
} catch (error) {
|
||||
// Ensure refreshPromise is cleared on error before re-throwing
|
||||
this.refreshPromise = null;
|
||||
throw error;
|
||||
} finally {
|
||||
this.refreshPromise = null;
|
||||
}
|
||||
} catch (error) {
|
||||
// Convert generic errors to TokenManagerError for better error handling
|
||||
if (error instanceof TokenManagerError) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
throw new TokenManagerError(
|
||||
TokenError.REFRESH_FAILED,
|
||||
`Failed to get valid credentials: ${error instanceof Error ? error.message : String(error)}`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the credentials file was updated by another process and reload if so
|
||||
*/
|
||||
private async checkAndReloadIfNeeded(): Promise<void> {
|
||||
const now = Date.now();
|
||||
|
||||
// Limit check frequency to avoid excessive disk I/O
|
||||
if (now - this.memoryCache.lastCheck < CACHE_CHECK_INTERVAL_MS) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.memoryCache.lastCheck = now;
|
||||
|
||||
try {
|
||||
const filePath = this.getCredentialFilePath();
|
||||
const stats = await fs.stat(filePath);
|
||||
const fileModTime = stats.mtimeMs;
|
||||
|
||||
// Reload credentials if file has been modified since last cache
|
||||
if (fileModTime > this.memoryCache.fileModTime) {
|
||||
await this.reloadCredentialsFromFile();
|
||||
this.memoryCache.fileModTime = fileModTime;
|
||||
}
|
||||
} catch (error) {
|
||||
// Handle file access errors
|
||||
if (
|
||||
error instanceof Error &&
|
||||
'code' in error &&
|
||||
error.code !== 'ENOENT'
|
||||
) {
|
||||
// Clear cache for non-missing file errors
|
||||
this.memoryCache.credentials = null;
|
||||
this.memoryCache.fileModTime = 0;
|
||||
|
||||
throw new TokenManagerError(
|
||||
TokenError.FILE_ACCESS_ERROR,
|
||||
`Failed to access credentials file: ${error.message}`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
|
||||
// For missing files (ENOENT), just reset file modification time
|
||||
// but keep existing valid credentials in memory if they exist
|
||||
this.memoryCache.fileModTime = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load credentials from the file system into memory cache
|
||||
*/
|
||||
private async reloadCredentialsFromFile(): Promise<void> {
|
||||
try {
|
||||
const filePath = this.getCredentialFilePath();
|
||||
const content = await fs.readFile(filePath, 'utf-8');
|
||||
const parsedData = JSON.parse(content);
|
||||
const credentials = validateCredentials(parsedData);
|
||||
this.memoryCache.credentials = credentials;
|
||||
} catch (error) {
|
||||
// Log validation errors for debugging but don't throw
|
||||
if (
|
||||
error instanceof Error &&
|
||||
error.message.includes('Invalid credentials')
|
||||
) {
|
||||
console.warn(`Failed to validate credentials file: ${error.message}`);
|
||||
}
|
||||
this.memoryCache.credentials = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Refresh the OAuth token using file locking to prevent concurrent refreshes
|
||||
*
|
||||
* @param qwenClient - The OAuth2 client instance
|
||||
* @param forceRefresh - If true, skip checking if token is already valid after getting lock
|
||||
* @returns Promise resolving to refreshed credentials
|
||||
* @throws TokenManagerError if refresh fails or lock cannot be acquired
|
||||
*/
|
||||
private async performTokenRefresh(
|
||||
qwenClient: IQwenOAuth2Client,
|
||||
forceRefresh = false,
|
||||
): Promise<QwenCredentials> {
|
||||
const lockPath = this.getLockFilePath();
|
||||
|
||||
try {
|
||||
// Check if we have a refresh token before attempting refresh
|
||||
const currentCredentials = qwenClient.getCredentials();
|
||||
if (!currentCredentials.refresh_token) {
|
||||
throw new TokenManagerError(
|
||||
TokenError.NO_REFRESH_TOKEN,
|
||||
'No refresh token available for token refresh',
|
||||
);
|
||||
}
|
||||
|
||||
// Acquire distributed file lock
|
||||
await this.acquireLock(lockPath);
|
||||
|
||||
// Double-check if another process already refreshed the token (unless force refresh is requested)
|
||||
await this.checkAndReloadIfNeeded();
|
||||
|
||||
// Use refreshed credentials if they're now valid (unless force refresh is requested)
|
||||
if (
|
||||
!forceRefresh &&
|
||||
this.memoryCache.credentials &&
|
||||
this.isTokenValid(this.memoryCache.credentials)
|
||||
) {
|
||||
qwenClient.setCredentials(this.memoryCache.credentials);
|
||||
return this.memoryCache.credentials;
|
||||
}
|
||||
|
||||
// Perform the actual token refresh
|
||||
const response = await qwenClient.refreshAccessToken();
|
||||
|
||||
if (!response || isErrorResponse(response)) {
|
||||
const errorData = response as ErrorData;
|
||||
throw new TokenManagerError(
|
||||
TokenError.REFRESH_FAILED,
|
||||
`Token refresh failed: ${errorData?.error || 'Unknown error'} - ${errorData?.error_description || 'No details provided'}`,
|
||||
);
|
||||
}
|
||||
|
||||
const tokenData = response as TokenRefreshData;
|
||||
|
||||
if (!tokenData.access_token) {
|
||||
throw new TokenManagerError(
|
||||
TokenError.REFRESH_FAILED,
|
||||
'Failed to refresh access token: no token returned',
|
||||
);
|
||||
}
|
||||
|
||||
// Create updated credentials object
|
||||
const credentials: QwenCredentials = {
|
||||
access_token: tokenData.access_token,
|
||||
token_type: tokenData.token_type,
|
||||
refresh_token:
|
||||
tokenData.refresh_token || currentCredentials.refresh_token,
|
||||
resource_url: tokenData.resource_url,
|
||||
expiry_date: Date.now() + tokenData.expires_in * 1000,
|
||||
};
|
||||
|
||||
// Update memory cache and client credentials
|
||||
this.memoryCache.credentials = credentials;
|
||||
qwenClient.setCredentials(credentials);
|
||||
|
||||
// Persist to file and update modification time
|
||||
await this.saveCredentialsToFile(credentials);
|
||||
|
||||
return credentials;
|
||||
} catch (error) {
|
||||
if (error instanceof TokenManagerError) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Handle network-related errors
|
||||
if (
|
||||
error instanceof Error &&
|
||||
(error.message.includes('fetch') ||
|
||||
error.message.includes('network') ||
|
||||
error.message.includes('timeout'))
|
||||
) {
|
||||
throw new TokenManagerError(
|
||||
TokenError.NETWORK_ERROR,
|
||||
`Network error during token refresh: ${error.message}`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
|
||||
throw new TokenManagerError(
|
||||
TokenError.REFRESH_FAILED,
|
||||
`Unexpected error during token refresh: ${error instanceof Error ? error.message : String(error)}`,
|
||||
error,
|
||||
);
|
||||
} finally {
|
||||
// Always release the file lock
|
||||
await this.releaseLock(lockPath);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save credentials to file and update the cached file modification time
|
||||
*
|
||||
* @param credentials - The credentials to save
|
||||
*/
|
||||
private async saveCredentialsToFile(
|
||||
credentials: QwenCredentials,
|
||||
): Promise<void> {
|
||||
const filePath = this.getCredentialFilePath();
|
||||
const dirPath = path.dirname(filePath);
|
||||
|
||||
// Create directory with restricted permissions
|
||||
try {
|
||||
await fs.mkdir(dirPath, { recursive: true, mode: 0o700 });
|
||||
} catch (error) {
|
||||
throw new TokenManagerError(
|
||||
TokenError.FILE_ACCESS_ERROR,
|
||||
`Failed to create credentials directory: ${error instanceof Error ? error.message : String(error)}`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
|
||||
const credString = JSON.stringify(credentials, null, 2);
|
||||
|
||||
try {
|
||||
// Write file with restricted permissions (owner read/write only)
|
||||
await fs.writeFile(filePath, credString, { mode: 0o600 });
|
||||
} catch (error) {
|
||||
throw new TokenManagerError(
|
||||
TokenError.FILE_ACCESS_ERROR,
|
||||
`Failed to write credentials file: ${error instanceof Error ? error.message : String(error)}`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
|
||||
// Update cached file modification time to avoid unnecessary reloads
|
||||
try {
|
||||
const stats = await fs.stat(filePath);
|
||||
this.memoryCache.fileModTime = stats.mtimeMs;
|
||||
} catch (error) {
|
||||
// Non-fatal error, just log it
|
||||
console.warn(
|
||||
`Failed to update file modification time: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the token is valid and not expired
|
||||
*
|
||||
* @param credentials - The credentials to validate
|
||||
* @returns true if token is valid and not expired, false otherwise
|
||||
*/
|
||||
private isTokenValid(credentials: QwenCredentials): boolean {
|
||||
if (!credentials.expiry_date || !credentials.access_token) {
|
||||
return false;
|
||||
}
|
||||
return Date.now() < credentials.expiry_date - TOKEN_REFRESH_BUFFER_MS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full path to the credentials file
|
||||
*
|
||||
* @returns The absolute path to the credentials file
|
||||
*/
|
||||
private getCredentialFilePath(): string {
|
||||
return path.join(os.homedir(), QWEN_DIR, QWEN_CREDENTIAL_FILENAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full path to the lock file
|
||||
*
|
||||
* @returns The absolute path to the lock file
|
||||
*/
|
||||
private getLockFilePath(): string {
|
||||
return path.join(os.homedir(), QWEN_DIR, QWEN_LOCK_FILENAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Acquire a file lock to prevent other processes from refreshing tokens simultaneously
|
||||
*
|
||||
* @param lockPath - Path to the lock file
|
||||
* @throws TokenManagerError if lock cannot be acquired within timeout period
|
||||
*/
|
||||
private async acquireLock(lockPath: string): Promise<void> {
|
||||
const { maxAttempts, attemptInterval } = this.lockConfig;
|
||||
const lockId = randomUUID(); // Use random UUID instead of PID for security
|
||||
|
||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
try {
|
||||
// Attempt to create lock file atomically (exclusive mode)
|
||||
await fs.writeFile(lockPath, lockId, { flag: 'wx' });
|
||||
return; // Successfully acquired lock
|
||||
} catch (error: unknown) {
|
||||
if ((error as NodeJS.ErrnoException).code === 'EEXIST') {
|
||||
// Lock file already exists, check if it's stale
|
||||
try {
|
||||
const stats = await fs.stat(lockPath);
|
||||
const lockAge = Date.now() - stats.mtimeMs;
|
||||
|
||||
// Remove stale locks that exceed timeout
|
||||
if (lockAge > LOCK_TIMEOUT_MS) {
|
||||
try {
|
||||
await fs.unlink(lockPath);
|
||||
console.warn(
|
||||
`Removed stale lock file: ${lockPath} (age: ${lockAge}ms)`,
|
||||
);
|
||||
continue; // Retry lock acquisition
|
||||
} catch (unlinkError) {
|
||||
// Log the error but continue trying - another process might have removed it
|
||||
console.warn(
|
||||
`Failed to remove stale lock file ${lockPath}: ${unlinkError instanceof Error ? unlinkError.message : String(unlinkError)}`,
|
||||
);
|
||||
// Still continue - the lock might have been removed by another process
|
||||
}
|
||||
}
|
||||
} catch (statError) {
|
||||
// Can't stat lock file, it might have been removed, continue trying
|
||||
console.warn(
|
||||
`Failed to stat lock file ${lockPath}: ${statError instanceof Error ? statError.message : String(statError)}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Wait before retrying
|
||||
await new Promise((resolve) => setTimeout(resolve, attemptInterval));
|
||||
} else {
|
||||
throw new TokenManagerError(
|
||||
TokenError.FILE_ACCESS_ERROR,
|
||||
`Failed to create lock file: ${error instanceof Error ? error.message : String(error)}`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new TokenManagerError(
|
||||
TokenError.LOCK_TIMEOUT,
|
||||
'Failed to acquire file lock for token refresh: timeout exceeded',
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Release the file lock
|
||||
*
|
||||
* @param lockPath - Path to the lock file
|
||||
*/
|
||||
private async releaseLock(lockPath: string): Promise<void> {
|
||||
try {
|
||||
await fs.unlink(lockPath);
|
||||
} catch (error) {
|
||||
// Lock file might already be removed by another process or timeout cleanup
|
||||
// This is not an error condition, but log for debugging
|
||||
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
|
||||
console.warn(
|
||||
`Failed to release lock file ${lockPath}: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all cached data and reset the manager to initial state
|
||||
*/
|
||||
clearCache(): void {
|
||||
this.memoryCache = {
|
||||
credentials: null,
|
||||
fileModTime: 0,
|
||||
lastCheck: 0,
|
||||
};
|
||||
this.refreshPromise = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current cached credentials (may be expired)
|
||||
*
|
||||
* @returns The currently cached credentials or null
|
||||
*/
|
||||
getCurrentCredentials(): QwenCredentials | null {
|
||||
return this.memoryCache.credentials;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if there's an ongoing refresh operation
|
||||
*
|
||||
* @returns true if refresh is in progress, false otherwise
|
||||
*/
|
||||
isRefreshInProgress(): boolean {
|
||||
return this.refreshPromise !== null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set lock configuration for testing purposes
|
||||
* @param config - Lock configuration
|
||||
*/
|
||||
setLockConfig(config: Partial<LockConfig>): void {
|
||||
this.lockConfig = { ...DEFAULT_LOCK_CONFIG, ...config };
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up event listeners (primarily for testing)
|
||||
*/
|
||||
cleanup(): void {
|
||||
if (this.cleanupFunction && this.cleanupHandlersRegistered) {
|
||||
this.cleanupFunction();
|
||||
|
||||
process.removeListener('exit', this.cleanupFunction);
|
||||
process.removeListener('SIGINT', this.cleanupFunction);
|
||||
process.removeListener('SIGTERM', this.cleanupFunction);
|
||||
process.removeListener('uncaughtException', this.cleanupFunction);
|
||||
process.removeListener('unhandledRejection', this.cleanupFunction);
|
||||
|
||||
this.cleanupHandlersRegistered = false;
|
||||
this.cleanupFunction = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a summary of the current state for debugging
|
||||
*
|
||||
* @returns Object containing current state information
|
||||
*/
|
||||
getDebugInfo(): {
|
||||
hasCredentials: boolean;
|
||||
credentialsExpired: boolean;
|
||||
isRefreshing: boolean;
|
||||
cacheAge: number;
|
||||
} {
|
||||
const hasCredentials = !!this.memoryCache.credentials;
|
||||
const credentialsExpired = hasCredentials
|
||||
? !this.isTokenValid(this.memoryCache.credentials!)
|
||||
: false;
|
||||
|
||||
return {
|
||||
hasCredentials,
|
||||
credentialsExpired,
|
||||
isRefreshing: this.isRefreshInProgress(),
|
||||
cacheAge: Date.now() - this.memoryCache.lastCheck,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,7 @@ import * as path from 'path';
|
||||
import * as fs from 'fs/promises';
|
||||
import * as os from 'os';
|
||||
import type { ChildProcess } from 'node:child_process';
|
||||
import { getProjectHash, QWEN_DIR } from '../utils/paths.js';
|
||||
import { getProjectHash, GEMINI_DIR } from '../utils/paths.js';
|
||||
|
||||
const hoistedMockExec = vi.hoisted(() => vi.fn());
|
||||
vi.mock('node:child_process', () => ({
|
||||
@@ -157,7 +157,7 @@ describe('GitService', () => {
|
||||
let gitConfigPath: string;
|
||||
|
||||
beforeEach(() => {
|
||||
repoDir = path.join(homedir, QWEN_DIR, 'history', hash);
|
||||
repoDir = path.join(homedir, GEMINI_DIR, 'history', hash);
|
||||
gitConfigPath = path.join(repoDir, '.gitconfig');
|
||||
});
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import * as os from 'os';
|
||||
import { isNodeError } from '../utils/errors.js';
|
||||
import { exec } from 'node:child_process';
|
||||
import { simpleGit, SimpleGit, CheckRepoActions } from 'simple-git';
|
||||
import { getProjectHash, QWEN_DIR } from '../utils/paths.js';
|
||||
import { getProjectHash, GEMINI_DIR } from '../utils/paths.js';
|
||||
|
||||
export class GitService {
|
||||
private projectRoot: string;
|
||||
@@ -21,7 +21,7 @@ export class GitService {
|
||||
|
||||
private getHistoryDir(): string {
|
||||
const hash = getProjectHash(this.projectRoot);
|
||||
return path.join(os.homedir(), QWEN_DIR, 'history', hash);
|
||||
return path.join(os.homedir(), GEMINI_DIR, 'history', hash);
|
||||
}
|
||||
|
||||
async initialize(): Promise<void> {
|
||||
|
||||
@@ -8,23 +8,12 @@
|
||||
* Integration test to verify circular reference handling with proxy agents
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { QwenLogger } from './qwen-logger/qwen-logger.js';
|
||||
import { RumEvent } from './qwen-logger/event-types.js';
|
||||
import { Config } from '../config/config.js';
|
||||
|
||||
describe('Circular Reference Integration Test', () => {
|
||||
beforeEach(() => {
|
||||
// Clear singleton instance before each test
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(QwenLogger as any).instance = undefined;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(QwenLogger as any).instance = undefined;
|
||||
});
|
||||
|
||||
it('should handle HttpsProxyAgent-like circular references in qwen logging', () => {
|
||||
// Create a mock config with proxy
|
||||
const mockConfig = {
|
||||
@@ -75,36 +64,4 @@ describe('Circular Reference Integration Test', () => {
|
||||
logger?.enqueueLogEvent(problematicEvent);
|
||||
}).not.toThrow();
|
||||
});
|
||||
|
||||
it('should handle event overflow without memory leaks', () => {
|
||||
const mockConfig = {
|
||||
getTelemetryEnabled: () => true,
|
||||
getUsageStatisticsEnabled: () => true,
|
||||
getSessionId: () => 'test-session',
|
||||
getDebugMode: () => true,
|
||||
} as unknown as Config;
|
||||
|
||||
const logger = QwenLogger.getInstance(mockConfig);
|
||||
|
||||
// Add more events than the maximum capacity
|
||||
for (let i = 0; i < 1100; i++) {
|
||||
logger?.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: `overflow-test-${i}`,
|
||||
});
|
||||
}
|
||||
|
||||
// Logger should still be functional
|
||||
expect(logger).toBeDefined();
|
||||
expect(() => {
|
||||
logger?.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: 'final-test',
|
||||
});
|
||||
}).not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -8,6 +8,7 @@ import { LogAttributes, LogRecord, logs } from '@opentelemetry/api-logs';
|
||||
import { SemanticAttributes } from '@opentelemetry/semantic-conventions';
|
||||
import { Config } from '../config/config.js';
|
||||
import { safeJsonStringify } from '../utils/safeJsonStringify.js';
|
||||
import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js';
|
||||
import {
|
||||
EVENT_API_ERROR,
|
||||
EVENT_API_REQUEST,
|
||||
@@ -149,7 +150,7 @@ export function logToolCall(config: Config, event: ToolCallEvent): void {
|
||||
}
|
||||
|
||||
export function logApiRequest(config: Config, event: ApiRequestEvent): void {
|
||||
// QwenLogger.getInstance(config)?.logApiRequestEvent(event);
|
||||
QwenLogger.getInstance(config)?.logApiRequestEvent(event);
|
||||
if (!isTelemetrySdkInitialized()) return;
|
||||
|
||||
const attributes: LogAttributes = {
|
||||
@@ -363,7 +364,6 @@ export function logIdeConnection(
|
||||
config: Config,
|
||||
event: IdeConnectionEvent,
|
||||
): void {
|
||||
QwenLogger.getInstance(config)?.logIdeConnectionEvent(event);
|
||||
if (!isTelemetrySdkInitialized()) return;
|
||||
|
||||
const attributes: LogAttributes = {
|
||||
@@ -384,7 +384,7 @@ export function logKittySequenceOverflow(
|
||||
config: Config,
|
||||
event: KittySequenceOverflowEvent,
|
||||
): void {
|
||||
QwenLogger.getInstance(config)?.logKittySequenceOverflowEvent(event);
|
||||
ClearcutLogger.getInstance(config)?.logKittySequenceOverflowEvent(event);
|
||||
if (!isTelemetrySdkInitialized()) return;
|
||||
const attributes: LogAttributes = {
|
||||
...getCommonAttributes(config),
|
||||
|
||||
@@ -79,6 +79,5 @@ export interface RumPayload {
|
||||
session: RumSession;
|
||||
view: RumView;
|
||||
events: RumEvent[];
|
||||
properties?: Record<string, unknown>;
|
||||
_v: string;
|
||||
}
|
||||
|
||||
@@ -1,407 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import {
|
||||
describe,
|
||||
it,
|
||||
expect,
|
||||
vi,
|
||||
beforeEach,
|
||||
afterEach,
|
||||
afterAll,
|
||||
} from 'vitest';
|
||||
import { QwenLogger, TEST_ONLY } from './qwen-logger.js';
|
||||
import { Config } from '../../config/config.js';
|
||||
import {
|
||||
StartSessionEvent,
|
||||
EndSessionEvent,
|
||||
IdeConnectionEvent,
|
||||
KittySequenceOverflowEvent,
|
||||
IdeConnectionType,
|
||||
} from '../types.js';
|
||||
import { RumEvent } from './event-types.js';
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('../../utils/user_id.js', () => ({
|
||||
getInstallationId: vi.fn(() => 'test-installation-id'),
|
||||
}));
|
||||
|
||||
vi.mock('../../utils/safeJsonStringify.js', () => ({
|
||||
safeJsonStringify: vi.fn((obj) => JSON.stringify(obj)),
|
||||
}));
|
||||
|
||||
// Mock https module
|
||||
vi.mock('https', () => ({
|
||||
request: vi.fn(),
|
||||
}));
|
||||
|
||||
const makeFakeConfig = (overrides: Partial<Config> = {}): Config => {
|
||||
const defaults = {
|
||||
getUsageStatisticsEnabled: () => true,
|
||||
getDebugMode: () => false,
|
||||
getSessionId: () => 'test-session-id',
|
||||
getCliVersion: () => '1.0.0',
|
||||
getProxy: () => undefined,
|
||||
getContentGeneratorConfig: () => ({ authType: 'test-auth' }),
|
||||
getMcpServers: () => ({}),
|
||||
getModel: () => 'test-model',
|
||||
getEmbeddingModel: () => 'test-embedding',
|
||||
getSandbox: () => false,
|
||||
getCoreTools: () => [],
|
||||
getApprovalMode: () => 'auto',
|
||||
getTelemetryEnabled: () => true,
|
||||
getTelemetryLogPromptsEnabled: () => false,
|
||||
getFileFilteringRespectGitIgnore: () => true,
|
||||
...overrides,
|
||||
};
|
||||
return defaults as Config;
|
||||
};
|
||||
|
||||
describe('QwenLogger', () => {
|
||||
let mockConfig: Config;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date('2025-01-01T12:00:00.000Z'));
|
||||
mockConfig = makeFakeConfig();
|
||||
// Clear singleton instance
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(QwenLogger as any).instance = undefined;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(QwenLogger as any).instance = undefined;
|
||||
});
|
||||
|
||||
describe('getInstance', () => {
|
||||
it('returns undefined when usage statistics are disabled', () => {
|
||||
const config = makeFakeConfig({ getUsageStatisticsEnabled: () => false });
|
||||
const logger = QwenLogger.getInstance(config);
|
||||
expect(logger).toBeUndefined();
|
||||
});
|
||||
|
||||
it('returns an instance when usage statistics are enabled', () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig);
|
||||
expect(logger).toBeInstanceOf(QwenLogger);
|
||||
});
|
||||
|
||||
it('is a singleton', () => {
|
||||
const logger1 = QwenLogger.getInstance(mockConfig);
|
||||
const logger2 = QwenLogger.getInstance(mockConfig);
|
||||
expect(logger1).toBe(logger2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('event queue management', () => {
|
||||
it('should handle event overflow gracefully', () => {
|
||||
const debugConfig = makeFakeConfig({ getDebugMode: () => true });
|
||||
const logger = QwenLogger.getInstance(debugConfig)!;
|
||||
const consoleSpy = vi
|
||||
.spyOn(console, 'debug')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
// Fill the queue beyond capacity
|
||||
for (let i = 0; i < TEST_ONLY.MAX_EVENTS + 10; i++) {
|
||||
logger.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: `test-event-${i}`,
|
||||
});
|
||||
}
|
||||
|
||||
// Should have logged debug messages about dropping events
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining(
|
||||
'QwenLogger: Dropped old event to prevent memory leak',
|
||||
),
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle enqueue errors gracefully', () => {
|
||||
const debugConfig = makeFakeConfig({ getDebugMode: () => true });
|
||||
const logger = QwenLogger.getInstance(debugConfig)!;
|
||||
const consoleSpy = vi
|
||||
.spyOn(console, 'error')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
// Mock the events deque to throw an error
|
||||
const originalPush = logger['events'].push;
|
||||
logger['events'].push = vi.fn(() => {
|
||||
throw new Error('Test error');
|
||||
});
|
||||
|
||||
logger.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: 'test-event',
|
||||
});
|
||||
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
'QwenLogger: Failed to enqueue log event.',
|
||||
expect.any(Error),
|
||||
);
|
||||
|
||||
// Restore original method
|
||||
logger['events'].push = originalPush;
|
||||
});
|
||||
});
|
||||
|
||||
describe('concurrent flush protection', () => {
|
||||
it('should handle concurrent flush requests', () => {
|
||||
const debugConfig = makeFakeConfig({ getDebugMode: () => true });
|
||||
const logger = QwenLogger.getInstance(debugConfig)!;
|
||||
const consoleSpy = vi
|
||||
.spyOn(console, 'debug')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
// Manually set the flush in progress flag to simulate concurrent access
|
||||
logger['isFlushInProgress'] = true;
|
||||
|
||||
// Try to flush while another flush is in progress
|
||||
const result = logger.flushToRum();
|
||||
|
||||
// Should have logged about pending flush
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining(
|
||||
'QwenLogger: Flush already in progress, marking pending flush',
|
||||
),
|
||||
);
|
||||
|
||||
// Should return a resolved promise
|
||||
expect(result).toBeInstanceOf(Promise);
|
||||
|
||||
// Reset the flag
|
||||
logger['isFlushInProgress'] = false;
|
||||
});
|
||||
});
|
||||
|
||||
describe('failed event retry mechanism', () => {
|
||||
it('should requeue failed events with size limits', () => {
|
||||
const debugConfig = makeFakeConfig({ getDebugMode: () => true });
|
||||
const logger = QwenLogger.getInstance(debugConfig)!;
|
||||
const consoleSpy = vi
|
||||
.spyOn(console, 'debug')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
const failedEvents: RumEvent[] = [];
|
||||
for (let i = 0; i < TEST_ONLY.MAX_RETRY_EVENTS + 50; i++) {
|
||||
failedEvents.push({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: `failed-event-${i}`,
|
||||
});
|
||||
}
|
||||
|
||||
// Call the private method using bracket notation
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(logger as any).requeueFailedEvents(failedEvents);
|
||||
|
||||
// Should have logged about dropping events due to retry limit
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining('QwenLogger: Re-queued'),
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle empty retry queue gracefully', () => {
|
||||
const debugConfig = makeFakeConfig({ getDebugMode: () => true });
|
||||
const logger = QwenLogger.getInstance(debugConfig)!;
|
||||
const consoleSpy = vi
|
||||
.spyOn(console, 'debug')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
// Fill the queue to capacity first
|
||||
for (let i = 0; i < TEST_ONLY.MAX_EVENTS; i++) {
|
||||
logger.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: `event-${i}`,
|
||||
});
|
||||
}
|
||||
|
||||
// Try to requeue when no space is available
|
||||
const failedEvents: RumEvent[] = [
|
||||
{
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: 'failed-event',
|
||||
},
|
||||
];
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
(logger as any).requeueFailedEvents(failedEvents);
|
||||
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining('QwenLogger: No events re-queued'),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('event handlers', () => {
|
||||
it('should log IDE connection events', () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig)!;
|
||||
const enqueueSpy = vi.spyOn(logger, 'enqueueLogEvent');
|
||||
|
||||
const event = new IdeConnectionEvent(IdeConnectionType.SESSION);
|
||||
|
||||
logger.logIdeConnectionEvent(event);
|
||||
|
||||
expect(enqueueSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
event_type: 'action',
|
||||
type: 'connection',
|
||||
name: 'ide_connection',
|
||||
snapshots: JSON.stringify({
|
||||
connection_type: IdeConnectionType.SESSION,
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should log Kitty sequence overflow events', () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig)!;
|
||||
const enqueueSpy = vi.spyOn(logger, 'enqueueLogEvent');
|
||||
|
||||
const event = new KittySequenceOverflowEvent(1024, 'truncated...');
|
||||
|
||||
logger.logKittySequenceOverflowEvent(event);
|
||||
|
||||
expect(enqueueSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
event_type: 'exception',
|
||||
type: 'overflow',
|
||||
name: 'kitty_sequence_overflow',
|
||||
subtype: 'kitty_sequence_overflow',
|
||||
snapshots: JSON.stringify({
|
||||
sequence_length: 1024,
|
||||
truncated_sequence: 'truncated...',
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should flush start session events immediately', async () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig)!;
|
||||
const flushSpy = vi.spyOn(logger, 'flushToRum').mockResolvedValue({});
|
||||
|
||||
const testConfig = makeFakeConfig({
|
||||
getModel: () => 'test-model',
|
||||
getEmbeddingModel: () => 'test-embedding',
|
||||
});
|
||||
const event = new StartSessionEvent(testConfig);
|
||||
|
||||
logger.logStartSessionEvent(event);
|
||||
|
||||
expect(flushSpy).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should flush end session events immediately', async () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig)!;
|
||||
const flushSpy = vi.spyOn(logger, 'flushToRum').mockResolvedValue({});
|
||||
|
||||
const event = new EndSessionEvent(mockConfig);
|
||||
|
||||
logger.logEndSessionEvent(event);
|
||||
|
||||
expect(flushSpy).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('flush timing', () => {
|
||||
it('should not flush if interval has not passed', () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig)!;
|
||||
const flushSpy = vi.spyOn(logger, 'flushToRum');
|
||||
|
||||
// Add an event and try to flush immediately
|
||||
logger.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: 'test-event',
|
||||
});
|
||||
|
||||
logger.flushIfNeeded();
|
||||
|
||||
expect(flushSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should flush when interval has passed', () => {
|
||||
const logger = QwenLogger.getInstance(mockConfig)!;
|
||||
const flushSpy = vi.spyOn(logger, 'flushToRum').mockResolvedValue({});
|
||||
|
||||
// Add an event
|
||||
logger.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: 'test-event',
|
||||
});
|
||||
|
||||
// Advance time beyond flush interval
|
||||
vi.advanceTimersByTime(TEST_ONLY.FLUSH_INTERVAL_MS + 1000);
|
||||
|
||||
logger.flushIfNeeded();
|
||||
|
||||
expect(flushSpy).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('error handling', () => {
|
||||
it('should handle flush errors gracefully with debug mode', async () => {
|
||||
const debugConfig = makeFakeConfig({ getDebugMode: () => true });
|
||||
const logger = QwenLogger.getInstance(debugConfig)!;
|
||||
const consoleSpy = vi
|
||||
.spyOn(console, 'debug')
|
||||
.mockImplementation(() => {});
|
||||
|
||||
// Add an event first
|
||||
logger.enqueueLogEvent({
|
||||
timestamp: Date.now(),
|
||||
event_type: 'action',
|
||||
type: 'test',
|
||||
name: 'test-event',
|
||||
});
|
||||
|
||||
// Mock flushToRum to throw an error
|
||||
const originalFlush = logger.flushToRum.bind(logger);
|
||||
logger.flushToRum = vi.fn().mockRejectedValue(new Error('Network error'));
|
||||
|
||||
// Advance time to trigger flush
|
||||
vi.advanceTimersByTime(TEST_ONLY.FLUSH_INTERVAL_MS + 1000);
|
||||
|
||||
logger.flushIfNeeded();
|
||||
|
||||
// Wait for async operations
|
||||
await vi.runAllTimersAsync();
|
||||
|
||||
expect(consoleSpy).toHaveBeenCalledWith(
|
||||
'Error flushing to RUM:',
|
||||
expect.any(Error),
|
||||
);
|
||||
|
||||
// Restore original method
|
||||
logger.flushToRum = originalFlush;
|
||||
});
|
||||
});
|
||||
|
||||
describe('constants export', () => {
|
||||
it('should export test constants', () => {
|
||||
expect(TEST_ONLY.MAX_EVENTS).toBe(1000);
|
||||
expect(TEST_ONLY.MAX_RETRY_EVENTS).toBe(100);
|
||||
expect(TEST_ONLY.FLUSH_INTERVAL_MS).toBe(60000);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -7,6 +7,7 @@
|
||||
import { Buffer } from 'buffer';
|
||||
import * as https from 'https';
|
||||
import { HttpsProxyAgent } from 'https-proxy-agent';
|
||||
import { randomUUID } from 'crypto';
|
||||
|
||||
import {
|
||||
StartSessionEvent,
|
||||
@@ -21,8 +22,6 @@ import {
|
||||
NextSpeakerCheckEvent,
|
||||
SlashCommandEvent,
|
||||
MalformedJsonResponseEvent,
|
||||
IdeConnectionEvent,
|
||||
KittySequenceOverflowEvent,
|
||||
} from '../types.js';
|
||||
import {
|
||||
RumEvent,
|
||||
@@ -32,12 +31,12 @@ import {
|
||||
RumExceptionEvent,
|
||||
RumPayload,
|
||||
} from './event-types.js';
|
||||
// Removed unused EventMetadataKey import
|
||||
import { Config } from '../../config/config.js';
|
||||
import { safeJsonStringify } from '../../utils/safeJsonStringify.js';
|
||||
// Removed unused import
|
||||
import { HttpError, retryWithBackoff } from '../../utils/retry.js';
|
||||
import { getInstallationId } from '../../utils/user_id.js';
|
||||
import { FixedDeque } from 'mnemonist';
|
||||
import { AuthType } from '../../core/contentGenerator.js';
|
||||
|
||||
// Usage statistics collection endpoint
|
||||
const USAGE_STATS_HOSTNAME = 'gb4w8c3ygj-default-sea.rum.aliyuncs.com';
|
||||
@@ -45,23 +44,6 @@ const USAGE_STATS_PATH = '/';
|
||||
|
||||
const RUN_APP_ID = 'gb4w8c3ygj@851d5d500f08f92';
|
||||
|
||||
/**
|
||||
* Interval in which buffered events are sent to RUM.
|
||||
*/
|
||||
const FLUSH_INTERVAL_MS = 1000 * 60;
|
||||
|
||||
/**
|
||||
* Maximum amount of events to keep in memory. Events added after this amount
|
||||
* are dropped until the next flush to RUM, which happens periodically as
|
||||
* defined by {@link FLUSH_INTERVAL_MS}.
|
||||
*/
|
||||
const MAX_EVENTS = 1000;
|
||||
|
||||
/**
|
||||
* Maximum events to retry after a failed RUM flush
|
||||
*/
|
||||
const MAX_RETRY_EVENTS = 100;
|
||||
|
||||
export interface LogResponse {
|
||||
nextRequestWaitMs?: number;
|
||||
}
|
||||
@@ -71,42 +53,23 @@ export interface LogResponse {
|
||||
export class QwenLogger {
|
||||
private static instance: QwenLogger;
|
||||
private config?: Config;
|
||||
|
||||
/**
|
||||
* Queue of pending events that need to be flushed to the server. New events
|
||||
* are added to this queue and then flushed on demand (via `flushToRum`)
|
||||
*/
|
||||
private readonly events: FixedDeque<RumEvent>;
|
||||
|
||||
/**
|
||||
* The last time that the events were successfully flushed to the server.
|
||||
*/
|
||||
private lastFlushTime: number = Date.now();
|
||||
|
||||
private readonly events: RumEvent[] = [];
|
||||
private last_flush_time: number = Date.now();
|
||||
private flush_interval_ms: number = 1000 * 60; // Wait at least a minute before flushing events.
|
||||
private userId: string;
|
||||
private sessionId: string;
|
||||
|
||||
/**
|
||||
* The value is true when there is a pending flush happening. This prevents
|
||||
* concurrent flush operations.
|
||||
*/
|
||||
private viewId: string;
|
||||
private isFlushInProgress: boolean = false;
|
||||
|
||||
/**
|
||||
* This value is true when a flush was requested during an ongoing flush.
|
||||
*/
|
||||
private pendingFlush: boolean = false;
|
||||
|
||||
private isShutdown: boolean = false;
|
||||
|
||||
private constructor(config?: Config) {
|
||||
this.config = config;
|
||||
this.events = new FixedDeque<RumEvent>(Array, MAX_EVENTS);
|
||||
this.userId = this.generateUserId();
|
||||
this.sessionId =
|
||||
typeof this.config?.getSessionId === 'function'
|
||||
? this.config.getSessionId()
|
||||
: '';
|
||||
this.viewId = randomUUID();
|
||||
}
|
||||
|
||||
private generateUserId(): string {
|
||||
@@ -129,26 +92,7 @@ export class QwenLogger {
|
||||
}
|
||||
|
||||
enqueueLogEvent(event: RumEvent): void {
|
||||
try {
|
||||
// Manually handle overflow for FixedDeque, which throws when full.
|
||||
const wasAtCapacity = this.events.size >= MAX_EVENTS;
|
||||
|
||||
if (wasAtCapacity) {
|
||||
this.events.shift(); // Evict oldest element to make space.
|
||||
}
|
||||
|
||||
this.events.push(event);
|
||||
|
||||
if (wasAtCapacity && this.config?.getDebugMode()) {
|
||||
console.debug(
|
||||
`QwenLogger: Dropped old event to prevent memory leak (queue size: ${this.events.size})`,
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.error('QwenLogger: Failed to enqueue log event.', error);
|
||||
}
|
||||
}
|
||||
this.events.push(event);
|
||||
}
|
||||
|
||||
createRumEvent(
|
||||
@@ -199,7 +143,6 @@ export class QwenLogger {
|
||||
}
|
||||
|
||||
async createRumPayload(): Promise<RumPayload> {
|
||||
const authType = this.config?.getAuthType();
|
||||
const version = this.config?.getCliVersion() || 'unknown';
|
||||
|
||||
return {
|
||||
@@ -216,59 +159,40 @@ export class QwenLogger {
|
||||
id: this.sessionId,
|
||||
},
|
||||
view: {
|
||||
id: this.sessionId,
|
||||
id: this.viewId,
|
||||
name: 'qwen-code-cli',
|
||||
},
|
||||
|
||||
events: this.events.toArray() as RumEvent[],
|
||||
properties: {
|
||||
auth_type: authType,
|
||||
model: this.config?.getModel(),
|
||||
base_url:
|
||||
authType === AuthType.USE_OPENAI ? process.env.OPENAI_BASE_URL : '',
|
||||
},
|
||||
events: [...this.events],
|
||||
_v: `qwen-code@${version}`,
|
||||
};
|
||||
}
|
||||
|
||||
flushIfNeeded(): void {
|
||||
if (Date.now() - this.lastFlushTime < FLUSH_INTERVAL_MS) {
|
||||
if (Date.now() - this.last_flush_time < this.flush_interval_ms) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Prevent concurrent flush operations
|
||||
if (this.isFlushInProgress) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.flushToRum().catch((error) => {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug('Error flushing to RUM:', error);
|
||||
}
|
||||
console.debug('Error flushing to RUM:', error);
|
||||
});
|
||||
}
|
||||
|
||||
async flushToRum(): Promise<LogResponse> {
|
||||
if (this.isFlushInProgress) {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug(
|
||||
'QwenLogger: Flush already in progress, marking pending flush.',
|
||||
);
|
||||
}
|
||||
this.pendingFlush = true;
|
||||
return Promise.resolve({});
|
||||
}
|
||||
this.isFlushInProgress = true;
|
||||
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.log('Flushing log events to RUM.');
|
||||
}
|
||||
if (this.events.size === 0) {
|
||||
this.isFlushInProgress = false;
|
||||
if (this.events.length === 0) {
|
||||
return {};
|
||||
}
|
||||
|
||||
const eventsToSend = this.events.toArray() as RumEvent[];
|
||||
this.events.clear();
|
||||
this.isFlushInProgress = true;
|
||||
|
||||
const rumPayload = await this.createRumPayload();
|
||||
// Override events with the ones we're sending
|
||||
rumPayload.events = eventsToSend;
|
||||
const flushFn = () =>
|
||||
new Promise<Buffer>((resolve, reject) => {
|
||||
const body = safeJsonStringify(rumPayload);
|
||||
@@ -322,29 +246,16 @@ export class QwenLogger {
|
||||
},
|
||||
});
|
||||
|
||||
this.lastFlushTime = Date.now();
|
||||
this.events.splice(0, this.events.length);
|
||||
this.last_flush_time = Date.now();
|
||||
return {};
|
||||
} catch (error) {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.error('RUM flush failed after multiple retries.', error);
|
||||
}
|
||||
|
||||
// Re-queue failed events for retry
|
||||
this.requeueFailedEvents(eventsToSend);
|
||||
return {};
|
||||
} finally {
|
||||
this.isFlushInProgress = false;
|
||||
|
||||
// If a flush was requested while we were flushing, flush again
|
||||
if (this.pendingFlush) {
|
||||
this.pendingFlush = false;
|
||||
// Fire and forget the pending flush
|
||||
this.flushToRum().catch((error) => {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug('Error in pending flush to RUM:', error);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,9 +282,7 @@ export class QwenLogger {
|
||||
// Flush start event immediately
|
||||
this.enqueueLogEvent(applicationEvent);
|
||||
this.flushToRum().catch((error: unknown) => {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug('Error flushing to RUM:', error);
|
||||
}
|
||||
console.debug('Error flushing to RUM:', error);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -542,41 +451,13 @@ export class QwenLogger {
|
||||
this.flushIfNeeded();
|
||||
}
|
||||
|
||||
logIdeConnectionEvent(event: IdeConnectionEvent): void {
|
||||
const rumEvent = this.createActionEvent('connection', 'ide_connection', {
|
||||
snapshots: JSON.stringify({ connection_type: event.connection_type }),
|
||||
});
|
||||
|
||||
this.enqueueLogEvent(rumEvent);
|
||||
this.flushIfNeeded();
|
||||
}
|
||||
|
||||
logKittySequenceOverflowEvent(event: KittySequenceOverflowEvent): void {
|
||||
const rumEvent = this.createExceptionEvent(
|
||||
'overflow',
|
||||
'kitty_sequence_overflow',
|
||||
{
|
||||
subtype: 'kitty_sequence_overflow',
|
||||
snapshots: JSON.stringify({
|
||||
sequence_length: event.sequence_length,
|
||||
truncated_sequence: event.truncated_sequence,
|
||||
}),
|
||||
},
|
||||
);
|
||||
|
||||
this.enqueueLogEvent(rumEvent);
|
||||
this.flushIfNeeded();
|
||||
}
|
||||
|
||||
logEndSessionEvent(_event: EndSessionEvent): void {
|
||||
const applicationEvent = this.createViewEvent('session', 'session_end', {});
|
||||
|
||||
// Flush immediately on session end.
|
||||
this.enqueueLogEvent(applicationEvent);
|
||||
this.flushToRum().catch((error: unknown) => {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug('Error flushing to RUM:', error);
|
||||
}
|
||||
console.debug('Error flushing to RUM:', error);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -599,60 +480,4 @@ export class QwenLogger {
|
||||
const event = new EndSessionEvent(this.config);
|
||||
this.logEndSessionEvent(event);
|
||||
}
|
||||
|
||||
private requeueFailedEvents(eventsToSend: RumEvent[]): void {
|
||||
// Add the events back to the front of the queue to be retried, but limit retry queue size
|
||||
const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events
|
||||
|
||||
// Log a warning if we're dropping events
|
||||
if (eventsToSend.length > MAX_RETRY_EVENTS && this.config?.getDebugMode()) {
|
||||
console.warn(
|
||||
`QwenLogger: Dropping ${
|
||||
eventsToSend.length - MAX_RETRY_EVENTS
|
||||
} events due to retry queue limit. Total events: ${
|
||||
eventsToSend.length
|
||||
}, keeping: ${MAX_RETRY_EVENTS}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Determine how many events can be re-queued
|
||||
const availableSpace = MAX_EVENTS - this.events.size;
|
||||
const numEventsToRequeue = Math.min(eventsToRetry.length, availableSpace);
|
||||
|
||||
if (numEventsToRequeue === 0) {
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug(
|
||||
`QwenLogger: No events re-queued (queue size: ${this.events.size})`,
|
||||
);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the most recent events to re-queue
|
||||
const eventsToRequeue = eventsToRetry.slice(
|
||||
eventsToRetry.length - numEventsToRequeue,
|
||||
);
|
||||
|
||||
// Prepend events to the front of the deque to be retried first.
|
||||
// We iterate backwards to maintain the original order of the failed events.
|
||||
for (let i = eventsToRequeue.length - 1; i >= 0; i--) {
|
||||
this.events.unshift(eventsToRequeue[i]);
|
||||
}
|
||||
// Clear any potential overflow
|
||||
while (this.events.size > MAX_EVENTS) {
|
||||
this.events.pop();
|
||||
}
|
||||
|
||||
if (this.config?.getDebugMode()) {
|
||||
console.debug(
|
||||
`QwenLogger: Re-queued ${numEventsToRequeue} events for retry (queue size: ${this.events.size})`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const TEST_ONLY = {
|
||||
MAX_RETRY_EVENTS,
|
||||
MAX_EVENTS,
|
||||
FLUSH_INTERVAL_MS,
|
||||
};
|
||||
|
||||
@@ -522,16 +522,13 @@ describe('MemoryTool', () => {
|
||||
expect(result).not.toBe(false);
|
||||
|
||||
if (result && result.type === 'edit') {
|
||||
expect(result.title).toContain('Choose Memory Location');
|
||||
expect(result.title).toContain('GLOBAL');
|
||||
expect(result.title).toContain('PROJECT');
|
||||
expect(result.fileName).toBe('QWEN.md');
|
||||
expect(result.title).toBe('Choose Memory Storage Location');
|
||||
expect(result.fileName).toBe('Memory Storage Options');
|
||||
expect(result.fileDiff).toContain('Choose where to save this memory');
|
||||
expect(result.fileDiff).toContain('Test fact');
|
||||
expect(result.fileDiff).toContain('--- QWEN.md');
|
||||
expect(result.fileDiff).toContain('+++ QWEN.md');
|
||||
expect(result.fileDiff).toContain('+- Test fact');
|
||||
expect(result.originalContent).toContain('scope: global');
|
||||
expect(result.originalContent).toContain('INSTRUCTIONS:');
|
||||
expect(result.fileDiff).toContain('Global:');
|
||||
expect(result.fileDiff).toContain('Project:');
|
||||
expect(result.originalContent).toBe('');
|
||||
}
|
||||
});
|
||||
|
||||
@@ -580,16 +577,13 @@ describe('MemoryTool', () => {
|
||||
expect(description).toBe(`${expectedPath} (project)`);
|
||||
});
|
||||
|
||||
it('should show choice prompt when scope is not specified', () => {
|
||||
it('should default to global scope when scope is not specified', () => {
|
||||
const params = { fact: 'Test fact' };
|
||||
const invocation = memoryTool.build(params);
|
||||
const description = invocation.getDescription();
|
||||
|
||||
const globalPath = path.join('~', '.qwen', 'QWEN.md');
|
||||
const projectPath = path.join(process.cwd(), 'QWEN.md');
|
||||
expect(description).toBe(
|
||||
`CHOOSE: ${globalPath} (global) OR ${projectPath} (project)`,
|
||||
);
|
||||
const expectedPath = path.join('~', '.qwen', 'QWEN.md');
|
||||
expect(description).toBe(`${expectedPath} (global)`);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -199,12 +199,7 @@ class MemoryToolInvocation extends BaseToolInvocation<
|
||||
private static readonly allowlist: Set<string> = new Set();
|
||||
|
||||
getDescription(): string {
|
||||
if (!this.params.scope) {
|
||||
const globalPath = tildeifyPath(getMemoryFilePath('global'));
|
||||
const projectPath = tildeifyPath(getMemoryFilePath('project'));
|
||||
return `CHOOSE: ${globalPath} (global) OR ${projectPath} (project)`;
|
||||
}
|
||||
const scope = this.params.scope;
|
||||
const scope = this.params.scope || 'global';
|
||||
const memoryFilePath = getMemoryFilePath(scope);
|
||||
return `${tildeifyPath(memoryFilePath)} (${scope})`;
|
||||
}
|
||||
@@ -212,54 +207,26 @@ class MemoryToolInvocation extends BaseToolInvocation<
|
||||
override async shouldConfirmExecute(
|
||||
_abortSignal: AbortSignal,
|
||||
): Promise<ToolEditConfirmationDetails | false> {
|
||||
// When scope is not specified, show a choice dialog defaulting to global
|
||||
// If scope is not specified, prompt the user to choose
|
||||
if (!this.params.scope) {
|
||||
// Show preview of what would be added to global by default
|
||||
const defaultScope = 'global';
|
||||
const currentContent = await readMemoryFileContent(defaultScope);
|
||||
const newContent = computeNewContent(currentContent, this.params.fact);
|
||||
|
||||
const globalPath = tildeifyPath(getMemoryFilePath('global'));
|
||||
const projectPath = tildeifyPath(getMemoryFilePath('project'));
|
||||
|
||||
const fileName = path.basename(getMemoryFilePath(defaultScope));
|
||||
const choiceText = `Choose where to save this memory:
|
||||
|
||||
"${this.params.fact}"
|
||||
|
||||
Options:
|
||||
- Global: ${globalPath} (shared across all projects)
|
||||
- Project: ${projectPath} (current project only)
|
||||
|
||||
Preview of changes to be made to GLOBAL memory:
|
||||
`;
|
||||
const fileDiff =
|
||||
choiceText +
|
||||
Diff.createPatch(
|
||||
fileName,
|
||||
currentContent,
|
||||
newContent,
|
||||
'Current',
|
||||
'Proposed (Global)',
|
||||
DEFAULT_DIFF_OPTIONS,
|
||||
);
|
||||
|
||||
const confirmationDetails: ToolEditConfirmationDetails = {
|
||||
type: 'edit',
|
||||
title: `Choose Memory Location: GLOBAL (${globalPath}) or PROJECT (${projectPath})`,
|
||||
fileName,
|
||||
filePath: getMemoryFilePath(defaultScope),
|
||||
fileDiff,
|
||||
originalContent: `scope: global\n\n# INSTRUCTIONS:\n# - Click "Yes" to save to GLOBAL memory: ${globalPath}\n# - Click "Modify with external editor" and change "global" to "project" to save to PROJECT memory: ${projectPath}\n\n${currentContent}`,
|
||||
newContent: `scope: global\n\n# INSTRUCTIONS:\n# - Click "Yes" to save to GLOBAL memory: ${globalPath}\n# - Click "Modify with external editor" and change "global" to "project" to save to PROJECT memory: ${projectPath}\n\n${newContent}`,
|
||||
title: `Choose Memory Storage Location`,
|
||||
fileName: 'Memory Storage Options',
|
||||
filePath: '',
|
||||
fileDiff: `Choose where to save this memory:\n\n"${this.params.fact}"\n\nOptions:\n- Global: ${globalPath} (shared across all projects)\n- Project: ${projectPath} (current project only)\n\nPlease specify the scope parameter: "global" or "project"`,
|
||||
originalContent: '',
|
||||
newContent: `Memory to save: ${this.params.fact}\n\nScope options:\n- global: ${globalPath}\n- project: ${projectPath}`,
|
||||
onConfirm: async (_outcome: ToolConfirmationOutcome) => {
|
||||
// Will be handled in createUpdatedParams
|
||||
// This will be handled by the execution flow
|
||||
},
|
||||
};
|
||||
return confirmationDetails;
|
||||
}
|
||||
|
||||
// Only check allowlist when scope is specified
|
||||
const scope = this.params.scope;
|
||||
const memoryFilePath = getMemoryFilePath(scope);
|
||||
const allowlistKey = `${memoryFilePath}_${scope}`;
|
||||
@@ -312,25 +279,17 @@ Preview of changes to be made to GLOBAL memory:
|
||||
};
|
||||
}
|
||||
|
||||
// If scope is not specified and user didn't modify content, return error prompting for choice
|
||||
if (!this.params.scope && !modified_by_user) {
|
||||
const globalPath = tildeifyPath(getMemoryFilePath('global'));
|
||||
const projectPath = tildeifyPath(getMemoryFilePath('project'));
|
||||
const errorMessage = `Please specify where to save this memory:
|
||||
|
||||
Global: ${globalPath} (shared across all projects)
|
||||
Project: ${projectPath} (current project only)`;
|
||||
|
||||
// If scope is not specified, prompt the user to choose
|
||||
if (!this.params.scope) {
|
||||
const errorMessage =
|
||||
'Please specify where to save this memory. Use scope parameter: "global" for user-level (~/.qwen/QWEN.md) or "project" for current project (./QWEN.md).';
|
||||
return {
|
||||
llmContent: JSON.stringify({
|
||||
success: false,
|
||||
error: 'Please specify where to save this memory',
|
||||
}),
|
||||
returnDisplay: errorMessage,
|
||||
llmContent: JSON.stringify({ success: false, error: errorMessage }),
|
||||
returnDisplay: `${errorMessage}\n\nGlobal: ${tildeifyPath(getMemoryFilePath('global'))}\nProject: ${tildeifyPath(getMemoryFilePath('project'))}`,
|
||||
};
|
||||
}
|
||||
|
||||
const scope = this.params.scope || 'global';
|
||||
const scope = this.params.scope;
|
||||
const memoryFilePath = getMemoryFilePath(scope);
|
||||
|
||||
try {
|
||||
@@ -488,88 +447,24 @@ export class MemoryTool
|
||||
|
||||
getModifyContext(_abortSignal: AbortSignal): ModifyContext<SaveMemoryParams> {
|
||||
return {
|
||||
getFilePath: (params: SaveMemoryParams) => {
|
||||
// Determine scope from modified content or default
|
||||
let scope = params.scope || 'global';
|
||||
if (params.modified_content) {
|
||||
const scopeMatch = params.modified_content.match(
|
||||
/^scope:\s*(global|project)\s*\n/i,
|
||||
);
|
||||
if (scopeMatch) {
|
||||
scope = scopeMatch[1].toLowerCase() as 'global' | 'project';
|
||||
}
|
||||
}
|
||||
return getMemoryFilePath(scope);
|
||||
},
|
||||
getCurrentContent: async (params: SaveMemoryParams): Promise<string> => {
|
||||
// Check if content starts with scope directive
|
||||
if (params.modified_content) {
|
||||
const scopeMatch = params.modified_content.match(
|
||||
/^scope:\s*(global|project)\s*\n/i,
|
||||
);
|
||||
if (scopeMatch) {
|
||||
const scope = scopeMatch[1].toLowerCase() as 'global' | 'project';
|
||||
const content = await readMemoryFileContent(scope);
|
||||
const globalPath = tildeifyPath(getMemoryFilePath('global'));
|
||||
const projectPath = tildeifyPath(getMemoryFilePath('project'));
|
||||
return `scope: ${scope}\n\n# INSTRUCTIONS:\n# - Save as "global" for GLOBAL memory: ${globalPath}\n# - Save as "project" for PROJECT memory: ${projectPath}\n\n${content}`;
|
||||
}
|
||||
}
|
||||
const scope = params.scope || 'global';
|
||||
const content = await readMemoryFileContent(scope);
|
||||
const globalPath = tildeifyPath(getMemoryFilePath('global'));
|
||||
const projectPath = tildeifyPath(getMemoryFilePath('project'));
|
||||
return `scope: ${scope}\n\n# INSTRUCTIONS:\n# - Save as "global" for GLOBAL memory: ${globalPath}\n# - Save as "project" for PROJECT memory: ${projectPath}\n\n${content}`;
|
||||
},
|
||||
getFilePath: (params: SaveMemoryParams) =>
|
||||
getMemoryFilePath(params.scope || 'global'),
|
||||
getCurrentContent: async (params: SaveMemoryParams): Promise<string> =>
|
||||
readMemoryFileContent(params.scope || 'global'),
|
||||
getProposedContent: async (params: SaveMemoryParams): Promise<string> => {
|
||||
let scope = params.scope || 'global';
|
||||
|
||||
// Check if modified content has scope directive
|
||||
if (params.modified_content) {
|
||||
const scopeMatch = params.modified_content.match(
|
||||
/^scope:\s*(global|project)\s*\n/i,
|
||||
);
|
||||
if (scopeMatch) {
|
||||
scope = scopeMatch[1].toLowerCase() as 'global' | 'project';
|
||||
}
|
||||
}
|
||||
|
||||
const scope = params.scope || 'global';
|
||||
const currentContent = await readMemoryFileContent(scope);
|
||||
const newContent = computeNewContent(currentContent, params.fact);
|
||||
const globalPath = tildeifyPath(getMemoryFilePath('global'));
|
||||
const projectPath = tildeifyPath(getMemoryFilePath('project'));
|
||||
return `scope: ${scope}\n\n# INSTRUCTIONS:\n# - Save as "global" for GLOBAL memory: ${globalPath}\n# - Save as "project" for PROJECT memory: ${projectPath}\n\n${newContent}`;
|
||||
return computeNewContent(currentContent, params.fact);
|
||||
},
|
||||
createUpdatedParams: (
|
||||
_oldContent: string,
|
||||
modifiedProposedContent: string,
|
||||
originalParams: SaveMemoryParams,
|
||||
): SaveMemoryParams => {
|
||||
// Parse user's scope choice from modified content
|
||||
const scopeMatch = modifiedProposedContent.match(
|
||||
/^scope:\s*(global|project)/i,
|
||||
);
|
||||
const scope = scopeMatch
|
||||
? (scopeMatch[1].toLowerCase() as 'global' | 'project')
|
||||
: 'global';
|
||||
|
||||
// Strip out the scope directive and instruction lines, keep only the actual memory content
|
||||
const contentWithoutScope = modifiedProposedContent.replace(
|
||||
/^scope:\s*(global|project)\s*\n/,
|
||||
'',
|
||||
);
|
||||
const actualContent = contentWithoutScope
|
||||
.replace(/^#[^\n]*\n/gm, '')
|
||||
.replace(/^\s*\n/gm, '')
|
||||
.trim();
|
||||
|
||||
return {
|
||||
...originalParams,
|
||||
scope,
|
||||
modified_by_user: true,
|
||||
modified_content: actualContent,
|
||||
};
|
||||
},
|
||||
): SaveMemoryParams => ({
|
||||
...originalParams,
|
||||
modified_by_user: true,
|
||||
modified_content: modifiedProposedContent,
|
||||
}),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,47 +99,24 @@ describe('ShellTool', () => {
|
||||
|
||||
describe('build', () => {
|
||||
it('should return an invocation for a valid command', () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'ls -l',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'ls -l' });
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
|
||||
it('should throw an error for an empty command', () => {
|
||||
expect(() =>
|
||||
shellTool.build({ command: ' ', is_background: false }),
|
||||
).toThrow('Command cannot be empty.');
|
||||
expect(() => shellTool.build({ command: ' ' })).toThrow(
|
||||
'Command cannot be empty.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw an error for a non-existent directory', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
expect(() =>
|
||||
shellTool.build({
|
||||
command: 'ls',
|
||||
directory: 'rel/path',
|
||||
is_background: false,
|
||||
}),
|
||||
shellTool.build({ command: 'ls', directory: 'rel/path' }),
|
||||
).toThrow(
|
||||
"Directory 'rel/path' is not a registered workspace directory.",
|
||||
);
|
||||
});
|
||||
|
||||
it('should include background indicator in description when is_background is true', () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'npm start',
|
||||
is_background: true,
|
||||
});
|
||||
expect(invocation.getDescription()).toContain('[background]');
|
||||
});
|
||||
|
||||
it('should not include background indicator in description when is_background is false', () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'npm test',
|
||||
is_background: false,
|
||||
});
|
||||
expect(invocation.getDescription()).not.toContain('[background]');
|
||||
});
|
||||
});
|
||||
|
||||
describe('execute', () => {
|
||||
@@ -164,10 +141,7 @@ describe('ShellTool', () => {
|
||||
};
|
||||
|
||||
it('should wrap command on linux and parse pgrep output', async () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'my-command &',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'my-command &' });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveShellExecution({ pid: 54321 });
|
||||
|
||||
@@ -188,81 +162,9 @@ describe('ShellTool', () => {
|
||||
expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile);
|
||||
});
|
||||
|
||||
it('should add ampersand to command when is_background is true and command does not end with &', async () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'npm start',
|
||||
is_background: true,
|
||||
});
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveShellExecution({ pid: 54321 });
|
||||
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue('54321\n54322\n');
|
||||
|
||||
await promise;
|
||||
|
||||
const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp');
|
||||
const wrappedCommand = `{ npm start & }; __code=$?; pgrep -g 0 >${tmpFile} 2>&1; exit $__code;`;
|
||||
expect(mockShellExecutionService).toHaveBeenCalledWith(
|
||||
wrappedCommand,
|
||||
expect.any(String),
|
||||
expect.any(Function),
|
||||
mockAbortSignal,
|
||||
);
|
||||
});
|
||||
|
||||
it('should not add extra ampersand when is_background is true and command already ends with &', async () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'npm start &',
|
||||
is_background: true,
|
||||
});
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveShellExecution({ pid: 54321 });
|
||||
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue('54321\n54322\n');
|
||||
|
||||
await promise;
|
||||
|
||||
const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp');
|
||||
const wrappedCommand = `{ npm start & }; __code=$?; pgrep -g 0 >${tmpFile} 2>&1; exit $__code;`;
|
||||
expect(mockShellExecutionService).toHaveBeenCalledWith(
|
||||
wrappedCommand,
|
||||
expect.any(String),
|
||||
expect.any(Function),
|
||||
mockAbortSignal,
|
||||
);
|
||||
});
|
||||
|
||||
it('should not add ampersand when is_background is false', async () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'npm test',
|
||||
is_background: false,
|
||||
});
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveShellExecution({ pid: 54321 });
|
||||
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true);
|
||||
vi.mocked(fs.readFileSync).mockReturnValue('54321\n54322\n');
|
||||
|
||||
await promise;
|
||||
|
||||
const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp');
|
||||
const wrappedCommand = `{ npm test; }; __code=$?; pgrep -g 0 >${tmpFile} 2>&1; exit $__code;`;
|
||||
expect(mockShellExecutionService).toHaveBeenCalledWith(
|
||||
wrappedCommand,
|
||||
expect.any(String),
|
||||
expect.any(Function),
|
||||
mockAbortSignal,
|
||||
);
|
||||
});
|
||||
|
||||
it('should not wrap command on windows', async () => {
|
||||
vi.mocked(os.platform).mockReturnValue('win32');
|
||||
const invocation = shellTool.build({
|
||||
command: 'dir',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'dir' });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveShellExecution({
|
||||
rawOutput: Buffer.from(''),
|
||||
@@ -286,10 +188,7 @@ describe('ShellTool', () => {
|
||||
|
||||
it('should format error messages correctly', async () => {
|
||||
const error = new Error('wrapped command failed');
|
||||
const invocation = shellTool.build({
|
||||
command: 'user-command',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'user-command' });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveShellExecution({
|
||||
error,
|
||||
@@ -310,19 +209,15 @@ describe('ShellTool', () => {
|
||||
});
|
||||
|
||||
it('should throw an error for invalid parameters', () => {
|
||||
expect(() =>
|
||||
shellTool.build({ command: '', is_background: false }),
|
||||
).toThrow('Command cannot be empty.');
|
||||
expect(() => shellTool.build({ command: '' })).toThrow(
|
||||
'Command cannot be empty.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw an error for invalid directory', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false);
|
||||
expect(() =>
|
||||
shellTool.build({
|
||||
command: 'ls',
|
||||
directory: 'nonexistent',
|
||||
is_background: false,
|
||||
}),
|
||||
shellTool.build({ command: 'ls', directory: 'nonexistent' }),
|
||||
).toThrow(
|
||||
`Directory 'nonexistent' is not a registered workspace directory.`,
|
||||
);
|
||||
@@ -336,10 +231,7 @@ describe('ShellTool', () => {
|
||||
'summarized output',
|
||||
);
|
||||
|
||||
const invocation = shellTool.build({
|
||||
command: 'ls',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'ls' });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
resolveExecutionPromise({
|
||||
output: 'long output',
|
||||
@@ -372,10 +264,7 @@ describe('ShellTool', () => {
|
||||
});
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true); // Pretend the file exists
|
||||
|
||||
const invocation = shellTool.build({
|
||||
command: 'a-command',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'a-command' });
|
||||
await expect(invocation.execute(mockAbortSignal)).rejects.toThrow(error);
|
||||
|
||||
const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp');
|
||||
@@ -393,10 +282,7 @@ describe('ShellTool', () => {
|
||||
});
|
||||
|
||||
it('should throttle text output updates', async () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'stream',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'stream' });
|
||||
const promise = invocation.execute(mockAbortSignal, updateOutputMock);
|
||||
|
||||
// First chunk, should be throttled.
|
||||
@@ -436,10 +322,7 @@ describe('ShellTool', () => {
|
||||
});
|
||||
|
||||
it('should immediately show binary detection message and throttle progress', async () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'cat img',
|
||||
is_background: false,
|
||||
});
|
||||
const invocation = shellTool.build({ command: 'cat img' });
|
||||
const promise = invocation.execute(mockAbortSignal, updateOutputMock);
|
||||
|
||||
mockShellOutputCallback({ type: 'binary_detected' });
|
||||
@@ -487,7 +370,7 @@ describe('ShellTool', () => {
|
||||
describe('addCoAuthorToGitCommit', () => {
|
||||
it('should add co-author to git commit with double quotes', async () => {
|
||||
const command = 'git commit -m "Initial commit"';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
// Mock the shell execution to return success
|
||||
@@ -518,7 +401,7 @@ describe('ShellTool', () => {
|
||||
|
||||
it('should add co-author to git commit with single quotes', async () => {
|
||||
const command = "git commit -m 'Fix bug'";
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -547,7 +430,7 @@ describe('ShellTool', () => {
|
||||
|
||||
it('should handle git commit with additional flags', async () => {
|
||||
const command = 'git commit -a -m "Add feature"';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -576,7 +459,7 @@ describe('ShellTool', () => {
|
||||
|
||||
it('should not modify non-git commands', async () => {
|
||||
const command = 'npm install';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -604,7 +487,7 @@ describe('ShellTool', () => {
|
||||
|
||||
it('should not modify git commands without -m flag', async () => {
|
||||
const command = 'git commit';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -632,7 +515,7 @@ describe('ShellTool', () => {
|
||||
|
||||
it('should handle git commit with escaped quotes in message', async () => {
|
||||
const command = 'git commit -m "Fix \\"quoted\\" text"';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -668,7 +551,7 @@ describe('ShellTool', () => {
|
||||
});
|
||||
|
||||
const command = 'git commit -m "Initial commit"';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -703,7 +586,7 @@ describe('ShellTool', () => {
|
||||
});
|
||||
|
||||
const command = 'git commit -m "Test commit"';
|
||||
const invocation = shellTool.build({ command, is_background: false });
|
||||
const invocation = shellTool.build({ command });
|
||||
const promise = invocation.execute(mockAbortSignal);
|
||||
|
||||
resolveExecutionPromise({
|
||||
@@ -734,7 +617,7 @@ describe('ShellTool', () => {
|
||||
|
||||
describe('shouldConfirmExecute', () => {
|
||||
it('should request confirmation for a new command and whitelist it on "Always"', async () => {
|
||||
const params = { command: 'npm install', is_background: false };
|
||||
const params = { command: 'npm install' };
|
||||
const invocation = shellTool.build(params);
|
||||
const confirmation = await invocation.shouldConfirmExecute(
|
||||
new AbortController().signal,
|
||||
@@ -749,10 +632,7 @@ describe('ShellTool', () => {
|
||||
);
|
||||
|
||||
// Should now be whitelisted
|
||||
const secondInvocation = shellTool.build({
|
||||
command: 'npm test',
|
||||
is_background: false,
|
||||
});
|
||||
const secondInvocation = shellTool.build({ command: 'npm test' });
|
||||
const secondConfirmation = await secondInvocation.shouldConfirmExecute(
|
||||
new AbortController().signal,
|
||||
);
|
||||
@@ -760,9 +640,7 @@ describe('ShellTool', () => {
|
||||
});
|
||||
|
||||
it('should throw an error if validation fails', () => {
|
||||
expect(() =>
|
||||
shellTool.build({ command: '', is_background: false }),
|
||||
).toThrow();
|
||||
expect(() => shellTool.build({ command: '' })).toThrow();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -780,7 +658,6 @@ describe('validateToolParams', () => {
|
||||
const result = shellTool.validateToolParams({
|
||||
command: 'ls',
|
||||
directory: 'test',
|
||||
is_background: false,
|
||||
});
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
@@ -797,7 +674,6 @@ describe('validateToolParams', () => {
|
||||
const result = shellTool.validateToolParams({
|
||||
command: 'ls',
|
||||
directory: 'test2',
|
||||
is_background: false,
|
||||
});
|
||||
expect(result).toContain('is not a registered workspace directory');
|
||||
});
|
||||
@@ -816,7 +692,6 @@ describe('build', () => {
|
||||
const invocation = shellTool.build({
|
||||
command: 'ls',
|
||||
directory: 'test',
|
||||
is_background: false,
|
||||
});
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
@@ -834,7 +709,6 @@ describe('build', () => {
|
||||
shellTool.build({
|
||||
command: 'ls',
|
||||
directory: 'test2',
|
||||
is_background: false,
|
||||
}),
|
||||
).toThrow('is not a registered workspace directory');
|
||||
});
|
||||
|
||||
@@ -37,7 +37,6 @@ export const OUTPUT_UPDATE_INTERVAL_MS = 1000;
|
||||
|
||||
export interface ShellToolParams {
|
||||
command: string;
|
||||
is_background: boolean;
|
||||
description?: string;
|
||||
directory?: string;
|
||||
}
|
||||
@@ -61,10 +60,6 @@ class ShellToolInvocation extends BaseToolInvocation<
|
||||
if (this.params.directory) {
|
||||
description += ` [in ${this.params.directory}]`;
|
||||
}
|
||||
// append background indicator
|
||||
if (this.params.is_background) {
|
||||
description += ` [background]`;
|
||||
}
|
||||
// append optional (description), replacing any line breaks with spaces
|
||||
if (this.params.description) {
|
||||
description += ` (${this.params.description.replace(/\n/g, ' ')})`;
|
||||
@@ -122,20 +117,12 @@ class ShellToolInvocation extends BaseToolInvocation<
|
||||
// Add co-author to git commit commands
|
||||
const processedCommand = this.addCoAuthorToGitCommit(strippedCommand);
|
||||
|
||||
const shouldRunInBackground = this.params.is_background;
|
||||
let finalCommand = processedCommand;
|
||||
|
||||
// If explicitly marked as background and doesn't already end with &, add it
|
||||
if (shouldRunInBackground && !finalCommand.trim().endsWith('&')) {
|
||||
finalCommand = finalCommand.trim() + ' &';
|
||||
}
|
||||
|
||||
// pgrep is not available on Windows, so we can't get background PIDs
|
||||
const commandToExecute = isWindows
|
||||
? finalCommand
|
||||
? processedCommand
|
||||
: (() => {
|
||||
// wrap command to append subprocess pids (via pgrep) to temporary file
|
||||
let command = finalCommand.trim();
|
||||
let command = processedCommand.trim();
|
||||
if (!command.endsWith('&')) command += ';';
|
||||
return `{ ${command} }; __code=$?; pgrep -g 0 >${tempFilePath} 2>&1; exit $__code;`;
|
||||
})();
|
||||
@@ -356,26 +343,7 @@ export class ShellTool extends BaseDeclarativeTool<
|
||||
super(
|
||||
ShellTool.Name,
|
||||
'Shell',
|
||||
`This tool executes a given shell command as \`bash -c <command>\`.
|
||||
|
||||
**Background vs Foreground Execution:**
|
||||
You should decide whether commands should run in background or foreground based on their nature:
|
||||
|
||||
**Use background execution (is_background: true) for:**
|
||||
- Long-running development servers: \`npm run start\`, \`npm run dev\`, \`yarn dev\`, \`bun run start\`
|
||||
- Build watchers: \`npm run watch\`, \`webpack --watch\`
|
||||
- Database servers: \`mongod\`, \`mysql\`, \`redis-server\`
|
||||
- Web servers: \`python -m http.server\`, \`php -S localhost:8000\`
|
||||
- Any command expected to run indefinitely until manually stopped
|
||||
|
||||
**Use foreground execution (is_background: false) for:**
|
||||
- One-time commands: \`ls\`, \`cat\`, \`grep\`
|
||||
- Build commands: \`npm run build\`, \`make\`
|
||||
- Installation commands: \`npm install\`, \`pip install\`
|
||||
- Git operations: \`git commit\`, \`git push\`
|
||||
- Test runs: \`npm test\`, \`pytest\`
|
||||
|
||||
Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`.
|
||||
`This tool executes a given shell command as \`bash -c <command>\`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`.
|
||||
|
||||
The following information is returned:
|
||||
|
||||
@@ -396,11 +364,6 @@ export class ShellTool extends BaseDeclarativeTool<
|
||||
type: 'string',
|
||||
description: 'Exact bash command to execute as `bash -c <command>`',
|
||||
},
|
||||
is_background: {
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Whether to run the command in background. Default is false. Set to true for long-running processes like development servers, watchers, or daemons that should continue running without blocking further commands.',
|
||||
},
|
||||
description: {
|
||||
type: 'string',
|
||||
description:
|
||||
@@ -412,7 +375,7 @@ export class ShellTool extends BaseDeclarativeTool<
|
||||
'(OPTIONAL) Directory to run the command in, if not the project root directory. Must be relative to the project root directory and must already exist.',
|
||||
},
|
||||
},
|
||||
required: ['command', 'is_background'],
|
||||
required: ['command'],
|
||||
},
|
||||
false, // output is not markdown
|
||||
true, // output can be updated
|
||||
|
||||
@@ -210,16 +210,16 @@ describe('bfsFileSearch', () => {
|
||||
for (let i = 0; i < numTargetDirs; i++) {
|
||||
// Add target files in some directories
|
||||
fileCreationPromises.push(
|
||||
createTestFile('content', `dir${i}`, 'QWEN.md'),
|
||||
createTestFile('content', `dir${i}`, 'GEMINI.md'),
|
||||
);
|
||||
fileCreationPromises.push(
|
||||
createTestFile('content', `dir${i}`, 'subdir1', 'QWEN.md'),
|
||||
createTestFile('content', `dir${i}`, 'subdir1', 'GEMINI.md'),
|
||||
);
|
||||
}
|
||||
const expectedFiles = await Promise.all(fileCreationPromises);
|
||||
|
||||
const result = await bfsFileSearch(testRootDir, {
|
||||
fileName: 'QWEN.md',
|
||||
fileName: 'GEMINI.md',
|
||||
// Provide a generous maxDirs limit to ensure it doesn't prematurely stop
|
||||
// in this large test case. Total dirs created is 200.
|
||||
maxDirs: 250,
|
||||
|
||||
@@ -143,28 +143,9 @@ async function getGeminiMdFilePathsInternalForEachDir(
|
||||
// It's okay if it's not found.
|
||||
}
|
||||
|
||||
// Handle the case where we're in the home directory (dir is empty string or home path)
|
||||
const resolvedDir = dir ? path.resolve(dir) : resolvedHome;
|
||||
const isHomeDirectory = resolvedDir === resolvedHome;
|
||||
|
||||
if (isHomeDirectory) {
|
||||
// For home directory, only check for QWEN.md directly in the home directory
|
||||
const homeContextPath = path.join(resolvedHome, geminiMdFilename);
|
||||
try {
|
||||
await fs.access(homeContextPath, fsSync.constants.R_OK);
|
||||
if (homeContextPath !== globalMemoryPath) {
|
||||
allPaths.add(homeContextPath);
|
||||
if (debugMode)
|
||||
logger.debug(
|
||||
`Found readable home ${geminiMdFilename}: ${homeContextPath}`,
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
// Not found, which is okay
|
||||
}
|
||||
} else if (dir) {
|
||||
// FIX: Only perform the workspace search (upward and downward scans)
|
||||
// if a valid currentWorkingDirectory is provided and it's not the home directory.
|
||||
// FIX: Only perform the workspace search (upward and downward scans)
|
||||
// if a valid currentWorkingDirectory is provided.
|
||||
if (dir) {
|
||||
const resolvedCwd = path.resolve(dir);
|
||||
if (debugMode)
|
||||
logger.debug(
|
||||
|
||||
@@ -8,7 +8,7 @@ import path from 'node:path';
|
||||
import os from 'os';
|
||||
import * as crypto from 'crypto';
|
||||
|
||||
export const QWEN_DIR = '.qwen';
|
||||
export const GEMINI_DIR = '.qwen';
|
||||
export const GOOGLE_ACCOUNTS_FILENAME = 'google_accounts.json';
|
||||
const TMP_DIR_NAME = 'tmp';
|
||||
const COMMANDS_DIR_NAME = 'commands';
|
||||
@@ -181,7 +181,7 @@ export function getProjectHash(projectRoot: string): string {
|
||||
*/
|
||||
export function getProjectTempDir(projectRoot: string): string {
|
||||
const hash = getProjectHash(projectRoot);
|
||||
return path.join(os.homedir(), QWEN_DIR, TMP_DIR_NAME, hash);
|
||||
return path.join(os.homedir(), GEMINI_DIR, TMP_DIR_NAME, hash);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -189,7 +189,7 @@ export function getProjectTempDir(projectRoot: string): string {
|
||||
* @returns The path to the user's commands directory.
|
||||
*/
|
||||
export function getUserCommandsDir(): string {
|
||||
return path.join(os.homedir(), QWEN_DIR, COMMANDS_DIR_NAME);
|
||||
return path.join(os.homedir(), GEMINI_DIR, COMMANDS_DIR_NAME);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -198,5 +198,5 @@ export function getUserCommandsDir(): string {
|
||||
* @returns The path to the project's commands directory.
|
||||
*/
|
||||
export function getProjectCommandsDir(projectRoot: string): string {
|
||||
return path.join(projectRoot, QWEN_DIR, COMMANDS_DIR_NAME);
|
||||
return path.join(projectRoot, GEMINI_DIR, COMMANDS_DIR_NAME);
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
import path from 'node:path';
|
||||
import { promises as fsp, existsSync, readFileSync } from 'node:fs';
|
||||
import * as os from 'os';
|
||||
import { QWEN_DIR, GOOGLE_ACCOUNTS_FILENAME } from './paths.js';
|
||||
import { GEMINI_DIR, GOOGLE_ACCOUNTS_FILENAME } from './paths.js';
|
||||
|
||||
interface UserAccounts {
|
||||
active: string | null;
|
||||
@@ -15,7 +15,7 @@ interface UserAccounts {
|
||||
}
|
||||
|
||||
function getGoogleAccountsCachePath(): string {
|
||||
return path.join(os.homedir(), QWEN_DIR, GOOGLE_ACCOUNTS_FILENAME);
|
||||
return path.join(os.homedir(), GEMINI_DIR, GOOGLE_ACCOUNTS_FILENAME);
|
||||
}
|
||||
|
||||
async function readAccounts(filePath: string): Promise<UserAccounts> {
|
||||
|
||||
@@ -8,10 +8,10 @@ import * as os from 'os';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import { randomUUID } from 'crypto';
|
||||
import { QWEN_DIR } from './paths.js';
|
||||
import { GEMINI_DIR } from './paths.js';
|
||||
|
||||
const homeDir = os.homedir() ?? '';
|
||||
const geminiDir = path.join(homeDir, QWEN_DIR);
|
||||
const geminiDir = path.join(homeDir, GEMINI_DIR);
|
||||
const installationIdFile = path.join(geminiDir, 'installation_id');
|
||||
|
||||
function ensureGeminiDirExists() {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@qwen-code/qwen-code-test-utils",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"private": true,
|
||||
"main": "src/index.ts",
|
||||
"license": "Apache-2.0",
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"name": "qwen-code-vscode-ide-companion",
|
||||
"displayName": "Qwen Code Companion",
|
||||
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
|
||||
"version": "0.0.9",
|
||||
"version": "0.0.9-nightly.2",
|
||||
"publisher": "qwenlm",
|
||||
"icon": "assets/icon.png",
|
||||
"repository": {
|
||||
|
||||
@@ -136,7 +136,7 @@ function buildImage(imageName, dockerfile) {
|
||||
if (isWindows) {
|
||||
// PowerShell doesn't support <() process substitution.
|
||||
// Create a temporary auth file that we will clean up after.
|
||||
tempAuthFile = join(os.tmpdir(), `qwen-auth-${Date.now()}.json`);
|
||||
tempAuthFile = join(os.tmpdir(), `gemini-auth-${Date.now()}.json`);
|
||||
writeFileSync(tempAuthFile, '{}');
|
||||
buildCommandArgs = `--authfile="${tempAuthFile}"`;
|
||||
} else {
|
||||
|
||||
@@ -5,7 +5,7 @@ set -euo pipefail
|
||||
|
||||
# Determine the project directory
|
||||
PROJECT_DIR=$(cd "$(dirname "$0")/.." && pwd)
|
||||
ALIAS_COMMAND="alias qwen='node "${PROJECT_DIR}/scripts/start.js"'"
|
||||
ALIAS_COMMAND="alias gemini='node "${PROJECT_DIR}/scripts/start.js"'"
|
||||
|
||||
# Detect shell and set config file path
|
||||
if [[ "${SHELL}" == *"/bash" ]]; then
|
||||
@@ -22,8 +22,8 @@ echo " ${ALIAS_COMMAND}"
|
||||
echo ""
|
||||
|
||||
# Check if the alias already exists
|
||||
if grep -q "alias qwen=" "${CONFIG_FILE}"; then
|
||||
echo "A 'qwen' alias already exists in ${CONFIG_FILE}. No changes were made."
|
||||
if grep -q "alias gemini=" "${CONFIG_FILE}"; then
|
||||
echo "A 'gemini' alias already exists in ${CONFIG_FILE}. No changes were made."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
@@ -33,7 +33,7 @@ if [[ "${REPLY}" =~ ^[Yy]$ ]]; then
|
||||
echo "${ALIAS_COMMAND}" >> "${CONFIG_FILE}"
|
||||
echo ""
|
||||
echo "Alias added to ${CONFIG_FILE}."
|
||||
echo "Please run 'source ${CONFIG_FILE}' or open a new terminal to use the 'qwen' command."
|
||||
echo "Please run 'source ${CONFIG_FILE}' or open a new terminal to use the 'gemini' command."
|
||||
else
|
||||
echo "Aborted. No changes were made."
|
||||
fi
|
||||
|
||||
Reference in New Issue
Block a user