Compare commits


1 Commit

Author               SHA1        Message                            Date
github-actions[bot]  b0b5689da2  chore(release): v0.0.14-nightly.1  2025-09-26 00:10:55 +00:00
40 changed files with 252 additions and 1188 deletions

View File

@@ -1,6 +1,6 @@
name: 'Bug Report'
description: 'Report a bug to help us improve Qwen Code'
labels: ['type/bug', 'status/needs-triage']
labels: ['kind/bug', 'status/need-triage']
body:
- type: 'markdown'
attributes:

View File

@@ -1,8 +1,8 @@
name: 'Feature Request'
description: 'Suggest an idea for this project'
labels:
- 'type/feature-request'
- 'status/needs-triage'
- 'kind/enhancement'
- 'status/need-triage'
body:
- type: 'markdown'
attributes:

View File

@@ -40,13 +40,21 @@ process_pr() {
fi
if [[ -z "${ISSUE_NUMBER}" ]]; then
echo " No linked issue found for PR #${PR_NUMBER} - this is acceptable for independent contributions"
# We no longer require PRs to have linked issues
# Independent valuable contributions are encouraged
echo " No linked issue found for PR #${PR_NUMBER}, adding status/need-issue label"
if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --add-label "status/need-issue" 2>/dev/null; then
echo " ⚠️ Failed to add label (may already exist or have permission issues)"
fi
# Add PR number to the list
if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then
PRS_NEEDING_COMMENT="${PR_NUMBER}"
else
PRS_NEEDING_COMMENT="${PRS_NEEDING_COMMENT},${PR_NUMBER}"
fi
echo "needs_comment=true" >> "${GITHUB_OUTPUT}"
else
echo "🔗 Found linked issue #${ISSUE_NUMBER}"
# Remove status/need-issue label if present (legacy cleanup)
# Remove status/need-issue label if present
if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --remove-label "status/need-issue" 2>/dev/null; then
echo " status/need-issue label not present or could not be removed"
fi
@@ -91,7 +99,7 @@ process_pr() {
local LABELS_TO_REMOVE=""
for label in "${PR_LABEL_ARRAY[@]}"; do
if [[ -n "${label}" ]] && [[ " ${ISSUE_LABEL_ARRAY[*]} " != *" ${label} "* ]]; then
# Don't remove status/need-issue since we already handled it (legacy cleanup)
# Don't remove status/need-issue since we already handled it
if [[ "${label}" != "status/need-issue" ]]; then
if [[ -z "${LABELS_TO_REMOVE}" ]]; then
LABELS_TO_REMOVE="${label}"

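For reference, below is a minimal TypeScript sketch of the labeling step in the hunk above, assuming an Octokit-style client such as the `github` object that `actions/github-script` provides elsewhere in these workflows; the workflow itself shells out to `gh pr edit`, and the function and parameter names here are illustrative.

```typescript
// Hypothetical sketch only: the real workflow uses `gh pr edit`, not Octokit directly.
interface Octokit {
  rest: {
    issues: {
      addLabels(params: {
        owner: string;
        repo: string;
        issue_number: number;
        labels: string[];
      }): Promise<unknown>;
      removeLabel(params: {
        owner: string;
        repo: string;
        issue_number: number;
        name: string;
      }): Promise<unknown>;
    };
  };
}

// Returns true when the PR should be collected into PRS_NEEDING_COMMENT.
async function triagePrLinkage(
  github: Octokit,
  owner: string,
  repo: string,
  prNumber: number,
  linkedIssue: number | undefined,
): Promise<boolean> {
  if (linkedIssue === undefined) {
    // No linked issue: flag the PR so a follow-up comment can be posted.
    await github.rest.issues.addLabels({
      owner,
      repo,
      issue_number: prNumber, // PR labels go through the issues API
      labels: ['status/need-issue'],
    });
    return true;
  }
  // Linked issue found: drop the label if it is still present.
  try {
    await github.rest.issues.removeLabel({
      owner,
      repo,
      issue_number: prNumber,
      name: 'status/need-issue',
    });
  } catch {
    // Label was not on the PR; nothing to remove.
  }
  return false;
}
```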
View File

@@ -1,201 +0,0 @@
name: 'Check Issue Completeness'
on:
issues:
types:
- 'opened'
- 'edited'
permissions:
contents: 'read'
issues: 'write'
jobs:
check-issue-info:
timeout-minutes: 2
if: |-
${{ github.repository == 'QwenLM/qwen-code' && contains(github.event.issue.labels.*.name, 'type/bug') }}
runs-on: 'ubuntu-latest'
steps:
- name: 'Check for Client Information'
id: 'check_info'
env:
ISSUE_BODY: '${{ github.event.issue.body }}'
run: |-
echo "Checking issue body for required information..."
# Convert issue body to lowercase for case-insensitive matching
ISSUE_BODY_LOWER=$(echo "$ISSUE_BODY" | tr '[:upper:]' '[:lower:]')
# Initialize flags
HAS_VERSION=false
HAS_OS_INFO=false
HAS_AUTH_METHOD=false
HAS_ABOUT_OUTPUT=false
MISSING_INFO=()
# Check for /about command output by looking for its characteristic fields
# The /about output contains: CLI Version, Git Commit, Model, Sandbox, OS, Auth Method
if echo "$ISSUE_BODY_LOWER" | grep -qE 'cli version.*[0-9]+\.[0-9]+\.[0-9]+'; then
HAS_ABOUT_OUTPUT=true
HAS_VERSION=true
fi
# If full /about output is not detected, check individual components
if [ "$HAS_ABOUT_OUTPUT" = false ]; then
# Check for version information (various formats)
if echo "$ISSUE_BODY_LOWER" | grep -qE '(cli version|version|v)[[:space:]]*[0-9]+\.[0-9]+\.[0-9]+'; then
HAS_VERSION=true
fi
# Check for OS information
if echo "$ISSUE_BODY_LOWER" | grep -qE '(^os[[:space:]]|macos|windows|linux|ubuntu|debian|fedora|arch|darwin|win32|platform)'; then
HAS_OS_INFO=true
fi
# Check for Auth Method information
if echo "$ISSUE_BODY_LOWER" | grep -qE '(auth method|authentication|login|qwen-oauth|api.?config|oauth)'; then
HAS_AUTH_METHOD=true
fi
else
# If /about output is present, assume it contains OS and auth info
HAS_OS_INFO=true
HAS_AUTH_METHOD=true
fi
# Determine what's missing
if [ "$HAS_ABOUT_OUTPUT" = false ]; then
if [ "$HAS_VERSION" = false ]; then
MISSING_INFO+=("Qwen Code version")
fi
if [ "$HAS_OS_INFO" = false ]; then
MISSING_INFO+=("operating system information")
fi
if [ "$HAS_AUTH_METHOD" = false ]; then
MISSING_INFO+=("authentication/login method")
fi
# Suggest providing /about output for completeness
if [ "$HAS_VERSION" = false ] || [ "$HAS_OS_INFO" = false ] || [ "$HAS_AUTH_METHOD" = false ]; then
MISSING_INFO+=("full output of the \`/about\` command (recommended)")
fi
fi
# Set output variables
if [ ${#MISSING_INFO[@]} -eq 0 ]; then
echo "info_complete=true" >> "$GITHUB_OUTPUT"
echo "All required information is present."
else
echo "info_complete=false" >> "$GITHUB_OUTPUT"
# Join array elements with comma
MISSING_LIST=$(IFS=','; echo "${MISSING_INFO[*]}")
echo "missing_info=$MISSING_LIST" >> "$GITHUB_OUTPUT"
echo "Missing information: $MISSING_LIST"
fi
- name: 'Comment on Issue if Information is Missing'
if: |-
${{ steps.check_info.outputs.info_complete == 'false' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7
env:
MISSING_INFO: '${{ steps.check_info.outputs.missing_info }}'
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
const missingInfo = process.env.MISSING_INFO.split(',');
const missingList = missingInfo.map(item => `- ${item}`).join('\n');
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.data.find(comment =>
comment.user.type === 'Bot' &&
comment.body.includes('Missing Required Information')
);
const commentBody = `### ⚠️ Missing Required Information
Thank you for reporting this issue! To help us investigate and resolve this problem more effectively, we need some additional information:
${missingList}
### How to provide this information:
Please run the following command and paste the complete output:
\`\`\`bash
qwen
# Then in the interactive CLI, run:
/about
\`\`\`
The output should look like:
\`\`\`
CLI Version 0.0.14
Git Commit 9a0cb64a
Model coder-model
Sandbox no sandbox
OS darwin
Auth Method qwen-oauth
\`\`\`
Once you provide this information, we'll be able to assist you better. Thank you! 🙏`;
if (botComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: commentBody
});
console.log('Updated existing comment about missing information.');
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: commentBody
});
console.log('Created new comment about missing information.');
}
- name: 'Add status/need-information Label'
if: |-
${{ steps.check_info.outputs.info_complete == 'false' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
labels: ['status/need-information']
});
console.log('Added status/need-information label.');
- name: 'Remove status/need-information Label if Complete'
if: |-
${{ steps.check_info.outputs.info_complete == 'true' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7
continue-on-error: true
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
try {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
name: 'status/need-information'
});
console.log('Removed status/need-information label as information is now complete.');
} catch (error) {
if (error.status === 404) {
console.log('Label not found on issue, nothing to remove.');
} else {
throw error;
}
}

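The removed workflow's core check is a set of case-insensitive greps over the issue body. A compact TypeScript sketch of the same logic is shown below for illustration only; the original ran in bash, and the function name is an assumption.

```typescript
// Sketch of the deleted completeness check; regexes mirror the grep patterns above.
function findMissingInfo(issueBody: string): string[] {
  const body = issueBody.toLowerCase();
  const missing: string[] = [];

  // Full /about output is recognised by its "CLI Version x.y.z" field.
  const hasAbout = /cli version.*\d+\.\d+\.\d+/.test(body);
  const hasVersion =
    hasAbout || /(cli version|version|v)\s*\d+\.\d+\.\d+/.test(body);
  const hasOs =
    hasAbout ||
    /(^os\s|macos|windows|linux|ubuntu|debian|fedora|arch|darwin|win32|platform)/m.test(
      body,
    );
  const hasAuth =
    hasAbout ||
    /(auth method|authentication|login|qwen-oauth|api.?config|oauth)/.test(body);

  if (!hasVersion) missing.push('Qwen Code version');
  if (!hasOs) missing.push('operating system information');
  if (!hasAuth) missing.push('authentication/login method');
  if (missing.length > 0) {
    missing.push('full output of the `/about` command (recommended)');
  }
  return missing;
}

// An empty result maps to info_complete=true; otherwise the list is joined with
// commas and written to missing_info, as the removed workflow steps did.
```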
View File

@@ -71,20 +71,20 @@ jobs:
1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels.
2. Use shell command `echo` to check the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
3. Ignore any existing priorities or tags on the issue. Just report your findings.
4. Select the most relevant labels from the existing labels, focusing on type/*, category/*, scope/*, status/* and priority/*. For category/* and type/* limit yourself to only the single most applicable label in each case.
4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case.
6. Apply the selected labels to this issue using: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --add-label "label1,label2"`.
7. For each issue, check whether the CLI version is present; it is usually in the output of the /about command and will look like 0.1.5. If the reported version is more than 6 versions older than the most recent release, add the status/need-retesting label.
8. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label.
9. Use Category and Scope definitions mentioned below to help you narrow down issues.
9. Use Area definitions mentioned below to help you narrow down issues.
## Guidelines
- Only use labels that already exist in the repository
- Do not add comments or modify the issue content
- Triage only the current issue
- Identify only one category/ label
- Identify only one type/ label
- Identify all applicable scope/*, status/* and priority/* labels based on the issue content. It's ok to have multiple of these
- Identify only one area/ label
- Identify only one kind/ label
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario
- Reference all shell variables as "${VAR}" (with quotes and braces)
- Output only valid JSON format
@@ -127,55 +127,45 @@ jobs:
Things you should know:
- If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue
- This product is designed to use different models eg.. using pro, downgrading to flash etc. when users report that they dont expect the model to change those would be categorized as feature requests.
Definition of Categories and Scopes
category/cli: Command line interface and interaction
- Issues with interactive CLI features, command parsing, keyboard shortcuts
- Related scopes: scope/commands, scope/interactive, scope/non-interactive, scope/keybindings
category/core: Core engine and logic
- Issues with fundamental components, content generation, session management
- Related scopes: scope/content-generation, scope/token-management, scope/session-management, scope/model-switching
category/ui: User interface and display
- Issues with themes, UI components, rendering, markdown display
- Related scopes: scope/themes, scope/components, scope/rendering, scope/markdown
category/authentication: Authentication and authorization
- Issues with login flows, API keys, OAuth, credential storage
- Related scopes: scope/oauth, scope/api-keys, scope/token-storage
category/tools: Tool integration and execution
- Issues with MCP, shell execution, file operations, web search, memory, git integration
- Related scopes: scope/mcp, scope/shell, scope/file-operations, scope/web-search, scope/memory, scope/git
category/configuration: Configuration management
- Issues with settings, extensions, trusted folders, sandbox configuration
- Related scopes: scope/settings, scope/extensions, scope/trusted-folders, scope/sandbox
category/integration: External integrations
- Issues with IDE integration, VSCode extension, Zed integration, GitHub Actions
- Related scopes: scope/ide, scope/vscode, scope/zed, scope/github-actions
category/platform: Platform compatibility
- Issues with installation, OS compatibility, packaging
- Related scopes: scope/installation, scope/macos, scope/windows, scope/linux, scope/packaging
category/performance: Performance and optimization
- Issues with latency, memory usage, model performance, caching
- Related scopes: scope/latency, scope/memory-usage, scope/model-performance, scope/caching
category/security: Security and privacy
- Issues with data privacy, credential security, vulnerabilities
- Related scopes: scope/data-privacy, scope/credential-security, scope/vulnerability
category/telemetry: Telemetry and analytics
- Issues with metrics collection, logging, analytics
- Related scopes: scope/metrics, scope/logging, scope/analytics
category/development: Development experience
- Issues with build system, testing, CI/CD, documentation
- Related scopes: scope/build-system, scope/testing, scope/ci-cd, scope/documentation
Definition of Areas
area/ux:
- Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance.
- I am seeing my screen flicker when using Gemini CLI
- I am seeing the output malformed
- Theme changes aren't taking effect
- My keyboard inputs aren't being recognized
area/platform:
- Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework.
area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features.
area/models:
- i am not getting a response that is reasonable or expected. this can include things like
- I am calling a tool and the tool is not performing as expected.
- i am expecting a tool to be called and it is not getting called ,
- Including experience when using
- built-in tools (e.g., web search, code interpreter, read file, writefile, etc..),
- Function calling issues should be under this area
- i am getting responses from the model that are malformed.
- Issues concerning Gemini quality of response and inference,
- Issues talking about unnecessary token consumption.
- Issues talking about Model getting stuck in a loop be watchful as this could be the root cause for issues that otherwise seem like model performance issues.
- Memory compression
- unexpected responses,
- poor quality of generated code
area/tools:
- These are primarily issues related to Model Context Protocol
- These are issues that mention MCP support
- feature requests asking for support for new tools.
area/core: Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality
area/contribution: Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure.
area/authentication: Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc..
area/security-privacy: Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access.
area/extensibility: Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc..
area/performance: Issues focused on model performance
- Issues with running out of capacity,
- 429 errors etc..
- could also pertain to latency,
- other general software performance like, memory usage, CPU consumption, and algorithmic efficiency.
- Switching models from one to the other unexpectedly.
- name: 'Post Issue Analysis Failure Comment'
if: |-

View File

@@ -84,16 +84,16 @@ jobs:
2. Use shell command `echo` to check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues)
3. Review the issue title, body and any comments provided in the environment variables.
4. Ignore any existing priorities or tags on the issue.
5. Select the most relevant labels from the existing labels, focusing on type/*, category/*, scope/*, status/* and priority/*.
5. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*.
6. Get the list of labels already on the issue using `gh issue view ISSUE_NUMBER --repo ${{ github.repository }} --json labels -t '{{range .labels}}{{.name}}{{"\n"}}{{end}}'
7. For category/* and type/* limit yourself to only the single most applicable label in each case.
7. For area/* and kind/* limit yourself to only the single most applicable label in each case.
8. Give me a single short paragraph about why you are selecting each label in the process. Use the format: Issue ID, Title, Label applied, Label removed, overall explanation.
9. Parse the JSON array from step 2 and for EACH INDIVIDUAL issue, apply appropriate labels using separate commands:
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label1"`
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label2"`
- Continue for each label separately
- IMPORTANT: Label each issue individually, one command per issue, one label at a time if needed.
- Make sure after you apply labels there is only one category/* and one type/* label per issue.
- Make sure after you apply labels there is only one area/* and one kind/* label per issue.
- To do this look for labels found in step 6 that no longer apply remove them one at a time using
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name1"`
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name2"`
@@ -113,9 +113,9 @@ jobs:
- Do not add comments or modify the issue content.
- Do not remove labels titled help wanted or good first issue.
- Triage only the current issue.
- Identify only one category/ label
- Identify only one type/ label (Do not apply type/duplicate or type/parent-issue)
- Identify all applicable scope/*, status/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Identify only one area/ label
- Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario.
Categorization Guidelines:
P0: Critical / Blocker
@@ -157,52 +157,48 @@ jobs:
- If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue
- This product is designed to use different models eg.. using pro, downgrading to flash etc.
- When users report that they dont expect the model to change those would be categorized as feature requests.
Definition of Categories and Scopes
category/cli: Command line interface and interaction
- Issues with interactive CLI features, command parsing, keyboard shortcuts
- Related scopes: scope/commands, scope/interactive, scope/non-interactive, scope/keybindings
category/core: Core engine and logic
- Issues with fundamental components, content generation, session management
- Related scopes: scope/content-generation, scope/token-management, scope/session-management, scope/model-switching
category/ui: User interface and display
- Issues with themes, UI components, rendering, markdown display
- Related scopes: scope/themes, scope/components, scope/rendering, scope/markdown
category/authentication: Authentication and authorization
- Issues with login flows, API keys, OAuth, credential storage
- Related scopes: scope/oauth, scope/api-keys, scope/token-storage
category/tools: Tool integration and execution
- Issues with MCP, shell execution, file operations, web search, memory, git integration
- Related scopes: scope/mcp, scope/shell, scope/file-operations, scope/web-search, scope/memory, scope/git
category/configuration: Configuration management
- Issues with settings, extensions, trusted folders, sandbox configuration
- Related scopes: scope/settings, scope/extensions, scope/trusted-folders, scope/sandbox
category/integration: External integrations
- Issues with IDE integration, VSCode extension, Zed integration, GitHub Actions
- Related scopes: scope/ide, scope/vscode, scope/zed, scope/github-actions
category/platform: Platform compatibility
- Issues with installation, OS compatibility, packaging
- Related scopes: scope/installation, scope/macos, scope/windows, scope/linux, scope/packaging
category/performance: Performance and optimization
- Issues with latency, memory usage, model performance, caching
- Related scopes: scope/latency, scope/memory-usage, scope/model-performance, scope/caching
category/security: Security and privacy
- Issues with data privacy, credential security, vulnerabilities
- Related scopes: scope/data-privacy, scope/credential-security, scope/vulnerability
category/telemetry: Telemetry and analytics
- Issues with metrics collection, logging, analytics
- Related scopes: scope/metrics, scope/logging, scope/analytics
category/development: Development experience
- Issues with build system, testing, CI/CD, documentation
- Related scopes: scope/build-system, scope/testing, scope/ci-cd, scope/documentation
Definition of Areas
area/ux:
- Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance.
- I am seeing my screen flicker when using Gemini CLI
- I am seeing the output malformed
- Theme changes aren't taking effect
- My keyboard inputs aren't being recognized
area/platform:
- Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework.
area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features.
area/models:
- i am not getting a response that is reasonable or expected. this can include things like
- I am calling a tool and the tool is not performing as expected.
- i am expecting a tool to be called and it is not getting called ,
- Including experience when using
- built-in tools (e.g., web search, code interpreter, read file, writefile, etc..),
- Function calling issues should be under this area
- i am getting responses from the model that are malformed.
- Issues concerning Gemini quality of response and inference,
- Issues talking about unnecessary token consumption.
- Issues talking about Model getting stuck in a loop be watchful as this could be the root cause for issues that otherwise seem like model performance issues.
- Memory compression
- unexpected responses,
- poor quality of generated code
area/tools:
- These are primarily issues related to Model Context Protocol
- These are issues that mention MCP support
- feature requests asking for support for new tools.
area/core:
- Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality
area/contribution:
- Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure.
area/authentication:
- Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc..
area/security-privacy:
- Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access.
area/extensibility:
- Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc..
area/performance:
- Issues focused on model performance
- Issues with running out of capacity,
- 429 errors etc..
- could also pertain to latency,
- other general software performance like, memory usage, CPU consumption, and algorithmic efficiency.
- Switching models from one to the other unexpectedly.

.vscode/launch.json vendored (11 changed lines)
View File

@@ -108,17 +108,6 @@
"request": "attach",
"skipFiles": ["<node_internals>/**"],
"type": "node"
},
{
"type": "node",
"request": "launch",
"name": "Debug Current TS File",
"runtimeExecutable": "npx",
"runtimeArgs": ["tsx", "${file}"],
"skipFiles": ["<node_internals>/**"],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"env": {}
}
],
"inputs": [

View File

@@ -1,13 +1,5 @@
# Changelog
## 0.0.14
- Added plan mode support for task planning
- Fixed unreliable editCorrector that injects extra escape characters
- Fixed task tool dynamic updates
- Added Qwen3-VL-Plus token limits (256K input, 32K output) and highres support
- Enhanced dashScope cache control
## 0.0.13
- Added YOLO mode support for automatic vision model switching with CLI arguments and environment variables.

package-lock.json generated (12 changed lines)
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"workspaces": [
"packages/*"
],
@@ -13454,7 +13454,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"dependencies": {
"@google/genai": "1.9.0",
"@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"dependencies": {
"@google/genai": "1.13.0",
"@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -13800,7 +13800,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14-nightly.1"
},
"scripts": {
"start": "node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.14-nightly.1"
},
"dependencies": {
"@google/genai": "1.9.0",

View File

@@ -54,11 +54,7 @@ const MockedGeminiClientClass = vi.hoisted(() =>
const MockedUserPromptEvent = vi.hoisted(() =>
vi.fn().mockImplementation(() => {}),
);
const MockedApiCancelEvent = vi.hoisted(() =>
vi.fn().mockImplementation(() => {}),
);
const mockParseAndFormatApiError = vi.hoisted(() => vi.fn());
const mockLogApiCancel = vi.hoisted(() => vi.fn());
// Vision auto-switch mocks (hoisted)
const mockHandleVisionSwitch = vi.hoisted(() =>
@@ -75,9 +71,7 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
GitService: vi.fn(),
GeminiClient: MockedGeminiClientClass,
UserPromptEvent: MockedUserPromptEvent,
ApiCancelEvent: MockedApiCancelEvent,
parseAndFormatApiError: mockParseAndFormatApiError,
logApiCancel: mockLogApiCancel,
};
});

View File

@@ -31,8 +31,6 @@ import {
ConversationFinishedEvent,
ApprovalMode,
parseAndFormatApiError,
logApiCancel,
ApiCancelEvent,
} from '@qwen-code/qwen-code-core';
import { type Part, type PartListUnion, FinishReason } from '@google/genai';
import type {
@@ -225,16 +223,6 @@ export const useGeminiStream = (
turnCancelledRef.current = true;
isSubmittingQueryRef.current = false;
abortControllerRef.current?.abort();
// Log API cancellation
const prompt_id = config.getSessionId() + '########' + getPromptCount();
const cancellationEvent = new ApiCancelEvent(
config.getModel(),
prompt_id,
config.getContentGeneratorConfig()?.authType,
);
logApiCancel(config, cancellationEvent);
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, Date.now());
}
@@ -254,8 +242,6 @@ export const useGeminiStream = (
setPendingHistoryItem,
onCancelSubmit,
pendingHistoryItemRef,
config,
getPromptCount,
]);
useKeypress(
@@ -462,7 +448,6 @@ export const useGeminiStream = (
if (turnCancelledRef.current) {
return;
}
if (pendingHistoryItemRef.current) {
if (pendingHistoryItemRef.current.type === 'tool_group') {
const updatedTools = pendingHistoryItemRef.current.tools.map(

View File

@@ -81,6 +81,22 @@ class GeminiAgent {
): Promise<acp.InitializeResponse> {
this.clientCapabilities = args.clientCapabilities;
const authMethods = [
{
id: AuthType.LOGIN_WITH_GOOGLE,
name: 'Log in with Google',
description: null,
},
{
id: AuthType.USE_GEMINI,
name: 'Use Gemini API key',
description:
'Requires setting the `GEMINI_API_KEY` environment variable',
},
{
id: AuthType.USE_VERTEX_AI,
name: 'Vertex AI',
description: null,
},
{
id: AuthType.USE_OPENAI,
name: 'Use OpenAI API key',
@@ -349,7 +365,6 @@ class Session {
function_name: fc.name ?? '',
function_args: args,
duration_ms: durationMs,
status: 'error',
success: false,
error: error.message,
tool_type:
@@ -468,7 +483,6 @@ class Session {
function_name: fc.name,
function_args: args,
duration_ms: durationMs,
status: 'success',
success: true,
prompt_id: promptId,
tool_type:

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View File

@@ -434,6 +434,8 @@ describe('Gemini Client (client.ts)', () => {
config: {
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0,
topP: 1,
tools: [
{
functionDeclarations: [
@@ -484,6 +486,7 @@ describe('Gemini Client (client.ts)', () => {
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.9,
topP: 1, // from default
topK: 20,
tools: [
{
@@ -2458,6 +2461,7 @@ ${JSON.stringify(
abortSignal,
systemInstruction: getCoreSystemPrompt(''),
temperature: 0.5,
topP: 1,
},
contents,
},

View File

@@ -115,7 +115,10 @@ export class GeminiClient {
private chat?: GeminiChat;
private contentGenerator?: ContentGenerator;
private readonly embeddingModel: string;
private readonly generateContentConfig: GenerateContentConfig = {};
private readonly generateContentConfig: GenerateContentConfig = {
temperature: 0,
topP: 1,
};
private sessionTurnCount = 0;
private readonly loopDetector: LoopDetectionService;

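The hunk above gives `generateContentConfig` explicit defaults of `temperature: 0` and `topP: 1`, and the updated client tests show per-request values layering on top of them. A minimal sketch of that merge follows; names beyond `temperature`/`topP` are assumptions.

```typescript
// Sketch only: illustrates how class-level defaults combine with per-request
// overrides so that topP: 1 survives unless the caller sets it explicitly.
interface GenerateContentConfig {
  temperature?: number;
  topP?: number;
  topK?: number;
}

const defaultGenerateContentConfig: GenerateContentConfig = {
  temperature: 0,
  topP: 1,
};

function resolveGenerationConfig(
  override: GenerateContentConfig = {},
): GenerateContentConfig {
  // Later spread wins: an explicit temperature (e.g. 0.9) replaces the default 0,
  // while topP stays at 1 whenever the caller does not set it.
  return { ...defaultGenerateContentConfig, ...override };
}

// resolveGenerationConfig({ temperature: 0.9, topK: 20 })
// -> { temperature: 0.9, topP: 1, topK: 20 }, matching the updated test expectations.
```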
View File

@@ -401,7 +401,7 @@ export class CoreToolScheduler {
}
}
const cancelledCall = {
return {
request: currentCall.request,
tool: toolInstance,
invocation,
@@ -426,8 +426,6 @@ export class CoreToolScheduler {
durationMs,
outcome,
} as CancelledToolCall;
return cancelledCall;
}
case 'validating':
return {

View File

@@ -12,7 +12,6 @@ import type { Config } from '../../config/config.js';
import { OpenAIContentGenerator } from './openaiContentGenerator.js';
import {
DashScopeOpenAICompatibleProvider,
DeepSeekOpenAICompatibleProvider,
OpenRouterOpenAICompatibleProvider,
type OpenAICompatibleProvider,
DefaultOpenAICompatibleProvider,
@@ -24,7 +23,6 @@ export { ContentGenerationPipeline, type PipelineConfig } from './pipeline.js';
export {
type OpenAICompatibleProvider,
DashScopeOpenAICompatibleProvider,
DeepSeekOpenAICompatibleProvider,
OpenRouterOpenAICompatibleProvider,
} from './provider/index.js';
@@ -63,13 +61,6 @@ export function determineProvider(
);
}
if (DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config)) {
return new DeepSeekOpenAICompatibleProvider(
contentGeneratorConfig,
cliConfig,
);
}
// Check for OpenRouter provider
if (OpenRouterOpenAICompatibleProvider.isOpenRouterProvider(config)) {
return new OpenRouterOpenAICompatibleProvider(

View File

@@ -161,9 +161,6 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9,
max_tokens: 1000,
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
mockOpenAIResponse,
@@ -241,9 +238,6 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
tools: mockTools,
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -280,30 +274,6 @@ describe('ContentGenerationPipeline', () => {
request,
);
});
it('should pass abort signal to OpenAI client when provided', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue({
choices: [{ message: { content: 'response' } }],
});
await pipeline.execute(request, 'test-id');
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
});
describe('executeStream', () => {
@@ -368,9 +338,6 @@ describe('ContentGenerationPipeline', () => {
stream: true,
stream_options: { include_usage: true },
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
@@ -503,42 +470,6 @@ describe('ContentGenerationPipeline', () => {
);
});
it('should pass abort signal to OpenAI client for streaming requests', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
id: 'chunk-1',
choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
};
},
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue(
mockStream,
);
const resultGenerator = await pipeline.executeStream(request, 'test-id');
for await (const _result of resultGenerator) {
// Consume stream
}
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
it('should merge finishReason and usageMetadata from separate chunks', async () => {
// Arrange
const request: GenerateContentParameters = {
@@ -993,9 +924,6 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -1032,9 +960,6 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // From config
max_tokens: 1000, // From config
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -1084,9 +1009,6 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
metadata: { promptId: userPromptId },
}),
expect.objectContaining({
signal: undefined,
}),
);
});
});

View File

@@ -48,9 +48,6 @@ export class ContentGenerationPipeline {
async (openaiRequest, context) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as OpenAI.Chat.ChatCompletion;
const geminiResponse =
@@ -81,9 +78,6 @@ export class ContentGenerationPipeline {
// Stage 1: Create OpenAI stream
const stream = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
// Stage 2: Process stream with conversion and logging
@@ -227,12 +221,6 @@ export class ContentGenerationPipeline {
mergedResponse.usageMetadata = lastResponse.usageMetadata;
}
// Copy other essential properties from the current response
mergedResponse.responseId = response.responseId;
mergedResponse.createTime = response.createTime;
mergedResponse.modelVersion = response.modelVersion;
mergedResponse.promptFeedback = response.promptFeedback;
// Update the collected responses with the merged response
collectedGeminiResponses[collectedGeminiResponses.length - 1] =
mergedResponse;
@@ -260,23 +248,26 @@ export class ContentGenerationPipeline {
...this.buildSamplingParameters(request),
};
// Add streaming options if present
if (streaming) {
(
baseRequest as unknown as OpenAI.Chat.ChatCompletionCreateParamsStreaming
).stream = true;
baseRequest.stream_options = { include_usage: true };
}
// Let provider enhance the request (e.g., add metadata, cache control)
const enhancedRequest = this.config.provider.buildRequest(
baseRequest,
userPromptId,
);
// Add tools if present
if (request.config?.tools) {
baseRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
enhancedRequest.tools = await this.converter.convertGeminiToolsToOpenAI(
request.config.tools,
);
}
// Let provider enhance the request (e.g., add metadata, cache control)
return this.config.provider.buildRequest(baseRequest, userPromptId);
// Add streaming options if needed
if (streaming) {
enhancedRequest.stream = true;
enhancedRequest.stream_options = { include_usage: true };
}
return enhancedRequest;
}
private buildSamplingParameters(
@@ -314,9 +305,9 @@ export class ContentGenerationPipeline {
};
const params = {
// Parameters with request fallback but no defaults
...addParameterIfDefined('temperature', 'temperature', 'temperature'),
...addParameterIfDefined('top_p', 'top_p', 'topP'),
// Parameters with request fallback and defaults
temperature: getParameterValue('temperature', 'temperature', 0.0),
top_p: getParameterValue('top_p', 'topP', 1.0),
// Max tokens (special case: different property names)
...addParameterIfDefined('max_tokens', 'max_tokens', 'maxOutputTokens'),

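In the hunk above, `temperature` and `top_p` move from "emit only if defined" to always-present values with hard defaults (0 and 1.0), and tools and streaming options are now attached to the provider-enhanced request. Below is a rough sketch of the fallback order for the sampling parameters, assuming config values take precedence over request values as the existing test comments indicate; `getParameterValue` appears in the diff, but its signature and the surrounding types are illustrative.

```typescript
// Sketch only: fallback order is config value, then request value, then default.
interface RequestGenerationConfig {
  temperature?: number;
  topP?: number;
  maxOutputTokens?: number;
}

interface SamplingConfig {
  temperature?: number;
  top_p?: number;
  max_tokens?: number;
}

function buildSamplingParameters(
  samplingConfig: SamplingConfig,
  requestConfig: RequestGenerationConfig,
): SamplingConfig {
  const getParameterValue = (
    configValue: number | undefined,
    requestValue: number | undefined,
    defaultValue: number,
  ): number => configValue ?? requestValue ?? defaultValue;

  const maxTokens = samplingConfig.max_tokens ?? requestConfig.maxOutputTokens;

  return {
    // Parameters with request fallback and defaults (temperature 0, top_p 1.0).
    temperature: getParameterValue(
      samplingConfig.temperature,
      requestConfig.temperature,
      0.0,
    ),
    top_p: getParameterValue(samplingConfig.top_p, requestConfig.topP, 1.0),
    // max_tokens stays optional: only included when either source defines it.
    ...(maxTokens !== undefined ? { max_tokens: maxTokens } : {}),
  };
}
```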
View File

@@ -17,7 +17,6 @@ import { DashScopeOpenAICompatibleProvider } from './dashscope.js';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { AuthType } from '../../contentGenerator.js';
import type { ChatCompletionToolWithCache } from './types.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
// Mock OpenAI
@@ -254,110 +253,17 @@ describe('DashScopeOpenAICompatibleProvider', () => {
},
]);
// Last message should NOT have cache control for non-streaming requests
// Last message should NOT have cache control for non-streaming
const lastMessage = result.messages[1];
expect(lastMessage.role).toBe('user');
expect(lastMessage.content).toBe('Hello!');
});
it('should add cache control to system message only for non-streaming requests with tools', () => {
const requestWithTool: OpenAI.Chat.ChatCompletionCreateParams = {
...baseRequest,
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{
role: 'tool',
content: 'First tool output',
tool_call_id: 'call_1',
},
{
role: 'tool',
content: 'Second tool output',
tool_call_id: 'call_2',
},
{ role: 'user', content: 'Hello!' },
],
tools: [
{
type: 'function',
function: {
name: 'mockTool',
parameters: { type: 'object', properties: {} },
},
},
],
stream: false,
};
const result = provider.buildRequest(requestWithTool, 'test-prompt-id');
expect(result.messages).toHaveLength(4);
const systemMessage = result.messages[0];
expect(systemMessage.content).toEqual([
{
type: 'text',
text: 'You are a helpful assistant.',
cache_control: { type: 'ephemeral' },
},
]);
// Tool messages should remain unchanged
const firstToolMessage = result.messages[1];
expect(firstToolMessage.role).toBe('tool');
expect(firstToolMessage.content).toBe('First tool output');
const secondToolMessage = result.messages[2];
expect(secondToolMessage.role).toBe('tool');
expect(secondToolMessage.content).toBe('Second tool output');
// Last message should NOT have cache control for non-streaming requests
const lastMessage = result.messages[3];
expect(lastMessage.role).toBe('user');
expect(lastMessage.content).toBe('Hello!');
// Tools should NOT have cache control for non-streaming requests
const tools = result.tools as ChatCompletionToolWithCache[];
expect(tools).toBeDefined();
expect(tools).toHaveLength(1);
expect(tools[0].cache_control).toBeUndefined();
});
it('should add cache control to system, last history message, and last tool definition for streaming requests', () => {
it('should add cache control to both system and last messages for streaming requests', () => {
const request = { ...baseRequest, stream: true };
const requestWithToolMessage: OpenAI.Chat.ChatCompletionCreateParams = {
...request,
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{
role: 'tool',
content: 'First tool output',
tool_call_id: 'call_1',
},
{
role: 'tool',
content: 'Second tool output',
tool_call_id: 'call_2',
},
{ role: 'user', content: 'Hello!' },
],
tools: [
{
type: 'function',
function: {
name: 'mockTool',
parameters: { type: 'object', properties: {} },
},
},
],
};
const result = provider.buildRequest(request, 'test-prompt-id');
const result = provider.buildRequest(
requestWithToolMessage,
'test-prompt-id',
);
expect(result.messages).toHaveLength(4);
expect(result.messages).toHaveLength(2);
// System message should have cache control
const systemMessage = result.messages[0];
@@ -369,17 +275,8 @@ describe('DashScopeOpenAICompatibleProvider', () => {
},
]);
// Tool messages should remain unchanged
const firstToolMessage = result.messages[1];
expect(firstToolMessage.role).toBe('tool');
expect(firstToolMessage.content).toBe('First tool output');
const secondToolMessage = result.messages[2];
expect(secondToolMessage.role).toBe('tool');
expect(secondToolMessage.content).toBe('Second tool output');
// Last message should also have cache control
const lastMessage = result.messages[3];
// Last message should also have cache control for streaming
const lastMessage = result.messages[1];
expect(lastMessage.content).toEqual([
{
type: 'text',
@@ -387,40 +284,6 @@ describe('DashScopeOpenAICompatibleProvider', () => {
cache_control: { type: 'ephemeral' },
},
]);
const tools = result.tools as ChatCompletionToolWithCache[];
expect(tools).toBeDefined();
expect(tools).toHaveLength(1);
expect(tools[0].cache_control).toEqual({ type: 'ephemeral' });
});
it('should not add cache control to tool messages when request.tools is undefined', () => {
const requestWithoutConfiguredTools: OpenAI.Chat.ChatCompletionCreateParams =
{
...baseRequest,
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{
role: 'tool',
content: 'Tool output',
tool_call_id: 'call_1',
},
{ role: 'user', content: 'Hello!' },
],
};
const result = provider.buildRequest(
requestWithoutConfiguredTools,
'test-prompt-id',
);
expect(result.messages).toHaveLength(3);
const toolMessage = result.messages[1];
expect(toolMessage.role).toBe('tool');
expect(toolMessage.content).toBe('Tool output');
expect(result.tools).toBeUndefined();
});
it('should include metadata in the request', () => {
@@ -825,60 +688,6 @@ describe('DashScopeOpenAICompatibleProvider', () => {
).toBe(true); // Vision-specific parameter should be preserved
});
it('should set high resolution flag for qwen3-vl-plus', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-vl-plus',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'Please inspect the image.' },
{
type: 'image_url',
image_url: { url: 'https://example.com/vl.jpg' },
},
],
},
],
max_tokens: 50000,
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(32768);
expect(
(result as { vl_high_resolution_images?: boolean })
.vl_high_resolution_images,
).toBe(true);
});
it('should set high resolution flag for the vision-model alias', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'vision-model',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'Alias payload' },
{
type: 'image_url',
image_url: { url: 'https://example.com/alias.png' },
},
],
},
],
max_tokens: 9000,
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(8192);
expect(
(result as { vl_high_resolution_images?: boolean })
.vl_high_resolution_images,
).toBe(true);
});
it('should handle streaming requests with output token limits', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',

View File

@@ -9,7 +9,6 @@ import type {
DashScopeRequestMetadata,
ChatCompletionContentPartTextWithCache,
ChatCompletionContentPartWithCache,
ChatCompletionToolWithCache,
} from './types.js';
export class DashScopeOpenAICompatibleProvider
@@ -71,8 +70,7 @@ export class DashScopeOpenAICompatibleProvider
* Build and configure the request for DashScope API.
*
* This method applies DashScope-specific configurations including:
* - Cache control for the system message, last tool message (when tools are configured),
* and the latest history message
* - Cache control for system and user messages
* - Output token limits based on model capabilities
* - Vision model specific parameters (vl_high_resolution_images)
* - Request metadata for session tracking
@@ -86,17 +84,13 @@ export class DashScopeOpenAICompatibleProvider
userPromptId: string,
): OpenAI.Chat.ChatCompletionCreateParams {
let messages = request.messages;
let tools = request.tools;
// Apply DashScope cache control only if not disabled
if (!this.shouldDisableCacheControl()) {
const { messages: updatedMessages, tools: updatedTools } =
this.addDashScopeCacheControl(
request,
request.stream ? 'all' : 'system_only',
);
messages = updatedMessages;
tools = updatedTools;
// Add cache control to system and last messages for DashScope providers
// Only add cache control to system message for non-streaming requests
const cacheTarget = request.stream ? 'both' : 'system';
messages = this.addDashScopeCacheControl(messages, cacheTarget);
}
// Apply output token limits based on model capabilities
@@ -106,11 +100,10 @@ export class DashScopeOpenAICompatibleProvider
request.model,
);
if (this.isVisionModel(request.model)) {
if (request.model.startsWith('qwen-vl')) {
return {
...requestWithTokenLimits,
messages,
...(tools ? { tools } : {}),
...(this.buildMetadata(userPromptId) || {}),
/* @ts-expect-error dashscope exclusive */
vl_high_resolution_images: true,
@@ -120,7 +113,6 @@ export class DashScopeOpenAICompatibleProvider
return {
...requestWithTokenLimits, // Preserve all original parameters including sampling params and adjusted max_tokens
messages,
...(tools ? { tools } : {}),
...(this.buildMetadata(userPromptId) || {}),
} as OpenAI.Chat.ChatCompletionCreateParams;
}
@@ -138,67 +130,75 @@ export class DashScopeOpenAICompatibleProvider
* Add cache control flag to specified message(s) for DashScope providers
*/
private addDashScopeCacheControl(
request: OpenAI.Chat.ChatCompletionCreateParams,
cacheControl: 'system_only' | 'all',
): {
messages: OpenAI.Chat.ChatCompletionMessageParam[];
tools?: ChatCompletionToolWithCache[];
} {
const messages = request.messages;
const systemIndex = messages.findIndex((msg) => msg.role === 'system');
const lastIndex = messages.length - 1;
const updatedMessages =
messages.length === 0
? messages
: messages.map((message, index) => {
const shouldAddCacheControl = Boolean(
(index === systemIndex && systemIndex !== -1) ||
(index === lastIndex && cacheControl === 'all'),
);
if (
!shouldAddCacheControl ||
!('content' in message) ||
message.content === null ||
message.content === undefined
) {
return message;
}
return {
...message,
content: this.addCacheControlToContent(message.content),
} as OpenAI.Chat.ChatCompletionMessageParam;
});
const updatedTools =
cacheControl === 'all' && request.tools?.length
? this.addCacheControlToTools(request.tools)
: (request.tools as ChatCompletionToolWithCache[] | undefined);
return {
messages: updatedMessages,
tools: updatedTools,
};
}
private addCacheControlToTools(
tools: OpenAI.Chat.ChatCompletionTool[],
): ChatCompletionToolWithCache[] {
if (tools.length === 0) {
return tools as ChatCompletionToolWithCache[];
messages: OpenAI.Chat.ChatCompletionMessageParam[],
target: 'system' | 'last' | 'both' = 'both',
): OpenAI.Chat.ChatCompletionMessageParam[] {
if (messages.length === 0) {
return messages;
}
const updatedTools = [...tools] as ChatCompletionToolWithCache[];
const lastToolIndex = tools.length - 1;
updatedTools[lastToolIndex] = {
...updatedTools[lastToolIndex],
cache_control: { type: 'ephemeral' },
};
let updatedMessages = [...messages];
return updatedTools;
// Add cache control to system message if requested
if (target === 'system' || target === 'both') {
updatedMessages = this.addCacheControlToMessage(
updatedMessages,
'system',
);
}
// Add cache control to last message if requested
if (target === 'last' || target === 'both') {
updatedMessages = this.addCacheControlToMessage(updatedMessages, 'last');
}
return updatedMessages;
}
/**
* Helper method to add cache control to a specific message
*/
private addCacheControlToMessage(
messages: OpenAI.Chat.ChatCompletionMessageParam[],
target: 'system' | 'last',
): OpenAI.Chat.ChatCompletionMessageParam[] {
const updatedMessages = [...messages];
const messageIndex = this.findTargetMessageIndex(messages, target);
if (messageIndex === -1) {
return updatedMessages;
}
const message = updatedMessages[messageIndex];
// Only process messages that have content
if (
'content' in message &&
message.content !== null &&
message.content !== undefined
) {
const updatedContent = this.addCacheControlToContent(message.content);
updatedMessages[messageIndex] = {
...message,
content: updatedContent,
} as OpenAI.Chat.ChatCompletionMessageParam;
}
return updatedMessages;
}
/**
* Find the index of the target message (system or last)
*/
private findTargetMessageIndex(
messages: OpenAI.Chat.ChatCompletionMessageParam[],
target: 'system' | 'last',
): number {
if (target === 'system') {
return messages.findIndex((msg) => msg.role === 'system');
} else {
return messages.length - 1;
}
}
/**
@@ -267,28 +267,6 @@ export class DashScopeOpenAICompatibleProvider
return contentArray;
}
private isVisionModel(model: string | undefined): boolean {
if (!model) {
return false;
}
const normalized = model.toLowerCase();
if (normalized === 'vision-model') {
return true;
}
if (normalized.startsWith('qwen-vl')) {
return true;
}
if (normalized.startsWith('qwen3-vl-plus')) {
return true;
}
return false;
}
/**
* Apply output token limit to a request's max_tokens parameter.
*

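The rewritten `addDashScopeCacheControl` above marks the system message with DashScope's `cache_control: { type: 'ephemeral' }` extension and, for streaming requests, the last message as well. A condensed TypeScript sketch of that flow follows, using simplified local message types and handling only plain string content; the real implementation works on the OpenAI chat types and also converts array content.

```typescript
// Sketch only: minimal local shapes stand in for OpenAI.Chat.* types.
interface TextPartWithCache {
  type: 'text';
  text: string;
  cache_control?: { type: 'ephemeral' }; // DashScope-specific extension
}

interface ChatMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | TextPartWithCache[] | null;
}

function addDashScopeCacheControl(
  messages: ChatMessage[],
  target: 'system' | 'last' | 'both',
): ChatMessage[] {
  if (messages.length === 0) return messages;
  const updated = [...messages];

  const mark = (index: number) => {
    if (index === -1) return;
    const message = updated[index];
    if (typeof message.content !== 'string') return; // keep the sketch simple
    updated[index] = {
      ...message,
      content: [
        {
          type: 'text',
          text: message.content,
          cache_control: { type: 'ephemeral' },
        },
      ],
    };
  };

  if (target !== 'last') {
    mark(updated.findIndex((m) => m.role === 'system'));
  }
  if (target !== 'system') {
    mark(updated.length - 1);
  }
  return updated;
}

// buildRequest derives the target from the stream flag, as in the diff:
//   const cacheTarget = request.stream ? 'both' : 'system';
```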
View File

@@ -1,132 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import type OpenAI from 'openai';
import { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import type { Config } from '../../../config/config.js';
// Mock OpenAI client to avoid real network calls
vi.mock('openai', () => ({
default: vi.fn().mockImplementation((config) => ({
config,
})),
}));
describe('DeepSeekOpenAICompatibleProvider', () => {
let provider: DeepSeekOpenAICompatibleProvider;
let mockContentGeneratorConfig: ContentGeneratorConfig;
let mockCliConfig: Config;
beforeEach(() => {
vi.clearAllMocks();
mockContentGeneratorConfig = {
apiKey: 'test-api-key',
baseUrl: 'https://api.deepseek.com/v1',
model: 'deepseek-chat',
} as ContentGeneratorConfig;
mockCliConfig = {
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
provider = new DeepSeekOpenAICompatibleProvider(
mockContentGeneratorConfig,
mockCliConfig,
);
});
describe('isDeepSeekProvider', () => {
it('returns true when baseUrl includes deepseek', () => {
const result = DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(
mockContentGeneratorConfig,
);
expect(result).toBe(true);
});
it('returns false for non deepseek baseUrl', () => {
const config = {
...mockContentGeneratorConfig,
baseUrl: 'https://api.example.com/v1',
} as ContentGeneratorConfig;
const result =
DeepSeekOpenAICompatibleProvider.isDeepSeekProvider(config);
expect(result).toBe(false);
});
});
describe('buildRequest', () => {
const userPromptId = 'prompt-123';
it('converts array content into a string', () => {
const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'deepseek-chat',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'text', text: ' world' },
],
},
],
};
const result = provider.buildRequest(originalRequest, userPromptId);
expect(result.messages).toHaveLength(1);
expect(result.messages?.[0]).toEqual({
role: 'user',
content: 'Hello world',
});
expect(originalRequest.messages?.[0].content).toEqual([
{ type: 'text', text: 'Hello' },
{ type: 'text', text: ' world' },
]);
});
it('leaves string content unchanged', () => {
const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'deepseek-chat',
messages: [
{
role: 'user',
content: 'Hello world',
},
],
};
const result = provider.buildRequest(originalRequest, userPromptId);
expect(result.messages?.[0].content).toBe('Hello world');
});
it('throws when encountering non-text multimodal parts', () => {
const originalRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'deepseek-chat',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{
type: 'image_url',
image_url: { url: 'https://example.com/image.png' },
},
],
},
],
};
expect(() =>
provider.buildRequest(originalRequest, userPromptId),
).toThrow(/only supports text content/i);
});
});
});

View File

@@ -1,79 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type OpenAI from 'openai';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DefaultOpenAICompatibleProvider } from './default.js';
export class DeepSeekOpenAICompatibleProvider extends DefaultOpenAICompatibleProvider {
constructor(
contentGeneratorConfig: ContentGeneratorConfig,
cliConfig: Config,
) {
super(contentGeneratorConfig, cliConfig);
}
static isDeepSeekProvider(
contentGeneratorConfig: ContentGeneratorConfig,
): boolean {
const baseUrl = contentGeneratorConfig.baseUrl ?? '';
return baseUrl.toLowerCase().includes('api.deepseek.com');
}
override buildRequest(
request: OpenAI.Chat.ChatCompletionCreateParams,
userPromptId: string,
): OpenAI.Chat.ChatCompletionCreateParams {
const baseRequest = super.buildRequest(request, userPromptId);
if (!baseRequest.messages?.length) {
return baseRequest;
}
const messages = baseRequest.messages.map((message) => {
if (!('content' in message)) {
return message;
}
const { content } = message;
if (
typeof content === 'string' ||
content === null ||
content === undefined
) {
return message;
}
if (!Array.isArray(content)) {
return message;
}
const text = content
.map((part) => {
if (part.type !== 'text') {
throw new Error(
`DeepSeek provider only supports text content. Found non-text part of type '${part.type}' in message with role '${message.role}'.`,
);
}
return part.text ?? '';
})
.join('');
return {
...message,
content: text,
} as OpenAI.Chat.ChatCompletionMessageParam;
});
return {
...baseRequest,
messages,
};
}
}

View File

@@ -1,5 +1,4 @@
export { DashScopeOpenAICompatibleProvider } from './dashscope.js';
export { DeepSeekOpenAICompatibleProvider } from './deepseek.js';
export { OpenRouterOpenAICompatibleProvider } from './openrouter.js';
export { DefaultOpenAICompatibleProvider } from './default.js';
export type {

View File

@@ -11,10 +11,6 @@ export type ChatCompletionContentPartWithCache =
| OpenAI.Chat.ChatCompletionContentPartImage
| OpenAI.Chat.ChatCompletionContentPartRefusal;
export type ChatCompletionToolWithCache = OpenAI.Chat.ChatCompletionTool & {
cache_control?: { type: 'ephemeral' };
};
export interface OpenAICompatibleProvider {
buildHeaders(): Record<string, string | undefined>;
buildClient(): OpenAI;

View File

@@ -278,11 +278,6 @@ describe('tokenLimit with output type', () => {
expect(tokenLimit('qwen-vl-max-latest', 'output')).toBe(8192); // 8K output
});
it('should return different limits for input vs output for qwen3-vl-plus', () => {
expect(tokenLimit('qwen3-vl-plus', 'input')).toBe(262144); // 256K input
expect(tokenLimit('qwen3-vl-plus', 'output')).toBe(32768); // 32K output
});
it('should return same default limits for unknown models', () => {
expect(tokenLimit('unknown-model', 'input')).toBe(DEFAULT_TOKEN_LIMIT); // 128K input
expect(tokenLimit('unknown-model', 'output')).toBe(

View File

@@ -115,7 +115,7 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
[/^coder-model$/, LIMITS['1m']],
// Commercial Qwen3-Max-Preview: 256K token context
[/^qwen3-max(-preview)?(-.*)?$/, LIMITS['256k']], // catches "qwen3-max" or "qwen3-max-preview" and date variants
[/^qwen3-max-preview(-.*)?$/, LIMITS['256k']], // catches "qwen3-max-preview" and date variants
// Open-source Qwen3-Coder variants: 256K native
[/^qwen3-coder-.*$/, LIMITS['256k']],
@@ -135,7 +135,6 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
[/^qwen-turbo.*$/, LIMITS['128k']],
// Qwen Vision Models
[/^qwen3-vl-plus$/, LIMITS['256k']], // Qwen3-VL-Plus: 256K input
[/^qwen-vl-max.*$/, LIMITS['128k']],
// Generic vision-model: same as qwen-vl-max (128K token context)
@@ -179,8 +178,8 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
// Generic coder-model: same as qwen3-coder-plus (64K max output tokens)
[/^coder-model$/, LIMITS['64k']],
// Qwen3-Max: 65,536 max output tokens
[/^qwen3-max(-preview)?(-.*)?$/, LIMITS['64k']],
// Qwen3-Max-Preview: 65,536 max output tokens
[/^qwen3-max-preview(-.*)?$/, LIMITS['64k']],
// Qwen-VL-Max-Latest: 8,192 max output tokens
[/^qwen-vl-max-latest$/, LIMITS['8k']],
@@ -188,8 +187,8 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
// Generic vision-model: same as qwen-vl-max-latest (8K max output tokens)
[/^vision-model$/, LIMITS['8k']],
// Qwen3-VL-Plus: 32K max output tokens
[/^qwen3-vl-plus$/, LIMITS['32k']],
// Qwen3-VL-Plus: 8,192 max output tokens
[/^qwen3-vl-plus$/, LIMITS['8k']],
];
/**

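The input and output limits changed above are looked up by matching the model name against ordered [RegExp, TokenCount] tables (PATTERNS and OUTPUT_PATTERNS). A minimal sketch of that lookup, with illustrative pattern entries and an assumed helper name (resolveTokenLimit) rather than the repository's actual tokenLimit implementation: the first matching pattern wins, which is why the broadened /^qwen3-max(-preview)?(-.*)?$/ entry must keep covering the dated -preview variants it replaces.
type TokenCount = number;
const DEFAULT_TOKEN_LIMIT: TokenCount = 131_072; // 128K, assumed default
// Illustrative subset of a pattern table; values mirror the comments in the diff.
const EXAMPLE_PATTERNS: Array<[RegExp, TokenCount]> = [
  [/^qwen3-max(-preview)?(-.*)?$/, 262_144], // 256K: qwen3-max, qwen3-max-preview, date variants
  [/^qwen3-vl-plus$/, 262_144], // 256K input
  [/^qwen-vl-max.*$/, 131_072], // 128K
];
function resolveTokenLimit(
  model: string,
  patterns: Array<[RegExp, TokenCount]>,
): TokenCount {
  const normalized = model.toLowerCase();
  // First matching pattern wins; fall back to the default limit.
  for (const [pattern, limit] of patterns) {
    if (pattern.test(normalized)) {
      return limit;
    }
  }
  return DEFAULT_TOKEN_LIMIT;
}
// e.g. resolveTokenLimit('qwen3-max-preview-2025-09', EXAMPLE_PATTERNS) === 262_144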
View File

@@ -84,7 +84,6 @@ export interface ToolCallRequestInfo {
args: Record<string, unknown>;
isClientInitiated: boolean;
prompt_id: string;
response_id?: string;
}
export interface ToolCallResponseInfo {
@@ -203,7 +202,6 @@ export class Turn {
readonly pendingToolCalls: ToolCallRequestInfo[];
private debugResponses: GenerateContentResponse[];
finishReason: FinishReason | undefined;
private currentResponseId?: string;
constructor(
private readonly chat: GeminiChat,
@@ -249,11 +247,6 @@ export class Turn {
this.debugResponses.push(resp);
// Track the current response ID for tool call correlation
if (resp.responseId) {
this.currentResponseId = resp.responseId;
}
const thoughtPart = resp.candidates?.[0]?.content?.parts?.[0];
if (thoughtPart?.thought) {
// Thought always has a bold "subject" part enclosed in double asterisks
@@ -353,7 +346,6 @@ export class Turn {
args,
isClientInitiated: false,
prompt_id: this.prompt_id,
response_id: this.currentResponseId,
};
this.pendingToolCalls.push(toolCallRequest);

View File

@@ -381,7 +381,6 @@ export class SubAgentScope {
let roundText = '';
let lastUsage: GenerateContentResponseUsageMetadata | undefined =
undefined;
let currentResponseId: string | undefined = undefined;
for await (const streamEvent of responseStream) {
if (abortController.signal.aborted) {
this.terminateMode = SubagentTerminateMode.CANCELLED;
@@ -396,10 +395,6 @@ export class SubAgentScope {
// Handle chunk events
if (streamEvent.type === 'chunk') {
const resp = streamEvent.value;
// Track the response ID for tool call correlation
if (resp.responseId) {
currentResponseId = resp.responseId;
}
if (resp.functionCalls) functionCalls.push(...resp.functionCalls);
const content = resp.candidates?.[0]?.content;
const parts = content?.parts || [];
@@ -460,7 +455,6 @@ export class SubAgentScope {
abortController,
promptId,
turnCounter,
currentResponseId,
);
} else {
// No tool calls — treat this as the model's final answer.
@@ -549,7 +543,6 @@ export class SubAgentScope {
* @param {FunctionCall[]} functionCalls - An array of `FunctionCall` objects to process.
* @param {ToolRegistry} toolRegistry - The tool registry to look up and execute tools.
* @param {AbortController} abortController - An `AbortController` to signal cancellation of tool executions.
* @param {string} responseId - Optional API response ID for correlation with tool calls.
* @returns {Promise<Content[]>} A promise that resolves to an array of `Content` parts representing the tool responses,
* which are then used to update the chat history.
*/
@@ -558,7 +551,6 @@ export class SubAgentScope {
abortController: AbortController,
promptId: string,
currentRound: number,
responseId?: string,
): Promise<Content[]> {
const toolResponseParts: Part[] = [];
@@ -712,7 +704,6 @@ export class SubAgentScope {
args,
isClientInitiated: true,
prompt_id: promptId,
response_id: responseId,
};
const description = this.getToolDescription(toolName, args);

View File

@@ -10,7 +10,6 @@ export const EVENT_USER_PROMPT = 'qwen-code.user_prompt';
export const EVENT_TOOL_CALL = 'qwen-code.tool_call';
export const EVENT_API_REQUEST = 'qwen-code.api_request';
export const EVENT_API_ERROR = 'qwen-code.api_error';
export const EVENT_API_CANCEL = 'qwen-code.api_cancel';
export const EVENT_API_RESPONSE = 'qwen-code.api_response';
export const EVENT_CLI_CONFIG = 'qwen-code.config';
export const EVENT_FLASH_FALLBACK = 'qwen-code.flash_fallback';

View File

@@ -17,7 +17,6 @@ export { SpanStatusCode, ValueType } from '@opentelemetry/api';
export { SemanticAttributes } from '@opentelemetry/semantic-conventions';
export {
logApiError,
logApiCancel,
logApiRequest,
logApiResponse,
logChatCompression,
@@ -36,7 +35,6 @@ export {
} from './sdk.js';
export {
ApiErrorEvent,
ApiCancelEvent,
ApiRequestEvent,
ApiResponseEvent,
ConversationFinishedEvent,
@@ -56,5 +54,4 @@ export type {
TelemetryEvent,
} from './types.js';
export * from './uiTelemetry.js';
export { QwenLogger } from './qwen-logger/qwen-logger.js';
export { DEFAULT_OTLP_ENDPOINT, DEFAULT_TELEMETRY_TARGET };

View File

@@ -550,7 +550,6 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'success',
success: true,
decision: ToolCallDecision.ACCEPT,
prompt_id: 'prompt-id-1',
@@ -620,7 +619,6 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'error',
success: false,
decision: ToolCallDecision.REJECT,
prompt_id: 'prompt-id-2',
@@ -693,7 +691,6 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'success',
success: true,
decision: ToolCallDecision.MODIFY,
prompt_id: 'prompt-id-3',
@@ -765,7 +762,6 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'success',
success: true,
prompt_id: 'prompt-id-4',
tool_type: 'native',
@@ -838,7 +834,6 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'error',
success: false,
error: 'test-error',
'error.message': 'test-error',

View File

@@ -12,7 +12,6 @@ import { safeJsonStringify } from '../utils/safeJsonStringify.js';
import { UserAccountManager } from '../utils/userAccountManager.js';
import {
EVENT_API_ERROR,
EVENT_API_CANCEL,
EVENT_API_REQUEST,
EVENT_API_RESPONSE,
EVENT_CHAT_COMPRESSION,
@@ -46,7 +45,6 @@ import { QwenLogger } from './qwen-logger/qwen-logger.js';
import { isTelemetrySdkInitialized } from './sdk.js';
import type {
ApiErrorEvent,
ApiCancelEvent,
ApiRequestEvent,
ApiResponseEvent,
ChatCompressionEvent,
@@ -284,32 +282,6 @@ export function logApiError(config: Config, event: ApiErrorEvent): void {
);
}
export function logApiCancel(config: Config, event: ApiCancelEvent): void {
const uiEvent = {
...event,
'event.name': EVENT_API_CANCEL,
'event.timestamp': new Date().toISOString(),
} as UiEvent;
uiTelemetryService.addEvent(uiEvent);
QwenLogger.getInstance(config)?.logApiCancelEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
...getCommonAttributes(config),
...event,
'event.name': EVENT_API_CANCEL,
'event.timestamp': new Date().toISOString(),
model_name: event.model,
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `API request cancelled for ${event.model}.`,
attributes,
};
logger.emit(logRecord);
}
export function logApiResponse(config: Config, event: ApiResponseEvent): void {
const uiEvent = {
...event,

View File

@@ -15,7 +15,6 @@ import type {
ApiRequestEvent,
ApiResponseEvent,
ApiErrorEvent,
ApiCancelEvent,
FileOperationEvent,
FlashFallbackEvent,
LoopDetectedEvent,
@@ -412,7 +411,6 @@ export class QwenLogger {
{
properties: {
prompt_id: event.prompt_id,
response_id: event.response_id,
},
snapshots: JSON.stringify({
function_name: event.function_name,
@@ -429,19 +427,6 @@ export class QwenLogger {
this.flushIfNeeded();
}
logApiCancelEvent(event: ApiCancelEvent): void {
const rumEvent = this.createActionEvent('api', 'api_cancel', {
properties: {
model: event.model,
prompt_id: event.prompt_id,
auth_type: event.auth_type,
},
});
this.enqueueLogEvent(rumEvent);
this.flushIfNeeded();
}
logFileOperationEvent(event: FileOperationEvent): void {
const rumEvent = this.createActionEvent(
'file_operation',

View File

@@ -127,13 +127,11 @@ export class ToolCallEvent implements BaseTelemetryEvent {
function_name: string;
function_args: Record<string, unknown>;
duration_ms: number;
status: 'success' | 'error' | 'cancelled';
success: boolean; // Keep for backward compatibility
success: boolean;
decision?: ToolCallDecision;
error?: string;
error_type?: string;
prompt_id: string;
response_id?: string;
tool_type: 'native' | 'mcp';
// eslint-disable-next-line @typescript-eslint/no-explicit-any
metadata?: { [key: string]: any };
@@ -144,15 +142,13 @@ export class ToolCallEvent implements BaseTelemetryEvent {
this.function_name = call.request.name;
this.function_args = call.request.args;
this.duration_ms = call.durationMs ?? 0;
this.status = call.status;
this.success = call.status === 'success'; // Keep for backward compatibility
this.success = call.status === 'success';
this.decision = call.outcome
? getDecisionFromOutcome(call.outcome)
: undefined;
this.error = call.response.error?.message;
this.error_type = call.response.errorType;
this.prompt_id = call.request.prompt_id;
this.response_id = call.request.response_id;
this.tool_type =
typeof call.tool !== 'undefined' && call.tool instanceof DiscoveredMCPTool
? 'mcp'
@@ -228,22 +224,6 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
}
}
export class ApiCancelEvent implements BaseTelemetryEvent {
'event.name': 'api_cancel';
'event.timestamp': string;
model: string;
prompt_id: string;
auth_type?: string;
constructor(model: string, prompt_id: string, auth_type?: string) {
this['event.name'] = 'api_cancel';
this['event.timestamp'] = new Date().toISOString();
this.model = model;
this.prompt_id = prompt_id;
this.auth_type = auth_type;
}
}
export class ApiResponseEvent implements BaseTelemetryEvent {
'event.name': 'api_response';
'event.timestamp': string; // ISO 8601
@@ -562,7 +542,6 @@ export type TelemetryEvent =
| ToolCallEvent
| ApiRequestEvent
| ApiErrorEvent
| ApiCancelEvent
| ApiResponseEvent
| FlashFallbackEvent
| LoopDetectedEvent

View File

@@ -15,7 +15,6 @@ import {
EVENT_TOOL_CALL,
} from './constants.js';
import type {
CancelledToolCall,
CompletedToolCall,
ErroredToolCall,
SuccessfulToolCall,
@@ -26,7 +25,7 @@ import { MockTool } from '../test-utils/tools.js';
const createFakeCompletedToolCall = (
name: string,
success: boolean | 'cancelled',
success: boolean,
duration = 100,
outcome?: ToolConfirmationOutcome,
error?: Error,
@@ -40,7 +39,7 @@ const createFakeCompletedToolCall = (
};
const tool = new MockTool(name);
if (success === true) {
if (success) {
return {
status: 'success',
request,
@@ -64,30 +63,6 @@ const createFakeCompletedToolCall = (
durationMs: duration,
outcome,
} as SuccessfulToolCall;
} else if (success === 'cancelled') {
return {
status: 'cancelled',
request,
tool,
invocation: tool.build({ param: 'test' }),
response: {
callId: request.callId,
responseParts: [
{
functionResponse: {
id: request.callId,
name,
response: { error: 'Tool cancelled' },
},
},
],
error: new Error('Tool cancelled'),
errorType: ToolErrorType.UNKNOWN,
resultDisplay: 'Cancelled!',
},
durationMs: duration,
outcome,
} as CancelledToolCall;
} else {
return {
status: 'error',
@@ -436,40 +411,6 @@ describe('UiTelemetryService', () => {
});
});
it('should process a single cancelled ToolCallEvent', () => {
const toolCall = createFakeCompletedToolCall(
'test_tool',
'cancelled',
180,
ToolConfirmationOutcome.Cancel,
);
service.addEvent({
...structuredClone(new ToolCallEvent(toolCall)),
'event.name': EVENT_TOOL_CALL,
} as ToolCallEvent & { 'event.name': typeof EVENT_TOOL_CALL });
const metrics = service.getMetrics();
const { tools } = metrics;
expect(tools.totalCalls).toBe(1);
expect(tools.totalSuccess).toBe(0);
expect(tools.totalFail).toBe(1);
expect(tools.totalDurationMs).toBe(180);
expect(tools.totalDecisions[ToolCallDecision.REJECT]).toBe(1);
expect(tools.byName['test_tool']).toEqual({
count: 1,
success: 0,
fail: 1,
durationMs: 180,
decisions: {
[ToolCallDecision.ACCEPT]: 0,
[ToolCallDecision.REJECT]: 1,
[ToolCallDecision.MODIFY]: 0,
[ToolCallDecision.AUTO_ACCEPT]: 0,
},
});
});
it('should process a ToolCallEvent with modify decision', () => {
const toolCall = createFakeCompletedToolCall(
'test_tool',
@@ -696,34 +637,6 @@ describe('UiTelemetryService', () => {
expect(service.getLastPromptTokenCount()).toBe(0);
expect(spy).toHaveBeenCalledOnce();
});
it('should correctly set status field for success/error/cancelled calls', () => {
const successCall = createFakeCompletedToolCall(
'success_tool',
true,
100,
);
const errorCall = createFakeCompletedToolCall('error_tool', false, 150);
const cancelledCall = createFakeCompletedToolCall(
'cancelled_tool',
'cancelled',
200,
);
const successEvent = new ToolCallEvent(successCall);
const errorEvent = new ToolCallEvent(errorCall);
const cancelledEvent = new ToolCallEvent(cancelledCall);
// Verify status field is correctly set
expect(successEvent.status).toBe('success');
expect(errorEvent.status).toBe('error');
expect(cancelledEvent.status).toBe('cancelled');
// Verify backward compatibility with success field
expect(successEvent.success).toBe(true);
expect(errorEvent.success).toBe(false);
expect(cancelledEvent.success).toBe(false);
});
});
describe('Tool Call Event with Line Count Metadata', () => {

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.0.14",
"version": "0.0.14-nightly.1",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {