Compare commits

..

9 Commits

Author SHA1 Message Date
mingholy.lmh
772130a487 fix: remove scripts 2025-10-17 19:37:05 +08:00
mingholy.lmh
be48c7eb4c fix: rename gemini to qwen, remove google auth hint 2025-10-17 17:00:16 +08:00
mingholy.lmh
4d773f1046 fix: lint issue 2025-10-15 22:22:54 +08:00
mingholy.lmh
0d0fd68e63 chore: re-organize labels for better triage results 2025-10-15 21:44:49 +08:00
Mingholy
40810945e0 fix: add missing trace info and cancellation events (#791)
* fix: add missing trace info and cancellation events

* fix: re-organize tool/request cancellation logging
2025-10-14 15:41:30 +08:00
Mingholy
e28255edb6 fix: token limits for qwen3-max (#724) 2025-10-14 15:40:20 +08:00
Mingholy
ae3223a317 fix: remove unavailable options (#685) 2025-10-14 15:39:48 +08:00
tanzhenxin
270dda4aa7 fix: invalid tool_calls request due to improper cancellation (#790) 2025-10-13 09:25:31 +08:00
Fan
d4fa15dd53 remove topp default value 0.0 (#785) 2025-10-09 15:41:57 +08:00
23 changed files with 629 additions and 135 deletions

View File

@@ -1,6 +1,6 @@
name: 'Bug Report'
description: 'Report a bug to help us improve Qwen Code'
labels: ['kind/bug', 'status/need-triage']
labels: ['type/bug', 'status/needs-triage']
body:
- type: 'markdown'
attributes:

View File

@@ -1,8 +1,8 @@
name: 'Feature Request'
description: 'Suggest an idea for this project'
labels:
- 'kind/enhancement'
- 'status/need-triage'
- 'type/feature-request'
- 'status/needs-triage'
body:
- type: 'markdown'
attributes:

View File

@@ -40,21 +40,13 @@ process_pr() {
fi
if [[ -z "${ISSUE_NUMBER}" ]]; then
echo " No linked issue found for PR #${PR_NUMBER}, adding status/need-issue label"
if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --add-label "status/need-issue" 2>/dev/null; then
echo " ⚠️ Failed to add label (may already exist or have permission issues)"
fi
# Add PR number to the list
if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then
PRS_NEEDING_COMMENT="${PR_NUMBER}"
else
PRS_NEEDING_COMMENT="${PRS_NEEDING_COMMENT},${PR_NUMBER}"
fi
echo "needs_comment=true" >> "${GITHUB_OUTPUT}"
echo " No linked issue found for PR #${PR_NUMBER} - this is acceptable for independent contributions"
# We no longer require PRs to have linked issues
# Independent valuable contributions are encouraged
else
echo "🔗 Found linked issue #${ISSUE_NUMBER}"
# Remove status/need-issue label if present
# Remove status/need-issue label if present (legacy cleanup)
if ! gh pr edit "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --remove-label "status/need-issue" 2>/dev/null; then
echo " status/need-issue label not present or could not be removed"
fi
@@ -99,7 +91,7 @@ process_pr() {
local LABELS_TO_REMOVE=""
for label in "${PR_LABEL_ARRAY[@]}"; do
if [[ -n "${label}" ]] && [[ " ${ISSUE_LABEL_ARRAY[*]} " != *" ${label} "* ]]; then
# Don't remove status/need-issue since we already handled it
# Don't remove status/need-issue since we already handled it (legacy cleanup)
if [[ "${label}" != "status/need-issue" ]]; then
if [[ -z "${LABELS_TO_REMOVE}" ]]; then
LABELS_TO_REMOVE="${label}"

View File

@@ -0,0 +1,201 @@
name: 'Check Issue Completeness'
on:
issues:
types:
- 'opened'
- 'edited'
permissions:
contents: 'read'
issues: 'write'
jobs:
check-issue-info:
timeout-minutes: 2
if: |-
${{ github.repository == 'QwenLM/qwen-code' && contains(github.event.issue.labels.*.name, 'type/bug') }}
runs-on: 'ubuntu-latest'
steps:
- name: 'Check for Client Information'
id: 'check_info'
env:
ISSUE_BODY: '${{ github.event.issue.body }}'
run: |-
echo "Checking issue body for required information..."
# Convert issue body to lowercase for case-insensitive matching
ISSUE_BODY_LOWER=$(echo "$ISSUE_BODY" | tr '[:upper:]' '[:lower:]')
# Initialize flags
HAS_VERSION=false
HAS_OS_INFO=false
HAS_AUTH_METHOD=false
HAS_ABOUT_OUTPUT=false
MISSING_INFO=()
# Check for /about command output by looking for its characteristic fields
# The /about output contains: CLI Version, Git Commit, Model, Sandbox, OS, Auth Method
if echo "$ISSUE_BODY_LOWER" | grep -qE 'cli version.*[0-9]+\.[0-9]+\.[0-9]+'; then
HAS_ABOUT_OUTPUT=true
HAS_VERSION=true
fi
# If full /about output is not detected, check individual components
if [ "$HAS_ABOUT_OUTPUT" = false ]; then
# Check for version information (various formats)
if echo "$ISSUE_BODY_LOWER" | grep -qE '(cli version|version|v)[[:space:]]*[0-9]+\.[0-9]+\.[0-9]+'; then
HAS_VERSION=true
fi
# Check for OS information
if echo "$ISSUE_BODY_LOWER" | grep -qE '(^os[[:space:]]|macos|windows|linux|ubuntu|debian|fedora|arch|darwin|win32|platform)'; then
HAS_OS_INFO=true
fi
# Check for Auth Method information
if echo "$ISSUE_BODY_LOWER" | grep -qE '(auth method|authentication|login|qwen-oauth|api.?config|oauth)'; then
HAS_AUTH_METHOD=true
fi
else
# If /about output is present, assume it contains OS and auth info
HAS_OS_INFO=true
HAS_AUTH_METHOD=true
fi
# Determine what's missing
if [ "$HAS_ABOUT_OUTPUT" = false ]; then
if [ "$HAS_VERSION" = false ]; then
MISSING_INFO+=("Qwen Code version")
fi
if [ "$HAS_OS_INFO" = false ]; then
MISSING_INFO+=("operating system information")
fi
if [ "$HAS_AUTH_METHOD" = false ]; then
MISSING_INFO+=("authentication/login method")
fi
# Suggest providing /about output for completeness
if [ "$HAS_VERSION" = false ] || [ "$HAS_OS_INFO" = false ] || [ "$HAS_AUTH_METHOD" = false ]; then
MISSING_INFO+=("full output of the \`/about\` command (recommended)")
fi
fi
# Set output variables
if [ ${#MISSING_INFO[@]} -eq 0 ]; then
echo "info_complete=true" >> "$GITHUB_OUTPUT"
echo "All required information is present."
else
echo "info_complete=false" >> "$GITHUB_OUTPUT"
# Join array elements with comma
MISSING_LIST=$(IFS=','; echo "${MISSING_INFO[*]}")
echo "missing_info=$MISSING_LIST" >> "$GITHUB_OUTPUT"
echo "Missing information: $MISSING_LIST"
fi
- name: 'Comment on Issue if Information is Missing'
if: |-
${{ steps.check_info.outputs.info_complete == 'false' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7
env:
MISSING_INFO: '${{ steps.check_info.outputs.missing_info }}'
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
const missingInfo = process.env.MISSING_INFO.split(',');
const missingList = missingInfo.map(item => `- ${item}`).join('\n');
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.data.find(comment =>
comment.user.type === 'Bot' &&
comment.body.includes('Missing Required Information')
);
const commentBody = `### ⚠️ Missing Required Information
Thank you for reporting this issue! To help us investigate and resolve this problem more effectively, we need some additional information:
${missingList}
### How to provide this information:
Please run the following command and paste the complete output:
\`\`\`bash
qwen
# Then in the interactive CLI, run:
/about
\`\`\`
The output should look like:
\`\`\`
CLI Version 0.0.14
Git Commit 9a0cb64a
Model coder-model
Sandbox no sandbox
OS darwin
Auth Method qwen-oauth
\`\`\`
Once you provide this information, we'll be able to assist you better. Thank you! 🙏`;
if (botComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: commentBody
});
console.log('Updated existing comment about missing information.');
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: commentBody
});
console.log('Created new comment about missing information.');
}
- name: 'Add status/need-information Label'
if: |-
${{ steps.check_info.outputs.info_complete == 'false' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
labels: ['status/need-information']
});
console.log('Added status/need-information label.');
- name: 'Remove status/need-information Label if Complete'
if: |-
${{ steps.check_info.outputs.info_complete == 'true' }}
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea' # ratchet:actions/github-script@v7
continue-on-error: true
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
try {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
name: 'status/need-information'
});
console.log('Removed status/need-information label as information is now complete.');
} catch (error) {
if (error.status === 404) {
console.log('Label not found on issue, nothing to remove.');
} else {
throw error;
}
}

View File

@@ -71,20 +71,20 @@ jobs:
1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels.
2. Use shell command `echo` to check the issue title and body provided in the environment variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
3. Ignore any existing priorities or tags on the issue. Just report your findings.
4. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*. For area/* and kind/* limit yourself to only the single most applicable label in each case.
4. Select the most relevant labels from the existing labels, focusing on type/*, category/*, scope/*, status/* and priority/*. For category/* and type/* limit yourself to only the single most applicable label in each case.
6. Apply the selected labels to this issue using: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --add-label "label1,label2"`.
7. For each issue, check whether the CLI version is present; this is usually in the output of the /about command and will look like 0.1.5. If the reported version is more than 6 versions older than the most recent release, add the status/need-retesting label.
8. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label.
9. Use Area definitions mentioned below to help you narrow down issues.
9. Use Category and Scope definitions mentioned below to help you narrow down issues.
## Guidelines
- Only use labels that already exist in the repository
- Do not add comments or modify the issue content
- Triage only the current issue
- Identify only one area/ label
- Identify only one kind/ label
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these
- Identify only one category/ label
- Identify only one type/ label
- Identify all applicable scope/*, status/* and priority/* labels based on the issue content. It's ok to have multiple of these
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario
- Reference all shell variables as "${VAR}" (with quotes and braces)
- Output only valid JSON format
@@ -127,45 +127,55 @@ jobs:
Things you should know:
- If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue
- This product is designed to use different models eg.. using pro, downgrading to flash etc. when users report that they dont expect the model to change those would be categorized as feature requests.
Definition of Areas
area/ux:
- Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance.
- I am seeing my screen flicker when using Gemini CLI
- I am seeing the output malformed
- Theme changes aren't taking effect
- My keyboard inputs aren't being recognized
area/platform:
- Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework.
area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features.
area/models:
- i am not getting a response that is reasonable or expected. this can include things like
- I am calling a tool and the tool is not performing as expected.
- i am expecting a tool to be called and it is not getting called ,
- Including experience when using
- built-in tools (e.g., web search, code interpreter, read file, writefile, etc..),
- Function calling issues should be under this area
- i am getting responses from the model that are malformed.
- Issues concerning Gemini quality of response and inference,
- Issues talking about unnecessary token consumption.
- Issues talking about Model getting stuck in a loop be watchful as this could be the root cause for issues that otherwise seem like model performance issues.
- Memory compression
- unexpected responses,
- poor quality of generated code
area/tools:
- These are primarily issues related to Model Context Protocol
- These are issues that mention MCP support
- feature requests asking for support for new tools.
area/core: Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality
area/contribution: Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure.
area/authentication: Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc..
area/security-privacy: Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access.
area/extensibility: Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc..
area/performance: Issues focused on model performance
- Issues with running out of capacity,
- 429 errors etc..
- could also pertain to latency,
- other general software performance like, memory usage, CPU consumption, and algorithmic efficiency.
- Switching models from one to the other unexpectedly.
Definition of Categories and Scopes
category/cli: Command line interface and interaction
- Issues with interactive CLI features, command parsing, keyboard shortcuts
- Related scopes: scope/commands, scope/interactive, scope/non-interactive, scope/keybindings
category/core: Core engine and logic
- Issues with fundamental components, content generation, session management
- Related scopes: scope/content-generation, scope/token-management, scope/session-management, scope/model-switching
category/ui: User interface and display
- Issues with themes, UI components, rendering, markdown display
- Related scopes: scope/themes, scope/components, scope/rendering, scope/markdown
category/authentication: Authentication and authorization
- Issues with login flows, API keys, OAuth, credential storage
- Related scopes: scope/oauth, scope/api-keys, scope/token-storage
category/tools: Tool integration and execution
- Issues with MCP, shell execution, file operations, web search, memory, git integration
- Related scopes: scope/mcp, scope/shell, scope/file-operations, scope/web-search, scope/memory, scope/git
category/configuration: Configuration management
- Issues with settings, extensions, trusted folders, sandbox configuration
- Related scopes: scope/settings, scope/extensions, scope/trusted-folders, scope/sandbox
category/integration: External integrations
- Issues with IDE integration, VSCode extension, Zed integration, GitHub Actions
- Related scopes: scope/ide, scope/vscode, scope/zed, scope/github-actions
category/platform: Platform compatibility
- Issues with installation, OS compatibility, packaging
- Related scopes: scope/installation, scope/macos, scope/windows, scope/linux, scope/packaging
category/performance: Performance and optimization
- Issues with latency, memory usage, model performance, caching
- Related scopes: scope/latency, scope/memory-usage, scope/model-performance, scope/caching
category/security: Security and privacy
- Issues with data privacy, credential security, vulnerabilities
- Related scopes: scope/data-privacy, scope/credential-security, scope/vulnerability
category/telemetry: Telemetry and analytics
- Issues with metrics collection, logging, analytics
- Related scopes: scope/metrics, scope/logging, scope/analytics
category/development: Development experience
- Issues with build system, testing, CI/CD, documentation
- Related scopes: scope/build-system, scope/testing, scope/ci-cd, scope/documentation
- name: 'Post Issue Analysis Failure Comment'
if: |-

View File

@@ -84,16 +84,16 @@ jobs:
2. Use shell command `echo` to check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues)
3. Review the issue title, body and any comments provided in the environment variables.
4. Ignore any existing priorities or tags on the issue.
5. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, sub-area/* and priority/*.
5. Select the most relevant labels from the existing labels, focusing on type/*, category/*, scope/*, status/* and priority/*.
6. Get the list of labels already on the issue using `gh issue view ISSUE_NUMBER --repo ${{ github.repository }} --json labels -t '{{range .labels}}{{.name}}{{"\n"}}{{end}}'
7. For area/* and kind/* limit yourself to only the single most applicable label in each case.
7. For category/* and type/* limit yourself to only the single most applicable label in each case.
8. Give me a single short paragraph about why you are selecting each label in the process. use the format Issue ID: , Title, Label applied:, Label removed, overall explanation
9. Parse the JSON array from step 2 and for EACH INDIVIDUAL issue, apply appropriate labels using separate commands:
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label1"`
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label2"`
- Continue for each label separately
- IMPORTANT: Label each issue individually, one command per issue, one label at a time if needed.
- Make sure after you apply labels there is only one area/* and one kind/* label per issue.
- Make sure after you apply labels there is only one category/* and one type/* label per issue.
- To do this look for labels found in step 6 that no longer apply remove them one at a time using
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name1"`
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "label-name2"`
@@ -113,9 +113,9 @@ jobs:
- Do not add comments or modify the issue content.
- Do not remove labels titled help wanted or good first issue.
- Triage only the current issue.
- Identify only one area/ label
- Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
- Identify all applicable sub-area/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Identify only one category/ label
- Identify only one type/ label (Do not apply type/duplicate or type/parent-issue)
- Identify all applicable scope/*, status/* and priority/* labels based on the issue content. It's ok to have multiple of these.
- Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario.
Categorization Guidelines:
P0: Critical / Blocker
@@ -157,48 +157,52 @@ jobs:
- If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue
- This product is designed to use different models eg.. using pro, downgrading to flash etc.
- When users report that they dont expect the model to change those would be categorized as feature requests.
Definition of Areas
area/ux:
- Issues concerning user-facing elements like command usability, interactive features, help docs, and perceived performance.
- I am seeing my screen flicker when using Gemini CLI
- I am seeing the output malformed
- Theme changes aren't taking effect
- My keyboard inputs aren't being recognized
area/platform:
- Issues related to installation, packaging, OS compatibility (Windows, macOS, Linux), and the underlying CLI framework.
area/background: Issues related to long-running background tasks, daemons, and autonomous or proactive agent features.
area/models:
- i am not getting a response that is reasonable or expected. this can include things like
- I am calling a tool and the tool is not performing as expected.
- i am expecting a tool to be called and it is not getting called ,
- Including experience when using
- built-in tools (e.g., web search, code interpreter, read file, writefile, etc..),
- Function calling issues should be under this area
- i am getting responses from the model that are malformed.
- Issues concerning Gemini quality of response and inference,
- Issues talking about unnecessary token consumption.
- Issues talking about Model getting stuck in a loop be watchful as this could be the root cause for issues that otherwise seem like model performance issues.
- Memory compression
- unexpected responses,
- poor quality of generated code
area/tools:
- These are primarily issues related to Model Context Protocol
- These are issues that mention MCP support
- feature requests asking for support for new tools.
area/core:
- Issues with fundamental components like command parsing, configuration management, session state, and the main API client logic. Introducing multi-modality
area/contribution:
- Issues related to improving the developer contribution experience, such as CI/CD pipelines, build scripts, and test automation infrastructure.
area/authentication:
- Issues related to user identity, login flows, API key handling, credential storage, and access token management, unable to sign in selecting wrong authentication path etc..
area/security-privacy:
- Issues concerning vulnerability patching, dependency security, data sanitization, privacy controls, and preventing unauthorized data access.
area/extensibility:
- Issues related to the plugin system, extension APIs, or making the CLI's functionality available in other applications, github actions, ide support etc..
area/performance:
- Issues focused on model performance
- Issues with running out of capacity,
- 429 errors etc..
- could also pertain to latency,
- other general software performance like, memory usage, CPU consumption, and algorithmic efficiency.
- Switching models from one to the other unexpectedly.
Definition of Categories and Scopes
category/cli: Command line interface and interaction
- Issues with interactive CLI features, command parsing, keyboard shortcuts
- Related scopes: scope/commands, scope/interactive, scope/non-interactive, scope/keybindings
category/core: Core engine and logic
- Issues with fundamental components, content generation, session management
- Related scopes: scope/content-generation, scope/token-management, scope/session-management, scope/model-switching
category/ui: User interface and display
- Issues with themes, UI components, rendering, markdown display
- Related scopes: scope/themes, scope/components, scope/rendering, scope/markdown
category/authentication: Authentication and authorization
- Issues with login flows, API keys, OAuth, credential storage
- Related scopes: scope/oauth, scope/api-keys, scope/token-storage
category/tools: Tool integration and execution
- Issues with MCP, shell execution, file operations, web search, memory, git integration
- Related scopes: scope/mcp, scope/shell, scope/file-operations, scope/web-search, scope/memory, scope/git
category/configuration: Configuration management
- Issues with settings, extensions, trusted folders, sandbox configuration
- Related scopes: scope/settings, scope/extensions, scope/trusted-folders, scope/sandbox
category/integration: External integrations
- Issues with IDE integration, VSCode extension, Zed integration, GitHub Actions
- Related scopes: scope/ide, scope/vscode, scope/zed, scope/github-actions
category/platform: Platform compatibility
- Issues with installation, OS compatibility, packaging
- Related scopes: scope/installation, scope/macos, scope/windows, scope/linux, scope/packaging
category/performance: Performance and optimization
- Issues with latency, memory usage, model performance, caching
- Related scopes: scope/latency, scope/memory-usage, scope/model-performance, scope/caching
category/security: Security and privacy
- Issues with data privacy, credential security, vulnerabilities
- Related scopes: scope/data-privacy, scope/credential-security, scope/vulnerability
category/telemetry: Telemetry and analytics
- Issues with metrics collection, logging, analytics
- Related scopes: scope/metrics, scope/logging, scope/analytics
category/development: Development experience
- Issues with build system, testing, CI/CD, documentation
- Related scopes: scope/build-system, scope/testing, scope/ci-cd, scope/documentation

11
.vscode/launch.json vendored
View File

@@ -108,6 +108,17 @@
"request": "attach",
"skipFiles": ["<node_internals>/**"],
"type": "node"
},
{
"type": "node",
"request": "launch",
"name": "Debug Current TS File",
"runtimeExecutable": "npx",
"runtimeArgs": ["tsx", "${file}"],
"skipFiles": ["<node_internals>/**"],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"env": {}
}
],
"inputs": [

View File

@@ -54,7 +54,11 @@ const MockedGeminiClientClass = vi.hoisted(() =>
const MockedUserPromptEvent = vi.hoisted(() =>
vi.fn().mockImplementation(() => {}),
);
const MockedApiCancelEvent = vi.hoisted(() =>
vi.fn().mockImplementation(() => {}),
);
const mockParseAndFormatApiError = vi.hoisted(() => vi.fn());
const mockLogApiCancel = vi.hoisted(() => vi.fn());
// Vision auto-switch mocks (hoisted)
const mockHandleVisionSwitch = vi.hoisted(() =>
@@ -71,7 +75,9 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
GitService: vi.fn(),
GeminiClient: MockedGeminiClientClass,
UserPromptEvent: MockedUserPromptEvent,
ApiCancelEvent: MockedApiCancelEvent,
parseAndFormatApiError: mockParseAndFormatApiError,
logApiCancel: mockLogApiCancel,
};
});

View File

@@ -31,6 +31,8 @@ import {
ConversationFinishedEvent,
ApprovalMode,
parseAndFormatApiError,
logApiCancel,
ApiCancelEvent,
} from '@qwen-code/qwen-code-core';
import { type Part, type PartListUnion, FinishReason } from '@google/genai';
import type {
@@ -223,6 +225,16 @@ export const useGeminiStream = (
turnCancelledRef.current = true;
isSubmittingQueryRef.current = false;
abortControllerRef.current?.abort();
// Log API cancellation
const prompt_id = config.getSessionId() + '########' + getPromptCount();
const cancellationEvent = new ApiCancelEvent(
config.getModel(),
prompt_id,
config.getContentGeneratorConfig()?.authType,
);
logApiCancel(config, cancellationEvent);
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, Date.now());
}
@@ -242,6 +254,8 @@ export const useGeminiStream = (
setPendingHistoryItem,
onCancelSubmit,
pendingHistoryItemRef,
config,
getPromptCount,
]);
useKeypress(
@@ -448,6 +462,7 @@ export const useGeminiStream = (
if (turnCancelledRef.current) {
return;
}
if (pendingHistoryItemRef.current) {
if (pendingHistoryItemRef.current.type === 'tool_group') {
const updatedTools = pendingHistoryItemRef.current.tools.map(

View File

@@ -81,22 +81,6 @@ class GeminiAgent {
): Promise<acp.InitializeResponse> {
this.clientCapabilities = args.clientCapabilities;
const authMethods = [
{
id: AuthType.LOGIN_WITH_GOOGLE,
name: 'Log in with Google',
description: null,
},
{
id: AuthType.USE_GEMINI,
name: 'Use Gemini API key',
description:
'Requires setting the `GEMINI_API_KEY` environment variable',
},
{
id: AuthType.USE_VERTEX_AI,
name: 'Vertex AI',
description: null,
},
{
id: AuthType.USE_OPENAI,
name: 'Use OpenAI API key',
@@ -365,6 +349,7 @@ class Session {
function_name: fc.name ?? '',
function_args: args,
duration_ms: durationMs,
status: 'error',
success: false,
error: error.message,
tool_type:
@@ -483,6 +468,7 @@ class Session {
function_name: fc.name,
function_args: args,
duration_ms: durationMs,
status: 'success',
success: true,
prompt_id: promptId,
tool_type:

View File

@@ -401,7 +401,7 @@ export class CoreToolScheduler {
}
}
return {
const cancelledCall = {
request: currentCall.request,
tool: toolInstance,
invocation,
@@ -426,6 +426,8 @@ export class CoreToolScheduler {
durationMs,
outcome,
} as CancelledToolCall;
return cancelledCall;
}
case 'validating':
return {

View File

@@ -161,6 +161,9 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9,
max_tokens: 1000,
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
mockOpenAIResponse,
@@ -238,6 +241,9 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
tools: mockTools,
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -274,6 +280,30 @@ describe('ContentGenerationPipeline', () => {
request,
);
});
it('should pass abort signal to OpenAI client when provided', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue({
choices: [{ message: { content: 'response' } }],
});
await pipeline.execute(request, 'test-id');
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
});
describe('executeStream', () => {
@@ -338,6 +368,9 @@ describe('ContentGenerationPipeline', () => {
stream: true,
stream_options: { include_usage: true },
}),
expect.objectContaining({
signal: undefined,
}),
);
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
@@ -470,6 +503,42 @@ describe('ContentGenerationPipeline', () => {
);
});
it('should pass abort signal to OpenAI client for streaming requests', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'test-model',
contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
config: { abortSignal: abortController.signal },
};
const mockStream = {
async *[Symbol.asyncIterator]() {
yield {
id: 'chunk-1',
choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
};
},
};
(mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
(mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
new GenerateContentResponse(),
);
(mockClient.chat.completions.create as Mock).mockResolvedValue(
mockStream,
);
const resultGenerator = await pipeline.executeStream(request, 'test-id');
for await (const _result of resultGenerator) {
// Consume stream
}
expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ signal: abortController.signal }),
);
});
it('should merge finishReason and usageMetadata from separate chunks', async () => {
// Arrange
const request: GenerateContentParameters = {
@@ -924,6 +993,9 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -960,6 +1032,9 @@ describe('ContentGenerationPipeline', () => {
top_p: 0.9, // From config
max_tokens: 1000, // From config
}),
expect.objectContaining({
signal: undefined,
}),
);
});
@@ -1009,6 +1084,9 @@ describe('ContentGenerationPipeline', () => {
expect.objectContaining({
metadata: { promptId: userPromptId },
}),
expect.objectContaining({
signal: undefined,
}),
);
});
});

View File

@@ -48,6 +48,9 @@ export class ContentGenerationPipeline {
async (openaiRequest, context) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as OpenAI.Chat.ChatCompletion;
const geminiResponse =
@@ -78,6 +81,9 @@ export class ContentGenerationPipeline {
// Stage 1: Create OpenAI stream
const stream = (await this.client.chat.completions.create(
openaiRequest,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
// Stage 2: Process stream with conversion and logging
@@ -221,6 +227,12 @@ export class ContentGenerationPipeline {
mergedResponse.usageMetadata = lastResponse.usageMetadata;
}
// Copy other essential properties from the current response
mergedResponse.responseId = response.responseId;
mergedResponse.createTime = response.createTime;
mergedResponse.modelVersion = response.modelVersion;
mergedResponse.promptFeedback = response.promptFeedback;
// Update the collected responses with the merged response
collectedGeminiResponses[collectedGeminiResponses.length - 1] =
mergedResponse;

View File

@@ -115,7 +115,7 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
[/^coder-model$/, LIMITS['1m']],
// Commercial Qwen3-Max (including preview and date variants): 256K token context
[/^qwen3-max-preview(-.*)?$/, LIMITS['256k']], // catches "qwen3-max-preview" and date variants
[/^qwen3-max(-preview)?(-.*)?$/, LIMITS['256k']], // catches "qwen3-max" or "qwen3-max-preview" and date variants
// Open-source Qwen3-Coder variants: 256K native
[/^qwen3-coder-.*$/, LIMITS['256k']],
@@ -179,8 +179,8 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
// Generic coder-model: same as qwen3-coder-plus (64K max output tokens)
[/^coder-model$/, LIMITS['64k']],
// Qwen3-Max-Preview: 65,536 max output tokens
[/^qwen3-max-preview(-.*)?$/, LIMITS['64k']],
// Qwen3-Max: 65,536 max output tokens
[/^qwen3-max(-preview)?(-.*)?$/, LIMITS['64k']],
// Qwen-VL-Max-Latest: 8,192 max output tokens
[/^qwen-vl-max-latest$/, LIMITS['8k']],

View File

@@ -84,6 +84,7 @@ export interface ToolCallRequestInfo {
args: Record<string, unknown>;
isClientInitiated: boolean;
prompt_id: string;
response_id?: string;
}
export interface ToolCallResponseInfo {
@@ -202,6 +203,7 @@ export class Turn {
readonly pendingToolCalls: ToolCallRequestInfo[];
private debugResponses: GenerateContentResponse[];
finishReason: FinishReason | undefined;
private currentResponseId?: string;
constructor(
private readonly chat: GeminiChat,
@@ -247,6 +249,11 @@ export class Turn {
this.debugResponses.push(resp);
// Track the current response ID for tool call correlation
if (resp.responseId) {
this.currentResponseId = resp.responseId;
}
const thoughtPart = resp.candidates?.[0]?.content?.parts?.[0];
if (thoughtPart?.thought) {
// Thought always has a bold "subject" part enclosed in double asterisks
@@ -346,6 +353,7 @@ export class Turn {
args,
isClientInitiated: false,
prompt_id: this.prompt_id,
response_id: this.currentResponseId,
};
this.pendingToolCalls.push(toolCallRequest);

View File

@@ -381,6 +381,7 @@ export class SubAgentScope {
let roundText = '';
let lastUsage: GenerateContentResponseUsageMetadata | undefined =
undefined;
let currentResponseId: string | undefined = undefined;
for await (const streamEvent of responseStream) {
if (abortController.signal.aborted) {
this.terminateMode = SubagentTerminateMode.CANCELLED;
@@ -395,6 +396,10 @@ export class SubAgentScope {
// Handle chunk events
if (streamEvent.type === 'chunk') {
const resp = streamEvent.value;
// Track the response ID for tool call correlation
if (resp.responseId) {
currentResponseId = resp.responseId;
}
if (resp.functionCalls) functionCalls.push(...resp.functionCalls);
const content = resp.candidates?.[0]?.content;
const parts = content?.parts || [];
@@ -455,6 +460,7 @@ export class SubAgentScope {
abortController,
promptId,
turnCounter,
currentResponseId,
);
} else {
// No tool calls — treat this as the model's final answer.
@@ -543,6 +549,7 @@ export class SubAgentScope {
* @param {FunctionCall[]} functionCalls - An array of `FunctionCall` objects to process.
* @param {ToolRegistry} toolRegistry - The tool registry to look up and execute tools.
* @param {AbortController} abortController - An `AbortController` to signal cancellation of tool executions.
* @param {string} [responseId] - Optional API response ID for correlation with tool calls.
* @returns {Promise<Content[]>} A promise that resolves to an array of `Content` parts representing the tool responses,
* which are then used to update the chat history.
*/
@@ -551,6 +558,7 @@ export class SubAgentScope {
abortController: AbortController,
promptId: string,
currentRound: number,
responseId?: string,
): Promise<Content[]> {
const toolResponseParts: Part[] = [];
@@ -704,6 +712,7 @@ export class SubAgentScope {
args,
isClientInitiated: true,
prompt_id: promptId,
response_id: responseId,
};
const description = this.getToolDescription(toolName, args);

View File

@@ -10,6 +10,7 @@ export const EVENT_USER_PROMPT = 'qwen-code.user_prompt';
export const EVENT_TOOL_CALL = 'qwen-code.tool_call';
export const EVENT_API_REQUEST = 'qwen-code.api_request';
export const EVENT_API_ERROR = 'qwen-code.api_error';
export const EVENT_API_CANCEL = 'qwen-code.api_cancel';
export const EVENT_API_RESPONSE = 'qwen-code.api_response';
export const EVENT_CLI_CONFIG = 'qwen-code.config';
export const EVENT_FLASH_FALLBACK = 'qwen-code.flash_fallback';

View File

@@ -17,6 +17,7 @@ export { SpanStatusCode, ValueType } from '@opentelemetry/api';
export { SemanticAttributes } from '@opentelemetry/semantic-conventions';
export {
logApiError,
logApiCancel,
logApiRequest,
logApiResponse,
logChatCompression,
@@ -35,6 +36,7 @@ export {
} from './sdk.js';
export {
ApiErrorEvent,
ApiCancelEvent,
ApiRequestEvent,
ApiResponseEvent,
ConversationFinishedEvent,
@@ -54,4 +56,5 @@ export type {
TelemetryEvent,
} from './types.js';
export * from './uiTelemetry.js';
export { QwenLogger } from './qwen-logger/qwen-logger.js';
export { DEFAULT_OTLP_ENDPOINT, DEFAULT_TELEMETRY_TARGET };

View File

@@ -550,6 +550,7 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'success',
success: true,
decision: ToolCallDecision.ACCEPT,
prompt_id: 'prompt-id-1',
@@ -619,6 +620,7 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'error',
success: false,
decision: ToolCallDecision.REJECT,
prompt_id: 'prompt-id-2',
@@ -691,6 +693,7 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'success',
success: true,
decision: ToolCallDecision.MODIFY,
prompt_id: 'prompt-id-3',
@@ -762,6 +765,7 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'success',
success: true,
prompt_id: 'prompt-id-4',
tool_type: 'native',
@@ -834,6 +838,7 @@ describe('loggers', () => {
2,
),
duration_ms: 100,
status: 'error',
success: false,
error: 'test-error',
'error.message': 'test-error',

View File

@@ -12,6 +12,7 @@ import { safeJsonStringify } from '../utils/safeJsonStringify.js';
import { UserAccountManager } from '../utils/userAccountManager.js';
import {
EVENT_API_ERROR,
EVENT_API_CANCEL,
EVENT_API_REQUEST,
EVENT_API_RESPONSE,
EVENT_CHAT_COMPRESSION,
@@ -45,6 +46,7 @@ import { QwenLogger } from './qwen-logger/qwen-logger.js';
import { isTelemetrySdkInitialized } from './sdk.js';
import type {
ApiErrorEvent,
ApiCancelEvent,
ApiRequestEvent,
ApiResponseEvent,
ChatCompressionEvent,
@@ -282,6 +284,32 @@ export function logApiError(config: Config, event: ApiErrorEvent): void {
);
}
/**
 * Records an API request cancellation across all telemetry sinks:
 * the in-process UI metrics service, the RUM-backed Qwen logger, and
 * (when the SDK is initialized) the OpenTelemetry log exporter.
 */
export function logApiCancel(config: Config, event: ApiCancelEvent): void {
  // Surface the cancellation in the interactive UI metrics first.
  uiTelemetryService.addEvent({
    ...event,
    'event.name': EVENT_API_CANCEL,
    'event.timestamp': new Date().toISOString(),
  } as UiEvent);

  // Forward to the RUM logger when one is configured for this session.
  QwenLogger.getInstance(config)?.logApiCancelEvent(event);

  // OTLP export is only meaningful once the telemetry SDK is up.
  if (!isTelemetrySdkInitialized()) return;

  const otelLogger = logs.getLogger(SERVICE_NAME);
  const logAttributes: LogAttributes = {
    ...getCommonAttributes(config),
    ...event,
    'event.name': EVENT_API_CANCEL,
    'event.timestamp': new Date().toISOString(),
    model_name: event.model,
  };
  otelLogger.emit({
    body: `API request cancelled for ${event.model}.`,
    attributes: logAttributes,
  });
}
export function logApiResponse(config: Config, event: ApiResponseEvent): void {
const uiEvent = {
...event,

View File

@@ -15,6 +15,7 @@ import type {
ApiRequestEvent,
ApiResponseEvent,
ApiErrorEvent,
ApiCancelEvent,
FileOperationEvent,
FlashFallbackEvent,
LoopDetectedEvent,
@@ -411,6 +412,7 @@ export class QwenLogger {
{
properties: {
prompt_id: event.prompt_id,
response_id: event.response_id,
},
snapshots: JSON.stringify({
function_name: event.function_name,
@@ -427,6 +429,19 @@ export class QwenLogger {
this.flushIfNeeded();
}
// Enqueue an API-cancellation action into the RUM event buffer,
// flushing the buffer if it has reached its threshold.
logApiCancelEvent(event: ApiCancelEvent): void {
  this.enqueueLogEvent(
    this.createActionEvent('api', 'api_cancel', {
      properties: {
        model: event.model,
        prompt_id: event.prompt_id,
        auth_type: event.auth_type,
      },
    }),
  );
  this.flushIfNeeded();
}
logFileOperationEvent(event: FileOperationEvent): void {
const rumEvent = this.createActionEvent(
'file_operation',

View File

@@ -127,11 +127,13 @@ export class ToolCallEvent implements BaseTelemetryEvent {
function_name: string;
function_args: Record<string, unknown>;
duration_ms: number;
success: boolean;
status: 'success' | 'error' | 'cancelled';
success: boolean; // Keep for backward compatibility
decision?: ToolCallDecision;
error?: string;
error_type?: string;
prompt_id: string;
response_id?: string;
tool_type: 'native' | 'mcp';
// eslint-disable-next-line @typescript-eslint/no-explicit-any
metadata?: { [key: string]: any };
@@ -142,13 +144,15 @@ export class ToolCallEvent implements BaseTelemetryEvent {
this.function_name = call.request.name;
this.function_args = call.request.args;
this.duration_ms = call.durationMs ?? 0;
this.success = call.status === 'success';
this.status = call.status;
this.success = call.status === 'success'; // Keep for backward compatibility
this.decision = call.outcome
? getDecisionFromOutcome(call.outcome)
: undefined;
this.error = call.response.error?.message;
this.error_type = call.response.errorType;
this.prompt_id = call.request.prompt_id;
this.response_id = call.request.response_id;
this.tool_type =
typeof call.tool !== 'undefined' && call.tool instanceof DiscoveredMCPTool
? 'mcp'
@@ -224,6 +228,22 @@ export class ApiErrorEvent implements BaseTelemetryEvent {
}
}
/**
 * Telemetry event recorded when an in-flight API request is cancelled
 * before a response arrives. Mirrors the shape of the sibling
 * `ApiErrorEvent`/`ApiResponseEvent` classes so sinks can treat all
 * API lifecycle events uniformly.
 */
export class ApiCancelEvent implements BaseTelemetryEvent {
  // Discriminant used by telemetry sinks to route this event.
  'event.name': 'api_cancel';
  // ISO 8601 timestamp captured at construction time.
  'event.timestamp': string;
  // Model the cancelled request was addressed to.
  model: string;
  // Prompt identifier for correlating with the originating user prompt.
  prompt_id: string;
  // Auth method in use, when known (e.g. for segmenting metrics).
  auth_type?: string;
  constructor(model: string, prompt_id: string, auth_type?: string) {
    this['event.name'] = 'api_cancel';
    this['event.timestamp'] = new Date().toISOString();
    this.model = model;
    this.prompt_id = prompt_id;
    this.auth_type = auth_type;
  }
}
export class ApiResponseEvent implements BaseTelemetryEvent {
'event.name': 'api_response';
'event.timestamp': string; // ISO 8601
@@ -542,6 +562,7 @@ export type TelemetryEvent =
| ToolCallEvent
| ApiRequestEvent
| ApiErrorEvent
| ApiCancelEvent
| ApiResponseEvent
| FlashFallbackEvent
| LoopDetectedEvent

View File

@@ -15,6 +15,7 @@ import {
EVENT_TOOL_CALL,
} from './constants.js';
import type {
CancelledToolCall,
CompletedToolCall,
ErroredToolCall,
SuccessfulToolCall,
@@ -25,7 +26,7 @@ import { MockTool } from '../test-utils/tools.js';
const createFakeCompletedToolCall = (
name: string,
success: boolean,
success: boolean | 'cancelled',
duration = 100,
outcome?: ToolConfirmationOutcome,
error?: Error,
@@ -39,7 +40,7 @@ const createFakeCompletedToolCall = (
};
const tool = new MockTool(name);
if (success) {
if (success === true) {
return {
status: 'success',
request,
@@ -63,6 +64,30 @@ const createFakeCompletedToolCall = (
durationMs: duration,
outcome,
} as SuccessfulToolCall;
} else if (success === 'cancelled') {
return {
status: 'cancelled',
request,
tool,
invocation: tool.build({ param: 'test' }),
response: {
callId: request.callId,
responseParts: [
{
functionResponse: {
id: request.callId,
name,
response: { error: 'Tool cancelled' },
},
},
],
error: new Error('Tool cancelled'),
errorType: ToolErrorType.UNKNOWN,
resultDisplay: 'Cancelled!',
},
durationMs: duration,
outcome,
} as CancelledToolCall;
} else {
return {
status: 'error',
@@ -411,6 +436,40 @@ describe('UiTelemetryService', () => {
});
});
it('should process a single cancelled ToolCallEvent', () => {
  const cancelledCall = createFakeCompletedToolCall(
    'test_tool',
    'cancelled',
    180,
    ToolConfirmationOutcome.Cancel,
  );

  service.addEvent({
    ...structuredClone(new ToolCallEvent(cancelledCall)),
    'event.name': EVENT_TOOL_CALL,
  } as ToolCallEvent & { 'event.name': typeof EVENT_TOOL_CALL });

  const { tools } = service.getMetrics();

  // A cancelled call counts against the failure bucket, never success,
  // and its Cancel outcome is tallied as a REJECT decision.
  expect(tools.totalCalls).toBe(1);
  expect(tools.totalSuccess).toBe(0);
  expect(tools.totalFail).toBe(1);
  expect(tools.totalDurationMs).toBe(180);
  expect(tools.totalDecisions[ToolCallDecision.REJECT]).toBe(1);

  expect(tools.byName['test_tool']).toEqual({
    count: 1,
    success: 0,
    fail: 1,
    durationMs: 180,
    decisions: {
      [ToolCallDecision.ACCEPT]: 0,
      [ToolCallDecision.REJECT]: 1,
      [ToolCallDecision.MODIFY]: 0,
      [ToolCallDecision.AUTO_ACCEPT]: 0,
    },
  });
});
it('should process a ToolCallEvent with modify decision', () => {
const toolCall = createFakeCompletedToolCall(
'test_tool',
@@ -637,6 +696,34 @@ describe('UiTelemetryService', () => {
expect(service.getLastPromptTokenCount()).toBe(0);
expect(spy).toHaveBeenCalledOnce();
});
it('should correctly set status field for success/error/cancelled calls', () => {
  const events = {
    success: new ToolCallEvent(
      createFakeCompletedToolCall('success_tool', true, 100),
    ),
    error: new ToolCallEvent(
      createFakeCompletedToolCall('error_tool', false, 150),
    ),
    cancelled: new ToolCallEvent(
      createFakeCompletedToolCall('cancelled_tool', 'cancelled', 200),
    ),
  };

  // The status field mirrors the tool call's terminal state directly.
  expect(events.success.status).toBe('success');
  expect(events.error.status).toBe('error');
  expect(events.cancelled.status).toBe('cancelled');

  // The legacy boolean only distinguishes success from everything else,
  // so both error and cancelled collapse to false.
  expect(events.success.success).toBe(true);
  expect(events.error.success).toBe(false);
  expect(events.cancelled.success).toBe(false);
});
});
describe('Tool Call Event with Line Count Metadata', () => {