Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| mingholy.lmh | 510d38fe3a | feat: add runtime-aware fetch options for Anthropic and OpenAI providers | 2026-01-16 17:18:48 +08:00 |
11 changed files with 189 additions and 65 deletions

View File

@@ -275,7 +275,6 @@ If you are experiencing performance issues with file searching (e.g., with `@` c
| `tools.truncateToolOutputThreshold` | number | Truncate tool output if it is larger than this many characters. Applies to Shell, Grep, Glob, ReadFile and ReadManyFiles tools. | `25000` | Requires restart: Yes |
| `tools.truncateToolOutputLines` | number | Maximum lines or entries kept when truncating tool output. Applies to Shell, Grep, Glob, ReadFile and ReadManyFiles tools. | `1000` | Requires restart: Yes |
| `tools.autoAccept` | boolean | Controls whether the CLI automatically accepts and executes tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation. If set to `true`, the CLI will bypass the confirmation prompt for tools deemed safe. | `false` | |
| `tools.experimental.skills` | boolean | Enable experimental Agent Skills feature | `false` | |
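
For context, a hedged sketch of what the two truncation settings in the table above imply; `truncateToolOutput` is a hypothetical helper for illustration, not Qwen Code's actual implementation.

```typescript
// Hypothetical sketch: cap tool output by characters and by lines, mirroring the
// documented defaults (25000 characters, 1000 lines). Not the project's code.
function truncateToolOutput(
  output: string,
  thresholdChars = 25_000, // tools.truncateToolOutputThreshold
  maxLines = 1_000, // tools.truncateToolOutputLines
): string {
  if (output.length <= thresholdChars) {
    return output;
  }
  const lines = output.split('\n').slice(0, maxLines);
  let truncated = lines.join('\n');
  if (truncated.length > thresholdChars) {
    truncated = truncated.slice(0, thresholdChars);
  }
  return `${truncated}\n... [output truncated]`;
}
```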
#### mcp

View File

@@ -11,29 +11,12 @@ This guide shows you how to create, use, and manage Agent Skills in **Qwen Code*
## Prerequisites
- Qwen Code (recent version)
## How to enable
### Via CLI flag
- Run with the experimental flag enabled:
```bash
qwen --experimental-skills
```
### Via settings.json
Add to your `~/.qwen/settings.json` or project's `.qwen/settings.json`:
```json
{
"tools": {
"experimental": {
"skills": true
}
}
}
```
- Basic familiarity with Qwen Code ([Quickstart](../quickstart.md))
## What are Agent Skills?

View File

@@ -334,7 +334,7 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
.option('experimental-skills', {
type: 'boolean',
description: 'Enable experimental Skills feature',
default: settings.tools?.experimental?.skills ?? false,
default: false,
})
.option('channel', {
type: 'string',
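
For reference, a minimal self-contained sketch of how the flag reads after this change; the surrounding `parseArguments` wiring is assumed, and the default is now hard-coded rather than derived from the removed `tools.experimental.skills` setting.

```typescript
// Minimal sketch (assumed wiring, not the project's full parseArguments):
// the flag's default is now a hard-coded `false` instead of
// `settings.tools?.experimental?.skills ?? false`.
import yargs from 'yargs';
import { hideBin } from 'yargs/helpers';

const argv = await yargs(hideBin(process.argv))
  .option('experimental-skills', {
    type: 'boolean',
    description: 'Enable experimental Skills feature',
    default: false,
  })
  .parse();

if (argv['experimental-skills']) {
  // Skills from .qwen/skills/ and ~/.qwen/skills/ would be enabled here.
}
```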

View File

@@ -981,27 +981,6 @@ const SETTINGS_SCHEMA = {
description: 'The number of lines to keep when truncating tool output.',
showInDialog: true,
},
experimental: {
type: 'object',
label: 'Experimental',
category: 'Tools',
requiresRestart: true,
default: {},
description: 'Experimental tool features.',
showInDialog: false,
properties: {
skills: {
type: 'boolean',
label: 'Skills',
category: 'Tools',
requiresRestart: true,
default: false,
description:
'Enable experimental Agent Skills feature. When enabled, Qwen Code can use Skills from .qwen/skills/ and ~/.qwen/skills/.',
showInDialog: true,
},
},
},
},
},

View File

@@ -873,11 +873,11 @@ export default {
'Session Stats': '会话统计',
'Model Usage': '模型使用情况',
Reqs: '请求数',
'Input Tokens': '输入 token 数',
'Output Tokens': '输出 token 数',
'Input Tokens': '输入令牌',
'Output Tokens': '输出令牌',
'Savings Highlight:': '节省亮点:',
'of input tokens were served from the cache, reducing costs.':
'从缓存载入 token ,降低了成本',
'的输入令牌来自缓存,降低了成本',
'Tip: For a full token breakdown, run `/stats model`.':
'提示:要查看完整的令牌明细,请运行 `/stats model`',
'Model Stats For Nerds': '模型统计(技术细节)',

View File

@@ -8,10 +8,7 @@ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import {
updateSettingsFilePreservingFormat,
applyUpdates,
} from './commentJson.js';
import { updateSettingsFilePreservingFormat } from './commentJson.js';
describe('commentJson', () => {
let tempDir: string;
@@ -183,18 +180,3 @@ describe('commentJson', () => {
});
});
});
describe('applyUpdates', () => {
it('should apply updates correctly', () => {
const original = { a: 1, b: { c: 2 } };
const updates = { b: { c: 3 } };
const result = applyUpdates(original, updates);
expect(result).toEqual({ a: 1, b: { c: 3 } });
});
it('should apply updates correctly when empty', () => {
const original = { a: 1, b: { c: 2 } };
const updates = { b: {} };
const result = applyUpdates(original, updates);
expect(result).toEqual({ a: 1, b: {} });
});
});

View File

@@ -38,7 +38,7 @@ export function updateSettingsFilePreservingFormat(
fs.writeFileSync(filePath, updatedContent, 'utf-8');
}
export function applyUpdates(
function applyUpdates(
current: Record<string, unknown>,
updates: Record<string, unknown>,
): Record<string, unknown> {
@@ -50,7 +50,6 @@ export function applyUpdates(
typeof value === 'object' &&
value !== null &&
!Array.isArray(value) &&
Object.keys(value).length > 0 &&
typeof result[key] === 'object' &&
result[key] !== null &&
!Array.isArray(result[key])
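
For clarity, a minimal sketch of the deep-merge behaviour implied by this hunk and the removed tests above; it is not the project's exact code. With the `Object.keys(value).length > 0` guard removed, an empty object in `updates` now recurses into the existing value (leaving its keys intact) instead of replacing it outright.

```typescript
// Sketch of the merge implied by the diff (the recursion body is assumed; the
// hunk only shows the condition). Empty update objects now fall into the
// recursive branch rather than overwriting the target object.
function applyUpdatesSketch(
  current: Record<string, unknown>,
  updates: Record<string, unknown>,
): Record<string, unknown> {
  const result: Record<string, unknown> = { ...current };
  for (const [key, value] of Object.entries(updates)) {
    if (
      typeof value === 'object' &&
      value !== null &&
      !Array.isArray(value) &&
      typeof result[key] === 'object' &&
      result[key] !== null &&
      !Array.isArray(result[key])
    ) {
      result[key] = applyUpdatesSketch(
        result[key] as Record<string, unknown>,
        value as Record<string, unknown>,
      );
    } else {
      result[key] = value;
    }
  }
  return result;
}

// applyUpdatesSketch({ a: 1, b: { c: 2 } }, { b: {} }) now yields
// { a: 1, b: { c: 2 } }, which is why the removed test expecting
// { a: 1, b: {} } no longer applies.
```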

View File

@@ -28,6 +28,7 @@ type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';
import { buildRuntimeFetchOptions } from '../../utils/runtimeFetchOptions.js';
type StreamingBlockState = {
type: string;
@@ -54,6 +55,9 @@ export class AnthropicContentGenerator implements ContentGenerator {
) {
const defaultHeaders = this.buildHeaders();
const baseURL = contentGeneratorConfig.baseUrl;
// Configure runtime options to ensure user-configured timeout works as expected
// bodyTimeout is always disabled (0) to let Anthropic SDK timeout control the request
const runtimeOptions = buildRuntimeFetchOptions('anthropic');
this.client = new Anthropic({
apiKey: contentGeneratorConfig.apiKey,
@@ -61,6 +65,7 @@ export class AnthropicContentGenerator implements ContentGenerator {
timeout: contentGeneratorConfig.timeout,
maxRetries: contentGeneratorConfig.maxRetries,
defaultHeaders,
...runtimeOptions,
});
this.converter = new AnthropicContentConverter(

View File

@@ -16,6 +16,7 @@ import type {
ChatCompletionContentPartWithCache,
ChatCompletionToolWithCache,
} from './types.js';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
export class DashScopeOpenAICompatibleProvider
implements OpenAICompatibleProvider
@@ -68,12 +69,16 @@ export class DashScopeOpenAICompatibleProvider
maxRetries = DEFAULT_MAX_RETRIES,
} = this.contentGeneratorConfig;
const defaultHeaders = this.buildHeaders();
// Configure fetch options to ensure user-configured timeout works as expected
// bodyTimeout is always disabled (0) to let OpenAI SDK timeout control the request
const fetchOptions = buildRuntimeFetchOptions('openai');
return new OpenAI({
apiKey,
baseURL: baseUrl,
timeout,
maxRetries,
defaultHeaders,
...(fetchOptions ? { fetchOptions } : {}),
});
}
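
For context, a hedged sketch (placeholder config values) of what the conditional spread ends up passing to the OpenAI client, based on the new `buildRuntimeFetchOptions` utility added in this commit.

```typescript
// Hedged sketch; apiKey/baseURL/timeout/maxRetries are placeholders, not the
// provider's real values. buildRuntimeFetchOptions('openai') yields, per runtime:
//   Node.js / unknown -> { dispatcher: EnvHttpProxyAgent({ bodyTimeout: 0 }) }
//   Bun               -> { timeout: false }
//   undici missing    -> undefined, so no fetchOptions key is passed at all
import OpenAI from 'openai';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';

const fetchOptions = buildRuntimeFetchOptions('openai');
const client = new OpenAI({
  apiKey: 'sk-placeholder',
  baseURL: 'https://example.com/v1', // placeholder
  timeout: 120_000, // the SDK timeout stays in charge of total request time
  maxRetries: 3,
  ...(fetchOptions ? { fetchOptions } : {}),
});
```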

View File

@@ -4,6 +4,7 @@ import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import type { OpenAICompatibleProvider } from './types.js';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
/**
* Default provider for standard OpenAI-compatible APIs
@@ -43,12 +44,16 @@ export class DefaultOpenAICompatibleProvider
maxRetries = DEFAULT_MAX_RETRIES,
} = this.contentGeneratorConfig;
const defaultHeaders = this.buildHeaders();
// Configure fetch options to ensure user-configured timeout works as expected
// bodyTimeout is always disabled (0) to let OpenAI SDK timeout control the request
const fetchOptions = buildRuntimeFetchOptions('openai');
return new OpenAI({
apiKey,
baseURL: baseUrl,
timeout,
maxRetries,
defaultHeaders,
...(fetchOptions ? { fetchOptions } : {}),
});
}

View File

@@ -0,0 +1,167 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { EnvHttpProxyAgent } from 'undici';
/**
* JavaScript runtime type
*/
export type Runtime = 'node' | 'bun' | 'unknown';
/**
* Detect the current JavaScript runtime
*/
export function detectRuntime(): Runtime {
if (typeof process !== 'undefined' && process.versions?.['bun']) {
return 'bun';
}
if (typeof process !== 'undefined' && process.versions?.node) {
return 'node';
}
return 'unknown';
}
/**
* Runtime fetch options for OpenAI SDK
*/
export type OpenAIRuntimeFetchOptions =
| {
dispatcher?: EnvHttpProxyAgent;
timeout?: false;
}
| undefined;
/**
* Runtime fetch options for Anthropic SDK
*/
export type AnthropicRuntimeFetchOptions = {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
httpAgent?: any;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
fetch?: any;
};
/**
* SDK type identifier
*/
export type SDKType = 'openai' | 'anthropic';
/**
* Build runtime-specific fetch options for OpenAI SDK
*/
export function buildRuntimeFetchOptions(
sdkType: 'openai',
): OpenAIRuntimeFetchOptions;
/**
* Build runtime-specific fetch options for Anthropic SDK
*/
export function buildRuntimeFetchOptions(
sdkType: 'anthropic',
): AnthropicRuntimeFetchOptions;
/**
* Build runtime-specific fetch options based on the detected runtime and SDK type
* This function applies runtime-specific configurations to handle timeout differences
* across Node.js and Bun, ensuring user-configured timeout works as expected.
*
* @param sdkType - The SDK type ('openai' or 'anthropic') to determine return type
* @returns Runtime-specific options compatible with the specified SDK
*/
export function buildRuntimeFetchOptions(
sdkType: SDKType,
): OpenAIRuntimeFetchOptions | AnthropicRuntimeFetchOptions {
const runtime = detectRuntime();
// Always disable bodyTimeout (set to 0) to let SDK's timeout parameter
// control the total request time. bodyTimeout only monitors intervals between
// data chunks, not the total request time, so we disable it to ensure user-configured
// timeout works as expected for both streaming and non-streaming requests.
switch (runtime) {
case 'bun': {
if (sdkType === 'openai') {
// Bun: Disable built-in 300s timeout to let OpenAI SDK timeout control
// This ensures user-configured timeout works as expected without interference
return {
timeout: false,
};
} else {
// Bun: Use custom fetch to disable built-in 300s timeout
// This allows Anthropic SDK timeout to control the request
// Note: Bun's fetch automatically uses proxy settings from environment variables
// (HTTP_PROXY, HTTPS_PROXY, NO_PROXY), so proxy behavior is preserved
const bunFetch: typeof fetch = async (
input: RequestInfo | URL,
init?: RequestInit,
) => {
const bunFetchOptions: RequestInit = {
...init,
// @ts-expect-error - Bun-specific timeout option
timeout: false,
};
return fetch(input, bunFetchOptions);
};
return {
fetch: bunFetch,
};
}
}
case 'node': {
// Node.js: Use EnvHttpProxyAgent to configure proxy and disable bodyTimeout
// EnvHttpProxyAgent automatically reads proxy settings from environment variables
// (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, etc.) to preserve proxy functionality
// bodyTimeout is always 0 (disabled) to let SDK timeout control the request
try {
const agent = new EnvHttpProxyAgent({
bodyTimeout: 0, // Disable to let SDK timeout control total request time
});
if (sdkType === 'openai') {
return {
dispatcher: agent,
};
} else {
return {
httpAgent: agent,
};
}
} catch {
// If undici is not available, return appropriate default
if (sdkType === 'openai') {
return undefined;
} else {
return {};
}
}
}
default: {
// Unknown runtime: Try to use EnvHttpProxyAgent if available
// EnvHttpProxyAgent automatically reads proxy settings from environment variables
try {
const agent = new EnvHttpProxyAgent({
bodyTimeout: 0, // Disable to let SDK timeout control total request time
});
if (sdkType === 'openai') {
return {
dispatcher: agent,
};
} else {
return {
httpAgent: agent,
};
}
} catch {
if (sdkType === 'openai') {
return undefined;
} else {
return {};
}
}
}
}
}
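
Finally, a hedged vitest-style sketch (an assumed test file, not part of this commit) showing how the two overloads behave per detected runtime.

```typescript
// Assumed test sketch; assertions depend on the runtime the tests execute under.
import { describe, it, expect } from 'vitest';
import { buildRuntimeFetchOptions, detectRuntime } from './runtimeFetchOptions.js';

describe('buildRuntimeFetchOptions', () => {
  it('returns SDK-shaped options for the detected runtime', () => {
    const runtime = detectRuntime();
    const openaiOptions = buildRuntimeFetchOptions('openai');
    const anthropicOptions = buildRuntimeFetchOptions('anthropic');

    if (runtime === 'bun') {
      // Bun: OpenAI gets its built-in timeout disabled; Anthropic gets a custom fetch.
      expect(openaiOptions).toEqual({ timeout: false });
      expect(typeof anthropicOptions.fetch).toBe('function');
    } else {
      // Node.js / unknown: an undici EnvHttpProxyAgent, or the documented fallback
      // (undefined for OpenAI, {} for Anthropic) when undici is unavailable.
      expect(openaiOptions === undefined || 'dispatcher' in openaiOptions).toBe(true);
      expect(
        'httpAgent' in anthropicOptions ||
          Object.keys(anthropicOptions).length === 0,
      ).toBe(true);
    }
  });
});
```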