Compare commits


5 Commits

Author SHA1 Message Date

DragonnZhang
4c8414488f refactor: reorder feedback options and improve dialog feedback timestamp handling 2026-01-23 18:55:43 +08:00

DragonnZhang
6327e35a14 feat: implement persistent feedback prompting with temporary dismissal options 2026-01-23 16:52:40 +08:00

Add 'Fine' and 'Dismiss' options to feedback dialogs that allow temporary
dismissal without permanently closing the feedback request. Only numerical
ratings (0, 1, 2, 3) will permanently close feedback dialogs, while all
other inputs result in temporary dismissal with persistent re-prompting.

This ensures feedback collection reliability while respecting user workflow
by allowing users to temporarily dismiss prompts when busy and providing
feedback when ready.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>

Mingholy
829ba9c431 Merge pull request #1516 from QwenLM/mingholy/fix/runtime-timeout 2026-01-23 14:27:50 +08:00

feat: add runtime-aware fetch options for Anthropic and OpenAI providers

mingholy.lmh
4a0e55530b test: mock runtime fetch options in DashScope and Default OpenAI providers 2026-01-19 11:37:10 +08:00

mingholy.lmh
510d38fe3a feat: add runtime-aware fetch options for Anthropic and OpenAI providers 2026-01-16 17:18:48 +08:00

22 changed files with 361 additions and 85 deletions
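
Commit 6327e35a14 above spells out the new prompting rule: a numeric choice closes the dialog for good, while any other key only hides it until a later turn. That rule reduces to the small decision sketched below in TypeScript, using the FEEDBACK_OPTIONS values and UI actions introduced in the diffs that follow (handleFeedbackKey is an illustrative wrapper, not the component's actual structure):

// Sketch of the keypress rule described in commit 6327e35a14.
// '1'-'3' submit a rating, '0' dismisses permanently (no telemetry),
// and any other key only hides the dialog temporarily so it can re-prompt.
const FEEDBACK_OPTIONS = { GOOD: 1, BAD: 2, FINE: 3, DISMISS: 0 } as const;

function handleFeedbackKey(
  keyName: string,
  actions: {
    submitFeedback: (rating: number) => void;
    temporaryCloseFeedbackDialog: () => void;
  },
): void {
  switch (keyName) {
    case '1':
      actions.submitFeedback(FEEDBACK_OPTIONS.GOOD);
      break;
    case '2':
      actions.submitFeedback(FEEDBACK_OPTIONS.BAD);
      break;
    case '3':
      actions.submitFeedback(FEEDBACK_OPTIONS.FINE);
      break;
    case '0':
      actions.submitFeedback(FEEDBACK_OPTIONS.DISMISS);
      break;
    default:
      actions.temporaryCloseFeedbackDialog();
  }
}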

package-lock.json (generated)

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"workspaces": [
"packages/*"
],
@@ -17343,7 +17343,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
@@ -17977,7 +17977,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
@@ -21442,7 +21442,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -21454,7 +21454,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.8.0-preview.2"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.8.0"
},
"scripts": {
"start": "cross-env node scripts/start.js",


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.8.0-preview.2"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.8.0"
},
"dependencies": {
"@google/genai": "1.30.0",


@@ -298,7 +298,9 @@ export default {
'How is Qwen doing this session? (optional)':
'Wie macht sich Qwen in dieser Sitzung? (optional)',
Bad: 'Schlecht',
Fine: 'In Ordnung',
Good: 'Gut',
Dismiss: 'Ignorieren',
'Not Sure Yet': 'Noch nicht sicher',
'Any other key': 'Beliebige andere Taste',
'Disable Loading Phrases': 'Ladesprüche deaktivieren',


@@ -315,7 +315,9 @@ export default {
'How is Qwen doing this session? (optional)':
'How is Qwen doing this session? (optional)',
Bad: 'Bad',
Fine: 'Fine',
Good: 'Good',
Dismiss: 'Dismiss',
'Not Sure Yet': 'Not Sure Yet',
'Any other key': 'Any other key',
'Disable Loading Phrases': 'Disable Loading Phrases',


@@ -319,7 +319,9 @@ export default {
'How is Qwen doing this session? (optional)':
'Как дела у Qwen в этой сессии? (необязательно)',
Bad: 'Плохо',
Fine: 'Нормально',
Good: 'Хорошо',
Dismiss: 'Отклонить',
'Not Sure Yet': 'Пока не уверен',
'Any other key': 'Любая другая клавиша',
'Disable Loading Phrases': 'Отключить фразы при загрузке',


@@ -305,7 +305,9 @@ export default {
'Enable User Feedback': '启用用户反馈',
'How is Qwen doing this session? (optional)': 'Qwen 这次表现如何?(可选)',
Bad: '不满意',
Fine: '还行',
Good: '满意',
Dismiss: '忽略',
'Not Sure Yet': '暂不评价',
'Any other key': '任意其他键',
'Disable Loading Phrases': '禁用加载短语',


@@ -1326,6 +1326,7 @@ export const AppContainer = (props: AppContainerProps) => {
isFeedbackDialogOpen,
openFeedbackDialog,
closeFeedbackDialog,
temporaryCloseFeedbackDialog,
submitFeedback,
} = useFeedbackDialog({
config,
@@ -1571,6 +1572,7 @@ export const AppContainer = (props: AppContainerProps) => {
// Feedback dialog
openFeedbackDialog,
closeFeedbackDialog,
temporaryCloseFeedbackDialog,
submitFeedback,
}),
[
@@ -1611,6 +1613,7 @@ export const AppContainer = (props: AppContainerProps) => {
// Feedback dialog
openFeedbackDialog,
closeFeedbackDialog,
temporaryCloseFeedbackDialog,
submitFeedback,
],
);


@@ -5,19 +5,21 @@ import { useUIActions } from './contexts/UIActionsContext.js';
import { useUIState } from './contexts/UIStateContext.js';
import { useKeypress } from './hooks/useKeypress.js';
const FEEDBACK_OPTIONS = {
export const FEEDBACK_OPTIONS = {
GOOD: 1,
BAD: 2,
NOT_SURE: 3,
FINE: 3,
DISMISS: 0,
} as const;
const FEEDBACK_OPTION_KEYS = {
[FEEDBACK_OPTIONS.GOOD]: '1',
[FEEDBACK_OPTIONS.BAD]: '2',
[FEEDBACK_OPTIONS.NOT_SURE]: 'any',
[FEEDBACK_OPTIONS.FINE]: '3',
[FEEDBACK_OPTIONS.DISMISS]: '0',
} as const;
export const FEEDBACK_DIALOG_KEYS = ['1', '2'] as const;
export const FEEDBACK_DIALOG_KEYS = ['1', '2', '3', '0'] as const;
export const FeedbackDialog: React.FC = () => {
const uiState = useUIState();
@@ -25,15 +27,19 @@ export const FeedbackDialog: React.FC = () => {
useKeypress(
(key) => {
if (key.name === FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.GOOD]) {
uiActions.submitFeedback(FEEDBACK_OPTIONS.GOOD);
} else if (key.name === FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.BAD]) {
// Handle keys 0-3: permanent close with feedback/dismiss
if (key.name === FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.BAD]) {
uiActions.submitFeedback(FEEDBACK_OPTIONS.BAD);
} else if (key.name === FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.FINE]) {
uiActions.submitFeedback(FEEDBACK_OPTIONS.FINE);
} else if (key.name === FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.GOOD]) {
uiActions.submitFeedback(FEEDBACK_OPTIONS.GOOD);
} else if (key.name === FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.DISMISS]) {
uiActions.submitFeedback(FEEDBACK_OPTIONS.DISMISS);
} else {
uiActions.submitFeedback(FEEDBACK_OPTIONS.NOT_SURE);
// Handle other keys: temporary close
uiActions.temporaryCloseFeedbackDialog();
}
uiActions.closeFeedbackDialog();
},
{ isActive: uiState.isFeedbackDialogOpen },
);
@@ -53,8 +59,16 @@ export const FeedbackDialog: React.FC = () => {
<Text color="cyan">{FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.BAD]}: </Text>
<Text>{t('Bad')}</Text>
<Text> </Text>
<Text color="cyan">{t('Any other key')}: </Text>
<Text>{t('Not Sure Yet')}</Text>
<Text color="cyan">
{FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.FINE]}:{' '}
</Text>
<Text>{t('Fine')}</Text>
<Text> </Text>
<Text color="cyan">
{FEEDBACK_OPTION_KEYS[FEEDBACK_OPTIONS.DISMISS]}:{' '}
</Text>
<Text>{t('Dismiss')}</Text>
<Text> </Text>
</Box>
</Box>
);


@@ -36,6 +36,11 @@ vi.mock('../utils/clipboardUtils.js');
vi.mock('../contexts/UIStateContext.js', () => ({
useUIState: vi.fn(() => ({ isFeedbackDialogOpen: false })),
}));
vi.mock('../contexts/UIActionsContext.js', () => ({
useUIActions: vi.fn(() => ({
temporaryCloseFeedbackDialog: vi.fn(),
})),
}));
const mockSlashCommands: SlashCommand[] = [
{


@@ -37,6 +37,7 @@ import * as path from 'node:path';
import { SCREEN_READER_USER_PREFIX } from '../textConstants.js';
import { useShellFocusState } from '../contexts/ShellFocusContext.js';
import { useUIState } from '../contexts/UIStateContext.js';
import { useUIActions } from '../contexts/UIActionsContext.js';
import { FEEDBACK_DIALOG_KEYS } from '../FeedbackDialog.js';
export interface InputPromptProps {
buffer: TextBuffer;
@@ -109,6 +110,7 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
}) => {
const isShellFocused = useShellFocusState();
const uiState = useUIState();
const uiActions = useUIActions();
const [justNavigatedHistory, setJustNavigatedHistory] = useState(false);
const [escPressCount, setEscPressCount] = useState(0);
const [showEscapePrompt, setShowEscapePrompt] = useState(false);
@@ -337,12 +339,16 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
return;
}
// Intercept feedback dialog option keys (1, 2) when dialog is open
if (
uiState.isFeedbackDialogOpen &&
(FEEDBACK_DIALOG_KEYS as readonly string[]).includes(key.name)
) {
return;
// Handle feedback dialog keyboard interactions when dialog is open
if (uiState.isFeedbackDialogOpen) {
// If it's one of the feedback option keys (1-4), let FeedbackDialog handle it
if ((FEEDBACK_DIALOG_KEYS as readonly string[]).includes(key.name)) {
return;
} else {
// For any other key, close feedback dialog temporarily and continue with normal processing
uiActions.temporaryCloseFeedbackDialog();
// Continue processing the key for normal input handling
}
}
// Reset ESC count and hide prompt on any non-ESC key
@@ -712,6 +718,7 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
onToggleShortcuts,
showShortcuts,
uiState,
uiActions,
],
);


@@ -71,6 +71,7 @@ export interface UIActions {
// Feedback dialog
openFeedbackDialog: () => void;
closeFeedbackDialog: () => void;
temporaryCloseFeedbackDialog: () => void;
submitFeedback: (rating: number) => void;
}


@@ -15,6 +15,7 @@ import {
USER_SETTINGS_PATH,
} from '../../config/settings.js';
import type { SessionStatsState } from '../contexts/SessionContext.js';
import { FEEDBACK_OPTIONS } from '../FeedbackDialog.js';
import stripJsonComments from 'strip-json-comments';
const FEEDBACK_SHOW_PROBABILITY = 0.25; // 25% probability of showing feedback dialog
@@ -96,37 +97,48 @@ export const useFeedbackDialog = ({
}: UseFeedbackDialogProps) => {
// Feedback dialog state
const [isFeedbackDialogOpen, setIsFeedbackDialogOpen] = useState(false);
const [isFeedbackDismissedTemporarily, setIsFeedbackDismissedTemporarily] =
useState(false);
const openFeedbackDialog = useCallback(() => {
setIsFeedbackDialogOpen(true);
// Record the timestamp when feedback dialog is shown (fire and forget)
settings.setValue(
SettingScope.User,
'ui.feedbackLastShownTimestamp',
Date.now(),
);
}, [settings]);
}, []);
const closeFeedbackDialog = useCallback(
() => setIsFeedbackDialogOpen(false),
[],
);
const temporaryCloseFeedbackDialog = useCallback(() => {
setIsFeedbackDialogOpen(false);
setIsFeedbackDismissedTemporarily(true);
}, []);
const submitFeedback = useCallback(
(rating: number) => {
// Create and log the feedback event
const feedbackEvent = new UserFeedbackEvent(
sessionStats.sessionId,
rating as UserFeedbackRating,
config.getModel(),
config.getApprovalMode(),
// Only create and log feedback event for ratings 1-3 (GOOD, BAD, FINE)
// Rating 0 (DISMISS) should not trigger any telemetry
if (rating >= FEEDBACK_OPTIONS.GOOD && rating <= FEEDBACK_OPTIONS.FINE) {
const feedbackEvent = new UserFeedbackEvent(
sessionStats.sessionId,
rating as UserFeedbackRating,
config.getModel(),
config.getApprovalMode(),
);
logUserFeedback(config, feedbackEvent);
}
// Record the timestamp when feedback dialog is submitted
settings.setValue(
SettingScope.User,
'ui.feedbackLastShownTimestamp',
Date.now(),
);
logUserFeedback(config, feedbackEvent);
closeFeedbackDialog();
},
[config, sessionStats, closeFeedbackDialog],
[closeFeedbackDialog, sessionStats.sessionId, config, settings],
);
useEffect(() => {
@@ -140,13 +152,15 @@ export const useFeedbackDialog = ({
// 5. Random chance (25% probability)
// 6. Meets minimum requirements (tool calls > 10 OR user messages > 5)
// 7. Fatigue mechanism allows showing (not shown recently across sessions)
// 8. Not temporarily dismissed
if (
config.getAuthType() !== AuthType.QWEN_OAUTH ||
!config.getUsageStatisticsEnabled() ||
settings.merged.ui?.enableUserFeedback === false ||
!lastMessageIsAIResponse(history) ||
Math.random() > FEEDBACK_SHOW_PROBABILITY ||
!meetsMinimumSessionRequirements(sessionStats)
!meetsMinimumSessionRequirements(sessionStats) ||
isFeedbackDismissedTemporarily
) {
return;
}
@@ -164,15 +178,27 @@ export const useFeedbackDialog = ({
history,
sessionStats,
isFeedbackDialogOpen,
isFeedbackDismissedTemporarily,
openFeedbackDialog,
settings.merged.ui?.enableUserFeedback,
config,
]);
// Reset temporary dismissal when a new AI response starts streaming
useEffect(() => {
if (
streamingState === StreamingState.Responding &&
isFeedbackDismissedTemporarily
) {
setIsFeedbackDismissedTemporarily(false);
}
}, [streamingState, isFeedbackDismissedTemporarily]);
return {
isFeedbackDialogOpen,
openFeedbackDialog,
closeFeedbackDialog,
temporaryCloseFeedbackDialog,
submitFeedback,
};
};
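
In short, the hook above applies two rules: only ratings 1-3 (GOOD, BAD, FINE) emit a UserFeedbackEvent while DISMISS only refreshes the fatigue timestamp, and a temporary dismissal is cleared as soon as the next response starts streaming. A condensed sketch of the submit path under those assumptions, with logFeedback and recordShownTimestamp standing in for the real telemetry and settings calls:

// Condensed sketch of useFeedbackDialog's submit path (illustrative helpers,
// not the hook's real internals).
const GOOD = 1;
const FINE = 3;

function submitFeedbackSketch(
  rating: number,
  logFeedback: (rating: number) => void, // stands in for logUserFeedback
  recordShownTimestamp: () => void, // stands in for settings.setValue(...)
): void {
  // Ratings 1-3 are logged; DISMISS (0) produces no telemetry.
  if (rating >= GOOD && rating <= FINE) {
    logFeedback(rating);
  }
  // Every permanent close, including DISMISS, updates ui.feedbackLastShownTimestamp.
  recordShownTimestamp();
}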


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"description": "Qwen Code Core",
"repository": {
"type": "git",


@@ -28,6 +28,7 @@ type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { RequestTokenEstimator } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';
import { buildRuntimeFetchOptions } from '../../utils/runtimeFetchOptions.js';
type StreamingBlockState = {
type: string;
@@ -54,6 +55,9 @@ export class AnthropicContentGenerator implements ContentGenerator {
) {
const defaultHeaders = this.buildHeaders();
const baseURL = contentGeneratorConfig.baseUrl;
// Configure runtime options to ensure user-configured timeout works as expected
// bodyTimeout is always disabled (0) to let Anthropic SDK timeout control the request
const runtimeOptions = buildRuntimeFetchOptions('anthropic');
this.client = new Anthropic({
apiKey: contentGeneratorConfig.apiKey,
@@ -61,6 +65,7 @@ export class AnthropicContentGenerator implements ContentGenerator {
timeout: contentGeneratorConfig.timeout,
maxRetries: contentGeneratorConfig.maxRetries,
defaultHeaders,
...runtimeOptions,
});
this.converter = new AnthropicContentConverter(


@@ -19,6 +19,8 @@ import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { AuthType } from '../../contentGenerator.js';
import type { ChatCompletionToolWithCache } from './types.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
import type { OpenAIRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
// Mock OpenAI
vi.mock('openai', () => ({
@@ -32,6 +34,10 @@ vi.mock('openai', () => ({
})),
}));
vi.mock('../../../utils/runtimeFetchOptions.js', () => ({
buildRuntimeFetchOptions: vi.fn(),
}));
describe('DashScopeOpenAICompatibleProvider', () => {
let provider: DashScopeOpenAICompatibleProvider;
let mockContentGeneratorConfig: ContentGeneratorConfig;
@@ -39,6 +45,11 @@ describe('DashScopeOpenAICompatibleProvider', () => {
beforeEach(() => {
vi.clearAllMocks();
const mockedBuildRuntimeFetchOptions =
buildRuntimeFetchOptions as unknown as MockedFunction<
(sdkType: 'openai') => OpenAIRuntimeFetchOptions
>;
mockedBuildRuntimeFetchOptions.mockReturnValue(undefined);
// Mock ContentGeneratorConfig
mockContentGeneratorConfig = {
@@ -185,18 +196,20 @@ describe('DashScopeOpenAICompatibleProvider', () => {
it('should create OpenAI client with DashScope configuration', () => {
const client = provider.buildClient();
expect(OpenAI).toHaveBeenCalledWith({
apiKey: 'test-api-key',
baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
timeout: 60000,
maxRetries: 2,
defaultHeaders: {
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
'X-DashScope-CacheControl': 'enable',
'X-DashScope-UserAgent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
'X-DashScope-AuthType': AuthType.QWEN_OAUTH,
},
});
expect(OpenAI).toHaveBeenCalledWith(
expect.objectContaining({
apiKey: 'test-api-key',
baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
timeout: 60000,
maxRetries: 2,
defaultHeaders: {
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
'X-DashScope-CacheControl': 'enable',
'X-DashScope-UserAgent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
'X-DashScope-AuthType': AuthType.QWEN_OAUTH,
},
}),
);
expect(client).toBeDefined();
});
@@ -207,13 +220,15 @@ describe('DashScopeOpenAICompatibleProvider', () => {
provider.buildClient();
expect(OpenAI).toHaveBeenCalledWith({
apiKey: 'test-api-key',
baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
timeout: DEFAULT_TIMEOUT,
maxRetries: DEFAULT_MAX_RETRIES,
defaultHeaders: expect.any(Object),
});
expect(OpenAI).toHaveBeenCalledWith(
expect.objectContaining({
apiKey: 'test-api-key',
baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
timeout: DEFAULT_TIMEOUT,
maxRetries: DEFAULT_MAX_RETRIES,
defaultHeaders: expect.any(Object),
}),
);
});
});


@@ -16,6 +16,7 @@ import type {
ChatCompletionContentPartWithCache,
ChatCompletionToolWithCache,
} from './types.js';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
export class DashScopeOpenAICompatibleProvider
implements OpenAICompatibleProvider
@@ -68,12 +69,16 @@ export class DashScopeOpenAICompatibleProvider
maxRetries = DEFAULT_MAX_RETRIES,
} = this.contentGeneratorConfig;
const defaultHeaders = this.buildHeaders();
// Configure fetch options to ensure user-configured timeout works as expected
// bodyTimeout is always disabled (0) to let OpenAI SDK timeout control the request
const fetchOptions = buildRuntimeFetchOptions('openai');
return new OpenAI({
apiKey,
baseURL: baseUrl,
timeout,
maxRetries,
defaultHeaders,
...(fetchOptions ? { fetchOptions } : {}),
});
}


@@ -17,6 +17,8 @@ import { DefaultOpenAICompatibleProvider } from './default.js';
import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
import type { OpenAIRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
// Mock OpenAI
vi.mock('openai', () => ({
@@ -30,6 +32,10 @@ vi.mock('openai', () => ({
})),
}));
vi.mock('../../../utils/runtimeFetchOptions.js', () => ({
buildRuntimeFetchOptions: vi.fn(),
}));
describe('DefaultOpenAICompatibleProvider', () => {
let provider: DefaultOpenAICompatibleProvider;
let mockContentGeneratorConfig: ContentGeneratorConfig;
@@ -37,6 +43,11 @@ describe('DefaultOpenAICompatibleProvider', () => {
beforeEach(() => {
vi.clearAllMocks();
const mockedBuildRuntimeFetchOptions =
buildRuntimeFetchOptions as unknown as MockedFunction<
(sdkType: 'openai') => OpenAIRuntimeFetchOptions
>;
mockedBuildRuntimeFetchOptions.mockReturnValue(undefined);
// Mock ContentGeneratorConfig
mockContentGeneratorConfig = {
@@ -112,15 +123,17 @@ describe('DefaultOpenAICompatibleProvider', () => {
it('should create OpenAI client with correct configuration', () => {
const client = provider.buildClient();
expect(OpenAI).toHaveBeenCalledWith({
apiKey: 'test-api-key',
baseURL: 'https://api.openai.com/v1',
timeout: 60000,
maxRetries: 2,
defaultHeaders: {
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
},
});
expect(OpenAI).toHaveBeenCalledWith(
expect.objectContaining({
apiKey: 'test-api-key',
baseURL: 'https://api.openai.com/v1',
timeout: 60000,
maxRetries: 2,
defaultHeaders: {
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
},
}),
);
expect(client).toBeDefined();
});
@@ -131,15 +144,17 @@ describe('DefaultOpenAICompatibleProvider', () => {
provider.buildClient();
expect(OpenAI).toHaveBeenCalledWith({
apiKey: 'test-api-key',
baseURL: 'https://api.openai.com/v1',
timeout: DEFAULT_TIMEOUT,
maxRetries: DEFAULT_MAX_RETRIES,
defaultHeaders: {
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
},
});
expect(OpenAI).toHaveBeenCalledWith(
expect.objectContaining({
apiKey: 'test-api-key',
baseURL: 'https://api.openai.com/v1',
timeout: DEFAULT_TIMEOUT,
maxRetries: DEFAULT_MAX_RETRIES,
defaultHeaders: {
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
},
}),
);
});
it('should include custom headers from buildHeaders', () => {


@@ -4,6 +4,7 @@ import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import type { OpenAICompatibleProvider } from './types.js';
import { buildRuntimeFetchOptions } from '../../../utils/runtimeFetchOptions.js';
/**
* Default provider for standard OpenAI-compatible APIs
@@ -43,12 +44,16 @@ export class DefaultOpenAICompatibleProvider
maxRetries = DEFAULT_MAX_RETRIES,
} = this.contentGeneratorConfig;
const defaultHeaders = this.buildHeaders();
// Configure fetch options to ensure user-configured timeout works as expected
// bodyTimeout is always disabled (0) to let OpenAI SDK timeout control the request
const fetchOptions = buildRuntimeFetchOptions('openai');
return new OpenAI({
apiKey,
baseURL: baseUrl,
timeout,
maxRetries,
defaultHeaders,
...(fetchOptions ? { fetchOptions } : {}),
});
}


@@ -0,0 +1,167 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { EnvHttpProxyAgent } from 'undici';
/**
* JavaScript runtime type
*/
export type Runtime = 'node' | 'bun' | 'unknown';
/**
* Detect the current JavaScript runtime
*/
export function detectRuntime(): Runtime {
if (typeof process !== 'undefined' && process.versions?.['bun']) {
return 'bun';
}
if (typeof process !== 'undefined' && process.versions?.node) {
return 'node';
}
return 'unknown';
}
/**
* Runtime fetch options for OpenAI SDK
*/
export type OpenAIRuntimeFetchOptions =
| {
dispatcher?: EnvHttpProxyAgent;
timeout?: false;
}
| undefined;
/**
* Runtime fetch options for Anthropic SDK
*/
export type AnthropicRuntimeFetchOptions = {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
httpAgent?: any;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
fetch?: any;
};
/**
* SDK type identifier
*/
export type SDKType = 'openai' | 'anthropic';
/**
* Build runtime-specific fetch options for OpenAI SDK
*/
export function buildRuntimeFetchOptions(
sdkType: 'openai',
): OpenAIRuntimeFetchOptions;
/**
* Build runtime-specific fetch options for Anthropic SDK
*/
export function buildRuntimeFetchOptions(
sdkType: 'anthropic',
): AnthropicRuntimeFetchOptions;
/**
* Build runtime-specific fetch options based on the detected runtime and SDK type
* This function applies runtime-specific configurations to handle timeout differences
* across Node.js and Bun, ensuring user-configured timeout works as expected.
*
* @param sdkType - The SDK type ('openai' or 'anthropic') to determine return type
* @returns Runtime-specific options compatible with the specified SDK
*/
export function buildRuntimeFetchOptions(
sdkType: SDKType,
): OpenAIRuntimeFetchOptions | AnthropicRuntimeFetchOptions {
const runtime = detectRuntime();
// Always disable bodyTimeout (set to 0) to let SDK's timeout parameter
// control the total request time. bodyTimeout only monitors intervals between
// data chunks, not the total request time, so we disable it to ensure user-configured
// timeout works as expected for both streaming and non-streaming requests.
switch (runtime) {
case 'bun': {
if (sdkType === 'openai') {
// Bun: Disable built-in 300s timeout to let OpenAI SDK timeout control
// This ensures user-configured timeout works as expected without interference
return {
timeout: false,
};
} else {
// Bun: Use custom fetch to disable built-in 300s timeout
// This allows Anthropic SDK timeout to control the request
// Note: Bun's fetch automatically uses proxy settings from environment variables
// (HTTP_PROXY, HTTPS_PROXY, NO_PROXY), so proxy behavior is preserved
const bunFetch: typeof fetch = async (
input: RequestInfo | URL,
init?: RequestInit,
) => {
const bunFetchOptions: RequestInit = {
...init,
// @ts-expect-error - Bun-specific timeout option
timeout: false,
};
return fetch(input, bunFetchOptions);
};
return {
fetch: bunFetch,
};
}
}
case 'node': {
// Node.js: Use EnvHttpProxyAgent to configure proxy and disable bodyTimeout
// EnvHttpProxyAgent automatically reads proxy settings from environment variables
// (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, etc.) to preserve proxy functionality
// bodyTimeout is always 0 (disabled) to let SDK timeout control the request
try {
const agent = new EnvHttpProxyAgent({
bodyTimeout: 0, // Disable to let SDK timeout control total request time
});
if (sdkType === 'openai') {
return {
dispatcher: agent,
};
} else {
return {
httpAgent: agent,
};
}
} catch {
// If undici is not available, return appropriate default
if (sdkType === 'openai') {
return undefined;
} else {
return {};
}
}
}
default: {
// Unknown runtime: Try to use EnvHttpProxyAgent if available
// EnvHttpProxyAgent automatically reads proxy settings from environment variables
try {
const agent = new EnvHttpProxyAgent({
bodyTimeout: 0, // Disable to let SDK timeout control total request time
});
if (sdkType === 'openai') {
return {
dispatcher: agent,
};
} else {
return {
httpAgent: agent,
};
}
} catch {
if (sdkType === 'openai') {
return undefined;
} else {
return {};
}
}
}
}
}
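
For context, the providers earlier in this compare consume this helper by spreading its result into the SDK client constructor. A minimal usage sketch, with placeholder credentials and endpoint and the same guard used in the provider diffs above:

import OpenAI from 'openai';
import { buildRuntimeFetchOptions } from './runtimeFetchOptions.js';

// Sketch: wiring the runtime-aware fetch options into an OpenAI-compatible
// client. The SDK-level timeout stays in control because the helper disables
// undici's bodyTimeout (Node.js) or Bun's built-in fetch timeout.
const fetchOptions = buildRuntimeFetchOptions('openai');

export const client = new OpenAI({
  apiKey: process.env['OPENAI_API_KEY'], // placeholder credential
  baseURL: 'https://api.openai.com/v1', // placeholder endpoint
  timeout: 60_000,
  maxRetries: 2,
  ...(fetchOptions ? { fetchOptions } : {}),
});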


@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",


@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.8.0-preview.2",
"version": "0.8.0",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {