Compare commits

..

1 Commit

Author SHA1 Message Date
mingholy.lmh
013dcb7b49 fix: missing tool call chunks for openai logging 2025-09-19 14:36:47 +08:00
48 changed files with 244 additions and 1784 deletions

13
.vscode/launch.json vendored
View File

@@ -101,13 +101,6 @@
"env": {
"GEMINI_SANDBOX": "false"
}
},
{
"name": "Attach by Process ID",
"processId": "${command:PickProcess}",
"request": "attach",
"skipFiles": ["<node_internals>/**"],
"type": "node"
}
],
"inputs": [
@@ -122,12 +115,6 @@
"type": "promptString",
"description": "Enter your prompt for non-interactive mode",
"default": "Explain this code"
},
{
"id": "debugPort",
"type": "promptString",
"description": "Enter the debug port number (default: 9229)",
"default": "9229"
}
]
}

View File

@@ -1,25 +1,5 @@
# Changelog
## 0.0.12
- Added vision model support for Qwen-OAuth authentication.
- Synced upstream `gemini-cli` to v0.3.4 with numerous improvements and bug fixes.
- Enhanced subagent functionality with system reminders and improved user experience.
- Added tool call type coercion for better compatibility.
- Fixed arrow key navigation issues on Windows.
- Fixed missing tool call chunks for OpenAI logging.
- Fixed system prompt issues to avoid malformed tool calls.
- Fixed terminal flicker when subagent is executing.
- Fixed duplicate subagents configuration when running in home directory.
- Fixed Esc key unable to cancel subagent dialog.
- Added confirmation prompt for `/init` command when context file exists.
- Added `skipLoopDetection` configuration option.
- Fixed `is_background` parameter reset issues.
- Enhanced Windows compatibility with multi-line paste handling.
- Improved subagent documentation and branding consistency.
- Fixed various linting errors and improved code quality.
- Miscellaneous improvements and bug fixes.
## 0.0.11
- Added subagents feature with file-based configuration system for specialized AI assistants.

View File

@@ -54,7 +54,6 @@ For detailed setup instructions, see [Authorization](#authorization).
- **Code Understanding & Editing** - Query and edit large codebases beyond traditional context window limits
- **Workflow Automation** - Automate operational tasks like handling pull requests and complex rebases
- **Enhanced Parser** - Adapted parser specifically optimized for Qwen-Coder models
- **Vision Model Support** - Automatically detect images in your input and seamlessly switch to vision-capable models for multimodal analysis
## Installation
@@ -122,58 +121,6 @@ Create or edit `.qwen/settings.json` in your home directory:
> 📝 **Note**: Session token limit applies to a single conversation, not cumulative API calls.
### Vision Model Configuration
Qwen Code includes intelligent vision model auto-switching that detects images in your input and can automatically switch to vision-capable models for multimodal analysis. **This feature is enabled by default** - when you include images in your queries, you'll see a dialog asking how you'd like to handle the vision model switch.
#### Skip the Switch Dialog (Optional)
If you don't want to see the interactive dialog each time, configure the default behavior in your `.qwen/settings.json`:
```json
{
"experimental": {
"vlmSwitchMode": "once"
}
}
```
**Available modes:**
- **`"once"`** - Switch to vision model for this query only, then revert
- **`"session"`** - Switch to vision model for the entire session
- **`"persist"`** - Continue with current model (no switching)
- **Not set** - Show interactive dialog each time (default)
#### Command Line Override
You can also set the behavior via command line:
```bash
# Switch once per query
qwen --vlm-switch-mode once
# Switch for entire session
qwen --vlm-switch-mode session
# Never switch automatically
qwen --vlm-switch-mode persist
```
#### Disable Vision Models (Optional)
To completely disable vision model support, add to your `.qwen/settings.json`:
```json
{
"experimental": {
"visionModelPreview": false
}
}
```
> 💡 **Tip**: In YOLO mode (`--yolo`), vision switching happens automatically without prompts when images are detected.
### Authorization
Choose your preferred authentication method based on your needs:

12
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.13",
"version": "0.0.11",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.0.13",
"version": "0.0.11",
"workspaces": [
"packages/*"
],
@@ -13454,7 +13454,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.0.13",
"version": "0.0.11",
"dependencies": {
"@google/genai": "1.9.0",
"@iarna/toml": "^2.2.5",
@@ -13662,7 +13662,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.0.13",
"version": "0.0.11",
"dependencies": {
"@google/genai": "1.13.0",
"@lvce-editor/ripgrep": "^1.6.0",
@@ -13788,7 +13788,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.13",
"version": "0.0.11",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -13800,7 +13800,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.0.13",
"version": "0.0.11",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.13",
"version": "0.0.11",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.13"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
},
"scripts": {
"start": "node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.13",
"version": "0.0.11",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.13"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
},
"dependencies": {
"@google/genai": "1.9.0",

View File

@@ -1514,7 +1514,7 @@ describe('loadCliConfig model selection', () => {
argv,
);
expect(config.getModel()).toBe('coder-model');
expect(config.getModel()).toBe('qwen3-coder-plus');
});
it('always prefers model from argvs', async () => {

View File

@@ -82,7 +82,6 @@ export interface CliArgs {
includeDirectories: string[] | undefined;
tavilyApiKey: string | undefined;
screenReader: boolean | undefined;
vlmSwitchMode: string | undefined;
}
export async function parseArguments(settings: Settings): Promise<CliArgs> {
@@ -250,13 +249,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
description: 'Enable screen reader mode for accessibility.',
default: false,
})
.option('vlm-switch-mode', {
type: 'string',
choices: ['once', 'session', 'persist'],
description:
'Default behavior when images are detected in input. Values: once (one-time switch), session (switch for entire session), persist (continue with current model). Overrides settings files.',
default: process.env['VLM_SWITCH_MODE'],
})
.check((argv) => {
if (argv.prompt && argv['promptInteractive']) {
throw new Error(
@@ -532,9 +524,6 @@ export async function loadCliConfig(
argv.screenReader !== undefined
? argv.screenReader
: (settings.ui?.accessibility?.screenReader ?? false);
const vlmSwitchMode =
argv.vlmSwitchMode || settings.experimental?.vlmSwitchMode;
return new Config({
sessionId,
embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
@@ -641,7 +630,6 @@ export async function loadCliConfig(
skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
enablePromptCompletion: settings.general?.enablePromptCompletion ?? false,
skipLoopDetection: settings.skipLoopDetection ?? false,
vlmSwitchMode,
});
}

View File

@@ -69,11 +69,7 @@ const MOCK_WORKSPACE_SETTINGS_PATH = pathActual.join(
);
// A more flexible type for test data that allows arbitrary properties.
type TestSettings = Settings & {
[key: string]: unknown;
nested?: { [key: string]: unknown };
nestedObj?: { [key: string]: unknown };
};
type TestSettings = Settings & { [key: string]: unknown };
vi.mock('fs', async (importOriginal) => {
// Get all the functions from the real 'fs' module
@@ -141,9 +137,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -204,9 +197,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -270,9 +260,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -333,9 +320,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -401,9 +385,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -496,9 +477,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -584,9 +562,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -716,9 +691,6 @@ describe('Settings Loading and Merging', () => {
'/system/dir',
],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -1459,9 +1431,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -1547,11 +1516,7 @@ describe('Settings Loading and Merging', () => {
'workspace_endpoint_from_env/api',
);
expect(
(
(settings.workspace.settings as TestSettings).nested as {
[key: string]: unknown;
}
)['value'],
(settings.workspace.settings as TestSettings)['nested']['value'],
).toBe('workspace_endpoint_from_env');
expect((settings.merged as TestSettings)['endpoint']).toBe(
'workspace_endpoint_from_env/api',
@@ -1801,39 +1766,19 @@ describe('Settings Loading and Merging', () => {
).toBeUndefined();
expect(
(
(settings.user.settings as TestSettings).nestedObj as {
[key: string]: unknown;
}
)['nestedNull'],
(settings.user.settings as TestSettings)['nestedObj']['nestedNull'],
).toBeNull();
expect(
(
(settings.user.settings as TestSettings).nestedObj as {
[key: string]: unknown;
}
)['nestedBool'],
(settings.user.settings as TestSettings)['nestedObj']['nestedBool'],
).toBe(true);
expect(
(
(settings.user.settings as TestSettings).nestedObj as {
[key: string]: unknown;
}
)['nestedNum'],
(settings.user.settings as TestSettings)['nestedObj']['nestedNum'],
).toBe(0);
expect(
(
(settings.user.settings as TestSettings).nestedObj as {
[key: string]: unknown;
}
)['nestedString'],
(settings.user.settings as TestSettings)['nestedObj']['nestedString'],
).toBe('literal');
expect(
(
(settings.user.settings as TestSettings).nestedObj as {
[key: string]: unknown;
}
)['anotherEnv'],
(settings.user.settings as TestSettings)['nestedObj']['anotherEnv'],
).toBe('env_string_nested_value');
delete process.env['MY_ENV_STRING'];
@@ -1919,9 +1864,6 @@ describe('Settings Loading and Merging', () => {
advanced: {
excludedEnvVars: [],
},
experimental: {},
contentGenerator: {},
systemPromptMappings: {},
extensions: {
disabled: [],
workspacesWithMigrationNudge: [],
@@ -2394,14 +2336,14 @@ describe('Settings Loading and Merging', () => {
vimMode: false,
},
model: {
maxSessionTurns: -1,
maxSessionTurns: 0,
},
context: {
includeDirectories: [],
},
security: {
folderTrust: {
enabled: false,
enabled: null,
},
},
};
@@ -2410,9 +2352,9 @@ describe('Settings Loading and Merging', () => {
expect(v1Settings).toEqual({
vimMode: false,
maxSessionTurns: -1,
maxSessionTurns: 0,
includeDirectories: [],
folderTrust: false,
folderTrust: null,
});
});

View File

@@ -396,24 +396,6 @@ function mergeSettings(
]),
],
},
experimental: {
...(systemDefaults.experimental || {}),
...(user.experimental || {}),
...(safeWorkspaceWithoutFolderTrust.experimental || {}),
...(system.experimental || {}),
},
contentGenerator: {
...(systemDefaults.contentGenerator || {}),
...(user.contentGenerator || {}),
...(safeWorkspaceWithoutFolderTrust.contentGenerator || {}),
...(system.contentGenerator || {}),
},
systemPromptMappings: {
...(systemDefaults.systemPromptMappings || {}),
...(user.systemPromptMappings || {}),
...(safeWorkspaceWithoutFolderTrust.systemPromptMappings || {}),
...(system.systemPromptMappings || {}),
},
extensions: {
...(systemDefaults.extensions || {}),
...(user.extensions || {}),

View File

@@ -746,21 +746,11 @@ export const SETTINGS_SCHEMA = {
label: 'Vision Model Preview',
category: 'Experimental',
requiresRestart: false,
default: true,
default: false,
description:
'Enable vision model support and auto-switching functionality. When disabled, vision models like qwen-vl-max-latest will be hidden and auto-switching will not occur.',
showInDialog: true,
},
vlmSwitchMode: {
type: 'string',
label: 'VLM Switch Mode',
category: 'Experimental',
requiresRestart: false,
default: undefined as string | undefined,
description:
'Default behavior when images are detected in input. Values: once (one-time switch), session (switch for entire session), persist (continue with current model). If not set, user will be prompted each time. This is a temporary experimental feature.',
showInDialog: false,
},
},
},

View File

@@ -566,9 +566,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
}
// Switch model for future use but return false to stop current retry
config.setModel(fallbackModel).catch((error) => {
console.error('Failed to switch to fallback model:', error);
});
config.setModel(fallbackModel);
config.setFallbackMode(true);
logFlashFallback(
config,
@@ -652,28 +650,17 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
}, []);
const handleModelSelect = useCallback(
async (modelId: string) => {
try {
await config.setModel(modelId);
setCurrentModel(modelId);
setIsModelSelectionDialogOpen(false);
addItem(
{
type: MessageType.INFO,
text: `Switched model to \`${modelId}\` for this session.`,
},
Date.now(),
);
} catch (error) {
console.error('Failed to switch model:', error);
addItem(
{
type: MessageType.ERROR,
text: `Failed to switch to model \`${modelId}\`. Please try again.`,
},
Date.now(),
);
}
(modelId: string) => {
config.setModel(modelId);
setCurrentModel(modelId);
setIsModelSelectionDialogOpen(false);
addItem(
{
type: MessageType.INFO,
text: `Switched model to \`${modelId}\` for this session.`,
},
Date.now(),
);
},
[config, setCurrentModel, addItem],
);
@@ -683,7 +670,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
if (!contentGeneratorConfig) return [];
const visionModelPreviewEnabled =
settings.merged.experimental?.visionModelPreview ?? true;
settings.merged.experimental?.visionModelPreview ?? false;
switch (contentGeneratorConfig.authType) {
case AuthType.QWEN_OAUTH:
@@ -772,7 +759,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
setModelSwitchedFromQuotaError,
refreshStatic,
() => cancelHandlerRef.current(),
settings.merged.experimental?.visionModelPreview ?? true,
settings.merged.experimental?.visionModelPreview ?? false,
handleVisionSwitchRequired,
);

View File

@@ -46,8 +46,8 @@ describe('ModelSwitchDialog', () => {
value: VisionSwitchOutcome.SwitchSessionToVL,
},
{
label: 'Continue with current model',
value: VisionSwitchOutcome.ContinueWithCurrentModel,
label: 'Do not switch, show guidance',
value: VisionSwitchOutcome.DisallowWithGuidance,
},
];
@@ -81,18 +81,18 @@ describe('ModelSwitchDialog', () => {
);
});
it('should call onSelect with ContinueWithCurrentModel when third option is selected', () => {
it('should call onSelect with DisallowWithGuidance when third option is selected', () => {
render(<ModelSwitchDialog onSelect={mockOnSelect} />);
const onSelectCallback = mockRadioButtonSelect.mock.calls[0][0].onSelect;
onSelectCallback(VisionSwitchOutcome.ContinueWithCurrentModel);
onSelectCallback(VisionSwitchOutcome.DisallowWithGuidance);
expect(mockOnSelect).toHaveBeenCalledWith(
VisionSwitchOutcome.ContinueWithCurrentModel,
VisionSwitchOutcome.DisallowWithGuidance,
);
});
it('should setup escape key handler to call onSelect with ContinueWithCurrentModel', () => {
it('should setup escape key handler to call onSelect with DisallowWithGuidance', () => {
render(<ModelSwitchDialog onSelect={mockOnSelect} />);
expect(mockUseKeypress).toHaveBeenCalledWith(expect.any(Function), {
@@ -104,7 +104,7 @@ describe('ModelSwitchDialog', () => {
keypressHandler({ name: 'escape' });
expect(mockOnSelect).toHaveBeenCalledWith(
VisionSwitchOutcome.ContinueWithCurrentModel,
VisionSwitchOutcome.DisallowWithGuidance,
);
});
@@ -126,9 +126,13 @@ describe('ModelSwitchDialog', () => {
describe('VisionSwitchOutcome enum', () => {
it('should have correct enum values', () => {
expect(VisionSwitchOutcome.SwitchOnce).toBe('once');
expect(VisionSwitchOutcome.SwitchSessionToVL).toBe('session');
expect(VisionSwitchOutcome.ContinueWithCurrentModel).toBe('persist');
expect(VisionSwitchOutcome.SwitchOnce).toBe('switch_once');
expect(VisionSwitchOutcome.SwitchSessionToVL).toBe(
'switch_session_to_vl',
);
expect(VisionSwitchOutcome.DisallowWithGuidance).toBe(
'disallow_with_guidance',
);
});
});
@@ -140,7 +144,7 @@ describe('ModelSwitchDialog', () => {
// Call multiple times
onSelectCallback(VisionSwitchOutcome.SwitchOnce);
onSelectCallback(VisionSwitchOutcome.SwitchSessionToVL);
onSelectCallback(VisionSwitchOutcome.ContinueWithCurrentModel);
onSelectCallback(VisionSwitchOutcome.DisallowWithGuidance);
expect(mockOnSelect).toHaveBeenCalledTimes(3);
expect(mockOnSelect).toHaveBeenNthCalledWith(
@@ -153,7 +157,7 @@ describe('ModelSwitchDialog', () => {
);
expect(mockOnSelect).toHaveBeenNthCalledWith(
3,
VisionSwitchOutcome.ContinueWithCurrentModel,
VisionSwitchOutcome.DisallowWithGuidance,
);
});
@@ -175,7 +179,7 @@ describe('ModelSwitchDialog', () => {
expect(mockOnSelect).toHaveBeenCalledTimes(2);
expect(mockOnSelect).toHaveBeenCalledWith(
VisionSwitchOutcome.ContinueWithCurrentModel,
VisionSwitchOutcome.DisallowWithGuidance,
);
});
});

View File

@@ -14,9 +14,9 @@ import {
import { useKeypress } from '../hooks/useKeypress.js';
export enum VisionSwitchOutcome {
SwitchOnce = 'once',
SwitchSessionToVL = 'session',
ContinueWithCurrentModel = 'persist',
SwitchOnce = 'switch_once',
SwitchSessionToVL = 'switch_session_to_vl',
DisallowWithGuidance = 'disallow_with_guidance',
}
export interface ModelSwitchDialogProps {
@@ -29,7 +29,7 @@ export const ModelSwitchDialog: React.FC<ModelSwitchDialogProps> = ({
useKeypress(
(key) => {
if (key.name === 'escape') {
onSelect(VisionSwitchOutcome.ContinueWithCurrentModel);
onSelect(VisionSwitchOutcome.DisallowWithGuidance);
}
},
{ isActive: true },
@@ -45,8 +45,8 @@ export const ModelSwitchDialog: React.FC<ModelSwitchDialogProps> = ({
value: VisionSwitchOutcome.SwitchSessionToVL,
},
{
label: 'Continue with current model',
value: VisionSwitchOutcome.ContinueWithCurrentModel,
label: 'Do not switch, show guidance',
value: VisionSwitchOutcome.DisallowWithGuidance,
},
];

View File

@@ -526,7 +526,7 @@ describe('KeypressContext - Kitty Protocol', () => {
});
await waitFor(() => {
expect(keyHandler).toHaveBeenCalledTimes(6); // 1 paste event + 5 individual chars for 'after'
expect(keyHandler).toHaveBeenCalledTimes(2); // 1 paste event + 1 paste event for 'after'
});
// Should emit paste event first
@@ -538,40 +538,12 @@ describe('KeypressContext - Kitty Protocol', () => {
}),
);
// Then process 'after' as individual characters (since it doesn't contain return)
// Then process 'after' as a paste event (since it's > 2 chars)
expect(keyHandler).toHaveBeenNthCalledWith(
2,
expect.objectContaining({
name: 'a',
paste: false,
}),
);
expect(keyHandler).toHaveBeenNthCalledWith(
3,
expect.objectContaining({
name: 'f',
paste: false,
}),
);
expect(keyHandler).toHaveBeenNthCalledWith(
4,
expect.objectContaining({
name: 't',
paste: false,
}),
);
expect(keyHandler).toHaveBeenNthCalledWith(
5,
expect.objectContaining({
name: 'e',
paste: false,
}),
);
expect(keyHandler).toHaveBeenNthCalledWith(
6,
expect.objectContaining({
name: 'r',
paste: false,
paste: true,
sequence: 'after',
}),
);
});
@@ -599,7 +571,7 @@ describe('KeypressContext - Kitty Protocol', () => {
});
await waitFor(() => {
expect(keyHandler).toHaveBeenCalledTimes(16); // 5 + 1 + 6 + 1 + 3 = 16 calls
expect(keyHandler).toHaveBeenCalledTimes(14); // Adjusted based on actual behavior
});
// Check the sequence: 'start' (5 chars) + paste1 + 'middle' (6 chars) + paste2 + 'end' (3 chars as paste)
@@ -671,18 +643,13 @@ describe('KeypressContext - Kitty Protocol', () => {
}),
);
// 'end' as individual characters (since it doesn't contain return)
// 'end' as paste event (since it's > 2 chars)
expect(keyHandler).toHaveBeenNthCalledWith(
callIndex++,
expect.objectContaining({ name: 'e' }),
);
expect(keyHandler).toHaveBeenNthCalledWith(
callIndex++,
expect.objectContaining({ name: 'n' }),
);
expect(keyHandler).toHaveBeenNthCalledWith(
callIndex++,
expect.objectContaining({ name: 'd' }),
expect.objectContaining({
paste: true,
sequence: 'end',
}),
);
});
@@ -771,18 +738,16 @@ describe('KeypressContext - Kitty Protocol', () => {
});
await waitFor(() => {
// With the current implementation, fragmented paste markers get reconstructed
// into a single paste event for 'content'
expect(keyHandler).toHaveBeenCalledTimes(1);
// With the current implementation, fragmented data gets processed differently
// The first fragment '\x1b[20' gets processed as individual characters
// The second fragment '0~content\x1b[2' gets processed as paste + individual chars
// The third fragment '01~' gets processed as individual characters
expect(keyHandler).toHaveBeenCalled();
});
// Should reconstruct the fragmented paste markers into a single paste event
expect(keyHandler).toHaveBeenCalledWith(
expect.objectContaining({
paste: true,
sequence: 'content',
}),
);
// The current implementation processes fragmented paste markers as separate events
// rather than reconstructing them into a single paste event
expect(keyHandler.mock.calls.length).toBeGreaterThan(1);
});
});
@@ -886,47 +851,28 @@ describe('KeypressContext - Kitty Protocol', () => {
stdin.emit('data', Buffer.from('lo'));
});
// With the current implementation, data is processed as individual characters
// since 'hel' doesn't contain return (0x0d)
// With the current implementation, data is processed as it arrives
// First chunk 'hel' is treated as paste (multi-character)
expect(keyHandler).toHaveBeenNthCalledWith(
1,
expect.objectContaining({
name: 'h',
sequence: 'h',
paste: false,
paste: true,
sequence: 'hel',
}),
);
// Second chunk 'lo' is processed as individual characters
expect(keyHandler).toHaveBeenNthCalledWith(
2,
expect.objectContaining({
name: 'e',
sequence: 'e',
name: 'l',
sequence: 'l',
paste: false,
}),
);
expect(keyHandler).toHaveBeenNthCalledWith(
3,
expect.objectContaining({
name: 'l',
sequence: 'l',
paste: false,
}),
);
// Second chunk 'lo' is also processed as individual characters
expect(keyHandler).toHaveBeenNthCalledWith(
4,
expect.objectContaining({
name: 'l',
sequence: 'l',
paste: false,
}),
);
expect(keyHandler).toHaveBeenNthCalledWith(
5,
expect.objectContaining({
name: 'o',
sequence: 'o',
@@ -934,7 +880,7 @@ describe('KeypressContext - Kitty Protocol', () => {
}),
);
expect(keyHandler).toHaveBeenCalledTimes(5);
expect(keyHandler).toHaveBeenCalledTimes(3);
} finally {
vi.useRealTimers();
}
@@ -961,20 +907,14 @@ describe('KeypressContext - Kitty Protocol', () => {
});
// Should flush immediately without waiting for timeout
// Large data without return gets treated as individual characters
expect(keyHandler).toHaveBeenCalledTimes(65);
// Each character should be processed individually
for (let i = 0; i < 65; i++) {
expect(keyHandler).toHaveBeenNthCalledWith(
i + 1,
expect.objectContaining({
name: 'x',
sequence: 'x',
paste: false,
}),
);
}
// Large data gets treated as paste event
expect(keyHandler).toHaveBeenCalledTimes(1);
expect(keyHandler).toHaveBeenCalledWith(
expect.objectContaining({
paste: true,
sequence: largeData,
}),
);
// Advancing timer should not cause additional calls
const callCountBefore = keyHandler.mock.calls.length;

View File

@@ -407,11 +407,7 @@ export function KeypressProvider({
return;
}
if (
(rawDataBuffer.length <= 2 && rawDataBuffer.includes(0x0d)) ||
!rawDataBuffer.includes(0x0d) ||
isPaste
) {
if (rawDataBuffer.length <= 2 || isPaste) {
keypressStream.write(rawDataBuffer);
} else {
// Flush raw data buffer as a paste event

View File

@@ -60,9 +60,7 @@ const mockParseAndFormatApiError = vi.hoisted(() => vi.fn());
const mockHandleVisionSwitch = vi.hoisted(() =>
vi.fn().mockResolvedValue({ shouldProceed: true }),
);
const mockRestoreOriginalModel = vi.hoisted(() =>
vi.fn().mockResolvedValue(undefined),
);
const mockRestoreOriginalModel = vi.hoisted(() => vi.fn());
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
const actualCoreModule = (await importOriginal()) as any;
@@ -303,8 +301,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
);
},
{
@@ -466,8 +462,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -547,8 +541,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -657,8 +649,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -768,8 +758,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -899,8 +887,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
cancelSubmitSpy,
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1212,8 +1198,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1267,8 +1251,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1319,8 +1301,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1369,8 +1349,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1420,8 +1398,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1511,8 +1487,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1563,8 +1537,6 @@ describe('useGeminiStream', () => {
vi.fn(), // setModelSwitched
vi.fn(), // onEditorClose
vi.fn(), // onCancelSubmit
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1630,8 +1602,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1710,8 +1680,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1766,8 +1734,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -1977,8 +1943,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -2011,8 +1975,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -2066,8 +2028,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);
@@ -2105,8 +2065,6 @@ describe('useGeminiStream', () => {
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
undefined, // onVisionSwitchRequired (optional)
),
);

View File

@@ -89,7 +89,7 @@ export const useGeminiStream = (
setModelSwitchedFromQuotaError: React.Dispatch<React.SetStateAction<boolean>>,
onEditorClose: () => void,
onCancelSubmit: () => void,
visionModelPreviewEnabled: boolean,
visionModelPreviewEnabled: boolean = false,
onVisionSwitchRequired?: (query: PartListUnion) => Promise<{
modelOverride?: string;
persistSessionModel?: string;
@@ -765,9 +765,7 @@ export const useGeminiStream = (
if (processingStatus === StreamProcessingStatus.UserCancelled) {
// Restore original model if it was temporarily overridden
restoreOriginalModel().catch((error) => {
console.error('Failed to restore original model:', error);
});
restoreOriginalModel();
isSubmittingQueryRef.current = false;
return;
}
@@ -782,14 +780,10 @@ export const useGeminiStream = (
}
// Restore original model if it was temporarily overridden
restoreOriginalModel().catch((error) => {
console.error('Failed to restore original model:', error);
});
restoreOriginalModel();
} catch (error: unknown) {
// Restore original model if it was temporarily overridden
restoreOriginalModel().catch((error) => {
console.error('Failed to restore original model:', error);
});
restoreOriginalModel();
if (error instanceof UnauthorizedError) {
onAuthError();

View File

@@ -8,7 +8,7 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { renderHook, act } from '@testing-library/react';
import type { Part, PartListUnion } from '@google/genai';
import { AuthType, type Config, ApprovalMode } from '@qwen-code/qwen-code-core';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
import {
shouldOfferVisionSwitch,
processVisionSwitchOutcome,
@@ -41,7 +41,7 @@ describe('useVisionAutoSwitch helpers', () => {
const result = shouldOfferVisionSwitch(
parts,
AuthType.QWEN_OAUTH,
'vision-model',
'qwen-vl-max-latest',
true,
);
expect(result).toBe(false);
@@ -108,56 +108,6 @@ describe('useVisionAutoSwitch helpers', () => {
);
expect(result).toBe(false);
});
it('returns true when image parts exist in YOLO mode context', () => {
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
const result = shouldOfferVisionSwitch(
parts,
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
true,
);
expect(result).toBe(true);
});
it('returns false when no image parts exist in YOLO mode context', () => {
const parts: PartListUnion = [{ text: 'just text' }];
const result = shouldOfferVisionSwitch(
parts,
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
true,
);
expect(result).toBe(false);
});
it('returns false when already using vision model in YOLO mode context', () => {
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
const result = shouldOfferVisionSwitch(
parts,
AuthType.QWEN_OAUTH,
'vision-model',
true,
);
expect(result).toBe(false);
});
it('returns false when authType is not QWEN_OAUTH in YOLO mode context', () => {
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
const result = shouldOfferVisionSwitch(
parts,
AuthType.USE_GEMINI,
'qwen3-coder-plus',
true,
);
expect(result).toBe(false);
});
});
describe('processVisionSwitchOutcome', () => {
@@ -175,11 +125,11 @@ describe('useVisionAutoSwitch helpers', () => {
expect(result).toEqual({ persistSessionModel: vl });
});
it('maps ContinueWithCurrentModel to empty result', () => {
it('maps DisallowWithGuidance to showGuidance', () => {
const result = processVisionSwitchOutcome(
VisionSwitchOutcome.ContinueWithCurrentModel,
VisionSwitchOutcome.DisallowWithGuidance,
);
expect(result).toEqual({});
expect(result).toEqual({ showGuidance: true });
});
});
@@ -201,20 +151,13 @@ describe('useVisionAutoSwitch hook', () => {
ts: number,
) => any;
const createMockConfig = (
authType: AuthType,
initialModel: string,
approvalMode: ApprovalMode = ApprovalMode.DEFAULT,
vlmSwitchMode?: string,
) => {
const createMockConfig = (authType: AuthType, initialModel: string) => {
let currentModel = initialModel;
const mockConfig: Partial<Config> = {
getModel: vi.fn(() => currentModel),
setModel: vi.fn(async (m: string) => {
setModel: vi.fn((m: string) => {
currentModel = m;
}),
getApprovalMode: vi.fn(() => approvalMode),
getVlmSwitchMode: vi.fn(() => vlmSwitchMode),
getContentGeneratorConfig: vi.fn(() => ({
authType,
model: currentModel,
@@ -283,9 +226,11 @@ describe('useVisionAutoSwitch hook', () => {
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
it('continues with current model when dialog returns empty result', async () => {
it('shows guidance and blocks when dialog returns showGuidance', async () => {
const config = createMockConfig(AuthType.QWEN_OAUTH, 'qwen3-coder-plus');
const onVisionSwitchRequired = vi.fn().mockResolvedValue({}); // Empty result for ContinueWithCurrentModel
const onVisionSwitchRequired = vi
.fn()
.mockResolvedValue({ showGuidance: true });
const { result } = renderHook(() =>
useVisionAutoSwitch(config, addItem as any, true, onVisionSwitchRequired),
);
@@ -300,12 +245,11 @@ describe('useVisionAutoSwitch hook', () => {
res = await result.current.handleVisionSwitch(parts, userTs, false);
});
// Should not add any guidance message
expect(addItem).not.toHaveBeenCalledWith(
expect(addItem).toHaveBeenCalledWith(
{ type: MessageType.INFO, text: getVisionSwitchGuidanceMessage() },
userTs,
);
expect(res).toEqual({ shouldProceed: true });
expect(res).toEqual({ shouldProceed: false });
expect(config.setModel).not.toHaveBeenCalled();
});
@@ -314,7 +258,7 @@ describe('useVisionAutoSwitch hook', () => {
const config = createMockConfig(AuthType.QWEN_OAUTH, initialModel);
const onVisionSwitchRequired = vi
.fn()
.mockResolvedValue({ modelOverride: 'coder-model' });
.mockResolvedValue({ modelOverride: 'qwen-vl-max-latest' });
const { result } = renderHook(() =>
useVisionAutoSwitch(config, addItem as any, true, onVisionSwitchRequired),
);
@@ -329,26 +273,20 @@ describe('useVisionAutoSwitch hook', () => {
});
expect(res).toEqual({ shouldProceed: true, originalModel: initialModel });
expect(config.setModel).toHaveBeenCalledWith('coder-model', {
reason: 'vision_auto_switch',
context: 'User-prompted vision switch (one-time override)',
});
expect(config.setModel).toHaveBeenCalledWith('qwen-vl-max-latest');
// Now restore
await act(async () => {
await result.current.restoreOriginalModel();
});
expect(config.setModel).toHaveBeenLastCalledWith(initialModel, {
reason: 'vision_auto_switch',
context: 'Restoring original model after vision switch',
act(() => {
result.current.restoreOriginalModel();
});
expect(config.setModel).toHaveBeenLastCalledWith(initialModel);
});
it('persists session model when dialog requests persistence', async () => {
const config = createMockConfig(AuthType.QWEN_OAUTH, 'qwen3-coder-plus');
const onVisionSwitchRequired = vi
.fn()
.mockResolvedValue({ persistSessionModel: 'coder-model' });
.mockResolvedValue({ persistSessionModel: 'qwen-vl-max-latest' });
const { result } = renderHook(() =>
useVisionAutoSwitch(config, addItem as any, true, onVisionSwitchRequired),
);
@@ -363,17 +301,16 @@ describe('useVisionAutoSwitch hook', () => {
});
expect(res).toEqual({ shouldProceed: true });
expect(config.setModel).toHaveBeenCalledWith('coder-model', {
reason: 'vision_auto_switch',
context: 'User-prompted vision switch (session persistent)',
});
expect(config.setModel).toHaveBeenCalledWith('qwen-vl-max-latest');
// Restore should be a no-op since no one-time override was used
await act(async () => {
await result.current.restoreOriginalModel();
act(() => {
result.current.restoreOriginalModel();
});
// Last call should still be the persisted model set
expect((config.setModel as any).mock.calls.pop()?.[0]).toBe('coder-model');
expect((config.setModel as any).mock.calls.pop()?.[0]).toBe(
'qwen-vl-max-latest',
);
});
it('returns shouldProceed=true when dialog returns no special flags', async () => {
@@ -434,420 +371,4 @@ describe('useVisionAutoSwitch hook', () => {
expect(res).toEqual({ shouldProceed: true });
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
describe('YOLO mode behavior', () => {
it('automatically switches to vision model in YOLO mode without showing dialog', async () => {
const initialModel = 'qwen3-coder-plus';
const config = createMockConfig(
AuthType.QWEN_OAUTH,
initialModel,
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn(); // Should not be called in YOLO mode
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
let res: any;
await act(async () => {
res = await result.current.handleVisionSwitch(parts, 7070, false);
});
// Should automatically switch without calling the dialog
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
expect(res).toEqual({
shouldProceed: true,
originalModel: initialModel,
});
expect(config.setModel).toHaveBeenCalledWith(getDefaultVisionModel(), {
reason: 'vision_auto_switch',
context: 'YOLO mode auto-switch for image content',
});
});
it('does not switch in YOLO mode when no images are present', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn();
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [{ text: 'no images here' }];
let res: any;
await act(async () => {
res = await result.current.handleVisionSwitch(parts, 8080, false);
});
expect(res).toEqual({ shouldProceed: true });
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
expect(config.setModel).not.toHaveBeenCalled();
});
it('does not switch in YOLO mode when already using vision model', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'vision-model',
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn();
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
let res: any;
await act(async () => {
res = await result.current.handleVisionSwitch(parts, 9090, false);
});
expect(res).toEqual({ shouldProceed: true });
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
expect(config.setModel).not.toHaveBeenCalled();
});
it('restores original model after YOLO mode auto-switch', async () => {
const initialModel = 'qwen3-coder-plus';
const config = createMockConfig(
AuthType.QWEN_OAUTH,
initialModel,
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn();
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
// First, trigger the auto-switch
await act(async () => {
await result.current.handleVisionSwitch(parts, 10100, false);
});
// Verify model was switched
expect(config.setModel).toHaveBeenCalledWith(getDefaultVisionModel(), {
reason: 'vision_auto_switch',
context: 'YOLO mode auto-switch for image content',
});
// Now restore the original model
await act(async () => {
await result.current.restoreOriginalModel();
});
// Verify model was restored
expect(config.setModel).toHaveBeenLastCalledWith(initialModel, {
reason: 'vision_auto_switch',
context: 'Restoring original model after vision switch',
});
});
it('does not switch in YOLO mode when authType is not QWEN_OAUTH', async () => {
const config = createMockConfig(
AuthType.USE_GEMINI,
'qwen3-coder-plus',
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn();
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
let res: any;
await act(async () => {
res = await result.current.handleVisionSwitch(parts, 11110, false);
});
expect(res).toEqual({ shouldProceed: true });
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
expect(config.setModel).not.toHaveBeenCalled();
});
it('does not switch in YOLO mode when visionModelPreviewEnabled is false', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn();
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
false,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/png', data: '...' } },
];
let res: any;
await act(async () => {
res = await result.current.handleVisionSwitch(parts, 12120, false);
});
expect(res).toEqual({ shouldProceed: true });
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
expect(config.setModel).not.toHaveBeenCalled();
});
it('handles multiple image formats in YOLO mode', async () => {
const initialModel = 'qwen3-coder-plus';
const config = createMockConfig(
AuthType.QWEN_OAUTH,
initialModel,
ApprovalMode.YOLO,
);
const onVisionSwitchRequired = vi.fn();
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ text: 'Here are some images:' },
{ inlineData: { mimeType: 'image/jpeg', data: '...' } },
{ fileData: { mimeType: 'image/png', fileUri: 'file://image.png' } },
{ text: 'Please analyze them.' },
];
let res: any;
await act(async () => {
res = await result.current.handleVisionSwitch(parts, 13130, false);
});
expect(res).toEqual({
shouldProceed: true,
originalModel: initialModel,
});
expect(config.setModel).toHaveBeenCalledWith(getDefaultVisionModel(), {
reason: 'vision_auto_switch',
context: 'YOLO mode auto-switch for image content',
});
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
});
describe('VLM switch mode default behavior', () => {
it('should automatically switch once when vlmSwitchMode is "once"', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.DEFAULT,
'once',
);
const onVisionSwitchRequired = vi.fn(); // Should not be called
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/jpeg', data: 'base64data' } },
];
const switchResult = await result.current.handleVisionSwitch(
parts,
Date.now(),
false,
);
expect(switchResult.shouldProceed).toBe(true);
expect(switchResult.originalModel).toBe('qwen3-coder-plus');
expect(config.setModel).toHaveBeenCalledWith('vision-model', {
reason: 'vision_auto_switch',
context: 'Default VLM switch mode: once (one-time override)',
});
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
it('should switch session when vlmSwitchMode is "session"', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.DEFAULT,
'session',
);
const onVisionSwitchRequired = vi.fn(); // Should not be called
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/jpeg', data: 'base64data' } },
];
const switchResult = await result.current.handleVisionSwitch(
parts,
Date.now(),
false,
);
expect(switchResult.shouldProceed).toBe(true);
expect(switchResult.originalModel).toBeUndefined(); // No original model for session switch
expect(config.setModel).toHaveBeenCalledWith('vision-model', {
reason: 'vision_auto_switch',
context: 'Default VLM switch mode: session (session persistent)',
});
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
it('should continue with current model when vlmSwitchMode is "persist"', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.DEFAULT,
'persist',
);
const onVisionSwitchRequired = vi.fn(); // Should not be called
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/jpeg', data: 'base64data' } },
];
const switchResult = await result.current.handleVisionSwitch(
parts,
Date.now(),
false,
);
expect(switchResult.shouldProceed).toBe(true);
expect(switchResult.originalModel).toBeUndefined();
expect(config.setModel).not.toHaveBeenCalled();
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
it('should fall back to user prompt when vlmSwitchMode is not set', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.DEFAULT,
undefined, // No default mode
);
const onVisionSwitchRequired = vi
.fn()
.mockResolvedValue({ modelOverride: 'vision-model' });
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/jpeg', data: 'base64data' } },
];
const switchResult = await result.current.handleVisionSwitch(
parts,
Date.now(),
false,
);
expect(switchResult.shouldProceed).toBe(true);
expect(onVisionSwitchRequired).toHaveBeenCalledWith(parts);
});
it('should fall back to persist behavior when vlmSwitchMode has invalid value', async () => {
const config = createMockConfig(
AuthType.QWEN_OAUTH,
'qwen3-coder-plus',
ApprovalMode.DEFAULT,
'invalid-value',
);
const onVisionSwitchRequired = vi.fn(); // Should not be called
const { result } = renderHook(() =>
useVisionAutoSwitch(
config,
addItem as any,
true,
onVisionSwitchRequired,
),
);
const parts: PartListUnion = [
{ inlineData: { mimeType: 'image/jpeg', data: 'base64data' } },
];
const switchResult = await result.current.handleVisionSwitch(
parts,
Date.now(),
false,
);
expect(switchResult.shouldProceed).toBe(true);
expect(switchResult.originalModel).toBeUndefined();
// For invalid values, it should continue with current model (persist behavior)
expect(config.setModel).not.toHaveBeenCalled();
expect(onVisionSwitchRequired).not.toHaveBeenCalled();
});
});
});

View File

@@ -5,7 +5,7 @@
*/
import { type PartListUnion, type Part } from '@google/genai';
import { AuthType, type Config, ApprovalMode } from '@qwen-code/qwen-code-core';
import { AuthType, type Config } from '@qwen-code/qwen-code-core';
import { useCallback, useRef } from 'react';
import { VisionSwitchOutcome } from '../components/ModelSwitchDialog.js';
import {
@@ -121,7 +121,7 @@ export function shouldOfferVisionSwitch(
parts: PartListUnion,
authType: AuthType,
currentModel: string,
visionModelPreviewEnabled: boolean = true,
visionModelPreviewEnabled: boolean = false,
): boolean {
// Only trigger for qwen-oauth
if (authType !== AuthType.QWEN_OAUTH) {
@@ -166,11 +166,11 @@ export function processVisionSwitchOutcome(
case VisionSwitchOutcome.SwitchSessionToVL:
return { persistSessionModel: vlModelId };
case VisionSwitchOutcome.ContinueWithCurrentModel:
return {}; // Continue with current model, no changes needed
case VisionSwitchOutcome.DisallowWithGuidance:
return { showGuidance: true };
default:
return {}; // Default to continuing with current model
return { showGuidance: true };
}
}
@@ -198,7 +198,7 @@ export interface VisionSwitchHandlingResult {
export function useVisionAutoSwitch(
config: Config,
addItem: UseHistoryManagerReturn['addItem'],
visionModelPreviewEnabled: boolean = true,
visionModelPreviewEnabled: boolean = false,
onVisionSwitchRequired?: (query: PartListUnion) => Promise<{
modelOverride?: string;
persistSessionModel?: string;
@@ -252,91 +252,35 @@ export function useVisionAutoSwitch(
return { shouldProceed: true };
}
// In YOLO mode, automatically switch to vision model without user interaction
if (config.getApprovalMode() === ApprovalMode.YOLO) {
const vlModelId = getDefaultVisionModel();
originalModelRef.current = config.getModel();
await config.setModel(vlModelId, {
reason: 'vision_auto_switch',
context: 'YOLO mode auto-switch for image content',
});
return {
shouldProceed: true,
originalModel: originalModelRef.current,
};
}
// Check if there's a default VLM switch mode configured
const defaultVlmSwitchMode = config.getVlmSwitchMode();
if (defaultVlmSwitchMode) {
// Convert string value to VisionSwitchOutcome enum
let outcome: VisionSwitchOutcome;
switch (defaultVlmSwitchMode) {
case 'once':
outcome = VisionSwitchOutcome.SwitchOnce;
break;
case 'session':
outcome = VisionSwitchOutcome.SwitchSessionToVL;
break;
case 'persist':
outcome = VisionSwitchOutcome.ContinueWithCurrentModel;
break;
default:
// Invalid value, fall back to prompting user
outcome = VisionSwitchOutcome.ContinueWithCurrentModel;
}
// Process the default outcome
const visionSwitchResult = processVisionSwitchOutcome(outcome);
if (visionSwitchResult.modelOverride) {
// One-time model override
originalModelRef.current = config.getModel();
await config.setModel(visionSwitchResult.modelOverride, {
reason: 'vision_auto_switch',
context: `Default VLM switch mode: ${defaultVlmSwitchMode} (one-time override)`,
});
return {
shouldProceed: true,
originalModel: originalModelRef.current,
};
} else if (visionSwitchResult.persistSessionModel) {
// Persistent session model change
await config.setModel(visionSwitchResult.persistSessionModel, {
reason: 'vision_auto_switch',
context: `Default VLM switch mode: ${defaultVlmSwitchMode} (session persistent)`,
});
return { shouldProceed: true };
}
// For ContinueWithCurrentModel or any other case, proceed with current model
return { shouldProceed: true };
}
try {
const visionSwitchResult = await onVisionSwitchRequired(query);
if (visionSwitchResult.showGuidance) {
// Show guidance and don't proceed with the request
addItem(
{
type: MessageType.INFO,
text: getVisionSwitchGuidanceMessage(),
},
userMessageTimestamp,
);
return { shouldProceed: false };
}
if (visionSwitchResult.modelOverride) {
// One-time model override
originalModelRef.current = config.getModel();
await config.setModel(visionSwitchResult.modelOverride, {
reason: 'vision_auto_switch',
context: 'User-prompted vision switch (one-time override)',
});
config.setModel(visionSwitchResult.modelOverride);
return {
shouldProceed: true,
originalModel: originalModelRef.current,
};
} else if (visionSwitchResult.persistSessionModel) {
// Persistent session model change
await config.setModel(visionSwitchResult.persistSessionModel, {
reason: 'vision_auto_switch',
context: 'User-prompted vision switch (session persistent)',
});
config.setModel(visionSwitchResult.persistSessionModel);
return { shouldProceed: true };
}
// For ContinueWithCurrentModel or any other case, proceed with current model
return { shouldProceed: true };
} catch (_error) {
// If vision switch dialog was cancelled or errored, don't proceed
@@ -346,12 +290,9 @@ export function useVisionAutoSwitch(
[config, addItem, visionModelPreviewEnabled, onVisionSwitchRequired],
);
const restoreOriginalModel = useCallback(async () => {
const restoreOriginalModel = useCallback(() => {
if (originalModelRef.current) {
await config.setModel(originalModelRef.current, {
reason: 'vision_auto_switch',
context: 'Restoring original model after vision switch',
});
config.setModel(originalModelRef.current);
originalModelRef.current = null;
}
}, [config]);

View File

@@ -10,12 +10,9 @@ export type AvailableModel = {
isVision?: boolean;
};
export const MAINLINE_VLM = 'vision-model';
export const MAINLINE_CODER = 'coder-model';
export const AVAILABLE_MODELS_QWEN: AvailableModel[] = [
{ id: MAINLINE_CODER, label: MAINLINE_CODER },
{ id: MAINLINE_VLM, label: MAINLINE_VLM, isVision: true },
{ id: 'qwen3-coder-plus', label: 'qwen3-coder-plus' },
{ id: 'qwen-vl-max-latest', label: 'qwen-vl-max', isVision: true },
];
/**
@@ -45,7 +42,7 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
* until our coding model supports multimodal.
*/
export function getDefaultVisionModel(): string {
return MAINLINE_VLM;
return 'qwen-vl-max-latest';
}
export function isVisionModel(modelId: string): boolean {

View File

@@ -9,6 +9,7 @@ import { describe, it, expect, vi, beforeEach } from 'vitest';
import { MarkdownDisplay } from './MarkdownDisplay.js';
import { LoadedSettings } from '../../config/settings.js';
import { SettingsContext } from '../contexts/SettingsContext.js';
import { EOL } from 'node:os';
describe('<MarkdownDisplay />', () => {
const baseProps = {
@@ -56,7 +57,7 @@ describe('<MarkdownDisplay />', () => {
## Header 2
### Header 3
#### Header 4
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -66,7 +67,10 @@ describe('<MarkdownDisplay />', () => {
});
it('renders a fenced code block with a language', () => {
const text = '```javascript\nconst x = 1;\nconsole.log(x);\n```';
const text = '```javascript\nconst x = 1;\nconsole.log(x);\n```'.replace(
/\n/g,
EOL,
);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -76,7 +80,7 @@ describe('<MarkdownDisplay />', () => {
});
it('renders a fenced code block without a language', () => {
const text = '```\nplain text\n```';
const text = '```\nplain text\n```'.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -86,7 +90,7 @@ describe('<MarkdownDisplay />', () => {
});
it('handles unclosed (pending) code blocks', () => {
const text = '```typescript\nlet y = 2;';
const text = '```typescript\nlet y = 2;'.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} isPending={true} />
@@ -100,7 +104,7 @@ describe('<MarkdownDisplay />', () => {
- item A
* item B
+ item C
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -114,7 +118,7 @@ describe('<MarkdownDisplay />', () => {
* Level 1
* Level 2
* Level 3
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -127,7 +131,7 @@ describe('<MarkdownDisplay />', () => {
const text = `
1. First item
2. Second item
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -143,7 +147,7 @@ Hello
World
***
Test
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -158,7 +162,7 @@ Test
|----------|:--------:|
| Cell 1 | Cell 2 |
| Cell 3 | Cell 4 |
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -172,7 +176,7 @@ Test
Some text before.
| A | B |
|---|
| 1 | 2 |`;
| 1 | 2 |`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -184,7 +188,7 @@ Some text before.
it('inserts a single space between paragraphs', () => {
const text = `Paragraph 1.
Paragraph 2.`;
Paragraph 2.`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -207,7 +211,7 @@ some code
\`\`\`
Another paragraph.
`;
`.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -217,7 +221,7 @@ Another paragraph.
});
it('hides line numbers in code blocks when showLineNumbers is false', () => {
const text = '```javascript\nconst x = 1;\n```';
const text = '```javascript\nconst x = 1;\n```'.replace(/\n/g, EOL);
const settings = new LoadedSettings(
{ path: '', settings: {} },
{ path: '', settings: {} },
@@ -238,7 +242,7 @@ Another paragraph.
});
it('shows line numbers in code blocks by default', () => {
const text = '```javascript\nconst x = 1;\n```';
const text = '```javascript\nconst x = 1;\n```'.replace(/\n/g, EOL);
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={text} />
@@ -247,21 +251,4 @@ Another paragraph.
expect(lastFrame()).toMatchSnapshot();
expect(lastFrame()).toContain(' 1 ');
});
it('correctly splits lines using \\n regardless of platform EOL', () => {
// Test that the component uses \n for splitting, not EOL
const textWithUnixLineEndings = 'Line 1\nLine 2\nLine 3';
const { lastFrame } = render(
<SettingsContext.Provider value={mockSettings}>
<MarkdownDisplay {...baseProps} text={textWithUnixLineEndings} />
</SettingsContext.Provider>,
);
const output = lastFrame();
expect(output).toContain('Line 1');
expect(output).toContain('Line 2');
expect(output).toContain('Line 3');
expect(output).toMatchSnapshot();
});
});

View File

@@ -6,6 +6,7 @@
import React from 'react';
import { Text, Box } from 'ink';
import { EOL } from 'node:os';
import { Colors } from '../colors.js';
import { colorizeCode } from './CodeColorizer.js';
import { TableRenderer } from './TableRenderer.js';
@@ -34,7 +35,7 @@ const MarkdownDisplayInternal: React.FC<MarkdownDisplayProps> = ({
}) => {
if (!text) return <></>;
const lines = text.split(`\n`);
const lines = text.split(EOL);
const headerRegex = /^ *(#{1,4}) +(.*)/;
const codeFenceRegex = /^ *(`{3,}|~{3,}) *(\w*?) *$/;
const ulItemRegex = /^([ \t]*)([-*+]) +(.*)/;

View File

@@ -14,12 +14,6 @@ Another paragraph.
"
`;
exports[`<MarkdownDisplay /> > correctly splits lines using \\n regardless of platform EOL 1`] = `
"Line 1
Line 2
Line 3"
`;
exports[`<MarkdownDisplay /> > handles a table at the end of the input 1`] = `
"Some text before.
| A | B |

View File

@@ -126,18 +126,6 @@ describe('validateNonInterActiveAuth', () => {
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_OPENAI);
});
it('uses configured QWEN_OAUTH if provided', async () => {
const nonInteractiveConfig: NonInteractiveConfig = {
refreshAuth: refreshAuthMock,
};
await validateNonInteractiveAuth(
AuthType.QWEN_OAUTH,
undefined,
nonInteractiveConfig,
);
expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.QWEN_OAUTH);
});
it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true (with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION)', async () => {
process.env['GOOGLE_GENAI_USE_VERTEXAI'] = 'true';
process.env['GOOGLE_CLOUD_PROJECT'] = 'test-project';

View File

@@ -97,18 +97,6 @@ class GeminiAgent {
name: 'Vertex AI',
description: null,
},
{
id: AuthType.USE_OPENAI,
name: 'Use OpenAI API key',
description:
'Requires setting the `OPENAI_API_KEY` environment variable',
},
{
id: AuthType.QWEN_OAUTH,
name: 'Qwen OAuth',
description:
'OAuth authentication for Qwen models with 2000 daily requests',
},
];
return {

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.0.13",
"version": "0.0.11",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View File

@@ -737,85 +737,4 @@ describe('setApprovalMode with folder trust', () => {
expect(() => config.setApprovalMode(ApprovalMode.AUTO_EDIT)).not.toThrow();
expect(() => config.setApprovalMode(ApprovalMode.DEFAULT)).not.toThrow();
});
describe('Model Switch Logging', () => {
it('should log model switch when setModel is called with different model', async () => {
const config = new Config({
sessionId: 'test-model-switch',
targetDir: '.',
debugMode: false,
model: 'qwen3-coder-plus',
cwd: '.',
});
// Initialize the config to set up content generator
await config.initialize();
// Mock the logger's logModelSwitch method
const logModelSwitchSpy = vi.spyOn(config['logger']!, 'logModelSwitch');
// Change the model
await config.setModel('qwen-vl-max-latest', {
reason: 'vision_auto_switch',
context: 'Test model switch',
});
// Verify that logModelSwitch was called with correct parameters
expect(logModelSwitchSpy).toHaveBeenCalledWith({
fromModel: 'qwen3-coder-plus',
toModel: 'qwen-vl-max-latest',
reason: 'vision_auto_switch',
context: 'Test model switch',
});
});
it('should not log when setModel is called with same model', async () => {
const config = new Config({
sessionId: 'test-same-model',
targetDir: '.',
debugMode: false,
model: 'qwen3-coder-plus',
cwd: '.',
});
// Initialize the config to set up content generator
await config.initialize();
// Mock the logger's logModelSwitch method
const logModelSwitchSpy = vi.spyOn(config['logger']!, 'logModelSwitch');
// Set the same model
await config.setModel('qwen3-coder-plus');
// Verify that logModelSwitch was not called
expect(logModelSwitchSpy).not.toHaveBeenCalled();
});
it('should use default reason when no options provided', async () => {
const config = new Config({
sessionId: 'test-default-reason',
targetDir: '.',
debugMode: false,
model: 'qwen3-coder-plus',
cwd: '.',
});
// Initialize the config to set up content generator
await config.initialize();
// Mock the logger's logModelSwitch method
const logModelSwitchSpy = vi.spyOn(config['logger']!, 'logModelSwitch');
// Change the model without options
await config.setModel('qwen-vl-max-latest');
// Verify that logModelSwitch was called with default reason
expect(logModelSwitchSpy).toHaveBeenCalledWith({
fromModel: 'qwen3-coder-plus',
toModel: 'qwen-vl-max-latest',
reason: 'manual',
context: undefined,
});
});
});
});

View File

@@ -56,7 +56,6 @@ import {
DEFAULT_GEMINI_FLASH_MODEL,
} from './models.js';
import { Storage } from './storage.js';
import { Logger, type ModelSwitchEvent } from '../core/logger.js';
// Re-export OAuth config type
export type { AnyToolInvocation, MCPOAuthConfig };
@@ -240,7 +239,6 @@ export interface ConfigParameters {
extensionManagement?: boolean;
enablePromptCompletion?: boolean;
skipLoopDetection?: boolean;
vlmSwitchMode?: string;
}
export class Config {
@@ -332,11 +330,9 @@ export class Config {
private readonly extensionManagement: boolean;
private readonly enablePromptCompletion: boolean = false;
private readonly skipLoopDetection: boolean;
private readonly vlmSwitchMode: string | undefined;
private initialized: boolean = false;
readonly storage: Storage;
private readonly fileExclusions: FileExclusions;
private logger: Logger | null = null;
constructor(params: ConfigParameters) {
this.sessionId = params.sessionId;
@@ -428,15 +424,8 @@ export class Config {
this.extensionManagement = params.extensionManagement ?? false;
this.storage = new Storage(this.targetDir);
this.enablePromptCompletion = params.enablePromptCompletion ?? false;
this.vlmSwitchMode = params.vlmSwitchMode;
this.fileExclusions = new FileExclusions(this);
// Initialize logger asynchronously
this.logger = new Logger(this.sessionId, this.storage);
this.logger.initialize().catch((error) => {
console.debug('Failed to initialize logger:', error);
});
if (params.contextFileName) {
setGeminiMdFilename(params.contextFileName);
}
@@ -528,47 +517,21 @@ export class Config {
return this.contentGeneratorConfig?.model || this.model;
}
async setModel(
newModel: string,
options?: {
reason?: ModelSwitchEvent['reason'];
context?: string;
},
): Promise<void> {
const oldModel = this.getModel();
setModel(newModel: string): void {
if (this.contentGeneratorConfig) {
this.contentGeneratorConfig.model = newModel;
}
// Log the model switch if the model actually changed
if (oldModel !== newModel && this.logger) {
const switchEvent: ModelSwitchEvent = {
fromModel: oldModel,
toModel: newModel,
reason: options?.reason || 'manual',
context: options?.context,
};
// Log asynchronously to avoid blocking
this.logger.logModelSwitch(switchEvent).catch((error) => {
console.debug('Failed to log model switch:', error);
});
}
// Reinitialize chat with updated configuration while preserving history
const geminiClient = this.getGeminiClient();
if (geminiClient && geminiClient.isInitialized()) {
// Now await the reinitialize operation to ensure completion
try {
await geminiClient.reinitialize();
} catch (error) {
// Use async operation but don't await to avoid blocking
geminiClient.reinitialize().catch((error) => {
console.error(
'Failed to reinitialize chat with updated config:',
error,
);
throw error; // Re-throw to let callers handle the error
}
});
}
}
@@ -975,10 +938,6 @@ export class Config {
return this.skipLoopDetection;
}
getVlmSwitchMode(): string | undefined {
return this.vlmSwitchMode;
}
async getGitService(): Promise<GitService> {
if (!this.gitService) {
this.gitService = new GitService(this.targetDir, this.storage);

View File

@@ -41,7 +41,7 @@ describe('Flash Model Fallback Configuration', () => {
// with the fallback mechanism. This will be necessary as we introduce more
// intelligent model routing.
describe('setModel', () => {
it('should only mark as switched if contentGeneratorConfig exists', async () => {
it('should only mark as switched if contentGeneratorConfig exists', () => {
// Create config without initializing contentGeneratorConfig
const newConfig = new Config({
sessionId: 'test-session-2',
@@ -52,15 +52,15 @@ describe('Flash Model Fallback Configuration', () => {
});
// Should not crash when contentGeneratorConfig is undefined
await newConfig.setModel(DEFAULT_GEMINI_FLASH_MODEL);
newConfig.setModel(DEFAULT_GEMINI_FLASH_MODEL);
expect(newConfig.isInFallbackMode()).toBe(false);
});
});
describe('getModel', () => {
it('should return contentGeneratorConfig model if available', async () => {
it('should return contentGeneratorConfig model if available', () => {
// Simulate initialized content generator config
await config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
expect(config.getModel()).toBe(DEFAULT_GEMINI_FLASH_MODEL);
});
@@ -88,8 +88,8 @@ describe('Flash Model Fallback Configuration', () => {
expect(config.isInFallbackMode()).toBe(false);
});
it('should persist switched state throughout session', async () => {
await config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
it('should persist switched state throughout session', () => {
config.setModel(DEFAULT_GEMINI_FLASH_MODEL);
// Setting state for fallback mode as is expected of clients
config.setFallbackMode(true);
expect(config.isInFallbackMode()).toBe(true);

View File

@@ -4,10 +4,11 @@
* SPDX-License-Identifier: Apache-2.0
*/
export const DEFAULT_QWEN_MODEL = 'coder-model';
export const DEFAULT_QWEN_FLASH_MODEL = 'coder-model';
export const DEFAULT_QWEN_MODEL = 'qwen3-coder-plus';
// We do not have a fallback model for now, but note it here anyway.
export const DEFAULT_QWEN_FLASH_MODEL = 'qwen3-coder-flash';
export const DEFAULT_GEMINI_MODEL = 'coder-model';
export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-plus';
export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
export const DEFAULT_GEMINI_FLASH_LITE_MODEL = 'gemini-2.5-flash-lite';

View File

@@ -1053,7 +1053,7 @@ export class GeminiClient {
error,
);
if (accepted !== false && accepted !== null) {
await this.config.setModel(fallbackModel);
this.config.setModel(fallbackModel);
this.config.setFallbackMode(true);
return fallbackModel;
}

View File

@@ -224,7 +224,7 @@ export class GeminiChat {
error,
);
if (accepted !== false && accepted !== null) {
await this.config.setModel(fallbackModel);
this.config.setModel(fallbackModel);
this.config.setFallbackMode(true);
return fallbackModel;
}

View File

@@ -755,84 +755,4 @@ describe('Logger', () => {
expect(logger['messageId']).toBe(0);
});
});
describe('Model Switch Logging', () => {
it('should log model switch events correctly', async () => {
const testSessionId = 'test-session-model-switch';
const logger = new Logger(testSessionId, new Storage(process.cwd()));
await logger.initialize();
const modelSwitchEvent = {
fromModel: 'qwen3-coder-plus',
toModel: 'qwen-vl-max-latest',
reason: 'vision_auto_switch' as const,
context: 'YOLO mode auto-switch for image content',
};
await logger.logModelSwitch(modelSwitchEvent);
// Read the log file to verify the entry was written
const logContent = await fs.readFile(TEST_LOG_FILE_PATH, 'utf-8');
const logs: LogEntry[] = JSON.parse(logContent);
const modelSwitchLog = logs.find(
(log) =>
log.sessionId === testSessionId &&
log.type === MessageSenderType.MODEL_SWITCH,
);
expect(modelSwitchLog).toBeDefined();
expect(modelSwitchLog!.type).toBe(MessageSenderType.MODEL_SWITCH);
const loggedEvent = JSON.parse(modelSwitchLog!.message);
expect(loggedEvent.fromModel).toBe('qwen3-coder-plus');
expect(loggedEvent.toModel).toBe('qwen-vl-max-latest');
expect(loggedEvent.reason).toBe('vision_auto_switch');
expect(loggedEvent.context).toBe(
'YOLO mode auto-switch for image content',
);
});
it('should handle multiple model switch events', async () => {
const testSessionId = 'test-session-multiple-switches';
const logger = new Logger(testSessionId, new Storage(process.cwd()));
await logger.initialize();
// Log first switch
await logger.logModelSwitch({
fromModel: 'qwen3-coder-plus',
toModel: 'qwen-vl-max-latest',
reason: 'vision_auto_switch',
context: 'Auto-switch for image',
});
// Log second switch (restore)
await logger.logModelSwitch({
fromModel: 'qwen-vl-max-latest',
toModel: 'qwen3-coder-plus',
reason: 'vision_auto_switch',
context: 'Restoring original model',
});
// Read the log file to verify both entries were written
const logContent = await fs.readFile(TEST_LOG_FILE_PATH, 'utf-8');
const logs: LogEntry[] = JSON.parse(logContent);
const modelSwitchLogs = logs.filter(
(log) =>
log.sessionId === testSessionId &&
log.type === MessageSenderType.MODEL_SWITCH,
);
expect(modelSwitchLogs).toHaveLength(2);
const firstSwitch = JSON.parse(modelSwitchLogs[0].message);
expect(firstSwitch.fromModel).toBe('qwen3-coder-plus');
expect(firstSwitch.toModel).toBe('qwen-vl-max-latest');
const secondSwitch = JSON.parse(modelSwitchLogs[1].message);
expect(secondSwitch.fromModel).toBe('qwen-vl-max-latest');
expect(secondSwitch.toModel).toBe('qwen3-coder-plus');
});
});
});

View File

@@ -13,7 +13,6 @@ const LOG_FILE_NAME = 'logs.json';
export enum MessageSenderType {
USER = 'user',
MODEL_SWITCH = 'model_switch',
}
export interface LogEntry {
@@ -24,13 +23,6 @@ export interface LogEntry {
message: string;
}
export interface ModelSwitchEvent {
fromModel: string;
toModel: string;
reason: 'vision_auto_switch' | 'manual' | 'fallback' | 'other';
context?: string;
}
// This regex matches any character that is NOT a letter (a-z, A-Z),
// a number (0-9), a hyphen (-), an underscore (_), or a dot (.).
@@ -278,17 +270,6 @@ export class Logger {
}
}
async logModelSwitch(event: ModelSwitchEvent): Promise<void> {
const message = JSON.stringify({
fromModel: event.fromModel,
toModel: event.toModel,
reason: event.reason,
context: event.context,
});
await this.logMessage(MessageSenderType.MODEL_SWITCH, message);
}
private _checkpointPath(tag: string): string {
if (!tag.length) {
throw new Error('No checkpoint tag specified.');

View File

@@ -560,146 +560,4 @@ describe('DashScopeOpenAICompatibleProvider', () => {
]);
});
});
describe('output token limits', () => {
it('should limit max_tokens when it exceeds model limit for qwen3-coder-plus', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 100000, // Exceeds the 65536 limit
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(65536); // Should be limited to model's output limit
});
it('should limit max_tokens when it exceeds model limit for qwen-vl-max-latest', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen-vl-max-latest',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 20000, // Exceeds the 8192 limit
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(8192); // Should be limited to model's output limit
});
it('should not modify max_tokens when it is within model limit', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 1000, // Within the 65536 limit
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(1000); // Should remain unchanged
});
it('should not add max_tokens when not present in request', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',
messages: [{ role: 'user', content: 'Hello' }],
// No max_tokens parameter
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBeUndefined(); // Should remain undefined
});
it('should handle null max_tokens parameter', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: null,
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBeNull(); // Should remain null
});
it('should use default output limit for unknown models', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'unknown-model',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 10000, // Exceeds the default 4096 limit
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(4096); // Should be limited to default output limit
});
it('should preserve other request parameters when limiting max_tokens', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 100000, // Will be limited
temperature: 0.8,
top_p: 0.9,
frequency_penalty: 0.1,
presence_penalty: 0.2,
stop: ['END'],
user: 'test-user',
};
const result = provider.buildRequest(request, 'test-prompt-id');
// max_tokens should be limited
expect(result.max_tokens).toBe(65536);
// Other parameters should be preserved
expect(result.temperature).toBe(0.8);
expect(result.top_p).toBe(0.9);
expect(result.frequency_penalty).toBe(0.1);
expect(result.presence_penalty).toBe(0.2);
expect(result.stop).toEqual(['END']);
expect(result.user).toBe('test-user');
});
it('should work with vision models and output token limits', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen-vl-max-latest',
messages: [
{
role: 'user',
content: [
{ type: 'text', text: 'Look at this image:' },
{
type: 'image_url',
image_url: { url: 'https://example.com/image.jpg' },
},
],
},
],
max_tokens: 20000, // Exceeds the 8192 limit
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(8192); // Should be limited
expect(
(result as { vl_high_resolution_images?: boolean })
.vl_high_resolution_images,
).toBe(true); // Vision-specific parameter should be preserved
});
it('should handle streaming requests with output token limits', () => {
const request: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen3-coder-plus',
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 100000, // Exceeds the 65536 limit
stream: true,
};
const result = provider.buildRequest(request, 'test-prompt-id');
expect(result.max_tokens).toBe(65536); // Should be limited
expect(result.stream).toBe(true); // Streaming should be preserved
});
});
});

View File

@@ -3,7 +3,6 @@ import type { Config } from '../../../config/config.js';
import type { ContentGeneratorConfig } from '../../contentGenerator.js';
import { AuthType } from '../../contentGenerator.js';
import { DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES } from '../constants.js';
import { tokenLimit } from '../../tokenLimits.js';
import type {
OpenAICompatibleProvider,
DashScopeRequestMetadata,
@@ -66,19 +65,6 @@ export class DashScopeOpenAICompatibleProvider
});
}
/**
* Build and configure the request for DashScope API.
*
* This method applies DashScope-specific configurations including:
* - Cache control for system and user messages
* - Output token limits based on model capabilities
* - Vision model specific parameters (vl_high_resolution_images)
* - Request metadata for session tracking
*
* @param request - The original chat completion request parameters
* @param userPromptId - Unique identifier for the user prompt for session tracking
* @returns Configured request with DashScope-specific parameters applied
*/
buildRequest(
request: OpenAI.Chat.ChatCompletionCreateParams,
userPromptId: string,
@@ -93,28 +79,21 @@ export class DashScopeOpenAICompatibleProvider
messages = this.addDashScopeCacheControl(messages, cacheTarget);
}
// Apply output token limits based on model capabilities
// This ensures max_tokens doesn't exceed the model's maximum output limit
const requestWithTokenLimits = this.applyOutputTokenLimit(
request,
request.model,
);
if (request.model.startsWith('qwen-vl')) {
return {
...requestWithTokenLimits,
...request,
messages,
...(this.buildMetadata(userPromptId) || {}),
/* @ts-expect-error dashscope exclusive */
vl_high_resolution_images: true,
} as OpenAI.Chat.ChatCompletionCreateParams;
};
}
return {
...requestWithTokenLimits, // Preserve all original parameters including sampling params and adjusted max_tokens
...request, // Preserve all original parameters including sampling params
messages,
...(this.buildMetadata(userPromptId) || {}),
} as OpenAI.Chat.ChatCompletionCreateParams;
};
}
buildMetadata(userPromptId: string): DashScopeRequestMetadata {
@@ -267,41 +246,6 @@ export class DashScopeOpenAICompatibleProvider
return contentArray;
}
/**
* Apply output token limit to a request's max_tokens parameter.
*
* Ensures that existing max_tokens parameters don't exceed the model's maximum output
* token limit. Only modifies max_tokens when already present in the request.
*
* @param request - The chat completion request parameters
* @param model - The model name to get the output token limit for
* @returns The request with max_tokens adjusted to respect the model's limits (if present)
*/
private applyOutputTokenLimit<T extends { max_tokens?: number | null }>(
request: T,
model: string,
): T {
const currentMaxTokens = request.max_tokens;
// Only process if max_tokens is already present in the request
if (currentMaxTokens === undefined || currentMaxTokens === null) {
return request; // No max_tokens parameter, return unchanged
}
const modelLimit = tokenLimit(model, 'output');
// If max_tokens exceeds the model limit, cap it to the model's limit
if (currentMaxTokens > modelLimit) {
return {
...request,
max_tokens: modelLimit,
};
}
// If max_tokens is within the limit, return the request unchanged
return request;
}
/**
* Check if cache control should be disabled based on configuration.
*

View File

@@ -820,14 +820,6 @@ function getToolCallExamples(model?: string): string {
if (/qwen[^-]*-vl/i.test(model)) {
return qwenVlToolCallExamples;
}
// Match coder-model pattern (same as qwen3-coder)
if (/coder-model/i.test(model)) {
return qwenCoderToolCallExamples;
}
// Match vision-model pattern (same as qwen3-vl)
if (/vision-model/i.test(model)) {
return qwenVlToolCallExamples;
}
}
return generalToolCallExamples;

View File

@@ -1,10 +1,5 @@
import { describe, it, expect } from 'vitest';
import {
normalize,
tokenLimit,
DEFAULT_TOKEN_LIMIT,
DEFAULT_OUTPUT_TOKEN_LIMIT,
} from './tokenLimits.js';
import { normalize, tokenLimit, DEFAULT_TOKEN_LIMIT } from './tokenLimits.js';
describe('normalize', () => {
it('should lowercase and trim the model string', () => {
@@ -230,96 +225,3 @@ describe('tokenLimit', () => {
expect(tokenLimit('CLAUDE-3.5-SONNET')).toBe(200000);
});
});
describe('tokenLimit with output type', () => {
describe('Qwen models with output limits', () => {
it('should return the correct output limit for qwen3-coder-plus', () => {
expect(tokenLimit('qwen3-coder-plus', 'output')).toBe(65536);
expect(tokenLimit('qwen3-coder-plus-20250601', 'output')).toBe(65536);
});
it('should return the correct output limit for qwen-vl-max-latest', () => {
expect(tokenLimit('qwen-vl-max-latest', 'output')).toBe(8192);
});
});
describe('Default output limits', () => {
it('should return the default output limit for unknown models', () => {
expect(tokenLimit('unknown-model', 'output')).toBe(
DEFAULT_OUTPUT_TOKEN_LIMIT,
);
expect(tokenLimit('gpt-4', 'output')).toBe(DEFAULT_OUTPUT_TOKEN_LIMIT);
expect(tokenLimit('claude-3.5-sonnet', 'output')).toBe(
DEFAULT_OUTPUT_TOKEN_LIMIT,
);
});
it('should return the default output limit for models without specific output patterns', () => {
expect(tokenLimit('qwen3-coder-7b', 'output')).toBe(
DEFAULT_OUTPUT_TOKEN_LIMIT,
);
expect(tokenLimit('qwen-plus', 'output')).toBe(
DEFAULT_OUTPUT_TOKEN_LIMIT,
);
expect(tokenLimit('qwen-vl-max', 'output')).toBe(
DEFAULT_OUTPUT_TOKEN_LIMIT,
);
});
});
describe('Input vs Output limits comparison', () => {
it('should return different limits for input vs output for qwen3-coder-plus', () => {
expect(tokenLimit('qwen3-coder-plus', 'input')).toBe(1048576); // 1M input
expect(tokenLimit('qwen3-coder-plus', 'output')).toBe(65536); // 64K output
});
it('should return different limits for input vs output for qwen-vl-max-latest', () => {
expect(tokenLimit('qwen-vl-max-latest', 'input')).toBe(131072); // 128K input
expect(tokenLimit('qwen-vl-max-latest', 'output')).toBe(8192); // 8K output
});
it('should return same default limits for unknown models', () => {
expect(tokenLimit('unknown-model', 'input')).toBe(DEFAULT_TOKEN_LIMIT); // 128K input
expect(tokenLimit('unknown-model', 'output')).toBe(
DEFAULT_OUTPUT_TOKEN_LIMIT,
); // 4K output
});
});
describe('Backward compatibility', () => {
it('should default to input type when no type is specified', () => {
expect(tokenLimit('qwen3-coder-plus')).toBe(1048576); // Should be input limit
expect(tokenLimit('qwen-vl-max-latest')).toBe(131072); // Should be input limit
expect(tokenLimit('unknown-model')).toBe(DEFAULT_TOKEN_LIMIT); // Should be input default
});
it('should work with explicit input type', () => {
expect(tokenLimit('qwen3-coder-plus', 'input')).toBe(1048576);
expect(tokenLimit('qwen-vl-max-latest', 'input')).toBe(131072);
expect(tokenLimit('unknown-model', 'input')).toBe(DEFAULT_TOKEN_LIMIT);
});
});
describe('Model normalization with output limits', () => {
it('should handle normalized model names for output limits', () => {
expect(tokenLimit('QWEN3-CODER-PLUS', 'output')).toBe(65536);
expect(tokenLimit('qwen3-coder-plus-20250601', 'output')).toBe(65536);
expect(tokenLimit('QWEN-VL-MAX-LATEST', 'output')).toBe(8192);
});
it('should handle complex model strings for output limits', () => {
expect(
tokenLimit(
' a/b/c|QWEN3-CODER-PLUS:qwen3-coder-plus-2024-05-13 ',
'output',
),
).toBe(65536);
expect(
tokenLimit(
'provider/qwen-vl-max-latest:qwen-vl-max-latest-v1',
'output',
),
).toBe(8192);
});
});
});

View File

@@ -1,15 +1,7 @@
type Model = string;
type TokenCount = number;
/**
* Token limit types for different use cases.
* - 'input': Maximum input context window size
* - 'output': Maximum output tokens that can be generated in a single response
*/
export type TokenLimitType = 'input' | 'output';
export const DEFAULT_TOKEN_LIMIT: TokenCount = 131_072; // 128K (power-of-two)
export const DEFAULT_OUTPUT_TOKEN_LIMIT: TokenCount = 4_096; // 4K tokens
/**
* Accurate numeric limits:
@@ -26,10 +18,6 @@ const LIMITS = {
'1m': 1_048_576,
'2m': 2_097_152,
'10m': 10_485_760, // 10 million tokens
// Output token limits (typically much smaller than input limits)
'4k': 4_096,
'8k': 8_192,
'16k': 16_384,
} as const;
/** Robust normalizer: strips provider prefixes, pipes/colons, date/version suffixes, etc. */
@@ -48,7 +36,7 @@ export function normalize(model: string): string {
// - dates (e.g., -20250219), -v1, version numbers, 'latest', 'preview' etc.
s = s.replace(/-preview/g, '');
// Special handling for Qwen model names that include "-latest" as part of the model name
if (!s.match(/^qwen-(?:plus|flash|vl-max)-latest$/)) {
if (!s.match(/^qwen-(?:plus|flash)-latest$/)) {
// \d{6,} - Match 6 or more digits (dates) like -20250219 (6+ digit dates)
// \d+x\d+b - Match patterns like 4x8b, -7b, -70b
// v\d+(?:\.\d+)* - Match version patterns starting with 'v' like -v1, -v1.2, -v2.1.3
@@ -111,12 +99,6 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
// Commercial Qwen3-Coder-Flash: 1M token context
[/^qwen3-coder-flash(-.*)?$/, LIMITS['1m']], // catches "qwen3-coder-flash" and date variants
// Generic coder-model: same as qwen3-coder-plus (1M token context)
[/^coder-model$/, LIMITS['1m']],
// Commercial Qwen3-Max-Preview: 256K token context
[/^qwen3-max-preview(-.*)?$/, LIMITS['256k']], // catches "qwen3-max-preview" and date variants
// Open-source Qwen3-Coder variants: 256K native
[/^qwen3-coder-.*$/, LIMITS['256k']],
// Open-source Qwen3 2507 variants: 256K native
@@ -137,9 +119,6 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
// Qwen Vision Models
[/^qwen-vl-max.*$/, LIMITS['128k']],
// Generic vision-model: same as qwen-vl-max (128K token context)
[/^vision-model$/, LIMITS['128k']],
// -------------------
// ByteDance Seed-OSS (512K)
// -------------------
@@ -163,60 +142,16 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
[/^mistral-large-2.*$/, LIMITS['128k']],
];
/**
* Output token limit patterns for specific model families.
* These patterns define the maximum number of tokens that can be generated
* in a single response for specific models.
*/
const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
// -------------------
// Alibaba / Qwen - DashScope Models
// -------------------
// Qwen3-Coder-Plus: 65,536 max output tokens
[/^qwen3-coder-plus(-.*)?$/, LIMITS['64k']],
// Generic coder-model: same as qwen3-coder-plus (64K max output tokens)
[/^coder-model$/, LIMITS['64k']],
// Qwen3-Max-Preview: 65,536 max output tokens
[/^qwen3-max-preview(-.*)?$/, LIMITS['64k']],
// Qwen-VL-Max-Latest: 8,192 max output tokens
[/^qwen-vl-max-latest$/, LIMITS['8k']],
// Generic vision-model: same as qwen-vl-max-latest (8K max output tokens)
[/^vision-model$/, LIMITS['8k']],
// Qwen3-VL-Plus: 8,192 max output tokens
[/^qwen3-vl-plus$/, LIMITS['8k']],
];
/**
* Return the token limit for a model string based on the specified type.
*
* This function determines the maximum number of tokens for either input context
* or output generation based on the model and token type. It uses the same
* normalization logic for consistency across both input and output limits.
*
* @param model - The model name to get the token limit for
* @param type - The type of token limit ('input' for context window, 'output' for generation)
* @returns The maximum number of tokens allowed for this model and type
*/
export function tokenLimit(
model: Model,
type: TokenLimitType = 'input',
): TokenCount {
/** Return the token limit for a model string (uses normalize + ordered regex list). */
export function tokenLimit(model: Model): TokenCount {
const norm = normalize(model);
// Choose the appropriate patterns based on token type
const patterns = type === 'output' ? OUTPUT_PATTERNS : PATTERNS;
for (const [regex, limit] of patterns) {
for (const [regex, limit] of PATTERNS) {
if (regex.test(norm)) {
return limit;
}
}
// Return appropriate default based on token type
return type === 'output' ? DEFAULT_OUTPUT_TOKEN_LIMIT : DEFAULT_TOKEN_LIMIT;
// final fallback: DEFAULT_TOKEN_LIMIT (power-of-two 128K)
return DEFAULT_TOKEN_LIMIT;
}

View File

@@ -712,6 +712,8 @@ async function authWithQwenDeviceFlow(
`Polling... (attempt ${attempt + 1}/${maxAttempts})`,
);
process.stdout.write('.');
// Wait with cancellation check every 100ms
await new Promise<void>((resolve) => {
const checkInterval = 100; // Check every 100ms

View File

@@ -901,37 +901,5 @@ describe('SharedTokenManager', () => {
);
}
});
it('should properly clean up timeout when file operation completes before timeout', async () => {
const tokenManager = SharedTokenManager.getInstance();
tokenManager.clearCache();
const mockClient = {
getCredentials: vi.fn().mockReturnValue(null),
setCredentials: vi.fn(),
getAccessToken: vi.fn(),
requestDeviceAuthorization: vi.fn(),
pollDeviceToken: vi.fn(),
refreshAccessToken: vi.fn(),
};
// Mock clearTimeout to verify it's called
const clearTimeoutSpy = vi.spyOn(global, 'clearTimeout');
// Mock file stat to resolve quickly (before timeout)
mockFs.stat.mockResolvedValue({ mtimeMs: 12345 } as Stats);
// Call checkAndReloadIfNeeded which uses withTimeout internally
const checkMethod = getPrivateProperty(
tokenManager,
'checkAndReloadIfNeeded',
) as (client?: IQwenOAuth2Client) => Promise<void>;
await checkMethod.call(tokenManager, mockClient);
// Verify that clearTimeout was called to clean up the timer
expect(clearTimeoutSpy).toHaveBeenCalled();
clearTimeoutSpy.mockRestore();
});
});
});

View File

@@ -290,36 +290,6 @@ export class SharedTokenManager {
}
}
/**
* Utility method to add timeout to any promise operation
* Properly cleans up the timeout when the promise completes
*/
private withTimeout<T>(
promise: Promise<T>,
timeoutMs: number,
operationType = 'Operation',
): Promise<T> {
let timeoutId: NodeJS.Timeout;
return Promise.race([
promise.finally(() => {
// Clear timeout when main promise completes (success or failure)
if (timeoutId) {
clearTimeout(timeoutId);
}
}),
new Promise<never>((_, reject) => {
timeoutId = setTimeout(
() =>
reject(
new Error(`${operationType} timed out after ${timeoutMs}ms`),
),
timeoutMs,
);
}),
]);
}
/**
* Perform the actual file check and reload operation
* This is separated to enable proper promise-based synchronization
@@ -333,12 +303,25 @@ export class SharedTokenManager {
try {
const filePath = this.getCredentialFilePath();
// Add timeout to file stat operation
const withTimeout = async <T>(
promise: Promise<T>,
timeoutMs: number,
): Promise<T> =>
Promise.race([
promise,
new Promise<never>((_, reject) =>
setTimeout(
() =>
reject(
new Error(`File operation timed out after ${timeoutMs}ms`),
),
timeoutMs,
),
),
]);
const stats = await this.withTimeout(
fs.stat(filePath),
3000,
'File operation',
);
const stats = await withTimeout(fs.stat(filePath), 3000);
const fileModTime = stats.mtimeMs;
// Reload credentials if file has been modified since last cache
@@ -468,7 +451,7 @@ export class SharedTokenManager {
// Check if we have a refresh token before attempting refresh
const currentCredentials = qwenClient.getCredentials();
if (!currentCredentials.refresh_token) {
// console.debug('create a NO_REFRESH_TOKEN error');
console.debug('create a NO_REFRESH_TOKEN error');
throw new TokenManagerError(
TokenError.NO_REFRESH_TOKEN,
'No refresh token available for token refresh',
@@ -606,12 +589,26 @@ export class SharedTokenManager {
const dirPath = path.dirname(filePath);
const tempPath = `${filePath}.tmp.${randomUUID()}`;
// Add timeout wrapper for file operations
const withTimeout = async <T>(
promise: Promise<T>,
timeoutMs: number,
): Promise<T> =>
Promise.race([
promise,
new Promise<never>((_, reject) =>
setTimeout(
() => reject(new Error(`Operation timed out after ${timeoutMs}ms`)),
timeoutMs,
),
),
]);
// Create directory with restricted permissions
try {
await this.withTimeout(
await withTimeout(
fs.mkdir(dirPath, { recursive: true, mode: 0o700 }),
5000,
'File operation',
);
} catch (error) {
throw new TokenManagerError(
@@ -625,30 +622,21 @@ export class SharedTokenManager {
try {
// Write to temporary file first with restricted permissions
await this.withTimeout(
await withTimeout(
fs.writeFile(tempPath, credString, { mode: 0o600 }),
5000,
'File operation',
);
// Atomic move to final location
await this.withTimeout(
fs.rename(tempPath, filePath),
5000,
'File operation',
);
await withTimeout(fs.rename(tempPath, filePath), 5000);
// Update cached file modification time atomically after successful write
const stats = await this.withTimeout(
fs.stat(filePath),
5000,
'File operation',
);
const stats = await withTimeout(fs.stat(filePath), 5000);
this.memoryCache.fileModTime = stats.mtimeMs;
} catch (error) {
// Clean up temp file if it exists
try {
await this.withTimeout(fs.unlink(tempPath), 1000, 'File operation');
await withTimeout(fs.unlink(tempPath), 1000);
} catch (_cleanupError) {
// Ignore cleanup errors - temp file might not exist
}

View File

@@ -72,19 +72,6 @@ async function createMockConfig(
} as unknown as ToolRegistry;
vi.spyOn(config, 'getToolRegistry').mockReturnValue(mockToolRegistry);
// Mock getContentGeneratorConfig to return a valid config
vi.spyOn(config, 'getContentGeneratorConfig').mockReturnValue({
model: DEFAULT_GEMINI_MODEL,
authType: AuthType.USE_GEMINI,
});
// Mock setModel method
vi.spyOn(config, 'setModel').mockResolvedValue();
// Mock getSessionId method
vi.spyOn(config, 'getSessionId').mockReturnValue('test-session');
return { config, toolRegistry: mockToolRegistry };
}

View File

@@ -826,7 +826,7 @@ export class SubAgentScope {
);
if (this.modelConfig.model) {
await this.runtimeContext.setModel(this.modelConfig.model);
this.runtimeContext.setModel(this.modelConfig.model);
}
return new GeminiChat(

View File

@@ -8,6 +8,7 @@ import fs from 'node:fs';
import path from 'node:path';
import { EOL } from 'node:os';
import { spawn } from 'node:child_process';
import { rgPath } from '@lvce-editor/ripgrep';
import type { ToolInvocation, ToolResult } from './tools.js';
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
import { SchemaValidator } from '../utils/schemaValidator.js';
@@ -17,14 +18,6 @@ import type { Config } from '../config/config.js';
const DEFAULT_TOTAL_MAX_MATCHES = 20000;
/**
* Lazy loads the ripgrep binary path to avoid loading the library until needed
*/
async function getRipgrepPath(): Promise<string> {
const { rgPath } = await import('@lvce-editor/ripgrep');
return rgPath;
}
/**
* Parameters for the GrepTool
*/
@@ -299,9 +292,8 @@ class GrepToolInvocation extends BaseToolInvocation<
rgArgs.push(absolutePath);
try {
const ripgrepPath = await getRipgrepPath();
const output = await new Promise<string>((resolve, reject) => {
const child = spawn(ripgrepPath, rgArgs, {
const child = spawn(rgPath, rgArgs, {
windowsHide: true,
});

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.13",
"version": "0.0.11",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.0.13",
"version": "0.0.11",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {