Mirror of https://github.com/QwenLM/qwen-code.git, synced 2025-12-19 09:33:53 +00:00
* refactor: openaiContentGenerator
* refactor: optimize stream handling
* refactor: re-organize refactored files
* fix: unit test cases
* feat: `/model` command for switching to vision model
* fix: lint error
* feat: add image tokenizer to fit vlm context window
* fix: lint and type errors
* feat: add `visionModelPreview` to control default visibility of vision models
* fix: remove deprecated files
* fix: align supported image formats with bailian doc
180 lines · 5.1 KiB · TypeScript
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, beforeEach, vi } from 'vitest';
import { modelCommand } from './modelCommand.js';
import { type CommandContext } from './types.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import {
  AuthType,
  type ContentGeneratorConfig,
  type Config,
} from '@qwen-code/qwen-code-core';
import * as availableModelsModule from '../models/availableModels.js';

// Mock the availableModels module
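// so each test can control which models are reported as available (including
// a vision-capable entry) without touching real environment variables.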
vi.mock('../models/availableModels.js', () => ({
  AVAILABLE_MODELS_QWEN: [
    { id: 'qwen3-coder-plus', label: 'qwen3-coder-plus' },
    { id: 'qwen-vl-max-latest', label: 'qwen-vl-max', isVision: true },
  ],
  getOpenAIAvailableModelFromEnv: vi.fn(),
}));

// Helper function to create a mock config
function createMockConfig(
  contentGeneratorConfig: ContentGeneratorConfig | null,
): Partial<Config> {
  return {
    getContentGeneratorConfig: vi.fn().mockReturnValue(contentGeneratorConfig),
  };
}

describe('modelCommand', () => {
  let mockContext: CommandContext;
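  // vi.mocked() re-types the mocked export so tests can stub its return
  // value with type checking intact.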
  const mockGetOpenAIAvailableModelFromEnv = vi.mocked(
    availableModelsModule.getOpenAIAvailableModelFromEnv,
  );

  beforeEach(() => {
    mockContext = createMockCommandContext();
    vi.clearAllMocks();
  });

  it('should have the correct name and description', () => {
    expect(modelCommand.name).toBe('model');
    expect(modelCommand.description).toBe('Switch the model for this session');
  });

  it('should return error when config is not available', async () => {
    mockContext.services.config = null;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'message',
      messageType: 'error',
      content: 'Configuration not available.',
    });
  });

  it('should return error when content generator config is not available', async () => {
    const mockConfig = createMockConfig(null);
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'message',
      messageType: 'error',
      content: 'Content generator configuration not available.',
    });
  });

  it('should return error when auth type is not available', async () => {
    const mockConfig = createMockConfig({
      model: 'test-model',
      authType: undefined,
    });
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'message',
      messageType: 'error',
      content: 'Authentication type not available.',
    });
  });

  it('should return dialog action for QWEN_OAUTH auth type', async () => {
    const mockConfig = createMockConfig({
      model: 'test-model',
      authType: AuthType.QWEN_OAUTH,
    });
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'dialog',
      dialog: 'model',
    });
  });

  it('should return dialog action for USE_OPENAI auth type when model is available', async () => {
    mockGetOpenAIAvailableModelFromEnv.mockReturnValue({
      id: 'gpt-4',
      label: 'gpt-4',
    });

    const mockConfig = createMockConfig({
      model: 'test-model',
      authType: AuthType.USE_OPENAI,
    });
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'dialog',
      dialog: 'model',
    });
  });

  it('should return error for USE_OPENAI auth type when no model is available', async () => {
    mockGetOpenAIAvailableModelFromEnv.mockReturnValue(null);

    const mockConfig = createMockConfig({
      model: 'test-model',
      authType: AuthType.USE_OPENAI,
    });
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'message',
      messageType: 'error',
      content:
        'No models available for the current authentication type (openai).',
    });
  });

  it('should return error for unsupported auth types', async () => {
    const mockConfig = createMockConfig({
      model: 'test-model',
      authType: 'UNSUPPORTED_AUTH_TYPE' as AuthType,
    });
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'message',
      messageType: 'error',
      content:
        'No models available for the current authentication type (UNSUPPORTED_AUTH_TYPE).',
    });
  });

  it('should handle undefined auth type', async () => {
    const mockConfig = createMockConfig({
      model: 'test-model',
      authType: undefined,
    });
    mockContext.services.config = mockConfig as Config;

    const result = await modelCommand.action!(mockContext, '');

    expect(result).toEqual({
      type: 'message',
      messageType: 'error',
      content: 'Authentication type not available.',
    });
  });
});
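Taken together, these tests pin down the decision flow the /model command is expected to follow: a missing config, a missing content generator config, and a missing auth type each short-circuit with a specific error message; QWEN_OAUTH always opens the model selection dialog; USE_OPENAI opens it only when getOpenAIAvailableModelFromEnv() resolves a model; any other auth type falls through to a "no models available" error that interpolates the auth type. The snippet below is a minimal sketch of an action with that shape, inferred only from the assertions above; it is not the actual modelCommand.ts, and it reuses the imports shown at the top of the test file.

// Sketch only: a hypothetical action that would satisfy the expectations
// encoded in the tests above; the real implementation may differ.
const modelCommandSketch = {
  name: 'model',
  description: 'Switch the model for this session',
  action: async (context: CommandContext, _args: string) => {
    const config = context.services.config;
    if (!config) {
      return {
        type: 'message' as const,
        messageType: 'error' as const,
        content: 'Configuration not available.',
      };
    }

    const generatorConfig = config.getContentGeneratorConfig();
    if (!generatorConfig) {
      return {
        type: 'message' as const,
        messageType: 'error' as const,
        content: 'Content generator configuration not available.',
      };
    }

    const authType = generatorConfig.authType;
    if (!authType) {
      return {
        type: 'message' as const,
        messageType: 'error' as const,
        content: 'Authentication type not available.',
      };
    }

    // Qwen OAuth always has models to offer; OpenAI-compatible auth only does
    // when a model can be resolved from the environment.
    if (
      authType === AuthType.QWEN_OAUTH ||
      (authType === AuthType.USE_OPENAI &&
        availableModelsModule.getOpenAIAvailableModelFromEnv())
    ) {
      return { type: 'dialog' as const, dialog: 'model' as const };
    }

    // The tests imply AuthType values serialize to strings such as 'openai'.
    return {
      type: 'message' as const,
      messageType: 'error' as const,
      content: `No models available for the current authentication type (${authType}).`,
    };
  },
};

Keeping one shared fallback for both the unsupported and the model-less USE_OPENAI cases matches the identical error wording the two corresponding tests expect.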