Mirror of https://github.com/QwenLM/qwen-code.git — synced 2026-01-23 17:26:23 +00:00

Compare commits: mingholy/f ... fix/vscode

3 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 5e80e80387 |  |
|  | bde056b62e |  |
|  | 97497457a8 |  |
@@ -4,11 +4,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
Config,
ContentGeneratorConfig,
ModelProvidersConfig,
} from '@qwen-code/qwen-code-core';
import type { Config, ModelProvidersConfig } from '@qwen-code/qwen-code-core';
import {
AuthEvent,
AuthType,
@@ -218,19 +214,11 @@ export const useAuthCommand = (

if (authType === AuthType.USE_OPENAI) {
if (credentials) {
// Pass settings.model.generationConfig to updateCredentials so it can be merged
// after clearing provider-sourced config. This ensures settings.json generationConfig
// fields (e.g., samplingParams, timeout) are preserved.
const settingsGenerationConfig = settings.merged.model
?.generationConfig as Partial<ContentGeneratorConfig> | undefined;
config.updateCredentials(
{
apiKey: credentials.apiKey,
baseUrl: credentials.baseUrl,
model: credentials.model,
},
settingsGenerationConfig,
);
config.updateCredentials({
apiKey: credentials.apiKey,
baseUrl: credentials.baseUrl,
model: credentials.model,
});
await performAuth(authType, credentials);
}
return;
@@ -238,13 +226,7 @@ export const useAuthCommand = (

await performAuth(authType);
},
[
config,
performAuth,
isProviderManagedModel,
onAuthError,
settings.merged.model?.generationConfig,
],
[config, performAuth, isProviderManagedModel, onAuthError],
);

const openAuthDialog = useCallback(() => {

@@ -275,7 +275,7 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
persistModelSelection(settings, effectiveModelId);
persistAuthTypeSelection(settings, effectiveAuthType);

const baseUrl = after?.baseUrl ?? t('(default)');
const baseUrl = after?.baseUrl ?? '(default)';
const maskedKey = maskApiKey(after?.apiKey);
uiState?.historyManager.addItem(
{
@@ -322,7 +322,7 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
<>
<ConfigRow
label="Base URL"
value={effectiveConfig?.baseUrl ?? t('(default)')}
value={effectiveConfig?.baseUrl ?? ''}
badge={formatSourceBadge(sources['baseUrl'])}
/>
<ConfigRow
@@ -1,721 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen Team
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||
import {
|
||||
AuthType,
|
||||
resolveModelConfig,
|
||||
type ProviderModelConfig,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
getAuthTypeFromEnv,
|
||||
resolveCliGenerationConfig,
|
||||
} from './modelConfigUtils.js';
|
||||
import type { Settings } from '../config/settings.js';
|
||||
|
||||
vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => {
|
||||
const original =
|
||||
await importOriginal<typeof import('@qwen-code/qwen-code-core')>();
|
||||
return {
|
||||
...original,
|
||||
resolveModelConfig: vi.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
describe('modelConfigUtils', () => {
|
||||
describe('getAuthTypeFromEnv', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('should return USE_OPENAI when all OpenAI env vars are set', () => {
|
||||
process.env['OPENAI_API_KEY'] = 'test-key';
|
||||
process.env['OPENAI_MODEL'] = 'gpt-4';
|
||||
process.env['OPENAI_BASE_URL'] = 'https://api.openai.com';
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBe(AuthType.USE_OPENAI);
|
||||
});
|
||||
|
||||
it('should return undefined when OpenAI env vars are incomplete', () => {
|
||||
process.env['OPENAI_API_KEY'] = 'test-key';
|
||||
process.env['OPENAI_MODEL'] = 'gpt-4';
|
||||
// Missing OPENAI_BASE_URL
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return QWEN_OAUTH when QWEN_OAUTH is set', () => {
|
||||
process.env['QWEN_OAUTH'] = 'true';
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBe(AuthType.QWEN_OAUTH);
|
||||
});
|
||||
|
||||
it('should return USE_GEMINI when Gemini env vars are set', () => {
|
||||
process.env['GEMINI_API_KEY'] = 'test-key';
|
||||
process.env['GEMINI_MODEL'] = 'gemini-pro';
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBe(AuthType.USE_GEMINI);
|
||||
});
|
||||
|
||||
it('should return undefined when Gemini env vars are incomplete', () => {
|
||||
process.env['GEMINI_API_KEY'] = 'test-key';
|
||||
// Missing GEMINI_MODEL
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return USE_VERTEX_AI when Google env vars are set', () => {
|
||||
process.env['GOOGLE_API_KEY'] = 'test-key';
|
||||
process.env['GOOGLE_MODEL'] = 'vertex-model';
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBe(AuthType.USE_VERTEX_AI);
|
||||
});
|
||||
|
||||
it('should return undefined when Google env vars are incomplete', () => {
|
||||
process.env['GOOGLE_API_KEY'] = 'test-key';
|
||||
// Missing GOOGLE_MODEL
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return USE_ANTHROPIC when Anthropic env vars are set', () => {
|
||||
process.env['ANTHROPIC_API_KEY'] = 'test-key';
|
||||
process.env['ANTHROPIC_MODEL'] = 'claude-3';
|
||||
process.env['ANTHROPIC_BASE_URL'] = 'https://api.anthropic.com';
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBe(AuthType.USE_ANTHROPIC);
|
||||
});
|
||||
|
||||
it('should return undefined when Anthropic env vars are incomplete', () => {
|
||||
process.env['ANTHROPIC_API_KEY'] = 'test-key';
|
||||
process.env['ANTHROPIC_MODEL'] = 'claude-3';
|
||||
// Missing ANTHROPIC_BASE_URL
|
||||
|
||||
expect(getAuthTypeFromEnv()).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should prioritize QWEN_OAUTH over other auth types when explicitly set', () => {
|
||||
process.env['QWEN_OAUTH'] = 'true';
|
||||
process.env['OPENAI_API_KEY'] = 'test-key';
|
||||
process.env['OPENAI_MODEL'] = 'gpt-4';
|
||||
process.env['OPENAI_BASE_URL'] = 'https://api.openai.com';
|
||||
|
||||
// QWEN_OAUTH is checked first, so it should be returned even when other auth vars are set
|
||||
expect(getAuthTypeFromEnv()).toBe(AuthType.QWEN_OAUTH);
|
||||
});
|
||||
|
||||
it('should return undefined when no auth env vars are set', () => {
|
||||
expect(getAuthTypeFromEnv()).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('resolveCliGenerationConfig', () => {
|
||||
const originalEnv = process.env;
|
||||
const originalConsoleWarn = console.warn;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
process.env = { ...originalEnv };
|
||||
console.warn = vi.fn();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
console.warn = originalConsoleWarn;
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
function makeMockSettings(overrides?: Partial<Settings>): Settings {
|
||||
return {
|
||||
model: { name: 'default-model' },
|
||||
security: {
|
||||
auth: {
|
||||
apiKey: 'settings-api-key',
|
||||
baseUrl: 'https://settings.example.com',
|
||||
},
|
||||
},
|
||||
...overrides,
|
||||
} as Settings;
|
||||
}
|
||||
|
||||
it('should resolve config from argv with highest precedence', () => {
|
||||
const argv = {
|
||||
model: 'argv-model',
|
||||
openaiApiKey: 'argv-key',
|
||||
openaiBaseUrl: 'https://argv.example.com',
|
||||
};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'argv-model',
|
||||
apiKey: 'argv-key',
|
||||
baseUrl: 'https://argv.example.com',
|
||||
},
|
||||
sources: {
|
||||
model: { kind: 'cli', detail: '--model' },
|
||||
apiKey: { kind: 'cli', detail: '--openaiApiKey' },
|
||||
baseUrl: { kind: 'cli', detail: '--openaiBaseUrl' },
|
||||
},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.model).toBe('argv-model');
|
||||
expect(result.apiKey).toBe('argv-key');
|
||||
expect(result.baseUrl).toBe('https://argv.example.com');
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
cli: {
|
||||
model: 'argv-model',
|
||||
apiKey: 'argv-key',
|
||||
baseUrl: 'https://argv.example.com',
|
||||
},
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should resolve config from settings when argv is not provided', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings({
|
||||
model: { name: 'settings-model' },
|
||||
security: {
|
||||
auth: {
|
||||
apiKey: 'settings-key',
|
||||
baseUrl: 'https://settings.example.com',
|
||||
},
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'settings-model',
|
||||
apiKey: 'settings-key',
|
||||
baseUrl: 'https://settings.example.com',
|
||||
},
|
||||
sources: {
|
||||
model: { kind: 'settings', detail: 'model.name' },
|
||||
apiKey: { kind: 'settings', detail: 'security.auth.apiKey' },
|
||||
baseUrl: { kind: 'settings', detail: 'security.auth.baseUrl' },
|
||||
},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.model).toBe('settings-model');
|
||||
expect(result.apiKey).toBe('settings-key');
|
||||
expect(result.baseUrl).toBe('https://settings.example.com');
|
||||
});
|
||||
|
||||
it('should merge generationConfig from settings', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings({
|
||||
model: {
|
||||
name: 'test-model',
|
||||
generationConfig: {
|
||||
samplingParams: {
|
||||
temperature: 0.7,
|
||||
max_tokens: 1000,
|
||||
},
|
||||
timeout: 5000,
|
||||
} as Record<string, unknown>,
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
samplingParams: {
|
||||
temperature: 0.7,
|
||||
max_tokens: 1000,
|
||||
},
|
||||
timeout: 5000,
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.generationConfig.samplingParams?.temperature).toBe(0.7);
|
||||
expect(result.generationConfig.samplingParams?.max_tokens).toBe(1000);
|
||||
expect(result.generationConfig.timeout).toBe(5000);
|
||||
});
|
||||
|
||||
it('should resolve OpenAI logging from argv', () => {
|
||||
const argv = {
|
||||
openaiLogging: true,
|
||||
openaiLoggingDir: '/custom/log/dir',
|
||||
};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.generationConfig.enableOpenAILogging).toBe(true);
|
||||
expect(result.generationConfig.openAILoggingDir).toBe('/custom/log/dir');
|
||||
});
|
||||
|
||||
it('should resolve OpenAI logging from settings when argv is undefined', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings({
|
||||
model: {
|
||||
name: 'test-model',
|
||||
enableOpenAILogging: true,
|
||||
openAILoggingDir: '/settings/log/dir',
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.generationConfig.enableOpenAILogging).toBe(true);
|
||||
expect(result.generationConfig.openAILoggingDir).toBe(
|
||||
'/settings/log/dir',
|
||||
);
|
||||
});
|
||||
|
||||
it('should default OpenAI logging to false when not provided', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.generationConfig.enableOpenAILogging).toBe(false);
|
||||
});
|
||||
|
||||
it('should find modelProvider from settings when authType and model match', () => {
|
||||
const argv = { model: 'provider-model' };
|
||||
const modelProvider: ProviderModelConfig = {
|
||||
id: 'provider-model',
|
||||
name: 'Provider Model',
|
||||
generationConfig: {
|
||||
samplingParams: { temperature: 0.8 },
|
||||
},
|
||||
};
|
||||
const settings = makeMockSettings({
|
||||
modelProviders: {
|
||||
[AuthType.USE_OPENAI]: [modelProvider],
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'provider-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
modelProvider,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should find modelProvider from settings.model.name when argv.model is not provided', () => {
|
||||
const argv = {};
|
||||
const modelProvider: ProviderModelConfig = {
|
||||
id: 'settings-model',
|
||||
name: 'Settings Model',
|
||||
generationConfig: {
|
||||
samplingParams: { temperature: 0.9 },
|
||||
},
|
||||
};
|
||||
const settings = makeMockSettings({
|
||||
model: { name: 'settings-model' },
|
||||
modelProviders: {
|
||||
[AuthType.USE_OPENAI]: [modelProvider],
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'settings-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
modelProvider,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should not find modelProvider when authType is undefined', () => {
|
||||
const argv = { model: 'test-model' };
|
||||
const settings = makeMockSettings({
|
||||
modelProviders: {
|
||||
[AuthType.USE_OPENAI]: [{ id: 'test-model', name: 'Test Model' }],
|
||||
},
|
||||
});
|
||||
const selectedAuthType = undefined;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
modelProvider: undefined,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should not find modelProvider when modelProviders is not an array', () => {
|
||||
const argv = { model: 'test-model' };
|
||||
const settings = makeMockSettings({
|
||||
modelProviders: {
|
||||
[AuthType.USE_OPENAI]: null as unknown as ProviderModelConfig[],
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
modelProvider: undefined,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should log warnings from resolveModelConfig', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: ['Warning 1', 'Warning 2'],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(console.warn).toHaveBeenCalledWith('Warning 1');
|
||||
expect(console.warn).toHaveBeenCalledWith('Warning 2');
|
||||
});
|
||||
|
||||
it('should use custom env when provided', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
const customEnv = {
|
||||
OPENAI_API_KEY: 'custom-key',
|
||||
OPENAI_MODEL: 'custom-model',
|
||||
};
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'custom-model',
|
||||
apiKey: 'custom-key',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
env: customEnv,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
env: customEnv,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should use process.env when env is not provided', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
env: process.env,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should return empty strings for missing model, apiKey, and baseUrl', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings();
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: '',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.model).toBe('');
|
||||
expect(result.apiKey).toBe('');
|
||||
expect(result.baseUrl).toBe('');
|
||||
});
|
||||
|
||||
it('should merge resolved config with logging settings', () => {
|
||||
const argv = {
|
||||
openaiLogging: true,
|
||||
};
|
||||
const settings = makeMockSettings({
|
||||
model: {
|
||||
name: 'test-model',
|
||||
generationConfig: {
|
||||
timeout: 5000,
|
||||
} as Record<string, unknown>,
|
||||
},
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: 'test-model',
|
||||
apiKey: 'test-key',
|
||||
baseUrl: 'https://test.com',
|
||||
samplingParams: { temperature: 0.5 },
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.generationConfig).toEqual({
|
||||
model: 'test-model',
|
||||
apiKey: 'test-key',
|
||||
baseUrl: 'https://test.com',
|
||||
samplingParams: { temperature: 0.5 },
|
||||
enableOpenAILogging: true,
|
||||
openAILoggingDir: undefined,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle settings without model property', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings({
|
||||
model: undefined as unknown as Settings['model'],
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: '',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
const result = resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(result.model).toBe('');
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
settings: expect.objectContaining({
|
||||
model: undefined,
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle settings without security.auth property', () => {
|
||||
const argv = {};
|
||||
const settings = makeMockSettings({
|
||||
security: undefined,
|
||||
});
|
||||
const selectedAuthType = AuthType.USE_OPENAI;
|
||||
|
||||
vi.mocked(resolveModelConfig).mockReturnValue({
|
||||
config: {
|
||||
model: '',
|
||||
apiKey: '',
|
||||
baseUrl: '',
|
||||
},
|
||||
sources: {},
|
||||
warnings: [],
|
||||
});
|
||||
|
||||
resolveCliGenerationConfig({
|
||||
argv,
|
||||
settings,
|
||||
selectedAuthType,
|
||||
});
|
||||
|
||||
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
settings: expect.objectContaining({
|
||||
apiKey: undefined,
|
||||
baseUrl: undefined,
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -44,31 +44,20 @@ export interface ResolvedCliGenerationConfig {
}

export function getAuthTypeFromEnv(): AuthType | undefined {
if (process.env['OPENAI_API_KEY']) {
return AuthType.USE_OPENAI;
}
if (process.env['QWEN_OAUTH']) {
return AuthType.QWEN_OAUTH;
}

if (
process.env['OPENAI_API_KEY'] &&
process.env['OPENAI_MODEL'] &&
process.env['OPENAI_BASE_URL']
) {
return AuthType.USE_OPENAI;
}

if (process.env['GEMINI_API_KEY'] && process.env['GEMINI_MODEL']) {
if (process.env['GEMINI_API_KEY']) {
return AuthType.USE_GEMINI;
}

if (process.env['GOOGLE_API_KEY'] && process.env['GOOGLE_MODEL']) {
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}

if (
process.env['ANTHROPIC_API_KEY'] &&
process.env['ANTHROPIC_MODEL'] &&
process.env['ANTHROPIC_BASE_URL']
) {
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}

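For readers comparing the two branches, here is a minimal usage sketch of the relaxed detection above. It assumes `getAuthTypeFromEnv` is importable from `modelConfigUtils.js` and `AuthType` from the core package, as in the deleted test file; the key value is a placeholder.

```ts
// Illustrative sketch only; assumed imports mirror the deleted test file.
import { AuthType } from '@qwen-code/qwen-code-core';
import { getAuthTypeFromEnv } from './modelConfigUtils.js';

process.env['OPENAI_API_KEY'] = 'test-key'; // placeholder key
delete process.env['OPENAI_MODEL'];
delete process.env['OPENAI_BASE_URL'];

// A single API key is now enough; previously OPENAI_MODEL and
// OPENAI_BASE_URL also had to be set before USE_OPENAI was detected.
console.assert(getAuthTypeFromEnv() === AuthType.USE_OPENAI);
```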
@@ -706,15 +706,12 @@ export class Config {
 * Exclusive for `OpenAIKeyPrompt` to update credentials via `/auth`
 * Delegates to ModelsConfig.
 */
updateCredentials(
credentials: {
apiKey?: string;
baseUrl?: string;
model?: string;
},
settingsGenerationConfig?: Partial<ContentGeneratorConfig>,
): void {
this._modelsConfig.updateCredentials(credentials, settingsGenerationConfig);
updateCredentials(credentials: {
apiKey?: string;
baseUrl?: string;
model?: string;
}): void {
this._modelsConfig.updateCredentials(credentials);
}

/**
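Downstream, the call site in `useAuthCommand` (first hunk in this compare) now passes only the credential triple. A hedged sketch with placeholder values, assuming `config` is the active `Config` instance:

```ts
// Illustrative only — placeholder values, not real defaults.
import type { Config } from '@qwen-code/qwen-code-core';
declare const config: Config;

config.updateCredentials({
  apiKey: 'sk-placeholder',
  baseUrl: 'https://api.example.com/v1',
  model: 'my-model',
});
// The optional settingsGenerationConfig argument is gone on this overload;
// settings.model.generationConfig is no longer merged at this point.
```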
@@ -191,7 +191,7 @@ describe('ModelsConfig', () => {
|
||||
expect(gc.apiKeyEnvKey).toBe('API_KEY_SHARED');
|
||||
});
|
||||
|
||||
it('should use provider config when modelId exists in registry even after updateCredentials', () => {
|
||||
it('should preserve settings generationConfig when model is updated via updateCredentials even if it matches modelProviders', () => {
|
||||
const modelProvidersConfig: ModelProvidersConfig = {
|
||||
openai: [
|
||||
{
|
||||
@@ -213,7 +213,7 @@ describe('ModelsConfig', () => {
|
||||
initialAuthType: AuthType.USE_OPENAI,
|
||||
modelProvidersConfig,
|
||||
generationConfig: {
|
||||
model: 'custom-model',
|
||||
model: 'model-a',
|
||||
samplingParams: { temperature: 0.9, max_tokens: 999 },
|
||||
timeout: 9999,
|
||||
maxRetries: 9,
|
||||
@@ -235,30 +235,30 @@ describe('ModelsConfig', () => {
|
||||
},
|
||||
});
|
||||
|
||||
// User manually updates credentials via updateCredentials.
|
||||
// Note: In practice, handleAuthSelect prevents using a modelId that matches a provider model,
|
||||
// but if syncAfterAuthRefresh is called with a modelId that exists in registry,
|
||||
// we should use provider config.
|
||||
modelsConfig.updateCredentials({ apiKey: 'manual-key' });
|
||||
// User manually updates the model via updateCredentials (e.g. key prompt flow).
|
||||
// Even if the model ID matches a modelProviders entry, we must not apply provider defaults
|
||||
// that would overwrite settings.model.generationConfig.
|
||||
modelsConfig.updateCredentials({ model: 'model-a' });
|
||||
|
||||
// syncAfterAuthRefresh with a modelId that exists in registry should use provider config
|
||||
modelsConfig.syncAfterAuthRefresh(AuthType.USE_OPENAI, 'model-a');
|
||||
modelsConfig.syncAfterAuthRefresh(
|
||||
AuthType.USE_OPENAI,
|
||||
modelsConfig.getModel(),
|
||||
);
|
||||
|
||||
const gc = currentGenerationConfig(modelsConfig);
|
||||
expect(gc.model).toBe('model-a');
|
||||
// Provider config should be applied
|
||||
expect(gc.samplingParams?.temperature).toBe(0.1);
|
||||
expect(gc.samplingParams?.max_tokens).toBe(123);
|
||||
expect(gc.timeout).toBe(111);
|
||||
expect(gc.maxRetries).toBe(1);
|
||||
expect(gc.samplingParams?.temperature).toBe(0.9);
|
||||
expect(gc.samplingParams?.max_tokens).toBe(999);
|
||||
expect(gc.timeout).toBe(9999);
|
||||
expect(gc.maxRetries).toBe(9);
|
||||
});
|
||||
|
||||
it('should preserve settings generationConfig when modelId does not exist in registry', () => {
|
||||
it('should preserve settings generationConfig across multiple auth refreshes after updateCredentials', () => {
|
||||
const modelProvidersConfig: ModelProvidersConfig = {
|
||||
openai: [
|
||||
{
|
||||
id: 'provider-model',
|
||||
name: 'Provider Model',
|
||||
id: 'model-a',
|
||||
name: 'Model A',
|
||||
baseUrl: 'https://api.example.com/v1',
|
||||
envKey: 'API_KEY_A',
|
||||
generationConfig: {
|
||||
@@ -270,12 +270,11 @@ describe('ModelsConfig', () => {
|
||||
],
|
||||
};
|
||||
|
||||
// Simulate settings with a custom model (not in registry)
|
||||
const modelsConfig = new ModelsConfig({
|
||||
initialAuthType: AuthType.USE_OPENAI,
|
||||
modelProvidersConfig,
|
||||
generationConfig: {
|
||||
model: 'custom-model',
|
||||
model: 'model-a',
|
||||
samplingParams: { temperature: 0.9, max_tokens: 999 },
|
||||
timeout: 9999,
|
||||
maxRetries: 9,
|
||||
@@ -297,21 +296,25 @@ describe('ModelsConfig', () => {
|
||||
},
|
||||
});
|
||||
|
||||
// User manually sets credentials for a custom model (not in registry)
|
||||
modelsConfig.updateCredentials({
|
||||
apiKey: 'manual-key',
|
||||
baseUrl: 'https://manual.example.com/v1',
|
||||
model: 'custom-model',
|
||||
model: 'model-a',
|
||||
});
|
||||
|
||||
// First auth refresh - modelId doesn't exist in registry, so credentials should be preserved
|
||||
modelsConfig.syncAfterAuthRefresh(AuthType.USE_OPENAI, 'custom-model');
|
||||
// First auth refresh
|
||||
modelsConfig.syncAfterAuthRefresh(
|
||||
AuthType.USE_OPENAI,
|
||||
modelsConfig.getModel(),
|
||||
);
|
||||
// Second auth refresh should still preserve settings generationConfig
|
||||
modelsConfig.syncAfterAuthRefresh(AuthType.USE_OPENAI, 'custom-model');
|
||||
modelsConfig.syncAfterAuthRefresh(
|
||||
AuthType.USE_OPENAI,
|
||||
modelsConfig.getModel(),
|
||||
);
|
||||
|
||||
const gc = currentGenerationConfig(modelsConfig);
|
||||
expect(gc.model).toBe('custom-model');
|
||||
// Settings-sourced generation config should be preserved since modelId doesn't exist in registry
|
||||
expect(gc.model).toBe('model-a');
|
||||
expect(gc.samplingParams?.temperature).toBe(0.9);
|
||||
expect(gc.samplingParams?.max_tokens).toBe(999);
|
||||
expect(gc.timeout).toBe(9999);
|
||||
@@ -678,120 +681,4 @@ describe('ModelsConfig', () => {
|
||||
expect(modelsConfig.getModel()).toBe('updated-model');
|
||||
expect(modelsConfig.getGenerationConfig().model).toBe('updated-model');
|
||||
});
|
||||
|
||||
describe('getAllAvailableModels', () => {
|
||||
it('should return all models across all authTypes', () => {
|
||||
const modelProvidersConfig: ModelProvidersConfig = {
|
||||
openai: [
|
||||
{
|
||||
id: 'openai-model-1',
|
||||
name: 'OpenAI Model 1',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
envKey: 'OPENAI_API_KEY',
|
||||
},
|
||||
{
|
||||
id: 'openai-model-2',
|
||||
name: 'OpenAI Model 2',
|
||||
baseUrl: 'https://api.openai.com/v1',
|
||||
envKey: 'OPENAI_API_KEY',
|
||||
},
|
||||
],
|
||||
anthropic: [
|
||||
{
|
||||
id: 'anthropic-model-1',
|
||||
name: 'Anthropic Model 1',
|
||||
baseUrl: 'https://api.anthropic.com/v1',
|
||||
envKey: 'ANTHROPIC_API_KEY',
|
||||
},
|
||||
],
|
||||
gemini: [
|
||||
{
|
||||
id: 'gemini-model-1',
|
||||
name: 'Gemini Model 1',
|
||||
baseUrl: 'https://generativelanguage.googleapis.com/v1',
|
||||
envKey: 'GEMINI_API_KEY',
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const modelsConfig = new ModelsConfig({
|
||||
modelProvidersConfig,
|
||||
});
|
||||
|
||||
const allModels = modelsConfig.getAllAvailableModels();
|
||||
|
||||
// Should include qwen-oauth models (hard-coded)
|
||||
const qwenModels = allModels.filter(
|
||||
(m) => m.authType === AuthType.QWEN_OAUTH,
|
||||
);
|
||||
expect(qwenModels.length).toBeGreaterThan(0);
|
||||
|
||||
// Should include openai models
|
||||
const openaiModels = allModels.filter(
|
||||
(m) => m.authType === AuthType.USE_OPENAI,
|
||||
);
|
||||
expect(openaiModels.length).toBe(2);
|
||||
expect(openaiModels.map((m) => m.id)).toContain('openai-model-1');
|
||||
expect(openaiModels.map((m) => m.id)).toContain('openai-model-2');
|
||||
|
||||
// Should include anthropic models
|
||||
const anthropicModels = allModels.filter(
|
||||
(m) => m.authType === AuthType.USE_ANTHROPIC,
|
||||
);
|
||||
expect(anthropicModels.length).toBe(1);
|
||||
expect(anthropicModels[0].id).toBe('anthropic-model-1');
|
||||
|
||||
// Should include gemini models
|
||||
const geminiModels = allModels.filter(
|
||||
(m) => m.authType === AuthType.USE_GEMINI,
|
||||
);
|
||||
expect(geminiModels.length).toBe(1);
|
||||
expect(geminiModels[0].id).toBe('gemini-model-1');
|
||||
});
|
||||
|
||||
it('should return empty array when no models are registered', () => {
|
||||
const modelsConfig = new ModelsConfig();
|
||||
|
||||
const allModels = modelsConfig.getAllAvailableModels();
|
||||
|
||||
// Should still include qwen-oauth models (hard-coded)
|
||||
expect(allModels.length).toBeGreaterThan(0);
|
||||
const qwenModels = allModels.filter(
|
||||
(m) => m.authType === AuthType.QWEN_OAUTH,
|
||||
);
|
||||
expect(qwenModels.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should return models with correct structure', () => {
|
||||
const modelProvidersConfig: ModelProvidersConfig = {
|
||||
openai: [
|
||||
{
|
||||
id: 'test-model',
|
||||
name: 'Test Model',
|
||||
description: 'A test model',
|
||||
baseUrl: 'https://api.example.com/v1',
|
||||
envKey: 'TEST_API_KEY',
|
||||
capabilities: {
|
||||
vision: true,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const modelsConfig = new ModelsConfig({
|
||||
modelProvidersConfig,
|
||||
});
|
||||
|
||||
const allModels = modelsConfig.getAllAvailableModels();
|
||||
const testModel = allModels.find((m) => m.id === 'test-model');
|
||||
|
||||
expect(testModel).toBeDefined();
|
||||
expect(testModel?.id).toBe('test-model');
|
||||
expect(testModel?.label).toBe('Test Model');
|
||||
expect(testModel?.description).toBe('A test model');
|
||||
expect(testModel?.authType).toBe(AuthType.USE_OPENAI);
|
||||
expect(testModel?.isVision).toBe(true);
|
||||
expect(testModel?.capabilities?.vision).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -203,18 +203,6 @@ export class ModelsConfig {
return this.modelRegistry.getModelsForAuthType(authType);
}

/**
 * Get all available models across all authTypes
 */
getAllAvailableModels(): AvailableModel[] {
const allModels: AvailableModel[] = [];
for (const authType of Object.values(AuthType)) {
const models = this.modelRegistry.getModelsForAuthType(authType);
allModels.push(...models);
}
return allModels;
}

/**
 * Check if a model exists for the given authType
 */
@@ -319,33 +307,6 @@ export class ModelsConfig {
return this.generationConfigSources;
}

/**
 * Merge settings generation config, preserving existing values.
 * Used when provider-sourced config is cleared but settings should still apply.
 */
mergeSettingsGenerationConfig(
settingsGenerationConfig?: Partial<ContentGeneratorConfig>,
): void {
if (!settingsGenerationConfig) {
return;
}

for (const field of MODEL_GENERATION_CONFIG_FIELDS) {
if (
!(field in this._generationConfig) &&
field in settingsGenerationConfig
) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(this._generationConfig as any)[field] =
settingsGenerationConfig[field];
this.generationConfigSources[field] = {
kind: 'settings',
detail: `model.generationConfig.${field}`,
};
}
}
}

/**
 * Update credentials in generation config.
 * Sets a flag to prevent syncAfterAuthRefresh from overriding these credentials.
@@ -353,20 +314,12 @@ export class ModelsConfig {
 * When credentials are manually set, we clear all provider-sourced configuration
 * to maintain provider atomicity (either fully applied or not at all).
 * Other layers (CLI, env, settings, defaults) will participate in resolve.
 *
 * @param settingsGenerationConfig Optional generation config from settings.json
 *                                 to merge after clearing provider-sourced config.
 *                                 This ensures settings.model.generationConfig fields
 *                                 (e.g., samplingParams, timeout) are preserved.
 */
updateCredentials(
credentials: {
apiKey?: string;
baseUrl?: string;
model?: string;
},
settingsGenerationConfig?: Partial<ContentGeneratorConfig>,
): void {
updateCredentials(credentials: {
apiKey?: string;
baseUrl?: string;
model?: string;
}): void {
/**
 * If any fields are updated here, we treat the resulting config as manually overridden
 * and avoid applying modelProvider defaults during the next auth refresh.
@@ -406,14 +359,6 @@ export class ModelsConfig {
this.strictModelProviderSelection = false;
// Clear apiKeyEnvKey to prevent validation from requiring environment variable
this._generationConfig.apiKeyEnvKey = undefined;

// After clearing provider-sourced config, merge settings.model.generationConfig
// to ensure fields like samplingParams, timeout, etc. are preserved.
// This follows the resolution strategy where settings.model.generationConfig
// has lower priority than programmatic overrides but should still be applied.
if (settingsGenerationConfig) {
this.mergeSettingsGenerationConfig(settingsGenerationConfig);
}
}

/**
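The manual-override behaviour described in the comment above is what the updated ModelsConfig test in this compare exercises. A condensed sketch of the same flow, assuming `ModelsConfig`, `ModelProvidersConfig`, and `AuthType` are importable from the core package as in the tests; ids and values are illustrative:

```ts
import {
  AuthType,
  ModelsConfig,
  type ModelProvidersConfig,
} from '@qwen-code/qwen-code-core';

// Provider entry mirroring the test fixture in this compare view.
const modelProvidersConfig: ModelProvidersConfig = {
  openai: [
    {
      id: 'model-a',
      name: 'Model A',
      baseUrl: 'https://api.example.com/v1',
      envKey: 'API_KEY_A',
      generationConfig: { samplingParams: { temperature: 0.1, max_tokens: 123 } },
    },
  ],
};

const modelsConfig = new ModelsConfig({
  initialAuthType: AuthType.USE_OPENAI,
  modelProvidersConfig,
  generationConfig: {
    model: 'model-a',
    samplingParams: { temperature: 0.9, max_tokens: 999 },
  },
});

// Manually updating any credential field marks the config as overridden...
modelsConfig.updateCredentials({ model: 'model-a' });

// ...so a later auth refresh keeps the settings-sourced sampling params
// instead of re-applying the provider defaults for 'model-a'.
modelsConfig.syncAfterAuthRefresh(AuthType.USE_OPENAI, modelsConfig.getModel());
console.assert(
  modelsConfig.getGenerationConfig().samplingParams?.temperature === 0.9,
);
```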
@@ -642,88 +587,50 @@ export class ModelsConfig {
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync state after auth refresh with fallback strategy:
|
||||
* 1. If modelId can be found in modelRegistry, use the config from modelRegistry.
|
||||
* 2. Otherwise, if existing credentials exist in resolved generationConfig from other sources
|
||||
* (not modelProviders), preserve them and update authType/modelId only.
|
||||
* 3. Otherwise, fall back to default model for the authType.
|
||||
* 4. If no default is available, leave the generationConfig incomplete and let
|
||||
* resolveContentGeneratorConfigWithSources throw exceptions as expected.
|
||||
* Called by Config.refreshAuth to sync state after auth refresh.
|
||||
*
|
||||
* IMPORTANT: If credentials were manually set via updateCredentials(),
|
||||
* we should NOT override them with modelProvider defaults.
|
||||
* This handles the case where user inputs credentials via OpenAIKeyPrompt
|
||||
* after removing environment variables for a previously selected model.
|
||||
*/
|
||||
syncAfterAuthRefresh(authType: AuthType, modelId?: string): void {
|
||||
this.strictModelProviderSelection = false;
|
||||
const previousAuthType = this.currentAuthType;
|
||||
this.currentAuthType = authType;
|
||||
// Check if we have manually set credentials that should be preserved
|
||||
const preserveManualCredentials = this.hasManualCredentials;
|
||||
|
||||
// If credentials were manually set, don't apply modelProvider defaults
|
||||
// Just update the authType and preserve the manually set credentials
|
||||
if (preserveManualCredentials && authType === AuthType.USE_OPENAI) {
|
||||
this.strictModelProviderSelection = false;
|
||||
this.currentAuthType = authType;
|
||||
if (modelId) {
|
||||
this._generationConfig.model = modelId;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
this.strictModelProviderSelection = false;
|
||||
|
||||
// Step 1: If modelId exists in registry, always use config from modelRegistry
|
||||
// Manual credentials won't have a modelId that matches a provider model (handleAuthSelect prevents it),
|
||||
// so if modelId exists in registry, we should always use provider config.
|
||||
// This handles provider switching even within the same authType.
|
||||
if (modelId && this.modelRegistry.hasModel(authType, modelId)) {
|
||||
const resolved = this.modelRegistry.getModel(authType, modelId);
|
||||
if (resolved) {
|
||||
// Ensure applyResolvedModelDefaults can correctly apply authType-specific
|
||||
// behavior (e.g., Qwen OAuth placeholder token) by setting currentAuthType
|
||||
// before applying defaults.
|
||||
this.currentAuthType = authType;
|
||||
this.applyResolvedModelDefaults(resolved);
|
||||
this.strictModelProviderSelection = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Check if there are existing credentials from other sources (not modelProviders)
|
||||
const apiKeySource = this.generationConfigSources['apiKey'];
|
||||
const baseUrlSource = this.generationConfigSources['baseUrl'];
|
||||
const hasExistingCredentials =
|
||||
(this._generationConfig.apiKey &&
|
||||
apiKeySource?.kind !== 'modelProviders') ||
|
||||
(this._generationConfig.baseUrl &&
|
||||
baseUrlSource?.kind !== 'modelProviders');
|
||||
|
||||
// Only preserve credentials if:
|
||||
// 1. AuthType hasn't changed (credentials are authType-specific), AND
|
||||
// 2. The modelId doesn't exist in the registry (if it did, we would have used provider config in Step 1), AND
|
||||
// 3. Either:
|
||||
// a. We have manual credentials (set via updateCredentials), OR
|
||||
// b. We have existing credentials
|
||||
// Note: Even if authType hasn't changed, switching to a different provider model (that exists in registry)
|
||||
// will use provider config (Step 1), not preserve old credentials. This ensures credentials change when
|
||||
// switching providers, independent of authType changes.
|
||||
const isAuthTypeChange = previousAuthType !== authType;
|
||||
const shouldPreserveCredentials =
|
||||
!isAuthTypeChange &&
|
||||
(modelId === undefined ||
|
||||
!this.modelRegistry.hasModel(authType, modelId)) &&
|
||||
(this.hasManualCredentials || hasExistingCredentials);
|
||||
|
||||
if (shouldPreserveCredentials) {
|
||||
// Preserve existing credentials, just update authType and modelId if provided
|
||||
if (modelId) {
|
||||
this._generationConfig.model = modelId;
|
||||
if (!this.generationConfigSources['model']) {
|
||||
this.generationConfigSources['model'] = {
|
||||
kind: 'programmatic',
|
||||
detail: 'auth refresh (preserved credentials)',
|
||||
};
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Step 3: Fall back to default model for the authType
|
||||
const defaultModel =
|
||||
this.modelRegistry.getDefaultModelForAuthType(authType);
|
||||
if (defaultModel) {
|
||||
this.applyResolvedModelDefaults(defaultModel);
|
||||
return;
|
||||
}
|
||||
|
||||
// Step 4: No default available - leave generationConfig incomplete
|
||||
// resolveContentGeneratorConfigWithSources will throw exceptions as expected
|
||||
if (modelId) {
|
||||
this._generationConfig.model = modelId;
|
||||
if (!this.generationConfigSources['model']) {
|
||||
this.generationConfigSources['model'] = {
|
||||
kind: 'programmatic',
|
||||
detail: 'auth refresh (no default model)',
|
||||
};
|
||||
} else {
|
||||
// If the provided modelId doesn't exist in the registry for the new authType,
|
||||
// use the default model for that authType instead of keeping the old model.
|
||||
// This handles the case where switching from one authType (e.g., OPENAI with
|
||||
// env vars) to another (e.g., qwen-oauth) - we should use the default model
|
||||
// for the new authType, not the old model.
|
||||
this.currentAuthType = authType;
|
||||
const defaultModel =
|
||||
this.modelRegistry.getDefaultModelForAuthType(authType);
|
||||
if (defaultModel) {
|
||||
this.applyResolvedModelDefaults(defaultModel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -751,7 +751,6 @@ describe('getQwenOAuthClient', () => {
|
||||
beforeEach(() => {
|
||||
mockConfig = {
|
||||
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(false),
|
||||
isInteractive: vi.fn().mockReturnValue(true),
|
||||
} as unknown as Config;
|
||||
|
||||
originalFetch = global.fetch;
|
||||
@@ -840,7 +839,9 @@ describe('getQwenOAuthClient', () => {
|
||||
requireCachedCredentials: true,
|
||||
}),
|
||||
),
|
||||
).rejects.toThrow('Please use /auth to re-authenticate.');
|
||||
).rejects.toThrow(
|
||||
'No cached Qwen-OAuth credentials found. Please re-authenticate.',
|
||||
);
|
||||
|
||||
expect(global.fetch).not.toHaveBeenCalled();
|
||||
|
||||
@@ -1006,7 +1007,6 @@ describe('getQwenOAuthClient - Enhanced Error Scenarios', () => {
|
||||
beforeEach(() => {
|
||||
mockConfig = {
|
||||
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(false),
|
||||
isInteractive: vi.fn().mockReturnValue(true),
|
||||
} as unknown as Config;
|
||||
|
||||
originalFetch = global.fetch;
|
||||
@@ -1202,7 +1202,6 @@ describe('authWithQwenDeviceFlow - Comprehensive Testing', () => {
|
||||
beforeEach(() => {
|
||||
mockConfig = {
|
||||
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(false),
|
||||
isInteractive: vi.fn().mockReturnValue(true),
|
||||
} as unknown as Config;
|
||||
|
||||
originalFetch = global.fetch;
|
||||
@@ -1406,7 +1405,6 @@ describe('Browser Launch and Error Handling', () => {
|
||||
beforeEach(() => {
|
||||
mockConfig = {
|
||||
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(false),
|
||||
isInteractive: vi.fn().mockReturnValue(true),
|
||||
} as unknown as Config;
|
||||
|
||||
originalFetch = global.fetch;
|
||||
@@ -2045,7 +2043,6 @@ describe('SharedTokenManager Integration in QwenOAuth2Client', () => {
|
||||
it('should handle TokenManagerError types correctly in getQwenOAuthClient', async () => {
|
||||
const mockConfig = {
|
||||
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(true),
|
||||
isInteractive: vi.fn().mockReturnValue(true),
|
||||
} as unknown as Config;
|
||||
|
||||
// Test different TokenManagerError types
|
||||
|
||||
@@ -516,7 +516,9 @@ export async function getQwenOAuthClient(
}

if (options?.requireCachedCredentials) {
throw new Error('Please use /auth to re-authenticate.');
throw new Error(
'No cached Qwen-OAuth credentials found. Please re-authenticate.',
);
}

// If we couldn't obtain valid credentials via SharedTokenManager, fall back to
@@ -738,9 +740,11 @@ async function authWithQwenDeviceFlow(
// Emit device authorization event for UI integration immediately
qwenOAuth2Events.emit(QwenOAuth2Event.AuthUri, deviceAuth);

if (config.isBrowserLaunchSuppressed() || !config.isInteractive()) {
showFallbackMessage(deviceAuth.verification_uri_complete);
}
// Always show the fallback message in non-interactive environments to ensure
// users can see the authorization URL even if browser launching is attempted.
// This is critical for headless/remote environments where browser launching
// may silently fail without throwing an error.
showFallbackMessage(deviceAuth.verification_uri_complete);

// Try to open browser if not suppressed
if (!config.isBrowserLaunchSuppressed()) {
@@ -1,6 +1,11 @@
# Qwen Code Companion

Seamlessly integrate [Qwen Code](https://github.com/QwenLM/qwen-code) into Visual Studio Code with native IDE features and an intuitive interface. This extension bundles everything you need to get started immediately.
[](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion)
[](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion)
[](https://open-vsx.org/extension/qwenlm/qwen-code-vscode-ide-companion)
[](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion)

Seamlessly integrate [Qwen Code](https://github.com/QwenLM/qwen-code) into Visual Studio Code with native IDE features and an intuitive chat interface. This extension bundles everything you need — no additional installation required.

## Demo

@@ -11,7 +16,7 @@ Seamlessly integrate [Qwen Code](https://github.com/QwenLM/qwen-code) into Visua

## Features

- **Native IDE experience**: Dedicated Qwen Code sidebar panel accessed via the Qwen icon
- **Native IDE experience**: Dedicated Qwen Code Chat panel accessed via the Qwen icon in the editor title bar
- **Native diffing**: Review, edit, and accept changes in VS Code's diff view
- **Auto-accept edits mode**: Automatically apply Qwen's changes as they're made
- **File management**: @-mention files or attach files and images using the system file picker
@@ -20,73 +25,46 @@ Seamlessly integrate [Qwen Code](https://github.com/QwenLM/qwen-code) into Visua

## Requirements

- Visual Studio Code 1.85.0 or newer
- Visual Studio Code 1.85.0 or newer (also works with Cursor, Windsurf, and other VS Code-based editors)

## Installation
## Quick Start

1. Install from the VS Code Marketplace: https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion
1. **Install** from the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) or [Open VSX Registry](https://open-vsx.org/extension/qwenlm/qwen-code-vscode-ide-companion)

2. Two ways to use
   - Chat panel: Click the Qwen icon in the Activity Bar, or run `Qwen Code: Open` from the Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`).
   - Terminal session (classic): Run `Qwen Code: Run` to launch a session in the integrated terminal (bundled CLI).
2. **Open the Chat panel** using one of these methods:
   - Click the **Qwen icon** in the top-right corner of the editor
   - Run `Qwen Code: Open` from the Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`)

## Development and Debugging
3. **Start chatting** — Ask Qwen to help with coding tasks, explain code, fix bugs, or write new features

To debug and develop this extension locally:
## Commands

1. **Clone the repository**
| Command | Description |
| -------------------------------- | ------------------------------------------------------ |
| `Qwen Code: Open` | Open the Qwen Code Chat panel |
| `Qwen Code: Run` | Launch a classic terminal session with the bundled CLI |
| `Qwen Code: Accept Current Diff` | Accept the currently displayed diff |
| `Qwen Code: Close Diff Editor` | Close/reject the current diff |

```bash
git clone https://github.com/QwenLM/qwen-code.git
cd qwen-code
```
## Feedback & Issues

2. **Install dependencies**
- 🐛 [Report bugs](https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&labels=bug,vscode-ide-companion)
- 💡 [Request features](https://github.com/QwenLM/qwen-code/issues/new?template=feature_request.yml&labels=enhancement,vscode-ide-companion)
- 📖 [Documentation](https://qwenlm.github.io/qwen-code-docs/)
- 📋 [Changelog](https://github.com/QwenLM/qwen-code/releases)

```bash
npm install
# or if using pnpm
pnpm install
```
## Contributing

3. **Start debugging**
We welcome contributions! See our [Contributing Guide](https://github.com/QwenLM/qwen-code/blob/main/CONTRIBUTING.md) for details on:

```bash
code . # Open the project root in VS Code
```
- Open the `packages/vscode-ide-companion/src/extension.ts` file
- Open Debug panel (`Ctrl+Shift+D` or `Cmd+Shift+D`)
- Select **"Launch Companion VS Code Extension"** from the debug dropdown
- Press `F5` to launch Extension Development Host

4. **Make changes and reload**
   - Edit the source code in the original VS Code window
   - To see your changes, reload the Extension Development Host window by:
     - Pressing `Ctrl+R` (Windows/Linux) or `Cmd+R` (macOS)
     - Or clicking the "Reload" button in the debug toolbar

5. **View logs and debug output**
   - Open the Debug Console in the original VS Code window to see extension logs
   - In the Extension Development Host window, open Developer Tools with `Help > Toggle Developer Tools` to see webview logs

## Build for Production

To build the extension for distribution:

```bash
npm run compile
# or
pnpm run compile
```

To package the extension as a VSIX file:

```bash
npx vsce package
# or
pnpm vsce package
```
- Setting up the development environment
- Building and debugging the extension locally
- Submitting pull requests

## Terms of Service and Privacy Notice

By installing this extension, you agree to the [Terms of Service](https://github.com/QwenLM/qwen-code/blob/main/docs/tos-privacy.md).

## License

[Apache-2.0](https://github.com/QwenLM/qwen-code/blob/main/LICENSE)

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.7.0",
"version": "0.7.1",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {

@@ -314,34 +314,32 @@ export async function activate(context: vscode.ExtensionContext) {
'cli.js',
).fsPath;
const execPath = process.execPath;
const lowerExecPath = execPath.toLowerCase();
const needsElectronRunAsNode =
lowerExecPath.includes('code') ||
lowerExecPath.includes('electron');

let qwenCmd: string;
const terminalOptions: vscode.TerminalOptions = {
name: `Qwen Code (${selectedFolder.name})`,
cwd: selectedFolder.uri.fsPath,
location,
};

let qwenCmd: string;

if (isWindows) {
// Use system Node via cmd.exe; avoid PowerShell parsing issues
// On Windows, try multiple strategies to find a Node.js runtime:
// 1. Check if VSCode ships a standalone node.exe alongside Code.exe
// 2. Check VSCode's internal Node.js in resources directory
// 3. Fall back to using Code.exe with ELECTRON_RUN_AS_NODE=1
const quoteCmd = (s: string) => `"${s.replace(/"/g, '""')}"`;
const cliQuoted = quoteCmd(cliEntry);
// TODO: @yiliang114, temporarily run through node, and later hope to decouple from the local node
qwenCmd = `node ${cliQuoted}`;
terminalOptions.shellPath = process.env.ComSpec;
} else {
// macOS/Linux: All VSCode-like IDEs (VSCode, Cursor, Windsurf, etc.)
// are Electron-based, so we always need ELECTRON_RUN_AS_NODE=1
// to run Node.js scripts using the IDE's bundled runtime.
const quotePosix = (s: string) => `"${s.replace(/"/g, '\\"')}"`;
const baseCmd = `${quotePosix(execPath)} ${quotePosix(cliEntry)}`;
if (needsElectronRunAsNode) {
// macOS Electron helper needs ELECTRON_RUN_AS_NODE=1;
qwenCmd = `ELECTRON_RUN_AS_NODE=1 ${baseCmd}`;
} else {
qwenCmd = baseCmd;
}
qwenCmd = `ELECTRON_RUN_AS_NODE=1 ${baseCmd}`;
}

const terminal = vscode.window.createTerminal(terminalOptions);

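For context on the POSIX branch above, a minimal sketch of the command it ends up building; the cli.js path is a placeholder:

```ts
// Minimal sketch (POSIX case). ELECTRON_RUN_AS_NODE=1 makes the Electron-based
// editor binary behave like plain Node, so the bundled CLI runs on the IDE's
// own runtime without requiring a system Node install.
const cliEntry = '/path/to/vscode-ide-companion/dist/cli.js'; // placeholder path
const quotePosix = (s: string) => `"${s.replace(/"/g, '\\"')}"`;

const qwenCmd = `ELECTRON_RUN_AS_NODE=1 ${quotePosix(process.execPath)} ${quotePosix(cliEntry)}`;
// e.g. passed to terminal.sendText(qwenCmd) once the terminal is created.
```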