diff --git a/packages/core/src/config/models.ts b/packages/core/src/config/models.ts
index 15ca9cf1..27d29053 100644
--- a/packages/core/src/config/models.ts
+++ b/packages/core/src/config/models.ts
@@ -4,6 +4,9 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
+export const DEFAULT_QWEN_MODEL = 'qwen3-coder-plus';
+export const DEFAULT_QWEN_FLASH_MODEL = 'qwen3-coder-flash';
+
 export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-plus';
 export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
 export const DEFAULT_GEMINI_FLASH_LITE_MODEL = 'gemini-2.5-flash-lite';
diff --git a/packages/core/src/core/contentGenerator.ts b/packages/core/src/core/contentGenerator.ts
index c4a934e7..b04ae36d 100644
--- a/packages/core/src/core/contentGenerator.ts
+++ b/packages/core/src/core/contentGenerator.ts
@@ -14,7 +14,7 @@ import {
   GoogleGenAI,
 } from '@google/genai';
 import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
-import { DEFAULT_GEMINI_MODEL } from '../config/models.js';
+import { DEFAULT_GEMINI_MODEL, DEFAULT_QWEN_MODEL } from '../config/models.js';
 import { Config } from '../config/config.js';
 import { getEffectiveModel } from './modelCheck.js';
 import { UserTierId } from '../code_assist/types.js';
@@ -136,7 +136,9 @@ export function createContentGeneratorConfig(
     // For Qwen OAuth, we'll handle the API key dynamically in createContentGenerator
     // Set a special marker to indicate this is Qwen OAuth
     contentGeneratorConfig.apiKey = 'QWEN_OAUTH_DYNAMIC_TOKEN';
-    contentGeneratorConfig.model = config.getModel() || DEFAULT_GEMINI_MODEL;
+
+    // Use the QWEN_MODEL environment variable if set; otherwise fall back to DEFAULT_QWEN_MODEL (qwen3-coder-plus).
+    contentGeneratorConfig.model = process.env.QWEN_MODEL || DEFAULT_QWEN_MODEL;
 
     return contentGeneratorConfig;
   }
diff --git a/packages/core/src/core/openaiContentGenerator.test.ts b/packages/core/src/core/openaiContentGenerator.test.ts
index 6dc0690a..84ba2dcb 100644
--- a/packages/core/src/core/openaiContentGenerator.test.ts
+++ b/packages/core/src/core/openaiContentGenerator.test.ts
@@ -2384,4 +2384,605 @@ describe('OpenAIContentGenerator', () => {
       consoleSpy.mockRestore();
     });
   });
+
+  describe('metadata control', () => {
+    it('should include metadata when authType is QWEN_OAUTH', async () => {
+      const qwenConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'qwen-oauth',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('test-session-id'),
+      } as unknown as Config;
+
+      const qwenGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'qwen-turbo',
+        qwenConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'qwen-turbo',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'qwen-turbo',
+      };
+
+      await qwenGenerator.generateContent(request, 'test-prompt-id');
+
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.objectContaining({
+          metadata: {
+            sessionId: 'test-session-id',
+            promptId: 'test-prompt-id',
+          },
+        }),
+      );
+    });
+
+    it('should include metadata when baseURL is dashscope openai compatible mode', async () => {
+      // Mock environment to set the DashScope base URL BEFORE creating the generator
+      vi.stubEnv(
+        'OPENAI_BASE_URL',
+        'https://dashscope.aliyuncs.com/compatible-mode/v1',
+      );
+
+      const dashscopeConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'openai', // Not QWEN_OAUTH
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('dashscope-session-id'),
+      } as unknown as Config;
+
+      const dashscopeGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'qwen-turbo',
+        dashscopeConfig,
+      );
+
+      // Verify that the client was created with the correct baseURL
+      expect(vi.mocked(OpenAI)).toHaveBeenCalledWith(
+        expect.objectContaining({
+          baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
+        }),
+      );
+
+      // Mock the client's baseURL property to return the expected value
+      Object.defineProperty(dashscopeGenerator['client'], 'baseURL', {
+        value: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
+        writable: true,
+      });
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'qwen-turbo',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'qwen-turbo',
+      };
+
+      await dashscopeGenerator.generateContent(request, 'dashscope-prompt-id');
+
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.objectContaining({
+          metadata: {
+            sessionId: 'dashscope-session-id',
+            promptId: 'dashscope-prompt-id',
+          },
+        }),
+      );
+    });
+
+    it('should NOT include metadata for regular OpenAI providers', async () => {
+      const regularConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'openai',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('regular-session-id'),
+      } as unknown as Config;
+
+      const regularGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        regularConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'gpt-4',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      await regularGenerator.generateContent(request, 'regular-prompt-id');
+
+      // Should NOT include metadata
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+    });
+
+    it('should NOT include metadata for other auth types', async () => {
+      const otherAuthConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'gemini-api-key',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('other-session-id'),
+      } as unknown as Config;
+
+      const otherGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        otherAuthConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'gpt-4',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      await otherGenerator.generateContent(request, 'other-prompt-id');
+
+      // Should NOT include metadata
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+    });
+
+    it('should NOT include metadata for other base URLs', async () => {
+      // Mock environment to set a different base URL
+      vi.stubEnv('OPENAI_BASE_URL', 'https://api.openai.com/v1');
+
+      const otherBaseUrlConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'openai',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('other-base-url-session-id'),
+      } as unknown as Config;
+
+      const otherBaseUrlGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        otherBaseUrlConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'gpt-4',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      await otherBaseUrlGenerator.generateContent(
+        request,
+        'other-base-url-prompt-id',
+      );
+
+      // Should NOT include metadata
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+    });
+
+    it('should include metadata in streaming requests when conditions are met', async () => {
+      const qwenConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'qwen-oauth',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('streaming-session-id'),
+      } as unknown as Config;
+
+      const qwenGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'qwen-turbo',
+        qwenConfig,
+      );
+
+      const mockStream = [
+        {
+          id: 'chatcmpl-123',
+          choices: [
+            {
+              index: 0,
+              delta: { content: 'Hello' },
+              finish_reason: null,
+            },
+          ],
+          created: 1677652288,
+        },
+        {
+          id: 'chatcmpl-123',
+          choices: [
+            {
+              index: 0,
+              delta: { content: ' there!' },
+              finish_reason: 'stop',
+            },
+          ],
+          created: 1677652288,
+        },
+      ];
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue({
+        async *[Symbol.asyncIterator]() {
+          for (const chunk of mockStream) {
+            yield chunk;
+          }
+        },
+      });
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'qwen-turbo',
+      };
+
+      const stream = await qwenGenerator.generateContentStream(
+        request,
+        'streaming-prompt-id',
+      );
+
+      // Verify metadata was included in the streaming request
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.objectContaining({
+          metadata: {
+            sessionId: 'streaming-session-id',
+            promptId: 'streaming-prompt-id',
+          },
+          stream: true,
+          stream_options: { include_usage: true },
+        }),
+      );
+
+      // Consume the stream to complete the test
+      const responses = [];
+      for await (const response of stream) {
+        responses.push(response);
+      }
+      expect(responses).toHaveLength(2);
+    });
+
+    it('should NOT include metadata in streaming requests when conditions are not met', async () => {
+      const regularConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'openai',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('regular-streaming-session-id'),
+      } as unknown as Config;
+
+      const regularGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        regularConfig,
+      );
+
+      const mockStream = [
+        {
+          id: 'chatcmpl-123',
+          choices: [
+            {
+              index: 0,
+              delta: { content: 'Hello' },
+              finish_reason: null,
+            },
+          ],
+          created: 1677652288,
+        },
+        {
+          id: 'chatcmpl-123',
+          choices: [
+            {
+              index: 0,
+              delta: { content: ' there!' },
+              finish_reason: 'stop',
+            },
+          ],
+          created: 1677652288,
+        },
+      ];
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue({
+        async *[Symbol.asyncIterator]() {
+          for (const chunk of mockStream) {
+            yield chunk;
+          }
+        },
+      });
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      const stream = await regularGenerator.generateContentStream(
+        request,
+        'regular-streaming-prompt-id',
+      );
+
+      // Verify metadata was NOT included in the streaming request
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+
+      // Consume the stream to complete the test
+      const responses = [];
+      for await (const response of stream) {
+        responses.push(response);
+      }
+      expect(responses).toHaveLength(2);
+    });
+
+    it('should handle undefined sessionId gracefully', async () => {
+      const qwenConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'qwen-oauth',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue(undefined), // Undefined session ID
+      } as unknown as Config;
+
+      const qwenGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'qwen-turbo',
+        qwenConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'qwen-turbo',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'qwen-turbo',
+      };
+
+      await qwenGenerator.generateContent(
+        request,
+        'undefined-session-prompt-id',
+      );
+
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.objectContaining({
+          metadata: {
+            sessionId: undefined,
+            promptId: 'undefined-session-prompt-id',
+          },
+        }),
+      );
+    });
+
+    it('should handle undefined baseURL gracefully', async () => {
+      // Ensure no base URL is set
+      vi.stubEnv('OPENAI_BASE_URL', '');
+
+      const noBaseUrlConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: 'openai',
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('no-base-url-session-id'),
+      } as unknown as Config;
+
+      const noBaseUrlGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        noBaseUrlConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'gpt-4',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      await noBaseUrlGenerator.generateContent(
+        request,
+        'no-base-url-prompt-id',
+      );
+
+      // Should NOT include metadata when baseURL is empty
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+    });
+
+    it('should handle undefined authType gracefully', async () => {
+      const undefinedAuthConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue({
+          authType: undefined, // Undefined auth type
+          enableOpenAILogging: false,
+        }),
+        getSessionId: vi.fn().mockReturnValue('undefined-auth-session-id'),
+      } as unknown as Config;
+
+      const undefinedAuthGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        undefinedAuthConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'gpt-4',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      await undefinedAuthGenerator.generateContent(
+        request,
+        'undefined-auth-prompt-id',
+      );
+
+      // Should NOT include metadata when authType is undefined
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+    });
+
+    it('should handle undefined config gracefully', async () => {
+      const undefinedConfig = {
+        getContentGeneratorConfig: vi.fn().mockReturnValue(undefined), // Undefined config
+        getSessionId: vi.fn().mockReturnValue('undefined-config-session-id'),
+      } as unknown as Config;
+
+      const undefinedConfigGenerator = new OpenAIContentGenerator(
+        'test-key',
+        'gpt-4',
+        undefinedConfig,
+      );
+
+      const mockResponse = {
+        id: 'chatcmpl-123',
+        choices: [
+          {
+            index: 0,
+            message: { role: 'assistant', content: 'Response' },
+            finish_reason: 'stop',
+          },
+        ],
+        created: 1677652288,
+        model: 'gpt-4',
+      };
+
+      mockOpenAIClient.chat.completions.create.mockResolvedValue(mockResponse);
+
+      const request: GenerateContentParameters = {
+        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
+        model: 'gpt-4',
+      };
+
+      await undefinedConfigGenerator.generateContent(
+        request,
+        'undefined-config-prompt-id',
+      );
+
+      // Should NOT include metadata when config is undefined
+      expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.not.objectContaining({
+          metadata: expect.any(Object),
+        }),
+      );
+    });
+  });
 });
diff --git a/packages/core/src/core/openaiContentGenerator.ts b/packages/core/src/core/openaiContentGenerator.ts
index 4e9e73da..311fa6f5 100644
--- a/packages/core/src/core/openaiContentGenerator.ts
+++ b/packages/core/src/core/openaiContentGenerator.ts
@@ -20,7 +20,7 @@ import {
   FunctionCall,
   FunctionResponse,
 } from '@google/genai';
-import { ContentGenerator } from './contentGenerator.js';
+import { AuthType, ContentGenerator } from './contentGenerator.js';
 import OpenAI from 'openai';
 import { logApiResponse } from '../telemetry/loggers.js';
 import { ApiResponseEvent } from '../telemetry/types.js';
@@ -185,6 +185,46 @@ export class OpenAIContentGenerator implements ContentGenerator {
     );
   }
 
+  /**
+   * Determine whether request metadata should be included.
+   * The `metadata` field is only sent when the auth type is QWEN_OAUTH
+   * or the base URL is the DashScope OpenAI-compatible endpoint,
+   * because other models/providers may reject it or need extra configuration.
+   *
+   * @returns true if metadata should be included, false otherwise
+   */
+  private shouldIncludeMetadata(): boolean {
+    const authType = this.config.getContentGeneratorConfig?.()?.authType;
+    // baseURL may be undefined; default to an empty string in that case
+    const baseUrl = this.client?.baseURL || '';
+
+    return (
+      authType === AuthType.QWEN_OAUTH ||
+      baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1'
+    );
+  }
+
+  /**
+   * Build the metadata object for OpenAI API requests.
+   *
+   * @param userPromptId The user prompt ID to include in the metadata
+   * @returns a metadata object if shouldIncludeMetadata() returns true, undefined otherwise
+   */
+  private buildMetadata(
+    userPromptId: string,
+  ): { metadata: { sessionId?: string; promptId: string } } | undefined {
+    if (!this.shouldIncludeMetadata()) {
+      return undefined;
+    }
+
+    return {
+      metadata: {
+        sessionId: this.config.getSessionId?.(),
+        promptId: userPromptId,
+      },
+    };
+  }
+
   async generateContent(
     request: GenerateContentParameters,
     userPromptId: string,
@@ -205,10 +245,7 @@
       model: this.model,
       messages,
       ...samplingParams,
-      metadata: {
-        sessionId: this.config.getSessionId?.(),
-        promptId: userPromptId,
-      },
+      ...(this.buildMetadata(userPromptId) || {}),
     };
 
     if (request.config?.tools) {
@@ -339,10 +376,7 @@
       ...samplingParams,
       stream: true,
       stream_options: { include_usage: true },
-      metadata: {
-        sessionId: this.config.getSessionId?.(),
-        promptId: userPromptId,
-      },
+      ...(this.buildMetadata(userPromptId) || {}),
     };
 
     if (request.config?.tools) {
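
Reviewer note: the gating rule in this patch reduces to a small pure function. The sketch below is illustrative only; the standalone `buildRequestMetadata` helper, its plain-string `authType` parameter, and the `DASHSCOPE_BASE_URL` constant name are hypothetical stand-ins for the real `OpenAIContentGenerator.buildMetadata()`, which uses the `AuthType.QWEN_OAUTH` enum member and reads the base URL from the OpenAI client.

```typescript
// Minimal sketch of the metadata gating introduced above (assumptions noted
// in the lead-in; not the actual implementation).
const DASHSCOPE_BASE_URL = 'https://dashscope.aliyuncs.com/compatible-mode/v1';

interface RequestMetadata {
  sessionId?: string;
  promptId: string;
}

// Hypothetical free-function analogue of buildMetadata().
function buildRequestMetadata(
  authType: string | undefined,
  baseUrl: string | undefined,
  sessionId: string | undefined,
  promptId: string,
): { metadata: RequestMetadata } | undefined {
  // Metadata is attached only for Qwen OAuth or the DashScope endpoint;
  // every other provider gets a payload with no `metadata` key at all.
  const include =
    authType === 'qwen-oauth' || (baseUrl ?? '') === DASHSCOPE_BASE_URL;
  return include ? { metadata: { sessionId, promptId } } : undefined;
}

// Spreading `undefined || {}` adds no keys, which is why the call sites can
// unconditionally write `...(this.buildMetadata(userPromptId) || {})`.
const withMetadata = {
  model: 'qwen-turbo',
  ...(buildRequestMetadata('qwen-oauth', undefined, 's-1', 'p-1') || {}),
}; // -> { model, metadata: { sessionId: 's-1', promptId: 'p-1' } }

const withoutMetadata = {
  model: 'gpt-4',
  ...(buildRequestMetadata('openai', 'https://api.openai.com/v1', 's-2', 'p-2') ||
    {}),
}; // -> { model } only; no metadata key

console.log(withMetadata, withoutMetadata);
```

This mirrors the tests above: metadata appears for Qwen OAuth even when the session ID is undefined, and is absent for every other auth-type/base-URL combination.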