From bd0d3479c15aaed9c3d0b36e7a0e90194c5b076d Mon Sep 17 00:00:00 2001 From: pomelo Date: Tue, 29 Jul 2025 13:11:41 +0800 Subject: [PATCH] feat: Add systemPromptMappings Configuration Feature (#108) * feat: update system prompt for qwen3-coder * feat: add default systemPromptMappings for Qwen models - Add default systemPromptMappings configuration for qwen3-coder-plus model - Support DashScope compatible mode API endpoints - Include Qwen coder system prompt template with git repository and sandbox placeholders - Add comprehensive test coverage for default and custom systemPromptMappings - Update documentation to reflect the new default configuration behavior - Ensure backward compatibility with existing user configurations * feat: remove default system prompt template * fix: test ci * feat: handle code indentation issues * feat: update prompt.test.snapshots * feat: add URL trailing slash compatibility for system prompt mappings - Add normalizeUrl() function to standardize URLs by removing trailing slashes - Add urlMatches() function to compare URLs ignoring trailing slash differences - Replace direct includes() comparison with urlMatches() for baseUrl matching - Add comprehensive tests to verify URL matching with/without trailing slashes - Fixes issue where URLs like 'https://api.example.com' and 'https://api.example.com/' were treated as different * feat: update code --- docs/cli/configuration.md | 49 +++++++++- packages/cli/src/config/config.test.ts | 74 ++++++++++++++- packages/cli/src/config/config.ts | 11 +++ packages/cli/src/config/settings.ts | 7 ++ packages/core/src/config/config.ts | 21 +++++ .../__tests__/openaiTimeoutHandling.test.ts | 5 +- packages/core/src/core/client.test.ts | 1 + packages/core/src/core/client.ts | 15 ++- packages/core/src/core/prompts.test.ts | 93 +++++++++++++++++++ packages/core/src/core/prompts.ts | 77 ++++++++++++++- 10 files changed, 342 insertions(+), 11 deletions(-) diff --git a/docs/cli/configuration.md 
b/docs/cli/configuration.md index 98b53551..6b2cde3e 100644 --- a/docs/cli/configuration.md +++ b/docs/cli/configuration.md @@ -215,6 +215,38 @@ In addition to a project settings file, a project's `.gemini` directory can cont "enableOpenAILogging": true ``` +- **`systemPromptMappings`** (array): + - **Description:** Configures custom system prompt templates for specific model names and base URLs. This allows you to use different system prompts for different AI models or API endpoints. + - **Default:** `undefined` (uses default system prompt) + - **Properties:** + - **`baseUrls`** (array of strings, optional): Array of base URLs to exactly match against `OPENAI_BASE_URL` environment variable. If not specified, matches any base URL. + - **`modelNames`** (array of strings, optional): Array of model names to exactly match against `OPENAI_MODEL` environment variable. If not specified, matches any model. + - **`template`** (string): The system prompt template to use when both baseUrl and modelNames match. Supports placeholders: + - `{RUNTIME_VARS_IS_GIT_REPO}`: Replaced with `true` or `false` based on whether the current directory is a git repository + - `{RUNTIME_VARS_SANDBOX}`: Replaced with the sandbox type (e.g., `"sandbox-exec"`, `"docker"`, or empty string) + - **Example:** + + ```json + "systemPromptMappings": [ + { + "baseUrls": [ + "https://dashscope.aliyuncs.com/compatible-mode/v1", + "https://dashscope-intl.aliyuncs.com/compatible-mode/v1" + ], + "modelNames": ["qwen3-coder-plus"], + "template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}" + }, + { + "modelNames": ["gpt-4"], + "template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}" + }, + { + "baseUrls": ["api.openai.com"], + "template": "You are an AI coding assistant. 
Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}" + } + ] + ``` + ### Example `settings.json`: ```json @@ -242,7 +274,22 @@ In addition to a project settings file, a project's `.gemini` directory can cont "hideTips": false, "hideBanner": false, "maxSessionTurns": 10, - "enableOpenAILogging": true + "enableOpenAILogging": true, + "systemPromptMappings": [ + { + "baseUrls": ["dashscope"], + "modelNames": ["qwen3"], + "template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}" + }, + { + "modelNames": ["gpt-4"], + "template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}" + }, + { + "baseUrls": ["api.openai.com"], + "template": "You are an AI coding assistant. Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}" + } + ] } ``` diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index e322c3b0..53caa361 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -6,7 +6,7 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import * as os from 'os'; -import { loadCliConfig, parseArguments } from './config.js'; +import { loadCliConfig, parseArguments, CliArgs } from './config.js'; import { Settings } from './settings.js'; import { Extension } from './extension.js'; import * as ServerConfig from '@qwen-code/qwen-code-core'; @@ -1001,9 +1001,73 @@ describe('loadCliConfig ideMode', () => { const config = await loadCliConfig(settings, [], 'test-session', argv); expect(config.getIdeMode()).toBe(true); const mcpServers = config.getMcpServers(); - expect(mcpServers['_ide_server']).toBeDefined(); - expect(mcpServers['_ide_server'].httpUrl).toBe('http://localhost:3000/mcp'); - expect(mcpServers['_ide_server'].description).toBe('IDE connection'); - expect(mcpServers['_ide_server'].trust).toBe(false); + expect(mcpServers?.['_ide_server']).toBeDefined(); +
expect(mcpServers?.['_ide_server']?.httpUrl).toBe( + 'http://localhost:3000/mcp', + ); + expect(mcpServers?.['_ide_server']?.description).toBe('IDE connection'); + expect(mcpServers?.['_ide_server']?.trust).toBe(false); + }); +}); + +describe('loadCliConfig systemPromptMappings', () => { + it('should use default systemPromptMappings when not provided in settings', async () => { + const mockSettings: Settings = { + theme: 'dark', + }; + const mockExtensions: Extension[] = []; + const mockSessionId = 'test-session'; + const mockArgv: CliArgs = { + model: 'test-model', + } as CliArgs; + + const config = await loadCliConfig( + mockSettings, + mockExtensions, + mockSessionId, + mockArgv, + ); + + expect(config.getSystemPromptMappings()).toEqual([ + { + baseUrls: [ + 'https://dashscope.aliyuncs.com/compatible-mode/v1/', + 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/', + ], + modelNames: ['qwen3-coder-plus'], + template: + 'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}', + }, + ]); + }); + + it('should use custom systemPromptMappings when provided in settings', async () => { + const customSystemPromptMappings = [ + { + baseUrls: ['https://custom-api.com'], + modelNames: ['custom-model'], + template: 'Custom template', + }, + ]; + const mockSettings: Settings = { + theme: 'dark', + systemPromptMappings: customSystemPromptMappings, + }; + const mockExtensions: Extension[] = []; + const mockSessionId = 'test-session'; + const mockArgv: CliArgs = { + model: 'test-model', + } as CliArgs; + + const config = await loadCliConfig( + mockSettings, + mockExtensions, + mockSessionId, + mockArgv, + ); + + expect(config.getSystemPromptMappings()).toEqual( + customSystemPromptMappings, + ); }); }); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index 12e68681..dc5fecb3 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts 
@@ -394,6 +394,17 @@ export async function loadCliConfig( ? settings.enableOpenAILogging : argv.openaiLogging) ?? false, sampling_params: settings.sampling_params, + systemPromptMappings: settings.systemPromptMappings ?? [ + { + baseUrls: [ + 'https://dashscope.aliyuncs.com/compatible-mode/v1/', + 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/', + ], + modelNames: ['qwen3-coder-plus'], + template: + 'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}', + }, + ], }); } diff --git a/packages/cli/src/config/settings.ts b/packages/cli/src/config/settings.ts index 8c5f22b1..3653c5ea 100644 --- a/packages/cli/src/config/settings.ts +++ b/packages/cli/src/config/settings.ts @@ -96,6 +96,13 @@ export interface Settings { max_tokens?: number; }; + // System prompt mappings for different base URLs and model names + systemPromptMappings?: Array<{ + baseUrls?: string[]; + modelNames?: string[]; + template?: string; + }>; + // Add other settings here. ideMode?: boolean; } diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 2ffd0bae..5cc2d270 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -154,6 +154,11 @@ export interface ConfigParameters { temperature?: number; max_tokens?: number; }; + systemPromptMappings?: Array<{ + baseUrls?: string[]; + modelNames?: string[]; + template?: string; + }>; } export class Config { @@ -204,6 +209,11 @@ export class Config { temperature?: number; max_tokens?: number; }; + private readonly systemPromptMappings?: Array<{ + baseUrls?: string[]; + modelNames?: string[]; + template?: string; + }>; private modelSwitchedDuringSession: boolean = false; private readonly maxSessionTurns: number; private readonly listExtensions: boolean; @@ -258,6 +268,7 @@ export class Config { this.ideMode = params.ideMode ?? false; this.enableOpenAILogging = params.enableOpenAILogging ?? 
false; this.sampling_params = params.sampling_params; + this.systemPromptMappings = params.systemPromptMappings; if (params.contextFileName) { setGeminiMdFilename(params.contextFileName); @@ -540,6 +551,16 @@ export class Config { return this.enableOpenAILogging; } + getSystemPromptMappings(): + | Array<{ + baseUrls?: string[]; + modelNames?: string[]; + template?: string; + }> + | undefined { + return this.systemPromptMappings; + } + async refreshMemory(): Promise<{ memoryContent: string; fileCount: number }> { const { memoryContent, fileCount } = await loadServerHierarchicalMemory( this.getWorkingDir(), diff --git a/packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts b/packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts index fb78c568..81c77b61 100644 --- a/packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts +++ b/packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts @@ -33,6 +33,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => { // Reset mocks vi.clearAllMocks(); + // Mock environment variables + vi.stubEnv('OPENAI_BASE_URL', ''); + // Mock config mockConfig = { getContentGeneratorConfig: vi.fn().mockReturnValue({ @@ -55,7 +58,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => { vi.mocked(OpenAI).mockImplementation(() => mockOpenAIClient); // Create generator instance - generator = new OpenAIContentGenerator('test-api-key', 'gpt-4', mockConfig); + generator = new OpenAIContentGenerator('test-key', 'gpt-4', mockConfig); }); afterEach(() => { diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index 03793bda..43a17f2c 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -198,6 +198,7 @@ describe('Gemini Client (client.ts)', () => { getQuotaErrorOccurred: vi.fn().mockReturnValue(false), setQuotaErrorOccurred: vi.fn(), getNoBrowser: vi.fn().mockReturnValue(false), + getSystemPromptMappings: 
vi.fn().mockReturnValue(undefined), }; return mock as unknown as Config; }); diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index c5b91db5..20d2cb9a 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -238,7 +238,10 @@ export class GeminiClient { ]; try { const userMemory = this.config.getUserMemory(); - const systemInstruction = getCoreSystemPrompt(userMemory); + const systemPromptMappings = this.config.getSystemPromptMappings(); + const systemInstruction = getCoreSystemPrompt(userMemory, { + systemPromptMappings, + }); const generateContentConfigWithThinking = isThinkingSupported( this.config.getModel(), ) @@ -354,7 +357,10 @@ export class GeminiClient { model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL; try { const userMemory = this.config.getUserMemory(); - const systemInstruction = getCoreSystemPrompt(userMemory); + const systemPromptMappings = this.config.getSystemPromptMappings(); + const systemInstruction = getCoreSystemPrompt(userMemory, { + systemPromptMappings, + }); const requestConfig = { abortSignal, ...this.generateContentConfig, @@ -470,7 +476,10 @@ export class GeminiClient { try { const userMemory = this.config.getUserMemory(); - const systemInstruction = getCoreSystemPrompt(userMemory); + const systemPromptMappings = this.config.getSystemPromptMappings(); + const systemInstruction = getCoreSystemPrompt(userMemory, { + systemPromptMappings, + }); const requestConfig = { abortSignal, diff --git a/packages/core/src/core/prompts.test.ts b/packages/core/src/core/prompts.test.ts index bb7b0b52..028f044f 100644 --- a/packages/core/src/core/prompts.test.ts +++ b/packages/core/src/core/prompts.test.ts @@ -106,3 +106,96 @@ describe('Core System Prompt (prompts.ts)', () => { expect(prompt).toMatchSnapshot(); }); }); + +describe('URL matching with trailing slash compatibility', () => { + it('should match URLs with and without trailing slash', () => { + const config = { + 
systemPromptMappings: [ + { + baseUrls: ['https://api.example.com'], + modelNames: ['gpt-4'], + template: 'Custom template for example.com', + }, + { + baseUrls: ['https://api.openai.com/'], + modelNames: ['gpt-3.5-turbo'], + template: 'Custom template for openai.com', + }, + ], + }; + + // Simulate environment variables + const originalEnv = process.env; + + // Test case 1: No trailing slash in config, actual URL has trailing slash + process.env = { + ...originalEnv, + OPENAI_BASE_URL: 'https://api.example.com/', + OPENAI_MODEL: 'gpt-4', + }; + + const result1 = getCoreSystemPrompt(undefined, config); + expect(result1).toContain('Custom template for example.com'); + + // Test case 2: Config has trailing slash, actual URL has no trailing slash + process.env = { + ...originalEnv, + OPENAI_BASE_URL: 'https://api.openai.com', + OPENAI_MODEL: 'gpt-3.5-turbo', + }; + + const result2 = getCoreSystemPrompt(undefined, config); + expect(result2).toContain('Custom template for openai.com'); + + // Test case 3: No trailing slash in config, actual URL has no trailing slash + process.env = { + ...originalEnv, + OPENAI_BASE_URL: 'https://api.example.com', + OPENAI_MODEL: 'gpt-4', + }; + + const result3 = getCoreSystemPrompt(undefined, config); + expect(result3).toContain('Custom template for example.com'); + + // Test case 4: Config has trailing slash, actual URL has trailing slash + process.env = { + ...originalEnv, + OPENAI_BASE_URL: 'https://api.openai.com/', + OPENAI_MODEL: 'gpt-3.5-turbo', + }; + + const result4 = getCoreSystemPrompt(undefined, config); + expect(result4).toContain('Custom template for openai.com'); + + // Restore original environment variables + process.env = originalEnv; + }); + + it('should not match when URLs are different', () => { + const config = { + systemPromptMappings: [ + { + baseUrls: ['https://api.example.com'], + modelNames: ['gpt-4'], + template: 'Custom template for example.com', + }, + ], + }; + + const originalEnv = process.env; + + // Test 
case: URLs do not match + process.env = { + ...originalEnv, + OPENAI_BASE_URL: 'https://api.different.com', + OPENAI_MODEL: 'gpt-4', + }; + + const result = getCoreSystemPrompt(undefined, config); + // Should return default template, not contain custom template + expect(result).not.toContain('Custom template for example.com'); + + // Restore original environment variables + process.env = originalEnv; + }); +}); diff --git a/packages/core/src/core/prompts.ts b/packages/core/src/core/prompts.ts index 08103860..e30d23a6 100644 --- a/packages/core/src/core/prompts.ts +++ b/packages/core/src/core/prompts.ts @@ -18,7 +18,35 @@ import process from 'node:process'; import { isGitRepository } from '../utils/gitUtils.js'; import { MemoryTool, GEMINI_CONFIG_DIR } from '../tools/memoryTool.js'; -export function getCoreSystemPrompt(userMemory?: string): string { +export interface ModelTemplateMapping { + baseUrls?: string[]; + modelNames?: string[]; + template?: string; +} + +export interface SystemPromptConfig { + systemPromptMappings?: ModelTemplateMapping[]; +} + +/** + * Normalizes a URL by removing trailing slash for consistent comparison + */ +function normalizeUrl(url: string): string { + return url.endsWith('/') ? 
url.slice(0, -1) : url; +} + +/** + * Checks if a URL matches any URL in the array, ignoring trailing slashes + */ +function urlMatches(urlArray: string[], targetUrl: string): boolean { + const normalizedTarget = normalizeUrl(targetUrl); + return urlArray.some((url) => normalizeUrl(url) === normalizedTarget); +} + +export function getCoreSystemPrompt( + userMemory?: string, + config?: SystemPromptConfig, +): string { // if GEMINI_SYSTEM_MD is set (and not 0|false), override system prompt from file // default path is .qwen/system.md but can be modified via custom path in GEMINI_SYSTEM_MD let systemMdEnabled = false; @@ -34,6 +62,52 @@ export function getCoreSystemPrompt(userMemory?: string): string { throw new Error(`missing system prompt file '${systemMdPath}'`); } } + + // Check for system prompt mappings from global config + if (config?.systemPromptMappings) { + const currentModel = process.env.OPENAI_MODEL || ''; + const currentBaseUrl = process.env.OPENAI_BASE_URL || ''; + + const matchedMapping = config.systemPromptMappings.find((mapping) => { + const { baseUrls, modelNames } = mapping; + // Check if baseUrl matches (when specified) + if ( + baseUrls && + modelNames && + urlMatches(baseUrls, currentBaseUrl) && + modelNames.includes(currentModel) + ) { + return true; + } + + if (baseUrls && urlMatches(baseUrls, currentBaseUrl) && !modelNames) { + return true; + } + if (modelNames && modelNames.includes(currentModel) && !baseUrls) { + return true; + } + + return false; + }); + + if (matchedMapping?.template) { + const isGitRepo = isGitRepository(process.cwd()); + + // Replace placeholders in template + let template = matchedMapping.template; + template = template.replace( + '{RUNTIME_VARS_IS_GIT_REPO}', + String(isGitRepo), + ); + template = template.replace( + '{RUNTIME_VARS_SANDBOX}', + process.env.SANDBOX || '', + ); + + return template; + } + } + const basePrompt = systemMdEnabled ? 
fs.readFileSync(systemMdPath, 'utf8') : ` @@ -256,6 +330,7 @@ Your core function is efficient and safe assistance. Balance extreme conciseness `.trim(); // if GEMINI_WRITE_SYSTEM_MD is set (and not 0|false), write base system prompt to file + const writeSystemMdVar = process.env.GEMINI_WRITE_SYSTEM_MD?.toLowerCase(); if (writeSystemMdVar && !['0', 'false'].includes(writeSystemMdVar)) { if (['1', 'true'].includes(writeSystemMdVar)) {