Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-19 09:33:53 +00:00)
feat: Add systemPromptMappings Configuration Feature (#108)
* feat: update system prompt for qwen3-coder
* feat: add default systemPromptMappings for Qwen models
  - Add default systemPromptMappings configuration for the qwen3-coder-plus model
  - Support DashScope compatible-mode API endpoints
  - Include the Qwen coder system prompt template with git repository and sandbox placeholders
  - Add comprehensive test coverage for default and custom systemPromptMappings
  - Update documentation to reflect the new default configuration behavior
  - Ensure backward compatibility with existing user configurations
* feat: remove default system prompt template
* fix: test ci
* feat: handle code indentation issues
* feat: update prompt.test.snapshots
* feat: add URL trailing slash compatibility for system prompt mappings
  - Add normalizeUrl() function to standardize URLs by removing trailing slashes
  - Add urlMatches() function to compare URLs ignoring trailing slash differences
  - Replace direct includes() comparison with urlMatches() for baseUrl matching
  - Add comprehensive tests to verify URL matching with/without trailing slashes
  - Fixes issue where URLs like 'https://api.example.com' and 'https://api.example.com/' were treated as different
* feat: update code
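The trailing-slash behavior described in the commit message comes down to two small helpers added to `prompts.ts` (shown in full in the diff below). A standalone sketch of their effect:

```ts
// Copies of the two helpers this commit adds to prompts.ts,
// shown here in isolation to illustrate the matching behavior.
function normalizeUrl(url: string): string {
  return url.endsWith('/') ? url.slice(0, -1) : url;
}

function urlMatches(urlArray: string[], targetUrl: string): boolean {
  const normalizedTarget = normalizeUrl(targetUrl);
  return urlArray.some((url) => normalizeUrl(url) === normalizedTarget);
}

// Trailing slashes no longer affect matching:
console.log(urlMatches(['https://api.example.com'], 'https://api.example.com/')); // true
console.log(urlMatches(['https://api.example.com/'], 'https://api.example.com')); // true
console.log(urlMatches(['https://api.example.com'], 'https://api.other.com')); // false
```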
@@ -215,6 +215,38 @@ In addition to a project settings file, a project's `.gemini` directory can cont
   "enableOpenAILogging": true
   ```

+- **`systemPromptMappings`** (array):
+  - **Description:** Configures custom system prompt templates for specific model names and base URLs. This allows you to use different system prompts for different AI models or API endpoints.
+  - **Default:** `undefined` (uses the default system prompt)
+  - **Properties:**
+    - **`baseUrls`** (array of strings, optional): Base URLs matched exactly against the `OPENAI_BASE_URL` environment variable. If not specified, the mapping matches any base URL.
+    - **`modelNames`** (array of strings, optional): Model names matched exactly against the `OPENAI_MODEL` environment variable. If not specified, the mapping matches any model.
+    - **`template`** (string): The system prompt template to use when both `baseUrls` and `modelNames` match. Supports placeholders:
+      - `{RUNTIME_VARS_IS_GIT_REPO}`: Replaced with `true` or `false` depending on whether the current directory is a git repository.
+      - `{RUNTIME_VARS_SANDBOX}`: Replaced with the sandbox type (e.g., `"sandbox-exec"`, `"docker"`, or an empty string).
+  - **Example:**
+
+    ```json
+    "systemPromptMappings": [
+      {
+        "baseUrls": [
+          "https://dashscope.aliyuncs.com/compatible-mode/v1",
+          "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+        ],
+        "modelNames": ["qwen3-coder-plus"],
+        "template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}"
+      },
+      {
+        "modelNames": ["gpt-4"],
+        "template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}"
+      },
+      {
+        "baseUrls": ["api.openai.com"],
+        "template": "You are an AI coding assistant. Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}"
+      }
+    ]
+    ```
+
 ### Example `settings.json`:

 ```json
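The placeholders are plain string substitutions performed by `getCoreSystemPrompt` (see the `prompts.ts` hunk later in this diff). A minimal sketch of what a matched template expands to, with illustrative stand-in values:

```ts
// Sketch of the placeholder substitution; the committed code derives
// these values from isGitRepository(process.cwd()) and process.env.SANDBOX.
const template =
  'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}';

const isGitRepo = true; // stand-in for isGitRepository(process.cwd())
const sandbox = 'docker'; // stand-in for process.env.SANDBOX || ''

const rendered = template
  .replace('{RUNTIME_VARS_IS_GIT_REPO}', String(isGitRepo))
  .replace('{RUNTIME_VARS_SANDBOX}', sandbox);

console.log(rendered);
// SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":true,"sandbox":"docker"}}
```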
@@ -242,7 +274,22 @@ In addition to a project settings file, a project's `.gemini` directory can cont
   "hideTips": false,
   "hideBanner": false,
   "maxSessionTurns": 10,
-  "enableOpenAILogging": true
+  "enableOpenAILogging": true,
+  "systemPromptMappings": [
+    {
+      "baseUrls": ["https://dashscope.aliyuncs.com/compatible-mode/v1"],
+      "modelNames": ["qwen3-coder-plus"],
+      "template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}"
+    },
+    {
+      "modelNames": ["gpt-4"],
+      "template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}"
+    },
+    {
+      "baseUrls": ["api.openai.com"],
+      "template": "You are an AI coding assistant. Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}"
+    }
+  ]
 }
 ```
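How an entry is chosen: a mapping matches when every field it specifies matches the current environment, and the first matching entry in the array wins (`Array.prototype.find`). A condensed sketch of the rule implemented in `getCoreSystemPrompt` below; `selectTemplate` is illustrative, not part of the commit, and URL normalization is omitted here for brevity:

```ts
interface ModelTemplateMapping {
  baseUrls?: string[];
  modelNames?: string[];
  template?: string;
}

// First entry whose specified constraints all hold wins; an entry with
// neither baseUrls nor modelNames never matches (as in the committed code).
function selectTemplate(
  mappings: ModelTemplateMapping[],
  baseUrl: string,
  model: string,
): string | undefined {
  const matched = mappings.find(({ baseUrls, modelNames }) => {
    if (!baseUrls && !modelNames) return false;
    const urlOk = !baseUrls || baseUrls.includes(baseUrl); // committed code compares via urlMatches()
    const modelOk = !modelNames || modelNames.includes(model);
    return urlOk && modelOk;
  });
  return matched?.template;
}
```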
@@ -6,7 +6,7 @@
 import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
 import * as os from 'os';
-import { loadCliConfig, parseArguments } from './config.js';
+import { loadCliConfig, parseArguments, CliArgs } from './config.js';
 import { Settings } from './settings.js';
 import { Extension } from './extension.js';
 import * as ServerConfig from '@qwen-code/qwen-code-core';
@@ -1001,9 +1001,73 @@ describe('loadCliConfig ideMode', () => {
     const config = await loadCliConfig(settings, [], 'test-session', argv);
     expect(config.getIdeMode()).toBe(true);
     const mcpServers = config.getMcpServers();
-    expect(mcpServers['_ide_server']).toBeDefined();
-    expect(mcpServers['_ide_server'].httpUrl).toBe('http://localhost:3000/mcp');
-    expect(mcpServers['_ide_server'].description).toBe('IDE connection');
-    expect(mcpServers['_ide_server'].trust).toBe(false);
+    expect(mcpServers?.['_ide_server']).toBeDefined();
+    expect(mcpServers?.['_ide_server']?.httpUrl).toBe(
+      'http://localhost:3000/mcp',
+    );
+    expect(mcpServers?.['_ide_server']?.description).toBe('IDE connection');
+    expect(mcpServers?.['_ide_server']?.trust).toBe(false);
+  });
+});
+
+describe('loadCliConfig systemPromptMappings', () => {
+  it('should use default systemPromptMappings when not provided in settings', async () => {
+    const mockSettings: Settings = {
+      theme: 'dark',
+    };
+    const mockExtensions: Extension[] = [];
+    const mockSessionId = 'test-session';
+    const mockArgv: CliArgs = {
+      model: 'test-model',
+    } as CliArgs;
+
+    const config = await loadCliConfig(
+      mockSettings,
+      mockExtensions,
+      mockSessionId,
+      mockArgv,
+    );
+
+    expect(config.getSystemPromptMappings()).toEqual([
+      {
+        baseUrls: [
+          'https://dashscope.aliyuncs.com/compatible-mode/v1/',
+          'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/',
+        ],
+        modelNames: ['qwen3-coder-plus'],
+        template:
+          'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
+      },
+    ]);
+  });
+
+  it('should use custom systemPromptMappings when provided in settings', async () => {
+    const customSystemPromptMappings = [
+      {
+        baseUrls: ['https://custom-api.com'],
+        modelNames: ['custom-model'],
+        template: 'Custom template',
+      },
+    ];
+    const mockSettings: Settings = {
+      theme: 'dark',
+      systemPromptMappings: customSystemPromptMappings,
+    };
+    const mockExtensions: Extension[] = [];
+    const mockSessionId = 'test-session';
+    const mockArgv: CliArgs = {
+      model: 'test-model',
+    } as CliArgs;
+
+    const config = await loadCliConfig(
+      mockSettings,
+      mockExtensions,
+      mockSessionId,
+      mockArgv,
+    );
+
+    expect(config.getSystemPromptMappings()).toEqual(
+      customSystemPromptMappings,
+    );
   });
 });
@@ -394,6 +394,17 @@ export async function loadCliConfig(
       ? settings.enableOpenAILogging
       : argv.openaiLogging) ?? false,
     sampling_params: settings.sampling_params,
+    systemPromptMappings: settings.systemPromptMappings ?? [
+      {
+        baseUrls: [
+          'https://dashscope.aliyuncs.com/compatible-mode/v1/',
+          'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/',
+        ],
+        modelNames: ['qwen3-coder-plus'],
+        template:
+          'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
+      },
+    ],
   });
 }
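Because the fallback uses `??` rather than `||`, only a missing (`undefined`/`null`) setting falls back to the built-in DashScope mapping; an explicitly empty array is kept as-is, which effectively opts out of the default. A quick sketch of the semantics:

```ts
const defaultMappings = [{ modelNames: ['qwen3-coder-plus'] }];

console.log(undefined ?? defaultMappings); // falls back to the default mapping
console.log([] ?? defaultMappings); // [] — an empty array disables the default
```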
@@ -96,6 +96,13 @@ export interface Settings {
     max_tokens?: number;
   };

+  // System prompt mappings for different base URLs and model names
+  systemPromptMappings?: Array<{
+    baseUrls?: string[];
+    modelNames?: string[];
+    template?: string;
+  }>;
+
   // Add other settings here.
   ideMode?: boolean;
 }
@@ -154,6 +154,11 @@ export interface ConfigParameters {
     temperature?: number;
     max_tokens?: number;
   };
+  systemPromptMappings?: Array<{
+    baseUrls?: string[];
+    modelNames?: string[];
+    template?: string;
+  }>;
 }

 export class Config {
@@ -204,6 +209,11 @@ export class Config {
     temperature?: number;
     max_tokens?: number;
   };
+  private readonly systemPromptMappings?: Array<{
+    baseUrls?: string[];
+    modelNames?: string[];
+    template?: string;
+  }>;
   private modelSwitchedDuringSession: boolean = false;
   private readonly maxSessionTurns: number;
   private readonly listExtensions: boolean;
@@ -258,6 +268,7 @@ export class Config {
     this.ideMode = params.ideMode ?? false;
     this.enableOpenAILogging = params.enableOpenAILogging ?? false;
     this.sampling_params = params.sampling_params;
+    this.systemPromptMappings = params.systemPromptMappings;

     if (params.contextFileName) {
       setGeminiMdFilename(params.contextFileName);
@@ -540,6 +551,16 @@ export class Config {
     return this.enableOpenAILogging;
   }

+  getSystemPromptMappings():
+    | Array<{
+        baseUrls?: string[];
+        modelNames?: string[];
+        template?: string;
+      }>
+    | undefined {
+    return this.systemPromptMappings;
+  }
+
   async refreshMemory(): Promise<{ memoryContent: string; fileCount: number }> {
     const { memoryContent, fileCount } = await loadServerHierarchicalMemory(
       this.getWorkingDir(),
@@ -33,6 +33,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
     // Reset mocks
     vi.clearAllMocks();

+    // Mock environment variables
+    vi.stubEnv('OPENAI_BASE_URL', '');
+
     // Mock config
     mockConfig = {
       getContentGeneratorConfig: vi.fn().mockReturnValue({
@@ -55,7 +58,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
     vi.mocked(OpenAI).mockImplementation(() => mockOpenAIClient);

     // Create generator instance
-    generator = new OpenAIContentGenerator('test-api-key', 'gpt-4', mockConfig);
+    generator = new OpenAIContentGenerator('test-key', 'gpt-4', mockConfig);
   });

   afterEach(() => {
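The new `vi.stubEnv('OPENAI_BASE_URL', '')` call matters because `getCoreSystemPrompt` now reads `OPENAI_BASE_URL` and `OPENAI_MODEL`: with an empty value no mapping matches, so the test keeps exercising the default prompt. A minimal sketch of the stub/restore pairing (the restore is not part of this hunk):

```ts
import { afterEach, vi } from 'vitest';

// Stub the variable so no systemPromptMapping matches during the test...
vi.stubEnv('OPENAI_BASE_URL', '');

// ...and restore all stubbed variables afterwards.
afterEach(() => {
  vi.unstubAllEnvs();
});
```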
@@ -198,6 +198,7 @@ describe('Gemini Client (client.ts)', () => {
       getQuotaErrorOccurred: vi.fn().mockReturnValue(false),
       setQuotaErrorOccurred: vi.fn(),
       getNoBrowser: vi.fn().mockReturnValue(false),
+      getSystemPromptMappings: vi.fn().mockReturnValue(undefined),
     };
     return mock as unknown as Config;
   });
@@ -238,7 +238,10 @@ export class GeminiClient {
     ];
     try {
       const userMemory = this.config.getUserMemory();
-      const systemInstruction = getCoreSystemPrompt(userMemory);
+      const systemPromptMappings = this.config.getSystemPromptMappings();
+      const systemInstruction = getCoreSystemPrompt(userMemory, {
+        systemPromptMappings,
+      });
       const generateContentConfigWithThinking = isThinkingSupported(
         this.config.getModel(),
       )
@@ -354,7 +357,10 @@ export class GeminiClient {
       model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL;
     try {
       const userMemory = this.config.getUserMemory();
-      const systemInstruction = getCoreSystemPrompt(userMemory);
+      const systemPromptMappings = this.config.getSystemPromptMappings();
+      const systemInstruction = getCoreSystemPrompt(userMemory, {
+        systemPromptMappings,
+      });
       const requestConfig = {
         abortSignal,
         ...this.generateContentConfig,
@@ -470,7 +476,10 @@ export class GeminiClient {

     try {
       const userMemory = this.config.getUserMemory();
-      const systemInstruction = getCoreSystemPrompt(userMemory);
+      const systemPromptMappings = this.config.getSystemPromptMappings();
+      const systemInstruction = getCoreSystemPrompt(userMemory, {
+        systemPromptMappings,
+      });

       const requestConfig = {
         abortSignal,
@@ -106,3 +106,96 @@ describe('Core System Prompt (prompts.ts)', () => {
     expect(prompt).toMatchSnapshot();
   });
 });
+
+describe('URL matching with trailing slash compatibility', () => {
+  it('should match URLs with and without trailing slash', () => {
+    const config = {
+      systemPromptMappings: [
+        {
+          baseUrls: ['https://api.example.com'],
+          modelNames: ['gpt-4'],
+          template: 'Custom template for example.com',
+        },
+        {
+          baseUrls: ['https://api.openai.com/'],
+          modelNames: ['gpt-3.5-turbo'],
+          template: 'Custom template for openai.com',
+        },
+      ],
+    };
+
+    // Simulate environment variables
+    const originalEnv = process.env;
+
+    // Test case 1: no trailing slash in config, actual URL has a trailing slash
+    process.env = {
+      ...originalEnv,
+      OPENAI_BASE_URL: 'https://api.example.com/',
+      OPENAI_MODEL: 'gpt-4',
+    };
+
+    const result1 = getCoreSystemPrompt(undefined, config);
+    expect(result1).toContain('Custom template for example.com');
+
+    // Test case 2: config has a trailing slash, actual URL has none
+    process.env = {
+      ...originalEnv,
+      OPENAI_BASE_URL: 'https://api.openai.com',
+      OPENAI_MODEL: 'gpt-3.5-turbo',
+    };
+
+    const result2 = getCoreSystemPrompt(undefined, config);
+    expect(result2).toContain('Custom template for openai.com');
+
+    // Test case 3: no trailing slash in config or in the actual URL
+    process.env = {
+      ...originalEnv,
+      OPENAI_BASE_URL: 'https://api.example.com',
+      OPENAI_MODEL: 'gpt-4',
+    };
+
+    const result3 = getCoreSystemPrompt(undefined, config);
+    expect(result3).toContain('Custom template for example.com');
+
+    // Test case 4: trailing slash in both config and actual URL
+    process.env = {
+      ...originalEnv,
+      OPENAI_BASE_URL: 'https://api.openai.com/',
+      OPENAI_MODEL: 'gpt-3.5-turbo',
+    };
+
+    const result4 = getCoreSystemPrompt(undefined, config);
+    expect(result4).toContain('Custom template for openai.com');
+
+    // Restore original environment variables
+    process.env = originalEnv;
+  });
+
+  it('should not match when URLs are different', () => {
+    const config = {
+      systemPromptMappings: [
+        {
+          baseUrls: ['https://api.example.com'],
+          modelNames: ['gpt-4'],
+          template: 'Custom template for example.com',
+        },
+      ],
+    };
+
+    const originalEnv = process.env;
+
+    // Test case: URLs do not match
+    process.env = {
+      ...originalEnv,
+      OPENAI_BASE_URL: 'https://api.different.com',
+      OPENAI_MODEL: 'gpt-4',
+    };
+
+    const result = getCoreSystemPrompt(undefined, config);
+    // Should return the default template, not the custom one
+    expect(result).not.toContain('Custom template for example.com');
+
+    // Restore original environment variables
+    process.env = originalEnv;
+  });
+});
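One caveat about the tests above: `process.env` is restored at the end of each test body, so a failing `expect()` would skip the restore and leak env state into later tests. A hedged sketch of the conventional fix (not part of this commit):

```ts
import { afterEach, beforeEach } from 'vitest';

let originalEnv: NodeJS.ProcessEnv;

beforeEach(() => {
  originalEnv = process.env;
});

afterEach(() => {
  // Runs even when an assertion in the test body throws.
  process.env = originalEnv;
});
```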
@@ -18,7 +18,35 @@ import process from 'node:process';
 import { isGitRepository } from '../utils/gitUtils.js';
 import { MemoryTool, GEMINI_CONFIG_DIR } from '../tools/memoryTool.js';

-export function getCoreSystemPrompt(userMemory?: string): string {
+export interface ModelTemplateMapping {
+  baseUrls?: string[];
+  modelNames?: string[];
+  template?: string;
+}
+
+export interface SystemPromptConfig {
+  systemPromptMappings?: ModelTemplateMapping[];
+}
+
+/**
+ * Normalizes a URL by removing the trailing slash for consistent comparison.
+ */
+function normalizeUrl(url: string): string {
+  return url.endsWith('/') ? url.slice(0, -1) : url;
+}
+
+/**
+ * Checks whether a URL matches any URL in the array, ignoring trailing slashes.
+ */
+function urlMatches(urlArray: string[], targetUrl: string): boolean {
+  const normalizedTarget = normalizeUrl(targetUrl);
+  return urlArray.some((url) => normalizeUrl(url) === normalizedTarget);
+}
+
+export function getCoreSystemPrompt(
+  userMemory?: string,
+  config?: SystemPromptConfig,
+): string {
   // if GEMINI_SYSTEM_MD is set (and not 0|false), override system prompt from file
   // default path is .qwen/system.md but can be modified via custom path in GEMINI_SYSTEM_MD
   let systemMdEnabled = false;
@@ -34,6 +62,52 @@ export function getCoreSystemPrompt(userMemory?: string): string {
       throw new Error(`missing system prompt file '${systemMdPath}'`);
     }
   }
+
+  // Check for system prompt mappings from global config
+  if (config?.systemPromptMappings) {
+    const currentModel = process.env.OPENAI_MODEL || '';
+    const currentBaseUrl = process.env.OPENAI_BASE_URL || '';
+
+    const matchedMapping = config.systemPromptMappings.find((mapping) => {
+      const { baseUrls, modelNames } = mapping;
+      // Check if baseUrl matches (when specified)
+      if (
+        baseUrls &&
+        modelNames &&
+        urlMatches(baseUrls, currentBaseUrl) &&
+        modelNames.includes(currentModel)
+      ) {
+        return true;
+      }
+
+      if (baseUrls && urlMatches(baseUrls, currentBaseUrl) && !modelNames) {
+        return true;
+      }
+      if (modelNames && modelNames.includes(currentModel) && !baseUrls) {
+        return true;
+      }
+
+      return false;
+    });
+
+    if (matchedMapping?.template) {
+      const isGitRepo = isGitRepository(process.cwd());
+
+      // Replace placeholders in template
+      let template = matchedMapping.template;
+      template = template.replace(
+        '{RUNTIME_VARS_IS_GIT_REPO}',
+        String(isGitRepo),
+      );
+      template = template.replace(
+        '{RUNTIME_VARS_SANDBOX}',
+        process.env.SANDBOX || '',
+      );
+
+      return template;
+    }
+  }
+
   const basePrompt = systemMdEnabled
     ? fs.readFileSync(systemMdPath, 'utf8')
     : `
@@ -256,6 +330,7 @@ Your core function is efficient and safe assistance. Balance extreme conciseness
 `.trim();
+
   // if GEMINI_WRITE_SYSTEM_MD is set (and not 0|false), write base system prompt to file
   const writeSystemMdVar = process.env.GEMINI_WRITE_SYSTEM_MD?.toLowerCase();
   if (writeSystemMdVar && !['0', 'false'].includes(writeSystemMdVar)) {
     if (['1', 'true'].includes(writeSystemMdVar)) {
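Putting the pieces together, a hedged end-to-end sketch of the new flow (the import path and values are illustrative):

```ts
import { getCoreSystemPrompt } from './prompts.js'; // path assumed relative to this package

// Note the env URL has no trailing slash while the mapping below has one;
// urlMatches() treats the two forms as equal.
process.env.OPENAI_BASE_URL = 'https://dashscope.aliyuncs.com/compatible-mode/v1';
process.env.OPENAI_MODEL = 'qwen3-coder-plus';

const systemInstruction = getCoreSystemPrompt(undefined, {
  systemPromptMappings: [
    {
      baseUrls: ['https://dashscope.aliyuncs.com/compatible-mode/v1/'],
      modelNames: ['qwen3-coder-plus'],
      template:
        'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}',
    },
  ],
});

// systemInstruction is the SYSTEM_TEMPLATE string with both RUNTIME_VARS
// placeholders substituted for the current process.
console.log(systemInstruction);
```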