This commit is contained in:
koalazf99
2025-08-01 01:44:13 +08:00
parent bdf946a321
commit 9d1ffb4af6
13 changed files with 58 additions and 58 deletions

View File

@@ -44,7 +44,7 @@ jobs:
- name: Run E2E tests
env:
-OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
-OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
+QWEN_CODE_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+QWEN_CODE_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
+QWEN_CODE_MODEL: ${{ secrets.OPENAI_MODEL }}
run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output

View File

@@ -97,17 +97,17 @@ Qwen Code supports multiple API providers. You can configure your API key throug
1. **Environment Variables**
```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="your_api_endpoint"
-export OPENAI_MODEL="your_model_choice"
+export QWEN_CODE_API_KEY="your_api_key_here"
+export QWEN_CODE_BASE_URL="your_api_endpoint"
+export QWEN_CODE_MODEL="your_model_choice"
```
2. **Project `.env` File**
Create a `.env` file in your project root:
```env
-OPENAI_API_KEY=your_api_key_here
-OPENAI_BASE_URL=your_api_endpoint
-OPENAI_MODEL=your_model_choice
+QWEN_CODE_API_KEY=your_api_key_here
+QWEN_CODE_BASE_URL=your_api_endpoint
+QWEN_CODE_MODEL=your_model_choice
```
#### API Provider Options
@@ -123,9 +123,9 @@ Qwen Code supports multiple API providers. You can configure your API key throug
**Option 1: Alibaba Cloud Bailian** ([Apply for API Key](https://bailian.console.aliyun.com/))
```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
-export OPENAI_MODEL="qwen3-coder-plus"
+export QWEN_CODE_API_KEY="your_api_key_here"
+export QWEN_CODE_BASE_URL="https://dashscope.aliyuncs.com/compatible-mode/v1"
+export QWEN_CODE_MODEL="qwen3-coder-plus"
```
**Option 2: ModelScope (Free Tier)** ([Apply for API Key](https://modelscope.cn/docs/model-service/API-Inference/intro))
@@ -134,9 +134,9 @@ export OPENAI_MODEL="qwen3-coder-plus"
- ⚠️ Connect your Aliyun account to avoid authentication errors
```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://api-inference.modelscope.cn/v1"
-export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
+export QWEN_CODE_API_KEY="your_api_key_here"
+export QWEN_CODE_BASE_URL="https://api-inference.modelscope.cn/v1"
+export QWEN_CODE_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
```
</details>
@@ -147,17 +147,17 @@ export OPENAI_MODEL="Qwen/Qwen3-Coder-480B-A35B-Instruct"
**Option 1: Alibaba Cloud ModelStudio** ([Apply for API Key](https://modelstudio.console.alibabacloud.com/))
```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
-export OPENAI_MODEL="qwen3-coder-plus"
+export QWEN_CODE_API_KEY="your_api_key_here"
+export QWEN_CODE_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+export QWEN_CODE_MODEL="qwen3-coder-plus"
```
**Option 2: OpenRouter (Free Tier Available)** ([Apply for API Key](https://openrouter.ai/))
```bash
-export OPENAI_API_KEY="your_api_key_here"
-export OPENAI_BASE_URL="https://openrouter.ai/api/v1"
-export OPENAI_MODEL="qwen/qwen3-coder:free"
+export QWEN_CODE_API_KEY="your_api_key_here"
+export QWEN_CODE_BASE_URL="https://openrouter.ai/api/v1"
+export QWEN_CODE_MODEL="qwen/qwen3-coder:free"
```
</details>

View File

@@ -219,8 +219,8 @@ In addition to a project settings file, a project's `.gemini` directory can cont
- **Description:** Configures custom system prompt templates for specific model names and base URLs. This allows you to use different system prompts for different AI models or API endpoints.
- **Default:** `undefined` (uses default system prompt)
- **Properties:**
-- **`baseUrls`** (array of strings, optional): Array of base URLs to exactly match against `OPENAI_BASE_URL` environment variable. If not specified, matches any base URL.
-- **`modelNames`** (array of strings, optional): Array of model names to exactly match against `OPENAI_MODEL` environment variable. If not specified, matches any model.
+- **`baseUrls`** (array of strings, optional): Array of base URLs to exactly match against `QWEN_CODE_BASE_URL` environment variable. If not specified, matches any base URL.
+- **`modelNames`** (array of strings, optional): Array of model names to exactly match against `QWEN_CODE_MODEL` environment variable. If not specified, matches any model.
- **`template`** (string): The system prompt template to use when both baseUrl and modelNames match. Supports placeholders:
- `{RUNTIME_VARS_IS_GIT_REPO}`: Replaced with `true` or `false` based on whether the current directory is a git repository
- `{RUNTIME_VARS_SANDBOX}`: Replaced with the sandbox type (e.g., `"sandbox-exec"`, `"docker"`, or empty string)

View File

@@ -40,9 +40,9 @@ qwen-code --openai-api-key "your-api-key-here" --model "gpt-4-turbo"
Set the following environment variables in your shell or `.env` file:
```bash
-export OPENAI_API_KEY="your-api-key-here"
-export OPENAI_BASE_URL="https://api.openai.com/v1" # Optional, defaults to this value
-export OPENAI_MODEL="gpt-4o" # Optional, defaults to gpt-4o
+export QWEN_CODE_API_KEY="your-api-key-here"
+export QWEN_CODE_BASE_URL="https://api.openai.com/v1" # Optional, defaults to this value
+export QWEN_CODE_MODEL="gpt-4o" # Optional, defaults to gpt-4o
```
## Supported Models
@@ -58,7 +58,7 @@ The CLI supports all OpenAI models that are available through the OpenAI API, in
## Custom Endpoints
-You can use custom endpoints by setting the `OPENAI_BASE_URL` environment variable or using the `--openai-base-url` command line argument. This is useful for:
+You can use custom endpoints by setting the `QWEN_CODE_BASE_URL` environment variable or using the `--openai-base-url` command line argument. This is useful for:
- Using Azure OpenAI
- Using other OpenAI-compatible APIs

View File

@@ -39,8 +39,8 @@ export const validateAuthMethod = (authMethod: string): string | null => {
}
if (authMethod === AuthType.USE_OPENAI) {
-if (!process.env.OPENAI_API_KEY) {
-return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
+if (!process.env.QWEN_CODE_API_KEY) {
+return 'QWEN_CODE_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
}
return null;
}
@@ -49,13 +49,13 @@ export const validateAuthMethod = (authMethod: string): string | null => {
};
export const setOpenAIApiKey = (apiKey: string): void => {
-process.env.OPENAI_API_KEY = apiKey;
+process.env.QWEN_CODE_API_KEY = apiKey;
};
export const setOpenAIBaseUrl = (baseUrl: string): void => {
-process.env.OPENAI_BASE_URL = baseUrl;
+process.env.QWEN_CODE_BASE_URL = baseUrl;
};
export const setOpenAIModel = (model: string): void => {
-process.env.OPENAI_MODEL = model;
+process.env.QWEN_CODE_MODEL = model;
};

View File

@@ -264,12 +264,12 @@ export async function loadCliConfig(
// Handle OpenAI API key from command line
if (argv.openaiApiKey) {
-process.env.OPENAI_API_KEY = argv.openaiApiKey;
+process.env.QWEN_CODE_API_KEY = argv.openaiApiKey;
}
// Handle OpenAI base URL from command line
if (argv.openaiBaseUrl) {
-process.env.OPENAI_BASE_URL = argv.openaiBaseUrl;
+process.env.QWEN_CODE_BASE_URL = argv.openaiBaseUrl;
}
// Set the context filename in the server's memoryTool module BEFORE loading memory

View File

@@ -517,15 +517,15 @@ export async function start_sandbox(
args.push('--env', `GOOGLE_API_KEY=${process.env.GOOGLE_API_KEY}`);
}
-// copy OPENAI_API_KEY and related env vars for Qwen
-if (process.env.OPENAI_API_KEY) {
-args.push('--env', `OPENAI_API_KEY=${process.env.OPENAI_API_KEY}`);
+// copy QWEN_CODE_API_KEY and related env vars for Qwen
+if (process.env.QWEN_CODE_API_KEY) {
+args.push('--env', `QWEN_CODE_API_KEY=${process.env.QWEN_CODE_API_KEY}`);
}
-if (process.env.OPENAI_BASE_URL) {
-args.push('--env', `OPENAI_BASE_URL=${process.env.OPENAI_BASE_URL}`);
+if (process.env.QWEN_CODE_BASE_URL) {
+args.push('--env', `QWEN_CODE_BASE_URL=${process.env.QWEN_CODE_BASE_URL}`);
}
-if (process.env.OPENAI_MODEL) {
-args.push('--env', `OPENAI_MODEL=${process.env.OPENAI_MODEL}`);
+if (process.env.QWEN_CODE_MODEL) {
+args.push('--env', `QWEN_CODE_MODEL=${process.env.QWEN_CODE_MODEL}`);
}
// copy GOOGLE_GENAI_USE_VERTEXAI

View File

@@ -5,5 +5,5 @@
*/
export const DEFAULT_GEMINI_MODEL = 'qwen3-coder-plus';
-export const DEFAULT_GEMINI_FLASH_MODEL = 'gemini-2.5-flash';
+export const DEFAULT_GEMINI_FLASH_MODEL = 'qwen3-coder-flash';
export const DEFAULT_GEMINI_EMBEDDING_MODEL = 'gemini-embedding-001';

View File

@@ -34,7 +34,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
vi.clearAllMocks();
// Mock environment variables
-vi.stubEnv('OPENAI_BASE_URL', '');
+vi.stubEnv('QWEN_CODE_BASE_URL', '');
// Mock config
mockConfig = {

View File

@@ -75,7 +75,7 @@ export async function createContentGeneratorConfig(
const googleApiKey = process.env.GOOGLE_API_KEY || undefined;
const googleCloudProject = process.env.GOOGLE_CLOUD_PROJECT || undefined;
const googleCloudLocation = process.env.GOOGLE_CLOUD_LOCATION || undefined;
-const openaiApiKey = process.env.OPENAI_API_KEY;
+const openaiApiKey = process.env.QWEN_CODE_API_KEY;
// Use runtime model from config if available, otherwise fallback to parameter or default
const effectiveModel = model || DEFAULT_GEMINI_MODEL;
@@ -117,7 +117,7 @@ export async function createContentGeneratorConfig(
if (authType === AuthType.USE_OPENAI && openaiApiKey) {
contentGeneratorConfig.apiKey = openaiApiKey;
contentGeneratorConfig.model =
-process.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL;
+process.env.QWEN_CODE_MODEL || DEFAULT_GEMINI_MODEL;
return contentGeneratorConfig;
}

View File

@@ -97,7 +97,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
constructor(apiKey: string, model: string, config: Config) {
this.model = model;
this.config = config;
-const baseURL = process.env.OPENAI_BASE_URL || '';
+const baseURL = process.env.QWEN_CODE_BASE_URL || '';
// Configure timeout settings - using progressive timeouts
const timeoutConfig = {

View File

@@ -130,8 +130,8 @@ describe('URL matching with trailing slash compatibility', () => {
// Test case 1: No trailing slash in config, actual URL has trailing slash
process.env = {
...originalEnv,
-OPENAI_BASE_URL: 'https://api.example.com/',
-OPENAI_MODEL: 'gpt-4',
+QWEN_CODE_BASE_URL: 'https://api.example.com/',
+QWEN_CODE_MODEL: 'gpt-4',
};
const result1 = getCoreSystemPrompt(undefined, config);
@@ -140,8 +140,8 @@ describe('URL matching with trailing slash compatibility', () => {
// Test case 2: Config has trailing slash, actual URL has no trailing slash
process.env = {
...originalEnv,
-OPENAI_BASE_URL: 'https://api.openai.com',
-OPENAI_MODEL: 'gpt-3.5-turbo',
+QWEN_CODE_BASE_URL: 'https://api.openai.com',
+QWEN_CODE_MODEL: 'gpt-3.5-turbo',
};
const result2 = getCoreSystemPrompt(undefined, config);
@@ -150,8 +150,8 @@ describe('URL matching with trailing slash compatibility', () => {
// Test case 3: No trailing slash in config, actual URL has no trailing slash
process.env = {
...originalEnv,
-OPENAI_BASE_URL: 'https://api.example.com',
-OPENAI_MODEL: 'gpt-4',
+QWEN_CODE_BASE_URL: 'https://api.example.com',
+QWEN_CODE_MODEL: 'gpt-4',
};
const result3 = getCoreSystemPrompt(undefined, config);
@@ -160,8 +160,8 @@ describe('URL matching with trailing slash compatibility', () => {
// Test case 4: Config has trailing slash, actual URL has trailing slash
process.env = {
...originalEnv,
-OPENAI_BASE_URL: 'https://api.openai.com/',
-OPENAI_MODEL: 'gpt-3.5-turbo',
+QWEN_CODE_BASE_URL: 'https://api.openai.com/',
+QWEN_CODE_MODEL: 'gpt-3.5-turbo',
};
const result4 = getCoreSystemPrompt(undefined, config);
@@ -187,8 +187,8 @@ describe('URL matching with trailing slash compatibility', () => {
// Test case: URLs do not match
process.env = {
...originalEnv,
-OPENAI_BASE_URL: 'https://api.different.com',
-OPENAI_MODEL: 'gpt-4',
+QWEN_CODE_BASE_URL: 'https://api.different.com',
+QWEN_CODE_MODEL: 'gpt-4',
};
const result = getCoreSystemPrompt(undefined, config);

View File

@@ -65,8 +65,8 @@ export function getCoreSystemPrompt(
// Check for system prompt mappings from global config
if (config?.systemPromptMappings) {
-const currentModel = process.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL;
-const currentBaseUrl = process.env.OPENAI_BASE_URL || '';
+const currentModel = process.env.QWEN_CODE_MODEL || DEFAULT_GEMINI_MODEL;
+const currentBaseUrl = process.env.QWEN_CODE_BASE_URL || '';
const matchedMapping = config.systemPromptMappings.find((mapping) => {
const { baseUrls, modelNames } = mapping;