chore: use correct CLI_VERSION for logging

tanzhenxin
2025-08-12 21:00:17 +08:00
parent ef1c8a4bfe
commit cfc1aebee6
9 changed files with 42 additions and 8 deletions

View File

@@ -412,6 +412,7 @@ export async function loadCliConfig(
}
const sandboxConfig = await loadSandboxConfig(settings, argv);
const cliVersion = await getCliVersion();
return new Config({
sessionId,
@@ -497,6 +498,7 @@ export async function loadCliConfig(
},
],
contentGenerator: settings.contentGenerator,
cliVersion,
});
}
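
This hunk resolves the version once at startup: previously the content generators and the RUM logger read process.env.CLI_VERSION and silently fell back to process.version (the Node.js version) when the variable was unset; now loadCliConfig awaits getCliVersion() and hands the result to Config. The helper itself is not part of this commit; a minimal sketch of what such a helper typically looks like (reading the version field from the CLI's own package.json; the relative path and fallback are assumptions, not the real implementation) is:

// Hypothetical sketch only; the real getCliVersion() in the CLI package may
// resolve the version differently (e.g. from a generated constant).
import { createRequire } from 'node:module';

const require = createRequire(import.meta.url);

export async function getCliVersion(): Promise<string> {
  try {
    // Read the version field from the CLI package's package.json
    // (the path relative to this module is an assumption).
    const pkg = require('../package.json') as { version?: string };
    return pkg.version ?? 'unknown';
  } catch {
    return 'unknown';
  }
}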

View File

@@ -208,6 +208,7 @@ export interface ConfigParameters {
timeout?: number;
maxRetries?: number;
};
cliVersion?: string;
}
export class Config {
@@ -281,6 +282,7 @@ export class Config {
timeout?: number;
maxRetries?: number;
};
private readonly cliVersion?: string;
constructor(params: ConfigParameters) {
this.sessionId = params.sessionId;
this.embeddingModel =
@@ -350,6 +352,7 @@ export class Config {
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
this.sampling_params = params.sampling_params;
this.contentGenerator = params.contentGenerator;
this.cliVersion = params.cliVersion;
if (params.contextFileName) {
setGeminiMdFilename(params.contextFileName);
@@ -719,6 +722,10 @@ export class Config {
return this.contentGenerator?.maxRetries;
}
getCliVersion(): string | undefined {
return this.cliVersion;
}
getSystemPromptMappings():
| Array<{
baseUrls?: string[];
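
Taken together, the Config hunks above thread the version through in three steps: cliVersion?: string is accepted in ConfigParameters, stored on a private readonly field in the constructor, and exposed via a getCliVersion() getter. Distilled into one self-contained sketch (all unrelated Config members omitted):

interface ConfigParameters {
  sessionId: string;
  cliVersion?: string;
  // ...other parameters elided
}

class Config {
  private readonly sessionId: string;
  private readonly cliVersion?: string;

  constructor(params: ConfigParameters) {
    this.sessionId = params.sessionId;
    // Version injected once at startup; no process.env lookup here.
    this.cliVersion = params.cliVersion;
  }

  getCliVersion(): string | undefined {
    return this.cliVersion;
  }
}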

View File

@@ -45,6 +45,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
timeout: 120000,
maxRetries: 3,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
// Mock OpenAI client
@@ -256,6 +257,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
timeout: 300000, // 5 minutes
maxRetries: 5,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
new OpenAIContentGenerator('test-key', 'gpt-4', customConfig);
@@ -274,6 +276,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
it('should handle missing timeout config gracefully', () => {
const noTimeoutConfig = {
getContentGeneratorConfig: vi.fn().mockReturnValue({}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
new OpenAIContentGenerator('test-key', 'gpt-4', noTimeoutConfig);
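
Because the generators now call config.getCliVersion() during construction, every Config test double in this file and in the suites further down gains a stub for it; without the stub, constructing the generator would fail with a TypeError on the missing method. The minimal mock shape (the fixture value '1.0.0' is arbitrary) looks like:

import { vi } from 'vitest';
import { Config } from '../config/config.js';

const mockConfig = {
  getContentGeneratorConfig: vi.fn().mockReturnValue({}),
  getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;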

View File

@@ -228,6 +228,7 @@ describe('Gemini Client (client.ts)', () => {
getGeminiClient: vi.fn(),
setFallbackMode: vi.fn(),
getDebugMode: vi.fn().mockReturnValue(false),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
};
const MockedConfig = vi.mocked(Config, true);
MockedConfig.mockImplementation(

View File

@@ -17,7 +17,9 @@ import { Config } from '../config/config.js';
vi.mock('../code_assist/codeAssist.js');
vi.mock('@google/genai');
- const mockConfig = {} as unknown as Config;
+ const mockConfig = {
+   getCliVersion: vi.fn().mockReturnValue('1.0.0'),
+ } as unknown as Config;
describe('createContentGenerator', () => {
it('should create a CodeAssistContentGenerator', async () => {
@@ -73,6 +75,7 @@ describe('createContentGeneratorConfig', () => {
getSamplingParams: vi.fn().mockReturnValue(undefined),
getContentGeneratorTimeout: vi.fn().mockReturnValue(undefined),
getContentGeneratorMaxRetries: vi.fn().mockReturnValue(undefined),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
beforeEach(() => {

View File

@@ -151,7 +151,7 @@ export async function createContentGenerator(
gcConfig: Config,
sessionId?: string,
): Promise<ContentGenerator> {
- const version = process.env.CLI_VERSION || process.version;
+ const version = gcConfig.getCliVersion() || 'unknown';
const httpOptions = {
headers: {
'User-Agent': `GeminiCLI/${version} (${process.platform}; ${process.arch})`,
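
contentGenerator.ts builds its HTTP headers from the same getter, so the User-Agent now reports the CLI release rather than whatever CLI_VERSION or process.version happened to be. A self-contained sketch of the resulting header value (the helper name is mine, not the file's):

// Sketch only; mirrors the header format in the hunk above.
function buildUserAgent(cliVersion: string | undefined): string {
  const version = cliVersion || 'unknown';
  return `GeminiCLI/${version} (${process.platform}; ${process.arch})`;
}

// buildUserAgent('0.1.0') on 64-bit Linux yields 'GeminiCLI/0.1.0 (linux; x64)'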

View File

@@ -66,6 +66,7 @@ describe('OpenAIContentGenerator', () => {
top_p: 0.9,
},
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
// Mock OpenAI client
@@ -143,6 +144,7 @@ describe('OpenAIContentGenerator', () => {
timeout: 300000,
maxRetries: 5,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
new OpenAIContentGenerator('test-key', 'gpt-4', customConfig);
@@ -901,6 +903,7 @@ describe('OpenAIContentGenerator', () => {
getContentGeneratorConfig: vi.fn().mockReturnValue({
enableOpenAILogging: true,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const loggingGenerator = new OpenAIContentGenerator(
@@ -1023,6 +1026,7 @@ describe('OpenAIContentGenerator', () => {
getContentGeneratorConfig: vi.fn().mockReturnValue({
enableOpenAILogging: true,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const loggingGenerator = new OpenAIContentGenerator(
@@ -1817,6 +1821,7 @@ describe('OpenAIContentGenerator', () => {
max_tokens: 500,
},
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const loggingGenerator = new OpenAIContentGenerator(
@@ -2001,6 +2006,7 @@ describe('OpenAIContentGenerator', () => {
getContentGeneratorConfig: vi.fn().mockReturnValue({
enableOpenAILogging: true,
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const loggingGenerator = new OpenAIContentGenerator(
@@ -2257,6 +2263,7 @@ describe('OpenAIContentGenerator', () => {
top_p: undefined,
},
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const testGenerator = new OpenAIContentGenerator(
@@ -2314,6 +2321,7 @@ describe('OpenAIContentGenerator', () => {
frequency_penalty: 0.3,
},
}),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const testGenerator = new OpenAIContentGenerator(
@@ -2394,6 +2402,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('test-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const qwenGenerator = new OpenAIContentGenerator(
@@ -2447,6 +2456,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('dashscope-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const dashscopeGenerator = new OpenAIContentGenerator(
@@ -2507,6 +2517,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('regular-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const regularGenerator = new OpenAIContentGenerator(
@@ -2552,6 +2563,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('other-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const otherGenerator = new OpenAIContentGenerator(
@@ -2600,6 +2612,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('other-base-url-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const otherBaseUrlGenerator = new OpenAIContentGenerator(
@@ -2648,6 +2661,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('streaming-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const qwenGenerator = new OpenAIContentGenerator(
@@ -2726,6 +2740,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('regular-streaming-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const regularGenerator = new OpenAIContentGenerator(
@@ -2799,6 +2814,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue(undefined), // Undefined session ID
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const qwenGenerator = new OpenAIContentGenerator(
@@ -2852,6 +2868,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('no-base-url-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const noBaseUrlGenerator = new OpenAIContentGenerator(
@@ -2900,6 +2917,7 @@ describe('OpenAIContentGenerator', () => {
enableOpenAILogging: false,
}),
getSessionId: vi.fn().mockReturnValue('undefined-auth-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const undefinedAuthGenerator = new OpenAIContentGenerator(
@@ -2945,6 +2963,7 @@ describe('OpenAIContentGenerator', () => {
const undefinedConfig = {
getContentGeneratorConfig: vi.fn().mockReturnValue(undefined), // Undefined config
getSessionId: vi.fn().mockReturnValue('undefined-config-session-id'),
getCliVersion: vi.fn().mockReturnValue('1.0.0'),
} as unknown as Config;
const undefinedConfigGenerator = new OpenAIContentGenerator(

View File

@@ -114,8 +114,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
timeoutConfig.maxRetries = contentGeneratorConfig.maxRetries;
}
// Set up User-Agent header (same format as contentGenerator.ts)
- const version = process.env.CLI_VERSION || process.version;
+ const version = config.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
// Check if using OpenRouter and add required headers
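
The OpenAI generator applies the same change to its QwenCode User-Agent. How that header is attached is not part of this hunk; one plausible wiring, assuming the openai SDK's defaultHeaders constructor option (the rest is an assumption, not the file's actual code), could be:

import OpenAI from 'openai';

function createOpenAIClient(
  apiKey: string,
  baseURL: string | undefined,
  cliVersion: string | undefined,
): OpenAI {
  const version = cliVersion || 'unknown';
  const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
  // Attach the User-Agent to every request made by this client.
  return new OpenAI({
    apiKey,
    baseURL,
    defaultHeaders: { 'User-Agent': userAgent },
  });
}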

View File

@@ -140,14 +140,14 @@ export class QwenLogger {
return this.createRumEvent('exception', type, name, properties);
}
- createRumPayload(): RumPayload {
-   const version = process.env.CLI_VERSION || process.version;
+ async createRumPayload(): Promise<RumPayload> {
+   const version = this.config?.getCliVersion() || 'unknown';
return {
app: {
id: RUN_APP_ID,
env: process.env.DEBUG ? 'dev' : 'prod',
- version,
+ version: version || 'unknown',
type: 'cli',
},
user: {
@@ -190,7 +190,7 @@ export class QwenLogger {
this.isFlushInProgress = true;
- const rumPayload = this.createRumPayload();
+ const rumPayload = await this.createRumPayload();
const flushFn = () =>
new Promise<Buffer>((resolve, reject) => {
const body = safeJsonStringify(rumPayload);
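
Finally, QwenLogger's RUM payload builder switches from the environment variable to this.config?.getCliVersion(). The method also becomes async (Promise<RumPayload>), so the flush path now awaits it, as the last hunk shows. A condensed sketch of the new construction, keeping only the app block from the diff (RUN_APP_ID stands in for the module's real constant, and the real payload carries more fields, e.g. user):

interface RumPayload {
  app: { id: string; env: string; version: string; type: string };
}

async function createRumPayload(
  getCliVersion: () => string | undefined,
): Promise<RumPayload> {
  // Falls back to 'unknown' instead of the Node.js version.
  const version = getCliVersion() || 'unknown';
  return {
    app: {
      id: 'RUN_APP_ID',
      env: process.env.DEBUG ? 'dev' : 'prod',
      version,
      type: 'cli',
    },
  };
}

// The flush path must now await the builder:
// const rumPayload = await createRumPayload(() => config?.getCliVersion());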