chore: add metadata on openai content generator

This commit is contained in:
tanzhenxin
2025-08-08 14:57:13 +08:00
parent ce632725b0
commit f503be14e9
5 changed files with 85 additions and 73 deletions

View File

@@ -87,7 +87,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error); mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
} catch (thrownError: unknown) { } catch (thrownError: unknown) {
// Should contain timeout-specific messaging and troubleshooting tips // Should contain timeout-specific messaging and troubleshooting tips
const errorMessage = const errorMessage =
@@ -119,7 +119,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error); mockOpenAIClient.chat.completions.create.mockRejectedValueOnce(error);
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
} catch (thrownError: unknown) { } catch (thrownError: unknown) {
// Should NOT contain timeout-specific messaging // Should NOT contain timeout-specific messaging
const errorMessage = const errorMessage =
@@ -146,7 +146,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContent(request)).rejects.toThrow( await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
/Request timeout after \d+s\. Try reducing input length or increasing timeout in config\./, /Request timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
); );
}); });
@@ -161,7 +161,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContent(request)).rejects.toThrow( await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
'Invalid API key', 'Invalid API key',
); );
}); });
@@ -176,7 +176,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}; };
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
} catch (error: unknown) { } catch (error: unknown) {
const errorMessage = const errorMessage =
error instanceof Error ? error.message : String(error); error instanceof Error ? error.message : String(error);
@@ -199,7 +199,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContentStream(request)).rejects.toThrow( await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
/Streaming setup timeout after \d+s\. Try reducing input length or increasing timeout in config\./, /Streaming setup timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
); );
}); });
@@ -214,7 +214,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}; };
try { try {
await generator.generateContentStream(request); await generator.generateContentStream(request, 'test-prompt-id');
} catch (error: unknown) { } catch (error: unknown) {
const errorMessage = const errorMessage =
error instanceof Error ? error.message : String(error); error instanceof Error ? error.message : String(error);
@@ -300,7 +300,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}; };
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
} catch (_error) { } catch (_error) {
// Verify that countTokens was called for estimation // Verify that countTokens was called for estimation
expect(mockCountTokens).toHaveBeenCalledWith({ expect(mockCountTokens).toHaveBeenCalledWith({
@@ -324,7 +324,7 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
}; };
// Should not throw due to token counting failure // Should not throw due to token counting failure
await expect(generator.generateContent(request)).rejects.toThrow( await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
/Request timeout after \d+s/, /Request timeout after \d+s/,
); );
}); });

View File

@@ -189,7 +189,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.candidates).toHaveLength(1); expect(result.candidates).toHaveLength(1);
if ( if (
@@ -236,7 +236,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -302,7 +302,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
if ( if (
result.candidates && result.candidates &&
@@ -345,7 +345,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -381,7 +381,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -439,7 +439,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const stream = await generator.generateContentStream(request); const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = []; const responses = [];
for await (const response of stream) { for await (const response of stream) {
responses.push(response); responses.push(response);
@@ -528,7 +528,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const stream = await generator.generateContentStream(request); const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = []; const responses = [];
for await (const response of stream) { for await (const response of stream) {
responses.push(response); responses.push(response);
@@ -668,7 +668,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContent(request)).rejects.toThrow( await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
'Invalid API key', 'Invalid API key',
); );
}); });
@@ -683,7 +683,7 @@ describe('OpenAIContentGenerator', () => {
}; };
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
} catch (error) { } catch (error) {
// Error should be thrown but token estimation should have been attempted // Error should be thrown but token estimation should have been attempted
expect(error).toBeInstanceOf(Error); expect(error).toBeInstanceOf(Error);
@@ -703,7 +703,7 @@ describe('OpenAIContentGenerator', () => {
}; };
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect.fail('Expected error to be thrown'); expect.fail('Expected error to be thrown');
} catch (error: unknown) { } catch (error: unknown) {
// Should throw the original error object with status preserved // Should throw the original error object with status preserved
@@ -763,7 +763,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -828,7 +828,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
// Should not include the orphaned tool call // Should not include the orphaned tool call
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
@@ -872,7 +872,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
if ( if (
result.candidates && result.candidates &&
result.candidates.length > 0 && result.candidates.length > 0 &&
@@ -919,7 +919,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await loggingGenerator.generateContent(request); await loggingGenerator.generateContent(request, 'test-prompt-id');
// Verify logging was called // Verify logging was called
const { openaiLogger } = await import('../utils/openaiLogger.js'); const { openaiLogger } = await import('../utils/openaiLogger.js');
@@ -949,7 +949,7 @@ describe('OpenAIContentGenerator', () => {
}; };
try { try {
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
// Should not reach here // Should not reach here
expect(true).toBe(false); expect(true).toBe(false);
} catch (error) { } catch (error) {
@@ -969,7 +969,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContent(request)).rejects.toThrow( await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
/Troubleshooting tips.*Reduce input length.*Increase timeout.*Check network/s, /Troubleshooting tips.*Reduce input length.*Increase timeout.*Check network/s,
); );
}); });
@@ -985,7 +985,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContentStream(request)).rejects.toThrow( await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
'Streaming setup failed', 'Streaming setup failed',
); );
}); });
@@ -999,7 +999,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContentStream(request)).rejects.toThrow( await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
/Streaming setup timeout troubleshooting.*Reduce input length/s, /Streaming setup timeout troubleshooting.*Reduce input length/s,
); );
}); });
@@ -1042,7 +1042,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const stream = await loggingGenerator.generateContentStream(request); const stream = await loggingGenerator.generateContentStream(request, 'test-prompt-id');
// Consume the stream and expect error // Consume the stream and expect error
await expect(async () => { await expect(async () => {
@@ -1113,7 +1113,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -1191,7 +1191,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -1296,7 +1296,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -1404,7 +1404,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -1460,7 +1460,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -1495,7 +1495,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(testGenerator.generateContent(request)).rejects.toThrow(); await expect(testGenerator.generateContent(request, 'test-prompt-id')).rejects.toThrow();
// Error logging should be suppressed // Error logging should be suppressed
expect(consoleSpy).not.toHaveBeenCalledWith( expect(consoleSpy).not.toHaveBeenCalledWith(
@@ -1519,7 +1519,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await expect(generator.generateContent(request)).rejects.toThrow(); await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow();
// Error logging should occur by default // Error logging should occur by default
expect(consoleSpy).toHaveBeenCalledWith( expect(consoleSpy).toHaveBeenCalledWith(
@@ -1566,7 +1566,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
// Should handle malformed JSON gracefully // Should handle malformed JSON gracefully
if ( if (
@@ -1643,7 +1643,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const stream = await generator.generateContentStream(request); const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = []; const responses = [];
for await (const response of stream) { for await (const response of stream) {
responses.push(response); responses.push(response);
@@ -1692,7 +1692,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.candidates).toHaveLength(1); expect(result.candidates).toHaveLength(1);
if ( if (
@@ -1733,7 +1733,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.usageMetadata).toEqual({ expect(result.usageMetadata).toEqual({
promptTokenCount: 70, // 70% of 100 promptTokenCount: 70, // 70% of 100
@@ -1772,7 +1772,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const result = await generator.generateContent(request); const result = await generator.generateContent(request, 'test-prompt-id');
expect(result.usageMetadata).toEqual({ expect(result.usageMetadata).toEqual({
promptTokenCount: 50, promptTokenCount: 50,
@@ -1884,7 +1884,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await loggingGenerator.generateContent(request); await loggingGenerator.generateContent(request, 'test-prompt-id');
// Verify that logging was called with properly converted request/response // Verify that logging was called with properly converted request/response
const { openaiLogger } = await import('../utils/openaiLogger.js'); const { openaiLogger } = await import('../utils/openaiLogger.js');
@@ -2039,7 +2039,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const stream = await loggingGenerator.generateContentStream(request); const stream = await loggingGenerator.generateContentStream(request, 'test-prompt-id');
const responses = []; const responses = [];
for await (const response of stream) { for await (const response of stream) {
responses.push(response); responses.push(response);
@@ -2083,7 +2083,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
const stream = await generator.generateContentStream(request); const stream = await generator.generateContentStream(request, 'test-prompt-id');
const responses = []; const responses = [];
for await (const response of stream) { for await (const response of stream) {
responses.push(response); responses.push(response);
@@ -2166,7 +2166,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -2204,7 +2204,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await generator.generateContent(request); await generator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -2260,7 +2260,7 @@ describe('OpenAIContentGenerator', () => {
}, },
}; };
await testGenerator.generateContent(request); await testGenerator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@@ -2312,7 +2312,7 @@ describe('OpenAIContentGenerator', () => {
model: 'gpt-4', model: 'gpt-4',
}; };
await testGenerator.generateContent(request); await testGenerator.generateContent(request, 'test-prompt-id');
expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith( expect(mockOpenAIClient.chat.completions.create).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({

View File

@@ -187,6 +187,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
async generateContent( async generateContent(
request: GenerateContentParameters, request: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> { ): Promise<GenerateContentResponse> {
const startTime = Date.now(); const startTime = Date.now();
const messages = this.convertToOpenAIFormat(request); const messages = this.convertToOpenAIFormat(request);
@@ -204,6 +205,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
model: this.model, model: this.model,
messages, messages,
...samplingParams, ...samplingParams,
metadata: {
sessionId: this.config.getSessionId?.(),
promptId: userPromptId,
},
}; };
if (request.config?.tools) { if (request.config?.tools) {
@@ -223,7 +228,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const responseEvent = new ApiResponseEvent( const responseEvent = new ApiResponseEvent(
this.model, this.model,
durationMs, durationMs,
`openai-${Date.now()}`, // Generate a prompt ID userPromptId,
this.config.getContentGeneratorConfig()?.authType, this.config.getContentGeneratorConfig()?.authType,
response.usageMetadata, response.usageMetadata,
); );
@@ -277,7 +282,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const errorEvent = new ApiResponseEvent( const errorEvent = new ApiResponseEvent(
this.model, this.model,
durationMs, durationMs,
`openai-${Date.now()}`, // Generate a prompt ID userPromptId,
this.config.getContentGeneratorConfig()?.authType, this.config.getContentGeneratorConfig()?.authType,
estimatedUsage, estimatedUsage,
undefined, undefined,
@@ -317,10 +322,11 @@ export class OpenAIContentGenerator implements ContentGenerator {
async generateContentStream( async generateContentStream(
request: GenerateContentParameters, request: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> { ): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now(); const startTime = Date.now();
const messages = this.convertToOpenAIFormat(request); const messages = this.convertToOpenAIFormat(request);
try { try {
// Build sampling parameters with clear priority // Build sampling parameters with clear priority
const samplingParams = this.buildSamplingParameters(request); const samplingParams = this.buildSamplingParameters(request);
@@ -333,6 +339,10 @@ export class OpenAIContentGenerator implements ContentGenerator {
...samplingParams, ...samplingParams,
stream: true, stream: true,
stream_options: { include_usage: true }, stream_options: { include_usage: true },
metadata: {
sessionId: this.config.getSessionId?.(),
promptId: userPromptId,
},
}; };
if (request.config?.tools) { if (request.config?.tools) {
@@ -372,7 +382,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const responseEvent = new ApiResponseEvent( const responseEvent = new ApiResponseEvent(
this.model, this.model,
durationMs, durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID userPromptId,
this.config.getContentGeneratorConfig()?.authType, this.config.getContentGeneratorConfig()?.authType,
finalUsageMetadata, finalUsageMetadata,
); );
@@ -428,7 +438,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const errorEvent = new ApiResponseEvent( const errorEvent = new ApiResponseEvent(
this.model, this.model,
durationMs, durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID userPromptId,
this.config.getContentGeneratorConfig()?.authType, this.config.getContentGeneratorConfig()?.authType,
estimatedUsage, estimatedUsage,
undefined, undefined,
@@ -501,7 +511,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
const errorEvent = new ApiResponseEvent( const errorEvent = new ApiResponseEvent(
this.model, this.model,
durationMs, durationMs,
`openai-stream-${Date.now()}`, // Generate a prompt ID userPromptId,
this.config.getContentGeneratorConfig()?.authType, this.config.getContentGeneratorConfig()?.authType,
estimatedUsage, estimatedUsage,
undefined, undefined,

View File

@@ -154,7 +154,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
const result = await qwenContentGenerator.generateContent(request); const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Generated content'); expect(result.text).toBe('Generated content');
expect(mockQwenClient.getAccessToken).toHaveBeenCalled(); expect(mockQwenClient.getAccessToken).toHaveBeenCalled();
@@ -171,7 +171,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello stream' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello stream' }] }],
}; };
const stream = await qwenContentGenerator.generateContentStream(request); const stream = await qwenContentGenerator.generateContentStream(request, 'test-prompt-id');
const chunks: string[] = []; const chunks: string[] = [];
for await (const chunk of stream) { for await (const chunk of stream) {
@@ -238,7 +238,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
const result = await qwenContentGenerator.generateContent(request); const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Generated content'); expect(result.text).toBe('Generated content');
expect(mockQwenClient.refreshAccessToken).toHaveBeenCalled(); expect(mockQwenClient.refreshAccessToken).toHaveBeenCalled();
@@ -258,7 +258,7 @@ describe('QwenContentGenerator', () => {
}; };
await expect( await expect(
qwenContentGenerator.generateContent(request), qwenContentGenerator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow( ).rejects.toThrow(
'Failed to obtain valid Qwen access token. Please re-authenticate.', 'Failed to obtain valid Qwen access token. Please re-authenticate.',
); );
@@ -278,7 +278,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(mockQwenClient.getCredentials).toHaveBeenCalled(); expect(mockQwenClient.getCredentials).toHaveBeenCalled();
}); });
@@ -315,7 +315,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should use default endpoint with /v1 suffix // Should use default endpoint with /v1 suffix
expect(capturedBaseURL).toBe( expect(capturedBaseURL).toBe(
@@ -355,7 +355,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should add https:// and /v1 // Should add https:// and /v1
expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1'); expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1');
@@ -393,7 +393,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should preserve https:// and add /v1 // Should preserve https:// and add /v1
expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1'); expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1');
@@ -431,7 +431,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should not duplicate /v1 // Should not duplicate /v1
expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1'); expect(capturedBaseURL).toBe('https://custom-endpoint.com/v1');
@@ -464,7 +464,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
// Should restore original values after operation // Should restore original values after operation
expect(client.apiKey).toBe(originalApiKey); expect(client.apiKey).toBe(originalApiKey);
@@ -499,7 +499,7 @@ describe('QwenContentGenerator', () => {
}; };
try { try {
await qwenContentGenerator.generateContent(request); await qwenContentGenerator.generateContent(request, 'test-prompt-id');
} catch (error) { } catch (error) {
expect(error).toBe(mockError); expect(error).toBe(mockError);
} }
@@ -545,7 +545,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }], contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
}; };
const result = await qwenContentGenerator.generateContent(request); const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Success after retry'); expect(result.text).toBe('Success after retry');
expect(mockGenerateContent).toHaveBeenCalledTimes(2); expect(mockGenerateContent).toHaveBeenCalledTimes(2);
@@ -576,7 +576,7 @@ describe('QwenContentGenerator', () => {
}; };
await expect( await expect(
qwenContentGenerator.generateContent(request), qwenContentGenerator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow('Network timeout'); ).rejects.toThrow('Network timeout');
expect(mockGenerateContent).toHaveBeenCalledTimes(1); expect(mockGenerateContent).toHaveBeenCalledTimes(1);
expect(mockQwenClient.refreshAccessToken).not.toHaveBeenCalled(); expect(mockQwenClient.refreshAccessToken).not.toHaveBeenCalled();
@@ -600,7 +600,7 @@ describe('QwenContentGenerator', () => {
}; };
await expect( await expect(
qwenContentGenerator.generateContent(request), qwenContentGenerator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow('Failed to obtain valid Qwen access token'); ).rejects.toThrow('Failed to obtain valid Qwen access token');
}); });
}); });
@@ -691,9 +691,9 @@ describe('QwenContentGenerator', () => {
// Make multiple concurrent requests - should all use the same refresh promise // Make multiple concurrent requests - should all use the same refresh promise
const promises = [ const promises = [
qwenContentGenerator.generateContent(request), qwenContentGenerator.generateContent(request, 'test-prompt-id'),
qwenContentGenerator.generateContent(request), qwenContentGenerator.generateContent(request, 'test-prompt-id'),
qwenContentGenerator.generateContent(request), qwenContentGenerator.generateContent(request, 'test-prompt-id'),
]; ];
const results = await Promise.all(promises); const results = await Promise.all(promises);
@@ -795,7 +795,7 @@ describe('QwenContentGenerator', () => {
contents: [{ role: 'user', parts: [{ text: 'Test message' }] }], contents: [{ role: 'user', parts: [{ text: 'Test message' }] }],
}; };
const result = await qwenContentGenerator.generateContent(request); const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
expect(result.text).toBe('Success after refresh'); expect(result.text).toBe('Success after refresh');
expect(mockQwenClient.getAccessToken).toHaveBeenCalled(); expect(mockQwenClient.getAccessToken).toHaveBeenCalled();

View File

@@ -78,6 +78,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
*/ */
async generateContent( async generateContent(
request: GenerateContentParameters, request: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> { ): Promise<GenerateContentResponse> {
return this.withValidToken(async (token) => { return this.withValidToken(async (token) => {
// Temporarily update the API key and base URL // Temporarily update the API key and base URL
@@ -87,7 +88,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
this.client.baseURL = this.getCurrentEndpoint(); this.client.baseURL = this.getCurrentEndpoint();
try { try {
return await super.generateContent(request); return await super.generateContent(request, userPromptId);
} finally { } finally {
// Restore original values // Restore original values
this.client.apiKey = originalApiKey; this.client.apiKey = originalApiKey;
@@ -101,6 +102,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
*/ */
async generateContentStream( async generateContentStream(
request: GenerateContentParameters, request: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> { ): Promise<AsyncGenerator<GenerateContentResponse>> {
return this.withValidTokenForStream(async (token) => { return this.withValidTokenForStream(async (token) => {
// Update the API key and base URL before streaming // Update the API key and base URL before streaming
@@ -110,7 +112,7 @@ export class QwenContentGenerator extends OpenAIContentGenerator {
this.client.baseURL = this.getCurrentEndpoint(); this.client.baseURL = this.getCurrentEndpoint();
try { try {
return await super.generateContentStream(request); return await super.generateContentStream(request, userPromptId);
} catch (error) { } catch (error) {
// Restore original values on error // Restore original values on error
this.client.apiKey = originalApiKey; this.client.apiKey = originalApiKey;