feat: implement usage stats logging with telemetry refactoring

author tanzhenxin
date   2025-08-12 12:16:38 +08:00
parent c96852dc56
commit 807844fb57
12 changed files with 203 additions and 242 deletions

View File

@@ -15,6 +15,7 @@ vi.mock('openai');
// Mock logger modules
vi.mock('../../telemetry/loggers.js', () => ({
logApiResponse: vi.fn(),
logApiError: vi.fn(),
}));
vi.mock('../../utils/openaiLogger.js', () => ({
@@ -290,28 +291,18 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
});
describe('token estimation on timeout', () => {
it('should estimate tokens even when request times out', async () => {
it('should surface a clear timeout error when request times out', async () => {
const timeoutError = new Error('Request timeout');
mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
// Mock countTokens to return a value
const mockCountTokens = vi.spyOn(generator, 'countTokens');
mockCountTokens.mockResolvedValue({ totalTokens: 100 });
const request = {
contents: [{ role: 'user' as const, parts: [{ text: 'Hello world' }] }],
model: 'gpt-4',
};
try {
await generator.generateContent(request, 'test-prompt-id');
} catch (_error) {
// Verify that countTokens was called for estimation
expect(mockCountTokens).toHaveBeenCalledWith({
contents: request.contents,
model: 'gpt-4',
});
}
await expect(
generator.generateContent(request, 'test-prompt-id'),
).rejects.toThrow(/Request timeout after \d+s/);
});
it('should fall back to character-based estimation if countTokens fails', async () => {

View File

@@ -158,14 +158,23 @@ export class GeminiChat {
prompt_id: string,
usageMetadata?: GenerateContentResponseUsageMetadata,
responseText?: string,
responseId?: string,
): Promise<void> {
const authType = this.config.getContentGeneratorConfig()?.authType;
// Don't log API responses for openaiContentGenerator
if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
return;
}
logApiResponse(
this.config,
new ApiResponseEvent(
responseId || `gemini-${Date.now()}`,
this.config.getModel(),
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
authType,
usageMetadata,
responseText,
),
@@ -176,18 +185,27 @@ export class GeminiChat {
durationMs: number,
error: unknown,
prompt_id: string,
responseId?: string,
): void {
const errorMessage = error instanceof Error ? error.message : String(error);
const errorType = error instanceof Error ? error.name : 'unknown';
const authType = this.config.getContentGeneratorConfig()?.authType;
// Don't log API errors for openaiContentGenerator
if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
return;
}
logApiError(
this.config,
new ApiErrorEvent(
responseId,
this.config.getModel(),
errorMessage,
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
authType,
errorType,
),
);
@@ -320,6 +338,7 @@ export class GeminiChat {
prompt_id,
response.usageMetadata,
JSON.stringify(response),
response.responseId,
);
this.sendPromise = (async () => {
@@ -563,6 +582,7 @@ export class GeminiChat {
prompt_id,
this.getFinalUsageMetadata(chunks),
JSON.stringify(chunks),
chunks[chunks.length - 1]?.responseId,
);
}
this.recordHistory(inputContent, outputContent);
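Note (sketch, not part of this commit): the hunks above thread an optional responseId into the private response/error logging helpers and skip Gemini-side telemetry for the Qwen OAuth and OpenAI auth paths, which log through OpenAIContentGenerator instead. A minimal sketch of the resulting response path, written as a standalone function because the helper's name is not visible in this hunk; import paths and the AuthType source are assumptions:

import { GenerateContentResponseUsageMetadata } from '@google/genai';
import { Config } from '../config/config.js';
import { AuthType } from './contentGenerator.js'; // path assumed
import { logApiResponse } from '../telemetry/loggers.js';
import { ApiResponseEvent } from '../telemetry/types.js';

// Sketch only: mirrors the hunk above; in the commit this is a private method on GeminiChat.
function emitApiResponseTelemetry(
  config: Config,
  durationMs: number,
  promptId: string,
  usageMetadata?: GenerateContentResponseUsageMetadata,
  responseText?: string,
  responseId?: string,
): void {
  const authType = config.getContentGeneratorConfig()?.authType;
  // Qwen OAuth / OpenAI requests are logged by OpenAIContentGenerator instead.
  if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
    return;
  }
  logApiResponse(
    config,
    new ApiResponseEvent(
      responseId || `gemini-${Date.now()}`, // fall back to a synthetic id
      config.getModel(),
      durationMs,
      promptId,
      authType,
      usageMetadata,
      responseText,
    ),
  );
}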

View File

@@ -23,6 +23,7 @@ vi.mock('openai');
// Mock logger modules
vi.mock('../telemetry/loggers.js', () => ({
logApiResponse: vi.fn(),
logApiError: vi.fn(),
}));
vi.mock('../utils/openaiLogger.js', () => ({

View File

@@ -22,8 +22,8 @@ import {
} from '@google/genai';
import { ContentGenerator } from './contentGenerator.js';
import OpenAI from 'openai';
import { logApiResponse } from '../telemetry/loggers.js';
import { ApiResponseEvent } from '../telemetry/types.js';
import { logApiError, logApiResponse } from '../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../telemetry/types.js';
import { Config } from '../config/config.js';
import { openaiLogger } from '../utils/openaiLogger.js';
@@ -226,6 +226,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
response.responseId || 'unknown',
this.model,
durationMs,
userPromptId,
@@ -254,41 +255,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
? error.message
: String(error);
// Estimate token usage even when there's an error
// This helps track costs and usage even for failed requests
let estimatedUsage;
try {
const tokenCountResult = await this.countTokens({
contents: request.contents,
model: this.model,
});
estimatedUsage = {
promptTokenCount: tokenCountResult.totalTokens,
candidatesTokenCount: 0, // No completion tokens since request failed
totalTokenCount: tokenCountResult.totalTokens,
};
} catch {
// If token counting also fails, provide a minimal estimate
const contentStr = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(contentStr.length / 4);
estimatedUsage = {
promptTokenCount: estimatedTokens,
candidatesTokenCount: 0,
totalTokenCount: estimatedTokens,
};
}
// Log API error event for UI telemetry with estimated usage
const errorEvent = new ApiResponseEvent(
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).requestID || 'unknown',
this.model,
errorMessage,
durationMs,
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
errorMessage,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).code,
);
logApiResponse(this.config, errorEvent);
logApiError(this.config, errorEvent);
// Log error interaction if enabled
if (this.config.getContentGeneratorConfig()?.enableOpenAILogging) {
@@ -380,6 +361,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
responses[responses.length - 1]?.responseId || 'unknown',
this.model,
durationMs,
userPromptId,
@@ -411,40 +393,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
? error.message
: String(error);
// Estimate token usage even when there's an error in streaming
let estimatedUsage;
try {
const tokenCountResult = await this.countTokens({
contents: request.contents,
model: this.model,
});
estimatedUsage = {
promptTokenCount: tokenCountResult.totalTokens,
candidatesTokenCount: 0, // No completion tokens since request failed
totalTokenCount: tokenCountResult.totalTokens,
};
} catch {
// If token counting also fails, provide a minimal estimate
const contentStr = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(contentStr.length / 4);
estimatedUsage = {
promptTokenCount: estimatedTokens,
candidatesTokenCount: 0,
totalTokenCount: estimatedTokens,
};
}
// Log API error event for UI telemetry with estimated usage
const errorEvent = new ApiResponseEvent(
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).requestID || 'unknown',
this.model,
errorMessage,
durationMs,
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
errorMessage,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).code,
);
logApiResponse(this.config, errorEvent);
logApiError(this.config, errorEvent);
// Log error interaction if enabled
if (this.config.getContentGeneratorConfig()?.enableOpenAILogging) {
@@ -484,40 +447,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
? error.message
: String(error);
// Estimate token usage even when there's an error in streaming setup
let estimatedUsage;
try {
const tokenCountResult = await this.countTokens({
contents: request.contents,
model: this.model,
});
estimatedUsage = {
promptTokenCount: tokenCountResult.totalTokens,
candidatesTokenCount: 0, // No completion tokens since request failed
totalTokenCount: tokenCountResult.totalTokens,
};
} catch {
// If token counting also fails, provide a minimal estimate
const contentStr = JSON.stringify(request.contents);
const estimatedTokens = Math.ceil(contentStr.length / 4);
estimatedUsage = {
promptTokenCount: estimatedTokens,
candidatesTokenCount: 0,
totalTokenCount: estimatedTokens,
};
}
// Log API error event for UI telemetry with estimated usage
const errorEvent = new ApiResponseEvent(
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).requestID || 'unknown',
this.model,
errorMessage,
durationMs,
userPromptId,
this.config.getContentGeneratorConfig()?.authType,
estimatedUsage,
undefined,
errorMessage,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any).code,
);
logApiResponse(this.config, errorEvent);
logApiError(this.config, errorEvent);
// Allow subclasses to suppress error logging for specific scenarios
if (!this.shouldSuppressErrorLogging(error, request)) {
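Note (sketch, not part of this commit): the three catch blocks in this file (non-streaming, streaming, and streaming setup) drop the estimated-usage ApiResponseEvent and now emit an ApiErrorEvent through logApiError. The repeated inline code has the shape below; this helper is purely illustrative, and the final `code` argument is assumed to be the provider status code:

import { Config } from '../config/config.js';
import { ApiErrorEvent } from '../telemetry/types.js';

// Sketch only: summarizes the inline error-telemetry code repeated in the hunks above.
function buildOpenAIErrorEvent(
  error: unknown,
  model: string,
  errorMessage: string,
  durationMs: number,
  userPromptId: string,
  config: Config,
): ApiErrorEvent {
  const e = error as { requestID?: string; type?: string; code?: string | number };
  return new ApiErrorEvent(
    e.requestID || 'unknown',                           // response_id
    model,
    errorMessage,
    durationMs,
    userPromptId,
    config.getContentGeneratorConfig()?.authType,
    e.type,                                             // error_type
    e.code,                                             // trailing argument from the hunk (assumed status code)
  );
}

// Usage inside a catch block:
//   logApiError(this.config, buildOpenAIErrorEvent(error, this.model, errorMessage, durationMs, userPromptId, this.config));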

View File

@@ -19,6 +19,8 @@ import { LoopDetectionService } from './loopDetectionService.js';
vi.mock('../telemetry/loggers.js', () => ({
logLoopDetected: vi.fn(),
logApiError: vi.fn(),
logApiResponse: vi.fn(),
}));
const TOOL_CALL_LOOP_THRESHOLD = 5;

View File

@@ -48,6 +48,8 @@ describe('Circular Reference Integration Test', () => {
const problematicEvent: RumEvent = {
timestamp: Date.now(),
event_type: 'exception',
type: 'error',
name: 'api_error',
error: new Error('Network error'),
function_args: {
filePath: '/test/file.txt',

View File

@@ -212,6 +212,7 @@ describe('loggers', () => {
toolUsePromptTokenCount: 2,
};
const event = new ApiResponseEvent(
'test-response-id',
'test-model',
100,
'prompt-id-1',
@@ -229,6 +230,7 @@ describe('loggers', () => {
'event.name': EVENT_API_RESPONSE,
'event.timestamp': '2025-01-01T00:00:00.000Z',
[SemanticAttributes.HTTP_STATUS_CODE]: 200,
response_id: 'test-response-id',
model: 'test-model',
status_code: 200,
duration_ms: 100,
@@ -275,6 +277,7 @@ describe('loggers', () => {
toolUsePromptTokenCount: 2,
};
const event = new ApiResponseEvent(
'test-response-id-2',
'test-model',
100,
'prompt-id-1',

View File

@@ -22,32 +22,28 @@ export interface RumView {
export interface RumEvent {
timestamp?: number;
event_type?: 'view' | 'action' | 'exception' | 'resource';
type: string; // Event type
name: string; // Event name
snapshots?: string; // JSON string of event snapshots
properties?: Record<string, unknown>;
// [key: string]: unknown;
}
export interface RumViewEvent extends RumEvent {
type?: string; // View event type: pv, perf
name?: string; // View event name
view_type?: string; // View rendering type
time_spent?: number; // Time spent on current view in ms
snapshots?: string; // View snapshots JSON string, mainly for native apps
}
export interface RumActionEvent extends RumEvent {
type?: string; // User action type
name?: string; // Semantic name, e.g.: click#checkout
target_name?: string; // Element user interacted with (for auto-collected actions only)
duration?: number; // Action duration in ms
snapshots?: string; // Action snapshots
method_info?: string; // Action callback, e.g.: onClick()
}
export interface RumExceptionEvent extends RumEvent {
source?: string; // Error source, e.g.: console, event
file?: string; // Error file
type?: string; // Error type: crash, custom, error
subtype?: string; // Secondary classification of error type
name?: string; // Error name
message?: string; // Concise, readable message explaining the event
stack?: string; // Stack trace or supplemental information about the error
caused_by?: string; // Exception cause
@@ -55,16 +51,13 @@ export interface RumExceptionEvent extends RumEvent {
column?: number; // Column number where exception occurred
thread_id?: string; // Thread ID
binary_images?: string; // Error source
snapshots?: string; // Error snapshots
}
export interface RumResourceEvent extends RumEvent {
type?: string; // Resource type: css, javascript, media, XHR, image, navigation (XHR/fetch will be considered as API)
method?: string; // HTTP request method: POST, GET, etc.
status_code?: string; // Resource status code
message?: string; // Error message content, corresponds to resource.error_msg
url?: string; // Resource URL
name?: string; // Default is URL path part, can be matched by rules or user configuration
provider_type?: string; // Resource provider type: first-party, cdn, ad, analytics
trace_id?: string; // Resource request TraceID
success?: number; // Resource loading success: 1 (default) success, 0 failure
@@ -78,7 +71,6 @@ export interface RumResourceEvent extends RumEvent {
download_duration?: number; // Time spent downloading response in ms (responseEnd - responseStart)
timing_data?: string; // JSON string of PerformanceResourceTiming
trace_data?: string; // Trace information snapshot JSON string
snapshots?: string; // View snapshots, mainly for native apps
}
export interface RumPayload {
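Note (sketch, not part of this commit): with type and name now required on the RumEvent base, and snapshots/properties hoisted onto it, a concrete exception event built against these interfaces looks like the following; values are illustrative and mirror the loop-detected logging later in this commit:

// Sketch only: illustrative values.
const loopDetectedEvent: RumExceptionEvent = {
  timestamp: Date.now(),
  event_type: 'exception',
  type: 'error',                 // required on RumEvent after this change
  name: 'loop_detected',         // required on RumEvent after this change
  subtype: 'loop_detected',
  properties: { prompt_id: 'prompt-123' },
  snapshots: JSON.stringify({ loop_type: 'tool_call' }), // loop_type value illustrative
};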

View File

@@ -60,11 +60,15 @@ export class QwenLogger {
private sessionId: string;
private viewId: string;
private isFlushInProgress: boolean = false;
private isShutdown: boolean = false;
private constructor(config?: Config) {
this.config = config;
this.userId = this.generateUserId();
this.sessionId = this.config?.getSessionId() ?? '';
this.sessionId =
typeof this.config?.getSessionId === 'function'
? this.config.getSessionId()
: '';
this.viewId = randomUUID();
}
@@ -79,6 +83,9 @@ export class QwenLogger {
if (!QwenLogger.instance) {
QwenLogger.instance = new QwenLogger(config);
}
process.on('exit', QwenLogger.instance.shutdown.bind(QwenLogger.instance));
return QwenLogger.instance;
}
@@ -88,29 +95,49 @@ export class QwenLogger {
createRumEvent(
eventType: 'view' | 'action' | 'exception' | 'resource',
properties: RumEvent,
type: string,
name: string,
properties: Partial<RumEvent>,
): RumEvent {
return {
timestamp: Date.now(),
event_type: eventType,
...properties,
type,
name,
...(properties || {}),
};
}
createViewEvent(properties: RumViewEvent): RumEvent {
return this.createRumEvent('view', properties);
createViewEvent(
type: string,
name: string,
properties: Partial<RumViewEvent>,
): RumEvent {
return this.createRumEvent('view', type, name, properties);
}
createActionEvent(properties: RumActionEvent): RumEvent {
return this.createRumEvent('action', properties);
createActionEvent(
type: string,
name: string,
properties: Partial<RumActionEvent>,
): RumEvent {
return this.createRumEvent('action', type, name, properties);
}
createResourceEvent(properties: RumResourceEvent): RumEvent {
return this.createRumEvent('resource', properties);
createResourceEvent(
type: string,
name: string,
properties: Partial<RumResourceEvent>,
): RumEvent {
return this.createRumEvent('resource', type, name, properties);
}
createExceptionEvent(properties: RumExceptionEvent): RumEvent {
return this.createRumEvent('exception', properties);
createExceptionEvent(
type: string,
name: string,
properties: Partial<RumExceptionEvent>,
): RumEvent {
return this.createRumEvent('exception', type, name, properties);
}
createRumPayload(): RumPayload {
@@ -230,51 +257,12 @@ export class QwenLogger {
}
}
// Visible for testing. Decodes protobuf-encoded response from Qwen server.
decodeLogResponse(buf: Buffer): LogResponse | undefined {
// TODO(obrienowen): return specific errors to facilitate debugging.
if (buf.length < 1) {
return undefined;
}
// The first byte of the buffer is `field<<3 | type`. We're looking for field
// 1, with type varint, represented by type=0. If the first byte isn't 8, that
// means field 1 is missing or the message is corrupted. Either way, we return
// undefined.
if (buf.readUInt8(0) !== 8) {
return undefined;
}
let ms = BigInt(0);
let cont = true;
// In each byte, the most significant bit is the continuation bit. If it's
// set, we keep going. The lowest 7 bits, are data bits. They are concatenated
// in reverse order to form the final number.
for (let i = 1; cont && i < buf.length; i++) {
const byte = buf.readUInt8(i);
ms |= BigInt(byte & 0x7f) << BigInt(7 * (i - 1));
cont = (byte & 0x80) !== 0;
}
if (cont) {
// We have fallen off the buffer without seeing a terminating byte. The
// message is corrupted.
return undefined;
}
const returnVal = {
nextRequestWaitMs: Number(ms),
};
return returnVal;
}
logStartSessionEvent(event: StartSessionEvent): void {
const applicationEvent = this.createViewEvent({
type: 'session',
name: 'session_start',
snapshots: JSON.stringify({
const applicationEvent = this.createViewEvent('session', 'session_start', {
properties: {
model: event.model,
},
snapshots: JSON.stringify({
embedding_model: event.embedding_model,
sandbox_enabled: event.sandbox_enabled,
core_tools_enabled: event.core_tools_enabled,
@@ -286,8 +274,6 @@ export class QwenLogger {
telemetry_enabled: event.telemetry_enabled,
telemetry_log_user_prompts_enabled:
event.telemetry_log_user_prompts_enabled,
file_filtering_respect_git_ignore:
event.file_filtering_respect_git_ignore,
}),
});
@@ -299,13 +285,13 @@ export class QwenLogger {
}
logNewPromptEvent(event: UserPromptEvent): void {
const rumEvent = this.createActionEvent({
type: 'user_prompt',
name: 'user_prompt',
const rumEvent = this.createActionEvent('user_prompt', 'user_prompt', {
properties: {
auth_type: event.auth_type,
prompt_id: event.prompt_id,
},
snapshots: JSON.stringify({
prompt_length: event.prompt_length,
prompt_id: event.prompt_id,
auth_type: event.auth_type,
}),
});
@@ -314,55 +300,62 @@ export class QwenLogger {
}
logToolCallEvent(event: ToolCallEvent): void {
const rumEvent = this.createActionEvent({
type: 'tool_call',
name: `tool_call#${event.function_name}`,
snapshots: JSON.stringify({
function_name: event.function_name,
prompt_id: event.prompt_id,
decision: event.decision,
success: event.success,
duration_ms: event.duration_ms,
error: event.error,
error_type: event.error_type,
}),
});
const rumEvent = this.createActionEvent(
'tool_call',
`tool_call#${event.function_name}`,
{
properties: {
prompt_id: event.prompt_id,
},
snapshots: JSON.stringify({
function_name: event.function_name,
decision: event.decision,
success: event.success,
duration_ms: event.duration_ms,
error: event.error,
error_type: event.error_type,
}),
},
);
this.enqueueLogEvent(rumEvent);
this.flushIfNeeded();
}
logApiRequestEvent(event: ApiRequestEvent): void {
const rumEvent = this.createResourceEvent({
type: 'api',
name: 'api_request',
snapshots: JSON.stringify({
model: event.model,
prompt_id: event.prompt_id,
}),
});
// ignore for now
console.log('logApiRequestEvent', event);
return;
this.enqueueLogEvent(rumEvent);
this.flushIfNeeded();
// const rumEvent = this.createResourceEvent('api', 'api_request', {
// properties: {
// model: event.model,
// prompt_id: event.prompt_id,
// },
// });
// this.enqueueLogEvent(rumEvent);
// this.flushIfNeeded();
}
logApiResponseEvent(event: ApiResponseEvent): void {
const rumEvent = this.createResourceEvent({
type: 'api',
name: 'api_response',
const rumEvent = this.createResourceEvent('api', 'api_response', {
status_code: event.status_code?.toString() ?? '',
duration: event.duration_ms,
success: event.status_code === 200 ? 1 : 0,
success: 1,
message: event.error,
snapshots: JSON.stringify({
trace_id: event.response_id,
properties: {
auth_type: event.auth_type,
model: event.model,
prompt_id: event.prompt_id,
},
snapshots: JSON.stringify({
input_token_count: event.input_token_count,
output_token_count: event.output_token_count,
cached_content_token_count: event.cached_content_token_count,
thoughts_token_count: event.thoughts_token_count,
tool_token_count: event.tool_token_count,
auth_type: event.auth_type,
}),
});
@@ -371,17 +364,19 @@ export class QwenLogger {
}
logApiErrorEvent(event: ApiErrorEvent): void {
const rumEvent = this.createExceptionEvent({
type: 'error',
subtype: 'api_error',
const rumEvent = this.createResourceEvent('api', 'api_error', {
status_code: event.status_code?.toString() ?? '',
duration: event.duration_ms,
success: 0,
message: event.error,
snapshots: JSON.stringify({
trace_id: event.response_id,
properties: {
auth_type: event.auth_type,
model: event.model,
prompt_id: event.prompt_id,
},
snapshots: JSON.stringify({
error_type: event.error_type,
status_code: event.status_code,
duration_ms: event.duration_ms,
auth_type: event.auth_type,
}),
});
@@ -390,12 +385,10 @@ export class QwenLogger {
}
logFlashFallbackEvent(event: FlashFallbackEvent): void {
const rumEvent = this.createActionEvent({
type: 'fallback',
name: 'flash_fallback',
snapshots: JSON.stringify({
const rumEvent = this.createActionEvent('fallback', 'flash_fallback', {
properties: {
auth_type: event.auth_type,
}),
},
});
this.enqueueLogEvent(rumEvent);
@@ -403,11 +396,12 @@ export class QwenLogger {
}
logLoopDetectedEvent(event: LoopDetectedEvent): void {
const rumEvent = this.createExceptionEvent({
type: 'error',
const rumEvent = this.createExceptionEvent('error', 'loop_detected', {
subtype: 'loop_detected',
snapshots: JSON.stringify({
properties: {
prompt_id: event.prompt_id,
},
snapshots: JSON.stringify({
loop_type: event.loop_type,
}),
});
@@ -417,11 +411,11 @@ export class QwenLogger {
}
logNextSpeakerCheck(event: NextSpeakerCheckEvent): void {
const rumEvent = this.createActionEvent({
type: 'check',
name: 'next_speaker_check',
snapshots: JSON.stringify({
const rumEvent = this.createActionEvent('check', 'next_speaker_check', {
properties: {
prompt_id: event.prompt_id,
},
snapshots: JSON.stringify({
finish_reason: event.finish_reason,
result: event.result,
}),
@@ -432,9 +426,7 @@ export class QwenLogger {
}
logSlashCommandEvent(event: SlashCommandEvent): void {
const rumEvent = this.createActionEvent({
type: 'command',
name: 'slash_command',
const rumEvent = this.createActionEvent('command', 'slash_command', {
snapshots: JSON.stringify({
command: event.command,
subcommand: event.subcommand,
@@ -446,23 +438,23 @@ export class QwenLogger {
}
logMalformedJsonResponseEvent(event: MalformedJsonResponseEvent): void {
const rumEvent = this.createExceptionEvent({
type: 'error',
subtype: 'malformed_json_response',
snapshots: JSON.stringify({
model: event.model,
}),
});
const rumEvent = this.createExceptionEvent(
'error',
'malformed_json_response',
{
subtype: 'malformed_json_response',
properties: {
model: event.model,
},
},
);
this.enqueueLogEvent(rumEvent);
this.flushIfNeeded();
}
logEndSessionEvent(_event: EndSessionEvent): void {
const applicationEvent = this.createViewEvent({
type: 'session',
name: 'session_end',
});
const applicationEvent = this.createViewEvent('session', 'session_end', {});
// Flush immediately on session end.
this.enqueueLogEvent(applicationEvent);
@@ -484,6 +476,9 @@ export class QwenLogger {
}
shutdown() {
if (this.isShutdown) return;
this.isShutdown = true;
const event = new EndSessionEvent(this.config);
this.logEndSessionEvent(event);
}
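Note (sketch, not part of this commit): the event-builder helpers now take the RUM type and name positionally, with the remaining fields passed as a Partial bag. Called from inside QwenLogger (illustrative values), a tool-call action is built like this:

// Sketch only: as used within the class; function name, prompt id, and metrics are illustrative.
const rumEvent = this.createActionEvent('tool_call', 'tool_call#read_file', {
  properties: { prompt_id: 'prompt-123' },
  snapshots: JSON.stringify({ function_name: 'read_file', success: true, duration_ms: 42 }),
});
// Resulting event shape:
// {
//   timestamp: <now>,
//   event_type: 'action',
//   type: 'tool_call',
//   name: 'tool_call#read_file',
//   properties: { prompt_id: 'prompt-123' },
//   snapshots: '{"function_name":"read_file","success":true,"duration_ms":42}'
// }
this.enqueueLogEvent(rumEvent);
this.flushIfNeeded();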

View File

@@ -141,7 +141,6 @@ export async function shutdownTelemetry(): Promise<void> {
return;
}
try {
QwenLogger.getInstance()?.shutdown();
await sdk.shutdown();
console.log('OpenTelemetry SDK shut down successfully.');
} catch (error) {

View File

@@ -161,6 +161,7 @@ export class ApiRequestEvent {
export class ApiErrorEvent {
'event.name': 'api_error';
'event.timestamp': string; // ISO 8601
response_id?: string;
model: string;
error: string;
error_type?: string;
@@ -170,6 +171,7 @@ export class ApiErrorEvent {
auth_type?: string;
constructor(
response_id: string | undefined,
model: string,
error: string,
duration_ms: number,
@@ -180,6 +182,7 @@ export class ApiErrorEvent {
) {
this['event.name'] = 'api_error';
this['event.timestamp'] = new Date().toISOString();
this.response_id = response_id;
this.model = model;
this.error = error;
this.error_type = error_type;
@@ -193,6 +196,7 @@ export class ApiErrorEvent {
export class ApiResponseEvent {
'event.name': 'api_response';
'event.timestamp': string; // ISO 8601
response_id: string;
model: string;
status_code?: number | string;
duration_ms: number;
@@ -208,6 +212,7 @@ export class ApiResponseEvent {
auth_type?: string;
constructor(
response_id: string,
model: string,
duration_ms: number,
prompt_id: string,
@@ -218,6 +223,7 @@ export class ApiResponseEvent {
) {
this['event.name'] = 'api_response';
this['event.timestamp'] = new Date().toISOString();
this.response_id = response_id;
this.model = model;
this.duration_ms = duration_ms;
this.status_code = 200;
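Note (sketch, not part of this commit): both event classes now take a response id as their first constructor argument, required for ApiResponseEvent and optional for ApiErrorEvent. Illustrative call shapes, with argument order taken from the call sites earlier in this commit:

// Sketch only: illustrative values.
const responseEvent = new ApiResponseEvent(
  'test-response-id',             // response_id (new leading argument)
  'test-model',
  100,                            // duration_ms
  'prompt-id-1',
  undefined,                      // auth_type
  undefined,                      // usage metadata, when available
  undefined,                      // serialized response text, when available
);

const errorEvent = new ApiErrorEvent(
  undefined,                      // response_id is optional; may be unknown for failed requests
  'test-model',
  'Request timeout after 60s',    // error message (illustrative)
  100,
  'prompt-id-1',
  undefined,                      // auth_type
  'TimeoutError',                 // error_type (illustrative)
);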

View File

@@ -8,3 +8,9 @@ import { setSimulate429 } from './src/utils/testUtils.js';
// Disable 429 simulation globally for all tests
setSimulate429(false);
// Some dependencies (e.g., undici) expect a global File constructor in Node.
// Provide a minimal shim for test environment if missing.
if (typeof (globalThis as unknown as { File?: unknown }).File === 'undefined') {
(globalThis as unknown as { File: unknown }).File = class {} as unknown;
}