Handle telemetry in non-interactive mode (#1002)

Changes:
- Ensure proper shutdown in non-interactive mode
- Ensure the initial user prompt is logged in non-interactive mode
- Improve telemetry for streaming: combine usage metadata across streamed chunks, and report the input token count alongside the other token counts in the API response

To test:
- Follow instructions in https://github.com/google-gemini/gemini-cli/blob/main/docs/core/telemetry.md#google-cloud
- Run CLI in non-interactive mode and observe logs/metrics in GCP Logs Explorer and Metrics Explorer

#750
This commit is contained in:
Jerop Kipruto
2025-06-13 03:44:17 -04:00
committed by GitHub
parent 8bb6eca915
commit b20c8389f3
10 changed files with 126 additions and 89 deletions

View File

@@ -16,6 +16,7 @@ export {
logApiRequest,
logApiError,
logApiResponse,
combinedUsageMetadata,
} from './loggers.js';
export {
UserPromptEvent,

View File

@@ -113,7 +113,7 @@ describe('loggers', () => {
logUserPrompt(mockConfig, event);
expect(mockLogger.emit).toHaveBeenCalledWith({
body: 'User prompt. Length: 11',
body: 'User prompt. Length: 11.',
attributes: {
'session.id': 'test-session-id',
'event.name': EVENT_USER_PROMPT,
@@ -137,7 +137,7 @@ describe('loggers', () => {
logUserPrompt(mockConfig, event);
expect(mockLogger.emit).toHaveBeenCalledWith({
body: 'User prompt. Length: 11',
body: 'User prompt. Length: 11.',
attributes: {
'session.id': 'test-session-id',
'event.name': EVENT_USER_PROMPT,
@@ -250,70 +250,42 @@ describe('loggers', () => {
getSessionId: () => 'test-session-id',
} as Config;
const mockMetrics = {
recordTokenUsageMetrics: vi.fn(),
};
beforeEach(() => {
vi.spyOn(metrics, 'recordTokenUsageMetrics').mockImplementation(
mockMetrics.recordTokenUsageMetrics,
);
});
it('should log an API request with request_text', () => {
const event = {
model: 'test-model',
input_token_count: 123,
request_text: 'This is a test request',
};
logApiRequest(mockConfig, event);
expect(mockLogger.emit).toHaveBeenCalledWith({
body: 'API request to test-model. Tokens: 123.',
body: 'API request to test-model.',
attributes: {
'session.id': 'test-session-id',
'event.name': EVENT_API_REQUEST,
'event.timestamp': '2025-01-01T00:00:00.000Z',
model: 'test-model',
input_token_count: 123,
request_text: 'This is a test request',
},
});
expect(mockMetrics.recordTokenUsageMetrics).toHaveBeenCalledWith(
mockConfig,
'test-model',
123,
'input',
);
});
it('should log an API request without request_text', () => {
const event = {
model: 'test-model',
input_token_count: 456,
};
logApiRequest(mockConfig, event);
expect(mockLogger.emit).toHaveBeenCalledWith({
body: 'API request to test-model. Tokens: 456.',
body: 'API request to test-model.',
attributes: {
'session.id': 'test-session-id',
'event.name': EVENT_API_REQUEST,
'event.timestamp': '2025-01-01T00:00:00.000Z',
model: 'test-model',
input_token_count: 456,
},
});
expect(mockMetrics.recordTokenUsageMetrics).toHaveBeenCalledWith(
mockConfig,
'test-model',
456,
'input',
);
});
});

View File

@@ -31,6 +31,10 @@ import {
} from './metrics.js';
import { isTelemetrySdkInitialized } from './sdk.js';
import { ToolConfirmationOutcome } from '../index.js';
import {
GenerateContentResponse,
GenerateContentResponseUsageMetadata,
} from '@google/genai';
const shouldLogUserPrompts = (config: Config): boolean =>
config.getTelemetryLogUserPromptsEnabled() ?? false;
@@ -119,7 +123,7 @@ export function logUserPrompt(
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `User prompt. Length: ${event.prompt_length}`,
body: `User prompt. Length: ${event.prompt_length}.`,
attributes,
};
logger.emit(logRecord);
@@ -176,16 +180,10 @@ export function logApiRequest(
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `API request to ${event.model}. Tokens: ${event.input_token_count}.`,
body: `API request to ${event.model}.`,
attributes,
};
logger.emit(logRecord);
recordTokenUsageMetrics(
config,
event.model,
event.input_token_count,
'input',
);
}
export function logApiError(
@@ -258,6 +256,12 @@ export function logApiResponse(
event.status_code,
event.error,
);
recordTokenUsageMetrics(
config,
event.model,
event.input_token_count,
'input',
);
recordTokenUsageMetrics(
config,
event.model,
@@ -278,3 +282,43 @@ export function logApiResponse(
);
recordTokenUsageMetrics(config, event.model, event.tool_token_count, 'tool');
}
/**
 * Merges the usage metadata from a stream of `GenerateContentResponse`
 * chunks into a single usage-metadata object by summing each numeric
 * token-count field across all chunks.
 *
 * Chunks without `usageMetadata` are skipped, and fields that are absent
 * or non-numeric in a chunk contribute nothing to the totals.
 *
 * @param chunks - The streamed responses whose usage metadata to combine.
 * @returns Usage metadata whose token counts are the per-field sums.
 */
export function combinedUsageMetadata(
  chunks: GenerateContentResponse[],
): GenerateContentResponseUsageMetadata {
  // Only the numeric token-count fields are meaningful to sum. The
  // *TokensDetails fields and trafficType are not numbers, so they are
  // intentionally left unset rather than forced to a type-incorrect 0.
  const metadataKeys = [
    'promptTokenCount',
    'candidatesTokenCount',
    'cachedContentTokenCount',
    'thoughtsTokenCount',
    'toolUsePromptTokenCount',
    'totalTokenCount',
  ] as const;

  const totals: GenerateContentResponseUsageMetadata = {};
  for (const key of metadataKeys) {
    totals[key] = 0;
  }

  for (const chunk of chunks) {
    if (!chunk.usageMetadata) {
      continue;
    }
    for (const key of metadataKeys) {
      const chunkValue = chunk.usageMetadata[key];
      if (typeof chunkValue === 'number') {
        totals[key] = (totals[key] ?? 0) + chunkValue;
      }
    }
  }
  return totals;
}

View File

@@ -29,7 +29,6 @@ export interface ApiRequestEvent {
'event.name': 'api_request';
'event.timestamp': string; // ISO 8601
model: string;
input_token_count: number;
request_text?: string;
}