Merge tag 'v0.1.21' of github.com:google-gemini/gemini-cli into chore/sync-gemini-cli-v0.1.21

mingholy.lmh
2025-08-20 22:24:50 +08:00
163 changed files with 8812 additions and 4098 deletions

View File

@@ -4,176 +4,290 @@
* SPDX-License-Identifier: Apache-2.0
*/
import {
  vi,
  describe,
  it,
  expect,
  afterEach,
  beforeAll,
  afterAll,
} from 'vitest';
import { ClearcutLogger, LogEventEntry, TEST_ONLY } from './clearcut-logger.js';
import { ConfigParameters } from '../../config/config.js';
import * as userAccount from '../../utils/user_account.js';
import * as userId from '../../utils/user_id.js';
import { EventMetadataKey } from './event-metadata-key.js';
import { makeFakeConfig } from '../../test-utils/config.js';
import { http, HttpResponse } from 'msw';
import { server } from '../../mocks/msw.js';
// Mock dependencies
vi.mock('../../utils/user_account');
vi.mock('../../utils/user_id');

const mockUserAccount = vi.mocked(userAccount);
const mockUserId = vi.mocked(userId);

describe('ClearcutLogger', () => {
// TODO(richieforeman): Consider moving this to test setup globally.
beforeAll(() => {
server.listen({});
});
afterEach(() => {
server.resetHandlers();
});
afterAll(() => {
server.close();
});
describe('ClearcutLogger', () => {
const NEXT_WAIT_MS = 1234;
const CLEARCUT_URL = 'https://play.googleapis.com/log';
const MOCK_DATE = new Date('2025-01-02T00:00:00.000Z');
const EXAMPLE_RESPONSE = `["${NEXT_WAIT_MS}",null,[[["ANDROID_BACKUP",0],["BATTERY_STATS",0],["SMART_SETUP",0],["TRON",0]],-3334737594024971225],[]]`;
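// For reference: the first element of that JSON array is the server's
// next-request wait time in milliseconds (sent as a string); flushToClearcut
// reads it with Number(JSON.parse(body)[0]), which yields NEXT_WAIT_MS here.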
// A helper to get the internal events array for testing
const getEvents = (l: ClearcutLogger): LogEventEntry[][] =>
l['events'].toArray() as LogEventEntry[][];
const getEventsSize = (l: ClearcutLogger): number => l['events'].size;
const getMaxEvents = (l: ClearcutLogger): number => l['max_events'];
const getMaxRetryEvents = (l: ClearcutLogger): number =>
l['max_retry_events'];
const requeueFailedEvents = (l: ClearcutLogger, events: LogEventEntry[][]) =>
l['requeueFailedEvents'](events);
  afterEach(() => {
    vi.unstubAllEnvs();
  });
function setup({
config = {} as Partial<ConfigParameters>,
lifetimeGoogleAccounts = 1,
cachedGoogleAccount = 'test@google.com',
installationId = 'test-installation-id',
} = {}) {
server.resetHandlers(
http.post(CLEARCUT_URL, () => HttpResponse.text(EXAMPLE_RESPONSE)),
);
vi.useFakeTimers();
vi.setSystemTime(MOCK_DATE);
const loggerConfig = makeFakeConfig({
...config,
});
ClearcutLogger.clearInstance();
mockUserAccount.getCachedGoogleAccount.mockReturnValue(cachedGoogleAccount);
mockUserAccount.getLifetimeGoogleAccounts.mockReturnValue(
lifetimeGoogleAccounts,
);
mockUserId.getInstallationId.mockReturnValue(installationId);
const logger = ClearcutLogger.getInstance(loggerConfig);
return { logger, loggerConfig };
}
afterEach(() => {
ClearcutLogger.clearInstance();
vi.useRealTimers();
vi.restoreAllMocks();
});
  describe('getInstance', () => {
it.each([
{ usageStatisticsEnabled: false, expectedValue: undefined },
{
usageStatisticsEnabled: true,
expectedValue: expect.any(ClearcutLogger),
},
])(
'returns an instance if usage statistics are enabled',
({ usageStatisticsEnabled, expectedValue }) => {
ClearcutLogger.clearInstance();
const { logger } = setup({
config: {
usageStatisticsEnabled,
},
});
expect(logger).toEqual(expectedValue);
},
);
it('is a singleton', () => {
ClearcutLogger.clearInstance();
const { loggerConfig } = setup();
const logger1 = ClearcutLogger.getInstance(loggerConfig);
const logger2 = ClearcutLogger.getInstance(loggerConfig);
expect(logger1).toBe(logger2);
});
});
describe('createLogEvent', () => {
it('logs the total number of google accounts', () => {
const { logger } = setup({
lifetimeGoogleAccounts: 9001,
});
const event = logger?.createLogEvent('abc', []);
expect(event?.event_metadata[0][0]).toEqual({
gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT,
value: '9001',
});
});
it('logs the current surface from a github action', () => {
const { logger } = setup({});
vi.stubEnv('GITHUB_SHA', '8675309');
const event = logger?.createLogEvent('abc', []);
expect(event?.event_metadata[0][1]).toEqual({
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
value: 'GitHub',
});
});
it('honors the value from env.SURFACE over all others', () => {
const { logger } = setup({});
vi.stubEnv('TERM_PROGRAM', 'vscode');
vi.stubEnv('SURFACE', 'ide-1234');
const event = logger?.createLogEvent('abc', []);
expect(event?.event_metadata[0][1]).toEqual({
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
value: 'ide-1234',
});
});
it.each([
{
env: {
CURSOR_TRACE_ID: 'abc123',
GITHUB_SHA: undefined,
},
expectedValue: 'cursor',
},
{
env: {
TERM_PROGRAM: 'vscode',
GITHUB_SHA: undefined,
},
expectedValue: 'vscode',
},
{
env: {
MONOSPACE_ENV: 'true',
GITHUB_SHA: undefined,
},
expectedValue: 'firebasestudio',
},
{
env: {
__COG_BASHRC_SOURCED: 'true',
GITHUB_SHA: undefined,
},
expectedValue: 'devin',
},
{
env: {
CLOUD_SHELL: 'true',
GITHUB_SHA: undefined,
},
expectedValue: 'cloudshell',
},
])(
'logs the current surface as $expectedValue, preempting vscode detection',
({ env, expectedValue }) => {
const { logger } = setup({});
for (const [key, value] of Object.entries(env)) {
vi.stubEnv(key, value);
}
vi.stubEnv('TERM_PROGRAM', 'vscode');
const event = logger?.createLogEvent('abc', []);
expect(event?.event_metadata[0][1]).toEqual({
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
value: expectedValue,
});
},
);
});
describe('enqueueLogEvent', () => {
it('should add events to the queue', () => {
const { logger } = setup();
logger!.enqueueLogEvent({ test: 'event1' });
expect(getEventsSize(logger!)).toBe(1);
});
    it('should evict the oldest event when the queue is full', () => {
      const { logger } = setup();
      for (let i = 0; i < TEST_ONLY.MAX_EVENTS; i++) {
        logger!.enqueueLogEvent({ event_id: i });
      }
      expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_EVENTS);
      const firstEvent = JSON.parse(
        getEvents(logger!)[0][0].source_extension_json,
      );
      expect(firstEvent.event_id).toBe(0);

      // This should push out the first event
      logger!.enqueueLogEvent({ event_id: TEST_ONLY.MAX_EVENTS });
      expect(getEventsSize(logger!)).toBe(TEST_ONLY.MAX_EVENTS);
      const newFirstEvent = JSON.parse(
        getEvents(logger!)[0][0].source_extension_json,
      );
      expect(newFirstEvent.event_id).toBe(1);
      const lastEvent = JSON.parse(
        getEvents(logger!)[TEST_ONLY.MAX_EVENTS - 1][0].source_extension_json,
      );
      expect(lastEvent.event_id).toBe(TEST_ONLY.MAX_EVENTS);
    });
});
  describe('flushToClearcut', () => {
    it('allows for usage with a configured proxy agent', async () => {
      const { logger } = setup({
        config: {
          proxy: 'http://mycoolproxy.whatever.com:3128',
        },
      });
      logger!.enqueueLogEvent({ event_id: 1 });

      const response = await logger!.flushToClearcut();

      expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS);
    });

    it('should clear events on successful flush', async () => {
      const { logger } = setup();
      logger!.enqueueLogEvent({ event_id: 1 });

      const response = await logger!.flushToClearcut();

      expect(getEvents(logger!)).toEqual([]);
      expect(response.nextRequestWaitMs).toBe(NEXT_WAIT_MS);
    });

    it('should handle a network error and requeue events', async () => {
      const { logger } = setup();
      server.resetHandlers(http.post(CLEARCUT_URL, () => HttpResponse.error()));
      logger!.enqueueLogEvent({ event_id: 1 });
      logger!.enqueueLogEvent({ event_id: 2 });
      expect(getEventsSize(logger!)).toBe(2);

      const x = logger!.flushToClearcut();
      await x;
expect(getEventsSize(logger!)).toBe(2);
const events = getEvents(logger!);
@@ -181,18 +295,28 @@ describe('ClearcutLogger', () => {
});
    it('should handle an HTTP error and requeue events', async () => {
      const { logger } = setup();
      server.resetHandlers(
        http.post(
          CLEARCUT_URL,
          () =>
            new HttpResponse(
              { 'the system is down': true },
              {
                status: 500,
              },
            ),
        ),
      );
      logger!.enqueueLogEvent({ event_id: 1 });
      logger!.enqueueLogEvent({ event_id: 2 });
      expect(getEventsSize(logger!)).toBe(2);

      await logger!.flushToClearcut();

      expect(getEventsSize(logger!)).toBe(2);
      expect(getEvents(logger!).length).toBe(2);
const events = getEvents(logger!);
expect(JSON.parse(events[0][0].source_extension_json).event_id).toBe(1);
});
@@ -200,7 +324,8 @@ describe('ClearcutLogger', () => {
describe('requeueFailedEvents logic', () => {
it('should limit the number of requeued events to max_retry_events', () => {
      const { logger } = setup();
      const maxRetryEvents = TEST_ONLY.MAX_RETRY_EVENTS;
const eventsToLogCount = maxRetryEvents + 5;
const eventsToSend: LogEventEntry[][] = [];
for (let i = 0; i < eventsToLogCount; i++) {
@@ -225,7 +350,8 @@ describe('ClearcutLogger', () => {
});
it('should not requeue more events than available space in the queue', () => {
      const { logger } = setup();
      const maxEvents = TEST_ONLY.MAX_EVENTS;
const spaceToLeave = 5;
const initialEventCount = maxEvents - spaceToLeave;
for (let i = 0; i < initialEventCount; i++) {

View File

@@ -4,10 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { Buffer } from 'buffer';
import * as https from 'https';
import { HttpsProxyAgent } from 'https-proxy-agent';
import {
StartSessionEvent,
EndSessionEvent,
@@ -22,6 +19,7 @@ import {
SlashCommandEvent,
MalformedJsonResponseEvent,
IdeConnectionEvent,
KittySequenceOverflowEvent,
} from '../types.js';
import { EventMetadataKey } from './event-metadata-key.js';
import { Config } from '../../config/config.js';
@@ -32,6 +30,7 @@ import {
} from '../../utils/user_account.js';
import { getInstallationId } from '../../utils/user_id.js';
import { FixedDeque } from 'mnemonist';
import { DetectedIde, detectIde } from '../../ide/detect-ide.js';
const start_session_event_name = 'start_session';
const new_prompt_event_name = 'new_prompt';
@@ -46,6 +45,7 @@ const next_speaker_check_event_name = 'next_speaker_check';
const slash_command_event_name = 'slash_command';
const malformed_json_response_event_name = 'malformed_json_response';
const ide_connection_event_name = 'ide_connection';
const kitty_sequence_overflow_event_name = 'kitty_sequence_overflow';
export interface LogResponse {
nextRequestWaitMs?: number;
@@ -56,19 +56,25 @@ export interface LogEventEntry {
source_extension_json: string;
}
export interface EventValue {
  gemini_cli_key: EventMetadataKey | string;
  value: string;
}

export interface LogEvent {
  console_type: 'GEMINI_CLI';
  application: number;
  event_name: string;
  event_metadata: EventValue[][];
  client_email?: string;
  client_install_id?: string;
}

export interface LogRequest {
  log_source_name: 'CONCORD';
  request_time_ms: number;
  log_event: LogEventEntry[][];
}
/**
* Determine the surface that the user is currently using. Surface is effectively the
@@ -80,31 +86,70 @@ export type LogEvent = {
* methods might have in their runtimes.
*/
function determineSurface(): string {
  if (process.env.SURFACE) {
    return process.env.SURFACE;
  } else if (process.env.GITHUB_SHA) {
    return 'GitHub';
  } else if (process.env.TERM_PROGRAM === 'vscode') {
    return detectIde() || DetectedIde.VSCode;
  } else {
    return 'SURFACE_NOT_SET';
  }
}
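// Precedence sketch (values illustrative): an explicit SURFACE always wins,
// then GitHub Actions detection, then TERM_PROGRAM-based IDE detection.
//   SURFACE=ide-1234, GITHUB_SHA set, TERM_PROGRAM=vscode -> 'ide-1234'
//   GITHUB_SHA set, TERM_PROGRAM=vscode                   -> 'GitHub'
//   TERM_PROGRAM=vscode only                              -> detectIde() || DetectedIde.VSCode
//   none of the above                                     -> 'SURFACE_NOT_SET'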
/**
* Clearcut URL to send logging events to.
*/
const CLEARCUT_URL = 'https://play.googleapis.com/log?format=json&hasfast=true';
/**
* Interval in which buffered events are sent to clearcut.
*/
const FLUSH_INTERVAL_MS = 1000 * 60;
/**
* Maximum amount of events to keep in memory. Events added after this amount
* are dropped until the next flush to clearcut, which happens periodically as
* defined by {@link FLUSH_INTERVAL_MS}.
*/
const MAX_EVENTS = 1000;
/**
* Maximum events to retry after a failed clearcut flush
*/
const MAX_RETRY_EVENTS = 100;
// Singleton class for batch posting log events to Clearcut. When a new event comes in, the elapsed time
// is checked and events are flushed to Clearcut if at least a minute has passed since the last flush.
export class ClearcutLogger {
private static instance: ClearcutLogger;
private config?: Config;
/**
* Queue of pending events that need to be flushed to the server. New events
* are added to this queue and then flushed on demand (via `flushToClearcut`)
*/
private readonly events: FixedDeque<LogEventEntry[]>;
  /**
   * The last time that the events were successfully flushed to the server.
   */
  private lastFlushTime: number = Date.now();

  /**
   * True while a flush is in progress. This prevents concurrent flush
   * operations.
   */
  private flushing: boolean = false;

  /**
   * True when a flush was requested during an ongoing flush.
   */
  private pendingFlush: boolean = false;

  private constructor(config?: Config) {
    this.config = config;
    this.events = new FixedDeque<LogEventEntry[]>(Array, MAX_EVENTS);
  }
static getInstance(config?: Config): ClearcutLogger | undefined {
@@ -125,7 +170,7 @@ export class ClearcutLogger {
enqueueLogEvent(event: object): void {
try {
// Manually handle overflow for FixedDeque, which throws when full.
      const wasAtCapacity = this.events.size >= MAX_EVENTS;
if (wasAtCapacity) {
this.events.shift(); // Evict oldest element to make space.
@@ -150,31 +195,14 @@ export class ClearcutLogger {
}
}
addDefaultFields(data: EventValue[]): void {
const totalAccounts = getLifetimeGoogleAccounts();
const surface = determineSurface();
const defaultLogMetadata = [
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT,
value: totalAccounts.toString(),
},
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
value: surface,
},
];
data.push(...defaultLogMetadata);
}
createLogEvent(name: string, data: EventValue[]): LogEvent {
const email = getCachedGoogleAccount();
    // Add default fields that should exist for all logs
    data = addDefaultFields(data);
const logEvent: LogEvent = {
console_type: 'GEMINI_CLI',
      application: 102, // GEMINI_CLI
event_name: name,
event_metadata: [data],
};
@@ -190,7 +218,7 @@ export class ClearcutLogger {
}
flushIfNeeded(): void {
    if (Date.now() - this.lastFlushTime < FLUSH_INTERVAL_MS) {
return;
}
@@ -217,140 +245,67 @@ export class ClearcutLogger {
const eventsToSend = this.events.toArray() as LogEventEntry[][];
this.events.clear();
    const request: LogRequest[] = [
      {
        log_source_name: 'CONCORD',
        request_time_ms: Date.now(),
        log_event: eventsToSend,
      },
    ];

    let result: LogResponse = {};
    try {
      const response = await fetch(CLEARCUT_URL, {
        method: 'POST',
        body: safeJsonStringify(request),
        headers: {
          'Content-Type': 'application/json',
        },
      });
      const responseBody = await response.text();

      if (response.status >= 200 && response.status < 300) {
        this.lastFlushTime = Date.now();
        const nextRequestWaitMs = Number(JSON.parse(responseBody)[0]);
        result = {
          ...result,
          nextRequestWaitMs,
        };
      } else {
        if (this.config?.getDebugMode()) {
          console.error(
            `Error flushing log events: HTTP ${response.status}: ${response.statusText}`,
          );
        }
        // Re-queue failed events for retry
        this.requeueFailedEvents(eventsToSend);
      }
    } catch (e: unknown) {
      if (this.config?.getDebugMode()) {
        console.error('Error flushing log events:', e as Error);
      }
      // Re-queue failed events for retry
      this.requeueFailedEvents(eventsToSend);
    }

    this.flushing = false;

    // If a flush was requested while we were flushing, flush again
    if (this.pendingFlush) {
      this.pendingFlush = false;
      // Fire and forget the pending flush
      this.flushToClearcut().catch((error) => {
        if (this.config?.getDebugMode()) {
          console.debug('Error in pending flush to Clearcut:', error);
        }
      });
    }

    return result;
  }
logStartSessionEvent(event: StartSessionEvent): void {
@@ -687,6 +642,13 @@ export class ClearcutLogger {
});
}
if (event.status) {
data.push({
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SLASH_COMMAND_STATUS,
value: JSON.stringify(event.status),
});
}
this.enqueueLogEvent(this.createLogEvent(slash_command_event_name, data));
this.flushIfNeeded();
}
@@ -718,6 +680,24 @@ export class ClearcutLogger {
this.flushIfNeeded();
}
logKittySequenceOverflowEvent(event: KittySequenceOverflowEvent): void {
const data: EventValue[] = [
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_SEQUENCE_LENGTH,
value: event.sequence_length.toString(),
},
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE,
value: event.truncated_sequence,
},
];
this.enqueueLogEvent(
this.createLogEvent(kitty_sequence_overflow_event_name, data),
);
this.flushIfNeeded();
}
logEndSessionEvent(event: EndSessionEvent): void {
const data: EventValue[] = [
{
@@ -752,24 +732,21 @@ export class ClearcutLogger {
private requeueFailedEvents(eventsToSend: LogEventEntry[][]): void {
// Add the events back to the front of the queue to be retried, but limit retry queue size
    const eventsToRetry = eventsToSend.slice(-MAX_RETRY_EVENTS); // Keep only the most recent events

    // Log a warning if we're dropping events
    if (eventsToSend.length > MAX_RETRY_EVENTS && this.config?.getDebugMode()) {
      console.warn(
        `ClearcutLogger: Dropping ${
          eventsToSend.length - MAX_RETRY_EVENTS
        } events due to retry queue limit. Total events: ${
          eventsToSend.length
        }, keeping: ${MAX_RETRY_EVENTS}`,
      );
    }

    // Determine how many events can be re-queued
    const availableSpace = MAX_EVENTS - this.events.size;
const numEventsToRequeue = Math.min(eventsToRetry.length, availableSpace);
if (numEventsToRequeue === 0) {
@@ -792,7 +769,7 @@ export class ClearcutLogger {
this.events.unshift(eventsToRequeue[i]);
}
// Clear any potential overflow
    while (this.events.size > MAX_EVENTS) {
this.events.pop();
}
@@ -803,3 +780,28 @@ export class ClearcutLogger {
}
}
}
/**
 * Adds default fields to data, and returns a new data array. These fields
 * should exist on all log events.
*/
function addDefaultFields(data: EventValue[]): EventValue[] {
const totalAccounts = getLifetimeGoogleAccounts();
const surface = determineSurface();
const defaultLogMetadata: EventValue[] = [
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT,
value: `${totalAccounts}`,
},
{
gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE,
value: surface,
},
];
return [...data, ...defaultLogMetadata];
}
export const TEST_ONLY = {
MAX_RETRY_EVENTS,
MAX_EVENTS,
};
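// Usage sketch (assumes a Config instance named `config` with usage statistics
// enabled; the event name and empty metadata are illustrative):
//
//   const logger = ClearcutLogger.getInstance(config); // undefined when stats are disabled
//   logger?.enqueueLogEvent(logger.createLogEvent('new_prompt', []));
//   logger?.flushIfNeeded(); // no-op until FLUSH_INTERVAL_MS has elapsed
//   void logger?.flushToClearcut(); // resolves to { nextRequestWaitMs?: number }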

View File

@@ -174,6 +174,9 @@ export enum EventMetadataKey {
// Logs the subcommand of the slash command.
GEMINI_CLI_SLASH_COMMAND_SUBCOMMAND = 42,
// Logs the status of the slash command (e.g. 'success', 'error')
GEMINI_CLI_SLASH_COMMAND_STATUS = 51,
// ==========================================================================
// Next Speaker Check Event Keys
// ===========================================================================
@@ -209,6 +212,16 @@ export enum EventMetadataKey {
// Logs user removed lines in edit/write tool response.
GEMINI_CLI_USER_REMOVED_LINES = 50,
// ==========================================================================
// Kitty Sequence Overflow Event Keys
// ===========================================================================
// Logs the length of the kitty sequence that overflowed.
GEMINI_CLI_KITTY_SEQUENCE_LENGTH = 53,
// Logs the truncated kitty sequence.
GEMINI_CLI_KITTY_TRUNCATED_SEQUENCE = 52,
}
export function getEventMetadataKey(

View File

@@ -28,6 +28,7 @@ export {
logApiResponse,
logFlashFallback,
logSlashCommand,
logKittySequenceOverflow,
} from './loggers.js';
export {
StartSessionEvent,
@@ -39,7 +40,10 @@ export {
ApiResponseEvent,
TelemetryEvent,
FlashFallbackEvent,
KittySequenceOverflowEvent,
SlashCommandEvent,
makeSlashCommandEvent,
SlashCommandStatus,
} from './types.js';
export { SpanStatusCode, ValueType } from '@opentelemetry/api';
export { SemanticAttributes } from '@opentelemetry/semantic-conventions';

View File

@@ -32,6 +32,7 @@ import {
NextSpeakerCheckEvent,
LoopDetectedEvent,
SlashCommandEvent,
KittySequenceOverflowEvent,
} from './types.js';
import {
recordApiErrorMetrics,
@@ -377,3 +378,21 @@ export function logIdeConnection(
};
logger.emit(logRecord);
}
export function logKittySequenceOverflow(
config: Config,
event: KittySequenceOverflowEvent,
): void {
ClearcutLogger.getInstance(config)?.logKittySequenceOverflowEvent(event);
if (!isTelemetrySdkInitialized()) return;
const attributes: LogAttributes = {
...getCommonAttributes(config),
...event,
};
const logger = logs.getLogger(SERVICE_NAME);
const logRecord: LogRecord = {
body: `Kitty sequence buffer overflow: ${event.sequence_length} bytes`,
attributes,
};
logger.emit(logRecord);
}
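// Call-site sketch (assumes a Config instance `config` and a raw `sequence`
// string; KittySequenceOverflowEvent itself truncates what it stores):
//
//   logKittySequenceOverflow(
//     config,
//     new KittySequenceOverflowEvent(sequence.length, sequence),
//   );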

View File

@@ -124,24 +124,32 @@ export function initializeTelemetry(config: Config): void {
try {
sdk.start();
    if (config.getDebugMode()) {
      console.log('OpenTelemetry SDK started successfully.');
    }
telemetryInitialized = true;
initializeMetrics(config);
} catch (error) {
console.error('Error starting OpenTelemetry SDK:', error);
}
  process.on('SIGTERM', () => {
    shutdownTelemetry(config);
  });
  process.on('SIGINT', () => {
    shutdownTelemetry(config);
  });
}
export async function shutdownTelemetry(config: Config): Promise<void> {
if (!telemetryInitialized || !sdk) {
return;
}
try {
await sdk.shutdown();
    if (config.getDebugMode()) {
      console.log('OpenTelemetry SDK shut down successfully.');
    }
} catch (error) {
console.error('Error shutting down SDK:', error);
} finally {

View File

@@ -45,7 +45,7 @@ describe('telemetry', () => {
afterEach(async () => {
// Ensure we shut down telemetry even if a test fails.
if (isTelemetrySdkInitialized()) {
      await shutdownTelemetry(mockConfig);
}
});
@@ -57,7 +57,7 @@ describe('telemetry', () => {
it('should shutdown the telemetry service', async () => {
initializeTelemetry(mockConfig);
    await shutdownTelemetry(mockConfig);
expect(mockNodeSdk.shutdown).toHaveBeenCalled();
});

View File

@@ -14,9 +14,17 @@ import {
ToolCallDecision,
} from './tool-call-decision.js';
interface BaseTelemetryEvent {
  'event.name': string;
  /** Current timestamp in ISO 8601 format */
  'event.timestamp': string;
}

type CommonFields = keyof BaseTelemetryEvent;

export class StartSessionEvent implements BaseTelemetryEvent {
  'event.name': 'cli_config';
  'event.timestamp': string;
model: string;
embedding_model: string;
sandbox_enabled: boolean;
@@ -60,9 +68,9 @@ export class StartSessionEvent {
}
}
export class EndSessionEvent implements BaseTelemetryEvent {
  'event.name': 'end_session';
  'event.timestamp': string;
session_id?: string;
constructor(config?: Config) {
@@ -72,9 +80,9 @@ export class EndSessionEvent {
}
}
export class UserPromptEvent implements BaseTelemetryEvent {
  'event.name': 'user_prompt';
  'event.timestamp': string;
prompt_length: number;
prompt_id: string;
auth_type?: string;
@@ -95,9 +103,9 @@ export class UserPromptEvent {
}
}
export class ToolCallEvent implements BaseTelemetryEvent {
  'event.name': 'tool_call';
  'event.timestamp': string;
function_name: string;
function_args: Record<string, unknown>;
duration_ms: number;
@@ -142,9 +150,9 @@ export class ToolCallEvent {
}
}
export class ApiRequestEvent implements BaseTelemetryEvent {
  'event.name': 'api_request';
  'event.timestamp': string;
model: string;
prompt_id: string;
request_text?: string;
@@ -158,7 +166,7 @@ export class ApiRequestEvent {
}
}
export class ApiErrorEvent implements BaseTelemetryEvent {
'event.name': 'api_error';
'event.timestamp': string; // ISO 8601
response_id?: string;
@@ -193,7 +201,7 @@ export class ApiErrorEvent {
}
}
export class ApiResponseEvent implements BaseTelemetryEvent {
'event.name': 'api_response';
'event.timestamp': string; // ISO 8601
response_id: string;
@@ -240,9 +248,9 @@ export class ApiResponseEvent {
}
}
export class FlashFallbackEvent implements BaseTelemetryEvent {
  'event.name': 'flash_fallback';
  'event.timestamp': string;
auth_type: string;
constructor(auth_type: string) {
@@ -258,9 +266,9 @@ export enum LoopType {
LLM_DETECTED_LOOP = 'llm_detected_loop',
}
export class LoopDetectedEvent implements BaseTelemetryEvent {
  'event.name': 'loop_detected';
  'event.timestamp': string;
loop_type: LoopType;
prompt_id: string;
@@ -272,9 +280,9 @@ export class LoopDetectedEvent {
}
}
export class NextSpeakerCheckEvent implements BaseTelemetryEvent {
  'event.name': 'next_speaker_check';
  'event.timestamp': string;
prompt_id: string;
finish_reason: string;
result: string;
@@ -288,23 +296,36 @@ export class NextSpeakerCheckEvent {
}
}
export interface SlashCommandEvent extends BaseTelemetryEvent {
  'event.name': 'slash_command';
  command: string;
  subcommand?: string;
  status?: SlashCommandStatus;
}
export function makeSlashCommandEvent({
command,
subcommand,
status,
}: Omit<SlashCommandEvent, CommonFields>): SlashCommandEvent {
return {
'event.name': 'slash_command',
'event.timestamp': new Date().toISOString(),
command,
subcommand,
status,
};
}
export enum SlashCommandStatus {
SUCCESS = 'success',
ERROR = 'error',
}
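// Sketch: building an event with the factory above (values are illustrative).
//
//   const event = makeSlashCommandEvent({
//     command: 'memory',
//     subcommand: 'show',
//     status: SlashCommandStatus.SUCCESS,
//   });
//   // event['event.name'] === 'slash_command'; event['event.timestamp'] is ISO 8601.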
export class MalformedJsonResponseEvent implements BaseTelemetryEvent {
'event.name': 'malformed_json_response';
  'event.timestamp': string;
model: string;
constructor(model: string) {
@@ -321,7 +342,7 @@ export enum IdeConnectionType {
export class IdeConnectionEvent {
'event.name': 'ide_connection';
  'event.timestamp': string;
connection_type: IdeConnectionType;
constructor(connection_type: IdeConnectionType) {
@@ -331,6 +352,20 @@ export class IdeConnectionEvent {
}
}
export class KittySequenceOverflowEvent {
'event.name': 'kitty_sequence_overflow';
'event.timestamp': string; // ISO 8601
sequence_length: number;
truncated_sequence: string;
constructor(sequence_length: number, truncated_sequence: string) {
this['event.name'] = 'kitty_sequence_overflow';
this['event.timestamp'] = new Date().toISOString();
this.sequence_length = sequence_length;
// Truncate to first 20 chars for logging (avoid logging sensitive data)
this.truncated_sequence = truncated_sequence.substring(0, 20);
}
}
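// Sketch: only the first 20 characters of the sequence are retained, so the
// full (potentially sensitive) payload is never logged.
//
//   const e = new KittySequenceOverflowEvent(1024, 'a'.repeat(1024));
//   // e.sequence_length === 1024; e.truncated_sequence.length === 20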
export type TelemetryEvent =
| StartSessionEvent
| EndSessionEvent
@@ -342,6 +377,7 @@ export type TelemetryEvent =
| FlashFallbackEvent
| LoopDetectedEvent
| NextSpeakerCheckEvent
  | KittySequenceOverflowEvent
  | MalformedJsonResponseEvent
  | IdeConnectionEvent
  | SlashCommandEvent;