bug(ux): update context percentage when /clear command is run (#4162)

Author: Nick Salerni
Date: 2025-07-17 07:14:35 -07:00
Committed by: GitHub
Co-authored-by: matt korwel <matt.korwel@gmail.com>
Parent: ac8e98511e
Commit: 0d64355be6
4 changed files with 160 additions and 7 deletions
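
Sketched below is the wiring the commit title implies, with the handler and helper names assumed rather than taken from this diff: when the user runs /clear, the handler resets the conversation and also zeroes the telemetry service's last prompt token count, so the context percentage shown in the UI updates immediately instead of holding the stale value.

// A minimal sketch, not the actual command implementation. handleClearCommand()
// and clearConversation() are hypothetical stand-ins for the CLI's existing
// /clear plumbing; resetLastPromptTokenCount() is the method added below.
import { uiTelemetryService } from './uiTelemetry.js'; // assumed import path
declare function clearConversation(): Promise<void>; // hypothetical existing reset

async function handleClearCommand(): Promise<void> {
  await clearConversation(); // wipe the chat history as before
  uiTelemetryService.resetLastPromptTokenCount(); // new: refresh the context %
}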


@@ -508,4 +508,116 @@ describe('UiTelemetryService', () => {
      expect(tools.byName['tool_B'].count).toBe(1);
    });
  });

  describe('resetLastPromptTokenCount', () => {
    it('should reset the last prompt token count to 0', () => {
      // First, set up some initial token count
      const event = {
        'event.name': EVENT_API_RESPONSE,
        model: 'gemini-2.5-pro',
        duration_ms: 500,
        input_token_count: 100,
        output_token_count: 200,
        total_token_count: 300,
        cached_content_token_count: 50,
        thoughts_token_count: 20,
        tool_token_count: 30,
      } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
      service.addEvent(event);
      expect(service.getLastPromptTokenCount()).toBe(100);

      // Now reset the token count
      service.resetLastPromptTokenCount();
      expect(service.getLastPromptTokenCount()).toBe(0);
    });

    it('should emit an update event when resetLastPromptTokenCount is called', () => {
      const spy = vi.fn();
      service.on('update', spy);

      // Set up initial token count
      const event = {
        'event.name': EVENT_API_RESPONSE,
        model: 'gemini-2.5-pro',
        duration_ms: 500,
        input_token_count: 100,
        output_token_count: 200,
        total_token_count: 300,
        cached_content_token_count: 50,
        thoughts_token_count: 20,
        tool_token_count: 30,
      } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
      service.addEvent(event);
      spy.mockClear(); // Clear the spy to focus on the reset call

      service.resetLastPromptTokenCount();

      expect(spy).toHaveBeenCalledOnce();
      const { metrics, lastPromptTokenCount } = spy.mock.calls[0][0];
      expect(metrics).toBeDefined();
      expect(lastPromptTokenCount).toBe(0);
    });

    it('should not affect other metrics when resetLastPromptTokenCount is called', () => {
      // Set up initial state with some metrics
      const event = {
        'event.name': EVENT_API_RESPONSE,
        model: 'gemini-2.5-pro',
        duration_ms: 500,
        input_token_count: 100,
        output_token_count: 200,
        total_token_count: 300,
        cached_content_token_count: 50,
        thoughts_token_count: 20,
        tool_token_count: 30,
      } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
      service.addEvent(event);

      const metricsBefore = service.getMetrics();
      service.resetLastPromptTokenCount();
      const metricsAfter = service.getMetrics();

      // Metrics should be unchanged
      expect(metricsAfter).toEqual(metricsBefore);
      // Only the last prompt token count should be reset
      expect(service.getLastPromptTokenCount()).toBe(0);
    });

    it('should work correctly when called multiple times', () => {
      const spy = vi.fn();
      service.on('update', spy);

      // Set up initial token count
      const event = {
        'event.name': EVENT_API_RESPONSE,
        model: 'gemini-2.5-pro',
        duration_ms: 500,
        input_token_count: 100,
        output_token_count: 200,
        total_token_count: 300,
        cached_content_token_count: 50,
        thoughts_token_count: 20,
        tool_token_count: 30,
      } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
      service.addEvent(event);
      expect(service.getLastPromptTokenCount()).toBe(100);

      // Reset once
      service.resetLastPromptTokenCount();
      expect(service.getLastPromptTokenCount()).toBe(0);

      // Reset again - should still be 0 and still emit event
      spy.mockClear();
      service.resetLastPromptTokenCount();
      expect(service.getLastPromptTokenCount()).toBe(0);
      expect(spy).toHaveBeenCalledOnce();
    });
  });
});
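
All four tests above build the same ApiResponseEvent literal. A small factory, shown here only as a possible cleanup and not part of this commit, would keep the fixture in one place:

// Hypothetical helper; the field values mirror the tests above.
const makeApiResponseEvent = (overrides: Partial<ApiResponseEvent> = {}) =>
  ({
    'event.name': EVENT_API_RESPONSE,
    model: 'gemini-2.5-pro',
    duration_ms: 500,
    input_token_count: 100,
    output_token_count: 200,
    total_token_count: 300,
    cached_content_token_count: 50,
    thoughts_token_count: 20,
    tool_token_count: 30,
    ...overrides,
  }) as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };

// Usage in a test:
// service.addEvent(makeApiResponseEvent({ input_token_count: 250 }));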


@@ -133,6 +133,14 @@ export class UiTelemetryService extends EventEmitter {
    return this.#lastPromptTokenCount;
  }

  resetLastPromptTokenCount(): void {
    this.#lastPromptTokenCount = 0;
    this.emit('update', {
      metrics: this.#metrics,
      lastPromptTokenCount: this.#lastPromptTokenCount,
    });
  }

  private getOrCreateModelMetrics(modelName: string): ModelMetrics {
    if (!this.#metrics.models[modelName]) {
      this.#metrics.models[modelName] = createInitialModelMetrics();
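
Because resetLastPromptTokenCount emits an 'update' with the same { metrics, lastPromptTokenCount } payload the tests assert on, existing subscribers appear to need no changes to pick up the reset. A minimal consumer sketch, assuming a token-limit constant and a redrawFooter() helper that are not part of this diff:

// Hypothetical subscriber: recomputes the context percentage on every
// 'update', including the reset fired by /clear.
declare const uiTelemetryService: UiTelemetryService; // the service defined above
declare function redrawFooter(text: string): void; // hypothetical UI helper
const TOKEN_LIMIT = 1_048_576; // assumed context window size for the model

uiTelemetryService.on('update', ({ lastPromptTokenCount }) => {
  const percentLeft = ((TOKEN_LIMIT - lastPromptTokenCount) / TOKEN_LIMIT) * 100;
  redrawFooter(`${percentLeft.toFixed(0)}% context left`);
});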