test(nonInteractiveCli): add tests and remove unused cost info

This commit is contained in:
mingholy.lmh
2025-11-05 17:09:37 +08:00
parent a962e10406
commit 49b1018337
10 changed files with 2746 additions and 85 deletions

File diff suppressed because it is too large. [Load Diff]

View File

@@ -56,7 +56,6 @@ export interface ResultOptions {
readonly apiDurationMs: number; readonly apiDurationMs: number;
readonly numTurns: number; readonly numTurns: number;
readonly usage?: ExtendedUsage; readonly usage?: ExtendedUsage;
readonly totalCostUsd?: number;
readonly stats?: SessionMetrics; readonly stats?: SessionMetrics;
readonly summary?: string; readonly summary?: string;
readonly subtype?: string; readonly subtype?: string;
@@ -1020,7 +1019,6 @@ export abstract class BaseJsonOutputAdapter {
duration_ms: options.durationMs, duration_ms: options.durationMs,
duration_api_ms: options.apiDurationMs, duration_api_ms: options.apiDurationMs,
num_turns: options.numTurns, num_turns: options.numTurns,
total_cost_usd: options.totalCostUsd ?? 0,
usage, usage,
permission_denials: [], permission_denials: [],
error: { message: errorMessage }, error: { message: errorMessage },
@@ -1037,7 +1035,6 @@ export abstract class BaseJsonOutputAdapter {
duration_api_ms: options.apiDurationMs, duration_api_ms: options.apiDurationMs,
num_turns: options.numTurns, num_turns: options.numTurns,
result: resultText, result: resultText,
total_cost_usd: options.totalCostUsd ?? 0,
usage, usage,
permission_denials: [], permission_denials: [],
}; };
@@ -1075,7 +1072,6 @@ export abstract class BaseJsonOutputAdapter {
duration_ms: 0, duration_ms: 0,
duration_api_ms: 0, duration_api_ms: 0,
num_turns: numTurns, num_turns: numTurns,
total_cost_usd: 0,
usage, usage,
permission_denials: [], permission_denials: [],
error: { message: errorMessage }, error: { message: errorMessage },

View File

@@ -392,7 +392,6 @@ describe('JsonOutputAdapter', () => {
durationMs: 1000, durationMs: 1000,
apiDurationMs: 800, apiDurationMs: 800,
numTurns: 1, numTurns: 1,
totalCostUsd: 0.01,
}); });
expect(stdoutWriteSpy).toHaveBeenCalled(); expect(stdoutWriteSpy).toHaveBeenCalled();
@@ -414,7 +413,6 @@ describe('JsonOutputAdapter', () => {
expect(resultMessage.result).toBe('Response text'); expect(resultMessage.result).toBe('Response text');
expect(resultMessage.duration_ms).toBe(1000); expect(resultMessage.duration_ms).toBe(1000);
expect(resultMessage.num_turns).toBe(1); expect(resultMessage.num_turns).toBe(1);
expect(resultMessage.total_cost_usd).toBe(0.01);
}); });
it('should emit error result', () => { it('should emit error result', () => {
@@ -424,7 +422,6 @@ describe('JsonOutputAdapter', () => {
durationMs: 500, durationMs: 500,
apiDurationMs: 300, apiDurationMs: 300,
numTurns: 1, numTurns: 1,
totalCostUsd: 0.005,
}); });
const output = stdoutWriteSpy.mock.calls[0][0] as string; const output = stdoutWriteSpy.mock.calls[0][0] as string;

View File

@@ -612,7 +612,6 @@ describe('StreamJsonOutputAdapter', () => {
durationMs: 1000, durationMs: 1000,
apiDurationMs: 800, apiDurationMs: 800,
numTurns: 1, numTurns: 1,
totalCostUsd: 0.01,
}); });
expect(stdoutWriteSpy).toHaveBeenCalled(); expect(stdoutWriteSpy).toHaveBeenCalled();
@@ -625,7 +624,6 @@ describe('StreamJsonOutputAdapter', () => {
expect(parsed.result).toBe('Response text'); expect(parsed.result).toBe('Response text');
expect(parsed.duration_ms).toBe(1000); expect(parsed.duration_ms).toBe(1000);
expect(parsed.num_turns).toBe(1); expect(parsed.num_turns).toBe(1);
expect(parsed.total_cost_usd).toBe(0.01);
}); });
it('should emit error result', () => { it('should emit error result', () => {
@@ -636,7 +634,6 @@ describe('StreamJsonOutputAdapter', () => {
durationMs: 500, durationMs: 500,
apiDurationMs: 300, apiDurationMs: 300,
numTurns: 1, numTurns: 1,
totalCostUsd: 0.005,
}); });
const output = stdoutWriteSpy.mock.calls[0][0] as string; const output = stdoutWriteSpy.mock.calls[0][0] as string;

View File

@@ -608,7 +608,6 @@ class SessionManager {
apiDurationMs, apiDurationMs,
numTurns, numTurns,
usage: undefined, usage: undefined,
totalCostUsd: undefined,
}); });
} }

View File

@@ -36,7 +36,6 @@ export interface ModelUsage {
cacheReadInputTokens: number; cacheReadInputTokens: number;
cacheCreationInputTokens: number; cacheCreationInputTokens: number;
webSearchRequests: number; webSearchRequests: number;
costUSD: number;
contextWindow: number; contextWindow: number;
} }
@@ -162,7 +161,6 @@ export interface CLIResultMessageSuccess {
duration_api_ms: number; duration_api_ms: number;
num_turns: number; num_turns: number;
result: string; result: string;
total_cost_usd: number;
usage: ExtendedUsage; usage: ExtendedUsage;
modelUsage?: Record<string, ModelUsage>; modelUsage?: Record<string, ModelUsage>;
permission_denials: CLIPermissionDenial[]; permission_denials: CLIPermissionDenial[];
@@ -178,7 +176,6 @@ export interface CLIResultMessageError {
duration_ms: number; duration_ms: number;
duration_api_ms: number; duration_api_ms: number;
num_turns: number; num_turns: number;
total_cost_usd: number;
usage: ExtendedUsage; usage: ExtendedUsage;
modelUsage?: Record<string, ModelUsage>; modelUsage?: Record<string, ModelUsage>;
permission_denials: CLIPermissionDenial[]; permission_denials: CLIPermissionDenial[];

View File

@@ -173,6 +173,45 @@ describe('runNonInteractive', () => {
vi.restoreAllMocks(); vi.restoreAllMocks();
}); });
/**
 * Builds a SessionMetrics fixture with every counter zeroed out.
 * Individual tests can pass `overrides` to replace whole sections
 * (models / tools / files) as needed.
 */
function createMockMetrics(
  overrides?: Partial<SessionMetrics>,
): SessionMetrics {
  // Baseline fixture: no model activity, no tool calls, no file edits.
  const base: SessionMetrics = {
    models: {},
    tools: {
      totalCalls: 0,
      totalSuccess: 0,
      totalFail: 0,
      totalDurationMs: 0,
      totalDecisions: {
        accept: 0,
        reject: 0,
        modify: 0,
        auto_accept: 0,
      },
      byName: {},
    },
    files: {
      totalLinesAdded: 0,
      totalLinesRemoved: 0,
    },
  };
  // Shallow-merge: an override replaces its entire top-level section.
  return { ...base, ...overrides };
}
/**
 * Points uiTelemetryService.getMetrics() at a freshly built metrics fixture.
 * Call in beforeEach or at the start of any test that reads session metrics.
 */
function setupMetricsMock(overrides?: Partial<SessionMetrics>): void {
  vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(
    createMockMetrics(overrides),
  );
}
async function* createStreamFromEvents( async function* createStreamFromEvents(
events: ServerGeminiStreamEvent[], events: ServerGeminiStreamEvent[],
): AsyncGenerator<ServerGeminiStreamEvent> { ): AsyncGenerator<ServerGeminiStreamEvent> {
@@ -475,27 +514,7 @@ describe('runNonInteractive', () => {
createStreamFromEvents(events), createStreamFromEvents(events),
); );
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON); (mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON);
const mockMetrics: SessionMetrics = { setupMetricsMock();
models: {},
tools: {
totalCalls: 0,
totalSuccess: 0,
totalFail: 0,
totalDurationMs: 0,
totalDecisions: {
accept: 0,
reject: 0,
modify: 0,
auto_accept: 0,
},
byName: {},
},
files: {
totalLinesAdded: 0,
totalLinesRemoved: 0,
},
};
vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(mockMetrics);
await runNonInteractive( await runNonInteractive(
mockConfig, mockConfig,
@@ -527,7 +546,9 @@ describe('runNonInteractive', () => {
); );
expect(resultMessage).toBeTruthy(); expect(resultMessage).toBeTruthy();
expect(resultMessage?.result).toBe('Hello World'); expect(resultMessage?.result).toBe('Hello World');
expect(resultMessage?.stats).toEqual(mockMetrics); // Get the actual metrics that were used
const actualMetrics = vi.mocked(uiTelemetryService.getMetrics)();
expect(resultMessage?.stats).toEqual(actualMetrics);
}); });
it('should write JSON output with stats for tool-only commands (no text response)', async () => { it('should write JSON output with stats for tool-only commands (no text response)', async () => {
@@ -568,8 +589,7 @@ describe('runNonInteractive', () => {
.mockReturnValueOnce(createStreamFromEvents(secondCallEvents)); .mockReturnValueOnce(createStreamFromEvents(secondCallEvents));
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON); (mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON);
const mockMetrics: SessionMetrics = { setupMetricsMock({
models: {},
tools: { tools: {
totalCalls: 1, totalCalls: 1,
totalSuccess: 1, totalSuccess: 1,
@@ -596,12 +616,7 @@ describe('runNonInteractive', () => {
}, },
}, },
}, },
files: { });
totalLinesAdded: 0,
totalLinesRemoved: 0,
},
};
vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(mockMetrics);
await runNonInteractive( await runNonInteractive(
mockConfig, mockConfig,
@@ -651,27 +666,7 @@ describe('runNonInteractive', () => {
createStreamFromEvents(events), createStreamFromEvents(events),
); );
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON); (mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON);
const mockMetrics: SessionMetrics = { setupMetricsMock();
models: {},
tools: {
totalCalls: 0,
totalSuccess: 0,
totalFail: 0,
totalDurationMs: 0,
totalDecisions: {
accept: 0,
reject: 0,
modify: 0,
auto_accept: 0,
},
byName: {},
},
files: {
totalLinesAdded: 0,
totalLinesRemoved: 0,
},
};
vi.mocked(uiTelemetryService.getMetrics).mockReturnValue(mockMetrics);
await runNonInteractive( await runNonInteractive(
mockConfig, mockConfig,
@@ -703,11 +698,14 @@ describe('runNonInteractive', () => {
); );
expect(resultMessage).toBeTruthy(); expect(resultMessage).toBeTruthy();
expect(resultMessage?.result).toBe(''); expect(resultMessage?.result).toBe('');
expect(resultMessage?.stats).toEqual(mockMetrics); // Get the actual metrics that were used
const actualMetrics = vi.mocked(uiTelemetryService.getMetrics)();
expect(resultMessage?.stats).toEqual(actualMetrics);
}); });
it('should handle errors in JSON format', async () => { it('should handle errors in JSON format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON); (mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON);
setupMetricsMock();
const testError = new Error('Invalid input provided'); const testError = new Error('Invalid input provided');
mockGeminiClient.sendMessageStream.mockImplementation(() => { mockGeminiClient.sendMessageStream.mockImplementation(() => {
@@ -753,6 +751,7 @@ describe('runNonInteractive', () => {
it('should handle FatalInputError with custom exit code in JSON format', async () => { it('should handle FatalInputError with custom exit code in JSON format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON); (mockConfig.getOutputFormat as Mock).mockReturnValue(OutputFormat.JSON);
setupMetricsMock();
const fatalError = new FatalInputError('Invalid command syntax provided'); const fatalError = new FatalInputError('Invalid command syntax provided');
mockGeminiClient.sendMessageStream.mockImplementation(() => { mockGeminiClient.sendMessageStream.mockImplementation(() => {
@@ -950,6 +949,7 @@ describe('runNonInteractive', () => {
it('should emit stream-json envelopes when output format is stream-json', async () => { it('should emit stream-json envelopes when output format is stream-json', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1065,6 +1065,25 @@ describe('runNonInteractive', () => {
it('should include usage metadata and API duration in stream-json result', async () => { it('should include usage metadata and API duration in stream-json result', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock({
models: {
'test-model': {
api: {
totalRequests: 1,
totalErrors: 0,
totalLatencyMs: 500,
},
tokens: {
prompt: 11,
candidates: 5,
total: 16,
cached: 3,
thoughts: 0,
tool: 0,
},
},
},
});
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1125,6 +1144,7 @@ describe('runNonInteractive', () => {
it('should not emit user message when userMessage option is provided (stream-json input binding)', async () => { it('should not emit user message when userMessage option is provided (stream-json input binding)', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1198,6 +1218,7 @@ describe('runNonInteractive', () => {
it('should emit tool results as user messages in stream-json format', async () => { it('should emit tool results as user messages in stream-json format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1297,6 +1318,7 @@ describe('runNonInteractive', () => {
it('should emit tool errors in tool_result blocks in stream-json format', async () => { it('should emit tool errors in tool_result blocks in stream-json format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1390,6 +1412,7 @@ describe('runNonInteractive', () => {
it('should emit partial messages when includePartialMessages is true', async () => { it('should emit partial messages when includePartialMessages is true', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(true); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(true);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1446,6 +1469,7 @@ describe('runNonInteractive', () => {
it('should handle thinking blocks in stream-json format', async () => { it('should handle thinking blocks in stream-json format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1503,6 +1527,7 @@ describe('runNonInteractive', () => {
it('should handle multiple tool calls in stream-json format', async () => { it('should handle multiple tool calls in stream-json format', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {
@@ -1613,6 +1638,7 @@ describe('runNonInteractive', () => {
it('should handle userMessage with text content blocks in stream-json input mode', async () => { it('should handle userMessage with text content blocks in stream-json input mode', async () => {
(mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json'); (mockConfig.getOutputFormat as Mock).mockReturnValue('stream-json');
(mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false); (mockConfig.getIncludePartialMessages as Mock).mockReturnValue(false);
setupMetricsMock();
const writes: string[] = []; const writes: string[] = [];
processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => { processStdoutSpy.mockImplementation((chunk: string | Uint8Array) => {

View File

@@ -36,10 +36,9 @@ import {
import { import {
normalizePartList, normalizePartList,
extractPartsFromUserMessage, extractPartsFromUserMessage,
extractUsageFromGeminiClient,
calculateApproximateCost,
buildSystemMessage, buildSystemMessage,
createTaskToolProgressHandler, createTaskToolProgressHandler,
computeUsageFromMetrics,
} from './utils/nonInteractiveHelpers.js'; } from './utils/nonInteractiveHelpers.js';
/** /**
@@ -315,8 +314,10 @@ export async function runNonInteractive(
} }
currentMessages = [{ role: 'user', parts: toolResponseParts }]; currentMessages = [{ role: 'user', parts: toolResponseParts }];
} else { } else {
const usage = extractUsageFromGeminiClient(geminiClient); // For JSON and STREAM_JSON modes, compute usage from metrics
if (adapter) { if (adapter) {
const metrics = uiTelemetryService.getMetrics();
const usage = computeUsageFromMetrics(metrics);
// Get stats for JSON format output // Get stats for JSON format output
const stats = const stats =
outputFormat === OutputFormat.JSON outputFormat === OutputFormat.JSON
@@ -328,20 +329,21 @@ export async function runNonInteractive(
apiDurationMs: totalApiDurationMs, apiDurationMs: totalApiDurationMs,
numTurns: turnCount, numTurns: turnCount,
usage, usage,
totalCostUsd: calculateApproximateCost(usage),
stats, stats,
}); });
} else { } else {
// Text output mode // Text output mode - no usage needed
process.stdout.write('\n'); process.stdout.write('\n');
} }
return; return;
} }
} }
} catch (error) { } catch (error) {
const usage = extractUsageFromGeminiClient(config.getGeminiClient()); // For JSON and STREAM_JSON modes, compute usage from metrics
const message = error instanceof Error ? error.message : String(error); const message = error instanceof Error ? error.message : String(error);
if (adapter) { if (adapter) {
const metrics = uiTelemetryService.getMetrics();
const usage = computeUsageFromMetrics(metrics);
// Get stats for JSON format output // Get stats for JSON format output
const stats = const stats =
outputFormat === OutputFormat.JSON outputFormat === OutputFormat.JSON
@@ -354,7 +356,6 @@ export async function runNonInteractive(
numTurns: turnCount, numTurns: turnCount,
errorMessage: message, errorMessage: message,
usage, usage,
totalCostUsd: calculateApproximateCost(usage),
stats, stats,
}); });
} }

File diff suppressed because it is too large. [Load Diff]

View File

@@ -11,19 +11,20 @@ import type {
OutputUpdateHandler, OutputUpdateHandler,
ToolCallRequestInfo, ToolCallRequestInfo,
ToolCallResponseInfo, ToolCallResponseInfo,
SessionMetrics,
} from '@qwen-code/qwen-code-core'; } from '@qwen-code/qwen-code-core';
import { ToolErrorType } from '@qwen-code/qwen-code-core'; import { ToolErrorType } from '@qwen-code/qwen-code-core';
import type { Part, PartListUnion } from '@google/genai'; import type { Part, PartListUnion } from '@google/genai';
import type { import type {
CLIUserMessage, CLIUserMessage,
Usage, Usage,
ExtendedUsage,
PermissionMode, PermissionMode,
CLISystemMessage, CLISystemMessage,
} from '../nonInteractive/types.js'; } from '../nonInteractive/types.js';
import { CommandService } from '../services/CommandService.js'; import { CommandService } from '../services/CommandService.js';
import { BuiltinCommandLoader } from '../services/BuiltinCommandLoader.js'; import { BuiltinCommandLoader } from '../services/BuiltinCommandLoader.js';
import type { JsonOutputAdapterInterface } from '../nonInteractive/io/BaseJsonOutputAdapter.js'; import type { JsonOutputAdapterInterface } from '../nonInteractive/io/BaseJsonOutputAdapter.js';
import { computeSessionStats } from '../ui/utils/computeStats.js';
/** /**
* Normalizes various part list formats into a consistent Part[] array. * Normalizes various part list formats into a consistent Part[] array.
@@ -147,20 +148,38 @@ export function extractUsageFromGeminiClient(
} }
/** /**
* Calculates approximate cost for API usage. * Computes Usage information from SessionMetrics using computeSessionStats.
* Currently returns 0 as a placeholder - cost calculation logic can be added here. * Aggregates token usage across all models in the session.
* *
* @param usage - Usage information from API response * @param metrics - Session metrics from uiTelemetryService
* @returns Approximate cost in USD or undefined if not calculable * @returns Usage object with token counts
*/ */
export function calculateApproximateCost( export function computeUsageFromMetrics(metrics: SessionMetrics): Usage {
usage: Usage | ExtendedUsage | undefined, const stats = computeSessionStats(metrics);
): number | undefined { const { models } = metrics;
if (!usage) {
return undefined; // Sum up output tokens (candidates) and total tokens across all models
const totalOutputTokens = Object.values(models).reduce(
(acc, model) => acc + model.tokens.candidates,
0,
);
const totalTokens = Object.values(models).reduce(
(acc, model) => acc + model.tokens.total,
0,
);
const usage: Usage = {
input_tokens: stats.totalPromptTokens,
output_tokens: totalOutputTokens,
cache_read_input_tokens: stats.totalCachedTokens,
};
// Only include total_tokens if it's greater than 0
if (totalTokens > 0) {
usage.total_tokens = totalTokens;
} }
// TODO: Implement actual cost calculation based on token counts and model pricing
return 0; return usage;
} }
/** /**