From 2acadecaa1407f8b9b6281cc2fdcaec92d34d2ce Mon Sep 17 00:00:00 2001
From: Sandy Tao
Date: Sat, 6 Sep 2025 00:01:21 -0700
Subject: [PATCH] Fix(core): Do not retry if last chunk is empty with
 finishReason previous chunks are good (#7859)

---
 packages/core/src/core/geminiChat.test.ts | 52 +++++++++++++++++++++++
 packages/core/src/core/geminiChat.ts      | 16 ++++---
 2 files changed, 62 insertions(+), 6 deletions(-)

diff --git a/packages/core/src/core/geminiChat.test.ts b/packages/core/src/core/geminiChat.test.ts
index e2ccc305..a69292fb 100644
--- a/packages/core/src/core/geminiChat.test.ts
+++ b/packages/core/src/core/geminiChat.test.ts
@@ -426,6 +426,58 @@ describe('GeminiChat', () => {
         })(),
       ).rejects.toThrow(EmptyStreamError);
     });
+
+    it('should succeed if the stream ends with an invalid part but has a finishReason and contained a valid part', async () => {
+      // 1. Mock a stream that sends a valid chunk, then an invalid one, but has a finish reason.
+      const streamWithInvalidEnd = (async function* () {
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [{ text: 'Initial valid content...' }],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+        // This second chunk is invalid, but the response has a finishReason.
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [{ text: '' }], // Invalid part
+              },
+              finishReason: 'STOP',
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+
+      vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
+        streamWithInvalidEnd,
+      );
+
+      // 2. Action & Assert: The stream should complete without throwing an error.
+      const stream = await chat.sendMessageStream(
+        { message: 'test message' },
+        'prompt-id-valid-then-invalid-end',
+      );
+      await expect(
+        (async () => {
+          for await (const _ of stream) {
+            /* consume stream */
+          }
+        })(),
+      ).resolves.not.toThrow();
+
+      // 3. Verify history was recorded correctly with only the valid part.
+      const history = chat.getHistory();
+      expect(history.length).toBe(2); // user turn + model turn
+      const modelTurn = history[1]!;
+      expect(modelTurn?.parts?.length).toBe(1);
+      expect(modelTurn?.parts![0]!.text).toBe('Initial valid content...');
+    });
     it('should not consolidate text into a part that also contains a functionCall', async () => {
       // 1. Mock the API to stream a malformed part followed by a valid text part.
       const multiChunkStream = (async function* () {
diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts
index 4c62887e..5a4f578e 100644
--- a/packages/core/src/core/geminiChat.ts
+++ b/packages/core/src/core/geminiChat.ts
@@ -579,6 +579,7 @@ export class GeminiChat {
   ): AsyncGenerator {
     const modelResponseParts: Part[] = [];
     let hasReceivedAnyChunk = false;
+    let hasReceivedValidChunk = false;
     let hasToolCall = false;
     let lastChunk: GenerateContentResponse | null = null;
     let lastChunkIsInvalid = false;
@@ -588,6 +589,7 @@
       lastChunk = chunk;

       if (isValidResponse(chunk)) {
+        hasReceivedValidChunk = true;
         lastChunkIsInvalid = false;
         const content = chunk.candidates?.[0]?.content;
         if (content?.parts) {
@@ -614,15 +616,17 @@
       (candidate) => candidate.finishReason,
     );

-    // --- FIX: The entire validation block was restructured for clarity and correctness ---
     // Stream validation logic: A stream is considered successful if:
     // 1. There's a tool call (tool calls can end without explicit finish reasons), OR
-    // 2. Both conditions are met: last chunk is valid AND any candidate has a finish reason
+    // 2. There's a finish reason AND the last chunk is valid (or at least one earlier chunk was valid)
     //
-    // We throw an error only when there's no tool call AND either:
-    // - The last chunk is invalid, OR
-    // - No candidate in the last chunk has a finish reason
-    if (!hasToolCall && (lastChunkIsInvalid || !hasFinishReason)) {
+    // We throw an error only when there's no tool call AND:
+    // - No finish reason, OR
+    // - The last chunk is invalid and no valid chunk was ever received
+    if (
+      !hasToolCall &&
+      (!hasFinishReason || (lastChunkIsInvalid && !hasReceivedValidChunk))
+    ) {
       throw new EmptyStreamError(
         'Model stream ended with an invalid chunk or missing finish reason.',
       );
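
For reference, the end-of-stream check introduced above can be read as a small pure predicate over the four flags tracked in processStreamResponse. The sketch below is illustrative only and is not part of the patch; streamEndedCleanly and its standalone boolean parameters are hypothetical stand-ins for the local variables hasToolCall, hasFinishReason, lastChunkIsInvalid, and hasReceivedValidChunk.

// Minimal sketch, assuming the four stream flags are available as plain booleans.
// streamEndedCleanly() is a hypothetical helper, not an export of geminiChat.ts.
function streamEndedCleanly(
  hasToolCall: boolean,
  hasFinishReason: boolean,
  lastChunkIsInvalid: boolean,
  hasReceivedValidChunk: boolean,
): boolean {
  // Mirrors the patched condition: the stream is treated as empty only when there
  // is no tool call AND either no finish reason was reported, or the last chunk is
  // invalid and the stream never produced a single valid chunk.
  const shouldThrow =
    !hasToolCall &&
    (!hasFinishReason || (lastChunkIsInvalid && !hasReceivedValidChunk));
  return !shouldThrow;
}

// The scenario this patch addresses: valid content followed by an empty final chunk
// that still carries finishReason: 'STOP' -> treated as a clean end of stream.
console.assert(streamEndedCleanly(false, true, true, true) === true);
// Still an EmptyStreamError: only invalid chunks were ever received.
console.assert(streamEndedCleanly(false, true, true, false) === false);
// Still an EmptyStreamError: no finish reason and no tool call.
console.assert(streamEndedCleanly(false, false, false, true) === false);
// Tool calls may end without an explicit finish reason.
console.assert(streamEndedCleanly(true, false, false, false) === true);

Compared with the old condition !hasToolCall && (lastChunkIsInvalid || !hasFinishReason), the only behavioral change is the first case: an invalid final chunk no longer raises EmptyStreamError (and, per the commit subject, no longer triggers a retry) when a finish reason is present and earlier chunks carried valid content.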