Ensure a Strictly Alternating Message History (#6995)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
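The diff below touches two areas: the GeminiChat tests and the GeminiChat implementation itself. For orientation, here is a minimal, self-contained sketch of the invariant the commit enforces. It is illustrative only; the Turn type and assertCanAppend helper are not part of the commit, which implements the same guard inside GeminiChat.recordHistory.

// Illustrative sketch only -- not part of the diff below.
type Role = 'user' | 'model';
interface Turn {
  role: Role;
  parts: unknown[];
}

function assertCanAppend(history: Turn[], next: Turn): void {
  const last = history[history.length - 1];
  // Mirrors the guard added in this commit: two consecutive user turns are rejected.
  if (last?.role === 'user' && next.role === 'user') {
    throw new Error('Cannot add a user turn after another user turn.');
  }
}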
@@ -76,6 +76,142 @@ describe('GeminiChat', () => {
   });

   describe('sendMessage', () => {
+    it('should throw an error when attempting to add a user turn after another user turn', async () => {
+      // 1. Setup: Create a history that already ends with a user turn (a functionResponse).
+      const initialHistory: Content[] = [
+        { role: 'user', parts: [{ text: 'Initial prompt' }] },
+        {
+          role: 'model',
+          parts: [{ functionCall: { name: 'test_tool', args: {} } }],
+        },
+        {
+          role: 'user',
+          parts: [{ functionResponse: { name: 'test_tool', response: {} } }],
+        },
+      ];
+      chat.setHistory(initialHistory);
+
+      // 2. Mock a valid model response so the call doesn't fail for other reasons.
+      const mockResponse = {
+        candidates: [
+          { content: { role: 'model', parts: [{ text: 'some response' }] } },
+        ],
+      } as unknown as GenerateContentResponse;
+      vi.mocked(mockModelsModule.generateContent).mockResolvedValue(
+        mockResponse,
+      );
+
+      // 3. Action & Assert: Expect that sending another user message immediately
+      // after a user-role turn throws the specific error.
+      await expect(
+        chat.sendMessage(
+          { message: 'This is an invalid consecutive user message' },
+          'prompt-id-1',
+        ),
+      ).rejects.toThrow('Cannot add a user turn after another user turn.');
+    });
+
+    it('should preserve text parts that are in the same response as a thought', async () => {
+      // 1. Mock the API to return a single response containing both a thought and visible text.
+      const mixedContentResponse = {
+        candidates: [
+          {
+            content: {
+              role: 'model',
+              parts: [
+                { thought: 'This is a thought.' },
+                { text: 'This is the visible text that should not be lost.' },
+              ],
+            },
+          },
+        ],
+      } as unknown as GenerateContentResponse;
+
+      vi.mocked(mockModelsModule.generateContent).mockResolvedValue(
+        mixedContentResponse,
+      );
+
+      // 2. Action: Send a standard, non-streaming message.
+      await chat.sendMessage(
+        { message: 'test message' },
+        'prompt-id-mixed-response',
+      );
+
+      // 3. Assert: Check the final state of the history.
+      const history = chat.getHistory();
+
+      // The history should contain two turns: the user's message and the model's response.
+      expect(history.length).toBe(2);
+
+      const modelTurn = history[1]!;
+      expect(modelTurn.role).toBe('model');
+
+      // CRUCIAL ASSERTION:
+      // Buggy code would discard the entire response because a "thought" was present,
+      // resulting in an empty placeholder turn with 0 parts.
+      // The corrected code will pass, preserving the single visible text part.
+      expect(modelTurn?.parts?.length).toBe(1);
+      expect(modelTurn?.parts![0]!.text).toBe(
+        'This is the visible text that should not be lost.',
+      );
+    });
+
+    it('should add a placeholder model turn when a tool call is followed by an empty model response', async () => {
+      // 1. Setup: A history where the model has just made a function call.
+      const initialHistory: Content[] = [
+        {
+          role: 'user',
+          parts: [{ text: 'Find a good Italian restaurant for me.' }],
+        },
+        {
+          role: 'model',
+          parts: [
+            {
+              functionCall: {
+                name: 'find_restaurant',
+                args: { cuisine: 'Italian' },
+              },
+            },
+          ],
+        },
+      ];
+      chat.setHistory(initialHistory);
+
+      // 2. Mock the API to return an empty/thought-only response.
+      const emptyModelResponse = {
+        candidates: [
+          { content: { role: 'model', parts: [{ thought: true }] } },
+        ],
+      } as unknown as GenerateContentResponse;
+      vi.mocked(mockModelsModule.generateContent).mockResolvedValue(
+        emptyModelResponse,
+      );
+
+      // 3. Action: Send the function response back to the model.
+      await chat.sendMessage(
+        {
+          message: {
+            functionResponse: {
+              name: 'find_restaurant',
+              response: { name: 'Vesuvio' },
+            },
+          },
+        },
+        'prompt-id-1',
+      );
+
+      // 4. Assert: The history should now have four valid, alternating turns.
+      const history = chat.getHistory();
+      expect(history.length).toBe(4);
+
+      // The final turn must be the empty model placeholder.
+      const lastTurn = history[3]!;
+      expect(lastTurn.role).toBe('model');
+      expect(lastTurn?.parts?.length).toBe(0);
+
+      // The second-to-last turn must be the function response we sent.
+      const secondToLastTurn = history[2]!;
+      expect(secondToLastTurn.role).toBe('user');
+      expect(secondToLastTurn?.parts![0]!.functionResponse).toBeDefined();
+    });
+
     it('should call generateContent with the correct parameters', async () => {
       const response = {
         candidates: [
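Two behaviours of the updated recordHistory drive the assertions above: sibling text parts survive thought filtering, and an empty model response after a tool call still produces a model turn so the roles keep alternating. A minimal sketch of the placeholder rule, using a simplified Turn shape rather than the real @google/genai Content type:

// Illustrative sketch only.
interface Turn {
  role: 'user' | 'model';
  parts: unknown[];
}

function recordModelOutput(history: Turn[], modelTurns: Turn[]): void {
  if (modelTurns.length > 0) {
    history.push(...modelTurns);
  } else {
    // No visible model output: keep the user/model alternation intact.
    history.push({ role: 'model', parts: [] });
  }
}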
@@ -107,6 +243,282 @@ describe('GeminiChat', () => {
   });

   describe('sendMessageStream', () => {
+    it('should not consolidate text into a part that also contains a functionCall', async () => {
+      // 1. Mock the API to stream a malformed part followed by a valid text part.
+      const multiChunkStream = (async function* () {
+        // This malformed part has both text and a functionCall.
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [
+                  {
+                    text: 'Some text',
+                    functionCall: { name: 'do_stuff', args: {} },
+                  },
+                ],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+        // This valid text part should NOT be merged into the malformed one.
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [{ text: ' that should not be merged.' }],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+
+      vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
+        multiChunkStream,
+      );
+
+      // 2. Action: Send a message and consume the stream.
+      const stream = await chat.sendMessageStream(
+        { message: 'test message' },
+        'prompt-id-malformed-chunk',
+      );
+      for await (const _ of stream) {
+        // Consume the stream to trigger history recording.
+      }
+
+      // 3. Assert: Check that the final history was not incorrectly consolidated.
+      const history = chat.getHistory();
+
+      expect(history.length).toBe(2);
+      const modelTurn = history[1]!;
+
+      // CRUCIAL ASSERTION: There should be two separate parts.
+      // The old, non-strict logic would incorrectly merge them, resulting in one part.
+      expect(modelTurn?.parts?.length).toBe(2);
+
+      // Verify the contents of each part.
+      expect(modelTurn?.parts![0]!.text).toBe('Some text');
+      expect(modelTurn?.parts![0]!.functionCall).toBeDefined();
+      expect(modelTurn?.parts![1]!.text).toBe(' that should not be merged.');
+    });
+
+    it('should consolidate subsequent text chunks after receiving an empty text chunk', async () => {
+      // 1. Mock the API to return a stream where one chunk is just an empty text part.
+      const multiChunkStream = (async function* () {
+        yield {
+          candidates: [
+            { content: { role: 'model', parts: [{ text: 'Hello' }] } },
+          ],
+        } as unknown as GenerateContentResponse;
+        // FIX: The original test used { text: '' }, which is invalid.
+        // A chunk can be empty but still valid. This chunk is now removed
+        // as the important part is consolidating what comes after.
+        yield {
+          candidates: [
+            { content: { role: 'model', parts: [{ text: ' World!' }] } },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+
+      vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
+        multiChunkStream,
+      );
+
+      // 2. Action: Send a message and consume the stream.
+      const stream = await chat.sendMessageStream(
+        { message: 'test message' },
+        'prompt-id-empty-chunk-consolidation',
+      );
+      for await (const _ of stream) {
+        // Consume the stream
+      }
+
+      // 3. Assert: Check that the final history was correctly consolidated.
+      const history = chat.getHistory();
+      expect(history.length).toBe(2);
+      const modelTurn = history[1]!;
+      expect(modelTurn?.parts?.length).toBe(1);
+      expect(modelTurn?.parts![0]!.text).toBe('Hello World!');
+    });
+
+    it('should consolidate adjacent text parts that arrive in separate stream chunks', async () => {
+      // 1. Mock the API to return a stream of multiple, adjacent text chunks.
+      const multiChunkStream = (async function* () {
+        yield {
+          candidates: [
+            { content: { role: 'model', parts: [{ text: 'This is the ' }] } },
+          ],
+        } as unknown as GenerateContentResponse;
+        yield {
+          candidates: [
+            { content: { role: 'model', parts: [{ text: 'first part.' }] } },
+          ],
+        } as unknown as GenerateContentResponse;
+        // This function call should break the consolidation.
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [{ functionCall: { name: 'do_stuff', args: {} } }],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [{ text: 'This is the second part.' }],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+
+      vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
+        multiChunkStream,
+      );
+
+      // 2. Action: Send a message and consume the stream.
+      const stream = await chat.sendMessageStream(
+        { message: 'test message' },
+        'prompt-id-multi-chunk',
+      );
+      for await (const _ of stream) {
+        // Consume the stream to trigger history recording.
+      }
+
+      // 3. Assert: Check that the final history was correctly consolidated.
+      const history = chat.getHistory();
+
+      // The history should contain the user's turn and ONE consolidated model turn.
+      expect(history.length).toBe(2);
+
+      const modelTurn = history[1]!;
+      expect(modelTurn.role).toBe('model');
+
+      // The model turn should have 3 distinct parts: the merged text, the function call, and the final text.
+      expect(modelTurn?.parts?.length).toBe(3);
+      expect(modelTurn?.parts![0]!.text).toBe('This is the first part.');
+      expect(modelTurn.parts![1]!.functionCall).toBeDefined();
+      expect(modelTurn.parts![2]!.text).toBe('This is the second part.');
+    });
+
+    it('should preserve text parts that stream in the same chunk as a thought', async () => {
+      // 1. Mock the API to return a single chunk containing both a thought and visible text.
+      const mixedContentStream = (async function* () {
+        yield {
+          candidates: [
+            {
+              content: {
+                role: 'model',
+                parts: [
+                  { thought: 'This is a thought.' },
+                  { text: 'This is the visible text that should not be lost.' },
+                ],
+              },
+            },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+
+      vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
+        mixedContentStream,
+      );
+
+      // 2. Action: Send a message and fully consume the stream to trigger history recording.
+      const stream = await chat.sendMessageStream(
+        { message: 'test message' },
+        'prompt-id-mixed-chunk',
+      );
+      for await (const _ of stream) {
+        // This loop consumes the stream.
+      }
+
+      // 3. Assert: Check the final state of the history.
+      const history = chat.getHistory();
+
+      // The history should contain two turns: the user's message and the model's response.
+      expect(history.length).toBe(2);
+
+      const modelTurn = history[1]!;
+      expect(modelTurn.role).toBe('model');
+
+      // CRUCIAL ASSERTION:
+      // The buggy code would fail here, resulting in parts.length being 0.
+      // The corrected code will pass, preserving the single visible text part.
+      expect(modelTurn?.parts?.length).toBe(1);
+      expect(modelTurn?.parts![0]!.text).toBe(
+        'This is the visible text that should not be lost.',
+      );
+    });
+
+    it('should add a placeholder model turn when a tool call is followed by an empty stream response', async () => {
+      // 1. Setup: A history where the model has just made a function call.
+      const initialHistory: Content[] = [
+        {
+          role: 'user',
+          parts: [{ text: 'Find a good Italian restaurant for me.' }],
+        },
+        {
+          role: 'model',
+          parts: [
+            {
+              functionCall: {
+                name: 'find_restaurant',
+                args: { cuisine: 'Italian' },
+              },
+            },
+          ],
+        },
+      ];
+      chat.setHistory(initialHistory);
+
+      // 2. Mock the API to return an empty/thought-only stream.
+      const emptyStreamResponse = (async function* () {
+        yield {
+          candidates: [
+            { content: { role: 'model', parts: [{ thought: true }] } },
+          ],
+        } as unknown as GenerateContentResponse;
+      })();
+      vi.mocked(mockModelsModule.generateContentStream).mockResolvedValue(
+        emptyStreamResponse,
+      );
+
+      // 3. Action: Send the function response back to the model and consume the stream.
+      const stream = await chat.sendMessageStream(
+        {
+          message: {
+            functionResponse: {
+              name: 'find_restaurant',
+              response: { name: 'Vesuvio' },
+            },
+          },
+        },
+        'prompt-id-stream-1',
+      );
+      for await (const _ of stream) {
+        // This loop consumes the stream to trigger the internal logic.
+      }
+
+      // 4. Assert: The history should now have four valid, alternating turns.
+      const history = chat.getHistory();
+      expect(history.length).toBe(4);
+
+      // The final turn must be the empty model placeholder.
+      const lastTurn = history[3]!;
+      expect(lastTurn.role).toBe('model');
+      expect(lastTurn?.parts?.length).toBe(0);
+
+      // The second-to-last turn must be the function response we sent.
+      const secondToLastTurn = history[2]!;
+      expect(secondToLastTurn.role).toBe('user');
+      expect(secondToLastTurn?.parts![0]!.functionResponse).toBeDefined();
+    });
+
     it('should call generateContentStream with the correct parameters', async () => {
       const response = (async function* () {
         yield {
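The streaming tests above pin down a consolidation rule: adjacent pure-text parts are merged into a single part, while a part carrying a functionCall never participates in the merge and thought parts are dropped. A simplified sketch of that rule, not the actual implementation:

// Illustrative sketch only, with simplified part shapes.
interface SimplePart {
  text?: string;
  functionCall?: object;
  thought?: unknown;
}

function consolidateParts(parts: SimplePart[]): SimplePart[] {
  const out: SimplePart[] = [];
  for (const part of parts.filter((p) => !p.thought)) {
    const last = out[out.length - 1];
    if (
      last &&
      typeof last.text === 'string' &&
      !last.functionCall &&
      typeof part.text === 'string' &&
      !part.functionCall
    ) {
      // Two adjacent pure-text parts: 'Hello' + ' World!' -> 'Hello World!'
      last.text += part.text;
    } else {
      // Parts that carry a functionCall (or mix text with one) stay separate.
      out.push({ ...part });
    }
  }
  return out;
}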
@@ -153,321 +565,145 @@ describe('GeminiChat', () => {
       parts: [{ text: 'User input' }],
     };
+
+    it('should consolidate all consecutive model turns into a single turn', () => {
+      const userInput: Content = {
+        role: 'user',
+        parts: [{ text: 'User input' }],
+      };
+      // This simulates a multi-part model response with different part types.
+      const modelOutput: Content[] = [
+        { role: 'model', parts: [{ text: 'Thinking...' }] },
+        {
+          role: 'model',
+          parts: [{ functionCall: { name: 'do_stuff', args: {} } }],
+        },
+      ];
+
+      // @ts-expect-error Accessing private method for testing
+      chat.recordHistory(userInput, modelOutput);
+      const history = chat.getHistory();
+
+      // The history should contain the user's turn and ONE consolidated model turn.
+      // The old code would fail here, resulting in a length of 3.
+      //expect(history).toBe([]);
+      expect(history.length).toBe(2);
+
+      const modelTurn = history[1]!;
+      expect(modelTurn.role).toBe('model');
+
+      // The consolidated turn should contain both the text part and the functionCall part.
+      expect(modelTurn?.parts?.length).toBe(2);
+      expect(modelTurn?.parts![0]!.text).toBe('Thinking...');
+      expect(modelTurn?.parts![1]!.functionCall).toBeDefined();
+    });
+
+    it('should add a placeholder model turn when a tool call is followed by an empty response', () => {
+      // 1. Setup: A history where the model has just made a function call.
+      const initialHistory: Content[] = [
+        { role: 'user', parts: [{ text: 'Initial prompt' }] },
+        {
+          role: 'model',
+          parts: [{ functionCall: { name: 'test_tool', args: {} } }],
+        },
+      ];
+      chat.setHistory(initialHistory);
+
+      // 2. Action: The user provides the tool's response, and the model's
+      // final output is empty (e.g., just a thought, which gets filtered out).
+      const functionResponse: Content = {
+        role: 'user',
+        parts: [{ functionResponse: { name: 'test_tool', response: {} } }],
+      };
+      const emptyModelOutput: Content[] = [];
+
+      // @ts-expect-error Accessing private method for testing
+      chat.recordHistory(functionResponse, emptyModelOutput, [
+        functionResponse,
+      ]);
+
+      // 3. Assert: The history should now have four valid, alternating turns.
+      const history = chat.getHistory();
+      expect(history.length).toBe(4);
+
+      // The final turn must be the empty model placeholder.
+      const lastTurn = history[3]!;
+      expect(lastTurn.role).toBe('model');
+      expect(lastTurn?.parts?.length).toBe(0);
+
+      // The second-to-last turn must be the function response we provided.
+      const secondToLastTurn = history[2]!;
+      expect(secondToLastTurn.role).toBe('user');
+      expect(secondToLastTurn?.parts![0]!.functionResponse).toBeDefined();
+    });
+
     it('should add user input and a single model output to history', () => {
       const modelOutput: Content[] = [
         { role: 'model', parts: [{ text: 'Model output' }] },
       ];
-      // @ts-expect-error Accessing private method for testing purposes
+      // @ts-expect-error Accessing private method for testing
       chat.recordHistory(userInput, modelOutput);
       const history = chat.getHistory();
-      expect(history).toEqual([userInput, modelOutput[0]]);
-    });
-
-    it('should consolidate adjacent model outputs', () => {
-      const modelOutputParts: Content[] = [
-        { role: 'model', parts: [{ text: 'Model part 1' }] },
-        { role: 'model', parts: [{ text: 'Model part 2' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputParts);
-      const history = chat.getHistory();
-      expect(history.length).toBe(2);
-      expect(history[0]).toEqual(userInput);
-      expect(history[1].role).toBe('model');
-      expect(history[1].parts).toEqual([{ text: 'Model part 1Model part 2' }]);
-    });
-
-    it('should handle a mix of user and model roles in outputContents (though unusual)', () => {
-      const mixedOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'Model 1' }] },
-        { role: 'user', parts: [{ text: 'Unexpected User' }] }, // This should be pushed as is
-        { role: 'model', parts: [{ text: 'Model 2' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, mixedOutput);
-      const history = chat.getHistory();
-      expect(history.length).toBe(4); // user, model1, user_unexpected, model2
-      expect(history[0]).toEqual(userInput);
-      expect(history[1]).toEqual(mixedOutput[0]);
-      expect(history[2]).toEqual(mixedOutput[1]);
-      expect(history[3]).toEqual(mixedOutput[2]);
-    });
-
-    it('should consolidate multiple adjacent model outputs correctly', () => {
-      const modelOutputParts: Content[] = [
-        { role: 'model', parts: [{ text: 'M1' }] },
-        { role: 'model', parts: [{ text: 'M2' }] },
-        { role: 'model', parts: [{ text: 'M3' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputParts);
-      const history = chat.getHistory();
-      expect(history.length).toBe(2);
-      expect(history[1].parts).toEqual([{ text: 'M1M2M3' }]);
-    });
-
-    it('should not consolidate if roles are different between model outputs', () => {
-      const modelOutputParts: Content[] = [
-        { role: 'model', parts: [{ text: 'M1' }] },
-        { role: 'user', parts: [{ text: 'Interjecting User' }] },
-        { role: 'model', parts: [{ text: 'M2' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputParts);
-      const history = chat.getHistory();
-      expect(history.length).toBe(4); // user, M1, Interjecting User, M2
-      expect(history[1].parts).toEqual([{ text: 'M1' }]);
-      expect(history[3].parts).toEqual([{ text: 'M2' }]);
-    });
-
-    it('should merge with last history entry if it is also a model output', () => {
-      // @ts-expect-error Accessing private property for test setup
-      chat.history = [
-        userInput,
-        { role: 'model', parts: [{ text: 'Initial Model Output' }] },
-      ]; // Prime the history
-
-      const newModelOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'New Model Part 1' }] },
-        { role: 'model', parts: [{ text: 'New Model Part 2' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, newModelOutput); // userInput here is for the *next* turn, but history is already primed
-
-      // Reset and set up a more realistic scenario for merging with existing history
-      chat = new GeminiChat(mockConfig, mockModelsModule, config, []);
-      const firstUserInput: Content = {
-        role: 'user',
-        parts: [{ text: 'First user input' }],
-      };
-      const firstModelOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'First model response' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(firstUserInput, firstModelOutput);
-
-      const secondUserInput: Content = {
-        role: 'user',
-        parts: [{ text: 'Second user input' }],
-      };
-      const secondModelOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'Second model response part 1' }] },
-        { role: 'model', parts: [{ text: 'Second model response part 2' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(secondUserInput, secondModelOutput);
-
-      const finalHistory = chat.getHistory();
-      expect(finalHistory.length).toBe(4); // user1, model1, user2, model2(consolidated)
-      expect(finalHistory[0]).toEqual(firstUserInput);
-      expect(finalHistory[1]).toEqual(firstModelOutput[0]);
-      expect(finalHistory[2]).toEqual(secondUserInput);
-      expect(finalHistory[3].role).toBe('model');
-      expect(finalHistory[3].parts).toEqual([
-        { text: 'Second model response part 1Second model response part 2' },
-      ]);
-    });
-
-    it('should correctly merge consolidated new output with existing model history', () => {
-      // Setup: history ends with a model turn
-      const initialUser: Content = {
-        role: 'user',
-        parts: [{ text: 'Initial user query' }],
-      };
-      const initialModel: Content = {
-        role: 'model',
-        parts: [{ text: 'Initial model answer.' }],
-      };
-      chat = new GeminiChat(mockConfig, mockModelsModule, config, [
-        initialUser,
-        initialModel,
-      ]);
-
-      // New interaction
-      const currentUserInput: Content = {
-        role: 'user',
-        parts: [{ text: 'Follow-up question' }],
-      };
-      const newModelParts: Content[] = [
-        { role: 'model', parts: [{ text: 'Part A of new answer.' }] },
-        { role: 'model', parts: [{ text: 'Part B of new answer.' }] },
-      ];
-
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(currentUserInput, newModelParts);
-      const history = chat.getHistory();
-
-      // Expected: initialUser, initialModel, currentUserInput, consolidatedNewModelParts
-      expect(history.length).toBe(4);
-      expect(history[0]).toEqual(initialUser);
-      expect(history[1]).toEqual(initialModel);
-      expect(history[2]).toEqual(currentUserInput);
-      expect(history[3].role).toBe('model');
-      expect(history[3].parts).toEqual([
-        { text: 'Part A of new answer.Part B of new answer.' },
-      ]);
-    });
-
-    it('should handle empty modelOutput array', () => {
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, []);
-      const history = chat.getHistory();
-      // If modelOutput is empty, it might push a default empty model part depending on isFunctionResponse
-      // Assuming isFunctionResponse(userInput) is false for this simple text input
-      expect(history.length).toBe(2);
-      expect(history[0]).toEqual(userInput);
-      expect(history[1].role).toBe('model');
-      expect(history[1].parts).toEqual([]);
-    });
-
-    it('should handle aggregating modelOutput', () => {
-      const modelOutputUndefinedParts: Content[] = [
-        { role: 'model', parts: [{ text: 'First model part' }] },
-        { role: 'model', parts: [{ text: 'Second model part' }] },
-        { role: 'model', parts: undefined as unknown as Part[] }, // Test undefined parts
-        { role: 'model', parts: [{ text: 'Third model part' }] },
-        { role: 'model', parts: [] }, // Test empty parts array
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputUndefinedParts);
-      const history = chat.getHistory();
-      expect(history.length).toBe(5);
-      expect(history[0]).toEqual(userInput);
-      expect(history[1].role).toBe('model');
-      expect(history[1].parts).toEqual([
-        { text: 'First model partSecond model part' },
-      ]);
-      expect(history[2].role).toBe('model');
-      expect(history[2].parts).toBeUndefined();
-      expect(history[3].role).toBe('model');
-      expect(history[3].parts).toEqual([{ text: 'Third model part' }]);
-      expect(history[4].role).toBe('model');
-      expect(history[4].parts).toEqual([]);
-    });
-
-    it('should handle modelOutput with parts being undefined or empty (if they pass initial every check)', () => {
-      const modelOutputUndefinedParts: Content[] = [
-        { role: 'model', parts: [{ text: 'Text part' }] },
-        { role: 'model', parts: undefined as unknown as Part[] }, // Test undefined parts
-        { role: 'model', parts: [] }, // Test empty parts array
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputUndefinedParts);
-      const history = chat.getHistory();
-      expect(history.length).toBe(4); // userInput, model1 (text), model2 (undefined parts), model3 (empty parts)
-      expect(history[0]).toEqual(userInput);
-      expect(history[1].role).toBe('model');
-      expect(history[1].parts).toEqual([{ text: 'Text part' }]);
-      expect(history[2].role).toBe('model');
-      expect(history[2].parts).toBeUndefined();
-      expect(history[3].role).toBe('model');
-      expect(history[3].parts).toEqual([]);
-    });
-
-    it('should correctly handle automaticFunctionCallingHistory', () => {
-      const afcHistory: Content[] = [
-        { role: 'user', parts: [{ text: 'AFC User' }] },
-        { role: 'model', parts: [{ text: 'AFC Model' }] },
-      ];
-      const modelOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'Regular Model Output' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutput, afcHistory);
-      const history = chat.getHistory();
-      expect(history.length).toBe(3);
-      expect(history[0]).toEqual(afcHistory[0]);
-      expect(history[1]).toEqual(afcHistory[1]);
-      expect(history[2]).toEqual(modelOutput[0]);
-    });
-
-    it('should add userInput if AFC history is present but empty', () => {
-      const modelOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'Model Output' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutput, []); // Empty AFC history
-      const history = chat.getHistory();
       expect(history.length).toBe(2);
       expect(history[0]).toEqual(userInput);
       expect(history[1]).toEqual(modelOutput[0]);
     });

-    it('should skip "thought" content from modelOutput', () => {
+    it('should consolidate adjacent text parts from multiple content objects', () => {
-      const modelOutputWithThought: Content[] = [
+      const modelOutput: Content[] = [
-        { role: 'model', parts: [{ thought: true }, { text: 'Visible text' }] },
-        { role: 'model', parts: [{ text: 'Another visible text' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputWithThought);
-      const history = chat.getHistory();
-      expect(history.length).toBe(2); // User input + consolidated model output
-      expect(history[0]).toEqual(userInput);
-      expect(history[1].role).toBe('model');
-      // The 'thought' part is skipped, 'Another visible text' becomes the first part.
-      expect(history[1].parts).toEqual([{ text: 'Another visible text' }]);
-    });
-
-    it('should skip "thought" content even if it is the only content', () => {
-      const modelOutputOnlyThought: Content[] = [
-        { role: 'model', parts: [{ thought: true }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputOnlyThought);
-      const history = chat.getHistory();
-      expect(history.length).toBe(1); // User input + default empty model part
-      expect(history[0]).toEqual(userInput);
-    });
-
-    it('should correctly consolidate text parts when a thought part is in between', () => {
-      const modelOutputMixed: Content[] = [
         { role: 'model', parts: [{ text: 'Part 1.' }] },
-        {
+        { role: 'model', parts: [{ text: ' Part 2.' }] },
-          role: 'model',
+        { role: 'model', parts: [{ text: ' Part 3.' }] },
-          parts: [{ thought: true }, { text: 'Should be skipped' }],
-        },
-        { role: 'model', parts: [{ text: 'Part 2.' }] },
       ];
-      // @ts-expect-error Accessing private method for testing purposes
+      // @ts-expect-error Accessing private method for testing
-      chat.recordHistory(userInput, modelOutputMixed);
+      chat.recordHistory(userInput, modelOutput);
       const history = chat.getHistory();
       expect(history.length).toBe(2);
-      expect(history[0]).toEqual(userInput);
+      expect(history[1]!.role).toBe('model');
-      expect(history[1].role).toBe('model');
+      expect(history[1]!.parts).toEqual([{ text: 'Part 1. Part 2. Part 3.' }]);
-      expect(history[1].parts).toEqual([{ text: 'Part 1.Part 2.' }]);
     });

-    it('should handle multiple thought parts correctly', () => {
+    it('should add an empty placeholder turn if modelOutput is empty', () => {
-      const modelOutputMultipleThoughts: Content[] = [
+      // This simulates receiving a pre-filtered, thought-only response.
-        { role: 'model', parts: [{ thought: true }] },
+      const emptyModelOutput: Content[] = [];
-        { role: 'model', parts: [{ text: 'Visible 1' }] },
+      // @ts-expect-error Accessing private method for testing
-        { role: 'model', parts: [{ thought: true }] },
+      chat.recordHistory(userInput, emptyModelOutput);
-        { role: 'model', parts: [{ text: 'Visible 2' }] },
-      ];
-      // @ts-expect-error Accessing private method for testing purposes
-      chat.recordHistory(userInput, modelOutputMultipleThoughts);
       const history = chat.getHistory();
       expect(history.length).toBe(2);
       expect(history[0]).toEqual(userInput);
-      expect(history[1].role).toBe('model');
+      expect(history[1]!.role).toBe('model');
-      expect(history[1].parts).toEqual([{ text: 'Visible 1Visible 2' }]);
+      expect(history[1]!.parts).toEqual([]);
     });

-    it('should handle thought part at the end of outputContents', () => {
+    it('should preserve model outputs with undefined or empty parts arrays', () => {
-      const modelOutputThoughtAtEnd: Content[] = [
+      const malformedOutput: Content[] = [
-        { role: 'model', parts: [{ text: 'Visible text' }] },
+        { role: 'model', parts: [{ text: 'Text part' }] },
-        { role: 'model', parts: [{ thought: true }] },
+        { role: 'model', parts: undefined as unknown as Part[] },
+        { role: 'model', parts: [] },
       ];
-      // @ts-expect-error Accessing private method for testing purposes
+      // @ts-expect-error Accessing private method for testing
-      chat.recordHistory(userInput, modelOutputThoughtAtEnd);
+      chat.recordHistory(userInput, malformedOutput);
       const history = chat.getHistory();
-      expect(history.length).toBe(2);
+      expect(history.length).toBe(4); // userInput + 3 model turns
-      expect(history[0]).toEqual(userInput);
+      expect(history[1]!.parts).toEqual([{ text: 'Text part' }]);
-      expect(history[1].role).toBe('model');
+      expect(history[2]!.parts).toBeUndefined();
-      expect(history[1].parts).toEqual([{ text: 'Visible text' }]);
+      expect(history[3]!.parts).toEqual([]);
+    });
+
+    it('should not consolidate content with different roles', () => {
+      const mixedOutput: Content[] = [
+        { role: 'model', parts: [{ text: 'Model 1' }] },
+        { role: 'user', parts: [{ text: 'Unexpected User' }] },
+        { role: 'model', parts: [{ text: 'Model 2' }] },
+      ];
+      // @ts-expect-error Accessing private method for testing
+      chat.recordHistory(userInput, mixedOutput);
+      const history = chat.getHistory();
+      expect(history.length).toBe(4); // userInput, model1, unexpected_user, model2
+      expect(history[1]).toEqual(mixedOutput[0]);
+      expect(history[2]).toEqual(mixedOutput[1]);
+      expect(history[3]).toEqual(mixedOutput[2]);
     });
   });

   describe('addHistory', () => {
     it('should add a new content item to the history', () => {
       const newContent: Content = {
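The remaining hunks modify the GeminiChat implementation itself, the class whose behaviour the tests above exercise. One behaviour those tests pin down is that thought filtering happens per part rather than per response. A sketch with an illustrative filterThoughts helper (the real change does this inline inside recordHistory):

// Illustrative sketch only.
interface SimplePart {
  text?: string;
  thought?: unknown;
}
interface SimpleTurn {
  role: string;
  parts?: SimplePart[];
}

function filterThoughts(turn: SimpleTurn): SimpleTurn {
  // Dropping only the thought parts keeps sibling text such as
  // 'This is the visible text that should not be lost.' in the recorded turn.
  if (!turn.parts) {
    return turn;
  }
  return { ...turn, parts: turn.parts.filter((part) => !part.thought) };
}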
@@ -17,7 +17,6 @@ import type {
 } from '@google/genai';
 import { createUserContent } from '@google/genai';
 import { retryWithBackoff } from '../utils/retry.js';
-import { isFunctionResponse } from '../utils/messageInspectors.js';
 import type { ContentGenerator } from './contentGenerator.js';
 import { AuthType } from './contentGenerator.js';
 import type { Config } from '../config/config.js';
@@ -44,6 +43,7 @@ const INVALID_CONTENT_RETRY_OPTIONS: ContentRetryOptions = {
   maxAttempts: 3, // 1 initial call + 2 retries
   initialDelayMs: 500,
 };
+
 /**
  * Returns true if the response is valid, false otherwise.
  */
@@ -221,7 +221,7 @@ export class GeminiChat {
   * ```ts
   * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
   * const response = await chat.sendMessage({
   *   message: 'Why is the sky blue?'
   * });
   * console.log(response.text);
   * ```
@@ -277,6 +277,8 @@ export class GeminiChat {
|
|||||||
|
|
||||||
this.sendPromise = (async () => {
|
this.sendPromise = (async () => {
|
||||||
const outputContent = response.candidates?.[0]?.content;
|
const outputContent = response.candidates?.[0]?.content;
|
||||||
|
const modelOutput = outputContent ? [outputContent] : [];
|
||||||
|
|
||||||
// Because the AFC input contains the entire curated chat history in
|
// Because the AFC input contains the entire curated chat history in
|
||||||
// addition to the new user input, we need to truncate the AFC history
|
// addition to the new user input, we need to truncate the AFC history
|
||||||
// to deduplicate the existing chat history.
|
// to deduplicate the existing chat history.
|
||||||
@@ -288,16 +290,18 @@ export class GeminiChat {
           automaticFunctionCallingHistory =
             fullAutomaticFunctionCallingHistory.slice(index) ?? [];
         }
-        const modelOutput = outputContent ? [outputContent] : [];
         this.recordHistory(
           userContent,
           modelOutput,
           automaticFunctionCallingHistory,
         );
       })();
-      await this.sendPromise.catch(() => {
+      await this.sendPromise.catch((error) => {
         // Resets sendPromise to avoid subsequent calls failing
         this.sendPromise = Promise.resolve();
+        // Re-throw the error so the caller knows something went wrong.
+        throw error;
       });
       return response;
     } catch (error) {
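The catch-handler change above resets the serialization promise and re-throws, so a failed send is no longer swallowed silently. A generic sketch of the pattern, with illustrative names rather than the real GeminiChat members:

// Illustrative sketch only.
class SerializedSender {
  private sendPromise: Promise<void> = Promise.resolve();

  async send(work: () => Promise<void>): Promise<void> {
    this.sendPromise = work();
    await this.sendPromise.catch((error) => {
      // Reset so the next call is not chained onto a rejected promise...
      this.sendPromise = Promise.resolve();
      // ...but still surface the failure to the caller.
      throw error;
    });
  }
}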
@@ -321,10 +325,10 @@ export class GeminiChat {
   * ```ts
   * const chat = ai.chats.create({model: 'gemini-2.0-flash'});
   * const response = await chat.sendMessageStream({
   *   message: 'Why is the sky blue?'
   * });
   * for await (const chunk of response) {
   *   console.log(chunk.text);
   * }
   * ```
   */
@@ -464,7 +468,7 @@ export class GeminiChat {
   * - The `curated history` contains only the valid turns between user and
   *   model, which will be included in the subsequent requests sent to the model.
   * - The `comprehensive history` contains all turns, including invalid or
   *   empty model outputs, providing a complete record of the history.
   *
   * The history is updated after receiving the response from the model,
   * for streaming response, it means receiving the last chunk of the response.
@@ -473,9 +477,9 @@ export class GeminiChat {
   * history`, set the `curated` parameter to `true`.
   *
   * @param curated - whether to return the curated history or the comprehensive
   *   history.
   * @return History contents alternating between user and model for the entire
   *   chat session.
   */
  getHistory(curated: boolean = false): Content[] {
    const history = curated
@@ -547,31 +551,29 @@ export class GeminiChat {
         hasReceivedAnyChunk = true;
         if (isValidResponse(chunk)) {
           const content = chunk.candidates?.[0]?.content;
-          if (content) {
+          if (content?.parts) {
-            // Filter out thought parts from being added to history.
+            modelResponseParts.push(...content.parts);
-            if (!this.isThoughtContent(content) && content.parts) {
-              modelResponseParts.push(...content.parts);
-            }
           }
         } else {
           recordInvalidChunk(this.config);
           isStreamInvalid = true;
         }
-        yield chunk; // Yield every chunk to the UI immediately.
+        yield chunk;
       }

-      // Now that the stream is finished, make a decision.
-      // Throw an error if the stream was invalid OR if it was completely empty.
       if (isStreamInvalid || !hasReceivedAnyChunk) {
         throw new EmptyStreamError(
           'Model stream was invalid or completed without valid content.',
         );
       }

-      // Use recordHistory to correctly save the conversation turn.
+      // Bundle all streamed parts into a single Content object
-      const modelOutput: Content[] = [
+      const modelOutput: Content[] =
-        { role: 'model', parts: modelResponseParts },
+        modelResponseParts.length > 0
-      ];
+          ? [{ role: 'model', parts: modelResponseParts }]
+          : [];
+
+      // Pass the raw, bundled data to the new, robust recordHistory
       this.recordHistory(userInput, modelOutput);
     }

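After the loop above finishes, the streamed parts are bundled into at most one model Content; an empty bundle leaves modelOutput empty so recordHistory can fall through to the placeholder branch. A one-function sketch with simplified types (not the real Content/Part types):

// Illustrative sketch only.
interface SimplePart {
  text?: string;
}
interface SimpleTurn {
  role: 'user' | 'model';
  parts: SimplePart[];
}

function bundleStreamedParts(parts: SimplePart[]): SimpleTurn[] {
  // Mirrors: modelResponseParts.length > 0 ? [{ role: 'model', parts }] : []
  return parts.length > 0 ? [{ role: 'model', parts }] : [];
}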
@@ -580,91 +582,101 @@ export class GeminiChat {
     modelOutput: Content[],
     automaticFunctionCallingHistory?: Content[],
   ) {
-    const newHistoryEntries: Content[] = [];
-
-    // Part 1: Handle the user's part of the turn.
+    // Part 1: Handle the user's turn.
+
     if (
       automaticFunctionCallingHistory &&
       automaticFunctionCallingHistory.length > 0
     ) {
-      newHistoryEntries.push(
+      this.history.push(
         ...extractCuratedHistory(automaticFunctionCallingHistory),
       );
     } else {
-      // Guard for streaming calls where the user input might already be in the history.
       if (
         this.history.length === 0 ||
        this.history[this.history.length - 1] !== userInput
      ) {
-        newHistoryEntries.push(userInput);
-      }
-    }
-
-    // Part 2: Handle the model's part of the turn, filtering out thoughts.
-    const nonThoughtModelOutput = modelOutput.filter(
-      (content) => !this.isThoughtContent(content),
-    );
-
-    let outputContents: Content[] = [];
-    if (nonThoughtModelOutput.length > 0) {
-      outputContents = nonThoughtModelOutput;
-    } else if (
-      modelOutput.length === 0 &&
-      !isFunctionResponse(userInput) &&
-      !automaticFunctionCallingHistory
-    ) {
-      // Add an empty model response if the model truly returned nothing.
-      outputContents.push({ role: 'model', parts: [] } as Content);
-    }
-
-    // Part 3: Consolidate the parts of this turn's model response.
-    const consolidatedOutputContents: Content[] = [];
-    if (outputContents.length > 0) {
-      for (const content of outputContents) {
-        const lastContent =
-          consolidatedOutputContents[consolidatedOutputContents.length - 1];
-        if (this.hasTextContent(lastContent) && this.hasTextContent(content)) {
-          lastContent.parts[0].text += content.parts[0].text || '';
-          if (content.parts.length > 1) {
-            lastContent.parts.push(...content.parts.slice(1));
-          }
-        } else {
-          consolidatedOutputContents.push(content);
-        }
-      }
-    }
-
-    // Part 4: Add the new turn (user and model parts) to the main history.
-    this.history.push(...newHistoryEntries, ...consolidatedOutputContents);
-  }
-
-  private hasTextContent(
-    content: Content | undefined,
-  ): content is Content & { parts: [{ text: string }, ...Part[]] } {
-    return !!(
-      content &&
-      content.role === 'model' &&
-      content.parts &&
-      content.parts.length > 0 &&
-      typeof content.parts[0].text === 'string' &&
-      content.parts[0].text !== ''
-    );
-  }
-
-  private isThoughtContent(
-    content: Content | undefined,
-  ): content is Content & { parts: [{ thought: boolean }, ...Part[]] } {
-    return !!(
-      content &&
-      content.role === 'model' &&
-      content.parts &&
-      content.parts.length > 0 &&
-      typeof content.parts[0].thought === 'boolean' &&
-      content.parts[0].thought === true
-    );
-  }
+        const lastTurn = this.history[this.history.length - 1];
+        // The only time we don't push is if it's the *exact same* object,
+        // which happens in streaming where we add it preemptively.
+        if (lastTurn !== userInput) {
+          if (lastTurn?.role === 'user') {
+            // This is an invalid sequence.
+            throw new Error('Cannot add a user turn after another user turn.');
+          }
+          this.history.push(userInput);
+        }
+      }
+    }
+
+    // Part 2: Process the model output into a final, consolidated list of turns.
+    const finalModelTurns: Content[] = [];
+    for (const content of modelOutput) {
+      // A. Preserve malformed content that has no 'parts' array.
+      if (!content.parts) {
+        finalModelTurns.push(content);
+        continue;
+      }
+
+      // B. Filter out 'thought' parts.
+      const visibleParts = content.parts.filter((part) => !part.thought);
+
+      const newTurn = { ...content, parts: visibleParts };
+      const lastTurnInFinal = finalModelTurns[finalModelTurns.length - 1];
+      // Consolidate this new turn with the PREVIOUS turn if they are adjacent model turns.
+      if (
+        lastTurnInFinal &&
+        lastTurnInFinal.role === 'model' &&
+        newTurn.role === 'model' &&
+        lastTurnInFinal.parts && // SAFETY CHECK: Ensure the destination has a parts array.
+        newTurn.parts
+      ) {
+        lastTurnInFinal.parts.push(...newTurn.parts);
+      } else {
+        finalModelTurns.push(newTurn);
+      }
+    }
+
+    // Part 3: Add the processed model turns to the history, with one final consolidation pass.
+    if (finalModelTurns.length > 0) {
+      // Re-consolidate parts within any turns that were merged in the previous step.
+      for (const turn of finalModelTurns) {
+        if (turn.parts && turn.parts.length > 1) {
+          const consolidatedParts: Part[] = [];
+          for (const part of turn.parts) {
+            const lastPart = consolidatedParts[consolidatedParts.length - 1];
+            if (
+              lastPart &&
+              // Ensure lastPart is a pure text part
+              typeof lastPart.text === 'string' &&
+              !lastPart.functionCall &&
+              !lastPart.functionResponse &&
+              !lastPart.inlineData &&
+              !lastPart.fileData &&
+              !lastPart.thought &&
+              // Ensure current part is a pure text part
+              typeof part.text === 'string' &&
+              !part.functionCall &&
+              !part.functionResponse &&
+              !part.inlineData &&
+              !part.fileData &&
+              !part.thought
+            ) {
+              lastPart.text += part.text;
+            } else {
+              consolidatedParts.push({ ...part });
+            }
+          }
+          turn.parts = consolidatedParts;
+        }
+      }
+      this.history.push(...finalModelTurns);
+    } else {
+      // If, after all processing, there's NO model output, add the placeholder.
+      this.history.push({ role: 'model', parts: [] });
+    }
   }
 }

 /** Visible for Testing */
 export function isSchemaDepthError(errorMessage: string): boolean {
   return errorMessage.includes('maximum schema depth exceeded');
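Taken together, the new recordHistory amounts to three steps: guard and append the user turn, filter thoughts and consolidate the model turns, then fall back to an empty model placeholder when nothing visible remains. A compact, simplified restatement for reference (illustrative only; it omits the AFC-history and streaming special cases handled by the real method):

// Illustrative sketch only.
type Role = 'user' | 'model';
interface Turn {
  role: Role;
  parts: Array<Record<string, unknown>>;
}

function recordTurn(history: Turn[], userTurn: Turn, modelTurns: Turn[]): Turn[] {
  // Step 1: enforce alternation before appending the user turn.
  const last = history[history.length - 1];
  if (last?.role === 'user') {
    throw new Error('Cannot add a user turn after another user turn.');
  }
  history.push(userTurn);

  // Step 2: drop thought parts but keep the surrounding turns.
  const processed = modelTurns.map((turn) => ({
    ...turn,
    parts: turn.parts.filter((part) => !('thought' in part)),
  }));

  // Step 3: placeholder model turn when the model produced nothing.
  const placeholder: Turn = { role: 'model', parts: [] };
  history.push(...(processed.length > 0 ? processed : [placeholder]));
  return history;
}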