fix: invalid tool_calls request due to improper cancellation (#790)

Author: tanzhenxin
Date: 2025-10-13 09:25:31 +08:00
Committed by: GitHub
Parent: d4fa15dd53
Commit: 270dda4aa7

2 changed files with 84 additions and 0 deletions

@@ -161,6 +161,9 @@ describe('ContentGenerationPipeline', () => {
         top_p: 0.9,
         max_tokens: 1000,
       }),
+      expect.objectContaining({
+        signal: undefined,
+      }),
     );
     expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
       mockOpenAIResponse,
@@ -238,6 +241,9 @@ describe('ContentGenerationPipeline', () => {
       expect.objectContaining({
         tools: mockTools,
       }),
+      expect.objectContaining({
+        signal: undefined,
+      }),
     );
   });

@@ -274,6 +280,30 @@ describe('ContentGenerationPipeline', () => {
       request,
     );
   });
+
+  it('should pass abort signal to OpenAI client when provided', async () => {
+    const abortController = new AbortController();
+    const request: GenerateContentParameters = {
+      model: 'test-model',
+      contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
+      config: { abortSignal: abortController.signal },
+    };
+
+    (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
+    (mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
+      new GenerateContentResponse(),
+    );
+    (mockClient.chat.completions.create as Mock).mockResolvedValue({
+      choices: [{ message: { content: 'response' } }],
+    });
+
+    await pipeline.execute(request, 'test-id');
+
+    expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+      expect.any(Object),
+      expect.objectContaining({ signal: abortController.signal }),
+    );
+  });
 });

 describe('executeStream', () => {
@@ -338,6 +368,9 @@ describe('ContentGenerationPipeline', () => {
         stream: true,
         stream_options: { include_usage: true },
       }),
+      expect.objectContaining({
+        signal: undefined,
+      }),
     );
     expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
       expect.objectContaining({
@@ -470,6 +503,42 @@ describe('ContentGenerationPipeline', () => {
     );
   });

+  it('should pass abort signal to OpenAI client for streaming requests', async () => {
+    const abortController = new AbortController();
+    const request: GenerateContentParameters = {
+      model: 'test-model',
+      contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
+      config: { abortSignal: abortController.signal },
+    };
+
+    const mockStream = {
+      async *[Symbol.asyncIterator]() {
+        yield {
+          id: 'chunk-1',
+          choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
+        };
+      },
+    };
+
+    (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
+    (mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
+      new GenerateContentResponse(),
+    );
+    (mockClient.chat.completions.create as Mock).mockResolvedValue(
+      mockStream,
+    );
+
+    const resultGenerator = await pipeline.executeStream(request, 'test-id');
+    for await (const _result of resultGenerator) {
+      // Consume stream
+    }
+
+    expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+      expect.any(Object),
+      expect.objectContaining({ signal: abortController.signal }),
+    );
+  });
+
   it('should merge finishReason and usageMetadata from separate chunks', async () => {
     // Arrange
     const request: GenerateContentParameters = {
@@ -924,6 +993,9 @@ describe('ContentGenerationPipeline', () => {
         top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
         max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
       }),
+      expect.objectContaining({
+        signal: undefined,
+      }),
     );
   });

@@ -960,6 +1032,9 @@ describe('ContentGenerationPipeline', () => {
         top_p: 0.9, // From config
         max_tokens: 1000, // From config
       }),
+      expect.objectContaining({
+        signal: undefined,
+      }),
     );
   });

@@ -1009,6 +1084,9 @@ describe('ContentGenerationPipeline', () => {
       expect.objectContaining({
         metadata: { promptId: userPromptId },
       }),
+      expect.objectContaining({
+        signal: undefined,
+      }),
     );
   });
 });
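
The assertions above pin down the new contract: every call to chat.completions.create must receive a second request-options argument whose signal is either undefined or the caller's AbortSignal. For reference, a minimal standalone sketch of that per-request option against the openai npm package (the model name and the timeout are illustrative placeholders, not taken from this repo):

import OpenAI from 'openai';

const client = new OpenAI();
const abortController = new AbortController();

// Simulate the user cancelling a turn shortly after dispatch.
setTimeout(() => abortController.abort(), 100);

try {
  await client.chat.completions.create(
    { model: 'gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }] },
    { signal: abortController.signal }, // request options, separate from the body
  );
} catch (err) {
  if (err instanceof OpenAI.APIUserAbortError) {
    // The SDK cancelled the underlying HTTP request cleanly, so no
    // half-finished exchange (e.g. a pending tool_calls turn) lingers.
  } else {
    throw err;
  }
}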

@@ -48,6 +48,9 @@ export class ContentGenerationPipeline {
       async (openaiRequest, context) => {
         const openaiResponse = (await this.client.chat.completions.create(
           openaiRequest,
+          {
+            signal: request.config?.abortSignal,
+          },
         )) as OpenAI.Chat.ChatCompletion;

         const geminiResponse =
@@ -78,6 +81,9 @@ export class ContentGenerationPipeline {
     // Stage 1: Create OpenAI stream
     const stream = (await this.client.chat.completions.create(
       openaiRequest,
+      {
+        signal: request.config?.abortSignal,
+      },
     )) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;

     // Stage 2: Process stream with conversion and logging
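
Taken together, the two call sites thread the caller's signal through to both the blocking and the streaming paths. A hypothetical caller-side sketch (pipeline is assumed to be a constructed ContentGenerationPipeline; the request shape mirrors the test file above):

const abortController = new AbortController();
const request: GenerateContentParameters = {
  model: 'test-model',
  contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
  config: { abortSignal: abortController.signal },
};

const stream = await pipeline.executeStream(request, 'prompt-id');
for await (const _chunk of stream) {
  // Aborting mid-stream now reaches the OpenAI client, which cancels the
  // HTTP request instead of leaving a dangling, half-accumulated tool_calls
  // exchange that the next request would replay in an invalid state.
  abortController.abort();
  break;
}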