Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-19 09:33:53 +00:00)
fix: invalid tool_calls request due to improper cancellation (#790)
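The commit threads the caller's AbortSignal (request.config?.abortSignal) through to the OpenAI SDK as a per-request option on both the non-streaming and streaming paths, and the tests below assert that client.chat.completions.create now receives that signal as its second argument. As a standalone illustration of the pattern (not the pipeline code itself; the client instance and model name are placeholders), a minimal sketch with the official openai Node SDK:

    import OpenAI from 'openai';

    // Sketch only: forward an AbortSignal as the SDK's per-request options so
    // that cancelling the outer operation also aborts the underlying HTTP call.
    const client = new OpenAI(); // assumes OPENAI_API_KEY is set in the environment
    const abortController = new AbortController();

    const pending = client.chat.completions.create(
      { model: 'gpt-4o-mini', messages: [{ role: 'user', content: 'Hello' }] },
      { signal: abortController.signal }, // second argument: per-request options
    );

    // Aborting rejects the pending promise instead of leaving the request running.
    abortController.abort();
    await pending.catch(() => {
      /* cancelled */
    });

Without the signal, cancelling on the Gemini-compatible side leaves the OpenAI request in flight, which is presumably the improper cancellation the commit title refers to.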
@@ -161,6 +161,9 @@ describe('ContentGenerationPipeline', () => {
           top_p: 0.9,
           max_tokens: 1000,
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
       expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
         mockOpenAIResponse,
@@ -238,6 +241,9 @@ describe('ContentGenerationPipeline', () => {
         expect.objectContaining({
           tools: mockTools,
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });

@@ -274,6 +280,30 @@ describe('ContentGenerationPipeline', () => {
         request,
       );
     });
+
+    it('should pass abort signal to OpenAI client when provided', async () => {
+      const abortController = new AbortController();
+      const request: GenerateContentParameters = {
+        model: 'test-model',
+        contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
+        config: { abortSignal: abortController.signal },
+      };
+
+      (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
+      (mockConverter.convertOpenAIResponseToGemini as Mock).mockReturnValue(
+        new GenerateContentResponse(),
+      );
+      (mockClient.chat.completions.create as Mock).mockResolvedValue({
+        choices: [{ message: { content: 'response' } }],
+      });
+
+      await pipeline.execute(request, 'test-id');
+
+      expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.any(Object),
+        expect.objectContaining({ signal: abortController.signal }),
+      );
+    });
   });

   describe('executeStream', () => {
@@ -338,6 +368,9 @@ describe('ContentGenerationPipeline', () => {
           stream: true,
           stream_options: { include_usage: true },
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
       expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
         expect.objectContaining({
@@ -470,6 +503,42 @@ describe('ContentGenerationPipeline', () => {
       );
     });
+
+    it('should pass abort signal to OpenAI client for streaming requests', async () => {
+      const abortController = new AbortController();
+      const request: GenerateContentParameters = {
+        model: 'test-model',
+        contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
+        config: { abortSignal: abortController.signal },
+      };
+
+      const mockStream = {
+        async *[Symbol.asyncIterator]() {
+          yield {
+            id: 'chunk-1',
+            choices: [{ delta: { content: 'Hello' }, finish_reason: 'stop' }],
+          };
+        },
+      };
+
+      (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
+      (mockConverter.convertOpenAIChunkToGemini as Mock).mockReturnValue(
+        new GenerateContentResponse(),
+      );
+      (mockClient.chat.completions.create as Mock).mockResolvedValue(
+        mockStream,
+      );
+
+      const resultGenerator = await pipeline.executeStream(request, 'test-id');
+      for await (const _result of resultGenerator) {
+        // Consume stream
+      }
+
+      expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+        expect.any(Object),
+        expect.objectContaining({ signal: abortController.signal }),
+      );
+    });

     it('should merge finishReason and usageMetadata from separate chunks', async () => {
       // Arrange
       const request: GenerateContentParameters = {
@@ -924,6 +993,9 @@ describe('ContentGenerationPipeline', () => {
           top_p: 0.9, // Config parameter used since request overrides are not being applied in current implementation
           max_tokens: 1000, // Config parameter used since request overrides are not being applied in current implementation
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });

@@ -960,6 +1032,9 @@ describe('ContentGenerationPipeline', () => {
           top_p: 0.9, // From config
           max_tokens: 1000, // From config
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });

@@ -1009,6 +1084,9 @@ describe('ContentGenerationPipeline', () => {
         expect.objectContaining({
           metadata: { promptId: userPromptId },
         }),
+        expect.objectContaining({
+          signal: undefined,
+        }),
       );
     });
   });

@@ -48,6 +48,9 @@ export class ContentGenerationPipeline {
       async (openaiRequest, context) => {
         const openaiResponse = (await this.client.chat.completions.create(
           openaiRequest,
+          {
+            signal: request.config?.abortSignal,
+          },
         )) as OpenAI.Chat.ChatCompletion;

         const geminiResponse =
@@ -78,6 +81,9 @@ export class ContentGenerationPipeline {
       // Stage 1: Create OpenAI stream
       const stream = (await this.client.chat.completions.create(
         openaiRequest,
+        {
+          signal: request.config?.abortSignal,
+        },
       )) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;

       // Stage 2: Process stream with conversion and logging
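From the caller's side, the only contract the change relies on is that config.abortSignal ends up in the second argument of client.chat.completions.create. A usage sketch mirroring the tests above (the pipeline instance and the 'prompt-id' value are placeholders):

    const abortController = new AbortController();

    const request: GenerateContentParameters = {
      model: 'test-model',
      contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
      config: { abortSignal: abortController.signal },
    };

    // Abort shortly after starting; with this change the in-flight OpenAI
    // request is cancelled as well, rather than continuing in the background.
    const pending = pipeline.execute(request, 'prompt-id');
    setTimeout(() => abortController.abort(), 100);
    await pending.catch(() => {
      /* cancellation surfaces as a rejection */
    });

The streaming path (pipeline.executeStream) forwards the same signal, so aborting there should cancel the underlying stream as well.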