Merge pull request #397 from QwenLM/fix/multi-line-output

fix: revert trimEnd on LLM response content
This commit is contained in:
pomelo
2025-08-20 20:33:52 +08:00
committed by GitHub

View File

@@ -563,7 +563,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
// Add combined text if any
if (combinedText) {
-      combinedParts.push({ text: combinedText.trimEnd() });
+      combinedParts.push({ text: combinedText });
}
// Add function calls
@@ -1164,12 +1164,8 @@ export class OpenAIContentGenerator implements ContentGenerator {
// Handle text content
if (choice.message.content) {
-        if (typeof choice.message.content === 'string') {
-          parts.push({ text: choice.message.content.trimEnd() });
-        } else {
-          parts.push({ text: choice.message.content });
-        }
+        parts.push({ text: choice.message.content });
}
// Handle tool calls
if (choice.message.tool_calls) {
@@ -1253,12 +1249,8 @@ export class OpenAIContentGenerator implements ContentGenerator {
// Handle text content
if (choice.delta?.content) {
-        if (typeof choice.delta.content === 'string') {
-          parts.push({ text: choice.delta.content.trimEnd() });
-        } else {
-          parts.push({ text: choice.delta.content });
-        }
+        parts.push({ text: choice.delta.content });
}
// Handle tool calls - only accumulate during streaming, emit when complete
if (choice.delta?.tool_calls) {
@@ -1776,7 +1768,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
}
}
-      messageContent = textParts.join('').trimEnd();
+      messageContent = textParts.join('');
}
const choice: OpenAIChoice = {