Compare commits

...

6 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| LaZzyMan | 61aad5a162 | fix: missing whitespaces for stream-json/json output format via GLM 4.7 model | 2025-12-29 16:59:09 +08:00 |
| 顾盼 | e27e9a5f18 | Merge pull request #1288 from Weaxs/main (support merge ChatCompletionContentPart && add filterEmptyMessages) | 2025-12-29 10:50:30 +08:00 |
| pomelo | 2578d8c151 | Merge pull request #1360 from IceyLiu/icey-feat (docs: add AionUi to ecosystem section) | 2025-12-29 10:12:53 +08:00 |
| VeryLiu-lab | a877fedc52 | docs: add AionUi to ecosystem section (add AionUi as a graphical interface option for Qwen Code users; AionUi provides a modern GUI for command-line AI tools including Qwen Code, offering an alternative to the terminal interface) | 2025-12-28 21:56:59 +08:00 |
| Weaxs | d2bc46cbb4 | remove filterEmptyMessages | 2025-12-18 00:55:47 +08:00 |
| Weaxs | 84eb5c562f | support merge ChatCompletionContentPart && add filterEmptyMessages | 2025-12-18 00:46:48 +08:00 |
6 changed files with 381 additions and 11 deletions

View File

@@ -191,6 +191,7 @@ See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/se
 Looking for a graphical interface?
 
+- [**AionUi**](https://github.com/iOfficeAI/AionUi) A modern GUI for command-line AI tools including Qwen Code
 - [**Gemini CLI Desktop**](https://github.com/Piebald-AI/gemini-cli-desktop) A cross-platform desktop/web/mobile UI for Qwen Code
 
 ## Troubleshooting

View File

@@ -630,6 +630,67 @@ describe('BaseJsonOutputAdapter', () => {
       expect(state.blocks).toHaveLength(0);
     });
+
+    it('should preserve whitespace in thinking content', () => {
+      const state = adapter.exposeCreateMessageState();
+      adapter.startAssistantMessage();
+      adapter.exposeAppendThinking(
+        state,
+        '',
+        'The user just said "Hello"',
+        null,
+      );
+
+      expect(state.blocks).toHaveLength(1);
+      expect(state.blocks[0]).toMatchObject({
+        type: 'thinking',
+        thinking: 'The user just said "Hello"',
+      });
+
+      // Verify spaces are preserved
+      const block = state.blocks[0] as { thinking: string };
+      expect(block.thinking).toContain('user just');
+      expect(block.thinking).not.toContain('userjust');
+    });
+
+    it('should preserve whitespace when appending multiple thinking fragments', () => {
+      const state = adapter.exposeCreateMessageState();
+      adapter.startAssistantMessage();
+
+      // Simulate streaming thinking content in fragments
+      adapter.exposeAppendThinking(state, '', 'The user just', null);
+      adapter.exposeAppendThinking(state, '', ' said "Hello"', null);
+      adapter.exposeAppendThinking(
+        state,
+        '',
+        '. This is a simple greeting',
+        null,
+      );
+
+      expect(state.blocks).toHaveLength(1);
+      const block = state.blocks[0] as { thinking: string };
+
+      // Verify the complete text with all spaces preserved
+      expect(block.thinking).toBe(
+        'The user just said "Hello". This is a simple greeting',
+      );
+
+      // Verify specific space preservation
+      expect(block.thinking).toContain('user just ');
+      expect(block.thinking).toContain(' said');
+      expect(block.thinking).toContain('". This');
+      expect(block.thinking).not.toContain('userjust');
+      expect(block.thinking).not.toContain('justsaid');
+    });
+
+    it('should preserve leading and trailing whitespace in description', () => {
+      const state = adapter.exposeCreateMessageState();
+      adapter.startAssistantMessage();
+      adapter.exposeAppendThinking(state, '', ' content with spaces ', null);
+
+      expect(state.blocks).toHaveLength(1);
+      const block = state.blocks[0] as { thinking: string };
+      expect(block.thinking).toBe(' content with spaces ');
+    });
   });
 
   describe('appendToolUse', () => {

View File

@@ -816,9 +816,18 @@ export abstract class BaseJsonOutputAdapter {
     parentToolUseId?: string | null,
   ): void {
     const actualParentToolUseId = parentToolUseId ?? null;
-    const fragment = [subject?.trim(), description?.trim()]
-      .filter((value) => value && value.length > 0)
-      .join(': ');
+
+    // Build fragment without trimming to preserve whitespace in streaming content
+    // Only filter out null/undefined/empty values
+    const parts: string[] = [];
+    if (subject && subject.length > 0) {
+      parts.push(subject);
+    }
+    if (description && description.length > 0) {
+      parts.push(description);
+    }
+    const fragment = parts.join(': ');
+
     if (!fragment) {
       return;
     }
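
The root cause is easy to reproduce outside the adapter: when streamed fragments are trimmed individually, the spaces that belonged at fragment boundaries are lost. A minimal standalone sketch of the before/after behavior (plain TypeScript, not code from the repository):

```ts
// Minimal sketch of the bug fixed above; not code from the repository.
// Streamed thinking text arrives as arbitrary fragments whose leading and
// trailing spaces carry the word boundaries between fragments.
const fragments = ['The user just', ' said "Hello"', '. This is a simple greeting'];

// Old behavior: trimming each fragment before joining drops those spaces.
const trimmed = fragments.map((f) => f.trim()).join('');
console.log(trimmed); // The user justsaid "Hello". This is a simple greeting

// Fixed behavior: keep fragments verbatim, skipping only empty values.
const preserved = fragments.filter((f) => f.length > 0).join('');
console.log(preserved); // The user just said "Hello". This is a simple greeting
```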

View File

@@ -323,6 +323,68 @@ describe('StreamJsonOutputAdapter', () => {
     });
   });
 
+  it('should preserve whitespace in thinking content (issue #1356)', () => {
+    adapter.processEvent({
+      type: GeminiEventType.Thought,
+      value: {
+        subject: '',
+        description: 'The user just said "Hello"',
+      },
+    });
+
+    const message = adapter.finalizeAssistantMessage();
+    expect(message.message.content).toHaveLength(1);
+
+    const block = message.message.content[0] as {
+      type: string;
+      thinking: string;
+    };
+    expect(block.type).toBe('thinking');
+    expect(block.thinking).toBe('The user just said "Hello"');
+
+    // Verify spaces are preserved
+    expect(block.thinking).toContain('user just');
+    expect(block.thinking).not.toContain('userjust');
+  });
+
+  it('should preserve whitespace when streaming multiple thinking fragments (issue #1356)', () => {
+    // Simulate streaming thinking content in multiple events
+    adapter.processEvent({
+      type: GeminiEventType.Thought,
+      value: {
+        subject: '',
+        description: 'The user just',
+      },
+    });
+    adapter.processEvent({
+      type: GeminiEventType.Thought,
+      value: {
+        subject: '',
+        description: ' said "Hello"',
+      },
+    });
+    adapter.processEvent({
+      type: GeminiEventType.Thought,
+      value: {
+        subject: '',
+        description: '. This is a simple greeting',
+      },
+    });
+
+    const message = adapter.finalizeAssistantMessage();
+    expect(message.message.content).toHaveLength(1);
+
+    const block = message.message.content[0] as {
+      type: string;
+      thinking: string;
+    };
+    expect(block.thinking).toBe(
+      'The user just said "Hello". This is a simple greeting',
+    );
+
+    // Verify specific spaces are preserved
+    expect(block.thinking).toContain('user just ');
+    expect(block.thinking).toContain(' said');
+    expect(block.thinking).not.toContain('userjust');
+    expect(block.thinking).not.toContain('justsaid');
+  });
+
   it('should append tool use from ToolCallRequest events', () => {
     adapter.processEvent({
       type: GeminiEventType.ToolCallRequest,

View File

@@ -542,4 +542,206 @@ describe('OpenAIContentConverter', () => {
       expect(original).toEqual(originalCopy);
     });
   });
+
+  describe('mergeConsecutiveAssistantMessages', () => {
+    it('should merge two consecutive assistant messages with string content', () => {
+      const request: GenerateContentParameters = {
+        model: 'models/test',
+        contents: [
+          {
+            role: 'model',
+            parts: [{ text: 'First part' }],
+          },
+          {
+            role: 'model',
+            parts: [{ text: 'Second part' }],
+          },
+        ],
+      };
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+
+      expect(messages).toHaveLength(1);
+      expect(messages[0].role).toBe('assistant');
+      const content = messages[0]
+        .content as OpenAI.Chat.ChatCompletionContentPart[];
+      expect(content).toHaveLength(2);
+      expect(content[0]).toEqual({ type: 'text', text: 'First part' });
+      expect(content[1]).toEqual({ type: 'text', text: 'Second part' });
+    });
+
+    it('should merge multiple consecutive assistant messages', () => {
+      const request: GenerateContentParameters = {
+        model: 'models/test',
+        contents: [
+          {
+            role: 'model',
+            parts: [{ text: 'Part 1' }],
+          },
+          {
+            role: 'model',
+            parts: [{ text: 'Part 2' }],
+          },
+          {
+            role: 'model',
+            parts: [{ text: 'Part 3' }],
+          },
+        ],
+      };
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+
+      expect(messages).toHaveLength(1);
+      expect(messages[0].role).toBe('assistant');
+      const content = messages[0]
+        .content as OpenAI.Chat.ChatCompletionContentPart[];
+      expect(content).toHaveLength(3);
+    });
+
+    it('should merge tool_calls from consecutive assistant messages', () => {
+      const request: GenerateContentParameters = {
+        model: 'models/test',
+        contents: [
+          {
+            role: 'model',
+            parts: [
+              {
+                functionCall: {
+                  id: 'call_1',
+                  name: 'tool_1',
+                  args: {},
+                },
+              },
+            ],
+          },
+          {
+            role: 'user',
+            parts: [
+              {
+                functionResponse: {
+                  id: 'call_1',
+                  name: 'tool_1',
+                  response: { output: 'result_1' },
+                },
+              },
+            ],
+          },
+          {
+            role: 'model',
+            parts: [
+              {
+                functionCall: {
+                  id: 'call_2',
+                  name: 'tool_2',
+                  args: {},
+                },
+              },
+            ],
+          },
+          {
+            role: 'user',
+            parts: [
+              {
+                functionResponse: {
+                  id: 'call_2',
+                  name: 'tool_2',
+                  response: { output: 'result_2' },
+                },
+              },
+            ],
+          },
+        ],
+      };
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+
+      // Should have: assistant (tool_call_1), tool (result_1), assistant (tool_call_2), tool (result_2)
+      expect(messages).toHaveLength(4);
+      expect(messages[0].role).toBe('assistant');
+      expect(messages[1].role).toBe('tool');
+      expect(messages[2].role).toBe('assistant');
+      expect(messages[3].role).toBe('tool');
+    });
+
+    it('should not merge assistant messages separated by user messages', () => {
+      const request: GenerateContentParameters = {
+        model: 'models/test',
+        contents: [
+          {
+            role: 'model',
+            parts: [{ text: 'First assistant' }],
+          },
+          {
+            role: 'user',
+            parts: [{ text: 'User message' }],
+          },
+          {
+            role: 'model',
+            parts: [{ text: 'Second assistant' }],
+          },
+        ],
+      };
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+
+      expect(messages).toHaveLength(3);
+      expect(messages[0].role).toBe('assistant');
+      expect(messages[1].role).toBe('user');
+      expect(messages[2].role).toBe('assistant');
+    });
+
+    it('should handle merging when one message has array content and another has string', () => {
+      const request: GenerateContentParameters = {
+        model: 'models/test',
+        contents: [
+          {
+            role: 'model',
+            parts: [{ text: 'Text part' }],
+          },
+          {
+            role: 'model',
+            parts: [{ text: 'Another text' }],
+          },
+        ],
+      };
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+
+      expect(messages).toHaveLength(1);
+      const content = messages[0]
+        .content as OpenAI.Chat.ChatCompletionContentPart[];
+      expect(Array.isArray(content)).toBe(true);
+      expect(content).toHaveLength(2);
+    });
+
+    it('should merge empty content correctly', () => {
+      const request: GenerateContentParameters = {
+        model: 'models/test',
+        contents: [
+          {
+            role: 'model',
+            parts: [{ text: 'First' }],
+          },
+          {
+            role: 'model',
+            parts: [],
+          },
+          {
+            role: 'model',
+            parts: [{ text: 'Second' }],
+          },
+        ],
+      };
+
+      const messages = converter.convertGeminiRequestToOpenAI(request);
+
+      // Empty messages should be filtered out
+      expect(messages).toHaveLength(1);
+      const content = messages[0]
+        .content as OpenAI.Chat.ChatCompletionContentPart[];
+      expect(content).toHaveLength(2);
+      expect(content[0]).toEqual({ type: 'text', text: 'First' });
+      expect(content[1]).toEqual({ type: 'text', text: 'Second' });
+    });
+  });
 });

View File

@@ -1120,12 +1120,44 @@ export class OpenAIContentConverter {
       // If the last message is also an assistant message, merge them
       if (lastMessage.role === 'assistant') {
         // Combine content
-        const combinedContent = [
-          typeof lastMessage.content === 'string' ? lastMessage.content : '',
-          typeof message.content === 'string' ? message.content : '',
-        ]
-          .filter(Boolean)
-          .join('');
+        const lastContent = lastMessage.content;
+        const currentContent = message.content;
+
+        // Determine if we should use array format (if either content is an array)
+        const useArrayFormat =
+          Array.isArray(lastContent) || Array.isArray(currentContent);
+
+        let combinedContent:
+          | string
+          | OpenAI.Chat.ChatCompletionContentPart[]
+          | null;
+
+        if (useArrayFormat) {
+          // Convert both to array format and merge
+          const lastParts = Array.isArray(lastContent)
+            ? lastContent
+            : typeof lastContent === 'string' && lastContent
+              ? [{ type: 'text' as const, text: lastContent }]
+              : [];
+          const currentParts = Array.isArray(currentContent)
+            ? currentContent
+            : typeof currentContent === 'string' && currentContent
+              ? [{ type: 'text' as const, text: currentContent }]
+              : [];
+          combinedContent = [
+            ...lastParts,
+            ...currentParts,
+          ] as OpenAI.Chat.ChatCompletionContentPart[];
+        } else {
+          // Both are strings or null, merge as strings
+          const lastText = typeof lastContent === 'string' ? lastContent : '';
+          const currentText =
+            typeof currentContent === 'string' ? currentContent : '';
+          const mergedText = [lastText, currentText].filter(Boolean).join('');
+          combinedContent = mergedText || null;
+        }
 
         // Combine tool calls
         const lastToolCalls =
@@ -1137,14 +1169,17 @@
         // Update the last message with combined data
         (
           lastMessage as OpenAI.Chat.ChatCompletionMessageParam & {
-            content: string | null;
+            content: string | OpenAI.Chat.ChatCompletionContentPart[] | null;
             tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
           }
         ).content = combinedContent || null;
 
         if (combinedToolCalls.length > 0) {
           (
             lastMessage as OpenAI.Chat.ChatCompletionMessageParam & {
-              content: string | null;
+              content:
+                | string
+                | OpenAI.Chat.ChatCompletionContentPart[]
+                | null;
               tool_calls?: OpenAI.Chat.ChatCompletionMessageToolCall[];
             }
           ).tool_calls = combinedToolCalls;
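
Condensed to its core, the merge rule above reads as a small pure function: keep plain string concatenation when both sides are strings (or null), and promote both sides to `ChatCompletionContentPart[]` as soon as either side is already an array. A sketch of that rule (the `mergeContent` helper is illustrative only, not an export of the converter):

```ts
import type OpenAI from 'openai';

type Content = string | OpenAI.Chat.ChatCompletionContentPart[] | null;

// Illustrative helper mirroring the merge rule in the diff above;
// not part of the converter's public API.
function mergeContent(a: Content, b: Content): Content {
  if (Array.isArray(a) || Array.isArray(b)) {
    // Promote both sides to content parts and concatenate.
    const toParts = (c: Content): OpenAI.Chat.ChatCompletionContentPart[] =>
      Array.isArray(c) ? c : c ? [{ type: 'text', text: c }] : [];
    return [...toParts(a), ...toParts(b)];
  }
  // Both sides are strings or null: plain string concatenation.
  const merged = [a ?? '', b ?? ''].filter(Boolean).join('');
  return merged || null;
}

// Two strings stay a string; a mixed pair becomes an array of parts.
mergeContent('Hello, ', 'world'); // 'Hello, world'
mergeContent([{ type: 'text', text: 'Hi' }], ' there');
// [{ type: 'text', text: 'Hi' }, { type: 'text', text: ' there' }]
```

Note that the tests above see merged text content surface as an array of text parts, which suggests the Gemini-to-OpenAI conversion already represents each message's content as parts before this merge step runs.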