Compare commits

..

9 Commits

Author SHA1 Message Date
koalazf.99
5854ac67c6 fix sampling params 2025-12-11 13:46:37 +08:00
koalazf.99
354c85bcff revert: top_p & temperature default value to none 2025-10-28 13:00:10 +08:00
pomelo-nwu
7ccba75621 test: update /chat list test to match plain text output
Updated the test expectations to match the new plain text format
without ANSI escape codes.
2025-10-28 09:15:07 +08:00
pomelo-nwu
e0e5fa5084 fix: remove hardcoded ANSI escape codes in /chat list command
The /chat list command was displaying raw ANSI escape codes instead of
colored text. This was caused by the escapeAnsiCtrlCodes function in
HistoryItemDisplay that escapes all ANSI control characters.

Changed to plain text format for better compatibility and cleaner output.
2025-10-28 09:14:00 +08:00
tanzhenxin
65cf80f4ab chore: bump version to 0.1.1 (#883) 2025-10-27 19:32:52 +08:00
tanzhenxin
1577dabf41 fix: release workflow failure 2025-10-27 17:47:03 +08:00
tanzhenxin
4328cd7f63 feat: update tool output format, use plain string instead of json string (#881) 2025-10-27 17:26:47 +08:00
pomelo
2a5577e5d7 docs: add /model command documentation (#872) 2025-10-24 17:09:52 +08:00
tanzhenxin
be633a80cc 📦 Release qwen-code CLI as a Standalone Bundled Package (#866) 2025-10-24 17:08:59 +08:00
24 changed files with 291 additions and 157 deletions

View File

@@ -167,7 +167,11 @@ jobs:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
git add package.json package-lock.json packages/*/package.json
git commit -m "chore(release): ${RELEASE_TAG}"
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): ${RELEASE_TAG}"
fi
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
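
The underlying failure: `git commit` exits non-zero when nothing is staged, which aborted the release job on no-op version bumps, while `git diff --staged --quiet` exits 0 exactly when the stage is empty. A minimal TypeScript sketch of the same guard (illustrative only; the real workflow stays in shell, and commitIfStaged is a made-up name):

import { execSync } from 'node:child_process';

// Commit only when something is staged, mirroring the shell guard above.
function commitIfStaged(releaseTag: string): void {
  try {
    // Exits 0 (no throw) when the stage is empty.
    execSync('git diff --staged --quiet');
    console.log('No version changes to commit');
  } catch {
    // A non-zero exit means staged changes exist, so committing is safe.
    execSync(`git commit -m "chore(release): ${releaseTag}"`);
  }
}

commitIfStaged(process.env['RELEASE_TAG'] ?? '');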

View File

@@ -66,17 +66,6 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Display all directories added by `/directory add` and `--include-directories`.
- **Usage:** `/directory show`
- **`/directory`** (or **`/dir`**)
- **Description:** Manage workspace directories for multi-directory support.
- **Sub-commands:**
- **`add`**:
- **Description:** Add a directory to the workspace. The path can be absolute or relative to the current working directory; paths relative to the home directory are supported as well.
- **Usage:** `/directory add <path1>,<path2>`
- **Note:** Disabled in restrictive sandbox profiles. If you're using one, use `--include-directories` when starting the session instead.
- **`show`**:
- **Description:** Display all directories added by `/directory add` and `--include-directories`.
- **Usage:** `/directory show`
- **`/editor`**
- **Description:** Open a dialog for selecting supported editors.
@@ -108,6 +97,20 @@ Slash commands provide meta-level control over the CLI itself.
- **Description:** Reload the hierarchical instructional memory from all context files (default: `QWEN.md`) found in the configured locations (global, project/ancestors, and sub-directories). This updates the model with the latest context content.
- **Note:** For more details on how context files contribute to hierarchical memory, see the [CLI Configuration documentation](./configuration.md#context-files-hierarchical-instructional-context).
- **`/model`**
- **Description:** Switch the model for the current session. Opens a dialog to select from available models based on your authentication type.
- **Usage:** `/model`
- **Features:**
- Shows a dialog with all available models for your current authentication type
- Displays model descriptions and capabilities (e.g., vision support)
- Changes the model for the current session only
- Supports both Qwen models (via OAuth) and OpenAI models (via API key)
- **Available Models:**
- **Qwen Coder:** The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)
- **Qwen Vision:** The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23) - supports image analysis
- **OpenAI Models:** Available when using OpenAI authentication (configured via `OPENAI_MODEL` environment variable)
- **Note:** Model selection is session-specific and does not persist across different Qwen Code sessions. To set a default model, use the `model.name` setting in your configuration.
- **`/restore`**
- **Description:** Restores the project files to the state they were in just before a tool was executed. This is particularly useful for undoing file edits made by a tool. If run without a tool call ID, it will list available checkpoints to restore from.
- **Usage:** `/restore [tool_call_id]`

View File

@@ -2,7 +2,7 @@ export default {
subagents: 'Subagents',
checkpointing: 'Checkpointing',
sandbox: 'Sandbox Support',
'headless-mode': 'Headless Mode',
headless: 'Headless Mode',
'welcome-back': 'Welcome Back',
'token-caching': 'Token Caching',
};

package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"workspaces": [
"packages/*"
],
@@ -16024,7 +16024,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"dependencies": {
"@google/genai": "1.16.0",
"@iarna/toml": "^2.2.5",
@@ -16139,7 +16139,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"hasInstallScript": true,
"dependencies": {
"@google/genai": "1.16.0",
@@ -16278,7 +16278,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -16290,7 +16290,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.0-preview.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.1"
},
"scripts": {
"start": "cross-env node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.0-preview.0"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.1.1"
},
"dependencies": {
"@google/genai": "1.16.0",

View File

@@ -5,6 +5,7 @@
*/
import type {
ContentGeneratorConfig,
FileFilteringOptions,
MCPServerConfig,
OutputFormat,
@@ -123,6 +124,24 @@ export interface CliArgs {
outputFormat: string | undefined;
}
type LegacySamplingSettings = {
sampling_params?: ContentGeneratorConfig['samplingParams'];
};
function getLegacySamplingParams(
settings: Settings,
): ContentGeneratorConfig['samplingParams'] | undefined {
if (
typeof settings !== 'object' ||
settings === null ||
!('sampling_params' in (settings as Record<string, unknown>))
) {
return undefined;
}
return (settings as Settings & LegacySamplingSettings).sampling_params;
}
export async function parseArguments(settings: Settings): Promise<CliArgs> {
const rawArgv = hideBin(process.argv);
const yargsInstance = yargs(rawArgv)
@@ -685,6 +704,7 @@ export async function loadCliConfig(
const vlmSwitchMode =
argv.vlmSwitchMode || settings.experimental?.vlmSwitchMode;
const legacySamplingParams = getLegacySamplingParams(settings);
return new Config({
sessionId,
embeddingModel: DEFAULT_QWEN_EMBEDDING_MODEL,
@@ -745,6 +765,8 @@ export async function loadCliConfig(
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
: argv.openaiLogging) ?? false,
// Include sampling_params from root level settings
...(legacySamplingParams ? { samplingParams: legacySamplingParams } : {}),
},
cliVersion: await getCliVersion(),
tavilyApiKey:
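
How the legacy lookup behaves, as a self-contained sketch (the Settings and SamplingParams types here are stand-ins; the real interfaces are not shown in this compare):

type SamplingParams = { temperature?: number; top_p?: number };
type Settings = Record<string, unknown>;
type LegacySamplingSettings = { sampling_params?: SamplingParams };

function getLegacySamplingParams(
  settings: Settings,
): SamplingParams | undefined {
  if (typeof settings !== 'object' || settings === null) return undefined;
  if (!('sampling_params' in settings)) return undefined;
  return (settings as LegacySamplingSettings).sampling_params;
}

// A root-level sampling_params key from an older settings file is still honored:
console.log(getLegacySamplingParams({ sampling_params: { temperature: 0.7 } }));
// -> { temperature: 0.7 }
console.log(getLegacySamplingParams({})); // -> undefined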

View File

@@ -555,7 +555,7 @@ export const AppContainer = (props: AppContainerProps) => {
historyManager.addItem(
{
type: MessageType.INFO,
text: 'Refreshing hierarchical memory (GEMINI.md or other context files)...',
text: 'Refreshing hierarchical memory (QWEN.md or other context files)...',
},
Date.now(),
);

View File

@@ -139,8 +139,8 @@ describe('chatCommand', () => {
.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/);
const formattedDate = isoDate ? `${isoDate[1]} ${isoDate[2]}` : '';
expect(content).toContain(formattedDate);
const index1 = content.indexOf('- \u001b[36mtest1\u001b[0m');
const index2 = content.indexOf('- \u001b[36mtest2\u001b[0m');
const index1 = content.indexOf('- test1');
const index2 = content.indexOf('- test2');
expect(index1).toBeGreaterThanOrEqual(0);
expect(index2).toBeGreaterThan(index1);
});

View File

@@ -89,9 +89,9 @@ const listCommand: SlashCommand = {
const isoString = chat.mtime.toISOString();
const match = isoString.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/);
const formattedDate = match ? `${match[1]} ${match[2]}` : 'Invalid Date';
message += ` - \u001b[36m${paddedName}\u001b[0m \u001b[90m(saved on ${formattedDate})\u001b[0m\n`;
message += ` - ${paddedName} (saved on ${formattedDate})\n`;
}
message += `\n\u001b[90mNote: Newest last, oldest first\u001b[0m`;
message += `\nNote: Newest last, oldest first`;
return {
type: 'message',
messageType: 'info',
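
For reference, the codes being dropped: \u001b[36m...\u001b[0m wraps text in cyan and \u001b[90m...\u001b[0m in grey. Once a downstream step such as escapeAnsiCtrlCodes neutralizes the \u001b control character, those sequences render literally instead of coloring the text, hence the move to plain output. A small illustration (the chat name and date are example values):

const CYAN = '\u001b[36m';
const GREY = '\u001b[90m';
const RESET = '\u001b[0m';

// Old format: colored in a raw terminal, but displayed verbatim as
// "\u001b[36mtest1\u001b[0m ..." once control characters are escaped.
const oldLine = ` - ${CYAN}test1${RESET} ${GREY}(saved on 2025-10-28 09:14:00)${RESET}`;

// New format: reads the same everywhere, escaped or not.
const newLine = ` - test1 (saved on 2025-10-28 09:14:00)`;

console.log(oldLine);
console.log(newLine);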

View File

@@ -20,12 +20,14 @@ export const AVAILABLE_MODELS_QWEN: AvailableModel[] = [
{
id: MAINLINE_CODER,
label: MAINLINE_CODER,
description: 'Optimized for code generation and understanding',
description:
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
},
{
id: MAINLINE_VLM,
label: MAINLINE_VLM,
description: 'Vision model with multimodal capabilities',
description:
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)',
isVision: true,
},
];

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View File

@@ -65,10 +65,7 @@ export interface GenerateJsonOptions {
*/
export class BaseLlmClient {
// Default configuration for utility tasks
private readonly defaultUtilityConfig: GenerateContentConfig = {
temperature: 0,
topP: 1,
};
private readonly defaultUtilityConfig: GenerateContentConfig = {};
constructor(
private readonly contentGenerator: ContentGenerator,

View File

@@ -149,10 +149,7 @@ const COMPRESSION_PRESERVE_THRESHOLD = 0.3;
export class GeminiClient {
private chat?: GeminiChat;
private readonly generateContentConfig: GenerateContentConfig = {
temperature: 0,
topP: 1,
};
private readonly generateContentConfig: GenerateContentConfig;
private sessionTurnCount = 0;
private readonly loopDetector: LoopDetectionService;
@@ -169,6 +166,44 @@ export class GeminiClient {
constructor(private readonly config: Config) {
this.loopDetector = new LoopDetectionService(config);
this.lastPromptId = this.config.getSessionId();
this.generateContentConfig = this.buildDefaultGenerateContentConfig();
}
private buildDefaultGenerateContentConfig(): GenerateContentConfig {
const samplingParams =
this.config.getContentGeneratorConfig()?.samplingParams;
if (!samplingParams) {
return {};
}
const config: GenerateContentConfig = {};
if (samplingParams.temperature !== undefined) {
config.temperature = samplingParams.temperature;
}
if (samplingParams.top_p !== undefined) {
config.topP = samplingParams.top_p;
}
if (samplingParams.top_k !== undefined) {
config.topK = samplingParams.top_k;
}
if (samplingParams.max_tokens !== undefined) {
config.maxOutputTokens = samplingParams.max_tokens;
}
if (samplingParams.presence_penalty !== undefined) {
config.presencePenalty = samplingParams.presence_penalty;
}
if (samplingParams.frequency_penalty !== undefined) {
config.frequencyPenalty = samplingParams.frequency_penalty;
}
return config;
}
async initialize() {
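
Net effect of this hunk and the BaseLlmClient one above: with no configured samplingParams, both clients now send an empty generation config, so temperature and topP fall back to provider defaults rather than the previously hardcoded 0 and 1 (the "default value to none" revert in the commit list). When params are configured, snake_case settings keys map onto camelCase GenerateContentConfig fields. A reduced, runnable version of that mapping (toGenerateContentConfig is an illustrative name; both types are trimmed to the fields the hunk touches):

type SamplingParams = {
  temperature?: number;
  top_p?: number;
  top_k?: number;
  max_tokens?: number;
  presence_penalty?: number;
  frequency_penalty?: number;
};
type GenerateContentConfig = {
  temperature?: number;
  topP?: number;
  topK?: number;
  maxOutputTokens?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
};

function toGenerateContentConfig(p?: SamplingParams): GenerateContentConfig {
  if (!p) return {}; // nothing configured: let provider defaults apply
  const c: GenerateContentConfig = {};
  if (p.temperature !== undefined) c.temperature = p.temperature;
  if (p.top_p !== undefined) c.topP = p.top_p;
  if (p.top_k !== undefined) c.topK = p.top_k;
  if (p.max_tokens !== undefined) c.maxOutputTokens = p.max_tokens;
  if (p.presence_penalty !== undefined) c.presencePenalty = p.presence_penalty;
  if (p.frequency_penalty !== undefined) c.frequencyPenalty = p.frequency_penalty;
  return c;
}

console.log(toGenerateContentConfig()); // {}
console.log(toGenerateContentConfig({ top_p: 0.95, max_tokens: 2048 }));
// -> { topP: 0.95, maxOutputTokens: 2048 }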

View File

@@ -7,6 +7,7 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { OpenAIContentConverter } from './converter.js';
import type { StreamingToolCallParser } from './streamingToolCallParser.js';
import type { GenerateContentParameters, Content } from '@google/genai';
describe('OpenAIContentConverter', () => {
let converter: OpenAIContentConverter;
@@ -68,4 +69,77 @@ describe('OpenAIContentConverter', () => {
expect(parser.getBuffer(0)).toBe('');
});
});
describe('convertGeminiRequestToOpenAI', () => {
const createRequestWithFunctionResponse = (
response: Record<string, unknown>,
): GenerateContentParameters => {
const contents: Content[] = [
{
role: 'model',
parts: [
{
functionCall: {
id: 'call_1',
name: 'shell',
args: {},
},
},
],
},
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'shell',
response,
},
},
],
},
];
return {
model: 'models/test',
contents,
};
};
it('should extract raw output from function response objects', () => {
const request = createRequestWithFunctionResponse({
output: 'Raw output text',
});
const messages = converter.convertGeminiRequestToOpenAI(request);
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(toolMessage?.content).toBe('Raw output text');
});
it('should prioritize error field when present', () => {
const request = createRequestWithFunctionResponse({
error: 'Command failed',
});
const messages = converter.convertGeminiRequestToOpenAI(request);
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(toolMessage?.content).toBe('Command failed');
});
it('should stringify non-string responses', () => {
const request = createRequestWithFunctionResponse({
data: { value: 42 },
});
const messages = converter.convertGeminiRequestToOpenAI(request);
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(toolMessage?.content).toBe('{"data":{"value":42}}');
});
});
});

View File

@@ -276,10 +276,7 @@ export class OpenAIContentConverter {
messages.push({
role: 'tool' as const,
tool_call_id: funcResponse.id || '',
content:
typeof funcResponse.response === 'string'
? funcResponse.response
: JSON.stringify(funcResponse.response),
content: this.extractFunctionResponseContent(funcResponse.response),
});
}
return;
@@ -359,6 +356,36 @@ export class OpenAIContentConverter {
return { textParts, functionCalls, functionResponses, mediaParts };
}
private extractFunctionResponseContent(response: unknown): string {
if (response === null || response === undefined) {
return '';
}
if (typeof response === 'string') {
return response;
}
if (typeof response === 'object') {
const responseObject = response as Record<string, unknown>;
const output = responseObject['output'];
if (typeof output === 'string') {
return output;
}
const error = responseObject['error'];
if (typeof error === 'string') {
return error;
}
}
try {
const serialized = JSON.stringify(response);
return serialized ?? String(response);
} catch {
return String(response);
}
}
/**
* Determine media type from MIME type
*/
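
Precedence in the new extractFunctionResponseContent, restated as a standalone function (same logic as the hunk, not the shipped module): a string response passes through, then a string output field, then a string error field, and anything else is JSON-stringified.

function extractFunctionResponseContent(response: unknown): string {
  if (response === null || response === undefined) return '';
  if (typeof response === 'string') return response; // raw pass-through
  if (typeof response === 'object') {
    const r = response as Record<string, unknown>;
    const output = r['output'];
    if (typeof output === 'string') return output; // tool stdout
    const error = r['error'];
    if (typeof error === 'string') return error; // tool failure text
  }
  try {
    // JSON.stringify can return undefined (e.g. for functions) or throw
    // (e.g. on circular structures); fall back to String() in both cases.
    return JSON.stringify(response) ?? String(response);
  } catch {
    return String(response);
  }
}

console.log(extractFunctionResponseContent({ output: 'Raw output text' })); // Raw output text
console.log(extractFunctionResponseContent({ error: 'Command failed' })); // Command failed
console.log(extractFunctionResponseContent({ data: { value: 42 } })); // {"data":{"value":42}}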

View File

@@ -131,16 +131,14 @@ describe('ExitPlanModeTool', () => {
}
const result = await invocation.execute(signal);
const expectedLlmMessage =
'User has approved your plan. You can now start coding. Start with updating your todo list if applicable.';
expect(result).toEqual({
llmContent: expectedLlmMessage,
returnDisplay: {
type: 'plan_summary',
message: 'User approved the plan.',
plan: params.plan,
},
expect(result.llmContent).toContain(
'User has approved your plan. You can now start coding',
);
expect(result.returnDisplay).toEqual({
type: 'plan_summary',
message: 'User approved the plan.',
plan: params.plan,
});
expect(mockConfig.setApprovalMode).toHaveBeenCalledWith(
@@ -188,15 +186,12 @@ describe('ExitPlanModeTool', () => {
const result = await invocation.execute(signal);
expect(result).toEqual({
llmContent: JSON.stringify({
success: false,
plan: params.plan,
error: 'Plan execution was not approved. Remaining in plan mode.',
}),
returnDisplay:
'Plan execution was not approved. Remaining in plan mode.',
});
expect(result.llmContent).toBe(
'Plan execution was not approved. Remaining in plan mode.',
);
expect(result.returnDisplay).toBe(
'Plan execution was not approved. Remaining in plan mode.',
);
expect(mockConfig.setApprovalMode).toHaveBeenCalledWith(
ApprovalMode.PLAN,
@@ -215,50 +210,6 @@ describe('ExitPlanModeTool', () => {
);
});
it('should handle execution errors gracefully', async () => {
const params: ExitPlanModeParams = {
plan: 'Test plan',
};
const invocation = tool.build(params);
const confirmation = await invocation.shouldConfirmExecute(
new AbortController().signal,
);
if (confirmation) {
// Don't approve the plan so we go through the rejection path
await confirmation.onConfirm(ToolConfirmationOutcome.Cancel);
}
// Create a spy to simulate an error during the execution
const consoleSpy = vi
.spyOn(console, 'error')
.mockImplementation(() => {});
// Mock JSON.stringify to throw an error in the rejection path
const originalStringify = JSON.stringify;
vi.spyOn(JSON, 'stringify').mockImplementationOnce(() => {
throw new Error('JSON stringify error');
});
const result = await invocation.execute(new AbortController().signal);
expect(result).toEqual({
llmContent: JSON.stringify({
success: false,
error: 'Failed to present plan. Detail: JSON stringify error',
}),
returnDisplay: 'Error presenting plan: JSON stringify error',
});
expect(consoleSpy).toHaveBeenCalledWith(
'[ExitPlanModeTool] Error executing exit_plan_mode: JSON stringify error',
);
// Restore original JSON.stringify
JSON.stringify = originalStringify;
consoleSpy.mockRestore();
});
it('should return empty tool locations', () => {
const params: ExitPlanModeParams = {
plan: 'Test plan',

View File

@@ -115,17 +115,12 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
const rejectionMessage =
'Plan execution was not approved. Remaining in plan mode.';
return {
llmContent: JSON.stringify({
success: false,
plan,
error: rejectionMessage,
}),
llmContent: rejectionMessage,
returnDisplay: rejectionMessage,
};
}
const llmMessage =
'User has approved your plan. You can now start coding. Start with updating your todo list if applicable.';
const llmMessage = `User has approved your plan. You can now start coding. Start with updating your todo list if applicable.`;
const displayMessage = 'User approved the plan.';
return {
@@ -142,11 +137,11 @@ class ExitPlanModeToolInvocation extends BaseToolInvocation<
console.error(
`[ExitPlanModeTool] Error executing exit_plan_mode: ${errorMessage}`,
);
const errorLlmContent = `Failed to present plan: ${errorMessage}`;
return {
llmContent: JSON.stringify({
success: false,
error: `Failed to present plan. Detail: ${errorMessage}`,
}),
llmContent: errorLlmContent,
returnDisplay: `Error presenting plan: ${errorMessage}`,
};
}
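
This is the plain-string convention from #881 applied to exit_plan_mode; the save_memory and todo_write hunks below follow suit. Schematically, with ToolResult reduced to the two fields visible in these diffs:

type ToolResult = { llmContent: string; returnDisplay: string };

const rejectionMessage =
  'Plan execution was not approved. Remaining in plan mode.';

// Before #881: a JSON envelope the model had to parse.
const before: ToolResult = {
  llmContent: JSON.stringify({ success: false, error: rejectionMessage }),
  returnDisplay: rejectionMessage,
};

// After: the human-readable sentence is the payload itself.
const after: ToolResult = {
  llmContent: rejectionMessage,
  returnDisplay: rejectionMessage,
};

console.log(before.llmContent);
// {"success":false,"error":"Plan execution was not approved. Remaining in plan mode."}
console.log(after.llmContent);
// Plan execution was not approved. Remaining in plan mode.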

View File

@@ -241,9 +241,7 @@ describe('MemoryTool', () => {
expectedFsArgument,
);
const successMessage = `Okay, I've remembered that in global memory: "${params.fact}"`;
expect(result.llmContent).toBe(
JSON.stringify({ success: true, message: successMessage }),
);
expect(result.llmContent).toBe(successMessage);
expect(result.returnDisplay).toBe(successMessage);
});
@@ -271,9 +269,7 @@ describe('MemoryTool', () => {
expectedFsArgument,
);
const successMessage = `Okay, I've remembered that in project memory: "${params.fact}"`;
expect(result.llmContent).toBe(
JSON.stringify({ success: true, message: successMessage }),
);
expect(result.llmContent).toBe(successMessage);
expect(result.returnDisplay).toBe(successMessage);
});
@@ -298,10 +294,7 @@ describe('MemoryTool', () => {
const result = await invocation.execute(mockAbortSignal);
expect(result.llmContent).toBe(
JSON.stringify({
success: false,
error: `Failed to save memory. Detail: ${underlyingError.message}`,
}),
`Error saving memory: ${underlyingError.message}`,
);
expect(result.returnDisplay).toBe(
`Error saving memory: ${underlyingError.message}`,
@@ -319,6 +312,8 @@ describe('MemoryTool', () => {
expect(result.llmContent).toContain(
'Please specify where to save this memory',
);
expect(result.llmContent).toContain('Global:');
expect(result.llmContent).toContain('Project:');
expect(result.returnDisplay).toContain('Global:');
expect(result.returnDisplay).toContain('Project:');
});

View File

@@ -309,7 +309,7 @@ Preview of changes to be made to GLOBAL memory:
if (!fact || typeof fact !== 'string' || fact.trim() === '') {
const errorMessage = 'Parameter "fact" must be a non-empty string.';
return {
llmContent: JSON.stringify({ success: false, error: errorMessage }),
llmContent: `Error: ${errorMessage}`,
returnDisplay: `Error: ${errorMessage}`,
};
}
@@ -324,10 +324,7 @@ Global: ${globalPath} (shared across all projects)
Project: ${projectPath} (current project only)`;
return {
llmContent: JSON.stringify({
success: false,
error: 'Please specify where to save this memory',
}),
llmContent: errorMessage,
returnDisplay: errorMessage,
};
}
@@ -344,10 +341,7 @@ Project: ${projectPath} (current project only)`;
await fs.writeFile(memoryFilePath, modified_content, 'utf-8');
const successMessage = `Okay, I've updated the ${scope} memory file with your modifications.`;
return {
llmContent: JSON.stringify({
success: true,
message: successMessage,
}),
llmContent: successMessage,
returnDisplay: successMessage,
};
} else {
@@ -359,10 +353,7 @@ Project: ${projectPath} (current project only)`;
});
const successMessage = `Okay, I've remembered that in ${scope} memory: "${fact}"`;
return {
llmContent: JSON.stringify({
success: true,
message: successMessage,
}),
llmContent: successMessage,
returnDisplay: successMessage,
};
}
@@ -372,11 +363,9 @@ Project: ${projectPath} (current project only)`;
console.error(
`[MemoryTool] Error executing save_memory for fact "${fact}" in ${scope}: ${errorMessage}`,
);
return {
llmContent: JSON.stringify({
success: false,
error: `Failed to save memory. Detail: ${errorMessage}`,
}),
llmContent: `Error saving memory: ${errorMessage}`,
returnDisplay: `Error saving memory: ${errorMessage}`,
error: {
message: errorMessage,

View File

@@ -141,7 +141,12 @@ describe('TodoWriteTool', () => {
const invocation = tool.build(params);
const result = await invocation.execute(mockAbortSignal);
expect(result.llmContent).toContain('success');
expect(result.llmContent).toContain(
'Todos have been modified successfully',
);
expect(result.llmContent).toContain('<system-reminder>');
expect(result.llmContent).toContain('Your todo list has changed');
expect(result.llmContent).toContain(JSON.stringify(params.todos));
expect(result.returnDisplay).toEqual({
type: 'todo_list',
todos: [
@@ -178,7 +183,12 @@ describe('TodoWriteTool', () => {
const invocation = tool.build(params);
const result = await invocation.execute(mockAbortSignal);
expect(result.llmContent).toContain('success');
expect(result.llmContent).toContain(
'Todos have been modified successfully',
);
expect(result.llmContent).toContain('<system-reminder>');
expect(result.llmContent).toContain('Your todo list has changed');
expect(result.llmContent).toContain(JSON.stringify(params.todos));
expect(result.returnDisplay).toEqual({
type: 'todo_list',
todos: [
@@ -208,7 +218,10 @@ describe('TodoWriteTool', () => {
const invocation = tool.build(params);
const result = await invocation.execute(mockAbortSignal);
expect(result.llmContent).toContain('"success":false');
expect(result.llmContent).toContain('Failed to modify todos');
expect(result.llmContent).toContain('<system-reminder>');
expect(result.llmContent).toContain('Todo list modification failed');
expect(result.llmContent).toContain('Write failed');
expect(result.returnDisplay).toContain('Error writing todos');
});
@@ -223,7 +236,10 @@ describe('TodoWriteTool', () => {
const invocation = tool.build(params);
const result = await invocation.execute(mockAbortSignal);
expect(result.llmContent).toContain('success');
expect(result.llmContent).toContain('Todo list has been cleared');
expect(result.llmContent).toContain('<system-reminder>');
expect(result.llmContent).toContain('Your todo list is now empty');
expect(result.llmContent).toContain('no pending tasks');
expect(result.returnDisplay).toEqual({
type: 'todo_list',
todos: [],

View File

@@ -340,11 +340,30 @@ class TodoWriteToolInvocation extends BaseToolInvocation<
todos: finalTodos,
};
// Create plain string format with system reminder
const todosJson = JSON.stringify(finalTodos);
let llmContent: string;
if (finalTodos.length === 0) {
// Special message for empty todos
llmContent = `Todo list has been cleared.
<system-reminder>
Your todo list is now empty. DO NOT mention this explicitly to the user. You have no pending tasks in your todo list.
</system-reminder>`;
} else {
// Normal message for todos with items
llmContent = `Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable
<system-reminder>
Your todo list has changed. DO NOT mention this explicitly to the user. Here are the latest contents of your todo list:
${todosJson}. Continue on with the tasks at hand if applicable.
</system-reminder>`;
}
return {
llmContent: JSON.stringify({
success: true,
todos: finalTodos,
}),
llmContent,
returnDisplay: todoResultDisplay,
};
} catch (error) {
@@ -353,11 +372,16 @@ class TodoWriteToolInvocation extends BaseToolInvocation<
console.error(
`[TodoWriteTool] Error executing todo_write: ${errorMessage}`,
);
// Create plain string format for error with system reminder
const errorLlmContent = `Failed to modify todos. An error occurred during the operation.
<system-reminder>
Todo list modification failed with error: ${errorMessage}. You may need to retry or handle this error appropriately.
</system-reminder>`;
return {
llmContent: JSON.stringify({
success: false,
error: `Failed to write todos. Detail: ${errorMessage}`,
}),
llmContent: errorLlmContent,
returnDisplay: `Error writing todos: ${errorMessage}`,
};
}

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.1.0-preview.0",
"version": "0.1.1",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {