Compare commits

...

7 Commits

Author SHA1 Message Date
koalazf.99
514f292770 add config: tool output char limit 2025-09-17 14:13:58 +08:00
koalazf.99
a0d77f5a44 add: remove loop detect in yolo 2025-09-16 22:50:30 +08:00
koalazf.99
a9a84014e4 remove editCorrector / compress logic in yolo 2025-09-16 19:10:12 +08:00
koalazf.99
92af02c494 remove read_folder context before user input 2025-09-16 16:27:01 +08:00
koalazf.99
740768dc1b dev yolo 2025-09-16 13:57:28 +08:00
Mingholy
49d7947028 chore: bump version to 0.0.11 (#622) 2025-09-16 10:20:51 +08:00
Mingholy
b01ddf0aed fix: tool calls ui issues (#590) 2025-09-12 17:52:23 +08:00
28 changed files with 286 additions and 112 deletions

View File

@@ -1,5 +1,23 @@
# Changelog
## 0.0.11
- Added subagents feature with file-based configuration system for specialized AI assistants.
- Added Welcome Back Dialog with project summary and enhanced quit options.
- Fixed performance issues with SharedTokenManager causing 20-minute delays.
- Fixed tool calls UI issues and improved user experience.
- Fixed credential clearing when switching authentication types.
- Enhanced subagent capabilities to use tools requiring user confirmation.
- Improved ReadManyFiles tool with shared line limits across files.
- Re-implemented tokenLimits class for better compatibility with Qwen and other model types.
- Fixed chunk validation to avoid unnecessary retries.
- Resolved EditTool naming inconsistency causing agent confusion loops.
- Fixed unexpected re-authentication when the auth token has expired.
- Added Terminal Bench integration tests.
- Updated multilingual documentation links in README.
- Fixed various Windows compatibility issues.
- Miscellaneous improvements and bug fixes.
## 0.0.10
- Synced upstream `gemini-cli` to v0.2.1.

View File

@@ -41,7 +41,7 @@ For security and isolation, Qwen Code can be run inside a container. This is the
You can run the published sandbox image directly. This is useful for environments where you only have Docker and want to run the CLI.
```bash
# Run the published sandbox image
docker run --rm -it ghcr.io/qwenlm/qwen-code:0.0.10
docker run --rm -it ghcr.io/qwenlm/qwen-code:0.0.11
```
- **Using the `--sandbox` flag:**
If you have Qwen Code installed locally (using the standard installation described above), you can instruct it to run inside the sandbox container.

12
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.11",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.11",
"workspaces": [
"packages/*"
],
@@ -12512,7 +12512,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.11",
"dependencies": {
"@google/genai": "1.9.0",
"@iarna/toml": "^2.2.5",
@@ -12696,7 +12696,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.0.10",
"version": "0.0.11",
"dependencies": {
"@google/genai": "1.13.0",
"@modelcontextprotocol/sdk": "^1.11.0",
@@ -12861,7 +12861,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.10",
"version": "0.0.11",
"license": "Apache-2.0",
"devDependencies": {
"typescript": "^5.3.3"
@@ -12872,7 +12872,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.0.10",
"version": "0.0.11",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.15.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.11",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.10"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
},
"scripts": {
"start": "node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.0.10",
"version": "0.0.11",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -25,7 +25,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.10"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.0.11"
},
"dependencies": {
"@google/genai": "1.9.0",

View File

@@ -603,7 +603,9 @@ export async function loadCliConfig(
interactive,
trustedFolder,
shouldUseNodePtyShell: settings.shouldUseNodePtyShell,
skipStartupContext: settings.skipStartupContext,
skipNextSpeakerCheck: settings.skipNextSpeakerCheck,
toolOutputCharLimit: settings.toolOutputCharLimit,
});
}

View File

@@ -577,6 +577,16 @@ export const SETTINGS_SCHEMA = {
description: 'The maximum number of tokens allowed in a session.',
showInDialog: false,
},
toolOutputCharLimit: {
type: 'number',
label: 'Tool Output Character Limit',
category: 'General',
requiresRestart: false,
default: undefined as number | undefined,
description:
'Max characters for tool outputs (read_file, read_many_files, shell). If set, text content is truncated to this limit.',
showInDialog: true,
},
systemPromptMappings: {
type: 'object',
label: 'System Prompt Mappings',
@@ -595,12 +605,22 @@ export const SETTINGS_SCHEMA = {
description: 'The API key for the Tavily API.',
showInDialog: false,
},
skipStartupContext: {
type: 'boolean',
label: 'Skip Startup Context',
category: 'General',
requiresRestart: false,
default: true,
description:
'Do not prepend environment/folder structure context or the initial acknowledgment message.',
showInDialog: true,
},
skipNextSpeakerCheck: {
type: 'boolean',
label: 'Skip Next Speaker Check',
category: 'General',
requiresRestart: false,
default: false,
default: true,
description: 'Skip the next speaker check.',
showInDialog: true,
},

View File

@@ -12,6 +12,7 @@ import {
isTelemetrySdkInitialized,
GeminiEventType,
parseAndFormatApiError,
ApprovalMode,
} from '@qwen-code/qwen-code-core';
import { Content, Part, FunctionCall } from '@google/genai';
@@ -39,6 +40,12 @@ export async function runNonInteractive(
const geminiClient = config.getGeminiClient();
// In YOLO mode, disable next_speaker check to avoid auto-continue.
if (config.getApprovalMode && config.getApprovalMode() === ApprovalMode.YOLO) {
(config as unknown as { getSkipNextSpeakerCheck: () => boolean }).getSkipNextSpeakerCheck =
() => true;
}
const abortController = new AbortController();
let currentMessages: Content[] = [
{ role: 'user', parts: [{ text: input }] },

View File

@@ -204,7 +204,7 @@ export const StatsDisplay: React.FC<StatsDisplayProps> = ({
<StatRow title="Tool Calls:">
<Text>
{tools.totalCalls} ({' '}
<Text color={theme.status.success}> {tools.totalSuccess}</Text>{' '}
<Text color={theme.status.success}> {tools.totalSuccess}</Text>{' '}
<Text color={theme.status.error}> {tools.totalFail}</Text> )
</Text>
</StatRow>

View File

@@ -7,7 +7,7 @@ exports[`<SessionSummaryDisplay /> > renders the summary display with a title 1`
│ │
│ Interaction Summary │
│ Session ID: │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Success Rate: 0.0% │
│ Code Changes: +42 -15 │
│ │

View File

@@ -7,7 +7,7 @@ exports[`<StatsDisplay /> > Code Changes Display > displays Code Changes when li
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 1 ( 1 ✖ 0 ) │
│ Tool Calls: 1 ( 1 ✖ 0 ) │
│ Success Rate: 100.0% │
│ Code Changes: +42 -18 │
│ │
@@ -28,7 +28,7 @@ exports[`<StatsDisplay /> > Code Changes Display > hides Code Changes when no li
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 1 ( 1 ✖ 0 ) │
│ Tool Calls: 1 ( 1 ✖ 0 ) │
│ Success Rate: 100.0% │
│ │
│ Performance │
@@ -48,7 +48,7 @@ exports[`<StatsDisplay /> > Conditional Color Tests > renders success rate in gr
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 10 ( 10 ✖ 0 ) │
│ Tool Calls: 10 ( 10 ✖ 0 ) │
│ Success Rate: 100.0% │
│ │
│ Performance │
@@ -68,7 +68,7 @@ exports[`<StatsDisplay /> > Conditional Color Tests > renders success rate in re
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 10 ( 5 ✖ 5 ) │
│ Tool Calls: 10 ( 5 ✖ 5 ) │
│ Success Rate: 50.0% │
│ │
│ Performance │
@@ -88,7 +88,7 @@ exports[`<StatsDisplay /> > Conditional Color Tests > renders success rate in ye
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 10 ( 9 ✖ 1 ) │
│ Tool Calls: 10 ( 9 ✖ 1 ) │
│ Success Rate: 90.0% │
│ │
│ Performance │
@@ -108,7 +108,7 @@ exports[`<StatsDisplay /> > Conditional Rendering Tests > hides Efficiency secti
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Success Rate: 0.0% │
│ │
│ Performance │
@@ -132,7 +132,7 @@ exports[`<StatsDisplay /> > Conditional Rendering Tests > hides User Agreement w
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 2 ( 1 ✖ 1 ) │
│ Tool Calls: 2 ( 1 ✖ 1 ) │
│ Success Rate: 50.0% │
│ │
│ Performance │
@@ -152,7 +152,7 @@ exports[`<StatsDisplay /> > Title Rendering > renders the custom title when a ti
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Success Rate: 0.0% │
│ │
│ Performance │
@@ -172,7 +172,7 @@ exports[`<StatsDisplay /> > Title Rendering > renders the default title when no
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Success Rate: 0.0% │
│ │
│ Performance │
@@ -192,7 +192,7 @@ exports[`<StatsDisplay /> > renders a table with two models correctly 1`] = `
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Success Rate: 0.0% │
│ │
│ Performance │
@@ -221,7 +221,7 @@ exports[`<StatsDisplay /> > renders all sections when all data is present 1`] =
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 2 ( 1 ✖ 1 ) │
│ Tool Calls: 2 ( 1 ✖ 1 ) │
│ Success Rate: 50.0% │
│ User Agreement: 100.0% (1 reviewed) │
│ │
@@ -250,7 +250,7 @@ exports[`<StatsDisplay /> > renders only the Performance section in its zero sta
│ │
│ Interaction Summary │
│ Session ID: test-session-id │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Tool Calls: 0 ( 0 ✖ 0 ) │
│ Success Rate: 0.0% │
│ │
│ Performance │

View File

@@ -80,6 +80,7 @@ export const ToolGroupMessage: React.FC<ToolGroupMessageProps> = ({
marginLeft={1}
borderDimColor={hasPending}
borderColor={borderColor}
gap={1}
>
{toolCalls.map((tool) => {
const isConfirming = toolAwaitingApproval?.callId === tool.callId;

View File

@@ -84,19 +84,19 @@ describe('<ToolMessage />', () => {
StreamingState.Idle,
);
const output = lastFrame();
expect(output).toContain(''); // Success indicator
expect(output).toContain(''); // Success indicator
expect(output).toContain('test-tool');
expect(output).toContain('A tool for testing');
expect(output).toContain('MockMarkdown:Test result');
});
describe('ToolStatusIndicator rendering', () => {
it('shows for Success status', () => {
it('shows for Success status', () => {
const { lastFrame } = renderWithContext(
<ToolMessage {...baseProps} status={ToolCallStatus.Success} />,
StreamingState.Idle,
);
expect(lastFrame()).toContain('');
expect(lastFrame()).toContain('');
});
it('shows o for Pending status', () => {
@@ -138,7 +138,7 @@ describe('<ToolMessage />', () => {
);
expect(lastFrame()).toContain('⊷');
expect(lastFrame()).not.toContain('MockRespondingSpinner');
expect(lastFrame()).not.toContain('');
expect(lastFrame()).not.toContain('');
});
it('shows paused spinner for Executing status when streamingState is WaitingForConfirmation', () => {
@@ -148,7 +148,7 @@ describe('<ToolMessage />', () => {
);
expect(lastFrame()).toContain('⊷');
expect(lastFrame()).not.toContain('MockRespondingSpinner');
expect(lastFrame()).not.toContain('');
expect(lastFrame()).not.toContain('');
});
it('shows MockRespondingSpinner for Executing status when streamingState is Responding', () => {
@@ -157,7 +157,7 @@ describe('<ToolMessage />', () => {
StreamingState.Responding, // Simulate app still responding
);
expect(lastFrame()).toContain('MockRespondingSpinner');
expect(lastFrame()).not.toContain('');
expect(lastFrame()).not.toContain('');
});
});

View File

@@ -269,7 +269,7 @@ const ToolStatusIndicator: React.FC<ToolStatusIndicatorProps> = ({
/>
)}
{status === ToolCallStatus.Success && (
<Text color={Colors.AccentGreen}></Text>
<Text color={Colors.AccentGreen}></Text>
)}
{status === ToolCallStatus.Confirming && (
<Text color={Colors.AccentYellow}>?</Text>

View File

@@ -288,7 +288,7 @@ const ToolCallItem: React.FC<{
case 'awaiting_approval':
return <Text color={theme.status.warning}>?</Text>;
case 'success':
return <Text color={color}></Text>;
return <Text color={color}></Text>;
case 'failed':
return (
<Text color={color} bold>

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.0.10",
"version": "0.0.11",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View File

@@ -77,6 +77,11 @@ export interface BugCommandSettings {
export interface ChatCompressionSettings {
contextPercentageThreshold?: number;
/**
* When true, disables automatic chat history compression while in YOLO approval mode.
* Manual compression via commands remains available.
*/
disableInYolo?: boolean;
}
export interface SummarizeToolOutputSettings {
@@ -232,7 +237,10 @@ export interface ConfigParameters {
interactive?: boolean;
trustedFolder?: boolean;
shouldUseNodePtyShell?: boolean;
skipStartupContext?: boolean;
skipNextSpeakerCheck?: boolean;
// Character limit for tool text outputs (files and shell)
toolOutputCharLimit?: number;
}
export class Config {
@@ -317,8 +325,10 @@ export class Config {
private readonly interactive: boolean;
private readonly trustedFolder: boolean | undefined;
private readonly shouldUseNodePtyShell: boolean;
private readonly skipStartupContext: boolean;
private readonly skipNextSpeakerCheck: boolean;
private initialized: boolean = false;
private readonly toolOutputCharLimit?: number;
constructor(params: ConfigParameters) {
this.sessionId = params.sessionId;
@@ -398,7 +408,9 @@ export class Config {
this.interactive = params.interactive ?? false;
this.trustedFolder = params.trustedFolder;
this.shouldUseNodePtyShell = params.shouldUseNodePtyShell ?? false;
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? false;
this.skipStartupContext = params.skipStartupContext ?? true;
this.skipNextSpeakerCheck = params.skipNextSpeakerCheck ?? true;
this.toolOutputCharLimit = params.toolOutputCharLimit;
// Web search
this.tavilyApiKey = params.tavilyApiKey;
@@ -857,10 +869,22 @@ export class Config {
return this.shouldUseNodePtyShell;
}
getSkipStartupContext(): boolean {
return this.skipStartupContext;
}
getSkipNextSpeakerCheck(): boolean {
return this.skipNextSpeakerCheck;
}
/**
* Returns the configured maximum number of characters for tool outputs.
* If undefined, no character-based truncation is applied by tools.
*/
getToolOutputCharLimit(): number | undefined {
return this.toolOutputCharLimit;
}
async getGitService(): Promise<GitService> {
if (!this.gitService) {
this.gitService = new GitService(this.targetDir);

View File

@@ -24,7 +24,7 @@ import {
GeminiEventType,
ChatCompressionInfo,
} from './turn.js';
import { Config } from '../config/config.js';
import { ApprovalMode, Config } from '../config/config.js';
import { UserTierId } from '../code_assist/types.js';
import {
getCoreSystemPrompt,
@@ -228,19 +228,24 @@ export class GeminiClient {
async startChat(extraHistory?: Content[]): Promise<GeminiChat> {
this.forceFullIdeContext = true;
const envParts = await getEnvironmentContext(this.config);
const envParts = this.config.getSkipStartupContext()
? []
: await getEnvironmentContext(this.config);
const toolRegistry = this.config.getToolRegistry();
const toolDeclarations = toolRegistry.getFunctionDeclarations();
const tools: Tool[] = [{ functionDeclarations: toolDeclarations }];
const history: Content[] = [
{
role: 'user',
parts: envParts,
},
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
...(
envParts.length
? [
{ role: 'user', parts: envParts },
{
role: 'model',
parts: [{ text: 'Got it. Thanks for the context!' }],
},
]
: []
),
...(extraHistory ?? []),
];
try {
@@ -473,10 +478,18 @@ export class GeminiClient {
// Track the original model from the first call to detect model switching
const initialModel = originalModel || this.config.getModel();
const compressed = await this.tryCompressChat(prompt_id);
const chatCompression = this.config.getChatCompression();
const disableAutoCompressionInYolo =
this.config.getApprovalMode() === ApprovalMode.YOLO &&
// Default to disabling auto-compression in YOLO unless explicitly set to false
(chatCompression?.disableInYolo ?? true);
if (compressed) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
if (!disableAutoCompressionInYolo) {
const compressed = await this.tryCompressChat(prompt_id);
if (compressed) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
}
}
// Check session token limit after compression using accurate token counting
@@ -551,17 +564,25 @@ export class GeminiClient {
const turn = new Turn(this.getChat(), prompt_id);
const loopDetected = await this.loopDetector.turnStarted(signal);
if (loopDetected) {
yield { type: GeminiEventType.LoopDetected };
return turn;
// Disable loop detection entirely in YOLO mode
const loopDetectionDisabled =
this.config.getApprovalMode() === ApprovalMode.YOLO;
if (!loopDetectionDisabled) {
const loopDetected = await this.loopDetector.turnStarted(signal);
if (loopDetected) {
yield { type: GeminiEventType.LoopDetected };
return turn;
}
}
const resultStream = turn.run(request, signal);
for await (const event of resultStream) {
if (this.loopDetector.addAndCheck(event)) {
yield { type: GeminiEventType.LoopDetected };
return turn;
if (!loopDetectionDisabled) {
if (this.loopDetector.addAndCheck(event)) {
yield { type: GeminiEventType.LoopDetected };
return turn;
}
}
yield event;
if (event.type === GeminiEventType.Error) {

View File

@@ -781,11 +781,15 @@ export class SubAgentScope {
);
}
const envParts = await getEnvironmentContext(this.runtimeContext);
const envHistory: Content[] = [
{ role: 'user', parts: envParts },
{ role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
];
const envParts = this.runtimeContext.getSkipStartupContext()
? []
: await getEnvironmentContext(this.runtimeContext);
const envHistory: Content[] = envParts.length
? [
{ role: 'user', parts: envParts },
{ role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] },
]
: [];
const start_history = [
...envHistory,

View File

@@ -156,6 +156,8 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
params,
this.config.getGeminiClient(),
abortSignal,
// Disable LLM-based corrections in YOLO mode
this.config.getApprovalMode() !== ApprovalMode.YOLO,
);
finalOldString = correctedEdit.params.old_string;
finalNewString = correctedEdit.params.new_string;

View File

@@ -122,21 +122,31 @@ class ReadFileToolInvocation extends BaseToolInvocation<
}
let llmContent: PartUnion;
const charLimit = this.config.getToolOutputCharLimit();
if (result.isTruncated) {
const [start, end] = result.linesShown!;
const total = result.originalLineCount!;
const nextOffset = this.params.offset
? this.params.offset + end - start + 1
: end;
llmContent = `
IMPORTANT: The file content has been truncated.
Status: Showing lines ${start}-${end} of ${total} total lines.
Action: To read more of the file, you can use the 'offset' and 'limit' parameters in a subsequent 'read_file' call. For example, to read the next section of the file, use offset: ${nextOffset}.
--- FILE CONTENT (truncated) ---
${result.llmContent}`;
const header = `\nIMPORTANT: The file content has been truncated.\nStatus: Showing lines ${start}-${end} of ${total} total lines.\nAction: To read more of the file, you can use the 'offset' and 'limit' parameters in a subsequent 'read_file' call. For example, to read the next section of the file, use offset: ${nextOffset}.\n\n--- FILE CONTENT (truncated) ---\n`;
const body = typeof result.llmContent === 'string' ? result.llmContent : '';
let truncatedBody = body;
if (typeof charLimit === 'number' && charLimit > 0 && body.length > charLimit) {
truncatedBody = `${body.slice(0, charLimit)}\n[... File content truncated to ${charLimit} characters ...]`;
}
llmContent = header + truncatedBody;
} else {
llmContent = result.llmContent || '';
let body = result.llmContent || '';
if (
typeof body === 'string' &&
typeof charLimit === 'number' &&
charLimit > 0 &&
body.length > charLimit
) {
body = `${body.slice(0, charLimit)}\n[... File content truncated to ${charLimit} characters ...]`;
}
llmContent = body;
}
const lines =

View File

@@ -228,6 +228,7 @@ ${finalExclusionPatternsForDescription
const skippedFiles: Array<{ path: string; reason: string }> = [];
const processedFilesRelativePaths: string[] = [];
const contentParts: PartListUnion = [];
const charLimit = this.config.getToolOutputCharLimit();
const effectiveExcludes = useDefaultExcludes
? [...DEFAULT_EXCLUDES, ...exclude]
@@ -436,6 +437,9 @@ ${finalExclusionPatternsForDescription
);
const results = await Promise.allSettled(fileProcessingPromises);
let remainingContentChars =
typeof charLimit === 'number' && charLimit > 0 ? charLimit : Number.POSITIVE_INFINITY;
let globalTruncated = false;
for (const result of results) {
if (result.status === 'fulfilled') {
@@ -449,22 +453,47 @@ ${finalExclusionPatternsForDescription
});
} else {
// Handle successfully processed files
const { filePath, relativePathForDisplay, fileReadResult } =
fileResult;
const { filePath, relativePathForDisplay, fileReadResult } = fileResult;
if (typeof fileReadResult.llmContent === 'string') {
// Separator does not count toward char budget
const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace(
'{filePath}',
filePath,
);
let fileContentForLlm = '';
let prefix = `${separator}\n\n`;
// Warning header (if any) does not count toward char budget
if (fileReadResult.isTruncated) {
fileContentForLlm += `[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]\n\n`;
prefix += `[WARNING: This file was truncated. To view the full content, use the 'read_file' tool on this specific file.]\n\n`;
}
contentParts.push(prefix);
// Apply global char budget to the actual file content only
if (remainingContentChars > 0) {
const body = fileReadResult.llmContent;
if (body.length <= remainingContentChars) {
contentParts.push(body + '\n\n');
remainingContentChars -= body.length;
} else {
contentParts.push(
body.slice(0, Math.max(0, remainingContentChars)),
);
contentParts.push(
`\n[... Content truncated to ${charLimit} characters across files ...]\n`,
);
remainingContentChars = 0;
globalTruncated = true;
}
} else if (!globalTruncated && typeof charLimit === 'number') {
// No remaining budget, emit a single global truncation marker after first overflow
contentParts.push(
`\n[... Content truncated to ${charLimit} characters across files ...]\n`,
);
globalTruncated = true;
}
fileContentForLlm += fileReadResult.llmContent;
contentParts.push(`${separator}\n\n${fileContentForLlm}\n\n`);
} else {
// This is a Part for image/pdf, which we don't add the separator to.
// Non-text parts (image/pdf) do not count toward char budget
contentParts.push(fileReadResult.llmContent);
}
@@ -538,6 +567,10 @@ ${finalExclusionPatternsForDescription
'No files matching the criteria were found or all were skipped.',
);
}
if (globalTruncated && typeof charLimit === 'number') {
displayMessage += `\n\nNote: Output truncated to ${charLimit} characters (text content only).`;
}
return {
llmContent: contentParts,
returnDisplay: displayMessage.trim(),

View File

@@ -279,6 +279,24 @@ class ShellToolInvocation extends BaseToolInvocation<
}
}
// Apply character truncation (middle) to both llmContent and returnDisplay if configured
const charLimit = this.config.getToolOutputCharLimit();
const middleTruncate = (s: string, limit: number): string => {
if (!s || s.length <= limit) return s;
const marker = '\n[... Output truncated due to length ...]\n';
const keep = Math.max(0, Math.floor((limit - marker.length) / 2));
if (keep <= 0) {
return s.slice(0, limit);
}
return s.slice(0, keep) + marker + s.slice(s.length - keep);
};
if (typeof charLimit === 'number' && charLimit > 0) {
llmContent = middleTruncate(llmContent, charLimit);
if (returnDisplayMessage) {
returnDisplayMessage = middleTruncate(returnDisplayMessage, charLimit);
}
}
const summarizeConfig = this.config.getSummarizeToolOutputConfig();
if (summarizeConfig && summarizeConfig[ShellTool.Name]) {
const summary = await summarizeToolOutput(

View File

@@ -116,6 +116,8 @@ export async function getCorrectedFileContent(
},
config.getGeminiClient(),
abortSignal,
// Disable LLM-based corrections in YOLO mode
config.getApprovalMode() !== ApprovalMode.YOLO,
);
correctedContent = correctedParams.new_string;
} else {
@@ -124,6 +126,8 @@ export async function getCorrectedFileContent(
proposedContent,
config.getGeminiClient(),
abortSignal,
// Disable LLM-based corrections in YOLO mode
config.getApprovalMode() !== ApprovalMode.YOLO,
);
}
return { originalContent, correctedContent, fileExists };

View File

@@ -160,6 +160,7 @@ export async function ensureCorrectEdit(
originalParams: EditToolParams, // This is the EditToolParams from edit.ts, without \'corrected\'
client: GeminiClient,
abortSignal: AbortSignal,
llmCorrectionsEnabled: boolean = true,
): Promise<CorrectedEditResult> {
const cacheKey = `${currentContent}---${originalParams.old_string}---${originalParams.new_string}`;
const cachedResult = editCorrectionCache.get(cacheKey);
@@ -178,7 +179,7 @@ export async function ensureCorrectEdit(
let occurrences = countOccurrences(currentContent, finalOldString);
if (occurrences === expectedReplacements) {
if (newStringPotentiallyEscaped) {
if (newStringPotentiallyEscaped && llmCorrectionsEnabled) {
finalNewString = await correctNewStringEscaping(
client,
finalOldString,
@@ -225,7 +226,7 @@ export async function ensureCorrectEdit(
if (occurrences === expectedReplacements) {
finalOldString = unescapedOldStringAttempt;
if (newStringPotentiallyEscaped) {
if (newStringPotentiallyEscaped && llmCorrectionsEnabled) {
finalNewString = await correctNewString(
client,
originalParams.old_string, // original old
@@ -263,38 +264,48 @@ export async function ensureCorrectEdit(
}
}
const llmCorrectedOldString = await correctOldStringMismatch(
client,
currentContent,
unescapedOldStringAttempt,
abortSignal,
);
const llmOldOccurrences = countOccurrences(
currentContent,
llmCorrectedOldString,
);
if (llmCorrectionsEnabled) {
const llmCorrectedOldString = await correctOldStringMismatch(
client,
currentContent,
unescapedOldStringAttempt,
abortSignal,
);
const llmOldOccurrences = countOccurrences(
currentContent,
llmCorrectedOldString,
);
if (llmOldOccurrences === expectedReplacements) {
finalOldString = llmCorrectedOldString;
occurrences = llmOldOccurrences;
if (llmOldOccurrences === expectedReplacements) {
finalOldString = llmCorrectedOldString;
occurrences = llmOldOccurrences;
if (newStringPotentiallyEscaped) {
const baseNewStringForLLMCorrection = unescapeStringForGeminiBug(
originalParams.new_string,
);
finalNewString = await correctNewString(
client,
originalParams.old_string, // original old
llmCorrectedOldString, // corrected old
baseNewStringForLLMCorrection, // base new for correction
abortSignal,
);
if (newStringPotentiallyEscaped) {
const baseNewStringForLLMCorrection = unescapeStringForGeminiBug(
originalParams.new_string,
);
finalNewString = await correctNewString(
client,
originalParams.old_string, // original old
llmCorrectedOldString, // corrected old
baseNewStringForLLMCorrection, // base new for correction
abortSignal,
);
}
} else {
// LLM correction also failed for old_string
const result: CorrectedEditResult = {
params: { ...originalParams },
occurrences: 0, // Explicitly 0 as LLM failed
};
editCorrectionCache.set(cacheKey, result);
return result;
}
} else {
// LLM correction also failed for old_string
// LLM corrections disabled -> return as-is to surface mismatch upstream
const result: CorrectedEditResult = {
params: { ...originalParams },
occurrences: 0, // Explicitly 0 as LLM failed
occurrences: 0,
};
editCorrectionCache.set(cacheKey, result);
return result;
@@ -336,6 +347,7 @@ export async function ensureCorrectFileContent(
content: string,
client: GeminiClient,
abortSignal: AbortSignal,
llmCorrectionsEnabled: boolean = true,
): Promise<string> {
const cachedResult = fileContentCorrectionCache.get(content);
if (cachedResult) {
@@ -349,11 +361,9 @@ export async function ensureCorrectFileContent(
return content;
}
const correctedContent = await correctStringEscaping(
content,
client,
abortSignal,
);
const correctedContent = llmCorrectionsEnabled
? await correctStringEscaping(content, client, abortSignal)
: content;
fileContentCorrectionCache.set(content, correctedContent);
return correctedContent;
}

View File

@@ -11,7 +11,7 @@ import mime from 'mime-types';
import { FileSystemService } from '../services/fileSystemService.js';
// Constants for text file processing
export const DEFAULT_MAX_LINES_TEXT_FILE = 2000;
export const DEFAULT_MAX_LINES_TEXT_FILE = 500;
const MAX_LINE_LENGTH_TEXT_FILE = 2000;
// Default values for encoding and separator format

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.0.10",
"version": "0.0.11",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.0.10",
"version": "0.0.11",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {