remove editCorrector / compress logic in yolo

This commit is contained in:
koalazf.99
2025-09-16 19:10:12 +08:00
parent 92af02c494
commit a9a84014e4
5 changed files with 66 additions and 37 deletions

View File

@@ -77,6 +77,11 @@ export interface BugCommandSettings {
/**
 * Settings controlling automatic chat history compression.
 */
export interface ChatCompressionSettings {
  /**
   * Threshold, expressed as a percentage of the context window, at which
   * automatic compression kicks in.
   * NOTE(review): exact semantics are not visible in this chunk — confirm
   * against the compression implementation.
   */
  contextPercentageThreshold?: number;
  /**
   * When true, disables automatic chat history compression while in YOLO approval mode.
   * Manual compression via commands remains available.
   */
  disableInYolo?: boolean;
}
export interface SummarizeToolOutputSettings {

View File

@@ -24,7 +24,7 @@ import {
GeminiEventType,
ChatCompressionInfo,
} from './turn.js';
import { Config } from '../config/config.js';
import { ApprovalMode, Config } from '../config/config.js';
import { UserTierId } from '../code_assist/types.js';
import {
getCoreSystemPrompt,
@@ -478,10 +478,18 @@ export class GeminiClient {
// Track the original model from the first call to detect model switching
const initialModel = originalModel || this.config.getModel();
const compressed = await this.tryCompressChat(prompt_id);
const chatCompression = this.config.getChatCompression();
const disableAutoCompressionInYolo =
this.config.getApprovalMode() === ApprovalMode.YOLO &&
// Default to disabling auto-compression in YOLO unless explicitly set to false
(chatCompression?.disableInYolo ?? true);
if (compressed) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
if (!disableAutoCompressionInYolo) {
const compressed = await this.tryCompressChat(prompt_id);
if (compressed) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
}
}
// Check session token limit after compression using accurate token counting

View File

@@ -156,6 +156,8 @@ class EditToolInvocation implements ToolInvocation<EditToolParams, ToolResult> {
params,
this.config.getGeminiClient(),
abortSignal,
// Disable LLM-based corrections in YOLO mode
this.config.getApprovalMode() !== ApprovalMode.YOLO,
);
finalOldString = correctedEdit.params.old_string;
finalNewString = correctedEdit.params.new_string;

View File

@@ -116,6 +116,8 @@ export async function getCorrectedFileContent(
},
config.getGeminiClient(),
abortSignal,
// Disable LLM-based corrections in YOLO mode
config.getApprovalMode() !== ApprovalMode.YOLO,
);
correctedContent = correctedParams.new_string;
} else {
@@ -124,6 +126,8 @@ export async function getCorrectedFileContent(
proposedContent,
config.getGeminiClient(),
abortSignal,
// Disable LLM-based corrections in YOLO mode
config.getApprovalMode() !== ApprovalMode.YOLO,
);
}
return { originalContent, correctedContent, fileExists };

View File

@@ -160,6 +160,7 @@ export async function ensureCorrectEdit(
originalParams: EditToolParams, // This is the EditToolParams from edit.ts, without 'corrected'
client: GeminiClient,
abortSignal: AbortSignal,
llmCorrectionsEnabled: boolean = true,
): Promise<CorrectedEditResult> {
const cacheKey = `${currentContent}---${originalParams.old_string}---${originalParams.new_string}`;
const cachedResult = editCorrectionCache.get(cacheKey);
@@ -178,7 +179,7 @@ export async function ensureCorrectEdit(
let occurrences = countOccurrences(currentContent, finalOldString);
if (occurrences === expectedReplacements) {
if (newStringPotentiallyEscaped) {
if (newStringPotentiallyEscaped && llmCorrectionsEnabled) {
finalNewString = await correctNewStringEscaping(
client,
finalOldString,
@@ -225,7 +226,7 @@ export async function ensureCorrectEdit(
if (occurrences === expectedReplacements) {
finalOldString = unescapedOldStringAttempt;
if (newStringPotentiallyEscaped) {
if (newStringPotentiallyEscaped && llmCorrectionsEnabled) {
finalNewString = await correctNewString(
client,
originalParams.old_string, // original old
@@ -263,38 +264,48 @@ export async function ensureCorrectEdit(
}
}
const llmCorrectedOldString = await correctOldStringMismatch(
client,
currentContent,
unescapedOldStringAttempt,
abortSignal,
);
const llmOldOccurrences = countOccurrences(
currentContent,
llmCorrectedOldString,
);
if (llmCorrectionsEnabled) {
const llmCorrectedOldString = await correctOldStringMismatch(
client,
currentContent,
unescapedOldStringAttempt,
abortSignal,
);
const llmOldOccurrences = countOccurrences(
currentContent,
llmCorrectedOldString,
);
if (llmOldOccurrences === expectedReplacements) {
finalOldString = llmCorrectedOldString;
occurrences = llmOldOccurrences;
if (llmOldOccurrences === expectedReplacements) {
finalOldString = llmCorrectedOldString;
occurrences = llmOldOccurrences;
if (newStringPotentiallyEscaped) {
const baseNewStringForLLMCorrection = unescapeStringForGeminiBug(
originalParams.new_string,
);
finalNewString = await correctNewString(
client,
originalParams.old_string, // original old
llmCorrectedOldString, // corrected old
baseNewStringForLLMCorrection, // base new for correction
abortSignal,
);
if (newStringPotentiallyEscaped) {
const baseNewStringForLLMCorrection = unescapeStringForGeminiBug(
originalParams.new_string,
);
finalNewString = await correctNewString(
client,
originalParams.old_string, // original old
llmCorrectedOldString, // corrected old
baseNewStringForLLMCorrection, // base new for correction
abortSignal,
);
}
} else {
// LLM correction also failed for old_string
const result: CorrectedEditResult = {
params: { ...originalParams },
occurrences: 0, // Explicitly 0 as LLM failed
};
editCorrectionCache.set(cacheKey, result);
return result;
}
} else {
// LLM correction also failed for old_string
// LLM corrections disabled -> return as-is to surface mismatch upstream
const result: CorrectedEditResult = {
params: { ...originalParams },
occurrences: 0, // Explicitly 0 as LLM failed
occurrences: 0,
};
editCorrectionCache.set(cacheKey, result);
return result;
@@ -336,6 +347,7 @@ export async function ensureCorrectFileContent(
content: string,
client: GeminiClient,
abortSignal: AbortSignal,
llmCorrectionsEnabled: boolean = true,
): Promise<string> {
const cachedResult = fileContentCorrectionCache.get(content);
if (cachedResult) {
@@ -349,11 +361,9 @@ export async function ensureCorrectFileContent(
return content;
}
const correctedContent = await correctStringEscaping(
content,
client,
abortSignal,
);
const correctedContent = llmCorrectionsEnabled
? await correctStringEscaping(content, client, abortSignal)
: content;
fileContentCorrectionCache.set(content, correctedContent);
return correctedContent;
}