fix: tool use permission hint
@@ -658,13 +658,31 @@ export async function loadCliConfig(
    throw err;
  }

  // Interactive mode: explicit -i flag or (TTY + no args + no -p flag)
  // Interactive mode determination with priority:
  // 1. If promptInteractive (-i flag) is provided, it is explicitly interactive
  // 2. If outputFormat is stream-json or json (regardless of input format) along with query or prompt, it is non-interactive
  // 3. If no query or prompt is provided, the format arguments are ignored and it is interactive
  const hasQuery = !!argv.query;
  const interactive =
    inputFormat === InputFormat.STREAM_JSON
      ? false
      : !!argv.promptInteractive ||
        (process.stdin.isTTY && !hasQuery && !argv.prompt);
  const hasPrompt = !!argv.prompt;
  let interactive: boolean;
  if (argv.promptInteractive) {
    // Priority 1: Explicit -i flag means interactive
    interactive = true;
  } else if (
    (outputFormat === OutputFormat.STREAM_JSON ||
      outputFormat === OutputFormat.JSON) &&
    (hasQuery || hasPrompt)
  ) {
    // Priority 2: JSON/stream-json output with query/prompt means non-interactive
    interactive = false;
  } else if (!hasQuery && !hasPrompt) {
    // Priority 3: No query or prompt means interactive (format arguments ignored)
    interactive = true;
  } else {
    // Default: If we have a query/prompt but the output format is TEXT, assume non-interactive
    // (fallback for edge cases where query/prompt is provided with TEXT output)
    interactive = false;
  }
  // In non-interactive mode, exclude tools that require a prompt.
  const extraExcludes: string[] = [];
  if (!interactive && !argv.experimentalAcp) {

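To make the priority order concrete, a minimal standalone sketch of the same decision (simplified option shapes, not the CLI's real argv/OutputFormat types):

// Sketch only: mirrors the added priority logic above with simplified inputs.
type Format = 'text' | 'json' | 'stream-json';

function isInteractive(opts: {
  promptInteractive?: boolean; // -i flag
  query?: string;
  prompt?: string; // -p flag
  outputFormat: Format;
}): boolean {
  if (opts.promptInteractive) return true; // Priority 1: explicit -i
  const hasInput = !!opts.query || !!opts.prompt;
  if (
    (opts.outputFormat === 'json' || opts.outputFormat === 'stream-json') &&
    hasInput
  ) {
    return false; // Priority 2: structured output + query/prompt runs non-interactively
  }
  if (!hasInput) return true; // Priority 3: nothing to run, so start the interactive UI
  return false; // Fallback: query/prompt with text output runs non-interactively
}

// isInteractive({ outputFormat: 'json' })                                       -> true
// isInteractive({ outputFormat: 'stream-json', query: 'hi' })                   -> false
// isInteractive({ promptInteractive: true, outputFormat: 'json', query: 'hi' }) -> true
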
@@ -266,6 +266,10 @@ export async function runNonInteractive(
      );

      if (toolResponse.error) {
        // In JSON/STREAM_JSON mode, tool errors are tolerated and formatted
        // as tool_result blocks. handleToolError will detect JSON/STREAM_JSON mode
        // from config and allow the session to continue so the LLM can decide what to do next.
        // In text mode, we still log the error.
        handleToolError(
          finalRequestInfo.name,
          toolResponse.error,
@@ -275,14 +279,9 @@ export async function runNonInteractive(
            ? toolResponse.resultDisplay
            : undefined,
        );
        if (adapter) {
          const message =
            toolResponse.resultDisplay || toolResponse.error.message;
          adapter.emitSystemMessage('tool_error', {
            tool: finalRequestInfo.name,
            message,
          });
        }
        // Note: We no longer emit a separate system message for tool errors
        // in JSON/STREAM_JSON mode, as the error is already captured in the
        // tool_result block with is_error=true.
      }

      if (adapter) {

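The comments above describe surfacing the failure as a tool_result block with is_error=true; purely as an illustration of that idea (field names other than is_error are assumptions, not the adapter's exact output schema):

// Illustrative only - not the adapter's exact wire format.
const exampleToolResultBlock = {
  type: 'tool_result',
  tool: 'write_file',
  is_error: true,
  content:
    'Qwen Code requires permission to use write_file, but that permission was declined.',
};
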
@@ -291,90 +291,74 @@ describe('errors', () => {
      ).mockReturnValue(OutputFormat.JSON);
    });

    it('should format error as JSON and exit with default code', () => {
      expect(() => {
        handleToolError(toolName, toolError, mockConfig);
      }).toThrow('process.exit called with code: 54');
    it('should log error message to stderr and not exit', () => {
      handleToolError(toolName, toolError, mockConfig);

      // In JSON mode, should not exit (just log to stderr)
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        JSON.stringify(
          {
            error: {
              type: 'FatalToolExecutionError',
              message: 'Error executing tool test-tool: Tool failed',
              code: 54,
            },
          },
          null,
          2,
        ),
        'Error executing tool test-tool: Tool failed',
      );
      expect(processExitSpy).not.toHaveBeenCalled();
    });

    it('should use custom error code', () => {
      expect(() => {
        handleToolError(toolName, toolError, mockConfig, 'CUSTOM_TOOL_ERROR');
      }).toThrow('process.exit called with code: 54');
    it('should log error with custom error code and not exit', () => {
      handleToolError(toolName, toolError, mockConfig, 'CUSTOM_TOOL_ERROR');

      // In JSON mode, should not exit (just log to stderr)
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        JSON.stringify(
          {
            error: {
              type: 'FatalToolExecutionError',
              message: 'Error executing tool test-tool: Tool failed',
              code: 'CUSTOM_TOOL_ERROR',
            },
          },
          null,
          2,
        ),
        'Error executing tool test-tool: Tool failed',
      );
      expect(processExitSpy).not.toHaveBeenCalled();
    });

    it('should use numeric error code and exit with that code', () => {
      expect(() => {
        handleToolError(toolName, toolError, mockConfig, 500);
      }).toThrow('process.exit called with code: 500');
    it('should log error with numeric error code and not exit', () => {
      handleToolError(toolName, toolError, mockConfig, 500);

      // In JSON mode, should not exit (just log to stderr)
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        JSON.stringify(
          {
            error: {
              type: 'FatalToolExecutionError',
              message: 'Error executing tool test-tool: Tool failed',
              code: 500,
            },
          },
          null,
          2,
        ),
        'Error executing tool test-tool: Tool failed',
      );
      expect(processExitSpy).not.toHaveBeenCalled();
    });

    it('should prefer resultDisplay over error message', () => {
      expect(() => {
        handleToolError(
          toolName,
          toolError,
          mockConfig,
          'DISPLAY_ERROR',
          'Display message',
        );
      }).toThrow('process.exit called with code: 54');

      expect(consoleErrorSpy).toHaveBeenCalledWith(
        JSON.stringify(
          {
            error: {
              type: 'FatalToolExecutionError',
              message: 'Error executing tool test-tool: Display message',
              code: 'DISPLAY_ERROR',
            },
          },
          null,
          2,
        ),
      handleToolError(
        toolName,
        toolError,
        mockConfig,
        'DISPLAY_ERROR',
        'Display message',
      );

      // In JSON mode, should not exit (just log to stderr)
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        'Error executing tool test-tool: Display message',
      );
      expect(processExitSpy).not.toHaveBeenCalled();
    });

    it('should not exit in JSON mode', () => {
      handleToolError(toolName, toolError, mockConfig);

      // Should not throw (no exit)
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        'Error executing tool test-tool: Tool failed',
      );
      expect(processExitSpy).not.toHaveBeenCalled();
    });

    it('should not exit in STREAM_JSON mode', () => {
      (
        mockConfig.getOutputFormat as ReturnType<typeof vi.fn>
      ).mockReturnValue(OutputFormat.STREAM_JSON);

      handleToolError(toolName, toolError, mockConfig);

      // Should not exit in STREAM_JSON mode
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        'Error executing tool test-tool: Tool failed',
      );
      expect(processExitSpy).not.toHaveBeenCalled();
    });
  });
});

@@ -10,7 +10,6 @@ import {
  JsonFormatter,
  parseAndFormatApiError,
  FatalTurnLimitedError,
  FatalToolExecutionError,
  FatalCancellationError,
} from '@qwen-code/qwen-code-core';

@@ -88,32 +87,29 @@ export function handleError(

/**
 * Handles tool execution errors specifically.
 * In JSON mode, outputs formatted JSON error and exits.
 * In JSON/STREAM_JSON mode, outputs error message to stderr only and does not exit.
 * The error will be properly formatted in the tool_result block by the adapter,
 * allowing the session to continue so the LLM can decide what to do next.
 * In text mode, outputs error message to stderr only.
 *
 * @param toolName - Name of the tool that failed
 * @param toolError - The error that occurred during tool execution
 * @param config - Configuration object
 * @param errorCode - Optional error code
 * @param resultDisplay - Optional display message for the error
 */
export function handleToolError(
  toolName: string,
  toolError: Error,
  config: Config,
  errorCode?: string | number,
  _errorCode?: string | number,
  resultDisplay?: string,
): void {
  const errorMessage = `Error executing tool ${toolName}: ${resultDisplay || toolError.message}`;
  const toolExecutionError = new FatalToolExecutionError(errorMessage);

  if (config.getOutputFormat() === OutputFormat.JSON) {
    const formatter = new JsonFormatter();
    const formattedError = formatter.formatError(
      toolExecutionError,
      errorCode ?? toolExecutionError.exitCode,
  // Always just log to stderr; JSON/streaming formatting happens in the tool_result block elsewhere
  if (config.getDebugMode()) {
    console.error(
      `Error executing tool ${toolName}: ${resultDisplay || toolError.message}`,
    );

    console.error(formattedError);
    process.exit(
      typeof errorCode === 'number' ? errorCode : toolExecutionError.exitCode,
    );
  } else {
    console.error(errorMessage);
  }
}

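Read together, the added lines above leave handleToolError as roughly the following (a sketch assembled from this hunk with the removed JSON-formatting/exit branch dropped, not a verbatim copy of the file):

// Sketch of the resulting control flow: no more process.exit, logging only in debug mode.
export function handleToolError(
  toolName: string,
  toolError: Error,
  config: Config,
  _errorCode?: string | number,
  resultDisplay?: string,
): void {
  // Always just log to stderr; JSON/streaming formatting happens in the tool_result block elsewhere
  if (config.getDebugMode()) {
    console.error(
      `Error executing tool ${toolName}: ${resultDisplay || toolError.message}`,
    );
  }
}
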
@@ -371,6 +371,8 @@ describe('CoreToolScheduler', () => {
        getUseSmartEdit: () => false,
        getUseModelRouter: () => false,
        getGeminiClient: () => null, // No client needed for these tests
        getExcludeTools: () => undefined,
        isInteractive: () => true,
      } as unknown as Config;
      const mockToolRegistry = {
        getAllToolNames: () => ['list_files', 'read_file', 'write_file'],

@@ -400,6 +402,241 @@ describe('CoreToolScheduler', () => {
        ' Did you mean one of: "list_files", "read_file", "write_file"?',
      );
    });

    it('should use Levenshtein suggestions for excluded tools (getToolSuggestion only handles non-excluded)', () => {
      // Create mocked tool registry
      const mockToolRegistry = {
        getAllToolNames: () => ['list_files', 'read_file'],
      } as unknown as ToolRegistry;

      // Create mocked config with excluded tools
      const mockConfig = {
        getToolRegistry: () => mockToolRegistry,
        getUseSmartEdit: () => false,
        getUseModelRouter: () => false,
        getGeminiClient: () => null,
        getExcludeTools: () => ['write_file', 'edit', 'run_shell_command'],
        isInteractive: () => false, // Value doesn't matter, but included for completeness
      } as unknown as Config;

      // Create scheduler
      const scheduler = new CoreToolScheduler({
        config: mockConfig,
        getPreferredEditor: () => 'vscode',
        onEditorClose: vi.fn(),
      });

      // getToolSuggestion no longer handles excluded tools - it only handles truly missing tools
      // So excluded tools will use Levenshtein distance to find similar registered tools
      // @ts-expect-error accessing private method
      const excludedTool = scheduler.getToolSuggestion('write_file');
      expect(excludedTool).toContain('Did you mean');
    });

    it('should use Levenshtein suggestions for non-excluded tools', () => {
      // Create mocked tool registry
      const mockToolRegistry = {
        getAllToolNames: () => ['list_files', 'read_file'],
      } as unknown as ToolRegistry;

      // Create mocked config with excluded tools
      const mockConfig = {
        getToolRegistry: () => mockToolRegistry,
        getUseSmartEdit: () => false,
        getUseModelRouter: () => false,
        getGeminiClient: () => null,
        getExcludeTools: () => ['write_file', 'edit'],
        isInteractive: () => false, // Value doesn't matter
      } as unknown as Config;

      // Create scheduler
      const scheduler = new CoreToolScheduler({
        config: mockConfig,
        getPreferredEditor: () => 'vscode',
        onEditorClose: vi.fn(),
      });

      // Test that non-excluded tool (hallucinated) still uses Levenshtein suggestions
      // @ts-expect-error accessing private method
      const hallucinatedTool = scheduler.getToolSuggestion('list_fils');
      expect(hallucinatedTool).toContain('Did you mean');
      expect(hallucinatedTool).not.toContain(
        'not available in the current environment',
      );
    });
  });

  describe('excluded tools handling', () => {
    it('should return permission error for excluded tools instead of "not found" message', async () => {
      const onAllToolCallsComplete = vi.fn();
      const onToolCallsUpdate = vi.fn();

      const mockToolRegistry = {
        getTool: () => undefined, // Tool not in registry
        getAllToolNames: () => ['list_files', 'read_file'],
        getFunctionDeclarations: () => [],
        tools: new Map(),
        discovery: {},
        registerTool: () => {},
        getToolByName: () => undefined,
        getToolByDisplayName: () => undefined,
        getTools: () => [],
        discoverTools: async () => {},
        getAllTools: () => [],
        getToolsByServer: () => [],
      } as unknown as ToolRegistry;

      const mockConfig = {
        getSessionId: () => 'test-session-id',
        getUsageStatisticsEnabled: () => true,
        getDebugMode: () => false,
        getApprovalMode: () => ApprovalMode.DEFAULT,
        getAllowedTools: () => [],
        getExcludeTools: () => ['write_file', 'edit', 'run_shell_command'],
        getContentGeneratorConfig: () => ({
          model: 'test-model',
          authType: 'oauth-personal',
        }),
        getShellExecutionConfig: () => ({
          terminalWidth: 90,
          terminalHeight: 30,
        }),
        storage: {
          getProjectTempDir: () => '/tmp',
        },
        getTruncateToolOutputThreshold: () =>
          DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD,
        getTruncateToolOutputLines: () => DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES,
        getToolRegistry: () => mockToolRegistry,
        getUseSmartEdit: () => false,
        getUseModelRouter: () => false,
        getGeminiClient: () => null,
      } as unknown as Config;

      const scheduler = new CoreToolScheduler({
        config: mockConfig,
        onAllToolCallsComplete,
        onToolCallsUpdate,
        getPreferredEditor: () => 'vscode',
        onEditorClose: vi.fn(),
      });

      const abortController = new AbortController();
      const request = {
        callId: '1',
        name: 'write_file', // Excluded tool
        args: {},
        isClientInitiated: false,
        prompt_id: 'prompt-id-excluded',
      };

      await scheduler.schedule([request], abortController.signal);

      // Wait for completion
      await vi.waitFor(() => {
        expect(onAllToolCallsComplete).toHaveBeenCalled();
      });

      const completedCalls = onAllToolCallsComplete.mock
        .calls[0][0] as ToolCall[];
      expect(completedCalls).toHaveLength(1);
      const completedCall = completedCalls[0];
      expect(completedCall.status).toBe('error');

      if (completedCall.status === 'error') {
        const errorMessage = completedCall.response.error?.message;
        expect(errorMessage).toBe(
          'Qwen Code requires permission to use write_file, but that permission was declined.',
        );
        // Should NOT contain "not found in registry"
        expect(errorMessage).not.toContain('not found in registry');
      }
    });

    it('should return "not found" message for truly missing tools (not excluded)', async () => {
      const onAllToolCallsComplete = vi.fn();
      const onToolCallsUpdate = vi.fn();

      const mockToolRegistry = {
        getTool: () => undefined, // Tool not in registry
        getAllToolNames: () => ['list_files', 'read_file'],
        getFunctionDeclarations: () => [],
        tools: new Map(),
        discovery: {},
        registerTool: () => {},
        getToolByName: () => undefined,
        getToolByDisplayName: () => undefined,
        getTools: () => [],
        discoverTools: async () => {},
        getAllTools: () => [],
        getToolsByServer: () => [],
      } as unknown as ToolRegistry;

      const mockConfig = {
        getSessionId: () => 'test-session-id',
        getUsageStatisticsEnabled: () => true,
        getDebugMode: () => false,
        getApprovalMode: () => ApprovalMode.DEFAULT,
        getAllowedTools: () => [],
        getExcludeTools: () => ['write_file', 'edit'], // Different excluded tools
        getContentGeneratorConfig: () => ({
          model: 'test-model',
          authType: 'oauth-personal',
        }),
        getShellExecutionConfig: () => ({
          terminalWidth: 90,
          terminalHeight: 30,
        }),
        storage: {
          getProjectTempDir: () => '/tmp',
        },
        getTruncateToolOutputThreshold: () =>
          DEFAULT_TRUNCATE_TOOL_OUTPUT_THRESHOLD,
        getTruncateToolOutputLines: () => DEFAULT_TRUNCATE_TOOL_OUTPUT_LINES,
        getToolRegistry: () => mockToolRegistry,
        getUseSmartEdit: () => false,
        getUseModelRouter: () => false,
        getGeminiClient: () => null,
      } as unknown as Config;

      const scheduler = new CoreToolScheduler({
        config: mockConfig,
        onAllToolCallsComplete,
        onToolCallsUpdate,
        getPreferredEditor: () => 'vscode',
        onEditorClose: vi.fn(),
      });

      const abortController = new AbortController();
      const request = {
        callId: '1',
        name: 'nonexistent_tool', // Not excluded, just doesn't exist
        args: {},
        isClientInitiated: false,
        prompt_id: 'prompt-id-missing',
      };

      await scheduler.schedule([request], abortController.signal);

      // Wait for completion
      await vi.waitFor(() => {
        expect(onAllToolCallsComplete).toHaveBeenCalled();
      });

      const completedCalls = onAllToolCallsComplete.mock
        .calls[0][0] as ToolCall[];
      expect(completedCalls).toHaveLength(1);
      const completedCall = completedCalls[0];
      expect(completedCall.status).toBe('error');

      if (completedCall.status === 'error') {
        const errorMessage = completedCall.response.error?.message;
        // Should contain "not found in registry"
        expect(errorMessage).toContain('not found in registry');
        // Should NOT contain permission message
        expect(errorMessage).not.toContain('requires permission');
      }
    });
  });
});

@@ -590,12 +590,16 @@ export class CoreToolScheduler {

  /**
   * Generates a suggestion string for a tool name that was not found in the registry.
   * It finds the closest matches based on Levenshtein distance.
   * Uses Levenshtein distance to suggest similar tool names for hallucinated or misspelled tools.
   * Note: Excluded tools are handled separately before calling this method, so this only
   * handles the case where a tool is truly not found (hallucinated or typo).
   * @param unknownToolName The tool name that was not found.
   * @param topN The number of suggestions to return. Defaults to 3.
   * @returns A suggestion string like " Did you mean 'tool'?" or " Did you mean one of: 'tool1', 'tool2'?", or an empty string if no suggestions are found.
   * @returns A suggestion string like " Did you mean 'tool'?" or " Did you mean one of: 'tool1', 'tool2'?",
   *          or an empty string if no suggestions are found.
   */
  private getToolSuggestion(unknownToolName: string, topN = 3): string {
    // Use Levenshtein distance to find similar tool names from the registry.
    const allToolNames = this.toolRegistry.getAllToolNames();

    const matches = allToolNames.map((toolName) => ({

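For reference, a self-contained sketch of the suggestion strategy this docblock describes (the levenshtein helper and the absence of a distance cutoff are illustrative assumptions, not the scheduler's exact code):

// Sketch only: ranks registered tool names by edit distance and formats a hint.
function levenshtein(a: string, b: string): number {
  const dp = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0)),
  );
  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      dp[i][j] = Math.min(
        dp[i - 1][j] + 1, // deletion
        dp[i][j - 1] + 1, // insertion
        dp[i - 1][j - 1] + (a[i - 1] === b[j - 1] ? 0 : 1), // substitution
      );
    }
  }
  return dp[a.length][b.length];
}

function suggestToolName(
  unknownToolName: string,
  allToolNames: string[],
  topN = 3,
): string {
  const matches = allToolNames
    .map((toolName) => ({ toolName, distance: levenshtein(unknownToolName, toolName) }))
    .sort((a, b) => a.distance - b.distance)
    .slice(0, topN);
  if (matches.length === 0) return '';
  if (matches.length === 1) return ` Did you mean '${matches[0].toolName}'?`;
  return ` Did you mean one of: ${matches.map((m) => `"${m.toolName}"`).join(', ')}?`;
}

// suggestToolName('list_fils', ['list_files', 'read_file'])
//   -> ' Did you mean one of: "list_files", "read_file"?'
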
@@ -673,8 +677,35 @@ export class CoreToolScheduler {

    const newToolCalls: ToolCall[] = requestsToProcess.map(
      (reqInfo): ToolCall => {
        // Check if the tool is excluded due to permissions/environment restrictions.
        // This check should happen before registry lookup to provide a clear permission error.
        const excludeTools = this.config.getExcludeTools?.() ?? undefined;
        if (excludeTools && excludeTools.length > 0) {
          const normalizedToolName = reqInfo.name.toLowerCase().trim();
          const excludedMatch = excludeTools.find(
            (excludedTool) =>
              excludedTool.toLowerCase().trim() === normalizedToolName,
          );

          if (excludedMatch) {
            // The tool exists but is excluded - return a permission error directly
            const permissionErrorMessage = `Qwen Code requires permission to use ${excludedMatch}, but that permission was declined.`;
            return {
              status: 'error',
              request: reqInfo,
              response: createErrorResponse(
                reqInfo,
                new Error(permissionErrorMessage),
                ToolErrorType.TOOL_NOT_REGISTERED,
              ),
              durationMs: 0,
            };
          }
        }

        const toolInstance = this.toolRegistry.getTool(reqInfo.name);
        if (!toolInstance) {
          // Tool is not in registry and not excluded - likely hallucinated or a typo
          const suggestion = this.getToolSuggestion(reqInfo.name);
          const errorMessage = `Tool "${reqInfo.name}" not found in registry. Tools must use the exact names that are registered.${suggestion}`;
          return {

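Taken with the tests above, the two error paths can be summarized in a small self-contained sketch (illustrative only; classifyToolCall and its signature are not part of the repo):

// Sketch: excluded tools get the permission hint, unknown tools get the registry message.
function classifyToolCall(
  name: string,
  excludeTools: string[],
  registeredTools: string[],
): string {
  const normalized = name.toLowerCase().trim();
  if (excludeTools.some((t) => t.toLowerCase().trim() === normalized)) {
    return `Qwen Code requires permission to use ${name}, but that permission was declined.`;
  }
  if (!registeredTools.includes(name)) {
    return `Tool "${name}" not found in registry. Tools must use the exact names that are registered.`;
  }
  return 'ok';
}

// classifyToolCall('write_file', ['write_file'], ['list_files'])      -> permission hint
// classifyToolCall('nonexistent_tool', ['write_file'], ['list_files']) -> "not found in registry"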