mirror of https://github.com/QwenLM/qwen-code.git, synced 2025-12-19 09:33:53 +00:00

feat: subagent phase 2 implementation

@@ -26,7 +26,11 @@ import {
 } from './turn.js';
 import { Config } from '../config/config.js';
 import { UserTierId } from '../code_assist/types.js';
-import { getCoreSystemPrompt, getCompressionPrompt } from './prompts.js';
+import {
+  getCoreSystemPrompt,
+  getCompressionPrompt,
+  getCustomSystemPrompt,
+} from './prompts.js';
 import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js';
 import { reportError } from '../utils/errorReporting.js';
 import { GeminiChat } from './geminiChat.js';
@@ -618,11 +622,15 @@ export class GeminiClient {
       model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL;
     try {
       const userMemory = this.config.getUserMemory();
-      const systemInstruction = getCoreSystemPrompt(userMemory);
+      const finalSystemInstruction = config.systemInstruction
+        ? getCustomSystemPrompt(config.systemInstruction, userMemory)
+        : getCoreSystemPrompt(userMemory);
+
       const requestConfig = {
         abortSignal,
         ...this.generateContentConfig,
         ...config,
+        systemInstruction: finalSystemInstruction,
       };

       // Convert schema to function declaration
@@ -644,7 +652,6 @@ export class GeminiClient {
         model: modelToUse,
         config: {
           ...requestConfig,
-          systemInstruction,
           tools,
         },
         contents,
@@ -706,12 +713,14 @@ export class GeminiClient {

     try {
       const userMemory = this.config.getUserMemory();
-      const systemInstruction = getCoreSystemPrompt(userMemory);
+      const finalSystemInstruction = generationConfig.systemInstruction
+        ? getCustomSystemPrompt(generationConfig.systemInstruction, userMemory)
+        : getCoreSystemPrompt(userMemory);

       const requestConfig: GenerateContentConfig = {
         abortSignal,
         ...configToUse,
-        systemInstruction,
+        systemInstruction: finalSystemInstruction,
       };

       const apiCall = () =>

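Taken together, the GeminiClient hunks above change how the system instruction is chosen: when the caller supplies a systemInstruction in the request config, it is routed through getCustomSystemPrompt (which appends user memory); otherwise the core prompt is used, and the resolved value is placed directly on the request config instead of being passed separately. The sketch below only restates that resolution step for clarity; it is illustrative, the helper name resolveSystemInstruction is not part of the commit, and getCoreSystemPrompt is assumed to return a plain string.

// Illustrative sketch only, not committed code.
import { GenerateContentConfig } from '@google/genai';
import { getCoreSystemPrompt, getCustomSystemPrompt } from './prompts.js';

function resolveSystemInstruction(
  config: GenerateContentConfig,
  userMemory: string,
): string {
  // A caller-supplied instruction takes precedence and still gets user memory
  // appended; otherwise fall back to the core system prompt.
  return config.systemInstruction
    ? getCustomSystemPrompt(config.systemInstruction, userMemory)
    : getCoreSystemPrompt(userMemory);
}

// The request config is then built with the resolved value, e.g.:
// const requestConfig = { abortSignal, ...config, systemInstruction: resolveSystemInstruction(config, userMemory) };
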
@@ -5,7 +5,7 @@
  */

 import { describe, it, expect, vi, beforeEach } from 'vitest';
-import { getCoreSystemPrompt } from './prompts.js';
+import { getCoreSystemPrompt, getCustomSystemPrompt } from './prompts.js';
 import { isGitRepository } from '../utils/gitUtils.js';
 import fs from 'node:fs';
 import os from 'node:os';
@@ -363,3 +363,45 @@ describe('URL matching with trailing slash compatibility', () => {
     process.env = originalEnv;
   });
 });
+
+describe('getCustomSystemPrompt', () => {
+  it('should handle string custom instruction without user memory', () => {
+    const customInstruction =
+      'You are a helpful assistant specialized in code review.';
+    const result = getCustomSystemPrompt(customInstruction);
+
+    expect(result).toBe(
+      'You are a helpful assistant specialized in code review.',
+    );
+    expect(result).not.toContain('---');
+  });
+
+  it('should handle string custom instruction with user memory', () => {
+    const customInstruction =
+      'You are a helpful assistant specialized in code review.';
+    const userMemory =
+      'Remember to be extra thorough.\nFocus on security issues.';
+    const result = getCustomSystemPrompt(customInstruction, userMemory);
+
+    expect(result).toBe(
+      'You are a helpful assistant specialized in code review.\n\n---\n\nRemember to be extra thorough.\nFocus on security issues.',
+    );
+    expect(result).toContain('---');
+  });
+
+  it('should handle Content object with parts array and user memory', () => {
+    const customInstruction = {
+      parts: [
+        { text: 'You are a code assistant. ' },
+        { text: 'Always provide examples.' },
+      ],
+    };
+    const userMemory = 'User prefers TypeScript examples.';
+    const result = getCustomSystemPrompt(customInstruction, userMemory);
+
+    expect(result).toBe(
+      'You are a code assistant. Always provide examples.\n\n---\n\nUser prefers TypeScript examples.',
+    );
+    expect(result).toContain('---');
+  });
+});

@@ -18,6 +18,7 @@ import process from 'node:process';
 import { isGitRepository } from '../utils/gitUtils.js';
 import { MemoryTool, GEMINI_CONFIG_DIR } from '../tools/memoryTool.js';
 import { TodoWriteTool } from '../tools/todoWrite.js';
+import { GenerateContentConfig } from '@google/genai';

 export interface ModelTemplateMapping {
   baseUrls?: string[];
@@ -44,6 +45,48 @@ function urlMatches(urlArray: string[], targetUrl: string): boolean {
   return urlArray.some((url) => normalizeUrl(url) === normalizedTarget);
 }

+/**
+ * Processes a custom system instruction by appending user memory if available.
+ * This function should only be used when there is actually a custom instruction.
+ *
+ * @param customInstruction - Custom system instruction (ContentUnion from @google/genai)
+ * @param userMemory - User memory to append
+ * @returns Processed custom system instruction with user memory appended
+ */
+export function getCustomSystemPrompt(
+  customInstruction: GenerateContentConfig['systemInstruction'],
+  userMemory?: string,
+): string {
+  // Extract text from custom instruction
+  let instructionText = '';
+
+  if (typeof customInstruction === 'string') {
+    instructionText = customInstruction;
+  } else if (Array.isArray(customInstruction)) {
+    // PartUnion[]
+    instructionText = customInstruction
+      .map((part) => (typeof part === 'string' ? part : part.text || ''))
+      .join('');
+  } else if (customInstruction && 'parts' in customInstruction) {
+    // Content
+    instructionText =
+      customInstruction.parts
+        ?.map((part) => (typeof part === 'string' ? part : part.text || ''))
+        .join('') || '';
+  } else if (customInstruction && 'text' in customInstruction) {
+    // PartUnion (single part)
+    instructionText = customInstruction.text || '';
+  }
+
+  // Append user memory using the same pattern as getCoreSystemPrompt
+  const memorySuffix =
+    userMemory && userMemory.trim().length > 0
+      ? `\n\n---\n\n${userMemory.trim()}`
+      : '';
+
+  return `${instructionText}${memorySuffix}`;
+}
+
 export function getCoreSystemPrompt(
   userMemory?: string,
   config?: SystemPromptConfig,

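The behavior of getCustomSystemPrompt is easiest to see from its inputs and outputs; the calls below simply mirror the new unit tests above (the string values are the test fixtures, nothing beyond this commit is implied).

import { getCustomSystemPrompt } from './prompts.js';

// String instruction plus user memory: memory is appended after a '---' separator.
const withMemory = getCustomSystemPrompt(
  'You are a helpful assistant specialized in code review.',
  'Remember to be extra thorough.',
);
// 'You are a helpful assistant specialized in code review.\n\n---\n\nRemember to be extra thorough.'

// Content object: part texts are concatenated; with no user memory, no suffix is added.
const fromParts = getCustomSystemPrompt({
  parts: [{ text: 'You are a code assistant. ' }, { text: 'Always provide examples.' }],
});
// 'You are a code assistant. Always provide examples.'
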
@@ -44,6 +44,7 @@ export * from './utils/textUtils.js';
 export * from './utils/formatters.js';
 export * from './utils/filesearch/fileSearch.js';
 export * from './utils/errorParsing.js';
+export * from './utils/subagentGenerator.js';

 // Export services
 export * from './services/fileDiscoveryService.js';

@@ -8,7 +8,8 @@ import { createHash } from 'crypto';
 import { GeminiEventType, ServerGeminiStreamEvent } from '../core/turn.js';
 import { logLoopDetected } from '../telemetry/loggers.js';
 import { LoopDetectedEvent, LoopType } from '../telemetry/types.js';
-import { Config, DEFAULT_GEMINI_FLASH_MODEL } from '../config/config.js';
+import { Config } from '../config/config.js';
+import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';

 const TOOL_CALL_LOOP_THRESHOLD = 5;
 const CONTENT_LOOP_THRESHOLD = 10;
@@ -360,7 +361,7 @@ Please analyze the conversation history to determine the possibility that the co
     try {
       result = await this.config
         .getGeminiClient()
-        .generateJson(contents, schema, signal, DEFAULT_GEMINI_FLASH_MODEL);
+        .generateJson(contents, schema, signal, DEFAULT_QWEN_FLASH_MODEL);
     } catch (e) {
       // Do nothing, treat it as a non-loop.
       this.config.getDebugMode() ? console.error(e) : console.debug(e);

@@ -388,7 +388,7 @@ export class MemoryTool
   constructor() {
     super(
       MemoryTool.Name,
-      'Save Memory',
+      'SaveMemory',
       memoryToolDescription,
       Kind.Think,
       memoryToolSchemaData.parametersJsonSchema as Record<string, unknown>,

@@ -407,7 +407,7 @@ export class TodoWriteTool extends BaseDeclarativeTool<
   constructor(private readonly config: Config) {
     super(
       TodoWriteTool.Name,
-      'Todo Write',
+      'TodoWrite',
       todoWriteToolDescription,
       Kind.Think,
       todoWriteToolSchemaData.parametersJsonSchema as Record<string, unknown>,

@@ -157,7 +157,7 @@ export class WebSearchTool extends BaseDeclarativeTool<
   constructor(private readonly config: Config) {
     super(
       WebSearchTool.Name,
-      'TavilySearch',
+      'WebSearch',
       'Performs a web search using the Tavily API and returns a concise answer with sources. Requires the TAVILY_API_KEY environment variable.',
       Kind.Search,
       {

@@ -12,14 +12,14 @@ import { ReadFileTool } from '../tools/read-file.js';
 import { ReadManyFilesTool } from '../tools/read-many-files.js';
 import { GrepTool } from '../tools/grep.js';
 import { LruCache } from './LruCache.js';
-import { DEFAULT_GEMINI_FLASH_LITE_MODEL } from '../config/models.js';
+import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';
 import {
   isFunctionResponse,
   isFunctionCall,
 } from '../utils/messageInspectors.js';
 import * as fs from 'fs';

-const EditModel = DEFAULT_GEMINI_FLASH_LITE_MODEL;
+const EditModel = DEFAULT_QWEN_FLASH_MODEL;
 const EditConfig: GenerateContentConfig = {
   thinkingConfig: {
     thinkingBudget: 0,

@@ -6,7 +6,7 @@

 import { describe, it, expect, vi, beforeEach, Mock, afterEach } from 'vitest';
 import { Content, GoogleGenAI, Models } from '@google/genai';
-import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
+import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';
 import { GeminiClient } from '../core/client.js';
 import { Config } from '../config/config.js';
 import { checkNextSpeaker, NextSpeakerResponse } from './nextSpeakerChecker.js';
@@ -233,7 +233,7 @@ describe('checkNextSpeaker', () => {
     expect(result).toBeNull();
   });

-  it('should call generateJson with DEFAULT_GEMINI_FLASH_MODEL', async () => {
+  it('should call generateJson with DEFAULT_QWEN_FLASH_MODEL', async () => {
     (chatInstance.getHistory as Mock).mockReturnValue([
       { role: 'model', parts: [{ text: 'Some model output.' }] },
     ] as Content[]);
@@ -248,6 +248,6 @@ describe('checkNextSpeaker', () => {
     expect(mockGeminiClient.generateJson).toHaveBeenCalled();
     const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
       .calls[0];
-    expect(generateJsonCall[3]).toBe(DEFAULT_GEMINI_FLASH_MODEL);
+    expect(generateJsonCall[3]).toBe(DEFAULT_QWEN_FLASH_MODEL);
   });
 });

@@ -5,7 +5,7 @@
  */

 import { Content } from '@google/genai';
-import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
+import { DEFAULT_QWEN_FLASH_MODEL } from '../config/models.js';
 import { GeminiClient } from '../core/client.js';
 import { GeminiChat } from '../core/geminiChat.js';
 import { isFunctionResponse } from './messageInspectors.js';
@@ -112,7 +112,7 @@ export async function checkNextSpeaker(
     contents,
     RESPONSE_SCHEMA,
     abortSignal,
-    DEFAULT_GEMINI_FLASH_MODEL,
+    DEFAULT_QWEN_FLASH_MODEL,
   )) as unknown as NextSpeakerResponse;

   if (

packages/core/src/utils/subagentGenerator.test.ts (new file, 262 lines)
@@ -0,0 +1,262 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, Mock, afterEach } from 'vitest';
import { Content, GoogleGenAI, Models } from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import { GeminiClient } from '../core/client.js';
import { Config } from '../config/config.js';
import {
  subagentGenerator,
  SubagentGeneratedContent,
} from './subagentGenerator.js';

// Mock GeminiClient and Config constructor
vi.mock('../core/client.js');
vi.mock('../config/config.js');

// Define mocks for GoogleGenAI and Models instances that will be used across tests
const mockModelsInstance = {
  generateContent: vi.fn(),
  generateContentStream: vi.fn(),
  countTokens: vi.fn(),
  embedContent: vi.fn(),
  batchEmbedContents: vi.fn(),
} as unknown as Models;

const mockGoogleGenAIInstance = {
  getGenerativeModel: vi.fn().mockReturnValue(mockModelsInstance),
} as unknown as GoogleGenAI;

vi.mock('@google/genai', async () => {
  const actualGenAI =
    await vi.importActual<typeof import('@google/genai')>('@google/genai');
  return {
    ...actualGenAI,
    GoogleGenAI: vi.fn(() => mockGoogleGenAIInstance),
  };
});

describe('subagentGenerator', () => {
  let mockGeminiClient: GeminiClient;
  let MockConfig: Mock;
  const abortSignal = new AbortController().signal;

  beforeEach(() => {
    MockConfig = vi.mocked(Config);
    const mockConfigInstance = new MockConfig(
      'test-api-key',
      'gemini-pro',
      false,
      '.',
      false,
      undefined,
      false,
      undefined,
      undefined,
      undefined,
    );

    mockGeminiClient = new GeminiClient(mockConfigInstance);

    // Reset mocks before each test to ensure test isolation
    vi.mocked(mockModelsInstance.generateContent).mockReset();
    vi.mocked(mockModelsInstance.generateContentStream).mockReset();
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  it('should throw error for empty user description', async () => {
    await expect(
      subagentGenerator('', mockGeminiClient, abortSignal),
    ).rejects.toThrow('User description cannot be empty');

    await expect(
      subagentGenerator(' ', mockGeminiClient, abortSignal),
    ).rejects.toThrow('User description cannot be empty');

    expect(mockGeminiClient.generateJson).not.toHaveBeenCalled();
  });

  it('should successfully generate content with valid LLM response', async () => {
    const userDescription = 'help with code reviews and suggestions';
    const mockApiResponse: SubagentGeneratedContent = {
      name: 'code-review-assistant',
      description:
        'A specialized subagent that helps with code reviews and provides improvement suggestions.',
      systemPrompt:
        'You are a code review expert. Analyze code for best practices, bugs, and improvements.',
    };

    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockApiResponse);

    const result = await subagentGenerator(
      userDescription,
      mockGeminiClient,
      abortSignal,
    );

    expect(result).toEqual(mockApiResponse);
    expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1);

    // Verify the call parameters
    const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
      .calls[0];
    const contents = generateJsonCall[0] as Content[];

    // Should have 1 user message with the query
    expect(contents).toHaveLength(1);
    expect(contents[0]?.role).toBe('user');
    expect(contents[0]?.parts?.[0]?.text).toContain(
      `Create an agent configuration based on this request: "${userDescription}"`,
    );

    // Check that system prompt is passed in the config parameter
    expect(generateJsonCall[2]).toBe(abortSignal);
    expect(generateJsonCall[3]).toBe(DEFAULT_QWEN_MODEL);
    expect(generateJsonCall[4]).toEqual(
      expect.objectContaining({
        systemInstruction: expect.stringContaining(
          'You are an elite AI agent architect',
        ),
      }),
    );
  });

  it('should throw error when LLM response is missing required fields', async () => {
    const userDescription = 'help with documentation';
    const incompleteResponse = {
      name: 'doc-helper',
      description: 'Helps with documentation',
      // Missing systemPrompt
    };

    (mockGeminiClient.generateJson as Mock).mockResolvedValue(
      incompleteResponse,
    );

    await expect(
      subagentGenerator(userDescription, mockGeminiClient, abortSignal),
    ).rejects.toThrow('Invalid response from LLM: missing required fields');

    expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1);
  });

  it('should throw error when LLM response has empty fields', async () => {
    const userDescription = 'database optimization';
    const emptyFieldsResponse = {
      name: '',
      description: 'Helps with database optimization',
      systemPrompt: 'You are a database expert.',
    };

    (mockGeminiClient.generateJson as Mock).mockResolvedValue(
      emptyFieldsResponse,
    );

    await expect(
      subagentGenerator(userDescription, mockGeminiClient, abortSignal),
    ).rejects.toThrow('Invalid response from LLM: missing required fields');
  });

  it('should throw error when generateJson throws an error', async () => {
    const userDescription = 'testing automation';
    (mockGeminiClient.generateJson as Mock).mockRejectedValue(
      new Error('API Error'),
    );

    await expect(
      subagentGenerator(userDescription, mockGeminiClient, abortSignal),
    ).rejects.toThrow('API Error');
  });

  it('should call generateJson with correct schema and model', async () => {
    const userDescription = 'data analysis';
    const mockResponse: SubagentGeneratedContent = {
      name: 'data-analyst',
      description: 'Analyzes data and provides insights.',
      systemPrompt: 'You are a data analysis expert.',
    };

    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockResponse);

    await subagentGenerator(userDescription, mockGeminiClient, abortSignal);

    expect(mockGeminiClient.generateJson).toHaveBeenCalledWith(
      expect.any(Array),
      expect.objectContaining({
        type: 'object',
        properties: expect.objectContaining({
          name: expect.objectContaining({ type: 'string' }),
          description: expect.objectContaining({ type: 'string' }),
          systemPrompt: expect.objectContaining({ type: 'string' }),
        }),
        required: ['name', 'description', 'systemPrompt'],
      }),
      abortSignal,
      DEFAULT_QWEN_MODEL,
      expect.objectContaining({
        systemInstruction: expect.stringContaining(
          'You are an elite AI agent architect',
        ),
      }),
    );
  });

  it('should include user description in the prompt', async () => {
    const userDescription = 'machine learning model training';
    const mockResponse: SubagentGeneratedContent = {
      name: 'ml-trainer',
      description: 'Trains machine learning models.',
      systemPrompt: 'You are an ML expert.',
    };

    (mockGeminiClient.generateJson as Mock).mockResolvedValue(mockResponse);

    await subagentGenerator(userDescription, mockGeminiClient, abortSignal);

    const generateJsonCall = (mockGeminiClient.generateJson as Mock).mock
      .calls[0];
    const contents = generateJsonCall[0] as Content[];

    // Check user query (only message)
    expect(contents).toHaveLength(1);
    const userQueryContent = contents[0]?.parts?.[0]?.text;
    expect(userQueryContent).toContain(userDescription);
    expect(userQueryContent).toContain(
      'Create an agent configuration based on this request:',
    );

    // Check that system prompt is passed in the config parameter
    expect(generateJsonCall[4]).toEqual(
      expect.objectContaining({
        systemInstruction: expect.stringContaining(
          'You are an elite AI agent architect',
        ),
      }),
    );
  });

  it('should throw error for null response from generateJson', async () => {
    const userDescription = 'security auditing';
    (mockGeminiClient.generateJson as Mock).mockResolvedValue(null);

    await expect(
      subagentGenerator(userDescription, mockGeminiClient, abortSignal),
    ).rejects.toThrow('Invalid response from LLM: missing required fields');
  });

  it('should throw error for undefined response from generateJson', async () => {
    const userDescription = 'api documentation';
    (mockGeminiClient.generateJson as Mock).mockResolvedValue(undefined);

    await expect(
      subagentGenerator(userDescription, mockGeminiClient, abortSignal),
    ).rejects.toThrow('Invalid response from LLM: missing required fields');
  });
});

packages/core/src/utils/subagentGenerator.ts (new file, 148 lines)
@@ -0,0 +1,148 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { Content } from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import { GeminiClient } from '../core/client.js';

const SYSTEM_PROMPT = `You are an elite AI agent architect specializing in crafting high-performance agent configurations. Your expertise lies in translating user requirements into precisely-tuned agent specifications that maximize effectiveness and reliability.

**Important Context**: You may have access to project-specific instructions from QWEN.md files and other context that may include coding standards, project structure, and custom requirements. Consider this context when creating agents to ensure they align with the project's established patterns and practices.

When a user describes what they want an agent to do, you will:

1. **Extract Core Intent**: Identify the fundamental purpose, key responsibilities, and success criteria for the agent. Look for both explicit requirements and implicit needs. Consider any project-specific context from QWEN.md files. For agents that are meant to review code, you should assume that the user is asking to review recently written code and not the whole codebase, unless the user has explicitly instructed you otherwise.

2. **Design Expert Persona**: Create a compelling expert identity that embodies deep domain knowledge relevant to the task. The persona should inspire confidence and guide the agent's decision-making approach.

3. **Architect Comprehensive Instructions**: Develop a system prompt that:
- Establishes clear behavioral boundaries and operational parameters
- Provides specific methodologies and best practices for task execution
- Anticipates edge cases and provides guidance for handling them
- Incorporates any specific requirements or preferences mentioned by the user
- Defines output format expectations when relevant
- Aligns with project-specific coding standards and patterns from QWEN.md

4. **Optimize for Performance**: Include:
- Decision-making frameworks appropriate to the domain
- Quality control mechanisms and self-verification steps
- Efficient workflow patterns
- Clear escalation or fallback strategies

5. **Create Identifier**: Design a concise, descriptive identifier that:
- Uses lowercase letters, numbers, and hyphens only
- Is typically 2-4 words joined by hyphens
- Clearly indicates the agent's primary function
- Is memorable and easy to type
- Avoids generic terms like "helper" or "assistant"

6. **Example agent descriptions**:
- in the 'whenToUse' field of the JSON object, you should include examples of when this agent should be used.
- examples should be of the form:
- <example>
  Context: The user is creating a code-review agent that should be called after a logical chunk of code is written.
  user: "Please write a function that checks if a number is prime"
  assistant: "Here is the relevant function: "
  <function call omitted for brevity only for this example>
  <commentary>
  Since a logical chunk of code has just been written, use the Task tool to launch the code-reviewer agent to review it.
  </commentary>
  assistant: "Now let me use the code-reviewer agent to review the code"
  </example>
- <example>
  Context: User is creating an agent to respond to the word "hello" with a friendly joke.
  user: "Hello"
  assistant: "I'm going to use the Task tool to launch the greeting-responder agent to respond with a friendly joke"
  <commentary>
  Since the user is greeting, use the greeting-responder agent to respond with a friendly joke.
  </commentary>
  </example>
- If the user mentioned or implied that the agent should be used proactively, you should include examples of this.
- NOTE: Ensure that in the examples, you are making the assistant use the Agent tool and not simply respond directly to the task.

Key principles for your system prompts:
- Be specific rather than generic - avoid vague instructions
- Include concrete examples when they would clarify behavior
- Balance comprehensiveness with clarity - every instruction should add value
- Ensure the agent has enough context to handle variations of the core task
- Make the agent proactive in seeking clarification when needed
- Build in quality assurance and self-correction mechanisms

Remember: The agents you create should be autonomous experts capable of handling their designated tasks with minimal additional guidance. Your system prompts are their complete operational manual.
`;

const createUserPrompt = (userInput: string): string =>
  `Create an agent configuration based on this request: "${userInput}"`;

const RESPONSE_SCHEMA: Record<string, unknown> = {
  type: 'object',
  properties: {
    name: {
      type: 'string',
      description:
        "A unique, descriptive identifier using lowercase letters, numbers, and hyphens (e.g., 'code-reviewer', 'api-docs-writer', 'test-generator')",
    },
    description: {
      type: 'string',
      description:
        "A precise, actionable description starting with 'Use this agent when...' that clearly defines the triggering conditions and use cases",
    },
    systemPrompt: {
      type: 'string',
      description:
        "The complete system prompt that will govern the agent's behavior, written in second person ('You are...', 'You will...') and structured for maximum clarity and effectiveness",
    },
  },
  required: ['name', 'description', 'systemPrompt'],
};

export interface SubagentGeneratedContent {
  name: string;
  description: string;
  systemPrompt: string;
}

/**
 * Generates subagent configuration content using an LLM.
 *
 * @param userDescription - The user's description of what the subagent should do
 * @param geminiClient - Initialized GeminiClient instance
 * @param abortSignal - AbortSignal for cancelling the request
 * @returns Promise resolving to the generated subagent content
 */
export async function subagentGenerator(
  userDescription: string,
  geminiClient: GeminiClient,
  abortSignal: AbortSignal,
): Promise<SubagentGeneratedContent> {
  if (!userDescription.trim()) {
    throw new Error('User description cannot be empty');
  }

  const userPrompt = createUserPrompt(userDescription);
  const contents: Content[] = [{ role: 'user', parts: [{ text: userPrompt }] }];

  const parsedResponse = (await geminiClient.generateJson(
    contents,
    RESPONSE_SCHEMA,
    abortSignal,
    DEFAULT_QWEN_MODEL,
    {
      systemInstruction: SYSTEM_PROMPT,
    },
  )) as unknown as SubagentGeneratedContent;

  if (
    !parsedResponse ||
    !parsedResponse.name ||
    !parsedResponse.description ||
    !parsedResponse.systemPrompt
  ) {
    throw new Error('Invalid response from LLM: missing required fields');
  }

  return parsedResponse;
}
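
For context, a hedged usage sketch of the new generator follows; how a GeminiClient instance is obtained is assumed and not part of this commit, only the subagentGenerator call and the shape of its result are.

// Illustrative sketch only, not committed code.
import { subagentGenerator } from './subagentGenerator.js';
import { GeminiClient } from '../core/client.js';

// Assumes an already-initialized client; constructing one is outside this commit.
async function createReviewSubagent(client: GeminiClient): Promise<void> {
  const controller = new AbortController();
  const generated = await subagentGenerator(
    'help with code reviews and suggestions',
    client,
    controller.signal,
  );

  // The result always carries the three schema-required fields
  // (e.g. name: 'code-review-assistant', description: 'Use this agent when...',
  // and a second-person systemPrompt); otherwise subagentGenerator throws.
  console.log(generated.name, generated.description, generated.systemPrompt);
}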