Mirror of https://github.com/QwenLM/qwen-code.git, synced 2025-12-19 09:33:53 +00:00
Merge remote-tracking branch 'upstream/main' into fix/issue-1186-schema-converter
@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.4.1",
"version": "0.5.1",
"description": "Qwen Code Core",
"repository": {
"type": "git",
@@ -318,6 +318,7 @@ export interface ConfigParameters {
generationConfig?: Partial<ContentGeneratorConfig>;
cliVersion?: string;
loadMemoryFromIncludeDirectories?: boolean;
chatRecording?: boolean;
// Web search providers
webSearch?: {
provider: Array<{
@@ -349,6 +350,7 @@ export interface ConfigParameters {
skipStartupContext?: boolean;
sdkMode?: boolean;
sessionSubagents?: SubagentConfig[];
channel?: string;
}

function normalizeConfigOutputFormat(
@@ -456,6 +458,7 @@ export class Config {
| undefined;
private readonly cliVersion?: string;
private readonly experimentalZedIntegration: boolean = false;
private readonly chatRecordingEnabled: boolean;
private readonly loadMemoryFromIncludeDirectories: boolean = false;
private readonly webSearch?: {
provider: Array<{
@@ -485,6 +488,7 @@ export class Config {
private readonly enableToolOutputTruncation: boolean;
private readonly eventEmitter?: EventEmitter;
private readonly useSmartEdit: boolean;
private readonly channel: string | undefined;

constructor(params: ConfigParameters) {
this.sessionId = params.sessionId ?? randomUUID();
@@ -570,6 +574,8 @@ export class Config {
._generationConfig as ContentGeneratorConfig;
this.cliVersion = params.cliVersion;

this.chatRecordingEnabled = params.chatRecording ?? true;

this.loadMemoryFromIncludeDirectories =
params.loadMemoryFromIncludeDirectories ?? false;
this.chatCompression = params.chatCompression;
@@ -598,6 +604,7 @@ export class Config {
this.enableToolOutputTruncation = params.enableToolOutputTruncation ?? true;
this.useSmartEdit = params.useSmartEdit ?? false;
this.extensionManagement = params.extensionManagement ?? true;
this.channel = params.channel;
this.storage = new Storage(this.targetDir);
this.vlmSwitchMode = params.vlmSwitchMode;
this.inputFormat = params.inputFormat ?? InputFormat.TEXT;
@@ -615,7 +622,9 @@ export class Config {
setGlobalDispatcher(new ProxyAgent(this.getProxy() as string));
}
this.geminiClient = new GeminiClient(this);
this.chatRecordingService = new ChatRecordingService(this);
this.chatRecordingService = this.chatRecordingEnabled
? new ChatRecordingService(this)
: undefined;
}

/**
@@ -735,7 +744,9 @@ export class Config {
startNewSession(sessionId?: string): string {
this.sessionId = sessionId ?? randomUUID();
this.sessionData = undefined;
this.chatRecordingService = new ChatRecordingService(this);
this.chatRecordingService = this.chatRecordingEnabled
? new ChatRecordingService(this)
: undefined;
if (this.initialized) {
logStartSession(this, new StartSessionEvent(this));
}
@@ -1144,6 +1155,10 @@ export class Config {
return this.cliVersion;
}

getChannel(): string | undefined {
return this.channel;
}

/**
* Get the current FileSystemService
*/
@@ -1260,7 +1275,10 @@ export class Config {
/**
* Returns the chat recording service.
*/
getChatRecordingService(): ChatRecordingService {
getChatRecordingService(): ChatRecordingService | undefined {
if (!this.chatRecordingEnabled) {
return undefined;
}
if (!this.chatRecordingService) {
this.chatRecordingService = new ChatRecordingService(this);
}
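Because `getChatRecordingService()` can now return `undefined` when the config was built with `chatRecording: false`, call sites have to guard the result (or use optional chaining, e.g. `config.getChatRecordingService()?.someMethod(...)`). A minimal caller-side sketch; the import path is an assumption and no recording API is invoked since that API is not part of this diff:

```ts
import type { Config } from './config.js'; // import path assumed

// Sketch only: detect whether recording is active before doing any recording work.
export function isRecordingActive(config: Config): boolean {
  const recorder = config.getChatRecordingService();
  return recorder !== undefined; // undefined when constructed with chatRecording: false
}
```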
@@ -7,7 +7,13 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { OpenAIContentConverter } from './converter.js';
import type { StreamingToolCallParser } from './streamingToolCallParser.js';
import type { GenerateContentParameters, Content } from '@google/genai';
import {
Type,
type GenerateContentParameters,
type Content,
type Tool,
type CallableTool,
} from '@google/genai';
import type OpenAI from 'openai';

describe('OpenAIContentConverter', () => {
@@ -202,4 +208,338 @@ describe('OpenAIContentConverter', () => {
);
});
});

describe('convertGeminiToolsToOpenAI', () => {
it('should convert Gemini tools with parameters field', async () => {
const geminiTools = [
{
functionDeclarations: [
{
name: 'get_weather',
description: 'Get weather for a location',
parameters: {
type: Type.OBJECT,
properties: {
location: { type: Type.STRING },
},
required: ['location'],
},
},
],
},
] as Tool[];

const result = await converter.convertGeminiToolsToOpenAI(geminiTools);

expect(result).toHaveLength(1);
expect(result[0]).toEqual({
type: 'function',
function: {
name: 'get_weather',
description: 'Get weather for a location',
parameters: {
type: 'object',
properties: {
location: { type: 'string' },
},
required: ['location'],
},
},
});
});

it('should convert MCP tools with parametersJsonSchema field', async () => {
// MCP tools use parametersJsonSchema which contains plain JSON schema (not Gemini types)
const mcpTools = [
{
functionDeclarations: [
{
name: 'read_file',
description: 'Read a file from disk',
parametersJsonSchema: {
type: 'object',
properties: {
path: { type: 'string' },
},
required: ['path'],
},
},
],
},
] as Tool[];

const result = await converter.convertGeminiToolsToOpenAI(mcpTools);

expect(result).toHaveLength(1);
expect(result[0]).toEqual({
type: 'function',
function: {
name: 'read_file',
description: 'Read a file from disk',
parameters: {
type: 'object',
properties: {
path: { type: 'string' },
},
required: ['path'],
},
},
});
});

it('should handle CallableTool by resolving tool function', async () => {
const callableTools = [
{
tool: async () => ({
functionDeclarations: [
{
name: 'dynamic_tool',
description: 'A dynamically resolved tool',
parameters: {
type: Type.OBJECT,
properties: {},
},
},
],
}),
},
] as CallableTool[];

const result = await converter.convertGeminiToolsToOpenAI(callableTools);

expect(result).toHaveLength(1);
expect(result[0].function.name).toBe('dynamic_tool');
});

it('should skip functions without name or description', async () => {
const geminiTools = [
{
functionDeclarations: [
{
name: 'valid_tool',
description: 'A valid tool',
},
{
name: 'missing_description',
// no description
},
{
// no name
description: 'Missing name',
},
],
},
] as Tool[];

const result = await converter.convertGeminiToolsToOpenAI(geminiTools);

expect(result).toHaveLength(1);
expect(result[0].function.name).toBe('valid_tool');
});

it('should handle tools without functionDeclarations', async () => {
const emptyTools: Tool[] = [{} as Tool, { functionDeclarations: [] }];

const result = await converter.convertGeminiToolsToOpenAI(emptyTools);

expect(result).toHaveLength(0);
});

it('should handle functions without parameters', async () => {
const geminiTools: Tool[] = [
{
functionDeclarations: [
{
name: 'no_params_tool',
description: 'A tool without parameters',
},
],
},
];

const result = await converter.convertGeminiToolsToOpenAI(geminiTools);

expect(result).toHaveLength(1);
expect(result[0].function.parameters).toBeUndefined();
});

it('should not mutate original parametersJsonSchema', async () => {
const originalSchema = {
type: 'object',
properties: { foo: { type: 'string' } },
};
const mcpTools: Tool[] = [
{
functionDeclarations: [
{
name: 'test_tool',
description: 'Test tool',
parametersJsonSchema: originalSchema,
},
],
} as Tool,
];

const result = await converter.convertGeminiToolsToOpenAI(mcpTools);

// Verify the result is a copy, not the same reference
expect(result[0].function.parameters).not.toBe(originalSchema);
expect(result[0].function.parameters).toEqual(originalSchema);
});
});

describe('convertGeminiToolParametersToOpenAI', () => {
it('should convert type names to lowercase', () => {
const params = {
type: 'OBJECT',
properties: {
count: { type: 'INTEGER' },
amount: { type: 'NUMBER' },
name: { type: 'STRING' },
},
};

const result = converter.convertGeminiToolParametersToOpenAI(params);

expect(result).toEqual({
type: 'object',
properties: {
count: { type: 'integer' },
amount: { type: 'number' },
name: { type: 'string' },
},
});
});

it('should convert string numeric constraints to numbers', () => {
const params = {
type: 'object',
properties: {
value: {
type: 'number',
minimum: '0',
maximum: '100',
multipleOf: '0.5',
},
},
};

const result = converter.convertGeminiToolParametersToOpenAI(params);
const properties = result?.['properties'] as Record<string, unknown>;

expect(properties?.['value']).toEqual({
type: 'number',
minimum: 0,
maximum: 100,
multipleOf: 0.5,
});
});

it('should convert string length constraints to integers', () => {
const params = {
type: 'object',
properties: {
text: {
type: 'string',
minLength: '1',
maxLength: '100',
},
items: {
type: 'array',
minItems: '0',
maxItems: '10',
},
},
};

const result = converter.convertGeminiToolParametersToOpenAI(params);
const properties = result?.['properties'] as Record<string, unknown>;

expect(properties?.['text']).toEqual({
type: 'string',
minLength: 1,
maxLength: 100,
});
expect(properties?.['items']).toEqual({
type: 'array',
minItems: 0,
maxItems: 10,
});
});

it('should handle nested objects', () => {
const params = {
type: 'object',
properties: {
nested: {
type: 'object',
properties: {
deep: {
type: 'INTEGER',
minimum: '0',
},
},
},
},
};

const result = converter.convertGeminiToolParametersToOpenAI(params);
const properties = result?.['properties'] as Record<string, unknown>;
const nested = properties?.['nested'] as Record<string, unknown>;
const nestedProperties = nested?.['properties'] as Record<
string,
unknown
>;

expect(nestedProperties?.['deep']).toEqual({
type: 'integer',
minimum: 0,
});
});

it('should handle arrays', () => {
const params = {
type: 'array',
items: {
type: 'INTEGER',
},
};

const result = converter.convertGeminiToolParametersToOpenAI(params);

expect(result).toEqual({
type: 'array',
items: {
type: 'integer',
},
});
});

it('should return undefined for null or non-object input', () => {
expect(
converter.convertGeminiToolParametersToOpenAI(
null as unknown as Record<string, unknown>,
),
).toBeNull();
expect(
converter.convertGeminiToolParametersToOpenAI(
undefined as unknown as Record<string, unknown>,
),
).toBeUndefined();
});

it('should not mutate the original parameters', () => {
const original = {
type: 'OBJECT',
properties: {
count: { type: 'INTEGER' },
},
};
const originalCopy = JSON.parse(JSON.stringify(original));

converter.convertGeminiToolParametersToOpenAI(original);

expect(original).toEqual(originalCopy);
});
});
});
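For context, the converted tools are meant to be passed straight to an OpenAI-compatible chat completion request. A hedged sketch of that downstream use, assuming the converter instance is created the way the test setup does (its constructor arguments are not shown here) and using a placeholder model name:

```ts
import OpenAI from 'openai';
import { Type, type Tool } from '@google/genai';
import { OpenAIContentConverter } from './converter.js';

// Sketch: convert Gemini-style tool declarations and hand them to an OpenAI client.
async function exampleToolCall(converter: OpenAIContentConverter, client: OpenAI) {
  const geminiTools = [
    {
      functionDeclarations: [
        {
          name: 'get_weather',
          description: 'Get weather for a location',
          parameters: {
            type: Type.OBJECT,
            properties: { location: { type: Type.STRING } },
            required: ['location'],
          },
        },
      ],
    },
  ] as Tool[];

  const openaiTools = await converter.convertGeminiToolsToOpenAI(geminiTools);
  return client.chat.completions.create({
    model: 'qwen-plus', // placeholder model name
    messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
    tools: openaiTools,
  });
}
```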
@@ -199,13 +199,11 @@ export class OpenAIContentConverter {
// Handle both Gemini tools (parameters) and MCP tools (parametersJsonSchema)
if (func.parametersJsonSchema) {
// MCP tool format - use parametersJsonSchema directly
if (func.parametersJsonSchema) {
// Create a shallow copy to avoid mutating the original object
const paramsCopy = {
...(func.parametersJsonSchema as Record<string, unknown>),
};
parameters = paramsCopy;
}
// Create a shallow copy to avoid mutating the original object
const paramsCopy = {
...(func.parametersJsonSchema as Record<string, unknown>),
};
parameters = paramsCopy;
} else if (func.parameters) {
// Gemini tool format - convert parameters to OpenAI format
parameters = this.convertGeminiToolParametersToOpenAI(
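For reference, a standalone sketch of the normalization behavior the tests above pin down (lowercasing type names, coercing string-valued numeric constraints, recursing into `properties` and `items`, never mutating the input). This is not the project's actual implementation, only an equivalent under those assumptions:

```ts
// Sketch only: mirrors the behavior asserted in converter.test.ts.
function normalizeSchema(schema: Record<string, unknown>): Record<string, unknown> {
  const numericKeys = new Set([
    'minimum', 'maximum', 'multipleOf',
    'minLength', 'maxLength', 'minItems', 'maxItems',
  ]);
  const out: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(schema)) {
    if (key === 'type' && typeof value === 'string') {
      out[key] = value.toLowerCase(); // 'OBJECT' -> 'object'
    } else if (numericKeys.has(key) && typeof value === 'string') {
      out[key] = Number(value); // '100' -> 100
    } else if (key === 'properties' && value && typeof value === 'object') {
      out[key] = Object.fromEntries(
        Object.entries(value as Record<string, unknown>).map(([k, v]) => [
          k,
          normalizeSchema(v as Record<string, unknown>),
        ]),
      );
    } else if (key === 'items' && value && typeof value === 'object') {
      out[key] = normalizeSchema(value as Record<string, unknown>);
    } else {
      out[key] = value; // copy everything else untouched (no mutation)
    }
  }
  return out;
}
```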
@@ -130,10 +130,13 @@ export class DashScopeOpenAICompatibleProvider
}

buildMetadata(userPromptId: string): DashScopeRequestMetadata {
const channel = this.cliConfig.getChannel?.();

return {
metadata: {
sessionId: this.cliConfig.getSessionId?.(),
promptId: userPromptId,
...(channel ? { channel } : {}),
},
};
}

@@ -28,5 +28,6 @@ export type DashScopeRequestMetadata = {
metadata: {
sessionId?: string;
promptId: string;
channel?: string;
};
};
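With the optional `channel` wired through, a fully populated metadata object would look roughly like this (all values and the import path are illustrative; `channel` is only present when `cliConfig.getChannel()` returns a value):

```ts
import type { DashScopeRequestMetadata } from './types.js'; // import path assumed

const example: DashScopeRequestMetadata = {
  metadata: {
    sessionId: 'session-1234', // from cliConfig.getSessionId()
    promptId: 'prompt-42',
    channel: 'example-channel', // from cliConfig.getChannel()
  },
};
```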
@@ -761,7 +761,6 @@ describe('getQwenOAuthClient', () => {
});

it('should load cached credentials if available', async () => {
const fs = await import('node:fs');
const mockCredentials = {
access_token: 'cached-token',
refresh_token: 'cached-refresh',
@@ -769,10 +768,6 @@ describe('getQwenOAuthClient', () => {
expiry_date: Date.now() + 3600000,
};

vi.mocked(fs.promises.readFile).mockResolvedValue(
JSON.stringify(mockCredentials),
);

// Mock SharedTokenManager to use cached credentials
const mockTokenManager = {
getValidCredentials: vi.fn().mockResolvedValue(mockCredentials),
@@ -792,18 +787,6 @@ describe('getQwenOAuthClient', () => {
});

it('should handle cached credentials refresh failure', async () => {
const fs = await import('node:fs');
const mockCredentials = {
access_token: 'cached-token',
refresh_token: 'expired-refresh',
token_type: 'Bearer',
expiry_date: Date.now() + 3600000, // Valid expiry time so loadCachedQwenCredentials returns true
};

vi.mocked(fs.promises.readFile).mockResolvedValue(
JSON.stringify(mockCredentials),
);

// Mock SharedTokenManager to fail with a specific error
const mockTokenManager = {
getValidCredentials: vi
@@ -833,6 +816,35 @@ describe('getQwenOAuthClient', () => {

SharedTokenManager.getInstance = originalGetInstance;
});

it('should not start device flow when requireCachedCredentials is true', async () => {
// Make SharedTokenManager fail so we hit the fallback path
const mockTokenManager = {
getValidCredentials: vi
.fn()
.mockRejectedValue(new Error('No credentials')),
};

const originalGetInstance = SharedTokenManager.getInstance;
SharedTokenManager.getInstance = vi.fn().mockReturnValue(mockTokenManager);

// If requireCachedCredentials is honored, device-flow network requests should not start
vi.mocked(global.fetch).mockResolvedValue({ ok: true } as Response);

await expect(
import('./qwenOAuth2.js').then((module) =>
module.getQwenOAuthClient(mockConfig, {
requireCachedCredentials: true,
}),
),
).rejects.toThrow(
'No cached Qwen-OAuth credentials found. Please re-authenticate.',
);

expect(global.fetch).not.toHaveBeenCalled();

SharedTokenManager.getInstance = originalGetInstance;
});
});

describe('CredentialsClearRequiredError', () => {
@@ -1574,178 +1586,6 @@ describe('Credential Caching Functions', () => {
expect(updatedCredentials.access_token).toBe('new-token');
});
});

describe('loadCachedQwenCredentials', () => {
it('should load and validate cached credentials successfully', async () => {
const { promises: fs } = await import('node:fs');
const mockCredentials = {
access_token: 'cached-token',
refresh_token: 'cached-refresh',
token_type: 'Bearer',
expiry_date: Date.now() + 3600000,
};

vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(mockCredentials));

// Test through getQwenOAuthClient which calls loadCachedQwenCredentials
const mockConfig = {
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(true),
} as unknown as Config;

// Make SharedTokenManager fail to test the fallback
const mockTokenManager = {
getValidCredentials: vi
.fn()
.mockRejectedValue(new Error('No cached creds')),
};

const originalGetInstance = SharedTokenManager.getInstance;
SharedTokenManager.getInstance = vi
.fn()
.mockReturnValue(mockTokenManager);

// Mock successful auth flow after cache load fails
const mockAuthResponse = {
ok: true,
json: async () => ({
device_code: 'test-device-code',
user_code: 'TEST123',
verification_uri: 'https://chat.qwen.ai/device',
verification_uri_complete: 'https://chat.qwen.ai/device?code=TEST123',
expires_in: 1800,
}),
};

const mockTokenResponse = {
ok: true,
json: async () => ({
access_token: 'new-access-token',
refresh_token: 'new-refresh-token',
token_type: 'Bearer',
expires_in: 3600,
scope: 'openid profile email model.completion',
}),
};

global.fetch = vi
.fn()
.mockResolvedValueOnce(mockAuthResponse as Response)
.mockResolvedValue(mockTokenResponse as Response);

try {
await import('./qwenOAuth2.js').then((module) =>
module.getQwenOAuthClient(mockConfig),
);
} catch {
// Expected to fail in test environment
}

expect(fs.readFile).toHaveBeenCalled();
SharedTokenManager.getInstance = originalGetInstance;
});

it('should handle invalid cached credentials gracefully', async () => {
const { promises: fs } = await import('node:fs');

// Mock file read to return invalid JSON
vi.mocked(fs.readFile).mockResolvedValue('invalid-json');

const mockConfig = {
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(true),
} as unknown as Config;

const mockTokenManager = {
getValidCredentials: vi
.fn()
.mockRejectedValue(new Error('No cached creds')),
};

const originalGetInstance = SharedTokenManager.getInstance;
SharedTokenManager.getInstance = vi
.fn()
.mockReturnValue(mockTokenManager);

// Mock auth flow
const mockAuthResponse = {
ok: true,
json: async () => ({
device_code: 'test-device-code',
user_code: 'TEST123',
verification_uri: 'https://chat.qwen.ai/device',
verification_uri_complete: 'https://chat.qwen.ai/device?code=TEST123',
expires_in: 1800,
}),
};

const mockTokenResponse = {
ok: true,
json: async () => ({
access_token: 'new-token',
refresh_token: 'new-refresh',
token_type: 'Bearer',
expires_in: 3600,
}),
};

global.fetch = vi
.fn()
.mockResolvedValueOnce(mockAuthResponse as Response)
.mockResolvedValue(mockTokenResponse as Response);

try {
await import('./qwenOAuth2.js').then((module) =>
module.getQwenOAuthClient(mockConfig),
);
} catch {
// Expected to fail in test environment
}

SharedTokenManager.getInstance = originalGetInstance;
});

it('should handle file access errors', async () => {
const { promises: fs } = await import('node:fs');

vi.mocked(fs.readFile).mockRejectedValue(new Error('File not found'));

const mockConfig = {
isBrowserLaunchSuppressed: vi.fn().mockReturnValue(true),
} as unknown as Config;

const mockTokenManager = {
getValidCredentials: vi
.fn()
.mockRejectedValue(new Error('No cached creds')),
};

const originalGetInstance = SharedTokenManager.getInstance;
SharedTokenManager.getInstance = vi
.fn()
.mockReturnValue(mockTokenManager);

// Mock device flow to fail quickly
const mockAuthResponse = {
ok: true,
json: async () => ({
error: 'invalid_request',
error_description: 'Invalid request parameters',
}),
};

global.fetch = vi.fn().mockResolvedValue(mockAuthResponse as Response);

// Should proceed to device flow when cache loading fails
try {
await import('./qwenOAuth2.js').then((module) =>
module.getQwenOAuthClient(mockConfig),
);
} catch {
// Expected to fail in test environment
}

SharedTokenManager.getInstance = originalGetInstance;
});
});
});

describe('Enhanced Error Handling and Edge Cases', () => {
@@ -514,26 +514,14 @@ export async function getQwenOAuthClient(
}
}

// If shared manager fails, check if we have cached credentials for device flow
if (await loadCachedQwenCredentials(client)) {
// We have cached credentials but they might be expired
// Try device flow instead of forcing refresh
const result = await authWithQwenDeviceFlow(client, config);
if (!result.success) {
// Use detailed error message if available, otherwise use default
const errorMessage =
result.message || 'Qwen OAuth authentication failed';
throw new Error(errorMessage);
}
return client;
}

if (options?.requireCachedCredentials) {
throw new Error(
'No cached Qwen-OAuth credentials found. Please re-authenticate.',
);
}

// If we couldn't obtain valid credentials via SharedTokenManager, fall back to
// interactive device authorization (unless explicitly forbidden above).
const result = await authWithQwenDeviceFlow(client, config);
if (!result.success) {
// Only emit timeout event if the failure reason is actually timeout
@@ -689,6 +677,19 @@ async function authWithQwenDeviceFlow(
// Cache the new tokens
await cacheQwenCredentials(credentials);

// IMPORTANT:
// SharedTokenManager maintains an in-memory cache and throttles file checks.
// If we only write the creds file here, a subsequent `getQwenOAuthClient()`
// call in the same process (within the throttle window) may not re-read the
// updated file and could incorrectly re-trigger device auth.
// Clearing the cache forces the next call to reload from disk.
try {
SharedTokenManager.getInstance().clearCache();
} catch {
// In unit tests we sometimes mock SharedTokenManager.getInstance() with a
// minimal stub; cache invalidation is best-effort and should not break auth.
}

// Emit auth progress success event
qwenOAuth2Events.emit(
QwenOAuth2Event.AuthProgress,
@@ -847,27 +848,6 @@ async function authWithQwenDeviceFlow(
}
}

async function loadCachedQwenCredentials(
client: QwenOAuth2Client,
): Promise<boolean> {
try {
const keyFile = getQwenCachedCredentialPath();
const creds = await fs.readFile(keyFile, 'utf-8');
const credentials = JSON.parse(creds) as QwenCredentials;
client.setCredentials(credentials);

// Verify that the credentials are still valid
const { token } = await client.getAccessToken();
if (!token) {
return false;
}

return true;
} catch (_) {
return false;
}
}

async function cacheQwenCredentials(credentials: QwenCredentials) {
const filePath = getQwenCachedCredentialPath();
try {
@@ -913,6 +893,14 @@ export async function clearQwenCredentials(): Promise<void> {
}
// Log other errors but don't throw - clearing credentials should be non-critical
console.warn('Warning: Failed to clear cached Qwen credentials:', error);
} finally {
// Also clear SharedTokenManager in-memory cache to prevent stale credentials
// from being reused within the same process after the file is removed.
try {
SharedTokenManager.getInstance().clearCache();
} catch {
// Best-effort; don't fail credential clearing if SharedTokenManager is mocked.
}
}
}
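The new `requireCachedCredentials` option lets non-interactive callers refuse to start the device flow when nothing usable is cached. A minimal caller sketch, grounded in the test above; the Config import path is an assumption:

```ts
import { getQwenOAuthClient } from './qwenOAuth2.js';
import type { Config } from '../../config/config.js'; // import path assumed

// Sketch: a non-interactive caller that must not trigger interactive device auth.
async function getClientFromCacheOnly(config: Config) {
  try {
    return await getQwenOAuthClient(config, { requireCachedCredentials: true });
  } catch (err) {
    // Thrown when neither SharedTokenManager nor the on-disk cache yields credentials:
    // "No cached Qwen-OAuth credentials found. Please re-authenticate."
    console.error(String(err));
    return undefined;
  }
}
```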
@@ -58,6 +58,7 @@ export type {
SubAgentStartEvent,
SubAgentRoundEvent,
SubAgentStreamTextEvent,
SubAgentUsageEvent,
SubAgentToolCallEvent,
SubAgentToolResultEvent,
SubAgentFinishEvent,
@@ -10,7 +10,7 @@ import type {
ToolConfirmationOutcome,
ToolResultDisplay,
} from '../tools/tools.js';
import type { Part } from '@google/genai';
import type { Part, GenerateContentResponseUsageMetadata } from '@google/genai';

export type SubAgentEvent =
| 'start'
@@ -20,6 +20,7 @@ export type SubAgentEvent =
| 'tool_call'
| 'tool_result'
| 'tool_waiting_approval'
| 'usage_metadata'
| 'finish'
| 'error';

@@ -31,6 +32,7 @@ export enum SubAgentEventType {
TOOL_CALL = 'tool_call',
TOOL_RESULT = 'tool_result',
TOOL_WAITING_APPROVAL = 'tool_waiting_approval',
USAGE_METADATA = 'usage_metadata',
FINISH = 'finish',
ERROR = 'error',
}
@@ -57,6 +59,14 @@ export interface SubAgentStreamTextEvent {
timestamp: number;
}

export interface SubAgentUsageEvent {
subagentId: string;
round: number;
usage: GenerateContentResponseUsageMetadata;
durationMs?: number;
timestamp: number;
}

export interface SubAgentToolCallEvent {
subagentId: string;
round: number;
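A consumer can subscribe to the new per-round usage event. The sketch below assumes a Node-style `.on` listener surface; the actual `SubAgentEventEmitter` interface is not shown in this diff, so the emitter is typed structurally here:

```ts
import { SubAgentEventType, type SubAgentUsageEvent } from './subagent-events.js';

// Sketch: log per-round token usage as it is emitted by a subagent.
function logUsage(emitter: {
  on(event: string, cb: (e: SubAgentUsageEvent) => void): void;
}) {
  emitter.on(SubAgentEventType.USAGE_METADATA, (e) => {
    const u = e.usage;
    console.log(
      `[${e.subagentId}] round ${e.round}: ` +
        `${u.promptTokenCount ?? 0} in / ${u.candidatesTokenCount ?? 0} out ` +
        `(${e.durationMs ?? 0} ms)`,
    );
  });
}
```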
@@ -50,6 +50,15 @@ describe('SubagentStatistics', () => {
expect(summary.outputTokens).toBe(600);
expect(summary.totalTokens).toBe(1800);
});

it('should track thought and cached tokens', () => {
stats.recordTokens(100, 50, 10, 5);

const summary = stats.getSummary();
expect(summary.thoughtTokens).toBe(10);
expect(summary.cachedTokens).toBe(5);
expect(summary.totalTokens).toBe(165); // 100 + 50 + 10 + 5
});
});

describe('tool usage statistics', () => {
@@ -93,14 +102,14 @@ describe('SubagentStatistics', () => {
stats.start(baseTime);
stats.setRounds(2);
stats.recordToolCall('file_read', true, 100);
stats.recordTokens(1000, 500);
stats.recordTokens(1000, 500, 20, 10);

const result = stats.formatCompact('Test task', baseTime + 5000);

expect(result).toContain('📋 Task Completed: Test task');
expect(result).toContain('🔧 Tool Usage: 1 calls, 100.0% success');
expect(result).toContain('⏱️ Duration: 5.0s | 🔁 Rounds: 2');
expect(result).toContain('🔢 Tokens: 1,500 (in 1000, out 500)');
expect(result).toContain('🔢 Tokens: 1,530 (in 1000, out 500)');
});

it('should handle zero tool calls', () => {
@@ -23,6 +23,8 @@ export interface SubagentStatsSummary {
successRate: number;
inputTokens: number;
outputTokens: number;
thoughtTokens: number;
cachedTokens: number;
totalTokens: number;
estimatedCost: number;
toolUsage: ToolUsageStats[];
@@ -36,6 +38,8 @@ export class SubagentStatistics {
private failedToolCalls = 0;
private inputTokens = 0;
private outputTokens = 0;
private thoughtTokens = 0;
private cachedTokens = 0;
private toolUsage = new Map<string, ToolUsageStats>();

start(now = Date.now()) {
@@ -74,9 +78,16 @@ export class SubagentStatistics {
this.toolUsage.set(name, tu);
}

recordTokens(input: number, output: number) {
recordTokens(
input: number,
output: number,
thought: number = 0,
cached: number = 0,
) {
this.inputTokens += Math.max(0, input || 0);
this.outputTokens += Math.max(0, output || 0);
this.thoughtTokens += Math.max(0, thought || 0);
this.cachedTokens += Math.max(0, cached || 0);
}

getSummary(now = Date.now()): SubagentStatsSummary {
@@ -86,7 +97,11 @@ export class SubagentStatistics {
totalToolCalls > 0
? (this.successfulToolCalls / totalToolCalls) * 100
: 0;
const totalTokens = this.inputTokens + this.outputTokens;
const totalTokens =
this.inputTokens +
this.outputTokens +
this.thoughtTokens +
this.cachedTokens;
const estimatedCost = this.inputTokens * 3e-5 + this.outputTokens * 6e-5;
return {
rounds: this.rounds,
@@ -97,6 +112,8 @@ export class SubagentStatistics {
successRate,
inputTokens: this.inputTokens,
outputTokens: this.outputTokens,
thoughtTokens: this.thoughtTokens,
cachedTokens: this.cachedTokens,
totalTokens,
estimatedCost,
toolUsage: Array.from(this.toolUsage.values()),
@@ -116,8 +133,12 @@ export class SubagentStatistics {
`⏱️ Duration: ${this.fmtDuration(stats.totalDurationMs)} | 🔁 Rounds: ${stats.rounds}`,
];
if (typeof stats.totalTokens === 'number') {
const parts = [
`in ${stats.inputTokens ?? 0}`,
`out ${stats.outputTokens ?? 0}`,
];
lines.push(
`🔢 Tokens: ${stats.totalTokens.toLocaleString()}${stats.inputTokens || stats.outputTokens ? ` (in ${stats.inputTokens ?? 0}, out ${stats.outputTokens ?? 0})` : ''}`,
`🔢 Tokens: ${stats.totalTokens.toLocaleString()}${parts.length ? ` (${parts.join(', ')})` : ''}`,
);
}
return lines.join('\n');
@@ -152,8 +173,12 @@ export class SubagentStatistics {
`🔧 Tools: ${stats.totalToolCalls} calls, ${sr.toFixed(1)}% success (${stats.successfulToolCalls} ok, ${stats.failedToolCalls} failed)`,
);
if (typeof stats.totalTokens === 'number') {
const parts = [
`in ${stats.inputTokens ?? 0}`,
`out ${stats.outputTokens ?? 0}`,
];
lines.push(
`🔢 Tokens: ${stats.totalTokens.toLocaleString()} (in ${stats.inputTokens ?? 0}, out ${stats.outputTokens ?? 0})`,
`🔢 Tokens: ${stats.totalTokens.toLocaleString()} (${parts.join(', ')})`,
);
}
if (stats.toolUsage && stats.toolUsage.length) {
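A worked example of the extended accounting, matching the updated tests (the no-argument constructor and the import path are assumptions based on how the tests use the class):

```ts
import { SubagentStatistics } from './subagent-statistics.js'; // import path assumed

const stats = new SubagentStatistics();
stats.start();
stats.recordTokens(1000, 500, 20, 10); // input, output, thought, cached

const summary = stats.getSummary();
// totalTokens = 1000 + 500 + 20 + 10 = 1530
// estimatedCost = 1000 * 3e-5 + 500 * 6e-5 = 0.06 (thought/cached tokens are not billed here)
console.log(summary.totalTokens, summary.estimatedCost);
```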
@@ -69,6 +69,8 @@ async function createMockConfig(
targetDir: '.',
debugMode: false,
cwd: process.cwd(),
// Avoid writing any chat recording records from tests (e.g. via tool-call telemetry).
chatRecording: false,
};
const config = new Config(configParams);
await config.initialize();
@@ -41,6 +41,7 @@ import type {
SubAgentToolResultEvent,
SubAgentStreamTextEvent,
SubAgentErrorEvent,
SubAgentUsageEvent,
} from './subagent-events.js';
import {
type SubAgentEventEmitter,
@@ -369,6 +370,7 @@ export class SubAgentScope {
},
};

const roundStreamStart = Date.now();
const responseStream = await chat.sendMessageStream(
this.modelConfig.model ||
this.runtimeContext.getModel() ||
@@ -439,10 +441,19 @@ export class SubAgentScope {
if (lastUsage) {
const inTok = Number(lastUsage.promptTokenCount || 0);
const outTok = Number(lastUsage.candidatesTokenCount || 0);
if (isFinite(inTok) || isFinite(outTok)) {
const thoughtTok = Number(lastUsage.thoughtsTokenCount || 0);
const cachedTok = Number(lastUsage.cachedContentTokenCount || 0);
if (
isFinite(inTok) ||
isFinite(outTok) ||
isFinite(thoughtTok) ||
isFinite(cachedTok)
) {
this.stats.recordTokens(
isFinite(inTok) ? inTok : 0,
isFinite(outTok) ? outTok : 0,
isFinite(thoughtTok) ? thoughtTok : 0,
isFinite(cachedTok) ? cachedTok : 0,
);
// mirror legacy fields for compatibility
this.executionStats.inputTokens =
@@ -453,11 +464,20 @@ export class SubAgentScope {
(isFinite(outTok) ? outTok : 0);
this.executionStats.totalTokens =
(this.executionStats.inputTokens || 0) +
(this.executionStats.outputTokens || 0);
(this.executionStats.outputTokens || 0) +
(isFinite(thoughtTok) ? thoughtTok : 0) +
(isFinite(cachedTok) ? cachedTok : 0);
this.executionStats.estimatedCost =
(this.executionStats.inputTokens || 0) * 3e-5 +
(this.executionStats.outputTokens || 0) * 6e-5;
}
this.eventEmitter?.emit(SubAgentEventType.USAGE_METADATA, {
subagentId: this.subagentId,
round: turnCounter,
usage: lastUsage,
durationMs: Date.now() - roundStreamStart,
timestamp: Date.now(),
} as SubAgentUsageEvent);
}

if (functionCalls.length > 0) {
@@ -249,6 +249,9 @@ export class QwenLogger {
authType === AuthType.USE_OPENAI
? this.config?.getContentGeneratorConfig().baseUrl || ''
: '',
...(this.config?.getChannel?.()
? { channel: this.config.getChannel() }
: {}),
},
_v: `qwen-code@${version}`,
} as RumPayload;
@@ -23,6 +23,12 @@ export type UiEvent =
| (ApiErrorEvent & { 'event.name': typeof EVENT_API_ERROR })
| (ToolCallEvent & { 'event.name': typeof EVENT_TOOL_CALL });

export {
EVENT_API_ERROR,
EVENT_API_RESPONSE,
EVENT_TOOL_CALL,
} from './constants.js';

export interface ToolCallStats {
count: number;
success: number;
@@ -198,6 +198,52 @@ describe('GlobTool', () => {
);
});

it('should find files even if workspace path casing differs from glob results (Windows/macOS)', async () => {
// Only relevant for Windows and macOS
if (process.platform !== 'win32' && process.platform !== 'darwin') {
return;
}

let mismatchedRootDir = tempRootDir;

if (process.platform === 'win32') {
// 1. Create a path with mismatched casing for the workspace root
// e.g., if tempRootDir is "C:\Users\...", make it "c:\Users\..."
const drive = path.parse(tempRootDir).root;
if (!drive || !drive.match(/^[A-Z]:\\/)) {
// Skip if we can't determine/manipulate the drive letter easily
return;
}

const lowerDrive = drive.toLowerCase();
mismatchedRootDir = lowerDrive + tempRootDir.substring(drive.length);
} else {
// macOS: change the casing of the path
if (tempRootDir === tempRootDir.toLowerCase()) {
mismatchedRootDir = tempRootDir.toUpperCase();
} else {
mismatchedRootDir = tempRootDir.toLowerCase();
}
}

// 2. Create a new GlobTool instance with this mismatched root
const mismatchedConfig = {
...mockConfig,
getTargetDir: () => mismatchedRootDir,
getWorkspaceContext: () =>
createMockWorkspaceContext(mismatchedRootDir),
} as unknown as Config;

const mismatchedGlobTool = new GlobTool(mismatchedConfig);

// 3. Execute search
const params: GlobToolParams = { pattern: '*.txt' };
const invocation = mismatchedGlobTool.build(params);
const result = await invocation.execute(abortSignal);

expect(result.llmContent).toContain('Found 2 file(s)');
});

it('should return error if path is outside workspace', async () => {
// Bypassing validation to test execute method directly
vi.spyOn(globTool, 'validateToolParams').mockReturnValue(null);
@@ -134,12 +134,21 @@ class GlobToolInvocation extends BaseToolInvocation<
this.getFileFilteringOptions(),
);

const normalizePathForComparison = (p: string) =>
process.platform === 'win32' || process.platform === 'darwin'
? p.toLowerCase()
: p;

const filteredAbsolutePaths = new Set(
filteredPaths.map((p) => path.resolve(this.config.getTargetDir(), p)),
filteredPaths.map((p) =>
normalizePathForComparison(
path.resolve(this.config.getTargetDir(), p),
),
),
);

const filteredEntries = allEntries.filter((entry) =>
filteredAbsolutePaths.has(entry.fullpath()),
filteredAbsolutePaths.has(normalizePathForComparison(entry.fullpath())),
);

if (!filteredEntries || filteredEntries.length === 0) {
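A small standalone illustration of why this normalization is needed on case-insensitive filesystems: the workspace root and the glob walker can report the same file with different casing, so a plain string comparison silently drops every match. The paths below are illustrative only.

```ts
// Same file, reported with different drive-letter casing (typical on Windows).
const fromWorkspaceRoot = 'c:\\Users\\dev\\project\\README.txt';
const fromGlobEntry = 'C:\\Users\\dev\\project\\README.txt';

const normalize = (p: string) =>
  process.platform === 'win32' || process.platform === 'darwin' ? p.toLowerCase() : p;

console.log(fromWorkspaceRoot === fromGlobEntry); // false: plain string compare misses the match
console.log(normalize(fromWorkspaceRoot) === normalize(fromGlobEntry)); // true on win32/darwin
```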