Mirror of https://github.com/QwenLM/qwen-code.git (synced 2025-12-20 16:57:46 +00:00)

Commit: Auth First Run (#1207)
Co-authored-by: Tommaso Sciortino <sciortino@gmail.com>
Co-authored-by: N. Taylor Mullen <ntaylormullen@google.com>
packages/core/src/code_assist/codeAssist.ts
@@ -4,15 +4,23 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-import { ContentGenerator } from '../core/contentGenerator.js';
+import { AuthType, ContentGenerator } from '../core/contentGenerator.js';
 import { getOauthClient } from './oauth2.js';
 import { setupUser } from './setup.js';
 import { CodeAssistServer, HttpOptions } from './server.js';
 
 export async function createCodeAssistContentGenerator(
   httpOptions: HttpOptions,
+  authType: AuthType,
 ): Promise<ContentGenerator> {
-  const authClient = await getOauthClient();
-  const projectId = await setupUser(authClient);
-  return new CodeAssistServer(authClient, projectId, httpOptions);
+  if (
+    authType === AuthType.LOGIN_WITH_GOOGLE_ENTERPRISE ||
+    authType === AuthType.LOGIN_WITH_GOOGLE_PERSONAL
+  ) {
+    const authClient = await getOauthClient();
+    const projectId = await setupUser(authClient);
+    return new CodeAssistServer(authClient, projectId, httpOptions);
+  }
+
+  throw new Error(`Unsupported authType: ${authType}`);
 }
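The factory now keys off an explicit AuthType instead of an implicit codeAssist flag. A minimal usage sketch (the HttpOptions literal is illustrative):

```ts
import { AuthType } from '../core/contentGenerator.js';
import { createCodeAssistContentGenerator } from './codeAssist.js';

// OAuth-based auth types route to the Code Assist server; any other
// value now fails fast with "Unsupported authType".
const generator = await createCodeAssistContentGenerator(
  { headers: {} }, // illustrative HttpOptions
  AuthType.LOGIN_WITH_GOOGLE_PERSONAL,
);
```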
packages/core/src/code_assist/errors.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { GaxiosError } from 'gaxios';
+
+export function isAuthError(error: unknown): boolean {
+  return (
+    error instanceof GaxiosError && error.response?.data?.error?.code === 401
+  );
+}
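A sketch of the intended use: narrowing an unknown error from a Gaxios-backed request before deciding whether to re-authenticate (the wrapper function itself is hypothetical):

```ts
import { isAuthError } from './errors.js';

// Hypothetical wrapper: rethrow 401s so the caller can re-run the auth flow.
async function withAuthCheck<T>(fn: () => Promise<T>): Promise<T | undefined> {
  try {
    return await fn();
  } catch (error) {
    if (isAuthError(error)) {
      throw error; // surface auth failures to the auth flow
    }
    return undefined; // non-auth failures handled elsewhere in this sketch
  }
}
```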
packages/core/src/code_assist/oauth2.ts
@@ -192,3 +192,11 @@ async function cacheCredentials(credentials: Credentials) {
 function getCachedCredentialPath(): string {
   return path.join(os.homedir(), GEMINI_DIR, CREDENTIAL_FILENAME);
 }
+
+export async function clearCachedCredentialFile() {
+  try {
+    await fs.rm(getCachedCredentialPath());
+  } catch (_) {
+    /* empty */
+  }
+}
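clearCachedCredentialFile intentionally swallows failures (a missing cache file is fine). A plausible recovery path on a 401, sketched with the helpers above (the surrounding control flow and doApiCall are hypothetical):

```ts
import { clearCachedCredentialFile } from './oauth2.js';
import { isAuthError } from './errors.js';

try {
  await doApiCall(); // hypothetical call using cached OAuth credentials
} catch (error) {
  if (isAuthError(error)) {
    // Drop stale credentials so the next getOauthClient() re-runs login.
    await clearCachedCredentialFile();
  }
}
```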
packages/core/src/code_assist/server.ts
@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-import { OAuth2Client } from 'google-auth-library';
+import { AuthClient } from 'google-auth-library';
 import {
   LoadCodeAssistResponse,
   LoadCodeAssistRequest,
@@ -45,7 +45,7 @@ export const CODE_ASSIST_API_VERSION = 'v1internal';
 
 export class CodeAssistServer implements ContentGenerator {
   constructor(
-    readonly auth: OAuth2Client,
+    readonly auth: AuthClient,
     readonly projectId?: string,
     readonly httpOptions: HttpOptions = {},
   ) {}
packages/core/src/config/config.test.ts
@@ -42,6 +42,21 @@ vi.mock('../tools/memoryTool', () => ({
   GEMINI_CONFIG_DIR: '.gemini',
 }));
 
+vi.mock('../core/contentGenerator.js', async (importOriginal) => {
+  const actual =
+    await importOriginal<typeof import('../core/contentGenerator.js')>();
+  return {
+    ...actual,
+    createContentGeneratorConfig: vi.fn(),
+  };
+});
+
+vi.mock('../core/client.js', () => ({
+  GeminiClient: vi.fn().mockImplementation(() => ({
+    // Mock any methods on GeminiClient that might be used.
+  })),
+}));
+
 vi.mock('../telemetry/index.js', async (importOriginal) => {
   const actual = await importOriginal<typeof import('../telemetry/index.js')>();
   return {
@@ -51,7 +66,6 @@ vi.mock('../telemetry/index.js', async (importOriginal) => {
 });
 
 describe('Server Config (config.ts)', () => {
-  const API_KEY = 'server-api-key';
   const MODEL = 'gemini-pro';
   const SANDBOX: SandboxConfig = {
     command: 'docker',
@@ -67,10 +81,6 @@ describe('Server Config (config.ts)', () => {
   const SESSION_ID = 'test-session-id';
   const baseParams: ConfigParameters = {
     cwd: '/tmp',
-    contentGeneratorConfig: {
-      apiKey: API_KEY,
-      model: MODEL,
-    },
     embeddingModel: EMBEDDING_MODEL,
     sandbox: SANDBOX,
     targetDir: TARGET_DIR,
@@ -80,6 +90,7 @@ describe('Server Config (config.ts)', () => {
     userMemory: USER_MEMORY,
     telemetry: TELEMETRY_SETTINGS,
     sessionId: SESSION_ID,
+    model: MODEL,
   };
 
   beforeEach(() => {
@@ -87,6 +98,32 @@ describe('Server Config (config.ts)', () => {
     vi.clearAllMocks();
   });
 
+  // I can't get vi mocking to import in core, only in cli. Can't fix it now.
+  // describe('refreshAuth', () => {
+  //   it('should refresh auth and update config', async () => {
+  //     const config = new Config(baseParams);
+  //     const newModel = 'gemini-ultra';
+  //     const authType = AuthType.USE_GEMINI;
+  //     const mockContentConfig = {
+  //       model: newModel,
+  //       apiKey: 'test-key',
+  //     };
+
+  //     (createContentGeneratorConfig as vi.Mock).mockResolvedValue(
+  //       mockContentConfig,
+  //     );
+
+  //     await config.refreshAuth(authType);
+
+  //     expect(createContentGeneratorConfig).toHaveBeenCalledWith(
+  //       newModel,
+  //       authType,
+  //     );
+  //     expect(config.getContentGeneratorConfig()).toEqual(mockContentConfig);
+  //     expect(GeminiClient).toHaveBeenCalledWith(config);
+  //   });
+  // });
+
   it('Config constructor should store userMemory correctly', () => {
     const config = new Config(baseParams);
packages/core/src/config/config.ts
@@ -6,7 +6,11 @@
 
 import * as path from 'node:path';
 import process from 'node:process';
-import { ContentGeneratorConfig } from '../core/contentGenerator.js';
+import {
+  AuthType,
+  ContentGeneratorConfig,
+  createContentGeneratorConfig,
+} from '../core/contentGenerator.js';
 import { ToolRegistry } from '../tools/tool-registry.js';
 import { LSTool } from '../tools/ls.js';
 import { ReadFileTool } from '../tools/read-file.js';
@@ -80,7 +84,6 @@ export interface SandboxConfig {
 
 export interface ConfigParameters {
   sessionId: string;
-  contentGeneratorConfig: ContentGeneratorConfig;
   embeddingModel?: string;
   sandbox?: SandboxConfig;
   targetDir: string;
@@ -106,12 +109,13 @@ export interface ConfigParameters {
   cwd: string;
   fileDiscoveryService?: FileDiscoveryService;
   bugCommand?: BugCommandSettings;
+  model: string;
 }
 
 export class Config {
   private toolRegistry: Promise<ToolRegistry>;
   private readonly sessionId: string;
-  private readonly contentGeneratorConfig: ContentGeneratorConfig;
+  private contentGeneratorConfig!: ContentGeneratorConfig;
   private readonly embeddingModel: string;
   private readonly sandbox: SandboxConfig | undefined;
   private readonly targetDir: string;
@@ -130,7 +134,7 @@ export class Config {
   private readonly showMemoryUsage: boolean;
   private readonly accessibility: AccessibilitySettings;
   private readonly telemetrySettings: TelemetrySettings;
-  private readonly geminiClient: GeminiClient;
+  private geminiClient!: GeminiClient;
   private readonly fileFilteringRespectGitIgnore: boolean;
   private fileDiscoveryService: FileDiscoveryService | null = null;
   private gitService: GitService | undefined = undefined;
@@ -138,10 +142,10 @@ export class Config {
   private readonly proxy: string | undefined;
   private readonly cwd: string;
   private readonly bugCommand: BugCommandSettings | undefined;
+  private readonly model: string;
 
   constructor(params: ConfigParameters) {
     this.sessionId = params.sessionId;
-    this.contentGeneratorConfig = params.contentGeneratorConfig;
     this.embeddingModel =
       params.embeddingModel ?? DEFAULT_GEMINI_EMBEDDING_MODEL;
     this.sandbox = params.sandbox;
@@ -174,12 +178,12 @@ export class Config {
     this.cwd = params.cwd ?? process.cwd();
     this.fileDiscoveryService = params.fileDiscoveryService ?? null;
     this.bugCommand = params.bugCommand;
+    this.model = params.model;
 
     if (params.contextFileName) {
      setGeminiMdFilename(params.contextFileName);
     }
 
-    this.geminiClient = new GeminiClient(this);
     this.toolRegistry = createToolRegistry(this);
 
     if (this.telemetrySettings.enabled) {
@@ -187,6 +191,19 @@ export class Config {
     }
   }
 
+  async refreshAuth(authMethod: AuthType) {
+    const contentConfig = await createContentGeneratorConfig(
+      this.getModel(),
+      authMethod,
+    );
+
+    const gc = new GeminiClient(this);
+    await gc.initialize(contentConfig);
+
+    this.contentGeneratorConfig = contentConfig;
+    this.geminiClient = gc;
+  }
+
   getSessionId(): string {
     return this.sessionId;
   }
@@ -196,7 +213,7 @@ export class Config {
   }
 
   getModel(): string {
-    return this.contentGeneratorConfig.model;
+    return this.contentGeneratorConfig?.model || this.model;
   }
 
   getEmbeddingModel(): string {
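This refreshAuth method is the heart of the first-run change: a Config can now be constructed without any ContentGeneratorConfig, and auth is wired up only after the user picks a method. A sketch of the intended sequence (baseParams is a placeholder for the remaining ConfigParameters):

```ts
import { AuthType } from '../core/contentGenerator.js';

// Built without a ContentGeneratorConfig; `model` now comes from params.
const config = new Config({ ...baseParams, model: 'gemini-pro' });

// First run: after the user selects an auth method in the CLI dialog.
await config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE_PERSONAL);

// getModel() prefers the refreshed contentGeneratorConfig.model, and
// falls back to the constructor-supplied model before refreshAuth runs.
config.getModel();
```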
@@ -22,6 +22,8 @@ When requested to perform tasks like fixing bugs, adding features, refactoring,
 1. **Understand:** Think about the user's request and the relevant codebase context. Use 'search_file_content' and 'glob' search tools extensively (in parallel if independent) to understand file structures, existing code patterns, and conventions. Use 'read_file' and 'read_many_files' to understand context and validate any assumptions you may have.
 2. **Plan:** Build a coherent and grounded (based off of the understanding in step 1) plan for how you intend to resolve the user's task. Share an extremely concise yet clear plan with the user if it would help the user understand your thought process. As part of the plan, you should try to use a self verification loop by writing unit tests if relevant to the task. Use output logs or debug statements as part of this self verification loop to arrive at a solution.
 3. **Implement:** Use the available tools (e.g., 'replace', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
+4. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
+5. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
 
 ## New Applications
 
The same two workflow steps are added, verbatim, at eight more offsets in this file:
@@ -200,6 +202,8 @@, @@ -388,6 +392,8 @@, @@ -561,6 +567,8 @@, @@ -734,6 +742,8 @@,
@@ -907,6 +917,8 @@, @@ -1080,6 +1092,8 @@, @@ -1253,6 +1267,8 @@, @@ -1426,6 +1442,8 @@
packages/core/src/core/client.test.ts
@@ -13,7 +13,7 @@ import {
   GoogleGenAI,
 } from '@google/genai';
 import { GeminiClient } from './client.js';
-import { ContentGenerator } from './contentGenerator.js';
+import { AuthType, ContentGenerator } from './contentGenerator.js';
 import { GeminiChat } from './geminiChat.js';
 import { Config } from '../config/config.js';
 import { Turn } from './turn.js';
@@ -102,13 +102,17 @@ describe('Gemini Client (client.ts)', () => {
   };
   const fileService = new FileDiscoveryService('/test/dir');
   const MockedConfig = vi.mocked(Config, true);
+  const contentGeneratorConfig = {
+    model: 'test-model',
+    apiKey: 'test-key',
+    vertexai: false,
+    authType: AuthType.USE_GEMINI,
+  };
   MockedConfig.mockImplementation(() => {
     const mock = {
-      getContentGeneratorConfig: vi.fn().mockReturnValue({
-        model: 'test-model',
-        apiKey: 'test-key',
-        vertexai: false,
-      }),
+      getContentGeneratorConfig: vi
+        .fn()
+        .mockReturnValue(contentGeneratorConfig),
       getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry),
       getModel: vi.fn().mockReturnValue('test-model'),
       getEmbeddingModel: vi.fn().mockReturnValue('test-embedding-model'),
@@ -131,7 +135,7 @@ describe('Gemini Client (client.ts)', () => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const mockConfig = new Config({} as any);
     client = new GeminiClient(mockConfig);
-    await client.initialize();
+    await client.initialize(contentGeneratorConfig);
   });
 
   afterEach(() => {
packages/core/src/core/client.ts
@@ -33,6 +33,7 @@ import { getErrorMessage } from '../utils/errors.js';
 import { tokenLimit } from './tokenLimits.js';
 import {
   ContentGenerator,
+  ContentGeneratorConfig,
   createContentGenerator,
 } from './contentGenerator.js';
 import { ProxyAgent, setGlobalDispatcher } from 'undici';
@@ -63,12 +64,18 @@ export class GeminiClient {
     this.embeddingModel = config.getEmbeddingModel();
   }
 
-  async initialize() {
+  async initialize(contentGeneratorConfig: ContentGeneratorConfig) {
     this.contentGenerator = await createContentGenerator(
-      this.config.getContentGeneratorConfig(),
+      contentGeneratorConfig,
     );
     this.chat = await this.startChat();
   }
+  private getContentGenerator(): ContentGenerator {
+    if (!this.contentGenerator) {
+      throw new Error('Content generator not initialized');
+    }
+    return this.contentGenerator;
+  }
 
   async addHistory(content: Content) {
     this.getChat().addHistory(content);
@@ -81,13 +88,6 @@ export class GeminiClient {
     return this.chat;
   }
 
-  private getContentGenerator(): ContentGenerator {
-    if (!this.contentGenerator) {
-      throw new Error('Content generator not initialized');
-    }
-    return this.contentGenerator;
-  }
-
   async getHistory(): Promise<Content[]> {
     return this.getChat().getHistory();
   }
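GeminiClient no longer reads its generator config off Config during initialize; the caller passes it explicitly, which is what lets refreshAuth build and fully initialize a fresh client before swapping it in. Sketch (the config values are placeholders):

```ts
import { GeminiClient } from './client.js';
import { AuthType, ContentGeneratorConfig } from './contentGenerator.js';

const contentGeneratorConfig: ContentGeneratorConfig = {
  model: 'test-model', // placeholder
  apiKey: 'test-key', // placeholder
  authType: AuthType.USE_GEMINI,
};

const client = new GeminiClient(config); // `config` is an existing Config
await client.initialize(contentGeneratorConfig);
```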
packages/core/src/core/contentGenerator.test.ts
@@ -5,7 +5,7 @@
  */
 
 import { describe, it, expect, vi } from 'vitest';
-import { createContentGenerator } from './contentGenerator.js';
+import { createContentGenerator, AuthType } from './contentGenerator.js';
 import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
 import { GoogleGenAI } from '@google/genai';
@@ -20,7 +20,7 @@ describe('contentGenerator', () => {
     );
     const generator = await createContentGenerator({
       model: 'test-model',
-      codeAssist: true,
+      authType: AuthType.LOGIN_WITH_GOOGLE_PERSONAL,
     });
     expect(createCodeAssistContentGenerator).toHaveBeenCalled();
     expect(generator).toBe(mockGenerator);
@@ -34,6 +34,7 @@ describe('contentGenerator', () => {
     const generator = await createContentGenerator({
       model: 'test-model',
       apiKey: 'test-api-key',
+      authType: AuthType.USE_GEMINI,
     });
     expect(GoogleGenAI).toHaveBeenCalledWith({
       apiKey: 'test-api-key',
packages/core/src/core/contentGenerator.ts
@@ -14,6 +14,8 @@ import {
   GoogleGenAI,
 } from '@google/genai';
 import { createCodeAssistContentGenerator } from '../code_assist/codeAssist.js';
+import { DEFAULT_GEMINI_MODEL } from '../config/models.js';
+import { getEffectiveModel } from './modelCheck.js';
 
 /**
  * Interface abstracting the core functionalities for generating content and counting tokens.
@@ -32,13 +34,77 @@ export interface ContentGenerator {
   embedContent(request: EmbedContentParameters): Promise<EmbedContentResponse>;
 }
 
+export enum AuthType {
+  LOGIN_WITH_GOOGLE_PERSONAL = 'oauth-personal',
+  LOGIN_WITH_GOOGLE_ENTERPRISE = 'oauth-enterprise',
+  USE_GEMINI = 'gemini-api-key',
+  USE_VERTEX_AI = 'vertex-ai',
+}
+
 export type ContentGeneratorConfig = {
   model: string;
   apiKey?: string;
   vertexai?: boolean;
-  codeAssist?: boolean;
+  authType?: AuthType | undefined;
 };
 
+export async function createContentGeneratorConfig(
+  model: string | undefined,
+  authType: AuthType | undefined,
+): Promise<ContentGeneratorConfig> {
+  const geminiApiKey = process.env.GEMINI_API_KEY;
+  const googleApiKey = process.env.GOOGLE_API_KEY;
+  const googleCloudProject = process.env.GOOGLE_CLOUD_PROJECT;
+  const googleCloudLocation = process.env.GOOGLE_CLOUD_LOCATION;
+
+  const contentGeneratorConfig: ContentGeneratorConfig = {
+    model: model || DEFAULT_GEMINI_MODEL,
+    authType,
+  };
+
+  // If we are using Google auth there is nothing else to validate for now.
+  if (authType === AuthType.LOGIN_WITH_GOOGLE_PERSONAL) {
+    return contentGeneratorConfig;
+  }
+
+  // If it's enterprise, make sure we have a cloud project.
+  if (
+    authType === AuthType.LOGIN_WITH_GOOGLE_ENTERPRISE &&
+    !!googleCloudProject
+  ) {
+    return contentGeneratorConfig;
+  }
+
+  //
+  if (authType === AuthType.USE_GEMINI && geminiApiKey) {
+    contentGeneratorConfig.apiKey = geminiApiKey;
+    contentGeneratorConfig.model = await getEffectiveModel(
+      contentGeneratorConfig.apiKey,
+      contentGeneratorConfig.model,
+    );
+
+    return contentGeneratorConfig;
+  }
+
+  if (
+    authType === AuthType.USE_VERTEX_AI &&
+    !!googleApiKey &&
+    googleCloudProject &&
+    googleCloudLocation
+  ) {
+    contentGeneratorConfig.apiKey = googleApiKey;
+    contentGeneratorConfig.vertexai = true;
+    contentGeneratorConfig.model = await getEffectiveModel(
+      contentGeneratorConfig.apiKey,
+      contentGeneratorConfig.model,
+    );
+
+    return contentGeneratorConfig;
+  }
+
+  return contentGeneratorConfig;
+}
+
 export async function createContentGenerator(
   config: ContentGeneratorConfig,
 ): Promise<ContentGenerator> {
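createContentGeneratorConfig resolves the effective config from the chosen auth type plus environment variables; combinations that fail validation (e.g. USE_GEMINI without GEMINI_API_KEY) simply fall through and return the bare config. A usage sketch (assumes GEMINI_API_KEY is set):

```ts
const generatorConfig = await createContentGeneratorConfig(
  undefined, // model: falls back to DEFAULT_GEMINI_MODEL
  AuthType.USE_GEMINI,
);
// generatorConfig.apiKey is now populated from GEMINI_API_KEY, and
// generatorConfig.model may have been downgraded by getEffectiveModel()
// if the default pro model is currently rate-limited.
```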
@@ -48,13 +114,27 @@ export async function createContentGenerator(
       'User-Agent': `GeminiCLI/${version}/(${process.platform}; ${process.arch})`,
     },
   };
-  if (config.codeAssist) {
-    return await createCodeAssistContentGenerator(httpOptions);
+  if (
+    config.authType === AuthType.LOGIN_WITH_GOOGLE_PERSONAL ||
+    config.authType === AuthType.LOGIN_WITH_GOOGLE_ENTERPRISE
+  ) {
+    return createCodeAssistContentGenerator(httpOptions, config.authType);
   }
-  const googleGenAI = new GoogleGenAI({
-    apiKey: config.apiKey === '' ? undefined : config.apiKey,
-    vertexai: config.vertexai,
-    httpOptions,
-  });
-  return googleGenAI.models;
+
+  if (
+    config.authType === AuthType.USE_GEMINI ||
+    config.authType === AuthType.USE_VERTEX_AI
+  ) {
+    const googleGenAI = new GoogleGenAI({
+      apiKey: config.apiKey === '' ? undefined : config.apiKey,
+      vertexai: config.vertexai,
+      httpOptions,
+    });
+
+    return googleGenAI.models;
+  }
+
+  throw new Error(
+    `Error creating contentGenerator: Unsupported authType: ${config.authType}`,
+  );
 }
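With the codeAssist flag gone, createContentGenerator dispatches purely on authType. A compact summary of the new routing (sketch):

```ts
const generator = await createContentGenerator(generatorConfig);
// LOGIN_WITH_GOOGLE_PERSONAL / _ENTERPRISE -> CodeAssistServer (OAuth)
// USE_GEMINI / USE_VERTEX_AI               -> googleGenAI.models (API key)
// anything else                            -> throws "Unsupported authType"
```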
packages/core/src/core/modelCheck.ts (new file, 68 lines)
@@ -0,0 +1,68 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import {
+  DEFAULT_GEMINI_MODEL,
+  DEFAULT_GEMINI_FLASH_MODEL,
+} from '@gemini-cli/core';
+
+/**
+ * Checks if the default "pro" model is rate-limited and returns a fallback "flash"
+ * model if necessary. This function is designed to be silent.
+ * @param apiKey The API key to use for the check.
+ * @param currentConfiguredModel The model currently configured in settings.
+ * @returns The model to use: the fallback model if a switch happened,
+ *   otherwise the originally configured model.
+ */
+export async function getEffectiveModel(
+  apiKey: string,
+  currentConfiguredModel: string,
+): Promise<string> {
+  if (currentConfiguredModel !== DEFAULT_GEMINI_MODEL) {
+    // Only check if the user is trying to use the specific pro model we want to fall back from.
+    return currentConfiguredModel;
+  }
+
+  const modelToTest = DEFAULT_GEMINI_MODEL;
+  const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL;
+  const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelToTest}:generateContent?key=${apiKey}`;
+  const body = JSON.stringify({
+    contents: [{ parts: [{ text: 'test' }] }],
+    generationConfig: {
+      maxOutputTokens: 1,
+      temperature: 0,
+      topK: 1,
+      thinkingConfig: { thinkingBudget: 0, includeThoughts: false },
+    },
+  });
+
+  const controller = new AbortController();
+  const timeoutId = setTimeout(() => controller.abort(), 2000); // 2s timeout for the request
+
+  try {
+    const response = await fetch(endpoint, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body,
+      signal: controller.signal,
+    });
+
+    clearTimeout(timeoutId);
+
+    if (response.status === 429) {
+      console.log(
+        `[INFO] Your configured model (${modelToTest}) was temporarily unavailable. Switched to ${fallbackModel} for this session.`,
+      );
+      return fallbackModel;
+    }
+    // For any other case (success, other error codes), we stick to the original model.
+    return currentConfiguredModel;
+  } catch (_error) {
+    clearTimeout(timeoutId);
+    // On timeout or any other fetch error, stick to the original model.
+    return currentConfiguredModel;
+  }
+}
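getEffectiveModel only probes when the configured model is exactly DEFAULT_GEMINI_MODEL, and it never throws: a 429 silently selects the flash fallback, while any other outcome (including the 2s timeout) keeps the configured model. Usage sketch:

```ts
import { DEFAULT_GEMINI_MODEL } from '@gemini-cli/core';
import { getEffectiveModel } from './modelCheck.js';

const apiKey = process.env.GEMINI_API_KEY ?? '';
// Returns DEFAULT_GEMINI_FLASH_MODEL on a 429, otherwise the input model.
const model = await getEffectiveModel(apiKey, DEFAULT_GEMINI_MODEL);
```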
packages/core/src/core/prompts.ts
@@ -58,6 +58,8 @@ When requested to perform tasks like fixing bugs, adding features, refactoring,
 1. **Understand:** Think about the user's request and the relevant codebase context. Use '${GrepTool.Name}' and '${GlobTool.Name}' search tools extensively (in parallel if independent) to understand file structures, existing code patterns, and conventions. Use '${ReadFileTool.Name}' and '${ReadManyFilesTool.Name}' to understand context and validate any assumptions you may have.
 2. **Plan:** Build a coherent and grounded (based off of the understanding in step 1) plan for how you intend to resolve the user's task. Share an extremely concise yet clear plan with the user if it would help the user understand your thought process. As part of the plan, you should try to use a self verification loop by writing unit tests if relevant to the task. Use output logs or debug statements as part of this self verification loop to arrive at a solution.
 3. **Implement:** Use the available tools (e.g., '${EditTool.Name}', '${WriteFileTool.Name}' '${ShellTool.Name}' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
+4. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
+5. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
 
 ## New Applications
 
packages/core/src/core/turn.ts
@@ -20,6 +20,7 @@ import { getResponseText } from '../utils/generateContentResponseUtilities.js';
 import { reportError } from '../utils/errorReporting.js';
 import { getErrorMessage } from '../utils/errors.js';
 import { GeminiChat } from './geminiChat.js';
+import { isAuthError } from '../code_assist/errors.js';
 
 // Define a structure for tools passed to the server
 export interface ServerTool {
@@ -222,6 +223,9 @@ export class Turn {
         };
       }
     } catch (error) {
+      if (isAuthError(error)) {
+        throw error;
+      }
       if (signal.aborted) {
         yield { type: GeminiEventType.UserCancelled };
         // Regular cancellation error, fail gracefully.
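Rethrowing lets 401s escape the turn loop instead of being reported as ordinary turn errors, so a caller further up the stack can reset auth. A hypothetical consumer (the event handler and the turn.run call shape are assumptions):

```ts
import { isAuthError, clearCachedCredentialFile } from '@gemini-cli/core';

try {
  for await (const event of turn.run(request, signal)) {
    handleEvent(event); // hypothetical event handler
  }
} catch (error) {
  if (isAuthError(error)) {
    await clearCachedCredentialFile();
    // ...re-run the auth selection dialog, then retry...
  }
}
```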
packages/core/src/index.ts
@@ -20,6 +20,8 @@ export * from './core/coreToolScheduler.js';
 export * from './core/nonInteractiveToolExecutor.js';
 
 export * from './code_assist/codeAssist.js';
+export * from './code_assist/oauth2.js';
+export * from './code_assist/errors.js';
 
 // Export utilities
 export * from './utils/paths.js';
packages/core/src/telemetry/loggers.test.ts
@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-import { ToolConfirmationOutcome } from '../index.js';
+import { AuthType, ToolConfirmationOutcome } from '../index.js';
 import { logs } from '@opentelemetry/api-logs';
 import { SemanticAttributes } from '@opentelemetry/semantic-conventions';
 import { Config } from '../config/config.js';
@@ -57,8 +57,7 @@ describe('loggers', () => {
       getContentGeneratorConfig: () => ({
         model: 'test-model',
         apiKey: 'test-api-key',
         vertexai: true,
-        codeAssist: false,
+        authType: AuthType.USE_VERTEX_AI,
       }),
       getTelemetryLogPromptsEnabled: () => true,
       getFileFilteringRespectGitIgnore: () => true,
@@ -86,7 +85,6 @@ describe('loggers', () => {
       approval_mode: 'default',
       api_key_enabled: true,
       vertex_ai_enabled: true,
-      code_assist_enabled: false,
       log_user_prompts_enabled: true,
       file_filtering_respect_git_ignore: true,
       debug_mode: true,
packages/core/src/telemetry/loggers.ts
@@ -35,6 +35,7 @@ import {
   GenerateContentResponse,
   GenerateContentResponseUsageMetadata,
 } from '@google/genai';
+import { AuthType } from '../core/contentGenerator.js';
 
 const shouldLogUserPrompts = (config: Config): boolean =>
   config.getTelemetryLogPromptsEnabled() ?? false;
@@ -72,6 +73,14 @@ export function logCliConfiguration(config: Config): void {
   if (!isTelemetrySdkInitialized()) return;
 
   const generatorConfig = config.getContentGeneratorConfig();
+  let useGemini = false;
+  let useVertex = false;
+
+  if (generatorConfig && generatorConfig.authType) {
+    useGemini = generatorConfig.authType === AuthType.USE_GEMINI;
+    useVertex = generatorConfig.authType === AuthType.USE_VERTEX_AI;
+  }
+
   const mcpServers = config.getMcpServers();
   const attributes: LogAttributes = {
     ...getCommonAttributes(config),
@@ -82,9 +91,8 @@ export function logCliConfiguration(config: Config): void {
     sandbox_enabled: !!config.getSandbox(),
     core_tools_enabled: (config.getCoreTools() ?? []).join(','),
     approval_mode: config.getApprovalMode(),
-    api_key_enabled: !!generatorConfig.apiKey,
-    vertex_ai_enabled: !!generatorConfig.vertexai,
-    code_assist_enabled: !!generatorConfig.codeAssist,
+    api_key_enabled: useGemini || useVertex,
+    vertex_ai_enabled: useVertex,
     log_user_prompts_enabled: config.getTelemetryLogPromptsEnabled(),
     file_filtering_respect_git_ignore:
       config.getFileFilteringRespectGitIgnore(),
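The telemetry flags are now derived from authType rather than read off raw config fields, so code_assist_enabled disappears and api_key_enabled means "an API-key-based auth type is active". The derivation, condensed:

```ts
const useGemini = generatorConfig?.authType === AuthType.USE_GEMINI;
const useVertex = generatorConfig?.authType === AuthType.USE_VERTEX_AI;
// api_key_enabled: useGemini || useVertex; vertex_ai_enabled: useVertex
```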
@@ -27,9 +27,7 @@ describe('telemetry', () => {
 
     mockConfig = new Config({
       sessionId: 'test-session-id',
-      contentGeneratorConfig: {
-        model: 'test-model',
-      },
+      model: 'test-model',
       targetDir: '/test/dir',
       debugMode: false,
       cwd: '/test/dir',
@@ -125,11 +125,7 @@ class MockTool extends BaseTool<{ param: string }, ToolResult> {
 
 const baseConfigParams: ConfigParameters = {
   cwd: '/tmp',
-  contentGeneratorConfig: {
-    model: 'test-model',
-    apiKey: 'test-api-key',
-    vertexai: false,
-  },
+  model: 'test-model',
   embeddingModel: 'test-embedding-model',
   sandbox: undefined,
   targetDir: '/test/dir',