Mirror of https://github.com/QwenLM/qwen-code.git, synced 2025-12-24 10:39:17 +00:00

Compare commits: fix/trimen ... v0.0.6 (1 commit, da3addae2e)

.github/workflows/build-and-publish-image.yml (vendored, 65 lines changed)
@@ -1,65 +0,0 @@
name: Build and Publish Docker Image

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch:
    inputs:
      publish:
        description: 'Publish to GHCR (only works on main branch)'
        type: boolean
        default: false

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-to-ghcr:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha,prefix=sha-,format=short

      - name: Log in to the Container registry
        if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image
        id: build-and-push
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v') || github.event.inputs.publish == 'true') }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            CLI_VERSION_ARG=${{ github.sha }}
@@ -24,7 +24,7 @@ jobs:
          ISSUE_TITLE: ${{ github.event.issue.title }}
          ISSUE_BODY: ${{ github.event.issue.body }}
        with:
          version: 0.0.6
          version: 0.0.5
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          settings_json: |
            {

@@ -42,7 +42,7 @@ jobs:
          ISSUES_TO_TRIAGE: ${{ steps.find_issues.outputs.issues_to_triage }}
          REPOSITORY: ${{ github.repository }}
        with:
          version: 0.0.6
          version: 0.0.5
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
          OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
Dockerfile (55 lines changed)
@@ -1,31 +1,3 @@
# Build stage
FROM docker.io/library/node:20-slim AS builder

# Install build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    make \
    g++ \
    git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Set up npm global package folder
RUN mkdir -p /usr/local/share/npm-global
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin

# Copy source code
COPY . /home/node/app
WORKDIR /home/node/app

# Install dependencies and build packages
RUN npm ci \
    && npm run build --workspaces \
    && npm pack -w @qwen-code/qwen-code --pack-destination ./packages/cli/dist \
    && npm pack -w @qwen-code/qwen-code-core --pack-destination ./packages/core/dist

# Runtime stage
FROM docker.io/library/node:20-slim

ARG SANDBOX_NAME="qwen-code-sandbox"

@@ -33,9 +5,11 @@ ARG CLI_VERSION_ARG
ENV SANDBOX="$SANDBOX_NAME"
ENV CLI_VERSION=$CLI_VERSION_ARG

# Install runtime dependencies
# install minimal set of packages, then clean up
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    make \
    g++ \
    man-db \
    curl \
    dnsutils \

@@ -55,19 +29,22 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Set up npm global package folder
RUN mkdir -p /usr/local/share/npm-global
# set up npm global package folder under /usr/local/share
# give it to non-root user node, already set up in base image
RUN mkdir -p /usr/local/share/npm-global \
    && chown -R node:node /usr/local/share/npm-global
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin

# Copy built packages from builder stage
COPY --from=builder /home/node/app/packages/cli/dist/*.tgz /tmp/
COPY --from=builder /home/node/app/packages/core/dist/*.tgz /tmp/
# switch to non-root user node
USER node

# Install built packages globally
RUN npm install -g /tmp/*.tgz \
# install qwen-code and clean up
COPY packages/cli/dist/qwen-code-*.tgz /usr/local/share/npm-global/qwen-code.tgz
COPY packages/core/dist/qwen-code-qwen-code-core-*.tgz /usr/local/share/npm-global/qwen-code-core.tgz
RUN npm install -g /usr/local/share/npm-global/qwen-code.tgz /usr/local/share/npm-global/qwen-code-core.tgz \
    && npm cache clean --force \
    && rm -rf /tmp/*.tgz
    && rm -f /usr/local/share/npm-global/qwen-{code,code-core}.tgz

# Default entrypoint when none specified
CMD ["qwen"]
# default entrypoint when none specified
CMD ["qwen"]
@@ -56,7 +56,7 @@ find initiatives that interest you.
Gemini CLI is an open-source project, and we welcome contributions from the community! Whether you're a developer, a designer, or just an enthusiastic user you can find our [Community Guidelines here](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) to learn how to get started. There are many ways to get involved:

- **Roadmap:** Please review and find areas in our [roadmap](https://github.com/google-gemini/gemini-cli/issues/4191) that you would like to contribute to. Contributions based on this will be easiest to integrate with.
- **Report Bugs:** If you find an issue, please create a [bug](https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml) with as much detail as possible. If you believe it is a critical breaking issue preventing direct CLI usage, please tag it as `priority/p0`.
- **Report Bugs:** If you find an issue, please create a bug(https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml) with as much detail as possible. If you believe it is a critical breaking issue preventing direct CLI usage, please tag it as `priorty/p0`.
- **Suggest Features:** Have a great idea? We'd love to hear it! Open a [feature request](https://github.com/google-gemini/gemini-cli/issues/new?template=feature_request.yml).
- **Contribute Code:** Check out our [CONTRIBUTING.md](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) file for guidelines on how to submit pull requests. We have a list of "good first issues" for new contributors.
- **Write Documentation:** Help us improve our documentation, tutorials, and examples.
@@ -27,9 +27,6 @@ Slash commands provide meta-level control over the CLI itself.
  - **Usage:** `/chat resume <tag>`
  - **`list`**
    - **Description:** Lists available tags for chat state resumption.
  - **`delete`**
    - **Description:** Deletes a saved conversation checkpoint.
    - **Usage:** `/chat delete <tag>`

- **`/clear`**
  - **Description:** Clear the terminal screen, including the visible session history and scrollback within the CLI. The underlying session data (for history recall) might be preserved depending on the exact implementation, but the visual display is cleared.

@@ -52,17 +49,6 @@ Slash commands provide meta-level control over the CLI itself.
  - **Description:** Display all directories added by `/directory add` and `--include-directories`.
  - **Usage:** `/directory show`

- **`/directory`** (or **`/dir`**)
  - **Description:** Manage workspace directories for multi-directory support.
  - **Sub-commands:**
    - **`add`**:
      - **Description:** Add a directory to the workspace. The path can be absolute or relative to the current working directory. Moreover, the reference from home directory is supported as well.
      - **Usage:** `/directory add <path1>,<path2>`
      - **Note:** Disabled in restrictive sandbox profiles. If you're using that, use `--include-directories` when starting the session instead.
    - **`show`**:
      - **Description:** Display all directories added by `/directory add` and `--include-directories`.
      - **Usage:** `/directory show`

- **`/editor`**
  - **Description:** Open a dialog for selecting supported editors.
@@ -267,7 +253,7 @@ Please generate a Conventional Commit message based on the following git diff:

```diff
!{git diff --staged}
```
````

"""

@@ -288,7 +274,7 @@ First, ensure the user commands directory exists, then create a `refactor` subdi
```bash
mkdir -p ~/.gemini/commands/refactor
touch ~/.gemini/commands/refactor/pure.toml
```
````

**2. Add the content to the file:**
@@ -248,26 +248,6 @@ In addition to a project settings file, a project's `.gemini` directory can cont
  "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"]
```

- **`includeDirectories`** (array of strings):
  - **Description:** Specifies an array of additional absolute or relative paths to include in the workspace context. This allows you to work with files across multiple directories as if they were one. Paths can use `~` to refer to the user's home directory. This setting can be combined with the `--include-directories` command-line flag.
  - **Default:** `[]`
  - **Example:**
    ```json
    "includeDirectories": [
      "/path/to/another/project",
      "../shared-library",
      "~/common-utils"
    ]
    ```

- **`loadMemoryFromIncludeDirectories`** (boolean):
  - **Description:** Controls the behavior of the `/memory refresh` command. If set to `true`, `QWEN.md` files should be loaded from all directories that are added. If set to `false`, `QWEN.md` should only be loaded from the current directory.
  - **Default:** `false`
  - **Example:**
    ```json
    "loadMemoryFromIncludeDirectories": true
    ```

### Example `settings.json`:

```json
@@ -300,9 +280,7 @@ In addition to a project settings file, a project's `.gemini` directory can cont
      "tokenBudget": 100
    }
  },
  "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"],
  "includeDirectories": ["path/to/dir1", "~/path/to/dir2", "../path/to/dir3"],
  "loadMemoryFromIncludeDirectories": true
  "excludedProjectEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"]
}
```
@@ -15,11 +15,9 @@ The Gemini CLI core (`packages/core`) features a robust system for defining, reg
  - `execute()`: The core method that performs the tool's action and returns a `ToolResult`.

- **`ToolResult` (`tools.ts`):** An interface defining the structure of a tool's execution outcome:
  - `llmContent`: The factual content to be included in the history sent back to the LLM for context. This can be a simple string or a `PartListUnion` (an array of `Part` objects and strings) for rich content.
  - `llmContent`: The factual string content to be included in the history sent back to the LLM for context.
  - `returnDisplay`: A user-friendly string (often Markdown) or a special object (like `FileDiff`) for display in the CLI.

- **Returning Rich Content:** Tools are not limited to returning simple text. The `llmContent` can be a `PartListUnion`, which is an array that can contain a mix of `Part` objects (for images, audio, etc.) and `string`s. This allows a single tool execution to return multiple pieces of rich content.

- **Tool Registry (`tool-registry.ts`):** A class (`ToolRegistry`) responsible for:
  - **Registering Tools:** Holding a collection of all available built-in tools (e.g., `ReadFileTool`, `ShellTool`).
  - **Discovering Tools:** It can also discover tools dynamically:
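Note: a minimal sketch of what a rich `ToolResult` might look like under the `PartListUnion` description above. The `inlineData` part shape comes from `@google/genai`; the values are illustrative, and this is not code from the repository.

```ts
import type { Part } from '@google/genai';

// Hedged sketch only; the quoted docs describe PartListUnion as
// "an array of Part objects and strings".
type PartListUnion = Array<Part | string> | string;

const toolResult: { llmContent: PartListUnion; returnDisplay: string } = {
  llmContent: [
    'Scanned 3 files.',
    { inlineData: { mimeType: 'image/png', data: 'BASE64_IMAGE_DATA' } },
  ],
  returnDisplay: 'Scanned **3** files and attached a screenshot.',
};
```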
@@ -169,7 +169,6 @@ Use the `/mcp auth` command to manage OAuth authentication:
- **`scopes`** (string[]): Required OAuth scopes
- **`redirectUri`** (string): Custom redirect URI (defaults to `http://localhost:7777/oauth/callback`)
- **`tokenParamName`** (string): Query parameter name for tokens in SSE URLs
- **`audiences`** (string[]): Audiences the token is valid for

#### Token Management
@@ -572,56 +571,6 @@ The MCP integration tracks several states:

This comprehensive integration makes MCP servers a powerful way to extend the Gemini CLI's capabilities while maintaining security, reliability, and ease of use.

## Returning Rich Content from Tools

MCP tools are not limited to returning simple text. You can return rich, multi-part content, including text, images, audio, and other binary data in a single tool response. This allows you to build powerful tools that can provide diverse information to the model in a single turn.

All data returned from the tool is processed and sent to the model as context for its next generation, enabling it to reason about or summarize the provided information.

### How It Works

To return rich content, your tool's response must adhere to the MCP specification for a [`CallToolResult`](https://modelcontextprotocol.io/specification/2025-06-18/server/tools#tool-result). The `content` field of the result should be an array of `ContentBlock` objects. The Gemini CLI will correctly process this array, separating text from binary data and packaging it for the model.

You can mix and match different content block types in the `content` array. The supported block types include:

- `text`
- `image`
- `audio`
- `resource` (embedded content)
- `resource_link`

### Example: Returning Text and an Image

Here is an example of a valid JSON response from an MCP tool that returns both a text description and an image:

```json
{
  "content": [
    {
      "type": "text",
      "text": "Here is the logo you requested."
    },
    {
      "type": "image",
      "data": "BASE64_ENCODED_IMAGE_DATA_HERE",
      "mimeType": "image/png"
    },
    {
      "type": "text",
      "text": "The logo was created in 2025."
    }
  ]
}
```

When the Gemini CLI receives this response, it will:

1. Extract all the text and combine it into a single `functionResponse` part for the model.
2. Present the image data as a separate `inlineData` part.
3. Provide a clean, user-friendly summary in the CLI, indicating that both text and an image were received.

This enables you to build sophisticated tools that can provide rich, multi-modal context to the Gemini model.

## MCP Prompts as Slash Commands

In addition to tools, MCP servers can expose predefined prompts that can be executed as slash commands within the Gemini CLI. This allows you to create shortcuts for common or complex queries that can be easily invoked by name.
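Note: a hedged server-side sketch that would produce the `CallToolResult` shown above, assuming the `@modelcontextprotocol/sdk` TypeScript package; the server and tool names are illustrative, not from this diff.

```ts
// Sketch of an MCP tool returning mixed text + image content blocks.
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';

const server = new McpServer({ name: 'logo-server', version: '1.0.0' });

server.tool('get_logo', async () => ({
  content: [
    { type: 'text', text: 'Here is the logo you requested.' },
    {
      type: 'image',
      data: 'BASE64_ENCODED_IMAGE_DATA_HERE',
      mimeType: 'image/png',
    },
    { type: 'text', text: 'The logo was created in 2025.' },
  ],
}));
```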
@@ -52,7 +52,7 @@ Read the main README, all Markdown files in the `docs` directory, and a specific
read_many_files(paths=["README.md", "docs/**/*.md", "assets/logo.png"], exclude=["docs/OLD_README.md"])
```

Read all JavaScript files but explicitly include test files and all JPEGs in an `images` folder:
Read all JavaScript files but explicitly including test files and all JPEGs in an `images` folder:

```
read_many_files(paths=["**/*.js"], include=["**/*.test.js", "images/**/*.jpg"], useDefaultExcludes=False)
@@ -137,5 +137,6 @@ To block all shell commands, add the `run_shell_command` wildcard to `excludeToo

## Security Note for `excludeTools`

Command-specific restrictions in `excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands
Command-specific restrictions in
`excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands
that can be executed.
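Note: an allowlist along the lines this security note recommends might look like `"coreTools": ["run_shell_command(git status)", "run_shell_command(npm test)"]` in `settings.json`. The key name and command-matcher syntax here follow the shell-tool documentation this hunk belongs to, so treat the exact spelling as an assumption rather than a verified API.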
@@ -1,38 +1,28 @@
# Troubleshooting guide
# Troubleshooting Guide

This guide provides solutions to common issues and debugging tips, including topics on:
This guide provides solutions to common issues and debugging tips.

- Authentication or login errors
- Frequently asked questions (FAQs)
- Debugging tips
- Existing GitHub Issues similar to yours or creating new Issues

## Authentication or login errors
## Authentication

- **Error: `Failed to login. Message: Request contains an invalid argument`**
  - Users with Google Workspace accounts or Google Cloud accounts
  - Users with Google Workspace accounts, or users with Google Cloud accounts
    associated with their Gmail accounts may not be able to activate the free
    tier of the Google Code Assist plan.
  - For Google Cloud accounts, you can work around this by setting
    `GOOGLE_CLOUD_PROJECT` to your project ID.
  - Alternatively, you can obtain the Gemini API key from
    [Google AI Studio](http://aistudio.google.com/app/apikey), which also includes a
  - You can also grab an API key from [AI Studio](https://aistudio.google.com/app/apikey), which also includes a
    separate free tier.

## Frequently asked questions (FAQs)

- **Q: How do I update Gemini CLI to the latest version?**
  - A: If you installed it globally via `npm`, update it using the command `npm install -g @google/gemini-cli@latest`. If you compiled it from source, pull the latest changes from the repository, and then rebuild using the command `npm run build`.
  - A: If installed globally via npm, update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`. If run from source, pull the latest changes from the repository and rebuild using `npm run build`.

- **Q: Where are the Gemini CLI configuration or settings files stored?**
  - A: The Gemini CLI configuration is stored in two `settings.json` files:
    1. In your home directory: `~/.gemini/settings.json`.
    2. In your project's root directory: `./.gemini/settings.json`.

    Refer to [Gemini CLI Configuration](./cli/configuration.md) for more details.
- **Q: Where are Gemini CLI configuration files stored?**
  - A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.gemini/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details.

- **Q: Why don't I see cached token counts in my stats output?**
  - A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Google Cloud Vertex AI) but not for OAuth users (such as Google Personal/Enterprise accounts like Google Gmail or Google Workspace, respectively). This is because the Gemini Code Assist API does not support cached content creation. You can still view your total token usage using the `/stats` command in Gemini CLI.
  - A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Vertex AI) but not for OAuth users (Google Personal/Enterprise accounts) at this time, as the Code Assist API does not support cached content creation. You can still view your total token usage with the `/stats` command.

## Common error messages and solutions

@@ -41,27 +31,26 @@ This guide provides solutions to common issues and debugging tips, including top
- **Solution:**
  Either stop the other process that is using the port or configure the MCP server to use a different port.

- **Error: Command not found (when attempting to run Gemini CLI with `gemini`).**
  - **Cause:** Gemini CLI is not correctly installed or it is not in your system's `PATH`.
- **Error: Command not found (when attempting to run Gemini CLI).**
  - **Cause:** Gemini CLI is not correctly installed or not in your system's PATH.
  - **Solution:**
    The update depends on how you installed Gemini CLI:
    - If you installed `gemini` globally, check that your `npm` global binary directory is in your `PATH`. You can update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`.
    - If you are running `gemini` from source, ensure you are using the correct command to invoke it (e.g., `node packages/cli/dist/index.js ...`). To update Gemini CLI, pull the latest changes from the repository, and then rebuild using the command `npm run build`.
    1. Ensure Gemini CLI installation was successful.
    2. If installed globally, check that your npm global binary directory is in your PATH.
    3. If running from source, ensure you are using the correct command to invoke it (e.g., `node packages/cli/dist/index.js ...`).

- **Error: `MODULE_NOT_FOUND` or import errors.**
  - **Cause:** Dependencies are not installed correctly, or the project hasn't been built.
  - **Solution:**
    1. Run `npm install` to ensure all dependencies are present.
    2. Run `npm run build` to compile the project.
    3. Verify that the build completed successfully with `npm run start`.

- **Error: "Operation not permitted", "Permission denied", or similar.**
  - **Cause:** When sandboxing is enabled, Gemini CLI may attempt operations that are restricted by your sandbox configuration, such as writing outside the project directory or system temp directory.
  - **Solution:** Refer to the [Configuration: Sandboxing](./cli/configuration.md#sandboxing) documentation for more information, including how to customize your sandbox configuration.
  - **Cause:** If sandboxing is enabled, then the application is likely attempting an operation restricted by your sandbox, such as writing outside the project directory or system temp directory.
  - **Solution:** See [Sandboxing](./cli/configuration.md#sandboxing) for more information, including how to customize your sandbox configuration.

- **Gemini CLI is not running in interactive mode in "CI" environments**
  - **Issue:** The Gemini CLI does not enter interactive mode (no prompt appears) if an environment variable starting with `CI_` (e.g., `CI_TOKEN`) is set. This is because the `is-in-ci` package, used by the underlying UI framework, detects these variables and assumes a non-interactive CI environment.
  - **Cause:** The `is-in-ci` package checks for the presence of `CI`, `CONTINUOUS_INTEGRATION`, or any environment variable with a `CI_` prefix. When any of these are found, it signals that the environment is non-interactive, which prevents the Gemini CLI from starting in its interactive mode.
- **CLI is not interactive in "CI" environments**
  - **Issue:** The CLI does not enter interactive mode (no prompt appears) if an environment variable starting with `CI_` (e.g., `CI_TOKEN`) is set. This is because the `is-in-ci` package, used by the underlying UI framework, detects these variables and assumes a non-interactive CI environment.
  - **Cause:** The `is-in-ci` package checks for the presence of `CI`, `CONTINUOUS_INTEGRATION`, or any environment variable with a `CI_` prefix. When any of these are found, it signals that the environment is non-interactive, which prevents the CLI from starting in its interactive mode.
  - **Solution:** If the `CI_` prefixed variable is not needed for the CLI to function, you can temporarily unset it for the command. e.g., `env -u CI_TOKEN gemini`

- **DEBUG mode not working from project .env file**

@@ -83,11 +72,9 @@ This guide provides solutions to common issues and debugging tips, including top
- **Tool issues:**
  - If a specific tool is failing, try to isolate the issue by running the simplest possible version of the command or operation the tool performs.
  - For `run_shell_command`, check that the command works directly in your shell first.
  - For _file system tools_, verify that paths are correct and check the permissions.
  - For file system tools, double-check paths and permissions.

- **Pre-flight checks:**
  - Always run `npm run preflight` before committing code. This can catch many common issues related to formatting, linting, and type errors.

## Existing GitHub Issues similar to yours or creating new Issues

If you encounter an issue that was not covered here in this _Troubleshooting guide_, consider searching the Gemini CLI [Issue tracker on GitHub](https://github.com/google-gemini/gemini-cli/issues). If you can't find an issue similar to yours, consider creating a new GitHub Issue with a detailed description. Pull requests are also welcome!
If you encounter an issue not covered here, consider searching the project's issue tracker on GitHub or reporting a new issue with detailed information.
package-lock.json (generated, 2674 lines changed): file diff suppressed because it is too large
@@ -75,8 +75,7 @@
    "pretty-format": "^30.0.2",
    "react-dom": "^19.1.0",
    "typescript": "^5.3.3",
    "vitest": "^3.1.1",
    "@qwen-code/qwen-code-test-utils": "file:../test-utils"
    "vitest": "^3.1.1"
  },
  "engines": {
    "node": ">=20"
@@ -6,9 +6,7 @@

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as os from 'os';
import * as fs from 'fs';
import * as path from 'path';
import { loadCliConfig, parseArguments } from './config.js';
import { loadCliConfig, parseArguments, CliArgs } from './config.js';
import { Settings } from './settings.js';
import { Extension } from './extension.js';
import * as ServerConfig from '@qwen-code/qwen-code-core';

@@ -46,7 +44,7 @@ vi.mock('@qwen-code/qwen-code-core', async () => {
    },
    loadEnvironment: vi.fn(),
    loadServerHierarchicalMemory: vi.fn(
      (cwd, dirs, debug, fileService, extensionPaths, _maxDirs) =>
      (cwd, debug, fileService, extensionPaths, _maxDirs) =>
        Promise.resolve({
          memoryContent: extensionPaths?.join(',') || '',
          fileCount: extensionPaths?.length || 0,

@@ -501,7 +499,6 @@ describe('Hierarchical Memory Loading (config.ts) - Placeholder Suite', () => {
    await loadCliConfig(settings, extensions, 'session-id', argv);
    expect(ServerConfig.loadServerHierarchicalMemory).toHaveBeenCalledWith(
      expect.any(String),
      [],
      false,
      expect.any(Object),
      [

@@ -1081,86 +1078,14 @@ describe('loadCliConfig ideModeFeature', () => {
    const config = await loadCliConfig(settings, [], 'test-session', argv);
    expect(config.getIdeModeFeature()).toBe(false);
  });
});

vi.mock('fs', async () => {
  const actualFs = await vi.importActual<typeof fs>('fs');
  const MOCK_CWD1 = process.cwd();
  const MOCK_CWD2 = path.resolve(path.sep, 'home', 'user', 'project');

  const mockPaths = new Set([
    MOCK_CWD1,
    MOCK_CWD2,
    path.resolve(path.sep, 'cli', 'path1'),
    path.resolve(path.sep, 'settings', 'path1'),
    path.join(os.homedir(), 'settings', 'path2'),
    path.join(MOCK_CWD2, 'cli', 'path2'),
    path.join(MOCK_CWD2, 'settings', 'path3'),
  ]);

  return {
    ...actualFs,
    existsSync: vi.fn((p) => mockPaths.has(p.toString())),
    statSync: vi.fn((p) => {
      if (mockPaths.has(p.toString())) {
        return { isDirectory: () => true };
      }
      // Fallback for other paths if needed, though the test should be specific.
      return actualFs.statSync(p);
    }),
    realpathSync: vi.fn((p) => p),
  };
});

describe('loadCliConfig with includeDirectories', () => {
  const originalArgv = process.argv;
  const originalEnv = { ...process.env };

  beforeEach(() => {
    vi.resetAllMocks();
    vi.mocked(os.homedir).mockReturnValue('/mock/home/user');
    process.env.GEMINI_API_KEY = 'test-api-key';
    vi.spyOn(process, 'cwd').mockReturnValue(
      path.resolve(path.sep, 'home', 'user', 'project'),
    );
  });

  afterEach(() => {
    process.argv = originalArgv;
    process.env = originalEnv;
    vi.restoreAllMocks();
  });

  it('should combine and resolve paths from settings and CLI arguments', async () => {
    const mockCwd = path.resolve(path.sep, 'home', 'user', 'project');
    process.argv = [
      'node',
      'script.js',
      '--include-directories',
      `${path.resolve(path.sep, 'cli', 'path1')},${path.join(mockCwd, 'cli', 'path2')}`,
    ];
  it('should be false when settings.ideModeFeature is true, but SANDBOX is set', async () => {
    process.argv = ['node', 'script.js'];
    const argv = await parseArguments();
    const settings: Settings = {
      includeDirectories: [
        path.resolve(path.sep, 'settings', 'path1'),
        path.join(os.homedir(), 'settings', 'path2'),
        path.join(mockCwd, 'settings', 'path3'),
      ],
    };
    process.env.TERM_PROGRAM = 'vscode';
    process.env.SANDBOX = 'true';
    const settings: Settings = { ideModeFeature: true };
    const config = await loadCliConfig(settings, [], 'test-session', argv);
    const expected = [
      mockCwd,
      path.resolve(path.sep, 'cli', 'path1'),
      path.join(mockCwd, 'cli', 'path2'),
      path.resolve(path.sep, 'settings', 'path1'),
      path.join(os.homedir(), 'settings', 'path2'),
      path.join(mockCwd, 'settings', 'path3'),
    ];
    expect(config.getWorkspaceContext().getDirectories()).toEqual(
      expect.arrayContaining(expected),
    );
    expect(config.getWorkspaceContext().getDirectories()).toHaveLength(
      expected.length,
    );
    expect(config.getIdeModeFeature()).toBe(false);
  });
});
@@ -22,13 +22,13 @@ import {
  FileDiscoveryService,
  TelemetryTarget,
  FileFilteringOptions,
  IdeClient,
} from '@qwen-code/qwen-code-core';
import { Settings } from './settings.js';

import { Extension, annotateActiveExtensions } from './extension.js';
import { getCliVersion } from '../utils/version.js';
import { loadSandboxConfig } from './sandboxConfig.js';
import { resolvePath } from '../utils/resolvePath.js';

// Simple console logger for now - replace with actual logger if available
const logger = {

@@ -68,7 +68,6 @@ export interface CliArgs {
  openaiBaseUrl: string | undefined;
  proxy: string | undefined;
  includeDirectories: string[] | undefined;
  loadMemoryFromIncludeDirectories: boolean | undefined;
}

export async function parseArguments(): Promise<CliArgs> {

@@ -229,12 +228,6 @@ export async function parseArguments(): Promise<CliArgs> {
      // Handle comma-separated values
      dirs.flatMap((dir) => dir.split(',').map((d) => d.trim())),
    })
    .option('load-memory-from-include-directories', {
      type: 'boolean',
      description:
        'If true, when refreshing memory, QWEN.md files should be loaded from all directories that are added. If false, QWEN.md files should only be loaded from the primary working directory.',
      default: false,
    })
    .version(await getCliVersion()) // This will enable the --version flag based on package.json
    .alias('v', 'version')
    .help()

@@ -262,7 +255,6 @@ export async function parseArguments(): Promise<CliArgs> {
// TODO: Consider if App.tsx should get memory via a server call or if Config should refresh itself.
export async function loadHierarchicalGeminiMemory(
  currentWorkingDirectory: string,
  includeDirectoriesToReadGemini: readonly string[] = [],
  debugMode: boolean,
  fileService: FileDiscoveryService,
  settings: Settings,

@@ -288,7 +280,6 @@ export async function loadHierarchicalGeminiMemory(
  // Directly call the server function with the corrected path.
  return loadServerHierarchicalMemory(
    effectiveCwd,
    includeDirectoriesToReadGemini,
    debugMode,
    fileService,
    extensionContextFilePaths,

@@ -311,10 +302,13 @@ export async function loadCliConfig(
    ) ||
    false;
  const memoryImportFormat = settings.memoryImportFormat || 'tree';

  const ideMode = settings.ideMode ?? false;

  const ideModeFeature =
    argv.ideModeFeature ?? settings.ideModeFeature ?? false;
    (argv.ideModeFeature ?? settings.ideModeFeature ?? false) &&
    !process.env.SANDBOX;

  const ideClient = IdeClient.getInstance(ideMode && ideModeFeature);

  const allExtensions = annotateActiveExtensions(
    extensions,

@@ -356,14 +350,9 @@ export async function loadCliConfig(
    ...settings.fileFiltering,
  };

  const includeDirectories = (settings.includeDirectories || [])
    .map(resolvePath)
    .concat((argv.includeDirectories || []).map(resolvePath));

  // Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version
  const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
    process.cwd(),
    settings.loadMemoryFromIncludeDirectories ? includeDirectories : [],
    debugMode,
    fileService,
    settings,

@@ -430,11 +419,7 @@ export async function loadCliConfig(
    embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
    sandbox: sandboxConfig,
    targetDir: process.cwd(),
    includeDirectories,
    loadMemoryFromIncludeDirectories:
      argv.loadMemoryFromIncludeDirectories ||
      settings.loadMemoryFromIncludeDirectories ||
      false,
    includeDirectories: argv.includeDirectories,
    debugMode,
    question: argv.promptInteractive || argv.prompt || '',
    fullContext: argv.allFiles || argv.all_files || false,

@@ -495,6 +480,7 @@ export async function loadCliConfig(
    summarizeToolOutput: settings.summarizeToolOutput,
    ideMode,
    ideModeFeature,
    ideClient,
    enableOpenAILogging:
      (typeof argv.openaiLogging === 'undefined'
        ? settings.enableOpenAILogging
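Note: the `!process.env.SANDBOX` guard in the `ideModeFeature` hunk above means any truthy `SANDBOX` environment variable disables the IDE-mode feature regardless of the flag or setting. A hedged, self-contained sketch of that expression (names mirror the hunk; this is not repository code):

```ts
// Gating expression from the hunk above, lifted into a standalone function.
function ideModeFeatureEnabled(
  argvFlag: boolean | undefined,
  settingsFlag: boolean | undefined,
  sandboxEnv: string | undefined,
): boolean {
  return (argvFlag ?? settingsFlag ?? false) && !sandboxEnv;
}

console.log(ideModeFeatureEnabled(undefined, true, undefined)); // true
console.log(ideModeFeatureEnabled(undefined, true, 'true')); // false: off inside a sandbox
```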
@@ -112,7 +112,6 @@ describe('Settings Loading and Merging', () => {
    expect(settings.merged).toEqual({
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
    expect(settings.errors.length).toBe(0);
  });

@@ -146,7 +145,6 @@ describe('Settings Loading and Merging', () => {
      ...systemSettingsContent,
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
  });

@@ -180,7 +178,6 @@ describe('Settings Loading and Merging', () => {
      ...userSettingsContent,
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
  });

@@ -212,7 +209,6 @@ describe('Settings Loading and Merging', () => {
      ...workspaceSettingsContent,
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
  });

@@ -250,7 +246,6 @@ describe('Settings Loading and Merging', () => {
      contextFileName: 'WORKSPACE_CONTEXT.md',
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
  });

@@ -300,7 +295,6 @@ describe('Settings Loading and Merging', () => {
      allowMCPServers: ['server1', 'server2'],
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
  });

@@ -622,40 +616,6 @@ describe('Settings Loading and Merging', () => {
    expect(settings.merged.mcpServers).toEqual({});
  });

  it('should merge includeDirectories from all scopes', () => {
    (mockFsExistsSync as Mock).mockReturnValue(true);
    const systemSettingsContent = {
      includeDirectories: ['/system/dir'],
    };
    const userSettingsContent = {
      includeDirectories: ['/user/dir1', '/user/dir2'],
    };
    const workspaceSettingsContent = {
      includeDirectories: ['/workspace/dir'],
    };

    (fs.readFileSync as Mock).mockImplementation(
      (p: fs.PathOrFileDescriptor) => {
        if (p === getSystemSettingsPath())
          return JSON.stringify(systemSettingsContent);
        if (p === USER_SETTINGS_PATH)
          return JSON.stringify(userSettingsContent);
        if (p === MOCK_WORKSPACE_SETTINGS_PATH)
          return JSON.stringify(workspaceSettingsContent);
        return '{}';
      },
    );

    const settings = loadSettings(MOCK_WORKSPACE_DIR);

    expect(settings.merged.includeDirectories).toEqual([
      '/system/dir',
      '/user/dir1',
      '/user/dir2',
      '/workspace/dir',
    ]);
  });

  it('should handle JSON parsing errors gracefully', () => {
    (mockFsExistsSync as Mock).mockReturnValue(true); // Both files "exist"
    const invalidJsonContent = 'invalid json';

@@ -694,7 +654,6 @@ describe('Settings Loading and Merging', () => {
    expect(settings.merged).toEqual({
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });

    // Check that error objects are populated in settings.errors

@@ -1131,7 +1090,6 @@ describe('Settings Loading and Merging', () => {
      ...systemSettingsContent,
      customThemes: {},
      mcpServers: {},
      includeDirectories: [],
    });
  });
});
@@ -132,7 +132,6 @@ export interface Settings {
  // Environment variables to exclude from project .env files
  excludedProjectEnvVars?: string[];
  dnsResolutionOrder?: DnsResolutionOrder;

  sampling_params?: Record<string, unknown>;
  systemPromptMappings?: Array<{
    baseUrls: string[];

@@ -143,10 +142,6 @@ export interface Settings {
    timeout?: number;
    maxRetries?: number;
  };

  includeDirectories?: string[];

  loadMemoryFromIncludeDirectories?: boolean;
}

export interface SettingsError {

@@ -202,11 +197,6 @@ export class LoadedSettings {
        ...(workspace.mcpServers || {}),
        ...(system.mcpServers || {}),
      },
      includeDirectories: [
        ...(system.includeDirectories || []),
        ...(user.includeDirectories || []),
        ...(workspace.includeDirectories || []),
      ],
    };
  }

@@ -397,7 +387,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
  const settingsErrors: SettingsError[] = [];
  const systemSettingsPath = getSystemSettingsPath();

  // Resolve paths to their canonical representation to handle symlinks
  // FIX: Resolve paths to their canonical representation to handle symlinks
  const resolvedWorkspaceDir = path.resolve(workspaceDir);
  const resolvedHomeDir = path.resolve(homedir());

@@ -452,6 +442,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings {
    });
  }

  // This comparison is now much more reliable.
  if (realWorkspaceDir !== realHomeDir) {
    // Load workspace settings
    try {
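Note: the `includeDirectories` merge in the `LoadedSettings` hunk above concatenates across scopes rather than overriding, with system entries first and workspace entries last. A hedged, self-contained sketch of that behavior (shapes assumed, not the repository's types):

```ts
// Scope-concatenation behavior shown in the hunk above.
interface ScopeSettings {
  includeDirectories?: string[];
}

function mergeIncludeDirectories(
  system: ScopeSettings,
  user: ScopeSettings,
  workspace: ScopeSettings,
): string[] {
  // Order matches the removed test's expectation:
  // ['/system/dir', '/user/dir1', '/user/dir2', '/workspace/dir']
  return [
    ...(system.includeDirectories ?? []),
    ...(user.includeDirectories ?? []),
    ...(workspace.includeDirectories ?? []),
  ];
}

console.log(
  mergeIncludeDirectories(
    { includeDirectories: ['/system/dir'] },
    { includeDirectories: ['/user/dir1', '/user/dir2'] },
    { includeDirectories: ['/workspace/dir'] },
  ),
);
```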
@@ -70,7 +70,6 @@ describe('runNonInteractive', () => {
      getIdeMode: vi.fn().mockReturnValue(false),
      getFullContext: vi.fn().mockReturnValue(false),
      getContentGeneratorConfig: vi.fn().mockReturnValue({}),
      getDebugMode: vi.fn().mockReturnValue(false),
    } as unknown as Config;
  });
@@ -17,37 +17,28 @@ import {
import { Content, Part, FunctionCall } from '@google/genai';

import { parseAndFormatApiError } from './ui/utils/errorParsing.js';
import { ConsolePatcher } from './ui/utils/ConsolePatcher.js';

export async function runNonInteractive(
  config: Config,
  input: string,
  prompt_id: string,
): Promise<void> {
  const consolePatcher = new ConsolePatcher({
    stderr: true,
    debugMode: config.getDebugMode(),
  await config.initialize();
  // Handle EPIPE errors when the output is piped to a command that closes early.
  process.stdout.on('error', (err: NodeJS.ErrnoException) => {
    if (err.code === 'EPIPE') {
      // Exit gracefully if the pipe is closed.
      process.exit(0);
    }
  });

  const geminiClient = config.getGeminiClient();
  const toolRegistry: ToolRegistry = await config.getToolRegistry();

  const abortController = new AbortController();
  let currentMessages: Content[] = [{ role: 'user', parts: [{ text: input }] }];
  let turnCount = 0;
  try {
    await config.initialize();
    consolePatcher.patch();
    // Handle EPIPE errors when the output is piped to a command that closes early.
    process.stdout.on('error', (err: NodeJS.ErrnoException) => {
      if (err.code === 'EPIPE') {
        // Exit gracefully if the pipe is closed.
        process.exit(0);
      }
    });

    const geminiClient = config.getGeminiClient();
    const toolRegistry: ToolRegistry = await config.getToolRegistry();

    const abortController = new AbortController();
    let currentMessages: Content[] = [
      { role: 'user', parts: [{ text: input }] },
    ];
    let turnCount = 0;
    while (true) {
      turnCount++;
      if (

@@ -142,7 +133,6 @@ export async function runNonInteractive(
    );
    process.exit(1);
  } finally {
    consolePatcher.cleanup();
    if (isTelemetrySdkInitialized()) {
      await shutdownTelemetry();
    }
@@ -308,9 +308,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
    try {
      const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory(
        process.cwd(),
        settings.merged.loadMemoryFromIncludeDirectories
          ? config.getWorkspaceContext().getDirectories()
          : [],
        config.getDebugMode(),
        config.getFileService(),
        settings.merged,

@@ -515,7 +512,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
    openPrivacyNotice,
    toggleVimEnabled,
    setIsProcessing,
    setGeminiMdFileCount,
  );

  const {

@@ -537,7 +533,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
    performMemoryRefresh,
    modelSwitchedFromQuotaError,
    setModelSwitchedFromQuotaError,
    refreshStatic,
  );

  // Input handling

@@ -636,7 +631,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => {
    if (config) {
      setGeminiMdFileCount(config.getGeminiMdFileCount());
    }
  }, [config, config.getGeminiMdFileCount]);
  }, [config]);

  const logger = useLogger();
  const [userMessages, setUserMessages] = useState<string[]>([]);
@@ -40,24 +40,11 @@ describe('directoryCommand', () => {
      getGeminiClient: vi.fn().mockReturnValue({
        addDirectoryContext: vi.fn(),
      }),
      getWorkingDir: () => '/test/dir',
      shouldLoadMemoryFromIncludeDirectories: () => false,
      getDebugMode: () => false,
      getFileService: () => ({}),
      getExtensionContextFilePaths: () => [],
      getFileFilteringOptions: () => ({ ignore: [], include: [] }),
      setUserMemory: vi.fn(),
      setGeminiMdFileCount: vi.fn(),
    } as unknown as Config;

    mockContext = {
      services: {
        config: mockConfig,
        settings: {
          merged: {
            memoryDiscoveryMaxDirs: 1000,
          },
        },
      },
      ui: {
        addItem: vi.fn(),
@@ -8,7 +8,6 @@ import { SlashCommand, CommandContext, CommandKind } from './types.js';
import { MessageType } from '../types.js';
import * as os from 'os';
import * as path from 'path';
import { loadServerHierarchicalMemory } from '@qwen-code/qwen-code-core';

export function expandHomeDir(p: string): string {
  if (!p) {

@@ -17,7 +16,7 @@ export function expandHomeDir(p: string): string {
  let expandedPath = p;
  if (p.toLowerCase().startsWith('%userprofile%')) {
    expandedPath = os.homedir() + p.substring('%userprofile%'.length);
  } else if (p === '~' || p.startsWith('~/')) {
  } else if (p.startsWith('~')) {
    expandedPath = os.homedir() + p.substring(1);
  }
  return path.normalize(expandedPath);

@@ -91,37 +90,6 @@ export const directoryCommand: SlashCommand = {
        }
      }

      try {
        if (config.shouldLoadMemoryFromIncludeDirectories()) {
          const { memoryContent, fileCount } =
            await loadServerHierarchicalMemory(
              config.getWorkingDir(),
              [
                ...config.getWorkspaceContext().getDirectories(),
                ...pathsToAdd,
              ],
              config.getDebugMode(),
              config.getFileService(),
              config.getExtensionContextFilePaths(),
              context.services.settings.merged.memoryImportFormat || 'tree', // Use setting or default to 'tree'
              config.getFileFilteringOptions(),
              context.services.settings.merged.memoryDiscoveryMaxDirs,
            );
          config.setUserMemory(memoryContent);
          config.setGeminiMdFileCount(fileCount);
          context.ui.setGeminiMdFileCount(fileCount);
        }
        addItem(
          {
            type: MessageType.INFO,
            text: `Successfully added GEMINI.md files from the following directories if there are:\n- ${added.join('\n- ')}`,
          },
          Date.now(),
        );
      } catch (error) {
        errors.push(`Error refreshing memory: ${(error as Error).message}`);
      }

      if (added.length > 0) {
        const gemini = config.getGeminiClient();
        if (gemini) {
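Note: the `expandHomeDir` change above is subtle: `p.startsWith('~')` also matches paths like `~alice/docs`, which POSIX shells resolve against *another* user's home directory. A hedged, runnable comparison of the two predicates (not repository code):

```ts
import * as os from 'os';

// Repro of the two variants from the hunk above.
const narrowMatch = (p: string) => p === '~' || p.startsWith('~/');
const broadMatch = (p: string) => p.startsWith('~');

for (const p of ['~', '~/docs', '~alice/docs']) {
  console.log(p, narrowMatch(p), broadMatch(p));
}
// '~alice/docs' matches only broadMatch, so the broader variant would splice
// it onto the current user's home without a separator:
console.log(os.homedir() + '~alice/docs'.substring(1)); // e.g. '/home/me' + 'alice/docs' -> '/home/mealice/docs'
```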
@@ -42,15 +42,9 @@ describe('ideCommand', () => {
    mockConfig = {
      getIdeModeFeature: vi.fn(),
      getIdeMode: vi.fn(),
      getIdeClient: vi.fn(() => ({
        reconnect: vi.fn(),
        disconnect: vi.fn(),
        getCurrentIde: vi.fn(),
        getDetectedIdeDisplayName: vi.fn(),
        getConnectionStatus: vi.fn(),
      })),
      setIdeModeAndSyncConnection: vi.fn(),
      getIdeClient: vi.fn(),
      setIdeMode: vi.fn(),
      setIdeClientDisconnected: vi.fn(),
    } as unknown as Config;

    platformSpy = vi.spyOn(process, 'platform', 'get');
@@ -8,7 +8,6 @@ import {
  Config,
  DetectedIde,
  IDEConnectionStatus,
  IdeClient,
  getIdeDisplayName,
  getIdeInstaller,
} from '@qwen-code/qwen-code-core';

@@ -20,35 +19,6 @@ import {
} from './types.js';
import { SettingScope } from '../../config/settings.js';

function getIdeStatusMessage(ideClient: IdeClient): {
  messageType: 'info' | 'error';
  content: string;
} {
  const connection = ideClient.getConnectionStatus();
  switch (connection.status) {
    case IDEConnectionStatus.Connected:
      return {
        messageType: 'info',
        content: `🟢 Connected to ${ideClient.getDetectedIdeDisplayName()}`,
      };
    case IDEConnectionStatus.Connecting:
      return {
        messageType: 'info',
        content: `🟡 Connecting...`,
      };
    default: {
      let content = `🔴 Disconnected`;
      if (connection?.details) {
        content += `: ${connection.details}`;
      }
      return {
        messageType: 'error',
        content,
      };
    }
  }
}

export const ideCommand = (config: Config | null): SlashCommand | null => {
  if (!config || !config.getIdeModeFeature()) {
    return null;

@@ -84,13 +54,33 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
    name: 'status',
    description: 'check status of IDE integration',
    kind: CommandKind.BUILT_IN,
    action: (): SlashCommandActionReturn => {
      const { messageType, content } = getIdeStatusMessage(ideClient);
      return {
        type: 'message',
        messageType,
        content,
      } as const;
    action: (_context: CommandContext): SlashCommandActionReturn => {
      const connection = ideClient.getConnectionStatus();
      switch (connection.status) {
        case IDEConnectionStatus.Connected:
          return {
            type: 'message',
            messageType: 'info',
            content: `🟢 Connected to ${ideClient.getDetectedIdeDisplayName()}`,
          } as const;
        case IDEConnectionStatus.Connecting:
          return {
            type: 'message',
            messageType: 'info',
            content: `🟡 Connecting...`,
          } as const;
        default: {
          let content = `🔴 Disconnected`;
          if (connection?.details) {
            content += `: ${connection.details}`;
          }
          return {
            type: 'message',
            messageType: 'error',
            content,
          } as const;
        }
      }
    },
  };

@@ -120,10 +110,6 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
      );

      const result = await installer.install();
      if (result.success) {
        config.setIdeMode(true);
        context.services.settings.setValue(SettingScope.User, 'ideMode', true);
      }
      context.ui.addItem(
        {
          type: result.success ? 'info' : 'error',

@@ -140,15 +126,8 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
    kind: CommandKind.BUILT_IN,
    action: async (context: CommandContext) => {
      context.services.settings.setValue(SettingScope.User, 'ideMode', true);
      await config.setIdeModeAndSyncConnection(true);
      const { messageType, content } = getIdeStatusMessage(ideClient);
      context.ui.addItem(
        {
          type: messageType,
          text: content,
        },
        Date.now(),
      );
      config.setIdeMode(true);
      config.setIdeClientConnected();
    },
  };

@@ -158,15 +137,8 @@ export const ideCommand = (config: Config | null): SlashCommand | null => {
    kind: CommandKind.BUILT_IN,
    action: async (context: CommandContext) => {
      context.services.settings.setValue(SettingScope.User, 'ideMode', false);
      await config.setIdeModeAndSyncConnection(false);
      const { messageType, content } = getIdeStatusMessage(ideClient);
      context.ui.addItem(
        {
          type: messageType,
          text: content,
        },
        Date.now(),
      );
      config.setIdeMode(false);
      config.setIdeClientDisconnected();
    },
  };
@@ -161,10 +161,6 @@ describe('memoryCommand', () => {
      getDebugMode: () => false,
      getFileService: () => ({}) as FileDiscoveryService,
      getExtensionContextFilePaths: () => [],
      shouldLoadMemoryFromIncludeDirectories: () => false,
      getWorkspaceContext: () => ({
        getDirectories: () => [],
      }),
      getFileFilteringOptions: () => ({
        ignore: [],
        include: [],
@@ -89,9 +89,6 @@ export const memoryCommand: SlashCommand = {
          const { memoryContent, fileCount } =
            await loadServerHierarchicalMemory(
              config.getWorkingDir(),
              config.shouldLoadMemoryFromIncludeDirectories()
                ? config.getWorkspaceContext().getDirectories()
                : [],
              config.getDebugMode(),
              config.getFileService(),
              config.getExtensionContextFilePaths(),
@@ -49,7 +49,7 @@ describe('setupGithubCommand', () => {
      `curl -fsSL -o "${fakeRepoRoot}/.github/workflows/gemini-issue-automated-triage.yml"`,
      `curl -fsSL -o "${fakeRepoRoot}/.github/workflows/gemini-issue-scheduled-triage.yml"`,
      `curl -fsSL -o "${fakeRepoRoot}/.github/workflows/gemini-pr-review.yml"`,
      'https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/tags/v0/examples/workflows/',
      'https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/heads/v0/examples/workflows/',
    ];

    for (const substring of expectedSubstrings) {
@@ -28,7 +28,7 @@ export const setupGithubCommand: SlashCommand = {
    }

    const version = 'v0';
    const workflowBaseUrl = `https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/tags/${version}/examples/workflows/`;
    const workflowBaseUrl = `https://raw.githubusercontent.com/google-github-actions/run-gemini-cli/refs/heads/${version}/examples/workflows/`;

    const workflows = [
      'gemini-cli/gemini-cli.yml',
@@ -59,7 +59,6 @@ export interface CommandContext {
    /** Toggles a special display mode. */
    toggleCorgiMode: () => void;
    toggleVimEnabled: () => Promise<boolean>;
    setGeminiMdFileCount: (count: number) => void;
  };
  // Session-specific data
  session: {
@@ -5,7 +5,6 @@
 */

import { render } from 'ink-testing-library';
import { waitFor } from '@testing-library/react';
import { InputPrompt, InputPromptProps } from './InputPrompt.js';
import type { TextBuffer } from './shared/text-buffer.js';
import { Config } from '@qwen-code/qwen-code-core';

@@ -1227,12 +1226,11 @@ describe('InputPrompt', () => {
    stdin.write('\x12');
    await wait();
    stdin.write('\x1B');
    await wait();

    await waitFor(() => {
      expect(stdout.lastFrame()).not.toContain('(r:)');
    });

    expect(stdout.lastFrame()).not.toContain('echo hello');
    const frame = stdout.lastFrame();
    expect(frame).not.toContain('(r:)');
    expect(frame).not.toContain('echo hello');

    unmount();
  });

@@ -1242,11 +1240,9 @@ describe('InputPrompt', () => {
    stdin.write('\x12');
    await wait();
    stdin.write('\t');
    await wait();

    await waitFor(() => {
      expect(stdout.lastFrame()).not.toContain('(r:)');
    });

    expect(stdout.lastFrame()).not.toContain('(r:)');
    expect(props.buffer.setText).toHaveBeenCalledWith('echo hello');
    unmount();
  });

@@ -1257,11 +1253,9 @@ describe('InputPrompt', () => {
    await wait();
    expect(stdout.lastFrame()).toContain('(r:)');
    stdin.write('\r');
    await wait();

    await waitFor(() => {
      expect(stdout.lastFrame()).not.toContain('(r:)');
    });

    expect(stdout.lastFrame()).not.toContain('(r:)');
    expect(props.onSubmit).toHaveBeenCalledWith('echo hello');
    unmount();
  });

@@ -1274,10 +1268,9 @@ describe('InputPrompt', () => {
    await wait();
    expect(stdout.lastFrame()).toContain('(r:)');
    stdin.write('\x1B');
    await wait();

    await waitFor(() => {
      expect(stdout.lastFrame()).not.toContain('(r:)');
    });
    expect(stdout.lastFrame()).not.toContain('(r:)');
    expect(props.buffer.text).toBe('initial text');
    expect(props.buffer.cursor).toEqual([0, 3]);
@@ -51,7 +51,6 @@ export const useSlashCommandProcessor = (
openPrivacyNotice: () => void,
toggleVimEnabled: () => Promise<boolean>,
setIsProcessing: (isProcessing: boolean) => void,
setGeminiMdFileCount: (count: number) => void,
) => {
const session = useSessionStats();
const [commands, setCommands] = useState<readonly SlashCommand[]>([]);
@@ -164,7 +163,6 @@ export const useSlashCommandProcessor = (
setPendingItem: setPendingCompressionItem,
toggleCorgiMode,
toggleVimEnabled,
setGeminiMdFileCount,
},
session: {
stats: session.stats,
@@ -189,7 +187,6 @@ export const useSlashCommandProcessor = (
toggleCorgiMode,
toggleVimEnabled,
sessionShellAllowlist,
setGeminiMdFileCount,
],
);
@@ -1,380 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

/** @vitest-environment jsdom */

import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import { renderHook, waitFor, act } from '@testing-library/react';
import { useAtCompletion } from './useAtCompletion.js';
import { Config, FileSearch } from '@qwen-code/qwen-code-core';
import {
createTmpDir,
cleanupTmpDir,
FileSystemStructure,
} from '@qwen-code/qwen-code-test-utils';
import { useState } from 'react';
import { Suggestion } from '../components/SuggestionsDisplay.js';

// Test harness to capture the state from the hook's callbacks.
function useTestHarnessForAtCompletion(
enabled: boolean,
pattern: string,
config: Config | undefined,
cwd: string,
) {
const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false);

useAtCompletion({
enabled,
pattern,
config,
cwd,
setSuggestions,
setIsLoadingSuggestions,
});

return { suggestions, isLoadingSuggestions };
}

describe('useAtCompletion', () => {
let testRootDir: string;
let mockConfig: Config;

beforeEach(() => {
mockConfig = {
getFileFilteringOptions: vi.fn(() => ({
respectGitIgnore: true,
respectGeminiIgnore: true,
})),
} as unknown as Config;
vi.clearAllMocks();
});

afterEach(async () => {
if (testRootDir) {
await cleanupTmpDir(testRootDir);
}
vi.restoreAllMocks();
});

describe('File Search Logic', () => {
it('should perform a recursive search for an empty pattern', async () => {
const structure: FileSystemStructure = {
'file.txt': '',
src: {
'index.js': '',
components: ['Button.tsx', 'Button with spaces.tsx'],
},
};
testRootDir = await createTmpDir(structure);

const { result } = renderHook(() =>
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
);

await waitFor(() => {
expect(result.current.suggestions.length).toBeGreaterThan(0);
});

expect(result.current.suggestions.map((s) => s.value)).toEqual([
'src/',
'src/components/',
'file.txt',
'src/components/Button\\ with\\ spaces.tsx',
'src/components/Button.tsx',
'src/index.js',
]);
});

it('should correctly filter the recursive list based on a pattern', async () => {
const structure: FileSystemStructure = {
'file.txt': '',
src: {
'index.js': '',
components: {
'Button.tsx': '',
},
},
};
testRootDir = await createTmpDir(structure);

const { result } = renderHook(() =>
useTestHarnessForAtCompletion(true, 'src/', mockConfig, testRootDir),
);

await waitFor(() => {
expect(result.current.suggestions.length).toBeGreaterThan(0);
});

expect(result.current.suggestions.map((s) => s.value)).toEqual([
'src/',
'src/components/',
'src/components/Button.tsx',
'src/index.js',
]);
});

it('should append a trailing slash to directory paths in suggestions', async () => {
const structure: FileSystemStructure = {
'file.txt': '',
dir: {},
};
testRootDir = await createTmpDir(structure);

const { result } = renderHook(() =>
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
);

await waitFor(() => {
expect(result.current.suggestions.length).toBeGreaterThan(0);
});

expect(result.current.suggestions.map((s) => s.value)).toEqual([
'dir/',
'file.txt',
]);
});
});

describe('UI State and Loading Behavior', () => {
it('should be in a loading state during initial file system crawl', async () => {
testRootDir = await createTmpDir({});
const { result } = renderHook(() =>
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
);

// It's initially true because the effect runs synchronously.
expect(result.current.isLoadingSuggestions).toBe(true);

// Wait for the loading to complete.
await waitFor(() => {
expect(result.current.isLoadingSuggestions).toBe(false);
});
});

it('should NOT show a loading indicator for subsequent searches that complete under 100ms', async () => {
const structure: FileSystemStructure = { 'a.txt': '', 'b.txt': '' };
testRootDir = await createTmpDir(structure);

const { result, rerender } = renderHook(
({ pattern }) =>
useTestHarnessForAtCompletion(true, pattern, mockConfig, testRootDir),
{ initialProps: { pattern: 'a' } },
);

await waitFor(() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual([
'a.txt',
]);
});
expect(result.current.isLoadingSuggestions).toBe(false);

rerender({ pattern: 'b' });

// Wait for the final result
await waitFor(() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual([
'b.txt',
]);
});

expect(result.current.isLoadingSuggestions).toBe(false);
});

it('should show a loading indicator and clear old suggestions for subsequent searches that take longer than 100ms', async () => {
const structure: FileSystemStructure = { 'a.txt': '', 'b.txt': '' };
testRootDir = await createTmpDir(structure);

// Spy on the search method to introduce an artificial delay
const originalSearch = FileSearch.prototype.search;
vi.spyOn(FileSearch.prototype, 'search').mockImplementation(
async function (...args) {
await new Promise((resolve) => setTimeout(resolve, 200));
return originalSearch.apply(this, args);
},
);

const { result, rerender } = renderHook(
({ pattern }) =>
useTestHarnessForAtCompletion(true, pattern, mockConfig, testRootDir),
{ initialProps: { pattern: 'a' } },
);

// Wait for the initial (slow) search to complete
await waitFor(() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual([
'a.txt',
]);
});

// Now, rerender to trigger the second search
rerender({ pattern: 'b' });

// Wait for the loading indicator to appear
await waitFor(() => {
expect(result.current.isLoadingSuggestions).toBe(true);
});

// Suggestions should be cleared while loading
expect(result.current.suggestions).toEqual([]);

// Wait for the final (slow) search to complete
await waitFor(
() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual([
'b.txt',
]);
},
{ timeout: 1000 },
); // Increase timeout for the slow search

expect(result.current.isLoadingSuggestions).toBe(false);
});

it('should abort the previous search when a new one starts', async () => {
const structure: FileSystemStructure = { 'a.txt': '', 'b.txt': '' };
testRootDir = await createTmpDir(structure);

const abortSpy = vi.spyOn(AbortController.prototype, 'abort');
const searchSpy = vi
.spyOn(FileSearch.prototype, 'search')
.mockImplementation(async (...args) => {
const delay = args[0] === 'a' ? 500 : 50;
await new Promise((resolve) => setTimeout(resolve, delay));
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return [args[0] as any];
});

const { result, rerender } = renderHook(
({ pattern }) =>
useTestHarnessForAtCompletion(true, pattern, mockConfig, testRootDir),
{ initialProps: { pattern: 'a' } },
);

// Wait for the hook to be ready (initialization is complete)
await waitFor(() => {
expect(searchSpy).toHaveBeenCalledWith('a', expect.any(Object));
});

// Now that the first search is in-flight, trigger the second one.
act(() => {
rerender({ pattern: 'b' });
});

// The abort should have been called for the first search.
expect(abortSpy).toHaveBeenCalledTimes(1);

// Wait for the final result, which should be from the second, faster search.
await waitFor(
() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual(['b']);
},
{ timeout: 1000 },
);

// The search spy should have been called for both patterns.
expect(searchSpy).toHaveBeenCalledWith('b', expect.any(Object));

vi.restoreAllMocks();
});
});

describe('Filtering and Configuration', () => {
it('should respect .gitignore files', async () => {
const gitignoreContent = ['dist/', '*.log'].join('\n');
const structure: FileSystemStructure = {
'.git': {},
'.gitignore': gitignoreContent,
dist: {},
'test.log': '',
src: {},
};
testRootDir = await createTmpDir(structure);

const { result } = renderHook(() =>
useTestHarnessForAtCompletion(true, '', mockConfig, testRootDir),
);

await waitFor(() => {
expect(result.current.suggestions.length).toBeGreaterThan(0);
});

expect(result.current.suggestions.map((s) => s.value)).toEqual([
'src/',
'.gitignore',
]);
});

it('should work correctly when config is undefined', async () => {
const structure: FileSystemStructure = {
node_modules: {},
src: {},
};
testRootDir = await createTmpDir(structure);

const { result } = renderHook(() =>
useTestHarnessForAtCompletion(true, '', undefined, testRootDir),
);

await waitFor(() => {
expect(result.current.suggestions.length).toBeGreaterThan(0);
});

expect(result.current.suggestions.map((s) => s.value)).toEqual([
'node_modules/',
'src/',
]);
});

it('should reset and re-initialize when the cwd changes', async () => {
const structure1: FileSystemStructure = { 'file1.txt': '' };
const rootDir1 = await createTmpDir(structure1);
const structure2: FileSystemStructure = { 'file2.txt': '' };
const rootDir2 = await createTmpDir(structure2);

const { result, rerender } = renderHook(
({ cwd, pattern }) =>
useTestHarnessForAtCompletion(true, pattern, mockConfig, cwd),
{
initialProps: {
cwd: rootDir1,
pattern: 'file',
},
},
);

// Wait for initial suggestions from the first directory
await waitFor(() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual([
'file1.txt',
]);
});

// Change the CWD
act(() => {
rerender({ cwd: rootDir2, pattern: 'file' });
});

// After CWD changes, suggestions should be cleared and it should load again.
await waitFor(() => {
expect(result.current.isLoadingSuggestions).toBe(true);
expect(result.current.suggestions).toEqual([]);
});

// Wait for the new suggestions from the second directory
await waitFor(() => {
expect(result.current.suggestions.map((s) => s.value)).toEqual([
'file2.txt',
]);
});
expect(result.current.isLoadingSuggestions).toBe(false);

await cleanupTmpDir(rootDir1);
await cleanupTmpDir(rootDir2);
});
});
});
@@ -1,235 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { useEffect, useReducer, useRef } from 'react';
import { Config, FileSearch, escapePath } from '@qwen-code/qwen-code-core';
import {
Suggestion,
MAX_SUGGESTIONS_TO_SHOW,
} from '../components/SuggestionsDisplay.js';

export enum AtCompletionStatus {
IDLE = 'idle',
INITIALIZING = 'initializing',
READY = 'ready',
SEARCHING = 'searching',
ERROR = 'error',
}

interface AtCompletionState {
status: AtCompletionStatus;
suggestions: Suggestion[];
isLoading: boolean;
pattern: string | null;
}

type AtCompletionAction =
| { type: 'INITIALIZE' }
| { type: 'INITIALIZE_SUCCESS' }
| { type: 'SEARCH'; payload: string }
| { type: 'SEARCH_SUCCESS'; payload: Suggestion[] }
| { type: 'SET_LOADING'; payload: boolean }
| { type: 'ERROR' }
| { type: 'RESET' };

const initialState: AtCompletionState = {
status: AtCompletionStatus.IDLE,
suggestions: [],
isLoading: false,
pattern: null,
};

function atCompletionReducer(
state: AtCompletionState,
action: AtCompletionAction,
): AtCompletionState {
switch (action.type) {
case 'INITIALIZE':
return {
...state,
status: AtCompletionStatus.INITIALIZING,
isLoading: true,
};
case 'INITIALIZE_SUCCESS':
return { ...state, status: AtCompletionStatus.READY, isLoading: false };
case 'SEARCH':
// Keep old suggestions, don't set loading immediately
return {
...state,
status: AtCompletionStatus.SEARCHING,
pattern: action.payload,
};
case 'SEARCH_SUCCESS':
return {
...state,
status: AtCompletionStatus.READY,
suggestions: action.payload,
isLoading: false,
};
case 'SET_LOADING':
// Only show loading if we are still in a searching state
if (state.status === AtCompletionStatus.SEARCHING) {
return { ...state, isLoading: action.payload, suggestions: [] };
}
return state;
case 'ERROR':
return {
...state,
status: AtCompletionStatus.ERROR,
isLoading: false,
suggestions: [],
};
case 'RESET':
return initialState;
default:
return state;
}
}

export interface UseAtCompletionProps {
enabled: boolean;
pattern: string;
config: Config | undefined;
cwd: string;
setSuggestions: (suggestions: Suggestion[]) => void;
setIsLoadingSuggestions: (isLoading: boolean) => void;
}

export function useAtCompletion(props: UseAtCompletionProps): void {
const {
enabled,
pattern,
config,
cwd,
setSuggestions,
setIsLoadingSuggestions,
} = props;
const [state, dispatch] = useReducer(atCompletionReducer, initialState);
const fileSearch = useRef<FileSearch | null>(null);
const searchAbortController = useRef<AbortController | null>(null);
const slowSearchTimer = useRef<NodeJS.Timeout | null>(null);

useEffect(() => {
setSuggestions(state.suggestions);
}, [state.suggestions, setSuggestions]);

useEffect(() => {
setIsLoadingSuggestions(state.isLoading);
}, [state.isLoading, setIsLoadingSuggestions]);

useEffect(() => {
dispatch({ type: 'RESET' });
}, [cwd, config]);

// Reacts to user input (`pattern`) ONLY.
useEffect(() => {
if (!enabled) {
// reset when first getting out of completion suggestions
if (
state.status === AtCompletionStatus.READY ||
state.status === AtCompletionStatus.ERROR
) {
dispatch({ type: 'RESET' });
}
return;
}
if (pattern === null) {
dispatch({ type: 'RESET' });
return;
}

if (state.status === AtCompletionStatus.IDLE) {
dispatch({ type: 'INITIALIZE' });
} else if (
(state.status === AtCompletionStatus.READY ||
state.status === AtCompletionStatus.SEARCHING) &&
pattern !== state.pattern // Only search if the pattern has changed
) {
dispatch({ type: 'SEARCH', payload: pattern });
}
}, [enabled, pattern, state.status, state.pattern]);

// The "Worker" that performs async operations based on status.
useEffect(() => {
const initialize = async () => {
try {
const searcher = new FileSearch({
projectRoot: cwd,
ignoreDirs: [],
useGitignore:
config?.getFileFilteringOptions()?.respectGitIgnore ?? true,
useGeminiignore:
config?.getFileFilteringOptions()?.respectGeminiIgnore ?? true,
cache: true,
cacheTtl: 30, // 30 seconds
});
await searcher.initialize();
fileSearch.current = searcher;
dispatch({ type: 'INITIALIZE_SUCCESS' });
if (state.pattern !== null) {
dispatch({ type: 'SEARCH', payload: state.pattern });
}
} catch (_) {
dispatch({ type: 'ERROR' });
}
};

const search = async () => {
if (!fileSearch.current || state.pattern === null) {
return;
}

if (slowSearchTimer.current) {
clearTimeout(slowSearchTimer.current);
}

const controller = new AbortController();
searchAbortController.current = controller;

slowSearchTimer.current = setTimeout(() => {
dispatch({ type: 'SET_LOADING', payload: true });
}, 100);

try {
const results = await fileSearch.current.search(state.pattern, {
signal: controller.signal,
maxResults: MAX_SUGGESTIONS_TO_SHOW * 3,
});

if (slowSearchTimer.current) {
clearTimeout(slowSearchTimer.current);
}

if (controller.signal.aborted) {
return;
}

const suggestions = results.map((p) => ({
label: p,
value: escapePath(p),
}));
dispatch({ type: 'SEARCH_SUCCESS', payload: suggestions });
} catch (error) {
if (!(error instanceof Error && error.name === 'AbortError')) {
dispatch({ type: 'ERROR' });
}
}
};

if (state.status === AtCompletionStatus.INITIALIZING) {
initialize();
} else if (state.status === AtCompletionStatus.SEARCHING) {
search();
}

return () => {
searchAbortController.current?.abort();
if (slowSearchTimer.current) {
clearTimeout(slowSearchTimer.current);
}
};
}, [state.status, state.pattern, config, cwd]);
}
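The hook deleted above couples three details that are easy to get wrong together: one AbortController per search, a 100 ms timer that delays the loading indicator so fast searches never flash a spinner, and a cleanup that cancels both. A minimal standalone sketch of that pattern, assuming only a generic search callback (the names below are illustrative, not part of the qwen-code-core API):

// Sketch: abortable search with a delayed "slow search" indicator.
// `search` and `setLoading` are assumed caller-supplied callbacks.
function createAbortableSearcher(
  search: (pattern: string, signal: AbortSignal) => Promise<string[]>,
  setLoading: (loading: boolean) => void,
) {
  let inFlight: AbortController | null = null;
  return async (pattern: string): Promise<string[] | null> => {
    inFlight?.abort(); // starting a new search cancels the previous one
    const controller = (inFlight = new AbortController());
    // Show the loading indicator only if the search takes longer than 100 ms.
    const slowTimer = setTimeout(() => setLoading(true), 100);
    try {
      const results = await search(pattern, controller.signal);
      return controller.signal.aborted ? null : results; // drop stale results
    } finally {
      clearTimeout(slowTimer);
      setLoading(false);
    }
  };
}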
File diff suppressed because it is too large
@@ -4,7 +4,20 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import { useCallback, useMemo, useEffect } from 'react';
import { useEffect, useCallback, useMemo, useRef } from 'react';
import * as fs from 'fs/promises';
import * as path from 'path';
import { glob } from 'glob';
import {
isNodeError,
escapePath,
unescapePath,
getErrorMessage,
Config,
FileDiscoveryService,
DEFAULT_FILE_FILTERING_OPTIONS,
SHELL_SPECIAL_CHARS,
} from '@qwen-code/qwen-code-core';
import { Suggestion } from '../components/SuggestionsDisplay.js';
import { CommandContext, SlashCommand } from '../commands/types.js';
import {
@@ -13,17 +26,8 @@ import {
} from '../components/shared/text-buffer.js';
import { isSlashCommand } from '../utils/commandUtils.js';
import { toCodePoints } from '../utils/textUtils.js';
import { useAtCompletion } from './useAtCompletion.js';
import { useSlashCompletion } from './useSlashCompletion.js';
import { Config } from '@qwen-code/qwen-code-core';
import { useCompletion } from './useCompletion.js';

export enum CompletionMode {
IDLE = 'IDLE',
AT = 'AT',
SLASH = 'SLASH',
}

export interface UseCommandCompletionReturn {
suggestions: Suggestion[];
activeSuggestionIndex: number;
@@ -68,109 +72,541 @@ export function useCommandCompletion(
navigateDown,
} = useCompletion();

const completionStart = useRef(-1);
const completionEnd = useRef(-1);

const cursorRow = buffer.cursor[0];
const cursorCol = buffer.cursor[1];

const { completionMode, query, completionStart, completionEnd } =
useMemo(() => {
const currentLine = buffer.lines[cursorRow] || '';
if (cursorRow === 0 && isSlashCommand(currentLine.trim())) {
return {
completionMode: CompletionMode.SLASH,
query: currentLine,
completionStart: 0,
completionEnd: currentLine.length,
};
// Check if cursor is after @ or / without unescaped spaces
const commandIndex = useMemo(() => {
const currentLine = buffer.lines[cursorRow] || '';
if (cursorRow === 0 && isSlashCommand(currentLine.trim())) {
return currentLine.indexOf('/');
}

// For other completions like '@', we search backwards from the cursor.

const codePoints = toCodePoints(currentLine);
for (let i = cursorCol - 1; i >= 0; i--) {
const char = codePoints[i];

if (char === ' ') {
// Check for unescaped spaces.
let backslashCount = 0;
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
backslashCount++;
}
if (backslashCount % 2 === 0) {
return -1; // Inactive on unescaped space.
}
} else if (char === '@') {
// Active if we find an '@' before any unescaped space.
return i;
}
}

return -1;
}, [cursorRow, cursorCol, buffer.lines]);

useEffect(() => {
if (commandIndex === -1 || reverseSearchActive) {
setTimeout(resetCompletionState, 0);
return;
}

const currentLine = buffer.lines[cursorRow] || '';
const codePoints = toCodePoints(currentLine);

if (codePoints[commandIndex] === '/') {
// Always reset perfect match at the beginning of processing.
setIsPerfectMatch(false);

const fullPath = currentLine.substring(commandIndex + 1);
const hasTrailingSpace = currentLine.endsWith(' ');

// Get all non-empty parts of the command.
const rawParts = fullPath.split(/\s+/).filter((p) => p);

let commandPathParts = rawParts;
let partial = '';

// If there's no trailing space, the last part is potentially a partial segment.
// We tentatively separate it.
if (!hasTrailingSpace && rawParts.length > 0) {
partial = rawParts[rawParts.length - 1];
commandPathParts = rawParts.slice(0, -1);
}

const codePoints = toCodePoints(currentLine);
for (let i = cursorCol - 1; i >= 0; i--) {
const char = codePoints[i];
// Traverse the Command Tree using the tentative completed path
let currentLevel: readonly SlashCommand[] | undefined = slashCommands;
let leafCommand: SlashCommand | null = null;

if (char === ' ') {
let backslashCount = 0;
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
backslashCount++;
}
if (backslashCount % 2 === 0) {
return {
completionMode: CompletionMode.IDLE,
query: null,
completionStart: -1,
completionEnd: -1,
};
}
} else if (char === '@') {
let end = codePoints.length;
for (let i = cursorCol; i < codePoints.length; i++) {
if (codePoints[i] === ' ') {
let backslashCount = 0;
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
backslashCount++;
}

if (backslashCount % 2 === 0) {
end = i;
break;
}
}
}
const pathStart = i + 1;
const partialPath = currentLine.substring(pathStart, end);
return {
completionMode: CompletionMode.AT,
query: partialPath,
completionStart: pathStart,
completionEnd: end,
};
for (const part of commandPathParts) {
if (!currentLevel) {
leafCommand = null;
currentLevel = [];
break;
}
const found: SlashCommand | undefined = currentLevel.find(
(cmd) => cmd.name === part || cmd.altNames?.includes(part),
);
if (found) {
leafCommand = found;
currentLevel = found.subCommands as
| readonly SlashCommand[]
| undefined;
} else {
leafCommand = null;
currentLevel = [];
break;
}
}
return {
completionMode: CompletionMode.IDLE,
query: null,
completionStart: -1,
completionEnd: -1,
};
}, [cursorRow, cursorCol, buffer.lines]);

useAtCompletion({
enabled: completionMode === CompletionMode.AT,
pattern: query || '',
config,
cwd,
setSuggestions,
setIsLoadingSuggestions,
});
let exactMatchAsParent: SlashCommand | undefined;
// Handle the Ambiguous Case
if (!hasTrailingSpace && currentLevel) {
exactMatchAsParent = currentLevel.find(
(cmd) =>
(cmd.name === partial || cmd.altNames?.includes(partial)) &&
cmd.subCommands,
);

const slashCompletionRange = useSlashCompletion({
enabled: completionMode === CompletionMode.SLASH,
query,
slashCommands,
commandContext,
setSuggestions,
setIsLoadingSuggestions,
setIsPerfectMatch,
});
if (exactMatchAsParent) {
// It's a perfect match for a parent command. Override our initial guess.
// Treat it as a completed command path.
leafCommand = exactMatchAsParent;
currentLevel = exactMatchAsParent.subCommands;
partial = ''; // We now want to suggest ALL of its sub-commands.
}
}

useEffect(() => {
setActiveSuggestionIndex(suggestions.length > 0 ? 0 : -1);
setVisibleStartIndex(0);
}, [suggestions, setActiveSuggestionIndex, setVisibleStartIndex]);
// Check for perfect, executable match
if (!hasTrailingSpace) {
if (leafCommand && partial === '' && leafCommand.action) {
// Case: /command<enter> - command has action, no sub-commands were suggested
setIsPerfectMatch(true);
} else if (currentLevel) {
// Case: /command subcommand<enter>
const perfectMatch = currentLevel.find(
(cmd) =>
(cmd.name === partial || cmd.altNames?.includes(partial)) &&
cmd.action,
);
if (perfectMatch) {
setIsPerfectMatch(true);
}
}
}

useEffect(() => {
if (completionMode === CompletionMode.IDLE || reverseSearchActive) {
const depth = commandPathParts.length;
const isArgumentCompletion =
leafCommand?.completion &&
(hasTrailingSpace ||
(rawParts.length > depth && depth > 0 && partial !== ''));

// Set completion range
if (hasTrailingSpace || exactMatchAsParent) {
completionStart.current = currentLine.length;
completionEnd.current = currentLine.length;
} else if (partial) {
if (isArgumentCompletion) {
const commandSoFar = `/${commandPathParts.join(' ')}`;
const argStartIndex =
commandSoFar.length + (commandPathParts.length > 0 ? 1 : 0);
completionStart.current = argStartIndex;
} else {
completionStart.current = currentLine.length - partial.length;
}
completionEnd.current = currentLine.length;
} else {
// e.g. /
completionStart.current = commandIndex + 1;
completionEnd.current = currentLine.length;
}

// Provide Suggestions based on the now-corrected context
if (isArgumentCompletion) {
const fetchAndSetSuggestions = async () => {
setIsLoadingSuggestions(true);
const argString = rawParts.slice(depth).join(' ');
const results =
(await leafCommand!.completion!(commandContext, argString)) || [];
const finalSuggestions = results.map((s) => ({ label: s, value: s }));
setSuggestions(finalSuggestions);
setShowSuggestions(finalSuggestions.length > 0);
setActiveSuggestionIndex(finalSuggestions.length > 0 ? 0 : -1);
setIsLoadingSuggestions(false);
};
fetchAndSetSuggestions();
return;
}

// Command/Sub-command Completion
const commandsToSearch = currentLevel || [];
if (commandsToSearch.length > 0) {
let potentialSuggestions = commandsToSearch.filter(
(cmd) =>
cmd.description &&
(cmd.name.startsWith(partial) ||
cmd.altNames?.some((alt) => alt.startsWith(partial))),
);

// If a user's input is an exact match and it is a leaf command,
// enter should submit immediately.
if (potentialSuggestions.length > 0 && !hasTrailingSpace) {
const perfectMatch = potentialSuggestions.find(
(s) => s.name === partial || s.altNames?.includes(partial),
);
if (perfectMatch && perfectMatch.action) {
potentialSuggestions = [];
}
}

const finalSuggestions = potentialSuggestions.map((cmd) => ({
label: cmd.name,
value: cmd.name,
description: cmd.description,
}));

setSuggestions(finalSuggestions);
setShowSuggestions(finalSuggestions.length > 0);
setActiveSuggestionIndex(finalSuggestions.length > 0 ? 0 : -1);
setIsLoadingSuggestions(false);
return;
}

// If we fall through, no suggestions are available.
resetCompletionState();
return;
}
// Show suggestions if we are loading OR if there are results to display.
setShowSuggestions(isLoadingSuggestions || suggestions.length > 0);

// Handle At Command Completion
completionEnd.current = codePoints.length;
for (let i = cursorCol; i < codePoints.length; i++) {
if (codePoints[i] === ' ') {
let backslashCount = 0;
for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) {
backslashCount++;
}

if (backslashCount % 2 === 0) {
completionEnd.current = i;
break;
}
}
}

const pathStart = commandIndex + 1;
const partialPath = currentLine.substring(pathStart, completionEnd.current);
const lastSlashIndex = partialPath.lastIndexOf('/');
completionStart.current =
lastSlashIndex === -1 ? pathStart : pathStart + lastSlashIndex + 1;
const baseDirRelative =
lastSlashIndex === -1
? '.'
: partialPath.substring(0, lastSlashIndex + 1);
const prefix = unescapePath(
lastSlashIndex === -1
? partialPath
: partialPath.substring(lastSlashIndex + 1),
);

let isMounted = true;

const findFilesRecursively = async (
startDir: string,
searchPrefix: string,
fileDiscovery: FileDiscoveryService | null,
filterOptions: {
respectGitIgnore?: boolean;
respectGeminiIgnore?: boolean;
},
currentRelativePath = '',
depth = 0,
maxDepth = 10, // Limit recursion depth
maxResults = 50, // Limit number of results
): Promise<Suggestion[]> => {
if (depth > maxDepth) {
return [];
}

const lowerSearchPrefix = searchPrefix.toLowerCase();
let foundSuggestions: Suggestion[] = [];
try {
const entries = await fs.readdir(startDir, { withFileTypes: true });
for (const entry of entries) {
if (foundSuggestions.length >= maxResults) break;

const entryPathRelative = path.join(currentRelativePath, entry.name);
const entryPathFromRoot = path.relative(
startDir,
path.join(startDir, entry.name),
);

// Conditionally ignore dotfiles
if (!searchPrefix.startsWith('.') && entry.name.startsWith('.')) {
continue;
}

// Check if this entry should be ignored by filtering options
if (
fileDiscovery &&
fileDiscovery.shouldIgnoreFile(entryPathFromRoot, filterOptions)
) {
continue;
}

if (entry.name.toLowerCase().startsWith(lowerSearchPrefix)) {
foundSuggestions.push({
label: entryPathRelative + (entry.isDirectory() ? '/' : ''),
value: escapePath(
entryPathRelative + (entry.isDirectory() ? '/' : ''),
),
});
}
if (
entry.isDirectory() &&
entry.name !== 'node_modules' &&
!entry.name.startsWith('.')
) {
if (foundSuggestions.length < maxResults) {
foundSuggestions = foundSuggestions.concat(
await findFilesRecursively(
path.join(startDir, entry.name),
searchPrefix, // Pass original searchPrefix for recursive calls
fileDiscovery,
filterOptions,
entryPathRelative,
depth + 1,
maxDepth,
maxResults - foundSuggestions.length,
),
);
}
}
}
} catch (_err) {
// Ignore errors like permission denied or ENOENT during recursive search
}
return foundSuggestions.slice(0, maxResults);
};

const findFilesWithGlob = async (
searchPrefix: string,
fileDiscoveryService: FileDiscoveryService,
filterOptions: {
respectGitIgnore?: boolean;
respectGeminiIgnore?: boolean;
},
searchDir: string,
maxResults = 50,
): Promise<Suggestion[]> => {
const globPattern = `**/${searchPrefix}*`;
const files = await glob(globPattern, {
cwd: searchDir,
dot: searchPrefix.startsWith('.'),
nocase: true,
});

const suggestions: Suggestion[] = files
.filter((file) => {
if (fileDiscoveryService) {
return !fileDiscoveryService.shouldIgnoreFile(file, filterOptions);
}
return true;
})
.map((file: string) => {
const absolutePath = path.resolve(searchDir, file);
const label = path.relative(cwd, absolutePath);
return {
label,
value: escapePath(label),
};
})
.slice(0, maxResults);

return suggestions;
};

const fetchSuggestions = async () => {
setIsLoadingSuggestions(true);
let fetchedSuggestions: Suggestion[] = [];

const fileDiscoveryService = config ? config.getFileService() : null;
const enableRecursiveSearch =
config?.getEnableRecursiveFileSearch() ?? true;
const filterOptions =
config?.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS;

try {
// If there's no slash, or it's the root, do a recursive search from workspace directories
for (const dir of dirs) {
let fetchedSuggestionsPerDir: Suggestion[] = [];
if (
partialPath.indexOf('/') === -1 &&
prefix &&
enableRecursiveSearch
) {
if (fileDiscoveryService) {
fetchedSuggestionsPerDir = await findFilesWithGlob(
prefix,
fileDiscoveryService,
filterOptions,
dir,
);
} else {
fetchedSuggestionsPerDir = await findFilesRecursively(
dir,
prefix,
null,
filterOptions,
);
}
} else {
// Original behavior: list files in the specific directory
const lowerPrefix = prefix.toLowerCase();
const baseDirAbsolute = path.resolve(dir, baseDirRelative);
const entries = await fs.readdir(baseDirAbsolute, {
withFileTypes: true,
});

// Filter entries using git-aware filtering
const filteredEntries = [];
for (const entry of entries) {
// Conditionally ignore dotfiles
if (!prefix.startsWith('.') && entry.name.startsWith('.')) {
continue;
}
if (!entry.name.toLowerCase().startsWith(lowerPrefix)) continue;

const relativePath = path.relative(
dir,
path.join(baseDirAbsolute, entry.name),
);
if (
fileDiscoveryService &&
fileDiscoveryService.shouldIgnoreFile(
relativePath,
filterOptions,
)
) {
continue;
}

filteredEntries.push(entry);
}

fetchedSuggestionsPerDir = filteredEntries.map((entry) => {
const absolutePath = path.resolve(baseDirAbsolute, entry.name);
const label =
cwd === dir ? entry.name : path.relative(cwd, absolutePath);
const suggestionLabel = entry.isDirectory() ? label + '/' : label;
return {
label: suggestionLabel,
value: escapePath(suggestionLabel),
};
});
}
fetchedSuggestions = [
...fetchedSuggestions,
...fetchedSuggestionsPerDir,
];
}

// Like glob, we always return forward slashes for path separators, even on Windows.
// But preserve backslash escaping for special characters.
const specialCharsLookahead = `(?![${SHELL_SPECIAL_CHARS.source.slice(1, -1)}])`;
const pathSeparatorRegex = new RegExp(
`\\\\${specialCharsLookahead}`,
'g',
);
fetchedSuggestions = fetchedSuggestions.map((suggestion) => ({
...suggestion,
label: suggestion.label.replace(pathSeparatorRegex, '/'),
value: suggestion.value.replace(pathSeparatorRegex, '/'),
}));

// Sort by depth, then directories first, then alphabetically
fetchedSuggestions.sort((a, b) => {
const depthA = (a.label.match(/\//g) || []).length;
const depthB = (b.label.match(/\//g) || []).length;

if (depthA !== depthB) {
return depthA - depthB;
}

const aIsDir = a.label.endsWith('/');
const bIsDir = b.label.endsWith('/');
if (aIsDir && !bIsDir) return -1;
if (!aIsDir && bIsDir) return 1;

// exclude extension when comparing
const filenameA = a.label.substring(
0,
a.label.length - path.extname(a.label).length,
);
const filenameB = b.label.substring(
0,
b.label.length - path.extname(b.label).length,
);

return (
filenameA.localeCompare(filenameB) || a.label.localeCompare(b.label)
);
});

if (isMounted) {
setSuggestions(fetchedSuggestions);
setShowSuggestions(fetchedSuggestions.length > 0);
setActiveSuggestionIndex(fetchedSuggestions.length > 0 ? 0 : -1);
setVisibleStartIndex(0);
}
} catch (error: unknown) {
if (isNodeError(error) && error.code === 'ENOENT') {
if (isMounted) {
setSuggestions([]);
setShowSuggestions(false);
}
} else {
console.error(
`Error fetching completion suggestions for ${partialPath}: ${getErrorMessage(error)}`,
);
if (isMounted) {
resetCompletionState();
}
}
}
if (isMounted) {
setIsLoadingSuggestions(false);
}
};

const debounceTimeout = setTimeout(fetchSuggestions, 100);

return () => {
isMounted = false;
clearTimeout(debounceTimeout);
};
}, [
completionMode,
suggestions.length,
isLoadingSuggestions,
reverseSearchActive,
buffer.text,
cursorRow,
cursorCol,
buffer.lines,
dirs,
cwd,
commandIndex,
resetCompletionState,
slashCommands,
commandContext,
config,
reverseSearchActive,
setSuggestions,
setShowSuggestions,
setActiveSuggestionIndex,
setIsLoadingSuggestions,
setIsPerfectMatch,
setVisibleStartIndex,
]);

const handleAutocomplete = useCallback(
@@ -180,23 +616,18 @@ export function useCommandCompletion(
}
const suggestion = suggestions[indexToUse].value;

let start = completionStart;
let end = completionEnd;
if (completionMode === CompletionMode.SLASH) {
start = slashCompletionRange.completionStart;
end = slashCompletionRange.completionEnd;
}

if (start === -1 || end === -1) {
if (completionStart.current === -1 || completionEnd.current === -1) {
return;
}

const isSlash = (buffer.lines[cursorRow] || '')[commandIndex] === '/';
let suggestionText = suggestion;
if (completionMode === CompletionMode.SLASH) {
if (isSlash) {
// If we are inserting (not replacing), and the preceding character is not a space, add one.
if (
start === end &&
start > 1 &&
(buffer.lines[cursorRow] || '')[start - 1] !== ' '
completionStart.current === completionEnd.current &&
completionStart.current > commandIndex + 1 &&
(buffer.lines[cursorRow] || '')[completionStart.current - 1] !== ' '
) {
suggestionText = ' ' + suggestionText;
}
@@ -205,20 +636,12 @@ export function useCommandCompletion(
suggestionText += ' ';

buffer.replaceRangeByOffset(
logicalPosToOffset(buffer.lines, cursorRow, start),
logicalPosToOffset(buffer.lines, cursorRow, end),
logicalPosToOffset(buffer.lines, cursorRow, completionStart.current),
logicalPosToOffset(buffer.lines, cursorRow, completionEnd.current),
suggestionText,
);
},
[
cursorRow,
buffer,
suggestions,
completionMode,
completionStart,
completionEnd,
slashCompletionRange,
],
[cursorRow, buffer, suggestions, commandIndex],
);

return {
@@ -448,7 +448,6 @@ describe('useGeminiStream', () => {
callId: 'call1',
responseParts: [{ text: 'tool 1 response' }],
error: undefined,
errorType: undefined,
resultDisplay: 'Tool 1 success display',
},
tool: {
@@ -656,7 +655,6 @@ describe('useGeminiStream', () => {
],
resultDisplay: undefined,
error: undefined,
errorType: undefined,
},
responseSubmittedToGemini: false,
};
@@ -681,7 +679,6 @@ describe('useGeminiStream', () => {
],
resultDisplay: undefined,
error: undefined,
errorType: undefined,
},
responseSubmittedToGemini: false,
};
@@ -778,7 +775,6 @@ describe('useGeminiStream', () => {
callId: 'call1',
responseParts: toolCallResponseParts,
error: undefined,
errorType: undefined,
resultDisplay: 'Tool 1 success display',
},
endTime: Date.now(),
@@ -1132,7 +1128,6 @@ describe('useGeminiStream', () => {
responseParts: [{ text: 'Memory saved' }],
resultDisplay: 'Success: Memory saved',
error: undefined,
errorType: undefined,
},
tool: {
name: 'save_memory',
@@ -1654,313 +1649,4 @@ describe('useGeminiStream', () => {
);
});
});

describe('Concurrent Execution Prevention', () => {
it('should prevent concurrent submitQuery calls', async () => {
let resolveFirstCall!: () => void;
let resolveSecondCall!: () => void;

const firstCallPromise = new Promise<void>((resolve) => {
resolveFirstCall = resolve;
});

const secondCallPromise = new Promise<void>((resolve) => {
resolveSecondCall = resolve;
});

// Mock a long-running stream for the first call
const firstStream = (async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'First call content',
};
await firstCallPromise; // Wait until we manually resolve
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})();

// Mock a stream for the second call (should not be used)
const secondStream = (async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Second call content',
};
await secondCallPromise;
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})();

let callCount = 0;
mockSendMessageStream.mockImplementation(() => {
callCount++;
if (callCount === 1) {
return firstStream;
} else {
return secondStream;
}
});

const { result } = renderTestHook();

// Start first call
const firstCallResult = act(async () => {
await result.current.submitQuery('First query');
});

// Wait a bit to ensure first call has started
await new Promise((resolve) => setTimeout(resolve, 10));

// Try to start second call while first is still running
const secondCallResult = act(async () => {
await result.current.submitQuery('Second query');
});

// Resolve both calls
resolveFirstCall();
resolveSecondCall();

await Promise.all([firstCallResult, secondCallResult]);

// Verify only one call was made to sendMessageStream
expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
expect(mockSendMessageStream).toHaveBeenCalledWith(
'First query',
expect.any(AbortSignal),
expect.any(String),
);

// Verify only the first query was added to history
const userMessages = mockAddItem.mock.calls.filter(
(call) => call[0].type === MessageType.USER,
);
expect(userMessages).toHaveLength(1);
expect(userMessages[0][0].text).toBe('First query');
});

it('should allow subsequent calls after first call completes', async () => {
// Mock streams that complete immediately
mockSendMessageStream
.mockReturnValueOnce(
(async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'First response',
};
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})(),
)
.mockReturnValueOnce(
(async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Second response',
};
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})(),
);

const { result } = renderTestHook();

// First call
await act(async () => {
await result.current.submitQuery('First query');
});

// Second call after first completes
await act(async () => {
await result.current.submitQuery('Second query');
});

// Both calls should have been made
expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
expect(mockSendMessageStream).toHaveBeenNthCalledWith(
1,
'First query',
expect.any(AbortSignal),
expect.any(String),
);
expect(mockSendMessageStream).toHaveBeenNthCalledWith(
2,
'Second query',
expect.any(AbortSignal),
expect.any(String),
);
});

it('should reset execution flag even when query preparation fails', async () => {
const { result } = renderTestHook();

// First call with empty query (should fail in preparation)
await act(async () => {
await result.current.submitQuery(' '); // Empty trimmed query
});

// Second call should work normally
mockSendMessageStream.mockReturnValue(
(async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Valid response',
};
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})(),
);

await act(async () => {
await result.current.submitQuery('Valid query');
});

// The second call should have been made
expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
expect(mockSendMessageStream).toHaveBeenCalledWith(
'Valid query',
expect.any(AbortSignal),
expect.any(String),
);
});

it('should reset execution flag when user cancels', async () => {
let resolveCancelledStream!: () => void;
const cancelledStreamPromise = new Promise<void>((resolve) => {
resolveCancelledStream = resolve;
});

// Mock a stream that can be cancelled
const cancelledStream = (async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Cancelled content',
};
await cancelledStreamPromise;
yield { type: ServerGeminiEventType.UserCancelled };
})();

mockSendMessageStream.mockReturnValueOnce(cancelledStream);

const { result } = renderTestHook();

// Start first call
const firstCallResult = act(async () => {
await result.current.submitQuery('First query');
});

// Wait a bit then resolve to trigger cancellation
await new Promise((resolve) => setTimeout(resolve, 10));
resolveCancelledStream();
await firstCallResult;

// Now try a second call - should work
mockSendMessageStream.mockReturnValue(
(async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Second response',
};
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})(),
);

await act(async () => {
await result.current.submitQuery('Second query');
});

// Both calls should have been made
expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
});

it('should reset execution flag when an error occurs', async () => {
// Mock a stream that throws an error
mockSendMessageStream.mockReturnValueOnce(
(async function* () {
yield { type: ServerGeminiEventType.Content, value: 'Error content' };
throw new Error('Stream error');
})(),
);

const { result } = renderTestHook();

// First call that will error
await act(async () => {
await result.current.submitQuery('Error query');
});

// Second call should work normally
mockSendMessageStream.mockReturnValue(
(async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Success response',
};
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})(),
);

await act(async () => {
await result.current.submitQuery('Success query');
});

// Both calls should have been attempted
expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
});

it('should handle rapid multiple concurrent calls correctly', async () => {
let resolveStream!: () => void;
const streamPromise = new Promise<void>((resolve) => {
resolveStream = resolve;
});

// Mock a long-running stream
const longStream = (async function* () {
yield {
type: ServerGeminiEventType.Content,
value: 'Long running content',
};
await streamPromise;
yield { type: ServerGeminiEventType.Finished, value: 'STOP' };
})();

mockSendMessageStream.mockReturnValue(longStream);

const { result } = renderTestHook();

// Start multiple concurrent calls
const calls = [
act(async () => {
await result.current.submitQuery('Query 1');
}),
act(async () => {
await result.current.submitQuery('Query 2');
}),
act(async () => {
await result.current.submitQuery('Query 3');
}),
act(async () => {
await result.current.submitQuery('Query 4');
}),
act(async () => {
await result.current.submitQuery('Query 5');
}),
];

// Wait a bit then resolve the stream
await new Promise((resolve) => setTimeout(resolve, 10));
resolveStream();

// Wait for all calls to complete
await Promise.all(calls);

// Only the first call should have been made
expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
expect(mockSendMessageStream).toHaveBeenCalledWith(
'Query 1',
expect.any(AbortSignal),
expect.any(String),
);

// Only one user message should have been added
const userMessages = mockAddItem.mock.calls.filter(
(call) => call[0].type === MessageType.USER,
);
expect(userMessages).toHaveLength(1);
expect(userMessages[0][0].text).toBe('Query 1');
});
});
});
@@ -93,12 +93,10 @@ export const useGeminiStream = (
|
||||
performMemoryRefresh: () => Promise<void>,
|
||||
modelSwitchedFromQuotaError: boolean,
|
||||
setModelSwitchedFromQuotaError: React.Dispatch<React.SetStateAction<boolean>>,
|
||||
onEditorClose: () => void,
|
||||
) => {
|
||||
const [initError, setInitError] = useState<string | null>(null);
|
||||
const abortControllerRef = useRef<AbortController | null>(null);
|
||||
const turnCancelledRef = useRef(false);
|
||||
const isSubmittingQueryRef = useRef(false);
|
||||
const [isResponding, setIsResponding] = useState<boolean>(false);
|
||||
const [thought, setThought] = useState<ThoughtSummary | null>(null);
|
||||
const [pendingHistoryItemRef, setPendingHistoryItem] =
|
||||
@@ -135,7 +133,6 @@ export const useGeminiStream = (
|
||||
config,
|
||||
setPendingHistoryItem,
|
||||
getPreferredEditor,
|
||||
onEditorClose,
|
||||
);
|
||||
|
||||
const pendingToolCallGroupDisplay = useMemo(
|
||||
@@ -625,11 +622,6 @@ export const useGeminiStream = (
|
||||
options?: { isContinuation: boolean },
|
||||
prompt_id?: string,
|
||||
) => {
|
||||
// Prevent concurrent executions of submitQuery
|
||||
if (isSubmittingQueryRef.current) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
(streamingState === StreamingState.Responding ||
|
||||
streamingState === StreamingState.WaitingForConfirmation) &&
|
||||
@@ -637,9 +629,6 @@ export const useGeminiStream = (
|
||||
)
|
||||
return;
|
||||
|
||||
// Set the flag to indicate we're now executing
|
||||
isSubmittingQueryRef.current = true;
|
||||
|
||||
const userMessageTimestamp = Date.now();
|
||||
|
||||
// Reset quota error flag when starting a new query (not a continuation)
|
||||
@@ -664,7 +653,6 @@ export const useGeminiStream = (
|
||||
);
|
||||
|
||||
if (!shouldProceed || queryToSend === null) {
|
||||
isSubmittingQueryRef.current = false;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -689,7 +677,6 @@ export const useGeminiStream = (
|
||||
);
|
||||
|
||||
if (processingStatus === StreamProcessingStatus.UserCancelled) {
|
||||
isSubmittingQueryRef.current = false;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -721,7 +708,6 @@ export const useGeminiStream = (
|
||||
}
|
||||
} finally {
|
||||
setIsResponding(false);
|
||||
isSubmittingQueryRef.current = false;
|
||||
}
|
||||
},
|
||||
[
|
||||
|
||||
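Note: the hunks above gate submitQuery behind an isSubmittingQueryRef so that re-entrant calls are dropped while a turn is in flight, and the flag is always released in a finally block. A minimal sketch of that guard pattern, assuming React's useRef semantics; useGuardedSubmit and doWork are illustrative names, not part of this codebase:

import { useCallback, useRef } from 'react';

// Minimal sketch: a ref-based guard that drops re-entrant calls while an
// async operation is still running (hypothetical helper, for illustration).
function useGuardedSubmit(doWork: (query: string) => Promise<void>) {
  const isRunningRef = useRef(false);
  return useCallback(
    async (query: string) => {
      if (isRunningRef.current) {
        return; // a submission is already in flight; ignore this one
      }
      isRunningRef.current = true;
      try {
        await doWork(query);
      } finally {
        isRunningRef.current = false; // always release the guard
      }
    },
    [doWork],
  );
}
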
@@ -38,6 +38,7 @@ export const WITTY_LOADING_PHRASES = [
  'Defragmenting memories... both RAM and personal...',
  'Rebooting the humor module...',
  'Caching the essentials (mostly cat memes)...',
  'Running sudo make me a sandwich...',
  'Optimizing for ludicrous speed',
  "Swapping bits... don't tell the bytes...",
  'Garbage collecting... be right back...',
@@ -65,10 +66,12 @@ export const WITTY_LOADING_PHRASES = [
  "Just a moment, I'm tuning the algorithms...",
  'Warp speed engaged...',
  'Mining for more Dilithium crystals...',
  "I'm Giving Her all she's got Captain!",
  "Don't panic...",
  'Following the white rabbit...',
  'The truth is in here... somewhere...',
  'Blowing on the cartridge...',
  'Looking for the princess in another castle...',
  'Loading... Do a barrel roll!',
  'Waiting for the respawn...',
  'Finishing the Kessel Run in less than 12 parsecs...',

@@ -70,7 +70,6 @@ export function useReactToolScheduler(
    React.SetStateAction<HistoryItemWithoutId | null>
  >,
  getPreferredEditor: () => EditorType | undefined,
  onEditorClose: () => void,
): [TrackedToolCall[], ScheduleFn, MarkToolsAsSubmittedFn] {
  const [toolCallsForDisplay, setToolCallsForDisplay] = useState<
    TrackedToolCall[]
@@ -141,7 +140,6 @@ export function useReactToolScheduler(
      onToolCallsUpdate: toolCallsUpdateHandler,
      getPreferredEditor,
      config,
      onEditorClose,
    }),
    [
      config,
@@ -149,7 +147,6 @@ export function useReactToolScheduler(
      allToolCallsCompleteHandler,
      toolCallsUpdateHandler,
      getPreferredEditor,
      onEditorClose,
    ],
  );

@@ -41,17 +41,12 @@ export function useReverseSearchCompletion(
    navigateDown,
  } = useCompletion();

  // whenever reverseSearchActive is on, filter history
  useEffect(() => {
    if (!reverseSearchActive) {
      resetCompletionState();
    }
  }, [reverseSearchActive, resetCompletionState]);

  useEffect(() => {
    if (!reverseSearchActive) {
      return;
    }

    const q = buffer.text.toLowerCase();
    const matches = shellHistory.reduce<Suggestion[]>((acc, cmd) => {
      const idx = cmd.toLowerCase().indexOf(q);
@@ -67,6 +62,7 @@ export function useReverseSearchCompletion(
    buffer.text,
    shellHistory,
    reverseSearchActive,
    resetCompletionState,
    setActiveSuggestionIndex,
    setShowSuggestions,
    setSuggestions,

@@ -1,434 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

/** @vitest-environment jsdom */

import { describe, it, expect, vi } from 'vitest';
import { renderHook, waitFor } from '@testing-library/react';
import { useSlashCompletion } from './useSlashCompletion.js';
import { CommandContext, SlashCommand } from '../commands/types.js';
import { useState } from 'react';
import { Suggestion } from '../components/SuggestionsDisplay.js';

// Test harness to capture the state from the hook's callbacks.
function useTestHarnessForSlashCompletion(
  enabled: boolean,
  query: string | null,
  slashCommands: readonly SlashCommand[],
  commandContext: CommandContext,
) {
  const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
  const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false);
  const [isPerfectMatch, setIsPerfectMatch] = useState(false);

  const { completionStart, completionEnd } = useSlashCompletion({
    enabled,
    query,
    slashCommands,
    commandContext,
    setSuggestions,
    setIsLoadingSuggestions,
    setIsPerfectMatch,
  });

  return {
    suggestions,
    isLoadingSuggestions,
    isPerfectMatch,
    completionStart,
    completionEnd,
  };
}

describe('useSlashCompletion', () => {
  // A minimal mock is sufficient for these tests.
  const mockCommandContext = {} as CommandContext;

  describe('Top-Level Commands', () => {
    it('should suggest all top-level commands for the root slash', async () => {
      const slashCommands = [
        { name: 'help', altNames: ['?'], description: 'Show help' },
        {
          name: 'stats',
          altNames: ['usage'],
          description: 'check session stats. Usage: /stats [model|tools]',
        },
        { name: 'clear', description: 'Clear the screen' },
        {
          name: 'memory',
          description: 'Manage memory',
          subCommands: [{ name: 'show', description: 'Show memory' }],
        },
        { name: 'chat', description: 'Manage chat history' },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions.length).toBe(slashCommands.length);
      expect(result.current.suggestions.map((s) => s.label)).toEqual(
        expect.arrayContaining(['help', 'clear', 'memory', 'chat', 'stats']),
      );
    });

    it('should filter commands based on partial input', async () => {
      const slashCommands = [
        { name: 'memory', description: 'Manage memory' },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/mem',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toEqual([
        { label: 'memory', value: 'memory', description: 'Manage memory' },
      ]);
    });

    it('should suggest commands based on partial altNames', async () => {
      const slashCommands = [
        {
          name: 'stats',
          altNames: ['usage'],
          description: 'check session stats. Usage: /stats [model|tools]',
        },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/usag',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toEqual([
        {
          label: 'stats',
          value: 'stats',
          description: 'check session stats. Usage: /stats [model|tools]',
        },
      ]);
    });

    it('should NOT provide suggestions for a perfectly typed command that is a leaf node', async () => {
      const slashCommands = [
        { name: 'clear', description: 'Clear the screen', action: vi.fn() },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/clear',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toHaveLength(0);
    });

    it.each([['/?'], ['/usage']])(
      'should not suggest commands when altNames is fully typed',
      async (query) => {
        const mockSlashCommands = [
          {
            name: 'help',
            altNames: ['?'],
            description: 'Show help',
            action: vi.fn(),
          },
          {
            name: 'stats',
            altNames: ['usage'],
            description: 'check session stats. Usage: /stats [model|tools]',
            action: vi.fn(),
          },
        ] as unknown as SlashCommand[];

        const { result } = renderHook(() =>
          useTestHarnessForSlashCompletion(
            true,
            query,
            mockSlashCommands,
            mockCommandContext,
          ),
        );

        expect(result.current.suggestions).toHaveLength(0);
      },
    );

    it('should not provide suggestions for a fully typed command that has no sub-commands or argument completion', async () => {
      const slashCommands = [
        { name: 'clear', description: 'Clear the screen' },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/clear ',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toHaveLength(0);
    });

    it('should not provide suggestions for an unknown command', async () => {
      const slashCommands = [
        { name: 'help', description: 'Show help' },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/unknown-command',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toHaveLength(0);
    });
  });

  describe('Sub-Commands', () => {
    it('should suggest sub-commands for a parent command', async () => {
      const slashCommands = [
        {
          name: 'memory',
          description: 'Manage memory',
          subCommands: [
            { name: 'show', description: 'Show memory' },
            { name: 'add', description: 'Add to memory' },
          ],
        },
      ] as unknown as SlashCommand[];

      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/memory',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toHaveLength(2);
      expect(result.current.suggestions).toEqual(
        expect.arrayContaining([
          { label: 'show', value: 'show', description: 'Show memory' },
          { label: 'add', value: 'add', description: 'Add to memory' },
        ]),
      );
    });

    it('should suggest all sub-commands when the query ends with the parent command and a space', async () => {
      const slashCommands = [
        {
          name: 'memory',
          description: 'Manage memory',
          subCommands: [
            { name: 'show', description: 'Show memory' },
            { name: 'add', description: 'Add to memory' },
          ],
        },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/memory ',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toHaveLength(2);
      expect(result.current.suggestions).toEqual(
        expect.arrayContaining([
          { label: 'show', value: 'show', description: 'Show memory' },
          { label: 'add', value: 'add', description: 'Add to memory' },
        ]),
      );
    });

    it('should filter sub-commands by prefix', async () => {
      const slashCommands = [
        {
          name: 'memory',
          description: 'Manage memory',
          subCommands: [
            { name: 'show', description: 'Show memory' },
            { name: 'add', description: 'Add to memory' },
          ],
        },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/memory a',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toEqual([
        { label: 'add', value: 'add', description: 'Add to memory' },
      ]);
    });

    it('should provide no suggestions for an invalid sub-command', async () => {
      const slashCommands = [
        {
          name: 'memory',
          description: 'Manage memory',
          subCommands: [
            { name: 'show', description: 'Show memory' },
            { name: 'add', description: 'Add to memory' },
          ],
        },
      ] as unknown as SlashCommand[];
      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/memory dothisnow',
          slashCommands,
          mockCommandContext,
        ),
      );

      expect(result.current.suggestions).toHaveLength(0);
    });
  });

  describe('Argument Completion', () => {
    it('should call the command.completion function for argument suggestions', async () => {
      const availableTags = [
        'my-chat-tag-1',
        'my-chat-tag-2',
        'another-channel',
      ];
      const mockCompletionFn = vi
        .fn()
        .mockImplementation(
          async (_context: CommandContext, partialArg: string) =>
            availableTags.filter((tag) => tag.startsWith(partialArg)),
        );

      const slashCommands = [
        {
          name: 'chat',
          description: 'Manage chat history',
          subCommands: [
            {
              name: 'resume',
              description: 'Resume a saved chat',
              completion: mockCompletionFn,
            },
          ],
        },
      ] as unknown as SlashCommand[];

      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/chat resume my-ch',
          slashCommands,
          mockCommandContext,
        ),
      );

      await waitFor(() => {
        expect(mockCompletionFn).toHaveBeenCalledWith(
          mockCommandContext,
          'my-ch',
        );
      });

      await waitFor(() => {
        expect(result.current.suggestions).toEqual([
          { label: 'my-chat-tag-1', value: 'my-chat-tag-1' },
          { label: 'my-chat-tag-2', value: 'my-chat-tag-2' },
        ]);
      });
    });

    it('should call command.completion with an empty string when args start with a space', async () => {
      const mockCompletionFn = vi
        .fn()
        .mockResolvedValue(['my-chat-tag-1', 'my-chat-tag-2', 'my-channel']);

      const slashCommands = [
        {
          name: 'chat',
          description: 'Manage chat history',
          subCommands: [
            {
              name: 'resume',
              description: 'Resume a saved chat',
              completion: mockCompletionFn,
            },
          ],
        },
      ] as unknown as SlashCommand[];

      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/chat resume ',
          slashCommands,
          mockCommandContext,
        ),
      );

      await waitFor(() => {
        expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, '');
      });

      await waitFor(() => {
        expect(result.current.suggestions).toHaveLength(3);
      });
    });

    it('should handle completion function that returns null', async () => {
      const completionFn = vi.fn().mockResolvedValue(null);
      const slashCommands = [
        {
          name: 'chat',
          description: 'Manage chat history',
          subCommands: [
            {
              name: 'resume',
              description: 'Resume a saved chat',
              completion: completionFn,
            },
          ],
        },
      ] as unknown as SlashCommand[];

      const { result } = renderHook(() =>
        useTestHarnessForSlashCompletion(
          true,
          '/chat resume ',
          slashCommands,
          mockCommandContext,
        ),
      );

      await waitFor(() => {
        expect(result.current.suggestions).toHaveLength(0);
      });
    });
  });
});
@@ -1,187 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { useState, useEffect } from 'react';
import { Suggestion } from '../components/SuggestionsDisplay.js';
import { CommandContext, SlashCommand } from '../commands/types.js';

export interface UseSlashCompletionProps {
  enabled: boolean;
  query: string | null;
  slashCommands: readonly SlashCommand[];
  commandContext: CommandContext;
  setSuggestions: (suggestions: Suggestion[]) => void;
  setIsLoadingSuggestions: (isLoading: boolean) => void;
  setIsPerfectMatch: (isMatch: boolean) => void;
}

export function useSlashCompletion(props: UseSlashCompletionProps): {
  completionStart: number;
  completionEnd: number;
} {
  const {
    enabled,
    query,
    slashCommands,
    commandContext,
    setSuggestions,
    setIsLoadingSuggestions,
    setIsPerfectMatch,
  } = props;
  const [completionStart, setCompletionStart] = useState(-1);
  const [completionEnd, setCompletionEnd] = useState(-1);

  useEffect(() => {
    if (!enabled || query === null) {
      return;
    }

    const fullPath = query?.substring(1) || '';
    const hasTrailingSpace = !!query?.endsWith(' ');
    const rawParts = fullPath.split(/\s+/).filter((p) => p);
    let commandPathParts = rawParts;
    let partial = '';

    if (!hasTrailingSpace && rawParts.length > 0) {
      partial = rawParts[rawParts.length - 1];
      commandPathParts = rawParts.slice(0, -1);
    }

    let currentLevel: readonly SlashCommand[] | undefined = slashCommands;
    let leafCommand: SlashCommand | null = null;

    for (const part of commandPathParts) {
      if (!currentLevel) {
        leafCommand = null;
        currentLevel = [];
        break;
      }
      const found: SlashCommand | undefined = currentLevel.find(
        (cmd) => cmd.name === part || cmd.altNames?.includes(part),
      );
      if (found) {
        leafCommand = found;
        currentLevel = found.subCommands as readonly SlashCommand[] | undefined;
      } else {
        leafCommand = null;
        currentLevel = [];
        break;
      }
    }

    let exactMatchAsParent: SlashCommand | undefined;
    if (!hasTrailingSpace && currentLevel) {
      exactMatchAsParent = currentLevel.find(
        (cmd) =>
          (cmd.name === partial || cmd.altNames?.includes(partial)) &&
          cmd.subCommands,
      );

      if (exactMatchAsParent) {
        leafCommand = exactMatchAsParent;
        currentLevel = exactMatchAsParent.subCommands;
        partial = '';
      }
    }

    setIsPerfectMatch(false);
    if (!hasTrailingSpace) {
      if (leafCommand && partial === '' && leafCommand.action) {
        setIsPerfectMatch(true);
      } else if (currentLevel) {
        const perfectMatch = currentLevel.find(
          (cmd) =>
            (cmd.name === partial || cmd.altNames?.includes(partial)) &&
            cmd.action,
        );
        if (perfectMatch) {
          setIsPerfectMatch(true);
        }
      }
    }

    const depth = commandPathParts.length;
    const isArgumentCompletion =
      leafCommand?.completion &&
      (hasTrailingSpace ||
        (rawParts.length > depth && depth > 0 && partial !== ''));

    if (hasTrailingSpace || exactMatchAsParent) {
      setCompletionStart(query.length);
      setCompletionEnd(query.length);
    } else if (partial) {
      if (isArgumentCompletion) {
        const commandSoFar = `/${commandPathParts.join(' ')}`;
        const argStartIndex =
          commandSoFar.length + (commandPathParts.length > 0 ? 1 : 0);
        setCompletionStart(argStartIndex);
      } else {
        setCompletionStart(query.length - partial.length);
      }
      setCompletionEnd(query.length);
    } else {
      setCompletionStart(1);
      setCompletionEnd(query.length);
    }

    if (isArgumentCompletion) {
      const fetchAndSetSuggestions = async () => {
        setIsLoadingSuggestions(true);
        const argString = rawParts.slice(depth).join(' ');
        const results =
          (await leafCommand!.completion!(commandContext, argString)) || [];
        const finalSuggestions = results.map((s) => ({ label: s, value: s }));
        setSuggestions(finalSuggestions);
        setIsLoadingSuggestions(false);
      };
      fetchAndSetSuggestions();
      return;
    }

    const commandsToSearch = currentLevel || [];
    if (commandsToSearch.length > 0) {
      let potentialSuggestions = commandsToSearch.filter(
        (cmd) =>
          cmd.description &&
          (cmd.name.startsWith(partial) ||
            cmd.altNames?.some((alt) => alt.startsWith(partial))),
      );

      if (potentialSuggestions.length > 0 && !hasTrailingSpace) {
        const perfectMatch = potentialSuggestions.find(
          (s) => s.name === partial || s.altNames?.includes(partial),
        );
        if (perfectMatch && perfectMatch.action) {
          potentialSuggestions = [];
        }
      }

      const finalSuggestions = potentialSuggestions.map((cmd) => ({
        label: cmd.name,
        value: cmd.name,
        description: cmd.description,
      }));

      setSuggestions(finalSuggestions);
      return;
    }

    setSuggestions([]);
  }, [
    enabled,
    query,
    slashCommands,
    commandContext,
    setSuggestions,
    setIsLoadingSuggestions,
    setIsPerfectMatch,
  ]);

  return {
    completionStart,
    completionEnd,
  };
}
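Note: the removed hook reports a [completionStart, completionEnd) window over the raw query, and the caller splices the chosen suggestion into that window. A minimal sketch of how such a window would be applied; applySuggestion is an illustrative helper, not an API of this repository:

// Splice a chosen suggestion value into the query using the
// [completionStart, completionEnd) window reported by useSlashCompletion.
function applySuggestion(
  query: string,
  completionStart: number,
  completionEnd: number,
  value: string,
): string {
  if (completionStart < 0 || completionEnd < completionStart) {
    return query; // no active completion window
  }
  return query.slice(0, completionStart) + value + query.slice(completionEnd);
}

// e.g. for query '/mem' the hook sets start=1, end=4, so:
// applySuggestion('/mem', 1, 4, 'memory') === '/memory'
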
@@ -1203,9 +1203,7 @@ describe('useVim hook', () => {
      });

      // Press escape to clear pending state
      act(() => {
        result.current.handleInput({ name: 'escape' });
      });
      exitInsertMode(result);

      // Now 'w' should just move cursor, not delete
      act(() => {
@@ -1217,69 +1215,6 @@ describe('useVim hook', () => {
      expect(testBuffer.vimMoveWordForward).toHaveBeenCalledWith(1);
    });
  });

  describe('NORMAL mode escape behavior', () => {
    it('should pass escape through when no pending operator is active', () => {
      mockVimContext.vimMode = 'NORMAL';
      const { result } = renderVimHook();

      const handled = result.current.handleInput({ name: 'escape' });

      expect(handled).toBe(false);
    });

    it('should handle escape and clear pending operator', () => {
      mockVimContext.vimMode = 'NORMAL';
      const { result } = renderVimHook();

      act(() => {
        result.current.handleInput({ sequence: 'd' });
      });

      let handled: boolean | undefined;
      act(() => {
        handled = result.current.handleInput({ name: 'escape' });
      });

      expect(handled).toBe(true);
    });
  });
});

describe('Shell command pass-through', () => {
  it('should pass through ctrl+r in INSERT mode', () => {
    mockVimContext.vimMode = 'INSERT';
    const { result } = renderVimHook();

    const handled = result.current.handleInput({ name: 'r', ctrl: true });

    expect(handled).toBe(false);
  });

  it('should pass through ! in INSERT mode when buffer is empty', () => {
    mockVimContext.vimMode = 'INSERT';
    const emptyBuffer = createMockBuffer('');
    const { result } = renderVimHook(emptyBuffer);

    const handled = result.current.handleInput({ sequence: '!' });

    expect(handled).toBe(false);
  });

  it('should handle ! as input in INSERT mode when buffer is not empty', () => {
    mockVimContext.vimMode = 'INSERT';
    const nonEmptyBuffer = createMockBuffer('not empty');
    const { result } = renderVimHook(nonEmptyBuffer);
    const key = { sequence: '!', name: '!' };

    act(() => {
      result.current.handleInput(key);
    });

    expect(nonEmptyBuffer.handleInput).toHaveBeenCalledWith(
      expect.objectContaining(key),
    );
  });
});

// Line operations (dd, cc) are tested in text-buffer.test.ts

@@ -260,8 +260,7 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {
      normalizedKey.name === 'tab' ||
      (normalizedKey.name === 'return' && !normalizedKey.ctrl) ||
      normalizedKey.name === 'up' ||
      normalizedKey.name === 'down' ||
      (normalizedKey.ctrl && normalizedKey.name === 'r')
      normalizedKey.name === 'down'
    ) {
      return false; // Let InputPrompt handle completion
    }
@@ -271,11 +270,6 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {
      return false; // Let InputPrompt handle clipboard functionality
    }

    // Let InputPrompt handle shell commands
    if (normalizedKey.sequence === '!' && buffer.text.length === 0) {
      return false;
    }

    // Special handling for Enter key to allow command submission (lower priority than completion)
    if (
      normalizedKey.name === 'return' &&
@@ -405,14 +399,10 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {

    // Handle NORMAL mode
    if (state.mode === 'NORMAL') {
      // If in NORMAL mode, allow escape to pass through to other handlers
      // if there's no pending operation.
      // Handle Escape key in NORMAL mode - clear all pending states
      if (normalizedKey.name === 'escape') {
        if (state.pendingOperator) {
          dispatch({ type: 'CLEAR_PENDING_STATES' });
          return true; // Handled by vim
        }
        return false; // Pass through to other handlers
        dispatch({ type: 'CLEAR_PENDING_STATES' });
        return true; // Handled by vim
      }

      // Handle count input (numbers 1-9, and 0 if count > 0)

@@ -8,9 +8,8 @@ import util from 'util';
import { ConsoleMessageItem } from '../types.js';

interface ConsolePatcherParams {
  onNewMessage?: (message: Omit<ConsoleMessageItem, 'id'>) => void;
  onNewMessage: (message: Omit<ConsoleMessageItem, 'id'>) => void;
  debugMode: boolean;
  stderr?: boolean;
}

export class ConsolePatcher {
@@ -47,22 +46,16 @@ export class ConsolePatcher {
      originalMethod: (...args: unknown[]) => void,
    ) =>
    (...args: unknown[]) => {
      if (this.params.stderr) {
        if (type !== 'debug' || this.params.debugMode) {
          this.originalConsoleError(this.formatArgs(args));
        }
      } else {
        if (this.params.debugMode) {
          originalMethod.apply(console, args);
        }
      if (this.params.debugMode) {
        originalMethod.apply(console, args);
      }

      if (type !== 'debug' || this.params.debugMode) {
        this.params.onNewMessage?.({
          type,
          content: this.formatArgs(args),
          count: 1,
        });
      }
      if (type !== 'debug' || this.params.debugMode) {
        this.params.onNewMessage({
          type,
          content: this.formatArgs(args),
          count: 1,
        });
      }
    };
}

@@ -1,21 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import * as os from 'os';
import * as path from 'path';

export function resolvePath(p: string): string {
  if (!p) {
    return '';
  }
  let expandedPath = p;
  if (p.toLowerCase().startsWith('%userprofile%')) {
    expandedPath = os.homedir() + p.substring('%userprofile%'.length);
  } else if (p === '~' || p.startsWith('~/')) {
    expandedPath = os.homedir() + p.substring(1);
  }
  return path.normalize(expandedPath);
}
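Note: the removed resolvePath helper expands a leading ~ or %USERPROFILE% to the home directory before normalizing. A usage sketch; the expected outputs assume os.homedir() === '/home/user' on a POSIX platform:

// Usage sketch for resolvePath as defined above.
console.log(resolvePath('~'));                 // '/home/user'
console.log(resolvePath('~/projects'));        // '/home/user/projects'
console.log(resolvePath('%userprofile%/src')); // '/home/user/src' (prefix match is case-insensitive)
console.log(resolvePath(''));                  // ''
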
@@ -33,7 +33,6 @@
    "chardet": "^2.1.0",
    "diff": "^7.0.0",
    "dotenv": "^17.1.0",
    "fdir": "^6.4.6",
    "glob": "^10.4.5",
    "google-auth-library": "^9.11.0",
    "html-to-text": "^9.0.5",
@@ -42,8 +41,7 @@
    "marked": "^15.0.12",
    "micromatch": "^4.0.8",
    "open": "^10.1.2",
    "openai": "5.11.0",
    "picomatch": "^4.0.1",
    "openai": "^5.7.0",
    "shell-quote": "^1.8.3",
    "simple-git": "^3.28.0",
    "strip-ansi": "^7.1.0",
@@ -52,12 +50,10 @@
    "ws": "^8.18.0"
  },
  "devDependencies": {
    "@qwen-code/qwen-code-test-utils": "file:../test-utils",
    "@types/diff": "^7.0.2",
    "@types/dotenv": "^6.1.1",
    "@types/micromatch": "^4.0.8",
    "@types/minimatch": "^5.1.2",
    "@types/picomatch": "^4.0.1",
    "@types/ws": "^8.5.10",
    "typescript": "^5.3.3",
    "vitest": "^3.1.1"

@@ -18,18 +18,7 @@ import {
} from '../core/contentGenerator.js';
import { GeminiClient } from '../core/client.js';
import { GitService } from '../services/gitService.js';

vi.mock('fs', async (importOriginal) => {
  const actual = await importOriginal<typeof import('fs')>();
  return {
    ...actual,
    existsSync: vi.fn().mockReturnValue(true),
    statSync: vi.fn().mockReturnValue({
      isDirectory: vi.fn().mockReturnValue(true),
    }),
    realpathSync: vi.fn((path) => path),
  };
});
import { IdeClient } from '../ide/ide-client.js';

vi.mock('fs', async (importOriginal) => {
  const actual = await importOriginal<typeof import('fs')>();
@@ -131,6 +120,7 @@ describe('Server Config (config.ts)', () => {
    telemetry: TELEMETRY_SETTINGS,
    sessionId: SESSION_ID,
    model: MODEL,
    ideClient: IdeClient.getInstance(false),
  };

  beforeEach(() => {

@@ -48,8 +48,6 @@ import { shouldAttemptBrowserLaunch } from '../utils/browser.js';
import { MCPOAuthConfig } from '../mcp/oauth-provider.js';
import { IdeClient } from '../ide/ide-client.js';
import type { Content } from '@google/genai';
import { logIdeConnection } from '../telemetry/loggers.js';
import { IdeConnectionEvent, IdeConnectionType } from '../telemetry/types.js';

// Re-export OAuth config type
export type { MCPOAuthConfig };
@@ -198,6 +196,7 @@ export interface ConfigParameters {
  summarizeToolOutput?: Record<string, SummarizeToolOutputSettings>;
  ideModeFeature?: boolean;
  ideMode?: boolean;
  ideClient?: IdeClient;
  enableOpenAILogging?: boolean;
  sampling_params?: Record<string, unknown>;
  systemPromptMappings?: Array<{
@@ -210,7 +209,6 @@ export interface ConfigParameters {
    maxRetries?: number;
  };
  cliVersion?: string;
  loadMemoryFromIncludeDirectories?: boolean;
}

export class Config {
@@ -285,8 +283,6 @@ export class Config {
    maxRetries?: number;
  };
  private readonly cliVersion?: string;
  private readonly loadMemoryFromIncludeDirectories: boolean = false;

  constructor(params: ConfigParameters) {
    this.sessionId = params.sessionId;
    this.embeddingModel =
@@ -349,20 +345,15 @@ export class Config {
    this.summarizeToolOutput = params.summarizeToolOutput;
    this.ideModeFeature = params.ideModeFeature ?? false;
    this.ideMode = params.ideMode ?? false;
    this.ideClient = IdeClient.getInstance();
    if (this.ideMode && this.ideModeFeature) {
      this.ideClient.connect();
      logIdeConnection(this, new IdeConnectionEvent(IdeConnectionType.START));
    }
    this.ideClient =
      params.ideClient ??
      IdeClient.getInstance(this.ideMode && this.ideModeFeature);
    this.systemPromptMappings = params.systemPromptMappings;
    this.enableOpenAILogging = params.enableOpenAILogging ?? false;
    this.sampling_params = params.sampling_params;
    this.contentGenerator = params.contentGenerator;
    this.cliVersion = params.cliVersion;

    this.loadMemoryFromIncludeDirectories =
      params.loadMemoryFromIncludeDirectories ?? false;

    if (params.contextFileName) {
      setGeminiMdFilename(params.contextFileName);
    }
@@ -424,10 +415,6 @@ export class Config {
    return this.sessionId;
  }

  shouldLoadMemoryFromIncludeDirectories(): boolean {
    return this.loadMemoryFromIncludeDirectories;
  }

  getContentGeneratorConfig(): ContentGeneratorConfig {
    return this.contentGeneratorConfig;
  }
@@ -711,14 +698,12 @@ export class Config {
    this.ideMode = value;
  }

  async setIdeModeAndSyncConnection(value: boolean): Promise<void> {
    this.ideMode = value;
    if (value) {
      await this.ideClient.connect();
      logIdeConnection(this, new IdeConnectionEvent(IdeConnectionType.SESSION));
    } else {
      this.ideClient.disconnect();
    }
  setIdeClientDisconnected(): void {
    this.ideClient.setDisconnected();
  }

  setIdeClientConnected(): void {
    this.ideClient.reconnect(this.ideMode && this.ideModeFeature);
  }

  getEnableOpenAILogging(): boolean {

@@ -7,6 +7,7 @@
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { Config } from './config.js';
import { DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_FLASH_MODEL } from './models.js';
import { IdeClient } from '../ide/ide-client.js';
import fs from 'node:fs';

vi.mock('node:fs');
@@ -25,6 +26,7 @@ describe('Flash Model Fallback Configuration', () => {
      debugMode: false,
      cwd: '/test',
      model: DEFAULT_GEMINI_MODEL,
      ideClient: IdeClient.getInstance(false),
    });

    // Initialize contentGeneratorConfig for testing
@@ -49,6 +51,7 @@ describe('Flash Model Fallback Configuration', () => {
      debugMode: false,
      cwd: '/test',
      model: DEFAULT_GEMINI_MODEL,
      ideClient: IdeClient.getInstance(false),
    });

    // Should not crash when contentGeneratorConfig is undefined
@@ -72,6 +75,7 @@ describe('Flash Model Fallback Configuration', () => {
      debugMode: false,
      cwd: '/test',
      model: 'custom-model',
      ideClient: IdeClient.getInstance(false),
    });

    expect(newConfig.getModel()).toBe('custom-model');

@@ -136,7 +136,6 @@ describe('CoreToolScheduler', () => {
      onAllToolCallsComplete,
      onToolCallsUpdate,
      getPreferredEditor: () => 'vscode',
      onEditorClose: vi.fn(),
    });

    const abortController = new AbortController();
@@ -206,7 +205,6 @@ describe('CoreToolScheduler with payload', () => {
      onAllToolCallsComplete,
      onToolCallsUpdate,
      getPreferredEditor: () => 'vscode',
      onEditorClose: vi.fn(),
    });

    const abortController = new AbortController();
@@ -484,7 +482,6 @@ describe('CoreToolScheduler edit cancellation', () => {
      onAllToolCallsComplete,
      onToolCallsUpdate,
      getPreferredEditor: () => 'vscode',
      onEditorClose: vi.fn(),
    });

    const abortController = new AbortController();
@@ -574,7 +571,6 @@ describe('CoreToolScheduler YOLO mode', () => {
      onAllToolCallsComplete,
      onToolCallsUpdate,
      getPreferredEditor: () => 'vscode',
      onEditorClose: vi.fn(),
    });

    const abortController = new AbortController();

@@ -224,7 +224,6 @@ interface CoreToolSchedulerOptions {
  onToolCallsUpdate?: ToolCallsUpdateHandler;
  getPreferredEditor: () => EditorType | undefined;
  config: Config;
  onEditorClose: () => void;
}

export class CoreToolScheduler {
@@ -235,7 +234,6 @@ export class CoreToolScheduler {
  private onToolCallsUpdate?: ToolCallsUpdateHandler;
  private getPreferredEditor: () => EditorType | undefined;
  private config: Config;
  private onEditorClose: () => void;

  constructor(options: CoreToolSchedulerOptions) {
    this.config = options.config;
@@ -244,7 +242,6 @@ export class CoreToolScheduler {
    this.onAllToolCallsComplete = options.onAllToolCallsComplete;
    this.onToolCallsUpdate = options.onToolCallsUpdate;
    this.getPreferredEditor = options.getPreferredEditor;
    this.onEditorClose = options.onEditorClose;
  }

  private setStatusInternal(
@@ -566,7 +563,6 @@ export class CoreToolScheduler {
        modifyContext as ModifyContext<typeof waitingToolCall.request.args>,
        editorType,
        signal,
        this.onEditorClose,
      );
      this.setArgsInternal(callId, updatedParams);
      this.setStatusInternal(callId, 'awaiting_approval', {

@@ -564,7 +564,7 @@ export class OpenAIContentGenerator implements ContentGenerator {

    // Add combined text if any
    if (combinedText) {
      combinedParts.push({ text: combinedText.trimEnd() });
      combinedParts.push({ text: combinedText });
    }

    // Add function calls
@@ -1138,11 +1138,7 @@ export class OpenAIContentGenerator implements ContentGenerator {

    // Handle text content
    if (choice.message.content) {
      if (typeof choice.message.content === 'string') {
        parts.push({ text: choice.message.content.trimEnd() });
      } else {
        parts.push({ text: choice.message.content });
      }
      parts.push({ text: choice.message.content });
    }

    // Handle tool calls
@@ -1232,11 +1228,7 @@ export class OpenAIContentGenerator implements ContentGenerator {

    // Handle text content
    if (choice.delta?.content) {
      if (typeof choice.delta.content === 'string') {
        parts.push({ text: choice.delta.content.trimEnd() });
      } else {
        parts.push({ text: choice.delta.content });
      }
      parts.push({ text: choice.delta.content });
    }

    // Handle tool calls - only accumulate during streaming, emit when complete
@@ -1284,9 +1276,7 @@ export class OpenAIContentGenerator implements ContentGenerator {

        parts.push({
          functionCall: {
            id:
              accumulatedCall.id ||
              `call_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`,
            id: accumulatedCall.id,
            name: accumulatedCall.name,
            args,
          },
@@ -1762,7 +1752,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
      }
    }

    messageContent = textParts.join('').trimEnd();
    messageContent = textParts.join('');
  }

  const choice: OpenAIChoice = {

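Note: one side of the functionCall hunk above falls back to a synthetic id when the streamed tool call has none. A standalone sketch of that fallback, so downstream bookkeeping can still key on an id; toolCallId is an illustrative name, not an export of this repository:

// Minimal sketch of the synthetic tool-call id fallback shown above.
function toolCallId(providedId?: string): string {
  return (
    providedId ||
    `call_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`
  );
}
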
@@ -33,58 +33,34 @@ export enum IDEConnectionStatus {
 * Manages the connection to and interaction with the IDE server.
 */
export class IdeClient {
  private static instance: IdeClient;
  private client: Client | undefined = undefined;
  client: Client | undefined = undefined;
  private state: IDEConnectionState = {
    status: IDEConnectionStatus.Disconnected,
    details:
      'IDE integration is currently disabled. To enable it, run /ide enable.',
  };
  private static instance: IdeClient;
  private readonly currentIde: DetectedIde | undefined;
  private readonly currentIdeDisplayName: string | undefined;

  private constructor() {
  constructor(ideMode: boolean) {
    this.currentIde = detectIde();
    if (this.currentIde) {
      this.currentIdeDisplayName = getIdeDisplayName(this.currentIde);
    }
    if (!ideMode) {
      return;
    }
    this.init().catch((err) => {
      logger.debug('Failed to initialize IdeClient:', err);
    });
  }

  static getInstance(): IdeClient {
  static getInstance(ideMode: boolean): IdeClient {
    if (!IdeClient.instance) {
      IdeClient.instance = new IdeClient();
      IdeClient.instance = new IdeClient(ideMode);
    }
    return IdeClient.instance;
  }

  async connect(): Promise<void> {
    this.setState(IDEConnectionStatus.Connecting);

    if (!this.currentIde || !this.currentIdeDisplayName) {
      this.setState(IDEConnectionStatus.Disconnected);
      return;
    }

    if (!this.validateWorkspacePath()) {
      return;
    }

    const port = this.getPortFromEnv();
    if (!port) {
      return;
    }

    await this.establishConnection(port);
  }

  disconnect() {
    this.setState(
      IDEConnectionStatus.Disconnected,
      'IDE integration disabled. To enable it again, run /ide enable.',
    );
    this.client?.close();
  }

  getCurrentIde(): DetectedIde | undefined {
    return this.currentIde;
  }
@@ -94,60 +70,45 @@ export class IdeClient {
  }

  private setState(status: IDEConnectionStatus, details?: string) {
    const isAlreadyDisconnected =
      this.state.status === IDEConnectionStatus.Disconnected &&
      status === IDEConnectionStatus.Disconnected;

    // Only update details if the state wasn't already disconnected, so that
    // the first detail message is preserved.
    if (!isAlreadyDisconnected) {
      this.state = { status, details };
    }
    this.state = { status, details };

    if (status === IDEConnectionStatus.Disconnected) {
      logger.debug('IDE integration disconnected:', details);
      logger.debug('IDE integration is disconnected. ', details);
      ideContext.clearIdeContext();
    }
  }

  private validateWorkspacePath(): boolean {
    const ideWorkspacePath = process.env['GEMINI_CLI_IDE_WORKSPACE_PATH'];
    if (ideWorkspacePath === undefined) {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `Failed to connect to IDE companion extension for ${this.currentIdeDisplayName}. Please ensure the extension is running and try refreshing your terminal. To install the extension, run /ide install.`,
      );
      return false;
    }
    if (ideWorkspacePath === '') {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `To use this feature, please open a single workspace folder in ${this.currentIdeDisplayName} and try again.`,
      );
      return false;
    }
    if (ideWorkspacePath !== process.cwd()) {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `Directory mismatch. Gemini CLI is running in a different location than the open workspace in ${this.currentIdeDisplayName}. Please run the CLI from the same directory as your project's root folder.`,
      );
      return false;
    }
    return true;
  }

  private getPortFromEnv(): string | undefined {
    const port = process.env['GEMINI_CLI_IDE_SERVER_PORT'];
    if (!port) {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `Failed to connect to IDE companion extension for ${this.currentIdeDisplayName}. Please ensure the extension is running and try refreshing your terminal. To install the extension, run /ide install.`,
        'Gemini CLI Companion extension not found. Install via /ide install and restart the CLI in a fresh terminal window.',
      );
      return undefined;
    }
    return port;
  }

  private validateWorkspacePath(): boolean {
    const ideWorkspacePath = process.env['GEMINI_CLI_IDE_WORKSPACE_PATH'];
    if (!ideWorkspacePath) {
      this.setState(
        IDEConnectionStatus.Disconnected,
        'IDE integration requires a single workspace folder to be open in the IDE. Please ensure one folder is open and try again.',
      );
      return false;
    }
    if (ideWorkspacePath !== process.cwd()) {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `Gemini CLI is running in a different directory (${process.cwd()}) from the IDE's open workspace (${ideWorkspacePath}). Please run Gemini CLI in the same directory.`,
      );
      return false;
    }
    return true;
  }

  private registerClientHandlers() {
    if (!this.client) {
      return;
@@ -159,20 +120,20 @@ export class IdeClient {
        ideContext.setIdeContext(notification.params);
      },
    );

    this.client.onerror = (_error) => {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `IDE connection error. The connection was lost unexpectedly. Please try reconnecting by running /ide enable`,
      );
      this.setState(IDEConnectionStatus.Disconnected, 'Client error.');
    };

    this.client.onclose = () => {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `IDE connection error. The connection was lost unexpectedly. Please try reconnecting by running /ide enable`,
      );
      this.setState(IDEConnectionStatus.Disconnected, 'Connection closed.');
    };
  }

  async reconnect(ideMode: boolean) {
    IdeClient.instance = new IdeClient(ideMode);
  }

  private async establishConnection(port: string) {
    let transport: StreamableHTTPClientTransport | undefined;
    try {
@@ -189,12 +150,12 @@ export class IdeClient {
      this.registerClientHandlers();

      await this.client.connect(transport);
      this.registerClientHandlers();

      this.setState(IDEConnectionStatus.Connected);
    } catch (_error) {
    } catch (error) {
      this.setState(
        IDEConnectionStatus.Disconnected,
        `Failed to connect to IDE companion extension for ${this.currentIdeDisplayName}. Please ensure the extension is running and try refreshing your terminal. To install the extension, run /ide install.`,
        `Failed to connect to IDE server: ${error}`,
      );
      if (transport) {
        try {

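Note: one side of the comparison above makes IdeClient a parameterized singleton whose reconnect simply swaps in a fresh instance. A minimal sketch of that pattern under simplified, illustrative names (Connection is not part of this codebase):

// Parameterized singleton whose reconnect replaces the instance,
// mirroring the IdeClient.getInstance/reconnect shape shown above.
class Connection {
  private static instance: Connection | undefined;

  private constructor(readonly enabled: boolean) {}

  static getInstance(enabled: boolean): Connection {
    if (!Connection.instance) {
      Connection.instance = new Connection(enabled);
    }
    return Connection.instance;
  }

  reconnect(enabled: boolean): void {
    Connection.instance = new Connection(enabled); // drop and rebuild
  }
}
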
@@ -41,7 +41,6 @@ export * from './utils/shell-utils.js';
export * from './utils/systemEncoding.js';
export * from './utils/textUtils.js';
export * from './utils/formatters.js';
export * from './utils/filesearch/fileSearch.js';

// Export services
export * from './services/fileDiscoveryService.js';

@@ -4,17 +4,7 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import { vi } from 'vitest';

// Mock dependencies AT THE TOP
const mockOpenBrowserSecurely = vi.hoisted(() => vi.fn());
vi.mock('../utils/secure-browser-launcher.js', () => ({
  openBrowserSecurely: mockOpenBrowserSecurely,
}));
vi.mock('node:crypto');
vi.mock('./oauth-token-storage.js');

import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as http from 'node:http';
import * as crypto from 'node:crypto';
import {
@@ -25,6 +15,14 @@ import {
} from './oauth-provider.js';
import { MCPOAuthTokenStorage, MCPOAuthToken } from './oauth-token-storage.js';

// Mock dependencies
const mockOpenBrowserSecurely = vi.hoisted(() => vi.fn());
vi.mock('../utils/secure-browser-launcher.js', () => ({
  openBrowserSecurely: mockOpenBrowserSecurely,
}));
vi.mock('node:crypto');
vi.mock('./oauth-token-storage.js');

// Mock fetch globally
const mockFetch = vi.fn();
global.fetch = mockFetch;
@@ -48,7 +46,6 @@ describe('MCPOAuthProvider', () => {
    tokenUrl: 'https://auth.example.com/token',
    scopes: ['read', 'write'],
    redirectUri: 'http://localhost:7777/oauth/callback',
    audiences: ['https://api.example.com'],
  };

  const mockToken: MCPOAuthToken = {
@@ -723,105 +720,6 @@ describe('MCPOAuthProvider', () => {
    expect(capturedUrl!).toContain('code_challenge_method=S256');
    expect(capturedUrl!).toContain('scope=read+write');
    expect(capturedUrl!).toContain('resource=https%3A%2F%2Fauth.example.com');
    expect(capturedUrl!).toContain('audience=https%3A%2F%2Fapi.example.com');
  });

  it('should correctly append parameters to an authorization URL that already has query params', async () => {
    // Mock to capture the URL that would be opened
    let capturedUrl: string;
    mockOpenBrowserSecurely.mockImplementation((url: string) => {
      capturedUrl = url;
      return Promise.resolve();
    });

    let callbackHandler: unknown;
    vi.mocked(http.createServer).mockImplementation((handler) => {
      callbackHandler = handler;
      return mockHttpServer as unknown as http.Server;
    });

    mockHttpServer.listen.mockImplementation((port, callback) => {
      callback?.();
      setTimeout(() => {
        const mockReq = {
          url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw',
        };
        const mockRes = {
          writeHead: vi.fn(),
          end: vi.fn(),
        };
        (callbackHandler as (req: unknown, res: unknown) => void)(
          mockReq,
          mockRes,
        );
      }, 10);
    });

    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve(mockTokenResponse),
    });

    const configWithParamsInUrl = {
      ...mockConfig,
      authorizationUrl: 'https://auth.example.com/authorize?audience=1234',
    };

    await MCPOAuthProvider.authenticate('test-server', configWithParamsInUrl);

    const url = new URL(capturedUrl!);
    expect(url.searchParams.get('audience')).toBe('1234');
    expect(url.searchParams.get('client_id')).toBe('test-client-id');
    expect(url.search.startsWith('?audience=1234&')).toBe(true);
  });

  it('should correctly append parameters to a URL with a fragment', async () => {
    // Mock to capture the URL that would be opened
    let capturedUrl: string;
    mockOpenBrowserSecurely.mockImplementation((url: string) => {
      capturedUrl = url;
      return Promise.resolve();
    });

    let callbackHandler: unknown;
    vi.mocked(http.createServer).mockImplementation((handler) => {
      callbackHandler = handler;
      return mockHttpServer as unknown as http.Server;
    });

    mockHttpServer.listen.mockImplementation((port, callback) => {
      callback?.();
      setTimeout(() => {
        const mockReq = {
          url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw',
        };
        const mockRes = {
          writeHead: vi.fn(),
          end: vi.fn(),
        };
        (callbackHandler as (req: unknown, res: unknown) => void)(
          mockReq,
          mockRes,
        );
      }, 10);
    });

    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: () => Promise.resolve(mockTokenResponse),
    });

    const configWithFragment = {
      ...mockConfig,
      authorizationUrl: 'https://auth.example.com/authorize#login',
    };

    await MCPOAuthProvider.authenticate('test-server', configWithFragment);

    const url = new URL(capturedUrl!);
    expect(url.searchParams.get('client_id')).toBe('test-client-id');
    expect(url.hash).toBe('#login');
    expect(url.pathname).toBe('/authorize');
  });
  });
});

@@ -22,7 +22,6 @@ export interface MCPOAuthConfig {
  authorizationUrl?: string;
  tokenUrl?: string;
  scopes?: string[];
  audiences?: string[];
  redirectUri?: string;
  tokenParamName?: string; // For SSE connections, specifies the query parameter name for the token
}
@@ -298,10 +297,6 @@ export class MCPOAuthProvider {
      params.append('scope', config.scopes.join(' '));
    }

    if (config.audiences && config.audiences.length > 0) {
      params.append('audience', config.audiences.join(' '));
    }

    // Add resource parameter for MCP OAuth spec compliance
    // Use the MCP server URL if provided, otherwise fall back to authorization URL
    const resourceUrl = mcpServerUrl || config.authorizationUrl!;
@@ -313,11 +308,7 @@ export class MCPOAuthProvider {
      );
    }

    const url = new URL(config.authorizationUrl!);
    params.forEach((value, key) => {
      url.searchParams.append(key, value);
    });
    return url.toString();
    return `${config.authorizationUrl}?${params.toString()}`;
  }

  /**
@@ -351,10 +342,6 @@ export class MCPOAuthProvider {
      params.append('client_secret', config.clientSecret);
    }

    if (config.audiences && config.audiences.length > 0) {
      params.append('audience', config.audiences.join(' '));
    }

    // Add resource parameter for MCP OAuth spec compliance
    // Use the MCP server URL if provided, otherwise fall back to token URL
    const resourceUrl = mcpServerUrl || config.tokenUrl!;
@@ -413,10 +400,6 @@ export class MCPOAuthProvider {
      params.append('scope', config.scopes.join(' '));
    }

    if (config.audiences && config.audiences.length > 0) {
      params.append('audience', config.audiences.join(' '));
    }

    // Add resource parameter for MCP OAuth spec compliance
    // Use the MCP server URL if provided, otherwise fall back to token URL
    const resourceUrl = mcpServerUrl || tokenUrl;

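Note: the hunk above contrasts two ways of appending OAuth parameters to the authorization URL. Plain string concatenation breaks when the base URL already carries a query string or fragment, while the URL API merges parameters safely. A minimal sketch, using the same example URLs as the tests:

const params = new URLSearchParams({ client_id: 'test-client-id' });

// Concatenation: yields '...?audience=1234?client_id=...' with two '?'.
const broken = `https://auth.example.com/authorize?audience=1234?${params.toString()}`;

// URL API: preserves the existing query parameter and the fragment.
const url = new URL('https://auth.example.com/authorize?audience=1234#login');
params.forEach((value, key) => {
  url.searchParams.append(key, value);
});
// url.toString() ===
//   'https://auth.example.com/authorize?audience=1234&client_id=test-client-id#login'
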
@@ -53,22 +53,4 @@ export class PromptRegistry {
|
||||
}
|
||||
return serverPrompts.sort((a, b) => a.name.localeCompare(b.name));
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears all the prompts from the registry.
|
||||
*/
|
||||
clear(): void {
|
||||
this.prompts.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes all prompts from a specific server.
|
||||
*/
|
||||
removePromptsByServer(serverName: string): void {
|
||||
for (const [name, prompt] of this.prompts.entries()) {
|
||||
if (prompt.serverName === serverName) {
|
||||
this.prompts.delete(name);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ import {
  NextSpeakerCheckEvent,
  SlashCommandEvent,
  MalformedJsonResponseEvent,
  IdeConnectionEvent,
} from '../types.js';
import { EventMetadataKey } from './event-metadata-key.js';
import { Config } from '../../config/config.js';
@@ -45,7 +44,6 @@ const loop_detected_event_name = 'loop_detected';
const next_speaker_check_event_name = 'next_speaker_check';
const slash_command_event_name = 'slash_command';
const malformed_json_response_event_name = 'malformed_json_response';
const ide_connection_event_name = 'ide_connection';

export interface LogResponse {
  nextRequestWaitMs?: number;
@@ -580,18 +578,6 @@ export class ClearcutLogger {
    this.flushIfNeeded();
  }

  logIdeConnectionEvent(event: IdeConnectionEvent): void {
    const data = [
      {
        gemini_cli_key: EventMetadataKey.GEMINI_CLI_IDE_CONNECTION_TYPE,
        value: JSON.stringify(event.connection_type),
      },
    ];

    this.enqueueLogEvent(this.createLogEvent(ide_connection_event_name, data));
    this.flushIfNeeded();
  }

  logEndSessionEvent(event: EndSessionEvent): void {
    const data = [
      {

@@ -190,13 +190,6 @@ export enum EventMetadataKey {

  // Logs the model that produced the malformed JSON response.
  GEMINI_CLI_MALFORMED_JSON_RESPONSE_MODEL = 45,

  // ==========================================================================
  // IDE Connection Event Keys
  // ===========================================================================

  // Logs the type of the IDE connection.
  GEMINI_CLI_IDE_CONNECTION_TYPE = 46,
}

export function getEventMetadataKey(

@@ -15,7 +15,6 @@ export const EVENT_CLI_CONFIG = 'qwen-code.config';
export const EVENT_FLASH_FALLBACK = 'qwen-code.flash_fallback';
export const EVENT_NEXT_SPEAKER_CHECK = 'qwen-code.next_speaker_check';
export const EVENT_SLASH_COMMAND = 'qwen-code.slash_command';
export const EVENT_IDE_CONNECTION = 'qwen-code.ide_connection';

export const METRIC_TOOL_CALL_COUNT = 'qwen-code.tool.call.count';
export const METRIC_TOOL_CALL_LATENCY = 'qwen-code.tool.call.latency';

@@ -12,7 +12,6 @@ import {
  EVENT_API_REQUEST,
  EVENT_API_RESPONSE,
  EVENT_CLI_CONFIG,
  EVENT_IDE_CONNECTION,
  EVENT_TOOL_CALL,
  EVENT_USER_PROMPT,
  EVENT_FLASH_FALLBACK,
@@ -24,7 +23,6 @@ import {
  ApiErrorEvent,
  ApiRequestEvent,
  ApiResponseEvent,
  IdeConnectionEvent,
  StartSessionEvent,
  ToolCallEvent,
  UserPromptEvent,
@@ -357,23 +355,3 @@ export function logSlashCommand(
  };
  logger.emit(logRecord);
}

export function logIdeConnection(
  config: Config,
  event: IdeConnectionEvent,
): void {
  if (!isTelemetrySdkInitialized()) return;

  const attributes: LogAttributes = {
    ...getCommonAttributes(config),
    ...event,
    'event.name': EVENT_IDE_CONNECTION,
  };

  const logger = logs.getLogger(SERVICE_NAME);
  const logRecord: LogRecord = {
    body: `Ide connection. Type: ${event.connection_type}.`,
    attributes,
  };
  logger.emit(logRecord);
}

@@ -322,16 +322,19 @@ export class QwenLogger {
    this.flushIfNeeded();
  }

  logApiRequestEvent(event: ApiRequestEvent): void {
    const rumEvent = this.createResourceEvent('api', 'api_request', {
      properties: {
        model: event.model,
        prompt_id: event.prompt_id,
      },
    });
  logApiRequestEvent(_event: ApiRequestEvent): void {
    // ignore for now
    return;

    this.enqueueLogEvent(rumEvent);
    this.flushIfNeeded();
    // const rumEvent = this.createResourceEvent('api', 'api_request', {
    //   properties: {
    //     model: event.model,
    //     prompt_id: event.prompt_id,
    //   },
    // });

    // this.enqueueLogEvent(rumEvent);
    // this.flushIfNeeded();
  }

  logApiResponseEvent(event: ApiResponseEvent): void {

@@ -12,6 +12,7 @@ import {
} from './sdk.js';
import { Config } from '../config/config.js';
import { NodeSDK } from '@opentelemetry/sdk-node';
import { IdeClient } from '../ide/ide-client.js';

vi.mock('@opentelemetry/sdk-node');
vi.mock('../config/config.js');
@@ -29,6 +30,7 @@ describe('telemetry', () => {
      targetDir: '/test/dir',
      debugMode: false,
      cwd: '/test/dir',
      ideClient: IdeClient.getInstance(false),
    });
    vi.spyOn(mockConfig, 'getTelemetryEnabled').mockReturnValue(true);
    vi.spyOn(mockConfig, 'getTelemetryOtlpEndpoint').mockReturnValue(

@@ -314,23 +314,6 @@ export class MalformedJsonResponseEvent {
  }
}

export enum IdeConnectionType {
  START = 'start',
  SESSION = 'session',
}

export class IdeConnectionEvent {
  'event.name': 'ide_connection';
  'event.timestamp': string; // ISO 8601
  connection_type: IdeConnectionType;

  constructor(connection_type: IdeConnectionType) {
    this['event.name'] = 'ide_connection';
    this['event.timestamp'] = new Date().toISOString();
    this.connection_type = connection_type;
  }
}

export type TelemetryEvent =
  | StartSessionEvent
  | EndSessionEvent
@@ -343,5 +326,4 @@ export type TelemetryEvent =
  | LoopDetectedEvent
  | NextSpeakerCheckEvent
  | SlashCommandEvent
  | MalformedJsonResponseEvent
  | IdeConnectionEvent;
  | MalformedJsonResponseEvent;

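For orientation, the event class being removed above was constructed and emitted roughly as follows; this is a hedged sketch assembled only from the hunks shown here, and `config` is an assumed in-scope Config instance.

// Hedged sketch: constructing and logging the removed IdeConnectionEvent.
const event = new IdeConnectionEvent(IdeConnectionType.START);
// Emits an OTel log record named 'qwen-code.ide_connection' (per the
// EVENT_IDE_CONNECTION constant deleted in the same change).
logIdeConnection(config, event);
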
@@ -58,7 +58,9 @@ describe('mcp-client', () => {
    const mockedClient = {} as unknown as ClientLib.Client;
    const consoleErrorSpy = vi
      .spyOn(console, 'error')
      .mockImplementation(() => {});
      .mockImplementation(() => {
        // no-op
      });

    const testError = new Error('Invalid tool name');
    vi.mocked(DiscoveredMCPTool).mockImplementation(
@@ -111,17 +113,12 @@ describe('mcp-client', () => {
        { name: 'prompt2' },
      ],
    });
    const mockGetServerCapabilities = vi.fn().mockReturnValue({
      prompts: {},
    });
    const mockedClient = {
      getServerCapabilities: mockGetServerCapabilities,
      request: mockRequest,
    } as unknown as ClientLib.Client;

    await discoverPrompts('test-server', mockedClient, mockedPromptRegistry);

    expect(mockGetServerCapabilities).toHaveBeenCalledOnce();
    expect(mockRequest).toHaveBeenCalledWith(
      { method: 'prompts/list', params: {} },
      expect.anything(),
@@ -132,67 +129,37 @@ describe('mcp-client', () => {
    const mockRequest = vi.fn().mockResolvedValue({
      prompts: [],
    });
    const mockGetServerCapabilities = vi.fn().mockReturnValue({
      prompts: {},
    });

    const mockedClient = {
      getServerCapabilities: mockGetServerCapabilities,
      request: mockRequest,
    } as unknown as ClientLib.Client;

    const consoleLogSpy = vi
      .spyOn(console, 'debug')
      .mockImplementation(() => {});
      .mockImplementation(() => {
        // no-op
      });

    await discoverPrompts('test-server', mockedClient, mockedPromptRegistry);

    expect(mockGetServerCapabilities).toHaveBeenCalledOnce();
    expect(mockRequest).toHaveBeenCalledOnce();
    expect(consoleLogSpy).not.toHaveBeenCalled();

    consoleLogSpy.mockRestore();
  });

  it('should do nothing if the server has no prompt support', async () => {
    const mockRequest = vi.fn().mockResolvedValue({
      prompts: [],
    });
    const mockGetServerCapabilities = vi.fn().mockReturnValue({});

    const mockedClient = {
      getServerCapabilities: mockGetServerCapabilities,
      request: mockRequest,
    } as unknown as ClientLib.Client;

    const consoleLogSpy = vi
      .spyOn(console, 'debug')
      .mockImplementation(() => {});

    await discoverPrompts('test-server', mockedClient, mockedPromptRegistry);

    expect(mockGetServerCapabilities).toHaveBeenCalledOnce();
    expect(mockRequest).not.toHaveBeenCalled();
    expect(consoleLogSpy).not.toHaveBeenCalled();

    consoleLogSpy.mockRestore();
  });

  it('should log an error if discovery fails', async () => {
    const testError = new Error('test error');
    testError.message = 'test error';
    const mockRequest = vi.fn().mockRejectedValue(testError);
    const mockGetServerCapabilities = vi.fn().mockReturnValue({
      prompts: {},
    });
    const mockedClient = {
      getServerCapabilities: mockGetServerCapabilities,
      request: mockRequest,
    } as unknown as ClientLib.Client;

    const consoleErrorSpy = vi
      .spyOn(console, 'error')
      .mockImplementation(() => {});
      .mockImplementation(() => {
        // no-op
      });

    await discoverPrompts('test-server', mockedClient, mockedPromptRegistry);


@@ -496,9 +496,6 @@ export async function discoverPrompts(
  promptRegistry: PromptRegistry,
): Promise<Prompt[]> {
  try {
    // Only request prompts if the server supports them.
    if (mcpClient.getServerCapabilities()?.prompts == null) return [];

    const response = await mcpClient.request(
      { method: 'prompts/list', params: {} },
      ListPromptsResultSchema,

@@ -131,11 +131,8 @@ describe('DiscoveredMCPTool', () => {
      success: true,
      details: 'executed',
    };
    const mockFunctionResponseContent = [
      {
        type: 'text',
        text: JSON.stringify(mockToolSuccessResultObject),
      },
    const mockFunctionResponseContent: Part[] = [
      { text: JSON.stringify(mockToolSuccessResultObject) },
    ];
    const mockMcpToolResponseParts: Part[] = [
      {
@@ -152,13 +149,11 @@ describe('DiscoveredMCPTool', () => {
    expect(mockCallTool).toHaveBeenCalledWith([
      { name: serverToolName, args: params },
    ]);
    expect(toolResult.llmContent).toEqual(mockMcpToolResponseParts);

    const stringifiedResponseContent = JSON.stringify(
      mockToolSuccessResultObject,
    );
    expect(toolResult.llmContent).toEqual([
      { text: stringifiedResponseContent },
    ]);
    expect(toolResult.returnDisplay).toBe(stringifiedResponseContent);
  });

@@ -175,9 +170,6 @@ describe('DiscoveredMCPTool', () => {
    mockCallTool.mockResolvedValue(mockMcpToolResponsePartsEmpty);
    const toolResult: ToolResult = await tool.execute(params);
    expect(toolResult.returnDisplay).toBe('```json\n[]\n```');
    expect(toolResult.llmContent).toEqual([
      { text: '[Error: Could not parse tool response]' },
    ]);
  });

  it('should propagate rejection if mcpTool.callTool rejects', async () => {
@@ -194,361 +186,6 @@ describe('DiscoveredMCPTool', () => {

    await expect(tool.execute(params)).rejects.toThrow(expectedError);
  });

  it('should handle a simple text response correctly', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { query: 'test' };
    const successMessage = 'This is a success message.';

    // Simulate the response from the GenAI SDK, which wraps the MCP
    // response in a functionResponse Part.
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            // The `content` array contains MCP ContentBlocks.
            content: [{ type: 'text', text: successMessage }],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    // 1. Assert that the llmContent sent to the scheduler is a clean Part array.
    expect(toolResult.llmContent).toEqual([{ text: successMessage }]);

    // 2. Assert that the display output is the simple text message.
    expect(toolResult.returnDisplay).toBe(successMessage);

    // 3. Verify that the underlying callTool was made correctly.
    expect(mockCallTool).toHaveBeenCalledWith([
      { name: serverToolName, args: params },
    ]);
  });

  it('should handle an AudioBlock response', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { action: 'play' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              {
                type: 'audio',
                data: 'BASE64_AUDIO_DATA',
                mimeType: 'audio/mp3',
              },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([
      {
        text: `[Tool '${serverToolName}' provided the following audio data with mime-type: audio/mp3]`,
      },
      {
        inlineData: {
          mimeType: 'audio/mp3',
          data: 'BASE64_AUDIO_DATA',
        },
      },
    ]);
    expect(toolResult.returnDisplay).toBe('[Audio: audio/mp3]');
  });

  it('should handle a ResourceLinkBlock response', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { resource: 'get' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              {
                type: 'resource_link',
                uri: 'file:///path/to/thing',
                name: 'resource-name',
                title: 'My Resource',
              },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([
      {
        text: 'Resource Link: My Resource at file:///path/to/thing',
      },
    ]);
    expect(toolResult.returnDisplay).toBe(
      '[Link to My Resource: file:///path/to/thing]',
    );
  });

  it('should handle an embedded text ResourceBlock response', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { resource: 'get' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              {
                type: 'resource',
                resource: {
                  uri: 'file:///path/to/text.txt',
                  text: 'This is the text content.',
                  mimeType: 'text/plain',
                },
              },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([
      { text: 'This is the text content.' },
    ]);
    expect(toolResult.returnDisplay).toBe('This is the text content.');
  });

  it('should handle an embedded binary ResourceBlock response', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { resource: 'get' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              {
                type: 'resource',
                resource: {
                  uri: 'file:///path/to/data.bin',
                  blob: 'BASE64_BINARY_DATA',
                  mimeType: 'application/octet-stream',
                },
              },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([
      {
        text: `[Tool '${serverToolName}' provided the following embedded resource with mime-type: application/octet-stream]`,
      },
      {
        inlineData: {
          mimeType: 'application/octet-stream',
          data: 'BASE64_BINARY_DATA',
        },
      },
    ]);
    expect(toolResult.returnDisplay).toBe(
      '[Embedded Resource: application/octet-stream]',
    );
  });

  it('should handle a mix of content block types', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { action: 'complex' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              { type: 'text', text: 'First part.' },
              {
                type: 'image',
                data: 'BASE64_IMAGE_DATA',
                mimeType: 'image/jpeg',
              },
              { type: 'text', text: 'Second part.' },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([
      { text: 'First part.' },
      {
        text: `[Tool '${serverToolName}' provided the following image data with mime-type: image/jpeg]`,
      },
      {
        inlineData: {
          mimeType: 'image/jpeg',
          data: 'BASE64_IMAGE_DATA',
        },
      },
      { text: 'Second part.' },
    ]);
    expect(toolResult.returnDisplay).toBe(
      'First part.\n[Image: image/jpeg]\nSecond part.',
    );
  });

  it('should ignore unknown content block types', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { action: 'test' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              { type: 'text', text: 'Valid part.' },
              { type: 'future_block', data: 'some-data' },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([{ text: 'Valid part.' }]);
    expect(toolResult.returnDisplay).toBe(
      'Valid part.\n[Unknown content type: future_block]',
    );
  });

  it('should handle a complex mix of content block types', async () => {
    const tool = new DiscoveredMCPTool(
      mockCallableToolInstance,
      serverName,
      serverToolName,
      baseDescription,
      inputSchema,
    );
    const params = { action: 'super-complex' };
    const sdkResponse: Part[] = [
      {
        functionResponse: {
          name: serverToolName,
          response: {
            content: [
              { type: 'text', text: 'Here is a resource.' },
              {
                type: 'resource_link',
                uri: 'file:///path/to/resource',
                name: 'resource-name',
                title: 'My Resource',
              },
              {
                type: 'resource',
                resource: {
                  uri: 'file:///path/to/text.txt',
                  text: 'Embedded text content.',
                  mimeType: 'text/plain',
                },
              },
              {
                type: 'image',
                data: 'BASE64_IMAGE_DATA',
                mimeType: 'image/jpeg',
              },
            ],
          },
        },
      },
    ];
    mockCallTool.mockResolvedValue(sdkResponse);

    const toolResult = await tool.execute(params);

    expect(toolResult.llmContent).toEqual([
      { text: 'Here is a resource.' },
      {
        text: 'Resource Link: My Resource at file:///path/to/resource',
      },
      { text: 'Embedded text content.' },
      {
        text: `[Tool '${serverToolName}' provided the following image data with mime-type: image/jpeg]`,
      },
      {
        inlineData: {
          mimeType: 'image/jpeg',
          data: 'BASE64_IMAGE_DATA',
        },
      },
    ]);
    expect(toolResult.returnDisplay).toBe(
      'Here is a resource.\n[Link to My Resource: file:///path/to/resource]\nEmbedded text content.\n[Image: image/jpeg]',
    );
  });
});

describe('shouldConfirmExecute', () => {

@@ -22,40 +22,6 @@ import {

type ToolParams = Record<string, unknown>;

// Discriminated union for MCP Content Blocks to ensure type safety.
type McpTextBlock = {
  type: 'text';
  text: string;
};

type McpMediaBlock = {
  type: 'image' | 'audio';
  mimeType: string;
  data: string;
};

type McpResourceBlock = {
  type: 'resource';
  resource: {
    text?: string;
    blob?: string;
    mimeType?: string;
  };
};

type McpResourceLinkBlock = {
  type: 'resource_link';
  uri: string;
  title?: string;
  name?: string;
};

type McpContentBlock =
  | McpTextBlock
  | McpMediaBlock
  | McpResourceBlock
  | McpResourceLinkBlock;

export class DiscoveredMCPTool extends BaseTool<ToolParams, ToolResult> {
  private static readonly allowlist: Set<string> = new Set();

@@ -148,145 +114,70 @@ export class DiscoveredMCPTool extends BaseTool<ToolParams, ToolResult> {
      },
    ];

    const rawResponseParts = await this.mcpTool.callTool(functionCalls);
    const transformedParts = transformMcpContentToParts(rawResponseParts);
    const responseParts: Part[] = await this.mcpTool.callTool(functionCalls);

    return {
      llmContent: transformedParts,
      returnDisplay: getStringifiedResultForDisplay(rawResponseParts),
      llmContent: responseParts,
      returnDisplay: getStringifiedResultForDisplay(responseParts),
    };
  }
}

function transformTextBlock(block: McpTextBlock): Part {
  return { text: block.text };
}

function transformImageAudioBlock(
  block: McpMediaBlock,
  toolName: string,
): Part[] {
  return [
    {
      text: `[Tool '${toolName}' provided the following ${
        block.type
      } data with mime-type: ${block.mimeType}]`,
    },
    {
      inlineData: {
        mimeType: block.mimeType,
        data: block.data,
      },
    },
  ];
}

function transformResourceBlock(
  block: McpResourceBlock,
  toolName: string,
): Part | Part[] | null {
  const resource = block.resource;
  if (resource?.text) {
    return { text: resource.text };
  }
  if (resource?.blob) {
    const mimeType = resource.mimeType || 'application/octet-stream';
    return [
      {
        text: `[Tool '${toolName}' provided the following embedded resource with mime-type: ${mimeType}]`,
      },
      {
        inlineData: {
          mimeType,
          data: resource.blob,
        },
      },
    ];
  }
  return null;
}

function transformResourceLinkBlock(block: McpResourceLinkBlock): Part {
  return {
    text: `Resource Link: ${block.title || block.name} at ${block.uri}`,
  };
}

/**
 * Transforms the raw MCP content blocks from the SDK response into a
 * standard GenAI Part array.
 * @param sdkResponse The raw Part[] array from `mcpTool.callTool()`.
 * @returns A clean Part[] array ready for the scheduler.
 */
function transformMcpContentToParts(sdkResponse: Part[]): Part[] {
  const funcResponse = sdkResponse?.[0]?.functionResponse;
  const mcpContent = funcResponse?.response?.content as McpContentBlock[];
  const toolName = funcResponse?.name || 'unknown tool';

  if (!Array.isArray(mcpContent)) {
    return [{ text: '[Error: Could not parse tool response]' }];
  }

  const transformed = mcpContent.flatMap(
    (block: McpContentBlock): Part | Part[] | null => {
      switch (block.type) {
        case 'text':
          return transformTextBlock(block);
        case 'image':
        case 'audio':
          return transformImageAudioBlock(block, toolName);
        case 'resource':
          return transformResourceBlock(block, toolName);
        case 'resource_link':
          return transformResourceLinkBlock(block);
        default:
          return null;
      }
    },
  );

  return transformed.filter((part): part is Part => part !== null);
}

/**
 * Processes the raw response from the MCP tool to generate a clean,
 * human-readable string for display in the CLI. It summarizes non-text
 * content and presents text directly.
 * Processes an array of `Part` objects, primarily from a tool's execution result,
 * to generate a user-friendly string representation, typically for display in a CLI.
 *
 * @param rawResponse The raw Part[] array from the GenAI SDK.
 * @returns A formatted string representing the tool's output.
 * The `result` array can contain various types of `Part` objects:
 * 1. `FunctionResponse` parts:
 *    - If the `response.content` of a `FunctionResponse` is an array consisting solely
 *      of `TextPart` objects, their text content is concatenated into a single string.
 *      This is to present simple textual outputs directly.
 *    - If `response.content` is an array but contains other types of `Part` objects (or a mix),
 *      the `content` array itself is preserved. This handles structured data like JSON objects or arrays
 *      returned by a tool.
 *    - If `response.content` is not an array or is missing, the entire `functionResponse`
 *      object is preserved.
 * 2. Other `Part` types (e.g., `TextPart` directly in the `result` array):
 *    - These are preserved as is.
 *
 * All processed parts are then collected into an array, which is JSON.stringify-ed
 * with indentation and wrapped in a markdown JSON code block.
 */
function getStringifiedResultForDisplay(rawResponse: Part[]): string {
  const mcpContent = rawResponse?.[0]?.functionResponse?.response
    ?.content as McpContentBlock[];

  if (!Array.isArray(mcpContent)) {
    return '```json\n' + JSON.stringify(rawResponse, null, 2) + '\n```';
function getStringifiedResultForDisplay(result: Part[]) {
  if (!result || result.length === 0) {
    return '```json\n[]\n```';
  }

  const displayParts = mcpContent.map((block: McpContentBlock): string => {
    switch (block.type) {
      case 'text':
        return block.text;
      case 'image':
        return `[Image: ${block.mimeType}]`;
      case 'audio':
        return `[Audio: ${block.mimeType}]`;
      case 'resource_link':
        return `[Link to ${block.title || block.name}: ${block.uri}]`;
      case 'resource':
        if (block.resource?.text) {
          return block.resource.text;
  const processFunctionResponse = (part: Part) => {
    if (part.functionResponse) {
      const responseContent = part.functionResponse.response?.content;
      if (responseContent && Array.isArray(responseContent)) {
        // Check if all parts in responseContent are simple TextParts
        const allTextParts = responseContent.every(
          (p: Part) => p.text !== undefined,
        );
        if (allTextParts) {
          return responseContent.map((p: Part) => p.text).join('');
        }
        return `[Embedded Resource: ${
          block.resource?.mimeType || 'unknown type'
        }]`;
      default:
        return `[Unknown content type: ${(block as { type: string }).type}]`;
    }
  });
        // If not all simple text parts, return the array of these content parts for JSON stringification
        return responseContent;
      }

  return displayParts.join('\n');
      // If no content, or not an array, or not a functionResponse, stringify the whole functionResponse part for inspection
      return part.functionResponse;
    }
    return part; // Fallback for unexpected structure or non-FunctionResponsePart
  };

  const processedResults =
    result.length === 1
      ? processFunctionResponse(result[0])
      : result.map(processFunctionResponse);
  if (typeof processedResults === 'string') {
    return processedResults;
  }

  return '```json\n' + JSON.stringify(processedResults, null, 2) + '\n```';
}

/** Visible for testing */

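As a reading aid for the hunk above: the per-block display mapping being removed turns each MCP content block into one display line. A minimal, self-contained sketch follows; the narrowed `Block` type and `displayLine` name are illustrative only (the real code uses the full `McpContentBlock` union from earlier in the diff).

// Hedged sketch of the removed display mapping: text passes through,
// media blocks are summarized by mime type.
type Block =
  | { type: 'text'; text: string }
  | { type: 'image' | 'audio'; mimeType: string; data: string };

function displayLine(block: Block): string {
  switch (block.type) {
    case 'text':
      return block.text;
    case 'image':
      return `[Image: ${block.mimeType}]`;
    case 'audio':
      return `[Audio: ${block.mimeType}]`;
  }
}
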
@@ -94,7 +94,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(mockModifyContext.getCurrentContent).toHaveBeenCalledWith(
@@ -149,7 +148,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    const stats = await fsp.stat(diffDir);
@@ -167,7 +165,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(mkdirSpy).not.toHaveBeenCalled();
@@ -186,7 +183,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(mockCreatePatch).toHaveBeenCalledWith(
@@ -215,7 +211,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(mockCreatePatch).toHaveBeenCalledWith(
@@ -246,7 +241,6 @@ describe('modifyWithEditor', () => {
        mockModifyContext,
        'vscode' as EditorType,
        abortSignal,
        vi.fn(),
      ),
    ).rejects.toThrow('Editor failed to open');

@@ -273,7 +267,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(consoleErrorSpy).toHaveBeenCalledTimes(2);
@@ -297,7 +290,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(mockOpenDiff).toHaveBeenCalledOnce();
@@ -319,7 +311,6 @@ describe('modifyWithEditor', () => {
      mockModifyContext,
      'vscode' as EditorType,
      abortSignal,
      vi.fn(),
    );

    expect(mockOpenDiff).toHaveBeenCalledOnce();

@@ -138,7 +138,6 @@ export async function modifyWithEditor<ToolParams>(
  modifyContext: ModifyContext<ToolParams>,
  editorType: EditorType,
  _abortSignal: AbortSignal,
  onEditorClose: () => void,
): Promise<ModifyResult<ToolParams>> {
  const currentContent = await modifyContext.getCurrentContent(originalParams);
  const proposedContent =
@@ -151,7 +150,7 @@ export async function modifyWithEditor<ToolParams>(
  );

  try {
    await openDiff(oldPath, newPath, editorType, onEditorClose);
    await openDiff(oldPath, newPath, editorType);
    const result = getUpdatedParams(
      oldPath,
      newPath,

@@ -477,139 +477,4 @@ describe('ReadManyFilesTool', () => {
      fs.rmSync(tempDir2, { recursive: true, force: true });
    });
  });

  describe('Batch Processing', () => {
    const createMultipleFiles = (count: number, contentPrefix = 'Content') => {
      const files: string[] = [];
      for (let i = 0; i < count; i++) {
        const fileName = `file${i}.txt`;
        createFile(fileName, `${contentPrefix} ${i}`);
        files.push(fileName);
      }
      return files;
    };

    const createFile = (filePath: string, content = '') => {
      const fullPath = path.join(tempRootDir, filePath);
      fs.mkdirSync(path.dirname(fullPath), { recursive: true });
      fs.writeFileSync(fullPath, content);
    };

    it('should process files in parallel for performance', async () => {
      // Mock detectFileType to add artificial delay to simulate I/O
      const detectFileTypeSpy = vi.spyOn(
        await import('../utils/fileUtils.js'),
        'detectFileType',
      );

      // Create files
      const fileCount = 4;
      const files = createMultipleFiles(fileCount, 'Batch test');

      // Mock with 100ms delay per file to simulate I/O operations
      detectFileTypeSpy.mockImplementation(async (_filePath: string) => {
        await new Promise((resolve) => setTimeout(resolve, 100));
        return 'text';
      });

      const startTime = Date.now();
      const params = { paths: files };
      const result = await tool.execute(params, new AbortController().signal);
      const endTime = Date.now();

      const processingTime = endTime - startTime;

      console.log(
        `Processing time: ${processingTime}ms for ${fileCount} files`,
      );

      // Verify parallel processing performance improvement
      // Parallel processing should complete in ~100ms (single file time)
      // Sequential would take ~400ms (4 files × 100ms each)
      expect(processingTime).toBeLessThan(200); // Should PASS with parallel implementation

      // Verify all files were processed
      const content = result.llmContent as string[];
      expect(content).toHaveLength(fileCount);

      // Cleanup mock
      detectFileTypeSpy.mockRestore();
    });

    it('should handle batch processing errors gracefully', async () => {
      // Create mix of valid and problematic files
      createFile('valid1.txt', 'Valid content 1');
      createFile('valid2.txt', 'Valid content 2');
      createFile('valid3.txt', 'Valid content 3');

      const params = {
        paths: [
          'valid1.txt',
          'valid2.txt',
          'nonexistent-file.txt', // This will fail
          'valid3.txt',
        ],
      };

      const result = await tool.execute(params, new AbortController().signal);
      const content = result.llmContent as string[];

      // Should successfully process valid files despite one failure
      expect(content.length).toBeGreaterThanOrEqual(3);
      expect(result.returnDisplay).toContain('Successfully read');

      // Verify valid files were processed
      const expectedPath1 = path.join(tempRootDir, 'valid1.txt');
      const expectedPath3 = path.join(tempRootDir, 'valid3.txt');
      expect(content.some((c) => c.includes(expectedPath1))).toBe(true);
      expect(content.some((c) => c.includes(expectedPath3))).toBe(true);
    });

    it('should execute file operations concurrently', async () => {
      // Track execution order to verify concurrency
      const executionOrder: string[] = [];
      const detectFileTypeSpy = vi.spyOn(
        await import('../utils/fileUtils.js'),
        'detectFileType',
      );

      const files = ['file1.txt', 'file2.txt', 'file3.txt'];
      files.forEach((file) => createFile(file, 'test content'));

      // Mock to track concurrent vs sequential execution
      detectFileTypeSpy.mockImplementation(async (filePath: string) => {
        const fileName = filePath.split('/').pop() || '';
        executionOrder.push(`start:${fileName}`);

        // Add delay to make timing differences visible
        await new Promise((resolve) => setTimeout(resolve, 50));

        executionOrder.push(`end:${fileName}`);
        return 'text';
      });

      await tool.execute({ paths: files }, new AbortController().signal);

      console.log('Execution order:', executionOrder);

      // Verify concurrent execution pattern
      // In parallel execution: all "start:" events should come before all "end:" events
      // In sequential execution: "start:file1", "end:file1", "start:file2", "end:file2", etc.

      const startEvents = executionOrder.filter((e) =>
        e.startsWith('start:'),
      ).length;
      const firstEndIndex = executionOrder.findIndex((e) =>
        e.startsWith('end:'),
      );
      const startsBeforeFirstEnd = executionOrder
        .slice(0, firstEndIndex)
        .filter((e) => e.startsWith('start:')).length;

      // For parallel processing, ALL start events should happen before the first end event
      expect(startsBeforeFirstEnd).toBe(startEvents); // Should PASS with parallel implementation

      detectFileTypeSpy.mockRestore();
    });
  });
});

@@ -70,27 +70,6 @@ export interface ReadManyFilesParams {
  };
}

/**
 * Result type for file processing operations
 */
type FileProcessingResult =
  | {
      success: true;
      filePath: string;
      relativePathForDisplay: string;
      fileReadResult: NonNullable<
        Awaited<ReturnType<typeof processSingleFileContent>>
      >;
      reason?: undefined;
    }
  | {
      success: false;
      filePath: string;
      relativePathForDisplay: string;
      fileReadResult?: undefined;
      reason: string;
    };

/**
 * Default exclusion patterns for commonly ignored directories and binary file types.
 * These are compatible with glob ignore patterns.
@@ -434,124 +413,66 @@ Use this tool when the user's query implies needing the content of several files

    const sortedFiles = Array.from(filesToConsider).sort();

    const fileProcessingPromises = sortedFiles.map(
      async (filePath): Promise<FileProcessingResult> => {
        try {
          const relativePathForDisplay = path
            .relative(this.config.getTargetDir(), filePath)
            .replace(/\\/g, '/');
    for (const filePath of sortedFiles) {
      const relativePathForDisplay = path
        .relative(this.config.getTargetDir(), filePath)
        .replace(/\\/g, '/');

          const fileType = await detectFileType(filePath);
      const fileType = await detectFileType(filePath);

          if (fileType === 'image' || fileType === 'pdf') {
            const fileExtension = path.extname(filePath).toLowerCase();
            const fileNameWithoutExtension = path.basename(
              filePath,
              fileExtension,
            );
            const requestedExplicitly = inputPatterns.some(
              (pattern: string) =>
                pattern.toLowerCase().includes(fileExtension) ||
                pattern.includes(fileNameWithoutExtension),
            );
      if (fileType === 'image' || fileType === 'pdf') {
        const fileExtension = path.extname(filePath).toLowerCase();
        const fileNameWithoutExtension = path.basename(filePath, fileExtension);
        const requestedExplicitly = inputPatterns.some(
          (pattern: string) =>
            pattern.toLowerCase().includes(fileExtension) ||
            pattern.includes(fileNameWithoutExtension),
        );

            if (!requestedExplicitly) {
              return {
                success: false,
                filePath,
                relativePathForDisplay,
                reason:
                  'asset file (image/pdf) was not explicitly requested by name or extension',
              };
            }
          }

          // Use processSingleFileContent for all file types now
          const fileReadResult = await processSingleFileContent(
            filePath,
            this.config.getTargetDir(),
          );

          if (fileReadResult.error) {
            return {
              success: false,
              filePath,
              relativePathForDisplay,
              reason: `Read error: ${fileReadResult.error}`,
            };
          }

          return {
            success: true,
            filePath,
            relativePathForDisplay,
            fileReadResult,
          };
        } catch (error) {
          const relativePathForDisplay = path
            .relative(this.config.getTargetDir(), filePath)
            .replace(/\\/g, '/');

          return {
            success: false,
            filePath,
            relativePathForDisplay,
            reason: `Unexpected error: ${error instanceof Error ? error.message : String(error)}`,
          };
        }
      },
    );

    const results = await Promise.allSettled(fileProcessingPromises);

    for (const result of results) {
      if (result.status === 'fulfilled') {
        const fileResult = result.value;

        if (!fileResult.success) {
          // Handle skipped files (images/PDFs not requested or read errors)
        if (!requestedExplicitly) {
          skippedFiles.push({
            path: fileResult.relativePathForDisplay,
            reason: fileResult.reason,
            path: relativePathForDisplay,
            reason:
              'asset file (image/pdf) was not explicitly requested by name or extension',
          });
        } else {
          // Handle successfully processed files
          const { filePath, relativePathForDisplay, fileReadResult } =
            fileResult;

          if (typeof fileReadResult.llmContent === 'string') {
            const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace(
              '{filePath}',
              filePath,
            );
            contentParts.push(
              `${separator}\n\n${fileReadResult.llmContent}\n\n`,
            );
          } else {
            contentParts.push(fileReadResult.llmContent); // This is a Part for image/pdf
          }

          processedFilesRelativePaths.push(relativePathForDisplay);

          const lines =
            typeof fileReadResult.llmContent === 'string'
              ? fileReadResult.llmContent.split('\n').length
              : undefined;
          const mimetype = getSpecificMimeType(filePath);
          recordFileOperationMetric(
            this.config,
            FileOperation.READ,
            lines,
            mimetype,
            path.extname(filePath),
          );
          continue;
        }
      } else {
        // Handle Promise rejection (unexpected errors)
      }

      // Use processSingleFileContent for all file types now
      const fileReadResult = await processSingleFileContent(
        filePath,
        this.config.getTargetDir(),
      );

      if (fileReadResult.error) {
        skippedFiles.push({
          path: 'unknown',
          reason: `Unexpected error: ${result.reason}`,
          path: relativePathForDisplay,
          reason: `Read error: ${fileReadResult.error}`,
        });
      } else {
        if (typeof fileReadResult.llmContent === 'string') {
          const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace(
            '{filePath}',
            filePath,
          );
          contentParts.push(`${separator}\n\n${fileReadResult.llmContent}\n\n`);
        } else {
          contentParts.push(fileReadResult.llmContent); // This is a Part for image/pdf
        }
        processedFilesRelativePaths.push(relativePathForDisplay);
        const lines =
          typeof fileReadResult.llmContent === 'string'
            ? fileReadResult.llmContent.split('\n').length
            : undefined;
        const mimetype = getSpecificMimeType(filePath);
        recordFileOperationMetric(
          this.config,
          FileOperation.READ,
          lines,
          mimetype,
          path.extname(filePath),
        );
      }
    }


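The parallel version being reverted in the two hunks above follows the standard map-then-allSettled pattern. A minimal sketch under stated assumptions: `processOne` is a hypothetical stand-in for the per-file work, and failure handling is simplified to dropping rejected entries rather than recording skip reasons as the real code does.

// Hedged sketch: run per-file work concurrently and keep failures isolated.
async function processAll<T>(
  files: string[],
  processOne: (file: string) => Promise<T>,
): Promise<T[]> {
  // Kick off all work at once; allSettled never rejects, so one bad file
  // cannot abort the whole batch.
  const settled = await Promise.allSettled(files.map(processOne));
  return settled
    .filter((r): r is PromiseFulfilledResult<T> => r.status === 'fulfilled')
    .map((r) => r.value);
}
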
@@ -543,37 +543,3 @@ describe('validateToolParams', () => {
    expect(result).toContain('is not a registered workspace directory');
  });
});

describe('validateToolParams', () => {
  it('should return null for valid directory', () => {
    const config = {
      getCoreTools: () => undefined,
      getExcludeTools: () => undefined,
      getTargetDir: () => '/root',
      getWorkspaceContext: () =>
        createMockWorkspaceContext('/root', ['/users/test']),
    } as unknown as Config;
    const shellTool = new ShellTool(config);
    const result = shellTool.validateToolParams({
      command: 'ls',
      directory: 'test',
    });
    expect(result).toBeNull();
  });

  it('should return error for directory outside workspace', () => {
    const config = {
      getCoreTools: () => undefined,
      getExcludeTools: () => undefined,
      getTargetDir: () => '/root',
      getWorkspaceContext: () =>
        createMockWorkspaceContext('/root', ['/users/test']),
    } as unknown as Config;
    const shellTool = new ShellTool(config);
    const result = shellTool.validateToolParams({
      command: 'ls',
      directory: 'test2',
    });
    expect(result).toContain('is not a registered workspace directory');
  });
});

@@ -30,6 +30,7 @@ import {
  Schema,
} from '@google/genai';
import { spawn } from 'node:child_process';
import { IdeClient } from '../ide/ide-client.js';
import fs from 'node:fs';

vi.mock('node:fs');
@@ -139,6 +140,7 @@ const baseConfigParams: ConfigParameters = {
  geminiMdFileCount: 0,
  approvalMode: ApprovalMode.DEFAULT,
  sessionId: 'test-session-id',
  ideClient: IdeClient.getInstance(false),
};

describe('ToolRegistry', () => {
@@ -170,10 +172,6 @@ describe('ToolRegistry', () => {
    );
    vi.spyOn(config, 'getMcpServers');
    vi.spyOn(config, 'getMcpServerCommand');
    vi.spyOn(config, 'getPromptRegistry').mockReturnValue({
      clear: vi.fn(),
      removePromptsByServer: vi.fn(),
    } as any);
    mockDiscoverMcpTools.mockReset().mockResolvedValue(undefined);
  });

@@ -355,7 +353,7 @@ describe('ToolRegistry', () => {
      mcpServerConfigVal,
      undefined,
      toolRegistry,
      config.getPromptRegistry(),
      undefined,
      false,
    );
  });
@@ -378,7 +376,7 @@ describe('ToolRegistry', () => {
      mcpServerConfigVal,
      undefined,
      toolRegistry,
      config.getPromptRegistry(),
      undefined,
      false,
    );
  });

@@ -150,14 +150,6 @@ export class ToolRegistry {
    this.tools.set(tool.name, tool);
  }

  private removeDiscoveredTools(): void {
    for (const tool of this.tools.values()) {
      if (tool instanceof DiscoveredTool || tool instanceof DiscoveredMCPTool) {
        this.tools.delete(tool.name);
      }
    }
  }

  /**
   * Discovers tools from project (if available and configured).
   * Can be called multiple times to update discovered tools.
@@ -165,9 +157,11 @@ export class ToolRegistry {
   */
  async discoverAllTools(): Promise<void> {
    // remove any previously discovered tools
    this.removeDiscoveredTools();

    this.config.getPromptRegistry().clear();
    for (const tool of this.tools.values()) {
      if (tool instanceof DiscoveredTool || tool instanceof DiscoveredMCPTool) {
        this.tools.delete(tool.name);
      }
    }

    await this.discoverAndRegisterToolsFromCommand();

@@ -188,9 +182,11 @@ export class ToolRegistry {
   */
  async discoverMcpTools(): Promise<void> {
    // remove any previously discovered tools
    this.removeDiscoveredTools();

    this.config.getPromptRegistry().clear();
    for (const tool of this.tools.values()) {
      if (tool instanceof DiscoveredMCPTool) {
        this.tools.delete(tool.name);
      }
    }

    // discover tools using MCP servers, if configured
    await discoverMcpTools(
@@ -214,8 +210,6 @@ export class ToolRegistry {
      }
    }

    this.config.getPromptRegistry().removePromptsByServer(serverName);

    const mcpServers = this.config.getMcpServers() ?? {};
    const serverConfig = mcpServers[serverName];
    if (serverConfig) {

@@ -331,7 +331,7 @@ describe('editor utils', () => {
        }),
      };
      (spawn as Mock).mockReturnValue(mockSpawn);
      await openDiff('old.txt', 'new.txt', editor, () => {});
      await openDiff('old.txt', 'new.txt', editor);
      const diffCommand = getDiffCommand('old.txt', 'new.txt', editor)!;
      expect(spawn).toHaveBeenCalledWith(
        diffCommand.command,
@@ -361,9 +361,9 @@ describe('editor utils', () => {
        }),
      };
      (spawn as Mock).mockReturnValue(mockSpawn);
      await expect(
        openDiff('old.txt', 'new.txt', editor, () => {}),
      ).rejects.toThrow('spawn error');
      await expect(openDiff('old.txt', 'new.txt', editor)).rejects.toThrow(
        'spawn error',
      );
    });

    it(`should reject if ${editor} exits with non-zero code`, async () => {
@@ -375,9 +375,9 @@ describe('editor utils', () => {
        }),
      };
      (spawn as Mock).mockReturnValue(mockSpawn);
      await expect(
        openDiff('old.txt', 'new.txt', editor, () => {}),
      ).rejects.toThrow(`${editor} exited with code 1`);
      await expect(openDiff('old.txt', 'new.txt', editor)).rejects.toThrow(
        `${editor} exited with code 1`,
      );
    });
  }

@@ -385,7 +385,7 @@ describe('editor utils', () => {
  for (const editor of execSyncEditors) {
    it(`should call execSync for ${editor} on non-windows`, async () => {
      Object.defineProperty(process, 'platform', { value: 'linux' });
      await openDiff('old.txt', 'new.txt', editor, () => {});
      await openDiff('old.txt', 'new.txt', editor);
      expect(execSync).toHaveBeenCalledTimes(1);
      const diffCommand = getDiffCommand('old.txt', 'new.txt', editor)!;
      const expectedCommand = `${
@@ -399,7 +399,7 @@ describe('editor utils', () => {

    it(`should call execSync for ${editor} on windows`, async () => {
      Object.defineProperty(process, 'platform', { value: 'win32' });
      await openDiff('old.txt', 'new.txt', editor, () => {});
      await openDiff('old.txt', 'new.txt', editor);
      expect(execSync).toHaveBeenCalledTimes(1);
      const diffCommand = getDiffCommand('old.txt', 'new.txt', editor)!;
      const expectedCommand = `${diffCommand.command} ${diffCommand.args.join(
@@ -417,46 +417,11 @@ describe('editor utils', () => {
      .spyOn(console, 'error')
      .mockImplementation(() => {});
    // @ts-expect-error Testing unsupported editor
    await openDiff('old.txt', 'new.txt', 'foobar', () => {});
    await openDiff('old.txt', 'new.txt', 'foobar');
    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'No diff tool available. Install a supported editor.',
    );
  });

  describe('onEditorClose callback', () => {
    it('should call onEditorClose for execSync editors', async () => {
      (execSync as Mock).mockReturnValue(Buffer.from(`/usr/bin/`));
      const onEditorClose = vi.fn();
      await openDiff('old.txt', 'new.txt', 'vim', onEditorClose);
      expect(execSync).toHaveBeenCalledTimes(1);
      expect(onEditorClose).toHaveBeenCalledTimes(1);
    });

    it('should call onEditorClose for execSync editors when an error is thrown', async () => {
      (execSync as Mock).mockImplementation(() => {
        throw new Error('test error');
      });
      const onEditorClose = vi.fn();
      openDiff('old.txt', 'new.txt', 'vim', onEditorClose);
      expect(execSync).toHaveBeenCalledTimes(1);
      expect(onEditorClose).toHaveBeenCalledTimes(1);
    });

    it('should not call onEditorClose for spawn editors', async () => {
      const onEditorClose = vi.fn();
      const mockSpawn = {
        on: vi.fn((event, cb) => {
          if (event === 'close') {
            cb(0);
          }
        }),
      };
      (spawn as Mock).mockReturnValue(mockSpawn);
      await openDiff('old.txt', 'new.txt', 'vscode', onEditorClose);
      expect(spawn).toHaveBeenCalledTimes(1);
      expect(onEditorClose).not.toHaveBeenCalled();
    });
  });
});

describe('allowEditorTypeInSandbox', () => {

@@ -164,7 +164,6 @@ export async function openDiff(
  oldPath: string,
  newPath: string,
  editor: EditorType,
  onEditorClose: () => void,
): Promise<void> {
  const diffCommand = getDiffCommand(oldPath, newPath, editor);
  if (!diffCommand) {
@@ -207,16 +206,10 @@ export async function openDiff(
        process.platform === 'win32'
          ? `${diffCommand.command} ${diffCommand.args.join(' ')}`
          : `${diffCommand.command} ${diffCommand.args.map((arg) => `"${arg}"`).join(' ')}`;
      try {
        execSync(command, {
          stdio: 'inherit',
          encoding: 'utf8',
        });
      } catch (e) {
        console.error('Error in onEditorClose callback:', e);
      } finally {
        onEditorClose();
      }
      execSync(command, {
        stdio: 'inherit',
        encoding: 'utf8',
      });
      break;
    }


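The try/finally form being dropped from `openDiff` above guarantees the close callback fires even when the blocking editor command throws. A minimal sketch of that pattern, with a hypothetical `runEditor` standing in for the execSync call:

// Hedged sketch: always notify the caller that the editor closed,
// even if launching or running the editor throws.
function withEditorClose(runEditor: () => void, onEditorClose: () => void): void {
  try {
    runEditor(); // e.g. a blocking execSync call, as in the diff above
  } finally {
    onEditorClose(); // runs on both success and failure
  }
}
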
@@ -426,29 +426,6 @@ describe('fileUtils', () => {
      expect(result.linesShown).toEqual([6, 10]);
    });

    it('should identify truncation when reading the end of a file', async () => {
      const lines = Array.from({ length: 20 }, (_, i) => `Line ${i + 1}`);
      actualNodeFs.writeFileSync(testTextFilePath, lines.join('\n'));

      // Read from line 11 to 20. The start is not 0, so it's truncated.
      const result = await processSingleFileContent(
        testTextFilePath,
        tempRootDir,
        10,
        10,
      );
      const expectedContent = lines.slice(10, 20).join('\n');

      expect(result.llmContent).toContain(expectedContent);
      expect(result.llmContent).toContain(
        '[File content truncated: showing lines 11-20 of 20 total lines. Use offset/limit parameters to view more.]',
      );
      expect(result.returnDisplay).toBe('Read lines 11-20 of 20 from test.txt');
      expect(result.isTruncated).toBe(true); // This is the key check for the bug
      expect(result.originalLineCount).toBe(20);
      expect(result.linesShown).toEqual([11, 20]);
    });

    it('should handle limit exceeding file length', async () => {
      const lines = ['Line 1', 'Line 2'];
      actualNodeFs.writeFileSync(testTextFilePath, lines.join('\n'));

@@ -299,8 +299,7 @@ export async function processSingleFileContent(
        return line;
      });

      const contentRangeTruncated =
        startLine > 0 || endLine < originalLineCount;
      const contentRangeTruncated = endLine < originalLineCount;
      const isTruncated = contentRangeTruncated || linesWereTruncatedInLength;

      let llmTextContent = '';

@@ -1,112 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest';
import { getCacheKey, read, write, clear } from './crawlCache.js';

describe('CrawlCache', () => {
  describe('getCacheKey', () => {
    it('should generate a consistent hash', () => {
      const key1 = getCacheKey('/foo', 'bar');
      const key2 = getCacheKey('/foo', 'bar');
      expect(key1).toBe(key2);
    });

    it('should generate a different hash for different directories', () => {
      const key1 = getCacheKey('/foo', 'bar');
      const key2 = getCacheKey('/bar', 'bar');
      expect(key1).not.toBe(key2);
    });

    it('should generate a different hash for different ignore content', () => {
      const key1 = getCacheKey('/foo', 'bar');
      const key2 = getCacheKey('/foo', 'baz');
      expect(key1).not.toBe(key2);
    });
  });

  describe('in-memory cache operations', () => {
    beforeEach(() => {
      // Ensure a clean slate before each test
      clear();
    });

    afterEach(() => {
      // Restore real timers after each test that uses fake ones
      vi.useRealTimers();
    });

    it('should write and read data from the cache', () => {
      const key = 'test-key';
      const data = ['foo', 'bar'];
      write(key, data, 10000); // 10 second TTL
      const cachedData = read(key);
      expect(cachedData).toEqual(data);
    });

    it('should return undefined for a nonexistent key', () => {
      const cachedData = read('nonexistent-key');
      expect(cachedData).toBeUndefined();
    });

    it('should clear the cache', () => {
      const key = 'test-key';
      const data = ['foo', 'bar'];
      write(key, data, 10000);
      clear();
      const cachedData = read(key);
      expect(cachedData).toBeUndefined();
    });

    it('should automatically evict a cache entry after its TTL expires', async () => {
      vi.useFakeTimers();
      const key = 'ttl-key';
      const data = ['foo'];
      const ttl = 5000; // 5 seconds

      write(key, data, ttl);

      // Should exist immediately after writing
      expect(read(key)).toEqual(data);

      // Advance time just before expiration
      await vi.advanceTimersByTimeAsync(ttl - 1);
      expect(read(key)).toEqual(data);

      // Advance time past expiration
      await vi.advanceTimersByTimeAsync(1);
      expect(read(key)).toBeUndefined();
    });

    it('should reset the timer when an entry is updated', async () => {
      vi.useFakeTimers();
      const key = 'update-key';
      const initialData = ['initial'];
      const updatedData = ['updated'];
      const ttl = 5000; // 5 seconds

      // Write initial data
      write(key, initialData, ttl);

      // Advance time, but not enough to expire
      await vi.advanceTimersByTimeAsync(3000);
      expect(read(key)).toEqual(initialData);

      // Update the data, which should reset the timer
      write(key, updatedData, ttl);
      expect(read(key)).toEqual(updatedData);

      // Advance time again. If the timer wasn't reset, the total elapsed
      // time (3000 + 3000 = 6000) would cause an eviction.
      await vi.advanceTimersByTimeAsync(3000);
      expect(read(key)).toEqual(updatedData);

      // Advance past the new expiration time
      await vi.advanceTimersByTimeAsync(2001);
      expect(read(key)).toBeUndefined();
    });
  });
});
@@ -1,65 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import crypto from 'node:crypto';

const crawlCache = new Map<string, string[]>();
const cacheTimers = new Map<string, NodeJS.Timeout>();

/**
 * Generates a unique cache key based on the project directory and the content
 * of ignore files. This ensures that the cache is invalidated if the project
 * or ignore rules change.
 */
export const getCacheKey = (
  directory: string,
  ignoreContent: string,
): string => {
  const hash = crypto.createHash('sha256');
  hash.update(directory);
  hash.update(ignoreContent);
  return hash.digest('hex');
};

/**
 * Reads cached data from the in-memory cache.
 * Returns undefined if the key is not found.
 */
export const read = (key: string): string[] | undefined => crawlCache.get(key);

/**
 * Writes data to the in-memory cache and sets a timer to evict it after the TTL.
 */
export const write = (key: string, results: string[], ttlMs: number): void => {
  // Clear any existing timer for this key to prevent premature deletion
  if (cacheTimers.has(key)) {
    clearTimeout(cacheTimers.get(key)!);
  }

  // Store the new data
  crawlCache.set(key, results);

  // Set a timer to automatically delete the cache entry after the TTL
  const timerId = setTimeout(() => {
    crawlCache.delete(key);
    cacheTimers.delete(key);
  }, ttlMs);

  // Store the timer handle so we can clear it if the entry is updated
  cacheTimers.set(key, timerId);
};

/**
 * Clears the entire cache and all active timers.
 * Primarily used for testing.
 */
export const clear = (): void => {
  for (const timerId of cacheTimers.values()) {
    clearTimeout(timerId);
  }
  crawlCache.clear();
  cacheTimers.clear();
};

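A note for orientation: the module deleted above exposes exactly four functions, and they compose as in this minimal sketch (the directory path, ignore content, and TTL are illustrative values, not taken from the repo):

import { getCacheKey, read, write, clear } from './crawlCache.js';

// The key hashes the directory together with the ignore-rule content, so a
// change to either yields a new key and leaves old entries to expire unused.
const key = getCacheKey('/my/project', 'node_modules/\ndist/');
write(key, ['src/', 'src/main.ts'], 30_000); // evicted automatically after 30s
const cached = read(key); // ['src/', 'src/main.ts'] until the TTL timer fires
clear(); // drops all entries and cancels every pending eviction timer
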
@@ -1,642 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as cache from './crawlCache.js';
import { FileSearch, AbortError, filter } from './fileSearch.js';
import { createTmpDir, cleanupTmpDir } from '@qwen-code/qwen-code-test-utils';

type FileSearchWithPrivateMethods = FileSearch & {
  performCrawl: () => Promise<void>;
};

describe('FileSearch', () => {
  let tmpDir: string;
  afterEach(async () => {
    if (tmpDir) {
      await cleanupTmpDir(tmpDir);
    }
    vi.restoreAllMocks();
  });

  it('should use .geminiignore rules', async () => {
    tmpDir = await createTmpDir({
      '.geminiignore': 'dist/',
      dist: ['ignored.js'],
      src: ['not-ignored.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: true,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual(['src/', '.geminiignore', 'src/not-ignored.js']);
  });

  it('should combine .gitignore and .geminiignore rules', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': 'dist/',
      '.geminiignore': 'build/',
      dist: ['ignored-by-git.js'],
      build: ['ignored-by-gemini.js'],
      src: ['not-ignored.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: true,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual([
      'src/',
      '.geminiignore',
      '.gitignore',
      'src/not-ignored.js',
    ]);
  });

  it('should use ignoreDirs option', async () => {
    tmpDir = await createTmpDir({
      logs: ['some.log'],
      src: ['main.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: ['logs'],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual(['src/', 'src/main.js']);
  });

  it('should handle negated directories', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': ['build/**', '!build/public', '!build/public/**'].join(
        '\n',
      ),
      build: {
        'private.js': '',
        public: ['index.html'],
      },
      src: ['main.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual([
      'build/',
      'build/public/',
      'src/',
      '.gitignore',
      'build/public/index.html',
      'src/main.js',
    ]);
  });

  it('should filter results with a search pattern', async () => {
    tmpDir = await createTmpDir({
      src: {
        'main.js': '',
        'util.ts': '',
        'style.css': '',
      },
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('**/*.js');

    expect(results).toEqual(['src/main.js']);
  });

  it('should handle root-level file negation', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': ['*.mk', '!Foo.mk'].join('\n'),
      'bar.mk': '',
      'Foo.mk': '',
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual(['.gitignore', 'Foo.mk']);
  });

  it('should handle directory negation with glob', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': [
        'third_party/**',
        '!third_party/foo',
        '!third_party/foo/bar',
        '!third_party/foo/bar/baz_buffer',
      ].join('\n'),
      third_party: {
        foo: {
          bar: {
            baz_buffer: '',
          },
        },
        ignore_this: '',
      },
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual([
      'third_party/',
      'third_party/foo/',
      'third_party/foo/bar/',
      '.gitignore',
      'third_party/foo/bar/baz_buffer',
    ]);
  });

  it('should correctly handle negated patterns in .gitignore', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': ['dist/**', '!dist/keep.js'].join('\n'),
      dist: ['ignore.js', 'keep.js'],
      src: ['main.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual([
      'dist/',
      'src/',
      '.gitignore',
      'dist/keep.js',
      'src/main.js',
    ]);
  });

  // New test cases start here

  it('should initialize correctly when ignore files are missing', async () => {
    tmpDir = await createTmpDir({
      src: ['file1.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: true,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    // Expect no errors to be thrown during initialization
    await expect(fileSearch.initialize()).resolves.toBeUndefined();
    const results = await fileSearch.search('');
    expect(results).toEqual(['src/', 'src/file1.js']);
  });

  it('should respect maxResults option in search', async () => {
    tmpDir = await createTmpDir({
      src: {
        'file1.js': '',
        'file2.js': '',
        'file3.js': '',
        'file4.js': '',
      },
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('**/*.js', { maxResults: 2 });

    expect(results).toEqual(['src/file1.js', 'src/file2.js']); // Assuming alphabetical sort
  });

  it('should return empty array when no matches are found', async () => {
    tmpDir = await createTmpDir({
      src: ['file1.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('nonexistent-file.xyz');

    expect(results).toEqual([]);
  });

  it('should throw AbortError when filter is aborted', async () => {
    const controller = new AbortController();
    const dummyPaths = Array.from({ length: 5000 }, (_, i) => `file${i}.js`); // Large array to ensure yielding

    const filterPromise = filter(dummyPaths, '*.js', controller.signal);

    // Abort after a short delay to ensure filter has started
    setTimeout(() => controller.abort(), 1);

    await expect(filterPromise).rejects.toThrow(AbortError);
  });

  describe('with in-memory cache', () => {
    beforeEach(() => {
      cache.clear();
    });

    afterEach(() => {
      vi.useRealTimers();
    });

    it('should throw an error if search is called before initialization', async () => {
      tmpDir = await createTmpDir({});
      const fileSearch = new FileSearch({
        projectRoot: tmpDir,
        useGitignore: false,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: false,
        cacheTtl: 0,
      });

      await expect(fileSearch.search('')).rejects.toThrow(
        'Engine not initialized. Call initialize() first.',
      );
    });

    it('should hit the cache for subsequent searches', async () => {
      tmpDir = await createTmpDir({ 'file1.js': '' });
      const getOptions = () => ({
        projectRoot: tmpDir,
        useGitignore: false,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: true,
        cacheTtl: 10,
      });

      const fs1 = new FileSearch(getOptions());
      const crawlSpy1 = vi.spyOn(
        fs1 as FileSearchWithPrivateMethods,
        'performCrawl',
      );
      await fs1.initialize();
      expect(crawlSpy1).toHaveBeenCalledTimes(1);

      // Second search should hit the cache because the options are identical
      const fs2 = new FileSearch(getOptions());
      const crawlSpy2 = vi.spyOn(
        fs2 as FileSearchWithPrivateMethods,
        'performCrawl',
      );
      await fs2.initialize();
      expect(crawlSpy2).not.toHaveBeenCalled();
    });

    it('should miss the cache when ignore rules change', async () => {
      tmpDir = await createTmpDir({
        '.gitignore': 'a.txt',
        'a.txt': '',
        'b.txt': '',
      });
      const options = {
        projectRoot: tmpDir,
        useGitignore: true,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: true,
        cacheTtl: 10000,
      };

      // Initial search to populate the cache
      const fs1 = new FileSearch(options);
      const crawlSpy1 = vi.spyOn(
        fs1 as FileSearchWithPrivateMethods,
        'performCrawl',
      );
      await fs1.initialize();
      const results1 = await fs1.search('');
      expect(crawlSpy1).toHaveBeenCalledTimes(1);
      expect(results1).toEqual(['.gitignore', 'b.txt']);

      // Modify the ignore file
      await fs.writeFile(path.join(tmpDir, '.gitignore'), 'b.txt');

      // Second search should miss the cache and trigger a recrawl
      const fs2 = new FileSearch(options);
      const crawlSpy2 = vi.spyOn(
        fs2 as FileSearchWithPrivateMethods,
        'performCrawl',
      );
      await fs2.initialize();
      const results2 = await fs2.search('');
      expect(crawlSpy2).toHaveBeenCalledTimes(1);
      expect(results2).toEqual(['.gitignore', 'a.txt']);
    });

    it('should miss the cache after TTL expires', async () => {
      vi.useFakeTimers();
      tmpDir = await createTmpDir({ 'file1.js': '' });
      const options = {
        projectRoot: tmpDir,
        useGitignore: false,
        useGeminiignore: false,
        ignoreDirs: [],
        cache: true,
        cacheTtl: 10, // 10 seconds
      };

      // Initial search to populate the cache
      const fs1 = new FileSearch(options);
      await fs1.initialize();

      // Advance time past the TTL
      await vi.advanceTimersByTimeAsync(11000);

      // Second search should miss the cache and trigger a recrawl
      const fs2 = new FileSearch(options);
      const crawlSpy = vi.spyOn(
        fs2 as FileSearchWithPrivateMethods,
        'performCrawl',
      );
      await fs2.initialize();

      expect(crawlSpy).toHaveBeenCalledTimes(1);
    });
  });

  it('should handle empty or commented-only ignore files', async () => {
    tmpDir = await createTmpDir({
      '.gitignore': '# This is a comment\n\n \n',
      src: ['main.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: true,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual(['src/', '.gitignore', 'src/main.js']);
  });

  it('should always ignore the .git directory', async () => {
    tmpDir = await createTmpDir({
      '.git': ['config', 'HEAD'],
      src: ['main.js'],
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false, // Explicitly disable .gitignore to isolate this rule
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();
    const results = await fileSearch.search('');

    expect(results).toEqual(['src/', 'src/main.js']);
  });

  it('should be cancellable via AbortSignal', async () => {
    const largeDir: Record<string, string> = {};
    for (let i = 0; i < 100; i++) {
      largeDir[`file${i}.js`] = '';
    }
    tmpDir = await createTmpDir(largeDir);

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();

    const controller = new AbortController();
    const searchPromise = fileSearch.search('**/*.js', {
      signal: controller.signal,
    });

    // Yield to allow the search to start before aborting.
    await new Promise((resolve) => setImmediate(resolve));

    controller.abort();

    await expect(searchPromise).rejects.toThrow(AbortError);
  });

  it('should leverage ResultCache for bestBaseQuery optimization', async () => {
    tmpDir = await createTmpDir({
      src: {
        'foo.js': '',
        'bar.ts': '',
        nested: {
          'baz.js': '',
        },
      },
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: true, // Enable caching for this test
      cacheTtl: 0,
    });

    await fileSearch.initialize();

    // Perform a broad search to prime the cache
    const broadResults = await fileSearch.search('src/**');
    expect(broadResults).toEqual([
      'src/',
      'src/nested/',
      'src/bar.ts',
      'src/foo.js',
      'src/nested/baz.js',
    ]);

    // Perform a more specific search that should leverage the broad search's cached results
    const specificResults = await fileSearch.search('src/**/*.js');
    expect(specificResults).toEqual(['src/foo.js', 'src/nested/baz.js']);

    // Although we can't directly inspect ResultCache.hits/misses from here,
    // the correctness of specificResults after a broad search implicitly
    // verifies that the caching mechanism, including bestBaseQuery, is working.
  });

  it('should be case-insensitive by default', async () => {
    tmpDir = await createTmpDir({
      'File1.Js': '',
      'file2.js': '',
      'FILE3.JS': '',
      'other.txt': '',
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: false,
      cacheTtl: 0,
    });

    await fileSearch.initialize();

    // Search with a lowercase pattern
    let results = await fileSearch.search('file*.js');
    expect(results).toHaveLength(3);
    expect(results).toEqual(
      expect.arrayContaining(['File1.Js', 'file2.js', 'FILE3.JS']),
    );

    // Search with an uppercase pattern
    results = await fileSearch.search('FILE*.JS');
    expect(results).toHaveLength(3);
    expect(results).toEqual(
      expect.arrayContaining(['File1.Js', 'file2.js', 'FILE3.JS']),
    );

    // Search with a mixed-case pattern
    results = await fileSearch.search('FiLe*.Js');
    expect(results).toHaveLength(3);
    expect(results).toEqual(
      expect.arrayContaining(['File1.Js', 'file2.js', 'FILE3.JS']),
    );
  });

  it('should respect maxResults even when the cache returns an exact match', async () => {
    tmpDir = await createTmpDir({
      'file1.js': '',
      'file2.js': '',
      'file3.js': '',
      'file4.js': '',
      'file5.js': '',
    });

    const fileSearch = new FileSearch({
      projectRoot: tmpDir,
      useGitignore: false,
      useGeminiignore: false,
      ignoreDirs: [],
      cache: true, // Ensure caching is enabled
      cacheTtl: 10000,
    });

    await fileSearch.initialize();

    // 1. Perform a broad search to populate the cache with an exact match.
    const initialResults = await fileSearch.search('*.js');
    expect(initialResults).toEqual([
      'file1.js',
      'file2.js',
      'file3.js',
      'file4.js',
      'file5.js',
    ]);

    // 2. Perform the same search again, but this time with a maxResults limit.
    const limitedResults = await fileSearch.search('*.js', { maxResults: 2 });

    // 3. Assert that the maxResults limit was respected, even with a cache hit.
    expect(limitedResults).toEqual(['file1.js', 'file2.js']);
  });
});

@@ -1,269 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import path from 'node:path';
import fs from 'node:fs';
import { fdir } from 'fdir';
import picomatch from 'picomatch';
import { Ignore } from './ignore.js';
import { ResultCache } from './result-cache.js';
import * as cache from './crawlCache.js';

export type FileSearchOptions = {
  projectRoot: string;
  ignoreDirs: string[];
  useGitignore: boolean;
  useGeminiignore: boolean;
  cache: boolean;
  cacheTtl: number;
};

export class AbortError extends Error {
  constructor(message = 'Search aborted') {
    super(message);
    this.name = 'AbortError';
  }
}

/**
 * Filters a list of paths based on a given pattern.
 * @param allPaths The list of all paths to filter.
 * @param pattern The picomatch pattern to filter by.
 * @param signal An AbortSignal to cancel the operation.
 * @returns A promise that resolves to the filtered and sorted list of paths.
 */
export async function filter(
  allPaths: string[],
  pattern: string,
  signal: AbortSignal | undefined,
): Promise<string[]> {
  const patternFilter = picomatch(pattern, {
    dot: true,
    contains: true,
    nocase: true,
  });

  const results: string[] = [];
  for (const [i, p] of allPaths.entries()) {
    // Yield control to the event loop periodically to prevent blocking.
    if (i % 1000 === 0) {
      await new Promise((resolve) => setImmediate(resolve));
      if (signal?.aborted) {
        throw new AbortError();
      }
    }

    if (patternFilter(p)) {
      results.push(p);
    }
  }

  results.sort((a, b) => {
    const aIsDir = a.endsWith('/');
    const bIsDir = b.endsWith('/');

    if (aIsDir && !bIsDir) return -1;
    if (!aIsDir && bIsDir) return 1;

    // This is 40% faster than localeCompare and the only thing we would really
    // gain from localeCompare is case-sensitive sort
    return a < b ? -1 : a > b ? 1 : 0;
  });

  return results;
}

export type SearchOptions = {
  signal?: AbortSignal;
  maxResults?: number;
};

/**
 * Provides a fast and efficient way to search for files within a project,
 * respecting .gitignore and .geminiignore rules, and utilizing caching
 * for improved performance.
 */
export class FileSearch {
  private readonly absoluteDir: string;
  private readonly ignore: Ignore = new Ignore();
  private resultCache: ResultCache | undefined;
  private allFiles: string[] = [];

  /**
   * Constructs a new `FileSearch` instance.
   * @param options Configuration options for the file search.
   */
  constructor(private readonly options: FileSearchOptions) {
    this.absoluteDir = path.resolve(options.projectRoot);
  }

  /**
   * Initializes the file search engine by loading ignore rules, crawling the
   * file system, and building the in-memory cache. This method must be called
   * before performing any searches.
   */
  async initialize(): Promise<void> {
    this.loadIgnoreRules();
    await this.crawlFiles();
    this.buildResultCache();
  }

  /**
   * Searches for files matching a given pattern.
   * @param pattern The picomatch pattern to search for (e.g., '*.js', 'src/**').
   * @param options Search options, including an AbortSignal and maxResults.
   * @returns A promise that resolves to a list of matching file paths, relative
   *   to the project root.
   */
  async search(
    pattern: string,
    options: SearchOptions = {},
  ): Promise<string[]> {
    if (!this.resultCache) {
      throw new Error('Engine not initialized. Call initialize() first.');
    }

    pattern = pattern || '*';

    const { files: candidates, isExactMatch } =
      await this.resultCache!.get(pattern);

    let filteredCandidates;
    if (isExactMatch) {
      filteredCandidates = candidates;
    } else {
      // Apply the user's picomatch pattern filter
      filteredCandidates = await filter(candidates, pattern, options.signal);
      this.resultCache!.set(pattern, filteredCandidates);
    }

    // Trade-off: We apply a two-stage filtering process.
    // 1. During the file system crawl (`performCrawl`), we only apply directory-level
    //    ignore rules (e.g., `node_modules/`, `dist/`). This is because applying
    //    a full ignore filter (which includes file-specific patterns like `*.log`)
    //    during the crawl can significantly slow down `fdir`.
    // 2. Here, in the `search` method, we apply the full ignore filter
    //    (including file patterns) to the `filteredCandidates` (which have already
    //    been filtered by the user's search pattern and sorted). For autocomplete,
    //    the number of displayed results is small (MAX_SUGGESTIONS_TO_SHOW),
    //    so applying the full filter to this truncated list is much more efficient
    //    than applying it to every file during the initial crawl.
    const fileFilter = this.ignore.getFileFilter();
    const results: string[] = [];
    for (const [i, candidate] of filteredCandidates.entries()) {
      // Yield to the event loop to avoid blocking on large result sets.
      if (i % 1000 === 0) {
        await new Promise((resolve) => setImmediate(resolve));
        if (options.signal?.aborted) {
          throw new AbortError();
        }
      }

      if (results.length >= (options.maxResults ?? Infinity)) {
        break;
      }
      // The `ignore` library throws an error if the path is '.', so we skip it.
      if (candidate === '.') {
        continue;
      }
      if (!fileFilter(candidate)) {
        results.push(candidate);
      }
    }
    return results;
  }

  /**
   * Loads ignore rules from .gitignore and .geminiignore files, and applies
   * any additional ignore directories specified in the options.
   */
  private loadIgnoreRules(): void {
    if (this.options.useGitignore) {
      const gitignorePath = path.join(this.absoluteDir, '.gitignore');
      if (fs.existsSync(gitignorePath)) {
        this.ignore.add(fs.readFileSync(gitignorePath, 'utf8'));
      }
    }

    if (this.options.useGeminiignore) {
      const geminiignorePath = path.join(this.absoluteDir, '.geminiignore');
      if (fs.existsSync(geminiignorePath)) {
        this.ignore.add(fs.readFileSync(geminiignorePath, 'utf8'));
      }
    }

    const ignoreDirs = ['.git', ...this.options.ignoreDirs];
    this.ignore.add(
      ignoreDirs.map((dir) => {
        if (dir.endsWith('/')) {
          return dir;
        }
        return `${dir}/`;
      }),
    );
  }

  /**
   * Crawls the file system to get a list of all files and directories,
   * optionally using a cache for faster initialization.
   */
  private async crawlFiles(): Promise<void> {
    if (this.options.cache) {
      const cacheKey = cache.getCacheKey(
        this.absoluteDir,
        this.ignore.getFingerprint(),
      );
      const cachedResults = cache.read(cacheKey);

      if (cachedResults) {
        this.allFiles = cachedResults;
        return;
      }
    }

    this.allFiles = await this.performCrawl();

    if (this.options.cache) {
      const cacheKey = cache.getCacheKey(
        this.absoluteDir,
        this.ignore.getFingerprint(),
      );
      cache.write(cacheKey, this.allFiles, this.options.cacheTtl * 1000);
    }
  }

  /**
   * Performs the actual file system crawl using `fdir`, applying directory
   * ignore rules.
   * @returns A promise that resolves to a list of all files and directories.
   */
  private async performCrawl(): Promise<string[]> {
    const dirFilter = this.ignore.getDirectoryFilter();

    // We use `fdir` for fast file system traversal. A key performance
    // optimization for large workspaces is to exclude entire directories
    // early in the traversal process. This is why we apply directory-specific
    // ignore rules (e.g., `node_modules/`, `dist/`) directly to `fdir`'s
    // exclude filter.
    const api = new fdir()
      .withRelativePaths()
      .withDirs()
      .withPathSeparator('/') // Always use unix style paths
      .exclude((_, dirPath) => {
        const relativePath = path.relative(this.absoluteDir, dirPath);
        return dirFilter(`${relativePath}/`);
      });

    return api.crawl(this.absoluteDir).withPromise();
  }

  /**
   * Builds the in-memory cache for fast pattern matching.
   */
  private buildResultCache(): void {
    this.resultCache = new ResultCache(this.allFiles, this.absoluteDir);
  }
}

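Read together with its tests, the deleted engine is driven in three steps: construct, initialize once, then search. A sketch under assumed inputs (the project root and pattern are illustrative; run inside an async context):

import { FileSearch, AbortError } from './fileSearch.js';

const fileSearch = new FileSearch({
  projectRoot: '/my/project', // illustrative
  useGitignore: true,
  useGeminiignore: false,
  ignoreDirs: ['logs'],
  cache: true,
  cacheTtl: 10, // seconds; multiplied by 1000 before cache.write()
});

await fileSearch.initialize(); // loads ignore rules, crawls, builds the ResultCache
const controller = new AbortController();
try {
  const results = await fileSearch.search('src/**/*.ts', {
    signal: controller.signal,
    maxResults: 20,
  });
  console.log(results);
} catch (err) {
  if (err instanceof AbortError) {
    // the caller aborted mid-search
  }
}
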
@@ -1,65 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect } from 'vitest';
import { Ignore } from './ignore.js';

describe('Ignore', () => {
  describe('getDirectoryFilter', () => {
    it('should ignore directories matching directory patterns', () => {
      const ig = new Ignore().add(['foo/', 'bar/']);
      const dirFilter = ig.getDirectoryFilter();
      expect(dirFilter('foo/')).toBe(true);
      expect(dirFilter('bar/')).toBe(true);
      expect(dirFilter('baz/')).toBe(false);
    });

    it('should not ignore directories with file patterns', () => {
      const ig = new Ignore().add(['foo.js', '*.log']);
      const dirFilter = ig.getDirectoryFilter();
      expect(dirFilter('foo.js')).toBe(false);
      expect(dirFilter('foo.log')).toBe(false);
    });
  });

  describe('getFileFilter', () => {
    it('should not ignore files with directory patterns', () => {
      const ig = new Ignore().add(['foo/', 'bar/']);
      const fileFilter = ig.getFileFilter();
      expect(fileFilter('foo')).toBe(false);
      expect(fileFilter('foo/file.txt')).toBe(false);
    });

    it('should ignore files matching file patterns', () => {
      const ig = new Ignore().add(['*.log', 'foo.js']);
      const fileFilter = ig.getFileFilter();
      expect(fileFilter('foo.log')).toBe(true);
      expect(fileFilter('foo.js')).toBe(true);
      expect(fileFilter('bar.txt')).toBe(false);
    });
  });

  it('should accumulate patterns across multiple add() calls', () => {
    const ig = new Ignore().add('foo.js');
    ig.add('bar.js');
    const fileFilter = ig.getFileFilter();
    expect(fileFilter('foo.js')).toBe(true);
    expect(fileFilter('bar.js')).toBe(true);
    expect(fileFilter('baz.js')).toBe(false);
  });

  it('should return a stable and consistent fingerprint', () => {
    const ig1 = new Ignore().add(['foo', '!bar']);
    const ig2 = new Ignore().add('foo\n!bar');

    // Fingerprints should be identical for the same rules.
    expect(ig1.getFingerprint()).toBe(ig2.getFingerprint());

    // Adding a new rule should change the fingerprint.
    ig2.add('baz');
    expect(ig1.getFingerprint()).not.toBe(ig2.getFingerprint());
  });
});

@@ -1,93 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import ignore from 'ignore';
import picomatch from 'picomatch';

const hasFileExtension = picomatch('**/*[*.]*');

export class Ignore {
  private readonly allPatterns: string[] = [];
  private dirIgnorer = ignore();
  private fileIgnorer = ignore();

  /**
   * Adds one or more ignore patterns.
   * @param patterns A single pattern string or an array of pattern strings.
   *   Each pattern can be a glob-like string similar to .gitignore rules.
   * @returns The `Ignore` instance for chaining.
   */
  add(patterns: string | string[]): this {
    if (typeof patterns === 'string') {
      patterns = patterns.split(/\r?\n/);
    }

    for (const p of patterns) {
      const pattern = p.trim();

      if (pattern === '' || pattern.startsWith('#')) {
        continue;
      }

      this.allPatterns.push(pattern);

      const isPositiveDirPattern =
        pattern.endsWith('/') && !pattern.startsWith('!');

      if (isPositiveDirPattern) {
        this.dirIgnorer.add(pattern);
      } else {
        // An ambiguous pattern (e.g., "build") could match a file or a
        // directory. To optimize the file system crawl, we use a heuristic:
        // patterns without a dot in the last segment are included in the
        // directory exclusion check.
        //
        // This heuristic can fail. For example, an ignore pattern of "my.assets"
        // intended to exclude a directory will not be treated as a directory
        // pattern because it contains a ".". This results in crawling a
        // directory that should have been excluded, reducing efficiency.
        // Correctness is still maintained. The incorrectly crawled directory
        // will be filtered out by the final ignore check.
        //
        // For maximum crawl efficiency, users should explicitly mark directory
        // patterns with a trailing slash (e.g., "my.assets/").
        this.fileIgnorer.add(pattern);
        if (!hasFileExtension(pattern)) {
          this.dirIgnorer.add(pattern);
        }
      }
    }

    return this;
  }

  /**
   * Returns a predicate that matches explicit directory ignore patterns (patterns ending with '/').
   * @returns {(dirPath: string) => boolean}
   */
  getDirectoryFilter(): (dirPath: string) => boolean {
    return (dirPath: string) => this.dirIgnorer.ignores(dirPath);
  }

  /**
   * Returns a predicate that matches file ignore patterns (all patterns not ending with '/').
   * Note: This may also match directories if a file pattern matches a directory name, but all explicit directory patterns are handled by getDirectoryFilter.
   * @returns {(filePath: string) => boolean}
   */
  getFileFilter(): (filePath: string) => boolean {
    return (filePath: string) => this.fileIgnorer.ignores(filePath);
  }

  /**
   * Returns a string representing the current set of ignore patterns.
   * This can be used to generate a unique identifier for the ignore configuration,
   * useful for caching purposes.
   * @returns A string fingerprint of the ignore patterns.
   */
  getFingerprint(): string {
    return this.allPatterns.join('\n');
  }
}

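The split between the directory and file predicates is the subtle part of this class; a short sketch of the behavior the tests above pin down:

import { Ignore } from './ignore.js';

const ig = new Ignore().add(['dist/', '*.log', '# comments are skipped']);
ig.getDirectoryFilter()('dist/'); // true: explicit directory pattern
ig.getDirectoryFilter()('src/'); // false
ig.getFileFilter()('debug.log'); // true: file pattern
ig.getFileFilter()('dist'); // false: directory patterns never reach the file ignorer
ig.getFingerprint(); // 'dist/\n*.log' (blank lines and comments are dropped)
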
@@ -1,56 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import path from 'node:path';
import { test, expect } from 'vitest';
import { ResultCache } from './result-cache.js';

test('ResultCache basic usage', async () => {
  const files = [
    'foo.txt',
    'bar.js',
    'baz.md',
    'subdir/file.txt',
    'subdir/other.js',
    'subdir/nested/file.md',
  ];
  const cache = new ResultCache(files, path.resolve('.'));
  const { files: resultFiles, isExactMatch } = await cache.get('*.js');
  expect(resultFiles).toEqual(files);
  expect(isExactMatch).toBe(false);
});

test('ResultCache cache hit/miss', async () => {
  const files = ['foo.txt', 'bar.js', 'baz.md'];
  const cache = new ResultCache(files, path.resolve('.'));
  // First call: miss
  const { files: result1Files, isExactMatch: isExactMatch1 } =
    await cache.get('*.js');
  expect(result1Files).toEqual(files);
  expect(isExactMatch1).toBe(false);

  // Simulate FileSearch applying the filter and setting the result
  cache.set('*.js', ['bar.js']);

  // Second call: hit
  const { files: result2Files, isExactMatch: isExactMatch2 } =
    await cache.get('*.js');
  expect(result2Files).toEqual(['bar.js']);
  expect(isExactMatch2).toBe(true);
});

test('ResultCache best base query', async () => {
  const files = ['foo.txt', 'foobar.js', 'baz.md'];
  const cache = new ResultCache(files, path.resolve('.'));

  // Cache a broader query
  cache.set('foo', ['foo.txt', 'foobar.js']);

  // Search for a more specific query that starts with the broader one
  const { files: resultFiles, isExactMatch } = await cache.get('foobar');
  expect(resultFiles).toEqual(['foo.txt', 'foobar.js']);
  expect(isExactMatch).toBe(false);
});

@@ -1,70 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * Implements an in-memory cache for file search results.
 * This cache optimizes subsequent searches by leveraging previously computed results.
 */
export class ResultCache {
  private readonly cache: Map<string, string[]>;
  private hits = 0;
  private misses = 0;

  constructor(
    private readonly allFiles: string[],
    private readonly absoluteDir: string,
  ) {
    this.cache = new Map();
  }

  /**
   * Retrieves cached search results for a given query, or provides a base set
   * of files to search from.
   * @param query The search query pattern.
   * @returns An object containing the files to search and a boolean indicating
   *   if the result is an exact cache hit.
   */
  async get(
    query: string,
  ): Promise<{ files: string[]; isExactMatch: boolean }> {
    const isCacheHit = this.cache.has(query);

    if (isCacheHit) {
      this.hits++;
      return { files: this.cache.get(query)!, isExactMatch: true };
    }

    this.misses++;

    // This is the core optimization of the memory cache.
    // If a user first searches for "foo", and then for "foobar",
    // we don't need to search through all files again. We can start
    // from the results of the "foo" search.
    // This finds the most specific, already-cached query that is a prefix
    // of the current query.
    let bestBaseQuery = '';
    for (const key of this.cache?.keys?.() ?? []) {
      if (query.startsWith(key) && key.length > bestBaseQuery.length) {
        bestBaseQuery = key;
      }
    }

    const filesToSearch = bestBaseQuery
      ? this.cache.get(bestBaseQuery)!
      : this.allFiles;

    return { files: filesToSearch, isExactMatch: false };
  }

  /**
   * Stores search results in the cache.
   * @param query The search query pattern.
   * @param results The matching file paths to cache.
   */
  set(query: string, results: string[]): void {
    this.cache.set(query, results);
  }
}

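The comment inside get() describes the core trick; here is a compact sketch of the prefix-reuse path (the file list and root are illustrative; run inside an async context):

import { ResultCache } from './result-cache.js';

const cache = new ResultCache(['foo.txt', 'foobar.js', 'baz.md'], '/my/project');
cache.set('foo', ['foo.txt', 'foobar.js']); // results of an earlier, broader query

const { files, isExactMatch } = await cache.get('foobar');
// isExactMatch === false and files === ['foo.txt', 'foobar.js']: the caller
// re-filters this narrowed base set instead of all files, then calls
// cache.set('foobar', ...) so the next identical query is an exact hit.
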
@@ -17,8 +17,7 @@ import {
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { retryWithBackoff } from './retry.js';
import { AuthType } from '../core/contentGenerator.js';

-vi.mock('node:fs');
import { IdeClient } from '../ide/ide-client.js';

vi.mock('node:fs');

@@ -36,6 +35,7 @@ describe('Flash Fallback Integration', () => {
      debugMode: false,
      cwd: '/test',
      model: 'gemini-2.5-pro',
+      ideClient: IdeClient.getInstance(false),
    });

    // Reset simulation state for each test

@@ -67,7 +67,6 @@ describe('loadServerHierarchicalMemory', () => {
  it('should return empty memory and count if no context files are found', async () => {
    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

@@ -86,13 +85,14 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, defaultContextFile)} ---\ndefault context content\n--- End of Context from: ${path.relative(cwd, defaultContextFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, defaultContextFile)} ---
+default context content
+--- End of Context from: ${path.relative(cwd, defaultContextFile)} ---`,
      fileCount: 1,
    });
  });

@@ -108,13 +108,14 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, customContextFile)} ---\ncustom context content\n--- End of Context from: ${path.relative(cwd, customContextFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, customContextFile)} ---
+custom context content
+--- End of Context from: ${path.relative(cwd, customContextFile)} ---`,
      fileCount: 1,
    });
  });

@@ -134,13 +135,18 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, projectContextFile)} ---\nproject context content\n--- End of Context from: ${path.relative(cwd, projectContextFile)} ---\n\n--- Context from: ${path.relative(cwd, cwdContextFile)} ---\ncwd context content\n--- End of Context from: ${path.relative(cwd, cwdContextFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, projectContextFile)} ---
+project context content
+--- End of Context from: ${path.relative(cwd, projectContextFile)} ---
+
+--- Context from: ${path.relative(cwd, cwdContextFile)} ---
+cwd context content
+--- End of Context from: ${path.relative(cwd, cwdContextFile)} ---`,
      fileCount: 2,
    });
  });

@@ -157,13 +163,18 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${customFilename} ---\nCWD custom memory\n--- End of Context from: ${customFilename} ---\n\n--- Context from: ${path.join('subdir', customFilename)} ---\nSubdir custom memory\n--- End of Context from: ${path.join('subdir', customFilename)} ---`,
+      memoryContent: `--- Context from: ${customFilename} ---
+CWD custom memory
+--- End of Context from: ${customFilename} ---
+
+--- Context from: ${path.join('subdir', customFilename)} ---
+Subdir custom memory
+--- End of Context from: ${path.join('subdir', customFilename)} ---`,
      fileCount: 2,
    });
  });

@@ -180,13 +191,18 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, projectRootGeminiFile)} ---\nProject root memory\n--- End of Context from: ${path.relative(cwd, projectRootGeminiFile)} ---\n\n--- Context from: ${path.relative(cwd, srcGeminiFile)} ---\nSrc directory memory\n--- End of Context from: ${path.relative(cwd, srcGeminiFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, projectRootGeminiFile)} ---
+Project root memory
+--- End of Context from: ${path.relative(cwd, projectRootGeminiFile)} ---
+
+--- Context from: ${path.relative(cwd, srcGeminiFile)} ---
+Src directory memory
+--- End of Context from: ${path.relative(cwd, srcGeminiFile)} ---`,
      fileCount: 2,
    });
  });

@@ -203,13 +219,18 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${DEFAULT_CONTEXT_FILENAME} ---\nCWD memory\n--- End of Context from: ${DEFAULT_CONTEXT_FILENAME} ---\n\n--- Context from: ${path.join('subdir', DEFAULT_CONTEXT_FILENAME)} ---\nSubdir memory\n--- End of Context from: ${path.join('subdir', DEFAULT_CONTEXT_FILENAME)} ---`,
+      memoryContent: `--- Context from: ${DEFAULT_CONTEXT_FILENAME} ---
+CWD memory
+--- End of Context from: ${DEFAULT_CONTEXT_FILENAME} ---
+
+--- Context from: ${path.join('subdir', DEFAULT_CONTEXT_FILENAME)} ---
+Subdir memory
+--- End of Context from: ${path.join('subdir', DEFAULT_CONTEXT_FILENAME)} ---`,
      fileCount: 2,
    });
  });

@@ -238,13 +259,30 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, defaultContextFile)} ---\ndefault context content\n--- End of Context from: ${path.relative(cwd, defaultContextFile)} ---\n\n--- Context from: ${path.relative(cwd, rootGeminiFile)} ---\nProject parent memory\n--- End of Context from: ${path.relative(cwd, rootGeminiFile)} ---\n\n--- Context from: ${path.relative(cwd, projectRootGeminiFile)} ---\nProject root memory\n--- End of Context from: ${path.relative(cwd, projectRootGeminiFile)} ---\n\n--- Context from: ${path.relative(cwd, cwdGeminiFile)} ---\nCWD memory\n--- End of Context from: ${path.relative(cwd, cwdGeminiFile)} ---\n\n--- Context from: ${path.relative(cwd, subDirGeminiFile)} ---\nSubdir memory\n--- End of Context from: ${path.relative(cwd, subDirGeminiFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, defaultContextFile)} ---
+default context content
+--- End of Context from: ${path.relative(cwd, defaultContextFile)} ---
+
+--- Context from: ${path.relative(cwd, rootGeminiFile)} ---
+Project parent memory
+--- End of Context from: ${path.relative(cwd, rootGeminiFile)} ---
+
+--- Context from: ${path.relative(cwd, projectRootGeminiFile)} ---
+Project root memory
+--- End of Context from: ${path.relative(cwd, projectRootGeminiFile)} ---
+
+--- Context from: ${path.relative(cwd, cwdGeminiFile)} ---
+CWD memory
+--- End of Context from: ${path.relative(cwd, cwdGeminiFile)} ---
+
+--- Context from: ${path.relative(cwd, subDirGeminiFile)} ---
+Subdir memory
+--- End of Context from: ${path.relative(cwd, subDirGeminiFile)} ---`,
      fileCount: 5,
    });
  });

@@ -264,7 +302,6 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
      [],

@@ -277,7 +314,9 @@ describe('loadServerHierarchicalMemory', () => {
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, regularSubDirGeminiFile)} ---\nMy code memory\n--- End of Context from: ${path.relative(cwd, regularSubDirGeminiFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, regularSubDirGeminiFile)} ---
+My code memory
+--- End of Context from: ${path.relative(cwd, regularSubDirGeminiFile)} ---`,
      fileCount: 1,
    });
  });

@@ -294,7 +333,6 @@ describe('loadServerHierarchicalMemory', () => {
    // Pass the custom limit directly to the function
    await loadServerHierarchicalMemory(
      cwd,
-      [],
      true,
      new FileDiscoveryService(projectRoot),
      [],

@@ -315,7 +353,6 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
    );

@@ -334,36 +371,15 @@ describe('loadServerHierarchicalMemory', () => {

    const result = await loadServerHierarchicalMemory(
      cwd,
-      [],
      false,
      new FileDiscoveryService(projectRoot),
      [extensionFilePath],
    );

    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, extensionFilePath)} ---\nExtension memory content\n--- End of Context from: ${path.relative(cwd, extensionFilePath)} ---`,
-      fileCount: 1,
-    });
-  });
-
-  it('should load memory from included directories', async () => {
-    const includedDir = await createEmptyDir(
-      path.join(testRootDir, 'included'),
-    );
-    const includedFile = await createTestFile(
-      path.join(includedDir, DEFAULT_CONTEXT_FILENAME),
-      'included directory memory',
-    );
-
-    const result = await loadServerHierarchicalMemory(
-      cwd,
-      [includedDir],
-      false,
-      new FileDiscoveryService(projectRoot),
-    );
-
-    expect(result).toEqual({
-      memoryContent: `--- Context from: ${path.relative(cwd, includedFile)} ---\nincluded directory memory\n--- End of Context from: ${path.relative(cwd, includedFile)} ---`,
+      memoryContent: `--- Context from: ${path.relative(cwd, extensionFilePath)} ---
+Extension memory content
+--- End of Context from: ${path.relative(cwd, extensionFilePath)} ---`,
      fileCount: 1,
    });
  });

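All of the assertions above share one wrapper format. A sketch of how the expected strings are assembled follows; the helper name and file names are hypothetical, only the format itself is taken from the assertions:

// Hypothetical helper; the wrapper format is exactly what the tests assert.
const wrapContext = (relPath: string, content: string): string =>
  `--- Context from: ${relPath} ---\n${content}\n--- End of Context from: ${relPath} ---`;

// Context files found in hierarchical order are wrapped and joined by a blank line:
const memoryContent = [
  wrapContext('GEMINI.md', 'CWD memory'),
  wrapContext('subdir/GEMINI.md', 'Subdir memory'),
].join('\n\n');
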
@@ -83,36 +83,6 @@ async function findProjectRoot(startDir: string): Promise<string | null> {

async function getGeminiMdFilePathsInternal(
  currentWorkingDirectory: string,
-  includeDirectoriesToReadGemini: readonly string[],
-  userHomePath: string,
-  debugMode: boolean,
-  fileService: FileDiscoveryService,
-  extensionContextFilePaths: string[] = [],
-  fileFilteringOptions: FileFilteringOptions,
-  maxDirs: number,
-): Promise<string[]> {
-  const dirs = new Set<string>([
-    ...includeDirectoriesToReadGemini,
-    currentWorkingDirectory,
-  ]);
-  const paths = [];
-  for (const dir of dirs) {
-    const pathsByDir = await getGeminiMdFilePathsInternalForEachDir(
-      dir,
-      userHomePath,
-      debugMode,
-      fileService,
-      extensionContextFilePaths,
-      fileFilteringOptions,
-      maxDirs,
-    );
-    paths.push(...pathsByDir);
-  }
-  return Array.from(new Set<string>(paths));
-}
-
-async function getGeminiMdFilePathsInternalForEachDir(
-  dir: string,
  userHomePath: string,
  debugMode: boolean,
  fileService: FileDiscoveryService,

@@ -145,8 +115,8 @@ async function getGeminiMdFilePathsInternalForEachDir(

  // FIX: Only perform the workspace search (upward and downward scans)
  // if a valid currentWorkingDirectory is provided.
-  if (dir) {
-    const resolvedCwd = path.resolve(dir);
+  if (currentWorkingDirectory) {
+    const resolvedCwd = path.resolve(currentWorkingDirectory);
    if (debugMode)
      logger.debug(
        `Searching for ${geminiMdFilename} starting from CWD: ${resolvedCwd}`,

@@ -287,7 +257,6 @@ function concatenateInstructions(
 */
export async function loadServerHierarchicalMemory(
  currentWorkingDirectory: string,
-  includeDirectoriesToReadGemini: readonly string[],
  debugMode: boolean,
  fileService: FileDiscoveryService,
  extensionContextFilePaths: string[] = [],

@@ -305,7 +274,6 @@ export async function loadServerHierarchicalMemory(
  const userHomePath = homedir();
  const filePaths = await getGeminiMdFilePathsInternal(
    currentWorkingDirectory,
-    includeDirectoriesToReadGemini,
    userHomePath,
    debugMode,
    fileService,

@@ -15,8 +15,6 @@ import * as path from 'path';
export class WorkspaceContext {
  private directories: Set<string>;

  private initialDirectories: Set<string>;

  /**
   * Creates a new WorkspaceContext with the given initial directory and optional additional directories.
   * @param initialDirectory The initial working directory (usually cwd)
@@ -24,14 +22,11 @@ export class WorkspaceContext {
   */
  constructor(initialDirectory: string, additionalDirectories: string[] = []) {
    this.directories = new Set<string>();
    this.initialDirectories = new Set<string>();

    this.addDirectoryInternal(initialDirectory);
    this.addInitialDirectoryInternal(initialDirectory);

    for (const dir of additionalDirectories) {
      this.addDirectoryInternal(dir);
      this.addInitialDirectoryInternal(dir);
    }
  }

@@ -74,33 +69,6 @@ export class WorkspaceContext {
    this.directories.add(realPath);
  }

  private addInitialDirectoryInternal(
    directory: string,
    basePath: string = process.cwd(),
  ): void {
    const absolutePath = path.isAbsolute(directory)
      ? directory
      : path.resolve(basePath, directory);

    if (!fs.existsSync(absolutePath)) {
      throw new Error(`Directory does not exist: ${absolutePath}`);
    }

    const stats = fs.statSync(absolutePath);
    if (!stats.isDirectory()) {
      throw new Error(`Path is not a directory: ${absolutePath}`);
    }

    let realPath: string;
    try {
      realPath = fs.realpathSync(absolutePath);
    } catch (_error) {
      throw new Error(`Failed to resolve path: ${absolutePath}`);
    }

    this.initialDirectories.add(realPath);
  }

  /**
   * Gets a copy of all workspace directories.
   * @returns Array of absolute directory paths
@@ -109,17 +77,6 @@ export class WorkspaceContext {
    return Array.from(this.directories);
  }

  getInitialDirectories(): readonly string[] {
    return Array.from(this.initialDirectories);
  }

  setDirectories(directories: readonly string[]): void {
    this.directories.clear();
    for (const dir of directories) {
      this.addDirectoryInternal(dir);
    }
  }

  /**
   * Checks if a given path is within any of the workspace directories.
   * @param pathToCheck The path to validate
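These hunks delete the initial-directory tracking; on the side of the diff that still has it, usage looks roughly like this (a sketch assembled from the hunks above, not verbatim from the repository):

// Sketch: constructor and methods as shown in the hunks above.
const workspace = new WorkspaceContext(process.cwd(), ['../shared-lib']);

workspace.getDirectories();        // live set; mutated by setDirectories()
workspace.getInitialDirectories(); // snapshot captured in the constructor

// setDirectories() rebuilds the live set but never touches the initial
// snapshot, so the original workspace roots stay recoverable.
workspace.setDirectories([process.cwd()]);

// Invalid inputs throw, e.g. `Directory does not exist: <absolute path>`.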
@@ -1,7 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

export * from './src/file-system-test-helpers.js';
@@ -1,18 +0,0 @@
{
  "name": "@qwen-code/qwen-code-test-utils",
  "version": "0.1.18",
  "private": true,
  "main": "src/index.ts",
  "license": "Apache-2.0",
  "type": "module",
  "scripts": {
    "build": "node ../../scripts/build_package.js",
    "typecheck": "tsc --noEmit"
  },
  "devDependencies": {
    "typescript": "^5.3.3"
  },
  "engines": {
    "node": ">=20"
  }
}
@@ -1,98 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';

/**
 * Defines the structure of a virtual file system to be created for testing.
 * Keys are file or directory names, and values can be:
 * - A string: The content of a file.
 * - A `FileSystemStructure` object: Represents a subdirectory with its own structure.
 * - An array of strings or `FileSystemStructure` objects: Represents a directory
 *   where strings are empty files and objects are subdirectories.
 *
 * @example
 * // Example 1: Simple files and directories
 * const structure1 = {
 *   'file1.txt': 'Hello, world!',
 *   'empty-dir': [],
 *   'src': {
 *     'main.js': '// Main application file',
 *     'utils.ts': '// Utility functions',
 *   },
 * };
 *
 * @example
 * // Example 2: Nested directories and empty files within an array
 * const structure2 = {
 *   'config.json': '{ "port": 3000 }',
 *   'data': [
 *     'users.csv',
 *     'products.json',
 *     {
 *       'logs': [
 *         'error.log',
 *         'access.log',
 *       ],
 *     },
 *   ],
 * };
 */
export type FileSystemStructure = {
  [name: string]:
    | string
    | FileSystemStructure
    | Array<string | FileSystemStructure>;
};

/**
 * Recursively creates files and directories based on the provided `FileSystemStructure`.
 * @param dir The base directory where the structure will be created.
 * @param structure The `FileSystemStructure` defining the files and directories.
 */
async function create(dir: string, structure: FileSystemStructure) {
  for (const [name, content] of Object.entries(structure)) {
    const newPath = path.join(dir, name);
    if (typeof content === 'string') {
      await fs.writeFile(newPath, content);
    } else if (Array.isArray(content)) {
      await fs.mkdir(newPath, { recursive: true });
      for (const item of content) {
        if (typeof item === 'string') {
          await fs.writeFile(path.join(newPath, item), '');
        } else {
          await create(newPath, item as FileSystemStructure);
        }
      }
    } else if (typeof content === 'object' && content !== null) {
      await fs.mkdir(newPath, { recursive: true });
      await create(newPath, content as FileSystemStructure);
    }
  }
}

/**
 * Creates a temporary directory and populates it with a given file system structure.
 * @param structure The `FileSystemStructure` to create within the temporary directory.
 * @returns A promise that resolves to the absolute path of the created temporary directory.
 */
export async function createTmpDir(
  structure: FileSystemStructure,
): Promise<string> {
  const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'gemini-cli-test-'));
  await create(tmpDir, structure);
  return tmpDir;
}

/**
 * Cleans up (deletes) a temporary directory and its contents.
 * @param dir The absolute path to the temporary directory to clean up.
 */
export async function cleanupTmpDir(dir: string) {
  await fs.rm(dir, { recursive: true, force: true });
}
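The deleted helpers were designed to pair in a try/finally. A usage sketch built from the file's own JSDoc examples; the importing package name comes from the deleted package.json above:

import { createTmpDir, cleanupTmpDir } from '@qwen-code/qwen-code-test-utils';

// Build a throwaway workspace, run assertions against it, then tear it down.
const tmpDir = await createTmpDir({
  'file1.txt': 'Hello, world!',
  src: {
    'main.js': '// Main application file',
  },
  logs: ['error.log', 'access.log'], // strings in an array become empty files
});

try {
  // ... exercise code that reads from tmpDir ...
} finally {
  await cleanupTmpDir(tmpDir);
}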
@@ -1,7 +0,0 @@
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

export * from './file-system-test-helpers.js';
@@ -1,11 +0,0 @@
{
  "extends": "../../tsconfig.json",
  "compilerOptions": {
    "outDir": "dist",
    "lib": ["DOM", "DOM.Iterable", "ES2021"],
    "composite": true,
    "types": ["node"]
  },
  "include": ["index.ts", "src/**/*.ts", "src/**/*.json"],
  "exclude": ["node_modules", "dist"]
}
@@ -2,7 +2,7 @@
  "name": "qwen-code-vscode-ide-companion",
  "displayName": "Qwen Code Companion",
  "description": "Enable Qwen Code with direct access to your VS Code workspace.",
  "version": "0.0.6",
  "version": "0.0.5",
  "publisher": "qwenlm",
  "icon": "assets/icon.png",
  "repository": {
@@ -19,12 +19,11 @@

import { execSync } from 'child_process';
import { existsSync, mkdirSync, writeFileSync } from 'fs';
import { dirname, join, relative } from 'path';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';

const __dirname = dirname(fileURLToPath(import.meta.url));
const root = join(__dirname, '..');
const scriptPath = relative(root, fileURLToPath(import.meta.url));
const generatedDir = join(root, 'packages/cli/src/generated');
const gitCommitFile = join(generatedDir, 'git-commit.ts');
let gitCommitInfo = 'N/A';
@@ -39,6 +38,12 @@ try {
  }).trim();
  if (gitHash) {
    gitCommitInfo = gitHash;
    const gitStatus = execSync('git status --porcelain', {
      encoding: 'utf-8',
    }).trim();
    if (gitStatus) {
      gitCommitInfo = `${gitHash} (local modifications)`;
    }
  }
} catch {
  // ignore
@@ -50,7 +55,7 @@ const fileContent = `/**
 * SPDX-License-Identifier: Apache-2.0
 */

// This file is auto-generated by the build script (${scriptPath})
// This file is auto-generated by the build script (scripts/build.js)
// Do not edit this file manually.
export const GIT_COMMIT_INFO = '${gitCommitInfo}';
`;
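Assembled, the script writes a generated file shaped like the following; the hash value is illustrative, and the generated-by comment reflects the `scripts/build.js` side of this hunk:

/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

// This file is auto-generated by the build script (scripts/build.js)
// Do not edit this file manually.
export const GIT_COMMIT_INFO = 'abc1234 (local modifications)';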
Some files were not shown because too many files have changed in this diff.