Mirror of https://github.com/QwenLM/qwen-code.git, synced 2026-01-06 17:09:14 +00:00.

Compare commits (1 commit): v0.6.0-nig ... v0.6.0-nig
| Author | SHA1 | Date |
|---|---|---|
| | 236a953a45 | |
.github/workflows/release-sdk.yml (vendored): 87 changes
```
@@ -91,8 +91,6 @@ jobs:
with:
node-version-file: '.nvmrc'
cache: 'npm'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'

- name: 'Install Dependencies'
run: |-
@@ -128,14 +126,6 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'

- name: 'Set SDK package version (local only)'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Ensure the package version matches the computed release version.
# This is required for nightly/preview because npm does not allow re-publishing the same version.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version

- name: 'Build CLI Bundle'
run: |
npm run build
@@ -168,21 +158,7 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"

- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build

- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'

- name: 'Create and switch to a release branch'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'release_branch'
env:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -191,22 +167,50 @@ jobs:
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"

- name: 'Commit and Push package version (stable only)'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
- name: 'Update package version'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Use npm workspaces so the root lockfile is updated consistently.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version

- name: 'Commit and Conditionally Push package version'
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
# Only persist version bumps after a successful publish.
git add packages/sdk-typescript/package.json package-lock.json
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): sdk-typescript ${RELEASE_TAG}"
fi
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi

- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build

- name: 'Configure npm for publishing'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'

- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'

- name: 'Create GitHub Release and Tag'
if: |-
@@ -216,29 +220,16 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
REF: '${{ github.event.inputs.ref || github.sha }}'
run: |-
# For stable releases, use the release branch; for nightly/preview, use the current ref
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
TARGET="${REF}"
PRERELEASE_FLAG="--prerelease"
else
TARGET="${RELEASE_BRANCH}"
PRERELEASE_FLAG=""
fi

gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "${TARGET}" \
--target "$RELEASE_BRANCH" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--generate-notes \
${PRERELEASE_FLAG}
--generate-notes

- name: 'Create PR to merge release branch into main'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
@@ -260,7 +251,7 @@ jobs:

- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
@@ -271,7 +262,7 @@ jobs:

- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
```
```
@@ -1,6 +1,4 @@
# Qwen Code overview
[](https://npm-compare.com/@qwen-code/qwen-code)
[](https://www.npmjs.com/package/@qwen-code/qwen-code)

> Learn about Qwen Code, Qwen's agentic coding tool that lives in your terminal and helps you turn ideas into code faster than ever before.

@@ -48,7 +46,7 @@ You'll be prompted to log in on first use. That's it! [Continue with Quickstart

> [!note]
>
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. Download and install the [Qwen Code Companion](https://marketplace.visualstudio.com/items?itemName=qwenlm.qwen-code-vscode-ide-companion) now.
> **New VS Code Extension (Beta)**: Prefer a graphical interface? Our new **VS Code extension** provides an easy-to-use native IDE experience without requiring terminal familiarity. Simply install from the marketplace and start coding with Qwen Code directly in your sidebar. You can search for **Qwen Code** in the VS Code Marketplace and download it.

## What Qwen Code does for you
```
```
@@ -5,6 +5,8 @@
*/

import { describe, it, expect } from 'vitest';
import { existsSync } from 'node:fs';
import * as path from 'node:path';
import { TestRig, printDebugInfo, validateModelOutput } from './test-helper.js';

describe('file-system', () => {
@@ -200,8 +202,8 @@ describe('file-system', () => {
const readAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const editAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'edit_file',
const writeAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'write_file',
);
const successfulReplace = toolLogs.find(
(log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -224,15 +226,15 @@ describe('file-system', () => {

// CRITICAL: Verify that no matter what the model did, it never successfully
// wrote or replaced anything.
if (editAttempt) {
if (writeAttempt) {
console.error(
'A edit_file attempt was made when no file should be written.',
'A write_file attempt was made when no file should be written.',
);
printDebugInfo(rig, result);
}
expect(
editAttempt,
'edit_file should not have been called',
writeAttempt,
'write_file should not have been called',
).toBeUndefined();

if (successfulReplace) {
@@ -243,5 +245,12 @@ describe('file-system', () => {
successfulReplace,
'A successful replace should not have occurred',
).toBeUndefined();

// Final verification: ensure the file was not created.
const filePath = path.join(rig.testDir!, fileName);
const fileExists = existsSync(filePath);
expect(fileExists, 'The non-existent file should not be created').toBe(
false,
);
});
});
```
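The final guard added in the last hunk is worth isolating: after all tool-level assertions, the test still checks the filesystem directly. A minimal sketch of the same check, with hypothetical stand-ins for the `rig.testDir` and `fileName` values the test gets from its harness:

```ts
import { existsSync } from 'node:fs';
import * as path from 'node:path';

// Hypothetical stand-ins for rig.testDir and fileName from the test above.
const testDir = '/tmp/file-system-test';
const fileName = 'should-not-exist.txt';

// Same check as the hunk: the model must not have created the file.
const filePath = path.join(testDir, fileName);
if (existsSync(filePath)) {
  throw new Error('The non-existent file should not be created');
}
```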
```
@@ -952,8 +952,7 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);

// FIXME: This test is flaky and sometimes fails with no tool calls.
it.skip(
it(
'should allow read-only tools without restrictions',
async () => {
// Create test files for the model to read
```
```
@@ -314,88 +314,4 @@ describe('System Control (E2E)', () => {
);
});
});

describe('supportedCommands API', () => {
it('should return list of supported slash commands', async () => {
const sessionId = crypto.randomUUID();
const generator = (async function* () {
yield {
type: 'user',
session_id: sessionId,
message: { role: 'user', content: 'Hello' },
parent_tool_use_id: null,
} as SDKUserMessage;
})();

const q = query({
prompt: generator,
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
debug: false,
},
});

try {
const result = await q.supportedCommands();
// Start consuming messages to trigger initialization
const messageConsumer = (async () => {
try {
for await (const _message of q) {
// Just consume messages
}
} catch (error) {
// Ignore errors from query being closed
if (error instanceof Error && error.message !== 'Query is closed') {
throw error;
}
}
})();

// Verify result structure
expect(result).toBeDefined();
expect(result).toHaveProperty('commands');
expect(Array.isArray(result?.['commands'])).toBe(true);

const commands = result?.['commands'] as string[];

// Verify default allowed built-in commands are present
expect(commands).toContain('init');
expect(commands).toContain('summary');
expect(commands).toContain('compress');

// Verify commands are sorted
const sortedCommands = [...commands].sort();
expect(commands).toEqual(sortedCommands);

// Verify all commands are strings
commands.forEach((cmd) => {
expect(typeof cmd).toBe('string');
expect(cmd.length).toBeGreaterThan(0);
});

await q.close();
await messageConsumer;
} catch (error) {
await q.close();
throw error;
}
});

it('should throw error when supportedCommands is called on closed query', async () => {
const q = query({
prompt: 'Hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
model: 'qwen3-max',
},
});

await q.close();

await expect(q.supportedCommands()).rejects.toThrow('Query is closed');
});
});
});
```
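Read outside the test harness, the `supportedCommands` flow above reduces to: create a `query`, await `supportedCommands()`, then `close()`. A minimal sketch, assuming the same `@qwen-code/sdk` surface the tests import (`query` with `prompt`/`options`, and the `{ commands: string[] }` result shape the test asserts):

```ts
import { query } from '@qwen-code/sdk';

// Minimal sketch; the options here are a subset of what the tests pass.
async function listSupportedCommands(cwd: string): Promise<string[]> {
  const q = query({
    prompt: 'Hello',
    options: { cwd, model: 'qwen3-max', debug: false },
  });
  try {
    // Shape asserted by the test: { commands: string[] }, sorted alphabetically.
    const result = (await q.supportedCommands()) as { commands: string[] };
    return result.commands;
  } finally {
    // Per the second test, calling supportedCommands() after close()
    // rejects with 'Query is closed'.
    await q.close();
  }
}
```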
package-lock.json (generated): 155 changes
```
@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"workspaces": [
"packages/*"
],
@@ -134,36 +134,6 @@
"node": ">=6.0.0"
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.36.3",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.36.3.tgz",
"integrity": "sha512-+c0mMLxL/17yFZ4P5+U6bTWiCSFZUKJddrv01ud2aFBWnTPLdRncYV76D3q1tqfnL7aCnhRtykFnoCFzvr4U3Q==",
"license": "MIT",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
"version": "18.19.130",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
"integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/@asamuzakjp/css-color": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz",
@@ -3852,16 +3822,6 @@
"undici-types": "~6.21.0"
}
},
"node_modules/@types/node-fetch": {
"version": "2.6.13",
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
"integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
"license": "MIT",
"dependencies": {
"@types/node": "*",
"form-data": "^4.0.4"
}
},
"node_modules/@types/normalize-package-data": {
"version": "2.4.4",
"resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz",
@@ -4860,6 +4820,7 @@
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dev": true,
"license": "MIT",
"dependencies": {
"event-target-shim": "^5.0.0"
@@ -4946,18 +4907,6 @@
"node": ">= 14"
}
},
"node_modules/agentkeepalive": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
"integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
"license": "MIT",
"dependencies": {
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@@ -5529,6 +5478,7 @@
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"dev": true,
"license": "MIT"
},
"node_modules/atomically": {
@@ -6487,6 +6437,7 @@
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dev": true,
"license": "MIT",
"dependencies": {
"delayed-stream": "~1.0.0"
@@ -7112,6 +7063,7 @@
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.4.0"
@@ -7624,6 +7576,7 @@
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
@@ -8153,6 +8106,7 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
@@ -8698,6 +8652,7 @@
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"dev": true,
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
@@ -8710,16 +8665,11 @@
"node": ">= 6"
}
},
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
"license": "MIT"
},
"node_modules/form-data/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
@@ -8728,28 +8678,6 @@
"node": ">= 0.6"
}
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"license": "MIT",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/formdata-node/node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/formdata-polyfill": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
@@ -9334,6 +9262,7 @@
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-symbols": "^1.0.3"
@@ -9512,15 +9441,6 @@
"node": ">=16.17.0"
}
},
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"license": "MIT",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/husky": {
"version": "9.1.7",
"resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz",
@@ -12020,48 +11940,6 @@
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/node-fetch/node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"license": "MIT"
},
"node_modules/node-fetch/node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"license": "BSD-2-Clause"
},
"node_modules/node-fetch/node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/node-pty": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-pty/-/node-pty-1.0.0.tgz",
@@ -17316,7 +17194,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
@@ -17953,10 +17831,9 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",
@@ -18593,7 +18470,7 @@
},
"packages/sdk-typescript": {
"name": "@qwen-code/sdk",
"version": "0.1.0",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"license": "Apache-2.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
@@ -21413,7 +21290,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -21425,7 +21302,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
```
```
@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251226.17eb20c1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251225.9f65bd3b"
},
"scripts": {
"start": "cross-env node scripts/start.js",

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-nightly.20251226.17eb20c1",
"version": "0.6.0-nightly.20251225.9f65bd3b",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251226.17eb20c1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-nightly.20251225.9f65bd3b"
},
"dependencies": {
"@google/genai": "1.30.0",
```
```
@@ -98,14 +98,6 @@ export class AgentSideConnection implements Client {
);
}

/**
* Sends a custom notification to the client.
* Used for extension-specific notifications that are not part of the core ACP protocol.
*/
async sendCustomNotification<T>(method: string, params: T): Promise<void> {
return await this.#connection.sendNotification(method, params);
}

/**
* Request permission before running a tool
*
@@ -382,7 +374,6 @@ export interface Client {
): Promise<schema.RequestPermissionResponse>;
sessionUpdate(params: schema.SessionNotification): Promise<void>;
authenticateUpdate(params: schema.AuthenticateUpdate): Promise<void>;
sendCustomNotification<T>(method: string, params: T): Promise<void>;
writeTextFile(
params: schema.WriteTextFileRequest,
): Promise<schema.WriteTextFileResponse>;
```
```
@@ -15,10 +15,10 @@ import {
qwenOAuth2Events,
MCPServerConfig,
SessionService,
buildApiHistoryFromConversation,
type Config,
type ConversationRecord,
type DeviceAuthorizationData,
tokenLimit,
} from '@qwen-code/qwen-code-core';
import type { ApprovalModeValue } from './schema.js';
import * as acp from './acp.js';
@@ -165,30 +165,9 @@ class GeminiAgent {
this.setupFileSystem(config);

const session = await this.createAndStoreSession(config);
const configuredModel = (
config.getModel() ||
this.config.getModel() ||
''
).trim();
const modelId = configuredModel || 'default';
const modelName = configuredModel || modelId;

return {
sessionId: session.getId(),
models: {
currentModelId: modelId,
availableModels: [
{
modelId,
name: modelName,
description: null,
_meta: {
contextLimit: tokenLimit(modelId),
},
},
],
_meta: null,
},
};
}

@@ -348,20 +327,12 @@ class GeminiAgent {
const sessionId = config.getSessionId();
const geminiClient = config.getGeminiClient();

// Use GeminiClient to manage chat lifecycle properly
// This ensures geminiClient.chat is in sync with the session's chat
//
// Note: When loading a session, config.initialize() has already been called
// in newSessionConfig(), which in turn calls geminiClient.initialize().
// The GeminiClient.initialize() method checks config.getResumedSessionData()
// and automatically loads the conversation history into the chat instance.
// So we only need to initialize if it hasn't been done yet.
if (!geminiClient.isInitialized()) {
await geminiClient.initialize();
}

// Now get the chat instance that's managed by GeminiClient
const chat = geminiClient.getChat();
const history = conversation
? buildApiHistoryFromConversation(conversation)
: undefined;
const chat = history
? await geminiClient.startChat(history)
: await geminiClient.startChat();

const session = new Session(
sessionId,
```
```
@@ -93,7 +93,6 @@ export type ModeInfo = z.infer<typeof modeInfoSchema>;
export type ModesData = z.infer<typeof modesDataSchema>;

export type AgentInfo = z.infer<typeof agentInfoSchema>;
export type ModelInfo = z.infer<typeof modelInfoSchema>;

export type PromptCapabilities = z.infer<typeof promptCapabilitiesSchema>;

@@ -255,26 +254,8 @@ export const authenticateUpdateSchema = z.object({

export type AuthenticateUpdate = z.infer<typeof authenticateUpdateSchema>;

export const acpMetaSchema = z.record(z.unknown()).nullable().optional();

export const modelIdSchema = z.string();

export const modelInfoSchema = z.object({
_meta: acpMetaSchema,
description: z.string().nullable().optional(),
modelId: modelIdSchema,
name: z.string(),
});

export const sessionModelStateSchema = z.object({
_meta: acpMetaSchema,
availableModels: z.array(modelInfoSchema),
currentModelId: modelIdSchema,
});

export const newSessionResponseSchema = z.object({
sessionId: z.string(),
models: sessionModelStateSchema,
});

export const loadSessionResponseSchema = z.null();
@@ -533,13 +514,6 @@ export const currentModeUpdateSchema = z.object({

export type CurrentModeUpdate = z.infer<typeof currentModeUpdateSchema>;

export const currentModelUpdateSchema = z.object({
sessionUpdate: z.literal('current_model_update'),
model: modelInfoSchema,
});

export type CurrentModelUpdate = z.infer<typeof currentModelUpdateSchema>;

export const sessionUpdateSchema = z.union([
z.object({
content: contentBlockSchema,
@@ -581,7 +555,6 @@ export const sessionUpdateSchema = z.union([
sessionUpdate: z.literal('plan'),
}),
currentModeUpdateSchema,
currentModelUpdateSchema,
availableCommandsUpdateSchema,
]);
```
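For context on how the `current_model_update` variant shown in this diff is consumed: `sessionUpdateSchema` is a plain `z.union`, so `parse()` returns a TypeScript union that narrows on the `sessionUpdate` literal. A small sketch, assuming the schema names defined above and a hypothetical import path:

```ts
// Hypothetical import path; the schemas are the ones defined in the diff above.
import { sessionUpdateSchema } from './schema.js';

// A raw session notification as it might arrive over the ACP connection.
const raw: unknown = {
  sessionUpdate: 'current_model_update',
  model: { _meta: null, modelId: 'qwen3-max', name: 'qwen3-max' },
};

const update = sessionUpdateSchema.parse(raw); // throws if no branch matches
if (update.sessionUpdate === 'current_model_update') {
  // Narrowed to CurrentModelUpdate; `model` satisfies modelInfoSchema.
  console.log(`model switched to ${update.model.modelId}`);
}
```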
```
@@ -41,11 +41,9 @@ import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { z } from 'zod';
import { getErrorMessage } from '../../utils/errors.js';
import { normalizePartList } from '../../utils/nonInteractiveHelpers.js';
import {
handleSlashCommand,
getAvailableCommands,
type NonInteractiveSlashCommandResult,
} from '../../nonInteractiveCliCommands.js';
import type {
AvailableCommand,
@@ -65,6 +63,12 @@ import { PlanEmitter } from './emitters/PlanEmitter.js';
import { MessageEmitter } from './emitters/MessageEmitter.js';
import { SubAgentTracker } from './SubAgentTracker.js';

/**
* Built-in commands that are allowed in ACP integration mode.
* Only safe, read-only commands that don't require interactive UI.
*/
export const ALLOWED_BUILTIN_COMMANDS_FOR_ACP = ['init'];

/**
* Session represents an active conversation session with the AI model.
* It uses modular components for consistent event emission:
@@ -163,26 +167,24 @@ export class Session implements SessionContext {
const firstTextBlock = params.prompt.find((block) => block.type === 'text');
const inputText = firstTextBlock?.text || '';

let parts: Part[] | null;
let parts: Part[];

if (isSlashCommand(inputText)) {
// Handle slash command - uses default allowed commands (init, summary, compress)
// Handle slash command - allow specific built-in commands for ACP integration
const slashCommandResult = await handleSlashCommand(
inputText,
pendingSend,
this.config,
this.settings,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);

parts = await this.#processSlashCommandResult(
slashCommandResult,
params.prompt,
);

// If parts is null, the command was fully handled (e.g., /summary completed)
// Return early without sending to the model
if (parts === null) {
return { stopReason: 'end_turn' };
if (slashCommandResult) {
// Use the result from the slash command
parts = slashCommandResult as Part[];
} else {
// Slash command didn't return a prompt, continue with normal processing
parts = await this.#resolvePrompt(params.prompt, pendingSend.signal);
}
} else {
// Normal processing for non-slash commands
@@ -293,10 +295,11 @@ export class Session implements SessionContext {
async sendAvailableCommandsUpdate(): Promise<void> {
const abortController = new AbortController();
try {
// Use default allowed commands from getAvailableCommands
const slashCommands = await getAvailableCommands(
this.config,
this.settings,
abortController.signal,
ALLOWED_BUILTIN_COMMANDS_FOR_ACP,
);

// Convert SlashCommand[] to AvailableCommand[] format for ACP protocol
@@ -644,103 +647,6 @@ export class Session implements SessionContext {
}
}

/**
* Processes the result of a slash command execution.
*
* Supported result types in ACP mode:
* - submit_prompt: Submits content to the model
* - stream_messages: Streams multiple messages to the client (ACP-specific)
* - unsupported: Command cannot be executed in ACP mode
* - no_command: No command was found, use original prompt
*
* Note: 'message' type is not supported in ACP mode - commands should use
* 'stream_messages' instead for consistent async handling.
*
* @param result The result from handleSlashCommand
* @param originalPrompt The original prompt blocks
* @returns Parts to use for the prompt, or null if command was handled without needing model interaction
*/
async #processSlashCommandResult(
result: NonInteractiveSlashCommandResult,
originalPrompt: acp.ContentBlock[],
): Promise<Part[] | null> {
switch (result.type) {
case 'submit_prompt':
// Command wants to submit a prompt to the model
// Convert PartListUnion to Part[]
return normalizePartList(result.content);

case 'message': {
// 'message' type is not ideal for ACP mode, but we handle it for compatibility
// by converting it to a stream_messages-like notification
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command: originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' '),
messageType: result.messageType,
message: result.content || '',
});

if (result.messageType === 'error') {
// Throw error to stop execution
throw new Error(result.content || 'Slash command failed.');
}
// For info messages, return null to indicate command was handled
return null;
}

case 'stream_messages': {
// Command returns multiple messages via async generator (ACP-preferred)
const command = originalPrompt
.filter((block) => block.type === 'text')
.map((block) => (block.type === 'text' ? block.text : ''))
.join(' ');

// Stream all messages to the client
for await (const msg of result.messages) {
await this.client.sendCustomNotification('_qwencode/slash_command', {
sessionId: this.sessionId,
command,
messageType: msg.messageType,
message: msg.content,
});

// If we encounter an error message, throw after sending
if (msg.messageType === 'error') {
throw new Error(msg.content || 'Slash command failed.');
}
}

// All messages sent successfully, return null to indicate command was handled
return null;
}

case 'unsupported': {
// Command returned an unsupported result type
const unsupportedError = `Slash command not supported in ACP integration: ${result.reason}`;
throw new Error(unsupportedError);
}

case 'no_command':
// No command was found or executed, use original prompt
return originalPrompt.map((block) => {
if (block.type === 'text') {
return { text: block.text };
}
throw new Error(`Unsupported block type: ${block.type}`);
});

default: {
// Exhaustiveness check
const _exhaustive: never = result;
const unknownError = `Unknown slash command result type: ${(_exhaustive as NonInteractiveSlashCommandResult).type}`;
throw new Error(unknownError);
}
}
}

async #resolvePrompt(
message: acp.ContentBlock[],
abortSignal: AbortSignal,
```
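The `default` branch in the diff above relies on TypeScript's `never`-based exhaustiveness check. A reduced, self-contained sketch of the same idiom (the two-variant result type here is illustrative, not the real `NonInteractiveSlashCommandResult`):

```ts
type SlashResult =
  | { type: 'submit_prompt'; content: string }
  | { type: 'no_command' };

function routeResult(result: SlashResult): string {
  switch (result.type) {
    case 'submit_prompt':
      return `send to model: ${result.content}`;
    case 'no_command':
      return 'use the original prompt';
    default: {
      // If a new variant is added to SlashResult and not handled above,
      // this assignment stops compiling, which is the point of the check.
      const _exhaustive: never = result;
      throw new Error(
        `Unknown slash command result type: ${(_exhaustive as SlashResult).type}`,
      );
    }
  }
}
```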
```
@@ -26,20 +26,6 @@ export function validateAuthMethod(authMethod: string): string | null {
return null;
}

if (authMethod === AuthType.USE_ANTHROPIC) {
const hasApiKey = process.env['ANTHROPIC_API_KEY'];
if (!hasApiKey) {
return 'ANTHROPIC_API_KEY environment variable not found.';
}

const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
if (!hasBaseUrl) {
return 'ANTHROPIC_BASE_URL environment variable not found.';
}

return null;
}

if (authMethod === AuthType.USE_GEMINI) {
const hasApiKey = process.env['GEMINI_API_KEY'];
if (!hasApiKey) {
```
```
@@ -2114,14 +2114,7 @@ describe('loadCliConfig model selection', () => {
});

it('always prefers model from argvs', async () => {
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{
@@ -2141,14 +2134,7 @@ describe('loadCliConfig model selection', () => {
});

it('selects the model from argvs if provided', async () => {
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{
```
```
@@ -468,7 +468,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
type: 'string',
choices: [
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.QWEN_OAUTH,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
@@ -877,30 +876,11 @@ export async function loadCliConfig(
);
}

const selectedAuthType =
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType;

const apiKey =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey
: '') || '';
const baseUrl =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl
: '') || '';
const resolvedModel =
argv.model ||
(selectedAuthType === AuthType.USE_OPENAI
? process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name
: '') ||
'';
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;

const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -987,15 +967,23 @@ export async function loadCliConfig(
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
authType: selectedAuthType,
authType:
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType,
inputFormat,
outputFormat,
includePartialMessages,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey,
baseUrl,
apiKey:
argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey,
baseUrl:
argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging
```
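One side of this hunk gates the OpenAI-specific fallbacks on the selected auth type before falling through to an empty string. A reduced sketch of that precedence (the helper is illustrative and not part of the codebase; the `'openai'` literal stands in for `AuthType.USE_OPENAI`):

```ts
// Illustrative helper mirroring the auth-gated precedence from the diff:
// CLI flag first, then OpenAI env vars and settings, but only when the
// OpenAI auth type is selected; otherwise resolve to ''.
function resolveModel(
  argvModel: string | undefined,
  selectedAuthType: string | undefined,
  settingsModelName: string | undefined,
): string {
  const openAiFallback =
    selectedAuthType === 'openai'
      ? process.env['OPENAI_MODEL'] ||
        process.env['QWEN_MODEL'] ||
        settingsModelName
      : '';
  return argvModel || openAiFallback || '';
}
```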
```
@@ -258,8 +258,6 @@ export default {
', Tab to change focus': ', Tab to change focus',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'The command "/{{command}}" is not supported in non-interactive mode.',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -592,12 +590,6 @@ export default {
'No conversation found to summarize.': 'No conversation found to summarize.',
'Failed to generate project context summary: {{error}}':
'Failed to generate project context summary: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Saved project summary to {{filePathForDisplay}}.',
'Saving project summary...': 'Saving project summary...',
'Generating project summary...': 'Generating project summary...',
'Failed to generate summary - no text content received from LLM response':
'Failed to generate summary - no text content received from LLM response',

// ============================================================================
// Commands - Model

@@ -260,8 +260,7 @@ export default {
', Tab to change focus': ', Tab для смены фокуса',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'Для применения изменений необходимо перезапустить Qwen Code. Нажмите r для выхода и применения изменений.',
'The command "/{{command}}" is not supported in non-interactive mode.':
'Команда "/{{command}}" не поддерживается в неинтерактивном режиме.',

// ============================================================================
// Метки настроек
// ============================================================================
@@ -605,12 +604,6 @@ export default {
'Не найдено диалогов для создания сводки.',
'Failed to generate project context summary: {{error}}':
'Не удалось сгенерировать сводку контекста проекта: {{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'Сводка проекта сохранена в {{filePathForDisplay}}',
'Saving project summary...': 'Сохранение сводки проекта...',
'Generating project summary...': 'Генерация сводки проекта...',
'Failed to generate summary - no text content received from LLM response':
'Не удалось сгенерировать сводку - не получен текстовый контент из ответа LLM',

// ============================================================================
// Команды - Модель

@@ -249,8 +249,6 @@ export default {
', Tab to change focus': ',Tab 切换焦点',
'To see changes, Qwen Code must be restarted. Press r to exit and apply changes now.':
'要查看更改,必须重启 Qwen Code。按 r 退出并立即应用更改。',
'The command "/{{command}}" is not supported in non-interactive mode.':
'不支持在非交互模式下使用命令 "/{{command}}"。',
// ============================================================================
// Settings Labels
// ============================================================================
@@ -562,12 +560,6 @@ export default {
'No conversation found to summarize.': '未找到要总结的对话',
'Failed to generate project context summary: {{error}}':
'生成项目上下文摘要失败:{{error}}',
'Saved project summary to {{filePathForDisplay}}.':
'项目摘要已保存到 {{filePathForDisplay}}',
'Saving project summary...': '正在保存项目摘要...',
'Generating project summary...': '正在生成项目摘要...',
'Failed to generate summary - no text content received from LLM response':
'生成摘要失败 - 未从 LLM 响应中接收到文本内容',

// ============================================================================
// Commands - Model
```
```
@@ -20,7 +20,8 @@ import type {
CLIControlSetModelRequest,
CLIMcpServerConfig,
} from '../../types.js';
import { getAvailableCommands } from '../../../nonInteractiveCliCommands.js';
import { CommandService } from '../../../services/CommandService.js';
import { BuiltinCommandLoader } from '../../../services/BuiltinCommandLoader.js';
import {
MCPServerConfig,
AuthProviderType,
@@ -406,7 +407,7 @@ export class SystemController extends BaseController {
}

/**
* Load slash command names using getAvailableCommands
* Load slash command names using CommandService
*
* @param signal - AbortSignal to respect for cancellation
* @returns Promise resolving to array of slash command names
@@ -417,14 +418,21 @@ export class SystemController extends BaseController {
}

try {
const commands = await getAvailableCommands(this.context.config, signal);
const service = await CommandService.create(
[new BuiltinCommandLoader(this.context.config)],
signal,
);

if (signal.aborted) {
return [];
}

// Extract command names and sort
return commands.map((cmd) => cmd.name).sort();
const names = new Set<string>();
const commands = service.getCommands();
for (const command of commands) {
names.add(command.name);
}
return Array.from(names).sort();
} catch (error) {
// Check if the error is due to abort
if (signal.aborted) {
```
```
@@ -68,7 +68,6 @@ describe('runNonInteractive', () => {
let mockShutdownTelemetry: Mock;
let consoleErrorSpy: MockInstance;
let processStdoutSpy: MockInstance;
let processStderrSpy: MockInstance;
let mockGeminiClient: {
sendMessageStream: Mock;
getChatRecordingService: Mock;
@@ -87,9 +86,6 @@ describe('runNonInteractive', () => {
processStdoutSpy = vi
.spyOn(process.stdout, 'write')
.mockImplementation(() => true);
processStderrSpy = vi
.spyOn(process.stderr, 'write')
.mockImplementation(() => true);
vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
});
@@ -143,8 +139,6 @@ describe('runNonInteractive', () => {
setModel: vi.fn(async (model: string) => {
currentModel = model;
}),
getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
isInteractive: vi.fn().mockReturnValue(false),
} as unknown as Config;

mockSettings = {
@@ -858,7 +852,7 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response from command');
});

it('should handle command that requires confirmation by returning early', async () => {
it('should throw FatalInputError if a command requires confirmation', async () => {
const mockCommand = {
name: 'confirm',
description: 'a command that needs confirmation',
@@ -870,16 +864,15 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);

await runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
);

// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.\n',
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/confirm',
'prompt-id-confirm',
),
).rejects.toThrow(
'Exiting due to a confirmation prompt requested by the command.',
);
});

@@ -916,30 +909,7 @@ describe('runNonInteractive', () => {
expect(processStdoutSpy).toHaveBeenCalledWith('Response to unknown');
});

it('should handle known but unsupported slash commands like /help by returning early', async () => {
// Mock a built-in command that exists but is not in the allowed list
const mockHelpCommand = {
name: 'help',
description: 'Show help',
kind: CommandKind.BUILT_IN,
action: vi.fn(),
};
mockGetCommands.mockReturnValue([mockHelpCommand]);

await runNonInteractive(
mockConfig,
mockSettings,
'/help',
'prompt-id-help',
);

// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'The command "/help" is not supported in non-interactive mode.\n',
);
});

it('should handle unhandled command result types by returning early with error', async () => {
it('should throw for unhandled command result types', async () => {
const mockCommand = {
name: 'noaction',
description: 'unhandled type',
@@ -950,16 +920,15 @@ describe('runNonInteractive', () => {
};
mockGetCommands.mockReturnValue([mockCommand]);

await runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
);

// Should write error message to stderr
expect(processStderrSpy).toHaveBeenCalledWith(
'Unknown command result type: unhandled\n',
await expect(
runNonInteractive(
mockConfig,
mockSettings,
'/noaction',
'prompt-id-unhandled',
),
).rejects.toThrow(
'Exiting due to command result that is not supported in non-interactive mode.',
);
});
```
```
@@ -42,55 +42,6 @@ import {
computeUsageFromMetrics,
} from './utils/nonInteractiveHelpers.js';

/**
* Emits a final message for slash command results.
* Note: systemMessage should already be emitted before calling this function.
*/
async function emitNonInteractiveFinalMessage(params: {
message: string;
isError: boolean;
adapter?: JsonOutputAdapterInterface;
config: Config;
startTimeMs: number;
}): Promise<void> {
const { message, isError, adapter, config } = params;

if (!adapter) {
// Text output mode: write directly to stdout/stderr
const target = isError ? process.stderr : process.stdout;
target.write(`${message}\n`);
return;
}

// JSON output mode: emit assistant message and result
// (systemMessage should already be emitted by caller)
adapter.startAssistantMessage();
adapter.processEvent({
type: GeminiEventType.Content,
value: message,
} as unknown as Parameters<JsonOutputAdapterInterface['processEvent']>[0]);
adapter.finalizeAssistantMessage();

const metrics = uiTelemetryService.getMetrics();
const usage = computeUsageFromMetrics(metrics);
const outputFormat = config.getOutputFormat();
const stats =
outputFormat === OutputFormat.JSON
? uiTelemetryService.getMetrics()
: undefined;

adapter.emitResult({
isError,
durationMs: Date.now() - params.startTimeMs,
apiDurationMs: 0,
numTurns: 0,
errorMessage: isError ? message : undefined,
usage,
stats,
summary: message,
});
}

/**
* Provides optional overrides for `runNonInteractive` execution.
*
@@ -164,16 +115,6 @@ export async function runNonInteractive(
process.on('SIGINT', shutdownHandler);
process.on('SIGTERM', shutdownHandler);

// Emit systemMessage first (always the first message in JSON mode)
if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}

let initialPartList: PartListUnion | null = extractPartsFromUserMessage(
options.userMessage,
);
@@ -187,45 +128,10 @@ export async function runNonInteractive(
config,
settings,
);
switch (slashCommandResult.type) {
case 'submit_prompt':
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult.content;
slashHandled = true;
break;
case 'message': {
// systemMessage already emitted above
await emitNonInteractiveFinalMessage({
message: slashCommandResult.content,
isError: slashCommandResult.messageType === 'error',
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'stream_messages':
throw new FatalInputError(
'Stream messages mode is not supported in non-interactive CLI',
);
case 'unsupported': {
await emitNonInteractiveFinalMessage({
message: slashCommandResult.reason,
isError: true,
adapter,
config,
startTimeMs: startTime,
});
return;
}
case 'no_command':
break;
default: {
const _exhaustive: never = slashCommandResult;
throw new FatalInputError(
`Unhandled slash command result type: ${(_exhaustive as { type: string }).type}`,
);
}
}
if (slashCommandResult) {
// A slash command can replace the prompt entirely; fall back to @-command processing otherwise.
initialPartList = slashCommandResult as PartListUnion;
slashHandled = true;
}
}

@@ -257,6 +163,15 @@ export async function runNonInteractive(
const initialParts = normalizePartList(initialPartList);
let currentMessages: Content[] = [{ role: 'user', parts: initialParts }];

if (adapter) {
const systemMessage = await buildSystemMessage(
config,
sessionId,
permissionMode,
);
adapter.emitMessage(systemMessage);
}

let isFirstTurn = true;
while (true) {
turnCount++;
```
@@ -1,242 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach } from 'vitest';
import { handleSlashCommand } from './nonInteractiveCliCommands.js';
import type { Config } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from './config/settings.js';
import { CommandKind } from './ui/commands/types.js';

// Mock the CommandService
const mockGetCommands = vi.hoisted(() => vi.fn());
const mockCommandServiceCreate = vi.hoisted(() => vi.fn());
vi.mock('./services/CommandService.js', () => ({
  CommandService: {
    create: mockCommandServiceCreate,
  },
}));

describe('handleSlashCommand', () => {
  let mockConfig: Config;
  let mockSettings: LoadedSettings;
  let abortController: AbortController;

  beforeEach(() => {
    mockCommandServiceCreate.mockResolvedValue({
      getCommands: mockGetCommands,
    });

    mockConfig = {
      getExperimentalZedIntegration: vi.fn().mockReturnValue(false),
      isInteractive: vi.fn().mockReturnValue(false),
      getSessionId: vi.fn().mockReturnValue('test-session'),
      getFolderTrustFeature: vi.fn().mockReturnValue(false),
      getFolderTrust: vi.fn().mockReturnValue(false),
      getProjectRoot: vi.fn().mockReturnValue('/test/project'),
      storage: {},
    } as unknown as Config;

    mockSettings = {
      system: { path: '', settings: {} },
      systemDefaults: { path: '', settings: {} },
      user: { path: '', settings: {} },
      workspace: { path: '', settings: {} },
    } as LoadedSettings;

    abortController = new AbortController();
  });

  it('should return no_command for non-slash input', async () => {
    const result = await handleSlashCommand(
      'regular text',
      abortController,
      mockConfig,
      mockSettings,
    );

    expect(result.type).toBe('no_command');
  });

  it('should return no_command for unknown slash commands', async () => {
    mockGetCommands.mockReturnValue([]);

    const result = await handleSlashCommand(
      '/unknowncommand',
      abortController,
      mockConfig,
      mockSettings,
    );

    expect(result.type).toBe('no_command');
  });

  it('should return unsupported for known built-in commands not in allowed list', async () => {
    const mockHelpCommand = {
      name: 'help',
      description: 'Show help',
      kind: CommandKind.BUILT_IN,
      action: vi.fn(),
    };
    mockGetCommands.mockReturnValue([mockHelpCommand]);

    const result = await handleSlashCommand(
      '/help',
      abortController,
      mockConfig,
      mockSettings,
      [], // Empty allowed list
    );

    expect(result.type).toBe('unsupported');
    if (result.type === 'unsupported') {
      expect(result.reason).toContain('/help');
      expect(result.reason).toContain('not supported');
    }
  });

  it('should return unsupported for /help when using default allowed list', async () => {
    const mockHelpCommand = {
      name: 'help',
      description: 'Show help',
      kind: CommandKind.BUILT_IN,
      action: vi.fn(),
    };
    mockGetCommands.mockReturnValue([mockHelpCommand]);

    const result = await handleSlashCommand(
      '/help',
      abortController,
      mockConfig,
      mockSettings,
      // Default allowed list: ['init', 'summary', 'compress']
    );

    expect(result.type).toBe('unsupported');
    if (result.type === 'unsupported') {
      expect(result.reason).toBe(
        'The command "/help" is not supported in non-interactive mode.',
      );
    }
  });

  it('should execute allowed built-in commands', async () => {
    const mockInitCommand = {
      name: 'init',
      description: 'Initialize project',
      kind: CommandKind.BUILT_IN,
      action: vi.fn().mockResolvedValue({
        type: 'message',
        messageType: 'info',
        content: 'Project initialized',
      }),
    };
    mockGetCommands.mockReturnValue([mockInitCommand]);

    const result = await handleSlashCommand(
      '/init',
      abortController,
      mockConfig,
      mockSettings,
      ['init'], // init is in the allowed list
    );

    expect(result.type).toBe('message');
    if (result.type === 'message') {
      expect(result.content).toBe('Project initialized');
    }
  });

  it('should execute file commands regardless of allowed list', async () => {
    const mockFileCommand = {
      name: 'custom',
      description: 'Custom file command',
      kind: CommandKind.FILE,
      action: vi.fn().mockResolvedValue({
        type: 'submit_prompt',
        content: [{ text: 'Custom prompt' }],
      }),
    };
    mockGetCommands.mockReturnValue([mockFileCommand]);

    const result = await handleSlashCommand(
      '/custom',
      abortController,
      mockConfig,
      mockSettings,
      [], // Empty allowed list, but FILE commands should still work
    );

    expect(result.type).toBe('submit_prompt');
    if (result.type === 'submit_prompt') {
      expect(result.content).toEqual([{ text: 'Custom prompt' }]);
    }
  });

  it('should return unsupported for other built-in commands like /quit', async () => {
    const mockQuitCommand = {
      name: 'quit',
      description: 'Quit application',
      kind: CommandKind.BUILT_IN,
      action: vi.fn(),
    };
    mockGetCommands.mockReturnValue([mockQuitCommand]);

    const result = await handleSlashCommand(
      '/quit',
      abortController,
      mockConfig,
      mockSettings,
    );

    expect(result.type).toBe('unsupported');
    if (result.type === 'unsupported') {
      expect(result.reason).toContain('/quit');
      expect(result.reason).toContain('not supported');
    }
  });

  it('should handle command with no action', async () => {
    const mockCommand = {
      name: 'noaction',
      description: 'Command without action',
      kind: CommandKind.FILE,
      // No action property
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    const result = await handleSlashCommand(
      '/noaction',
      abortController,
      mockConfig,
      mockSettings,
    );

    expect(result.type).toBe('no_command');
  });

  it('should return message when command returns void', async () => {
    const mockCommand = {
      name: 'voidcmd',
      description: 'Command that returns void',
      kind: CommandKind.FILE,
      action: vi.fn().mockResolvedValue(undefined),
    };
    mockGetCommands.mockReturnValue([mockCommand]);

    const result = await handleSlashCommand(
      '/voidcmd',
      abortController,
      mockConfig,
      mockSettings,
    );

    expect(result.type).toBe('message');
    if (result.type === 'message') {
      expect(result.content).toBe('Command executed successfully.');
      expect(result.messageType).toBe('info');
    }
  });
});

@@ -7,6 +7,7 @@
import type { PartListUnion } from '@google/genai';
import { parseSlashCommand } from './utils/commands.js';
import {
  FatalInputError,
  Logger,
  uiTelemetryService,
  type Config,
@@ -18,164 +19,10 @@ import {
  CommandKind,
  type CommandContext,
  type SlashCommand,
  type SlashCommandActionReturn,
} from './ui/commands/types.js';
import { createNonInteractiveUI } from './ui/noninteractive/nonInteractiveUi.js';
import type { LoadedSettings } from './config/settings.js';
import type { SessionStatsState } from './ui/contexts/SessionContext.js';
import { t } from './i18n/index.js';

/**
 * Built-in commands that are allowed in non-interactive modes (CLI and ACP).
 * Only safe, read-only commands that don't require interactive UI.
 *
 * These commands are:
 * - init: Initialize project configuration
 * - summary: Generate session summary
 * - compress: Compress conversation history
 */
export const ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE = [
  'init',
  'summary',
  'compress',
] as const;
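
// Because of `as const`, the allowed-name union can be derived directly, e.g.:
//   type AllowedBuiltin =
//     (typeof ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE)[number];
//   // => 'init' | 'summary' | 'compress'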

/**
 * Result of handling a slash command in non-interactive mode.
 *
 * Supported types:
 * - 'submit_prompt': Submits content to the model (supports all modes)
 * - 'message': Returns a single message (supports non-interactive JSON/text only)
 * - 'stream_messages': Streams multiple messages (supports ACP only)
 * - 'unsupported': Command cannot be executed in this mode
 * - 'no_command': No command was found or executed
 */
export type NonInteractiveSlashCommandResult =
  | {
      type: 'submit_prompt';
      content: PartListUnion;
    }
  | {
      type: 'message';
      messageType: 'info' | 'error';
      content: string;
    }
  | {
      type: 'stream_messages';
      messages: AsyncGenerator<
        { messageType: 'info' | 'error'; content: string },
        void,
        unknown
      >;
    }
  | {
      type: 'unsupported';
      reason: string;
      originalType: string;
    }
  | {
      type: 'no_command';
    };
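
// Narrowing sketch (hypothetical call site; the union members are real):
//   const result = await handleSlashCommand(raw, abortController, config, settings);
//   if (result.type === 'message' && result.messageType === 'error') {
//     process.exitCode = 1; // discriminant check narrows `result` for TypeScript
//   }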

/**
 * Converts a SlashCommandActionReturn to a NonInteractiveSlashCommandResult.
 *
 * Only the following result types are supported in non-interactive mode:
 * - submit_prompt: Submits content to the model (all modes)
 * - message: Returns a single message (non-interactive JSON/text only)
 * - stream_messages: Streams multiple messages (ACP only)
 *
 * All other result types are converted to 'unsupported'.
 *
 * @param result The result from executing a slash command action
 * @returns A NonInteractiveSlashCommandResult describing the outcome
 */
function handleCommandResult(
  result: SlashCommandActionReturn,
): NonInteractiveSlashCommandResult {
  switch (result.type) {
    case 'submit_prompt':
      return {
        type: 'submit_prompt',
        content: result.content,
      };

    case 'message':
      return {
        type: 'message',
        messageType: result.messageType,
        content: result.content,
      };

    case 'stream_messages':
      return {
        type: 'stream_messages',
        messages: result.messages,
      };

    /**
     * Currently return types below are never generated due to the
     * whitelist of allowed slash commands in ACP and non-interactive mode.
     * We'll try to add more supported return types in the future.
     */
    case 'tool':
      return {
        type: 'unsupported',
        reason:
          'Tool execution from slash commands is not supported in non-interactive mode.',
        originalType: 'tool',
      };

    case 'quit':
      return {
        type: 'unsupported',
        reason:
          'Quit command is not supported in non-interactive mode. The process will exit naturally after completion.',
        originalType: 'quit',
      };

    case 'dialog':
      return {
        type: 'unsupported',
        reason: `Dialog '${result.dialog}' cannot be opened in non-interactive mode.`,
        originalType: 'dialog',
      };

    case 'load_history':
      return {
        type: 'unsupported',
        reason:
          'Loading history is not supported in non-interactive mode. Each invocation starts with a fresh context.',
        originalType: 'load_history',
      };

    case 'confirm_shell_commands':
      return {
        type: 'unsupported',
        reason:
          'Shell command confirmation is not supported in non-interactive mode. Use YOLO mode or pre-approve commands.',
        originalType: 'confirm_shell_commands',
      };

    case 'confirm_action':
      return {
        type: 'unsupported',
        reason:
          'Action confirmation is not supported in non-interactive mode. Commands requiring confirmation cannot be executed.',
        originalType: 'confirm_action',
      };

    default: {
      // Exhaustiveness check
      const _exhaustive: never = result;
      return {
        type: 'unsupported',
        reason: `Unknown command result type: ${(_exhaustive as SlashCommandActionReturn).type}`,
        originalType: 'unknown',
      };
    }
  }
}
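
// For example, a `{ type: 'dialog', dialog: 'settings' }` action maps to
// { type: 'unsupported', originalType: 'dialog', reason: "Dialog 'settings' ..." }.
// (The 'settings' dialog name is illustrative only.)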

/**
 * Filters commands based on the allowed built-in command names.
@@ -215,146 +62,122 @@ function filterCommandsForNonInteractive(
 * @param config The configuration object
 * @param settings The loaded settings
 * @param allowedBuiltinCommandNames Optional array of built-in command names that are
 * allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
 * Pass an empty array to only allow file commands.
 * @returns A Promise that resolves to a `NonInteractiveSlashCommandResult` describing
 * the outcome of the command execution.
 * allowed. If not provided or empty, only file commands are available.
 * @returns A Promise that resolves to `PartListUnion` if a valid command is
 * found and results in a prompt, or `undefined` otherwise.
 * @throws {FatalInputError} if the command result is not supported in
 * non-interactive mode.
 */
export const handleSlashCommand = async (
  rawQuery: string,
  abortController: AbortController,
  config: Config,
  settings: LoadedSettings,
  allowedBuiltinCommandNames: string[] = [
    ...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
  ],
): Promise<NonInteractiveSlashCommandResult> => {
  allowedBuiltinCommandNames?: string[],
): Promise<PartListUnion | undefined> => {
  const trimmed = rawQuery.trim();
  if (!trimmed.startsWith('/')) {
    return { type: 'no_command' };
    return;
  }

  const isAcpMode = config.getExperimentalZedIntegration();
  const isInteractive = config.isInteractive();

  const executionMode = isAcpMode
    ? 'acp'
    : isInteractive
      ? 'interactive'
      : 'non_interactive';

  const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);

  // Load all commands to check if the command exists but is not allowed
  const allLoaders = [
    new BuiltinCommandLoader(config),
    new FileCommandLoader(config),
  ];
  // Only load BuiltinCommandLoader if there are allowed built-in commands
  const loaders =
    allowedBuiltinSet.size > 0
      ? [new BuiltinCommandLoader(config), new FileCommandLoader(config)]
      : [new FileCommandLoader(config)];

  const commandService = await CommandService.create(
    allLoaders,
    loaders,
    abortController.signal,
  );
  const allCommands = commandService.getCommands();
  const commands = commandService.getCommands();
  const filteredCommands = filterCommandsForNonInteractive(
    allCommands,
    commands,
    allowedBuiltinSet,
  );

  // First, try to parse with filtered commands
  const { commandToExecute, args } = parseSlashCommand(
    rawQuery,
    filteredCommands,
  );

  if (!commandToExecute) {
    // Check if this is a known command that's just not allowed
    const { commandToExecute: knownCommand } = parseSlashCommand(
      rawQuery,
      allCommands,
    );

    if (knownCommand) {
      // Command exists but is not allowed in non-interactive mode
      return {
        type: 'unsupported',
        reason: t(
          'The command "/{{command}}" is not supported in non-interactive mode.',
          { command: knownCommand.name },
        ),
        originalType: 'filtered_command',
  if (commandToExecute) {
    if (commandToExecute.action) {
      // Not used by custom commands but may be in the future.
      const sessionStats: SessionStatsState = {
        sessionId: config?.getSessionId(),
        sessionStartTime: new Date(),
        metrics: uiTelemetryService.getMetrics(),
        lastPromptTokenCount: 0,
        promptCount: 1,
      };

      const logger = new Logger(config?.getSessionId() || '', config?.storage);

      const context: CommandContext = {
        services: {
          config,
          settings,
          git: undefined,
          logger,
        },
        ui: createNonInteractiveUI(),
        session: {
          stats: sessionStats,
          sessionShellAllowlist: new Set(),
        },
        invocation: {
          raw: trimmed,
          name: commandToExecute.name,
          args,
        },
      };

      const result = await commandToExecute.action(context, args);

      if (result) {
        switch (result.type) {
          case 'submit_prompt':
            return result.content;
          case 'confirm_shell_commands':
            // This result indicates a command attempted to confirm shell commands.
            // However note that currently, ShellTool is excluded in non-interactive
            // mode unless 'YOLO mode' is active, so confirmation actually won't
            // occur because of YOLO mode.
            // This ensures that if a command *does* request confirmation (e.g.
            // in the future with more granular permissions), it's handled appropriately.
            throw new FatalInputError(
              'Exiting due to a confirmation prompt requested by the command.',
            );
          default:
            throw new FatalInputError(
              'Exiting due to command result that is not supported in non-interactive mode.',
            );
        }
      }
    }

    return { type: 'no_command' };
  }

  if (!commandToExecute.action) {
    return { type: 'no_command' };
  }

  // Not used by custom commands but may be in the future.
  const sessionStats: SessionStatsState = {
    sessionId: config?.getSessionId(),
    sessionStartTime: new Date(),
    metrics: uiTelemetryService.getMetrics(),
    lastPromptTokenCount: 0,
    promptCount: 1,
  };

  const logger = new Logger(config?.getSessionId() || '', config?.storage);

  const context: CommandContext = {
    executionMode,
    services: {
      config,
      settings,
      git: undefined,
      logger,
    },
    ui: createNonInteractiveUI(),
    session: {
      stats: sessionStats,
      sessionShellAllowlist: new Set(),
    },
    invocation: {
      raw: trimmed,
      name: commandToExecute.name,
      args,
    },
  };

  const result = await commandToExecute.action(context, args);

  if (!result) {
    // Command executed but returned no result (e.g., void return)
    return {
      type: 'message',
      messageType: 'info',
      content: 'Command executed successfully.',
    };
  }

  // Handle different result types
  return handleCommandResult(result);
  return;
};
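
// Invocation sketch (mirrors the runNonInteractive switch shown earlier):
//   const slashCommandResult = await handleSlashCommand(
//     trimmedInput, abortController, config, settings,
//   );
//   switch (slashCommandResult.type) { /* submit_prompt | message | ... */ }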

/**
 * Retrieves all available slash commands for the current configuration.
 *
 * @param config The configuration object
 * @param settings The loaded settings
 * @param abortSignal Signal to cancel the loading process
 * @param allowedBuiltinCommandNames Optional array of built-in command names that are
 * allowed. Defaults to ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE (init, summary, compress).
 * Pass an empty array to only include file commands.
 * allowed. If not provided or empty, only file commands are available.
 * @returns A Promise that resolves to an array of SlashCommand objects
 */
export const getAvailableCommands = async (
  config: Config,
  settings: LoadedSettings,
  abortSignal: AbortSignal,
  allowedBuiltinCommandNames: string[] = [
    ...ALLOWED_BUILTIN_COMMANDS_NON_INTERACTIVE,
  ],
  allowedBuiltinCommandNames?: string[],
): Promise<SlashCommand[]> => {
  try {
    const allowedBuiltinSet = new Set(allowedBuiltinCommandNames ?? []);

@@ -228,7 +228,6 @@ export const useAuthCommand = (
        ![
          AuthType.QWEN_OAUTH,
          AuthType.USE_OPENAI,
          AuthType.USE_ANTHROPIC,
          AuthType.USE_GEMINI,
          AuthType.USE_VERTEX_AI,
        ].includes(defaultAuthType as AuthType)
@@ -241,7 +240,6 @@ export const useAuthCommand = (
          validValues: [
            AuthType.QWEN_OAUTH,
            AuthType.USE_OPENAI,
            AuthType.USE_ANTHROPIC,
            AuthType.USE_GEMINI,
            AuthType.USE_VERTEX_AI,
          ].join(', '),

@@ -19,9 +19,7 @@ export const compressCommand: SlashCommand = {
  kind: CommandKind.BUILT_IN,
  action: async (context) => {
    const { ui } = context;
    const executionMode = context.executionMode ?? 'interactive';

    if (executionMode === 'interactive' && ui.pendingItem) {
    if (ui.pendingItem) {
      ui.addItem(
        {
          type: MessageType.ERROR,
@@ -42,80 +40,13 @@ export const compressCommand: SlashCommand = {
      },
    };

    const config = context.services.config;
    const geminiClient = config?.getGeminiClient();
    if (!config || !geminiClient) {
      return {
        type: 'message',
        messageType: 'error',
        content: t('Config not loaded.'),
      };
    }

    const doCompress = async () => {
      const promptId = `compress-${Date.now()}`;
      return await geminiClient.tryCompressChat(promptId, true);
    };

    if (executionMode === 'acp') {
      const messages = async function* () {
        try {
          yield {
            messageType: 'info' as const,
            content: 'Compressing context...',
          };
          const compressed = await doCompress();
          if (!compressed) {
            yield {
              messageType: 'error' as const,
              content: t('Failed to compress chat history.'),
            };
            return;
          }
          yield {
            messageType: 'info' as const,
            content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
          };
        } catch (e) {
          yield {
            messageType: 'error' as const,
            content: t('Failed to compress chat history: {{error}}', {
              error: e instanceof Error ? e.message : String(e),
            }),
          };
        }
      };

      return { type: 'stream_messages', messages: messages() };
    }

    try {
      if (executionMode === 'interactive') {
        ui.setPendingItem(pendingMessage);
      }

      const compressed = await doCompress();

      if (!compressed) {
        if (executionMode === 'interactive') {
          ui.addItem(
            {
              type: MessageType.ERROR,
              text: t('Failed to compress chat history.'),
            },
            Date.now(),
          );
          return;
        }

        return {
          type: 'message',
          messageType: 'error',
          content: t('Failed to compress chat history.'),
        };
      }

      if (executionMode === 'interactive') {
      ui.setPendingItem(pendingMessage);
      const promptId = `compress-${Date.now()}`;
      const compressed = await context.services.config
        ?.getGeminiClient()
        ?.tryCompressChat(promptId, true);
      if (compressed) {
        ui.addItem(
          {
            type: MessageType.COMPRESSION,
@@ -128,39 +59,27 @@ export const compressCommand: SlashCommand = {
          } as HistoryItemCompression,
          Date.now(),
        );
        return;
      }

      return {
        type: 'message',
        messageType: 'info',
        content: `Context compressed (${compressed.originalTokenCount} -> ${compressed.newTokenCount}).`,
      };
    } catch (e) {
      if (executionMode === 'interactive') {
      } else {
        ui.addItem(
          {
            type: MessageType.ERROR,
            text: t('Failed to compress chat history: {{error}}', {
              error: e instanceof Error ? e.message : String(e),
            }),
            text: t('Failed to compress chat history.'),
          },
          Date.now(),
        );
        return;
      }

      return {
        type: 'message',
        messageType: 'error',
        content: t('Failed to compress chat history: {{error}}', {
          error: e instanceof Error ? e.message : String(e),
        }),
      };
    } catch (e) {
      ui.addItem(
        {
          type: MessageType.ERROR,
          text: t('Failed to compress chat history: {{error}}', {
            error: e instanceof Error ? e.message : String(e),
          }),
        },
        Date.now(),
      );
    } finally {
      if (executionMode === 'interactive') {
        ui.setPendingItem(null);
      }
      ui.setPendingItem(null);
    }
  },
};

@@ -26,8 +26,6 @@ export const summaryCommand: SlashCommand = {
  action: async (context): Promise<SlashCommandActionReturn> => {
    const { config } = context.services;
    const { ui } = context;
    const executionMode = context.executionMode ?? 'interactive';

    if (!config) {
      return {
        type: 'message',
@@ -45,8 +43,8 @@ export const summaryCommand: SlashCommand = {
      };
    }

    // Check if already generating summary (interactive UI only)
    if (executionMode === 'interactive' && ui.pendingItem) {
    // Check if already generating summary
    if (ui.pendingItem) {
      ui.addItem(
        {
          type: 'error' as const,
@@ -65,22 +63,29 @@ export const summaryCommand: SlashCommand = {
      };
    }

    const getChatHistory = () => {
    try {
      // Get the current chat history
      const chat = geminiClient.getChat();
      return chat.getHistory();
    };
      const history = chat.getHistory();

    const validateChatHistory = (
      history: ReturnType<typeof getChatHistory>,
    ) => {
      if (history.length <= 2) {
        throw new Error(t('No conversation found to summarize.'));
        return {
          type: 'message',
          messageType: 'info',
          content: t('No conversation found to summarize.'),
        };
      }
    };

    const generateSummaryMarkdown = async (
      history: ReturnType<typeof getChatHistory>,
    ): Promise<string> => {
      // Show loading state
      const pendingMessage: HistoryItemSummary = {
        type: 'summary',
        summary: {
          isPending: true,
          stage: 'generating',
        },
      };
      ui.setPendingItem(pendingMessage);

      // Build the conversation context for summary generation
      const conversationContext = history.map((message) => ({
        role: message.role,
@@ -116,21 +121,19 @@ export const summaryCommand: SlashCommand = {

      if (!markdownSummary) {
        throw new Error(
          t(
            'Failed to generate summary - no text content received from LLM response',
          ),
          'Failed to generate summary - no text content received from LLM response',
        );
      }

      return markdownSummary;
    };
      // Update loading message to show saving progress
      ui.setPendingItem({
        type: 'summary',
        summary: {
          isPending: true,
          stage: 'saving',
        },
      });

    const saveSummaryToDisk = async (
      markdownSummary: string,
    ): Promise<{
      filePathForDisplay: string;
      fullPath: string;
    }> => {
      // Ensure .qwen directory exists
      const projectRoot = config.getProjectRoot();
      const qwenDir = path.join(projectRoot, '.qwen');
@@ -152,163 +155,45 @@ export const summaryCommand: SlashCommand = {

      await fsPromises.writeFile(summaryPath, summaryContent, 'utf8');

      return {
        filePathForDisplay: '.qwen/PROJECT_SUMMARY.md',
        fullPath: summaryPath,
      };
    };

    const emitInteractivePending = (stage: 'generating' | 'saving') => {
      if (executionMode !== 'interactive') {
        return;
      }
      const pendingMessage: HistoryItemSummary = {
        type: 'summary',
        summary: {
          isPending: true,
          stage,
        },
      };
      ui.setPendingItem(pendingMessage);
    };

    const completeInteractive = (filePathForDisplay: string) => {
      if (executionMode !== 'interactive') {
        return;
      }
      // Clear pending item and show success message
      ui.setPendingItem(null);
      const completedSummaryItem: HistoryItemSummary = {
        type: 'summary',
        summary: {
          isPending: false,
          stage: 'completed',
          filePath: filePathForDisplay,
          filePath: '.qwen/PROJECT_SUMMARY.md',
        },
      };
      ui.addItem(completedSummaryItem, Date.now());
    };

    const formatErrorMessage = (error: unknown): string =>
      t('Failed to generate project context summary: {{error}}', {
        error: error instanceof Error ? error.message : String(error),
      });

    const failInteractive = (error: unknown) => {
      if (executionMode !== 'interactive') {
        return;
      }
      return {
        type: 'message',
        messageType: 'info',
        content: '', // Empty content since we show the message in UI component
      };
    } catch (error) {
      // Clear pending item on error
      ui.setPendingItem(null);
      ui.addItem(
        {
          type: 'error' as const,
          text: `❌ ${formatErrorMessage(error)}`,
          text: `❌ ${t(
            'Failed to generate project context summary: {{error}}',
            {
              error: error instanceof Error ? error.message : String(error),
            },
          )}`,
        },
        Date.now(),
      );
    };

    const formatSuccessMessage = (filePathForDisplay: string): string =>
      t('Saved project summary to {{filePathForDisplay}}.', {
        filePathForDisplay,
      });

    const returnNoConversationMessage = (): SlashCommandActionReturn => {
      const msg = t('No conversation found to summarize.');
      if (executionMode === 'acp') {
        const messages = async function* () {
          yield {
            messageType: 'info' as const,
            content: msg,
          };
        };
        return {
          type: 'stream_messages',
          messages: messages(),
        };
      }
      return {
        type: 'message',
        messageType: 'info',
        content: msg,
      };
    };

    const executeSummaryGeneration = async (
      history: ReturnType<typeof getChatHistory>,
    ): Promise<{
      markdownSummary: string;
      filePathForDisplay: string;
    }> => {
      emitInteractivePending('generating');
      const markdownSummary = await generateSummaryMarkdown(history);
      emitInteractivePending('saving');
      const { filePathForDisplay } = await saveSummaryToDisk(markdownSummary);
      completeInteractive(filePathForDisplay);
      return { markdownSummary, filePathForDisplay };
    };

    // Validate chat history once at the beginning
    const history = getChatHistory();
    try {
      validateChatHistory(history);
    } catch (_error) {
      return returnNoConversationMessage();
    }

    if (executionMode === 'acp') {
      const messages = async function* () {
        try {
          yield {
            messageType: 'info' as const,
            content: t('Generating project summary...'),
          };

          const { filePathForDisplay } =
            await executeSummaryGeneration(history);

          yield {
            messageType: 'info' as const,
            content: formatSuccessMessage(filePathForDisplay),
          };
        } catch (error) {
          failInteractive(error);
          yield {
            messageType: 'error' as const,
            content: formatErrorMessage(error),
          };
        }
      };

      return {
        type: 'stream_messages',
        messages: messages(),
      };
    }

    try {
      const { filePathForDisplay } = await executeSummaryGeneration(history);

      if (executionMode === 'non_interactive') {
        return {
          type: 'message',
          messageType: 'info',
          content: formatSuccessMessage(filePathForDisplay),
        };
      }

      // Interactive mode: UI components already display progress and completion.
      return {
        type: 'message',
        messageType: 'info',
        content: '',
      };
    } catch (error) {
      failInteractive(error);

      return {
        type: 'message',
        messageType: 'error',
        content: formatErrorMessage(error),
        content: t('Failed to generate project context summary: {{error}}', {
          error: error instanceof Error ? error.message : String(error),
        }),
      };
    }
  },

@@ -22,14 +22,6 @@ import type {

// Grouped dependencies for clarity and easier mocking
export interface CommandContext {
  /**
   * Execution mode for the current invocation.
   *
   * - interactive: React/Ink UI mode
   * - non_interactive: non-interactive CLI mode (text/json)
   * - acp: ACP/Zed integration mode
   */
  executionMode?: 'interactive' | 'non_interactive' | 'acp';
  // Invocation properties for when commands are called.
  invocation?: {
    /** The raw, untrimmed input string from the user. */
@@ -116,19 +108,6 @@ export interface MessageActionReturn {
  content: string;
}

/**
 * The return type for a command action that streams multiple messages.
 * Used for long-running operations that need to send progress updates.
 */
export interface StreamMessagesActionReturn {
  type: 'stream_messages';
  messages: AsyncGenerator<
    { messageType: 'info' | 'error'; content: string },
    void,
    unknown
  >;
}
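
// Consumption sketch (assumed ACP-side handling, not shown in this diff):
//   for await (const { messageType, content } of result.messages) {
//     sendToClient(messageType, content); // sendToClient is hypothetical
//   }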

/**
 * The return type for a command action that needs to open a dialog.
 */
@@ -195,7 +174,6 @@ export interface ConfirmActionReturn {
export type SlashCommandActionReturn =
  | ToolActionReturn
  | MessageActionReturn
  | StreamMessagesActionReturn
  | QuitActionReturn
  | OpenDialogActionReturn
  | LoadHistoryActionReturn

@@ -520,13 +520,6 @@ export const useSlashCommandProcessor = (
            true,
          );
        }
        case 'stream_messages': {
          // stream_messages is only used in ACP/Zed integration mode
          // and should not be returned in interactive UI mode
          throw new Error(
            'stream_messages result type is not supported in interactive mode',
          );
        }
        default: {
          const unhandled: never = result;
          throw new Error(

@@ -526,15 +526,10 @@ export const useGeminiStream = (
        return currentThoughtBuffer;
      }

      let newThoughtBuffer = currentThoughtBuffer + thoughtText;

      const pendingType = pendingHistoryItemRef.current?.type;
      const isPendingThought =
        pendingType === 'gemini_thought' ||
        pendingType === 'gemini_thought_content';
      const newThoughtBuffer = currentThoughtBuffer + thoughtText;

      // If we're not already showing a thought, start a new one
      if (!isPendingThought) {
      if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
        // If there's a pending non-thought item, finalize it first
        if (pendingHistoryItemRef.current) {
          addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -542,37 +537,11 @@ export const useGeminiStream = (
        setPendingHistoryItem({ type: 'gemini_thought', text: '' });
      }

      // Split large thought messages for better rendering performance (same rationale
      // as regular content streaming). This helps avoid terminal flicker caused by
      // constantly re-rendering an ever-growing "pending" block.
      const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
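      // (findLastSafeSplitPoint presumably returns a render-safe boundary, e.g.
      // a paragraph break outside any code fence; the branch below shows that a
      // return value of newThoughtBuffer.length keeps the buffer unsplit.)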
      const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
        isPendingThought && pendingType === 'gemini_thought_content'
          ? 'gemini_thought_content'
          : 'gemini_thought';

      if (splitPoint === newThoughtBuffer.length) {
        // Update the existing thought message with accumulated content
        setPendingHistoryItem({
          type: nextPendingType,
          text: newThoughtBuffer,
        });
      } else {
        const beforeText = newThoughtBuffer.substring(0, splitPoint);
        const afterText = newThoughtBuffer.substring(splitPoint);
        addItem(
          {
            type: nextPendingType,
            text: beforeText,
          },
          userMessageTimestamp,
        );
        setPendingHistoryItem({
          type: 'gemini_thought_content',
          text: afterText,
        });
        newThoughtBuffer = afterText;
      }
      // Update the existing thought message with accumulated content
      setPendingHistoryItem({
        type: 'gemini_thought',
        text: newThoughtBuffer,
      });

      // Also update the thought state for the loading indicator
      mergeThought(eventValue);

@@ -60,11 +60,6 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
  return id ? { id, label: id } : null;
}

export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
  const id = process.env['ANTHROPIC_MODEL']?.trim();
  return id ? { id, label: id } : null;
}

export function getAvailableModelsForAuthType(
  authType: AuthType,
): AvailableModel[] {
@@ -75,10 +70,6 @@ export function getAvailableModelsForAuthType(
      const openAIModel = getOpenAIAvailableModelFromEnv();
      return openAIModel ? [openAIModel] : [];
    }
    case AuthType.USE_ANTHROPIC: {
      const anthropicModel = getAnthropicAvailableModelFromEnv();
      return anthropicModel ? [anthropicModel] : [];
    }
    default:
      // For other auth types, return empty array for now
      // This can be expanded later according to the design doc

@@ -20,11 +20,6 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
    getToolRegistry: () => ({
      getTool: (name: string) => tools[name],
    }),
    getContentGenerator: () => ({
      // Default to showing full thinking content during resume unless explicitly
      // summarized; tests don't care about summarized thinking behavior.
      useSummarizedThinking: () => false,
    }),
  }) as unknown as Config;

describe('resumeHistoryUtils', () => {

@@ -204,11 +204,7 @@ function convertToHistoryItems(
  const parts = record.message?.parts as Part[] | undefined;

  // Extract thought content
  const thoughtText = !config
    .getContentGenerator()
    .useSummarizedThinking()
    ? extractThoughtTextFromParts(parts)
    : '';
  const thoughtText = extractThoughtTextFromParts(parts);

  // Extract text content (non-function-call, non-thought)
  const text = extractTextFromParts(parts);

@@ -35,33 +35,22 @@ import {
} from './nonInteractiveHelpers.js';

// Mock dependencies
vi.mock('../nonInteractiveCliCommands.js', () => ({
  getAvailableCommands: vi
    .fn()
    .mockImplementation(
      async (
        _config: unknown,
        _signal: AbortSignal,
        allowedBuiltinCommandNames?: string[],
      ) => {
        const allowedSet = new Set(allowedBuiltinCommandNames ?? []);
        const allCommands = [
          { name: 'help', kind: 'built-in' },
          { name: 'commit', kind: 'file' },
          { name: 'memory', kind: 'built-in' },
          { name: 'init', kind: 'built-in' },
          { name: 'summary', kind: 'built-in' },
          { name: 'compress', kind: 'built-in' },
        ];
vi.mock('../services/CommandService.js', () => ({
  CommandService: {
    create: vi.fn().mockResolvedValue({
      getCommands: vi
        .fn()
        .mockReturnValue([
          { name: 'help' },
          { name: 'commit' },
          { name: 'memory' },
        ]),
    }),
  },
}));

        // Filter commands: always include file commands, only include allowed built-in commands
        return allCommands.filter(
          (cmd) =>
            cmd.kind === 'file' ||
            (cmd.kind === 'built-in' && allowedSet.has(cmd.name)),
        );
      },
    ),
vi.mock('../services/BuiltinCommandLoader.js', () => ({
  BuiltinCommandLoader: vi.fn().mockImplementation(() => ({})),
}));

vi.mock('../ui/utils/computeStats.js', () => ({
@@ -522,12 +511,10 @@ describe('buildSystemMessage', () => {
  });

  it('should build system message with all fields', async () => {
    const allowedBuiltinCommands = ['init', 'summary', 'compress'];
    const result = await buildSystemMessage(
      mockConfig,
      'test-session-id',
      'auto' as PermissionMode,
      allowedBuiltinCommands,
    );

    expect(result).toEqual({
@@ -543,7 +530,7 @@ describe('buildSystemMessage', () => {
      ],
      model: 'test-model',
      permission_mode: 'auto',
      slash_commands: ['commit', 'compress', 'init', 'summary'],
      slash_commands: ['commit', 'help', 'memory'],
      qwen_code_version: '1.0.0',
      agents: [],
    });
@@ -559,7 +546,6 @@ describe('buildSystemMessage', () => {
      config,
      'test-session-id',
      'auto' as PermissionMode,
      ['init', 'summary'],
    );

    expect(result.tools).toEqual([]);
@@ -575,7 +561,6 @@ describe('buildSystemMessage', () => {
      config,
      'test-session-id',
      'auto' as PermissionMode,
      ['init', 'summary'],
    );

    expect(result.mcp_servers).toEqual([]);
@@ -591,37 +576,10 @@ describe('buildSystemMessage', () => {
      config,
      'test-session-id',
      'auto' as PermissionMode,
      ['init', 'summary'],
    );

    expect(result.qwen_code_version).toBe('unknown');
  });

  it('should only include allowed built-in commands and all file commands', async () => {
    const allowedBuiltinCommands = ['init', 'summary'];
    const result = await buildSystemMessage(
      mockConfig,
      'test-session-id',
      'auto' as PermissionMode,
      allowedBuiltinCommands,
    );

    // Should include: 'commit' (FILE), 'init' (BUILT_IN, allowed), 'summary' (BUILT_IN, allowed)
    // Should NOT include: 'help', 'memory', 'compress' (BUILT_IN but not in allowed set)
    expect(result.slash_commands).toEqual(['commit', 'init', 'summary']);
  });

  it('should include only file commands when no built-in commands are allowed', async () => {
    const result = await buildSystemMessage(
      mockConfig,
      'test-session-id',
      'auto' as PermissionMode,
      [], // Empty array - no built-in commands allowed
    );

    // Should only include 'commit' (FILE command)
    expect(result.slash_commands).toEqual(['commit']);
  });
});

describe('createTaskToolProgressHandler', () => {

@@ -25,9 +25,10 @@ import type {
  PermissionMode,
  CLISystemMessage,
} from '../nonInteractive/types.js';
import { CommandService } from '../services/CommandService.js';
import { BuiltinCommandLoader } from '../services/BuiltinCommandLoader.js';
import type { JsonOutputAdapterInterface } from '../nonInteractive/io/BaseJsonOutputAdapter.js';
import { computeSessionStats } from '../ui/utils/computeStats.js';
import { getAvailableCommands } from '../nonInteractiveCliCommands.js';

/**
 * Normalizes various part list formats into a consistent Part[] array.
@@ -186,27 +187,24 @@ export function computeUsageFromMetrics(metrics: SessionMetrics): Usage {
}

/**
 * Load slash command names using getAvailableCommands
 * Load slash command names using CommandService
 *
 * @param config - Config instance
 * @param allowedBuiltinCommandNames - Optional array of allowed built-in command names.
 * If not provided, uses the default from getAvailableCommands.
 * @returns Promise resolving to array of slash command names
 */
async function loadSlashCommandNames(
  config: Config,
  allowedBuiltinCommandNames?: string[],
): Promise<string[]> {
async function loadSlashCommandNames(config: Config): Promise<string[]> {
  const controller = new AbortController();
  try {
    const commands = await getAvailableCommands(
      config,
    const service = await CommandService.create(
      [new BuiltinCommandLoader(config)],
      controller.signal,
      allowedBuiltinCommandNames,
    );

    // Extract command names and sort
    return commands.map((cmd) => cmd.name).sort();
    const names = new Set<string>();
    const commands = service.getCommands();
    for (const command of commands) {
      names.add(command.name);
    }
    return Array.from(names).sort();
  } catch (error) {
    if (config.getDebugMode()) {
      console.error(
@@ -235,15 +233,12 @@ async function loadSlashCommandNames(
 * @param config - Config instance
 * @param sessionId - Session identifier
 * @param permissionMode - Current permission/approval mode
 * @param allowedBuiltinCommandNames - Optional array of allowed built-in command names.
 * If not provided, defaults to empty array (only file commands will be included).
 * @returns Promise resolving to CLISystemMessage
 */
export async function buildSystemMessage(
  config: Config,
  sessionId: string,
  permissionMode: PermissionMode,
  allowedBuiltinCommandNames?: string[],
): Promise<CLISystemMessage> {
  const toolRegistry = config.getToolRegistry();
  const tools = toolRegistry ? toolRegistry.getAllToolNames() : [];
@@ -256,11 +251,8 @@ export async function buildSystemMessage(
    }))
    : [];

  // Load slash commands with filtering based on allowed built-in commands
  const slashCommands = await loadSlashCommandNames(
    config,
    allowedBuiltinCommandNames,
  );
  // Load slash commands
  const slashCommands = await loadSlashCommandNames(config);

  // Load subagent names from config
  let agentNames: string[] = [];

@@ -153,8 +153,7 @@ export async function getExtendedSystemInfo(

  // Get base URL if using OpenAI auth
  const baseUrl =
    baseInfo.selectedAuthType === AuthType.USE_OPENAI ||
    baseInfo.selectedAuthType === AuthType.USE_ANTHROPIC
    baseInfo.selectedAuthType === AuthType.USE_OPENAI
      ? context.services.config?.getContentGeneratorConfig()?.baseUrl
      : undefined;


@@ -19,9 +19,6 @@ describe('validateNonInterActiveAuth', () => {
  let originalEnvVertexAi: string | undefined;
  let originalEnvGcp: string | undefined;
  let originalEnvOpenAiApiKey: string | undefined;
  let originalEnvQwenOauth: string | undefined;
  let originalEnvGoogleApiKey: string | undefined;
  let originalEnvAnthropicApiKey: string | undefined;
  let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
  let processExitSpy: ReturnType<typeof vi.spyOn<[code?: number], never>>;
  let refreshAuthMock: ReturnType<typeof vi.fn>;
@@ -32,16 +29,10 @@ describe('validateNonInterActiveAuth', () => {
    originalEnvVertexAi = process.env['GOOGLE_GENAI_USE_VERTEXAI'];
    originalEnvGcp = process.env['GOOGLE_GENAI_USE_GCA'];
    originalEnvOpenAiApiKey = process.env['OPENAI_API_KEY'];
    originalEnvQwenOauth = process.env['QWEN_OAUTH'];
    originalEnvGoogleApiKey = process.env['GOOGLE_API_KEY'];
    originalEnvAnthropicApiKey = process.env['ANTHROPIC_API_KEY'];
    delete process.env['GEMINI_API_KEY'];
    delete process.env['GOOGLE_GENAI_USE_VERTEXAI'];
    delete process.env['GOOGLE_GENAI_USE_GCA'];
    delete process.env['OPENAI_API_KEY'];
    delete process.env['QWEN_OAUTH'];
    delete process.env['GOOGLE_API_KEY'];
    delete process.env['ANTHROPIC_API_KEY'];
    consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
    processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
      throw new Error(`process.exit(${code}) called`);
@@ -89,21 +80,6 @@ describe('validateNonInterActiveAuth', () => {
    } else {
      delete process.env['OPENAI_API_KEY'];
    }
    if (originalEnvQwenOauth !== undefined) {
      process.env['QWEN_OAUTH'] = originalEnvQwenOauth;
    } else {
      delete process.env['QWEN_OAUTH'];
    }
    if (originalEnvGoogleApiKey !== undefined) {
      process.env['GOOGLE_API_KEY'] = originalEnvGoogleApiKey;
    } else {
      delete process.env['GOOGLE_API_KEY'];
    }
    if (originalEnvAnthropicApiKey !== undefined) {
      process.env['ANTHROPIC_API_KEY'] = originalEnvAnthropicApiKey;
    } else {
      delete process.env['ANTHROPIC_API_KEY'];
    }
    vi.restoreAllMocks();
  });


@@ -27,9 +27,6 @@ function getAuthTypeFromEnv(): AuthType | undefined {
  if (process.env['GOOGLE_API_KEY']) {
    return AuthType.USE_VERTEX_AI;
  }
  if (process.env['ANTHROPIC_API_KEY']) {
    return AuthType.USE_ANTHROPIC;
  }

  return undefined;
}

@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code-core",
  "version": "0.6.0-nightly.20251226.17eb20c1",
  "version": "0.6.0-nightly.20251225.9f65bd3b",
  "description": "Qwen Code Core",
  "repository": {
    "type": "git",
@@ -23,7 +23,6 @@
    "scripts/postinstall.js"
  ],
  "dependencies": {
    "@anthropic-ai/sdk": "^0.36.1",
    "@google/genai": "1.30.0",
    "@modelcontextprotocol/sdk": "^1.25.1",
    "@opentelemetry/api": "^1.9.0",

@@ -16,6 +16,7 @@ import {
  QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
  AuthType,
  createContentGeneratorConfig,
@@ -272,7 +273,7 @@ describe('Server Config (config.ts)', () => {
        authType,
        {
          model: MODEL,
          baseUrl: undefined,
          baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
        },
      );
      // Verify that contentGeneratorConfig is updated

@@ -96,6 +96,7 @@ import {
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
  SessionService,
@@ -573,7 +574,7 @@ export class Config {
    this._generationConfig = {
      model: params.model,
      ...(params.generationConfig || {}),
      baseUrl: params.generationConfig?.baseUrl,
      baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
    };
    this.contentGeneratorConfig = this
      ._generationConfig as ContentGeneratorConfig;
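
// Effect of this hunk: when `generationConfig.baseUrl` is omitted, the config
// now falls back to DEFAULT_DASHSCOPE_BASE_URL instead of leaving it undefined
// (matching the updated expectation in the config test above).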

@@ -1,500 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type {
  CountTokensParameters,
  GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';

// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
  calculateTokens: vi.fn(),
  dispose: vi.fn(),
};

vi.mock('../../utils/request-tokenizer/index.js', () => ({
  getDefaultTokenizer: vi.fn(() => mockTokenizer),
  DefaultRequestTokenizer: vi.fn(() => mockTokenizer),
  disposeDefaultTokenizer: vi.fn(),
}));
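
// Note: Vitest hoists vi.mock calls above imports, but the factory runs lazily
// when the mocked module is first imported, so referencing `mockTokenizer`
// here is safe.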
|
||||
|
||||
type AnthropicCreateArgs = [unknown, { signal?: AbortSignal }?];
|
||||
|
||||
const anthropicMockState: {
|
||||
constructorOptions?: Record<string, unknown>;
|
||||
lastCreateArgs?: AnthropicCreateArgs;
|
||||
createImpl: ReturnType<typeof vi.fn>;
|
||||
} = {
|
||||
constructorOptions: undefined,
|
||||
lastCreateArgs: undefined,
|
||||
createImpl: vi.fn(),
|
||||
};
|
||||
|
||||
vi.mock('@anthropic-ai/sdk', () => {
|
||||
class AnthropicMock {
|
||||
messages: { create: (...args: AnthropicCreateArgs) => unknown };
|
||||
|
||||
constructor(options: Record<string, unknown>) {
|
||||
anthropicMockState.constructorOptions = options;
|
||||
this.messages = {
|
||||
create: (...args: AnthropicCreateArgs) => {
|
||||
anthropicMockState.lastCreateArgs = args;
|
||||
return anthropicMockState.createImpl(...args);
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
default: AnthropicMock,
|
||||
__anthropicState: anthropicMockState,
|
||||
};
|
||||
});
|
||||
|
||||
// Now import the modules that depend on the mocked modules.
|
||||
import type { Config } from '../../config/config.js';
|
||||
|
||||
const importGenerator = async (): Promise<{
  AnthropicContentGenerator: typeof import('./anthropicContentGenerator.js').AnthropicContentGenerator;
}> => import('./anthropicContentGenerator.js');

const importConverter = async (): Promise<{
  AnthropicContentConverter: typeof import('./converter.js').AnthropicContentConverter;
}> => import('./converter.js');

describe('AnthropicContentGenerator', () => {
  let mockConfig: Config;
  let anthropicState: {
    constructorOptions?: Record<string, unknown>;
    lastCreateArgs?: AnthropicCreateArgs;
    createImpl: ReturnType<typeof vi.fn>;
  };

  beforeEach(async () => {
    vi.clearAllMocks();
    vi.resetModules();

    mockTokenizer.calculateTokens.mockResolvedValue({
      totalTokens: 50,
      breakdown: {
        textTokens: 50,
        imageTokens: 0,
        audioTokens: 0,
        otherTokens: 0,
      },
      processingTime: 1,
    });
    anthropicState = anthropicMockState;

    anthropicState.createImpl.mockReset();
    anthropicState.lastCreateArgs = undefined;
    anthropicState.constructorOptions = undefined;

    mockConfig = {
      getCliVersion: vi.fn().mockReturnValue('1.2.3'),
    } as unknown as Config;
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  it('passes a QwenCode User-Agent header to the Anthropic SDK', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
    expect(headers['User-Agent']).toContain(
      `(${process.platform}; ${process.arch})`,
    );
  });

  it('adds the effort beta header when reasoning.effort is set', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
        reasoning: { effort: 'medium' },
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
  });

  it('does not add the effort beta header when reasoning.effort is not set', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).not.toContain('effort-2025-11-24');
  });

  it('omits the anthropic beta header when reasoning is disabled', async () => {
    const { AnthropicContentGenerator } = await importGenerator();
    void new AnthropicContentGenerator(
      {
        model: 'claude-test',
        apiKey: 'test-key',
        baseUrl: 'https://example.invalid',
        timeout: 10_000,
        maxRetries: 2,
        samplingParams: {},
        schemaCompliance: 'auto',
        reasoning: false,
      },
      mockConfig,
    );

    const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
      {}) as Record<string, string>;
    expect(headers['anthropic-beta']).toBeUndefined();
  });

  describe('generateContent', () => {
    it('builds request with config sampling params (config overrides request) and thinking budget', async () => {
      const { AnthropicContentConverter } = await importConverter();
      const { AnthropicContentGenerator } = await importGenerator();

      const convertResponseSpy = vi
        .spyOn(
          AnthropicContentConverter.prototype,
          'convertAnthropicResponseToGemini',
        )
        .mockReturnValue(
          (() => {
            const r = new GenerateContentResponse();
            r.responseId = 'gemini-1';
            return r;
          })(),
        );

      anthropicState.createImpl.mockResolvedValue({
        id: 'anthropic-1',
        model: 'claude-test',
        content: [{ type: 'text', text: 'hi' }],
      });

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          baseUrl: 'https://example.invalid',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {
            temperature: 0.7,
            max_tokens: 1000,
            top_p: 0.9,
            top_k: 20,
          },
          schemaCompliance: 'auto',
          reasoning: { effort: 'high', budget_tokens: 1000 },
        },
        mockConfig,
      );

      const abortController = new AbortController();
      const request: GenerateContentParameters = {
        model: 'models/ignored',
        contents: 'Hello',
        config: {
          temperature: 0.1,
          maxOutputTokens: 200,
          topP: 0.5,
          topK: 5,
          abortSignal: abortController.signal,
        },
      };

      const result = await generator.generateContent(request);
      expect(result.responseId).toBe('gemini-1');

      expect(anthropicState.lastCreateArgs).toBeDefined();
      const [anthropicRequest, options] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;

      expect(options?.signal).toBe(abortController.signal);

      expect(anthropicRequest).toEqual(
        expect.objectContaining({
          model: 'claude-test',
          max_tokens: 1000,
          temperature: 0.7,
          top_p: 0.9,
          top_k: 20,
          thinking: { type: 'enabled', budget_tokens: 1000 },
          output_config: { effort: 'high' },
        }),
      );

      expect(convertResponseSpy).toHaveBeenCalledTimes(1);
    });

    it('omits thinking when request.config.thinkingConfig.includeThoughts is false', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      anthropicState.createImpl.mockResolvedValue({
        id: 'anthropic-1',
        model: 'claude-test',
        content: [{ type: 'text', text: 'hi' }],
      });

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: { max_tokens: 500 },
          schemaCompliance: 'auto',
          reasoning: { effort: 'high' },
        },
        mockConfig,
      );

      await generator.generateContent({
        model: 'models/ignored',
        contents: 'Hello',
        config: { thinkingConfig: { includeThoughts: false } },
      } as unknown as GenerateContentParameters);

      const [anthropicRequest] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;
      expect(anthropicRequest).toEqual(
        expect.not.objectContaining({ thinking: expect.anything() }),
      );
    });
  });

  describe('countTokens', () => {
    it('counts tokens using the request tokenizer', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {},
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const request: CountTokensParameters = {
        contents: [{ role: 'user', parts: [{ text: 'Hello world' }] }],
        model: 'claude-test',
      };

      const result = await generator.countTokens(request);
      expect(mockTokenizer.calculateTokens).toHaveBeenCalledWith(request, {
        textEncoding: 'cl100k_base',
      });
      expect(result.totalTokens).toBe(50);
    });

    it('falls back to character approximation when tokenizer throws', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      mockTokenizer.calculateTokens.mockRejectedValueOnce(new Error('boom'));
      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: {},
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const request: CountTokensParameters = {
        contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
        model: 'claude-test',
      };

      const content = JSON.stringify(request.contents);
      const expected = Math.ceil(content.length / 4);
      const result = await generator.countTokens(request);
      expect(result.totalTokens).toBe(expected);
    });
  });

  describe('generateContentStream', () => {
    it('requests stream=true and converts streamed events into Gemini chunks', async () => {
      const { AnthropicContentGenerator } = await importGenerator();
      anthropicState.createImpl.mockResolvedValue(
        (async function* () {
          yield {
            type: 'message_start',
            message: {
              id: 'msg-1',
              model: 'claude-test',
              usage: { cache_read_input_tokens: 2, input_tokens: 3 },
            },
          };

          yield {
            type: 'content_block_start',
            index: 0,
            content_block: { type: 'text' },
          };
          yield {
            type: 'content_block_delta',
            index: 0,
            delta: { type: 'text_delta', text: 'Hello' },
          };
          yield { type: 'content_block_stop', index: 0 };

          yield {
            type: 'content_block_start',
            index: 1,
            content_block: { type: 'thinking', signature: '' },
          };
          yield {
            type: 'content_block_delta',
            index: 1,
            delta: { type: 'thinking_delta', thinking: 'Think' },
          };
          yield {
            type: 'content_block_delta',
            index: 1,
            delta: { type: 'signature_delta', signature: 'abc' },
          };
          yield { type: 'content_block_stop', index: 1 };

          yield {
            type: 'content_block_start',
            index: 2,
            content_block: {
              type: 'tool_use',
              id: 't1',
              name: 'tool',
              input: {},
            },
          };
          yield {
            type: 'content_block_delta',
            index: 2,
            delta: { type: 'input_json_delta', partial_json: '{"x":' },
          };
          yield {
            type: 'content_block_delta',
            index: 2,
            delta: { type: 'input_json_delta', partial_json: '1}' },
          };
          yield { type: 'content_block_stop', index: 2 };

          yield {
            type: 'message_delta',
            delta: { stop_reason: 'end_turn' },
            usage: {
              output_tokens: 5,
              input_tokens: 7,
              cache_read_input_tokens: 2,
            },
          };
          yield { type: 'message_stop' };
        })(),
      );

      const generator = new AnthropicContentGenerator(
        {
          model: 'claude-test',
          apiKey: 'test-key',
          timeout: 10_000,
          maxRetries: 2,
          samplingParams: { max_tokens: 123 },
          schemaCompliance: 'auto',
        },
        mockConfig,
      );

      const stream = await generator.generateContentStream({
        model: 'models/ignored',
        contents: 'Hello',
      } as unknown as GenerateContentParameters);

      const chunks: GenerateContentResponse[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      const [anthropicRequest] =
        anthropicState.lastCreateArgs as AnthropicCreateArgs;
      expect(anthropicRequest).toEqual(
        expect.objectContaining({ stream: true }),
      );

      // Text chunk.
      expect(chunks[0]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        text: 'Hello',
      });

      // Thinking chunk.
      expect(chunks[1]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        text: 'Think',
        thought: true,
      });

      // Signature chunk.
      expect(chunks[2]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        thought: true,
        thoughtSignature: 'abc',
      });

      // Tool call chunk.
      expect(chunks[3]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
        functionCall: { id: 't1', name: 'tool', args: { x: 1 } },
      });

      // Usage/finish chunks exist; check the last one.
      const last = chunks[chunks.length - 1]!;
      expect(last.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
      expect(last.usageMetadata).toEqual({
        cachedContentTokenCount: 2,
        promptTokenCount: 9, // cached(2) + input(7)
        candidatesTokenCount: 5,
        totalTokenCount: 14,
      });
    });
  });
});
@@ -1,502 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import Anthropic from '@anthropic-ai/sdk';
import type {
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
  Part,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';
type Message = Anthropic.Message;
type MessageCreateParamsNonStreaming =
  Anthropic.MessageCreateParamsNonStreaming;
type MessageCreateParamsStreaming = Anthropic.MessageCreateParamsStreaming;
type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';

type StreamingBlockState = {
  type: string;
  id?: string;
  name?: string;
  inputJson: string;
  signature: string;
};

type MessageCreateParamsWithThinking = MessageCreateParamsNonStreaming & {
  thinking?: { type: 'enabled'; budget_tokens: number };
  // Anthropic beta feature: output_config.effort (requires beta header effort-2025-11-24)
  // This is not yet represented in the official SDK types we depend on.
  output_config?: { effort: 'low' | 'medium' | 'high' };
};

export class AnthropicContentGenerator implements ContentGenerator {
  private client: Anthropic;
  private converter: AnthropicContentConverter;

  constructor(
    private contentGeneratorConfig: ContentGeneratorConfig,
    private readonly cliConfig: Config,
  ) {
    const defaultHeaders = this.buildHeaders();
    const baseURL = contentGeneratorConfig.baseUrl;

    this.client = new Anthropic({
      apiKey: contentGeneratorConfig.apiKey,
      baseURL,
      timeout: contentGeneratorConfig.timeout,
      maxRetries: contentGeneratorConfig.maxRetries,
      defaultHeaders,
    });

    this.converter = new AnthropicContentConverter(
      contentGeneratorConfig.model,
      contentGeneratorConfig.schemaCompliance,
    );
  }

  async generateContent(
    request: GenerateContentParameters,
  ): Promise<GenerateContentResponse> {
    const anthropicRequest = await this.buildRequest(request);
    const response = (await this.client.messages.create(anthropicRequest, {
      signal: request.config?.abortSignal,
    })) as Message;

    return this.converter.convertAnthropicResponseToGemini(response);
  }

  async generateContentStream(
    request: GenerateContentParameters,
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const anthropicRequest = await this.buildRequest(request);
    const streamingRequest: MessageCreateParamsStreaming & {
      thinking?: { type: 'enabled'; budget_tokens: number };
    } = {
      ...anthropicRequest,
      stream: true,
    };

    const stream = (await this.client.messages.create(
      streamingRequest as MessageCreateParamsStreaming,
      {
        signal: request.config?.abortSignal,
      },
    )) as AsyncIterable<RawMessageStreamEvent>;

    return this.processStream(stream);
  }

  async countTokens(
    request: CountTokensParameters,
  ): Promise<CountTokensResponse> {
    try {
      const tokenizer = getDefaultTokenizer();
      const result = await tokenizer.calculateTokens(request, {
        textEncoding: 'cl100k_base',
      });

      return {
        totalTokens: result.totalTokens,
      };
    } catch (error) {
      console.warn(
        'Failed to calculate tokens with tokenizer, ' +
          'falling back to simple method:',
        error,
      );

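      // Rough approximation: assume ~4 characters per token.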
      const content = JSON.stringify(request.contents);
      const totalTokens = Math.ceil(content.length / 4);
      return {
        totalTokens,
      };
    }
  }

  async embedContent(
    _request: EmbedContentParameters,
  ): Promise<EmbedContentResponse> {
    throw new Error('Anthropic does not support embeddings.');
  }

  useSummarizedThinking(): boolean {
    return false;
  }

  private buildHeaders(): Record<string, string> {
    const version = this.cliConfig.getCliVersion() || 'unknown';
    const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;

    const betas: string[] = [];
    const reasoning = this.contentGeneratorConfig.reasoning;

    // Interleaved thinking is used when we send the `thinking` field.
    if (reasoning !== false) {
      betas.push('interleaved-thinking-2025-05-14');
    }

    // Effort (beta) is enabled when reasoning.effort is set.
    if (reasoning !== false && reasoning?.effort !== undefined) {
      betas.push('effort-2025-11-24');
    }

    const headers: Record<string, string> = {
      'User-Agent': userAgent,
    };

    if (betas.length) {
      headers['anthropic-beta'] = betas.join(',');
    }

    return headers;
  }

  private async buildRequest(
    request: GenerateContentParameters,
  ): Promise<MessageCreateParamsWithThinking> {
    const { system, messages } =
      this.converter.convertGeminiRequestToAnthropic(request);

    const tools = request.config?.tools
      ? await this.converter.convertGeminiToolsToAnthropic(request.config.tools)
      : undefined;

    const sampling = this.buildSamplingParameters(request);
    const thinking = this.buildThinkingConfig(request);
    const outputConfig = this.buildOutputConfig();

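    // Spread optional fields only when present so the request never carries
    // a `thinking` or `output_config` key set to undefined.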
    return {
      model: this.contentGeneratorConfig.model,
      system,
      messages,
      tools,
      ...sampling,
      ...(thinking ? { thinking } : {}),
      ...(outputConfig ? { output_config: outputConfig } : {}),
    };
  }

  private buildSamplingParameters(request: GenerateContentParameters): {
    max_tokens: number;
    temperature?: number;
    top_p?: number;
    top_k?: number;
  } {
    const configSamplingParams = this.contentGeneratorConfig.samplingParams;
    const requestConfig = request.config || {};

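    // Config-level sampling params take precedence over per-request values.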
    const getParam = <T>(
      configKey: keyof NonNullable<typeof configSamplingParams>,
      requestKey?: keyof NonNullable<typeof requestConfig>,
    ): T | undefined => {
      const configValue = configSamplingParams?.[configKey] as T | undefined;
      const requestValue = requestKey
        ? (requestConfig[requestKey] as T | undefined)
        : undefined;
      return configValue !== undefined ? configValue : requestValue;
    };

    const maxTokens =
      getParam<number>('max_tokens', 'maxOutputTokens') ?? 10_000;

    return {
      max_tokens: maxTokens,
      temperature: getParam<number>('temperature', 'temperature') ?? 1,
      top_p: getParam<number>('top_p', 'topP'),
      top_k: getParam<number>('top_k', 'topK'),
    };
  }

  private buildThinkingConfig(
    request: GenerateContentParameters,
  ): { type: 'enabled'; budget_tokens: number } | undefined {
    if (request.config?.thinkingConfig?.includeThoughts === false) {
      return undefined;
    }

    const reasoning = this.contentGeneratorConfig.reasoning;

    if (reasoning === false) {
      return undefined;
    }

    if (reasoning?.budget_tokens !== undefined) {
      return {
        type: 'enabled',
        budget_tokens: reasoning.budget_tokens,
      };
    }

    const effort = reasoning?.effort;
    // When using interleaved thinking with tools, the budget is limited only by
    // the entire context window (200k tokens).
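    // Map effort to a thinking budget; 'medium' (or an unset effort) defaults to 32k.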
    const budgetTokens =
      effort === 'low' ? 16_000 : effort === 'high' ? 64_000 : 32_000;

    return {
      type: 'enabled',
      budget_tokens: budgetTokens,
    };
  }

  private buildOutputConfig():
    | { effort: 'low' | 'medium' | 'high' }
    | undefined {
    const reasoning = this.contentGeneratorConfig.reasoning;
    if (reasoning === false || reasoning === undefined) {
      return undefined;
    }

    if (reasoning.effort === undefined) {
      return undefined;
    }

    return { effort: reasoning.effort };
  }

  private async *processStream(
    stream: AsyncIterable<RawMessageStreamEvent>,
  ): AsyncGenerator<GenerateContentResponse> {
    let messageId: string | undefined;
    let model = this.contentGeneratorConfig.model;
    let cachedTokens = 0;
    let promptTokens = 0;
    let completionTokens = 0;
    let finishReason: string | undefined;

    const blocks = new Map<number, StreamingBlockState>();
    const collectedResponses: GenerateContentResponse[] = [];

    for await (const event of stream) {
      switch (event.type) {
        case 'message_start': {
          messageId = event.message.id ?? messageId;
          model = event.message.model ?? model;
          cachedTokens =
            event.message.usage?.cache_read_input_tokens ?? cachedTokens;
          promptTokens = event.message.usage?.input_tokens ?? promptTokens;
          break;
        }
        case 'content_block_start': {
          const index = event.index ?? 0;
          const type = String(event.content_block.type || 'text');
          const initialInput =
            type === 'tool_use' && 'input' in event.content_block
              ? JSON.stringify(event.content_block.input)
              : '';
          blocks.set(index, {
            type,
            id:
              'id' in event.content_block ? event.content_block.id : undefined,
            name:
              'name' in event.content_block
                ? event.content_block.name
                : undefined,
            inputJson: initialInput !== '{}' ? initialInput : '',
            signature:
              type === 'thinking' &&
              'signature' in event.content_block &&
              typeof event.content_block.signature === 'string'
                ? event.content_block.signature
                : '',
          });
          break;
        }
        case 'content_block_delta': {
          const index = event.index ?? 0;
          const deltaType = (event.delta as { type?: string }).type || '';
          const blockState = blocks.get(index);

          if (deltaType === 'text_delta') {
            const text = 'text' in event.delta ? event.delta.text : '';
            if (text) {
              const chunk = this.buildGeminiChunk({ text }, messageId, model);
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'thinking_delta') {
            const thinking =
              (event.delta as { thinking?: string }).thinking || '';
            if (thinking) {
              const chunk = this.buildGeminiChunk(
                { text: thinking, thought: true },
                messageId,
                model,
              );
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'signature_delta' && blockState) {
            const signature =
              (event.delta as { signature?: string }).signature || '';
            if (signature) {
              blockState.signature += signature;
              const chunk = this.buildGeminiChunk(
                { thought: true, thoughtSignature: signature },
                messageId,
                model,
              );
              collectedResponses.push(chunk);
              yield chunk;
            }
          } else if (deltaType === 'input_json_delta' && blockState) {
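            // Tool-call arguments stream as partial JSON; accumulate them and
            // parse the complete string at content_block_stop.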
            const jsonDelta =
              (event.delta as { partial_json?: string }).partial_json || '';
            if (jsonDelta) {
              blockState.inputJson += jsonDelta;
            }
          }
          break;
        }
        case 'content_block_stop': {
          const index = event.index ?? 0;
          const blockState = blocks.get(index);
          if (blockState?.type === 'tool_use') {
            const args = safeJsonParse(blockState.inputJson || '{}', {});
            const chunk = this.buildGeminiChunk(
              {
                functionCall: {
                  id: blockState.id,
                  name: blockState.name,
                  args,
                },
              },
              messageId,
              model,
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          blocks.delete(index);
          break;
        }
        case 'message_delta': {
          const stopReasonValue = event.delta.stop_reason;
          if (stopReasonValue) {
            finishReason = stopReasonValue;
          }

          // Some Anthropic-compatible providers may include additional usage fields
          // (e.g. `input_tokens`, `cache_read_input_tokens`) even though the official
          // Anthropic SDK types only expose `output_tokens` here.
          const usageUnknown = event.usage as unknown;
          const usageRecord =
            usageUnknown && typeof usageUnknown === 'object'
              ? (usageUnknown as Record<string, unknown>)
              : undefined;

          if (event.usage?.output_tokens !== undefined) {
            completionTokens = event.usage.output_tokens;
          }
          if (usageRecord?.['input_tokens'] !== undefined) {
            const inputTokens = usageRecord['input_tokens'];
            if (typeof inputTokens === 'number') {
              promptTokens = inputTokens;
            }
          }
          if (usageRecord?.['cache_read_input_tokens'] !== undefined) {
            const cacheRead = usageRecord['cache_read_input_tokens'];
            if (typeof cacheRead === 'number') {
              cachedTokens = cacheRead;
            }
          }

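          // Anthropic's input_tokens excludes cache reads, so the Gemini
          // prompt count reported here is cached + input.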
          if (finishReason || event.usage) {
            const chunk = this.buildGeminiChunk(
              undefined,
              messageId,
              model,
              finishReason,
              {
                cachedContentTokenCount: cachedTokens,
                promptTokenCount: cachedTokens + promptTokens,
                candidatesTokenCount: completionTokens,
                totalTokenCount: cachedTokens + promptTokens + completionTokens,
              },
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          break;
        }
        case 'message_stop': {
          if (promptTokens || completionTokens) {
            const chunk = this.buildGeminiChunk(
              undefined,
              messageId,
              model,
              finishReason,
              {
                cachedContentTokenCount: cachedTokens,
                promptTokenCount: cachedTokens + promptTokens,
                candidatesTokenCount: completionTokens,
                totalTokenCount: cachedTokens + promptTokens + completionTokens,
              },
            );
            collectedResponses.push(chunk);
            yield chunk;
          }
          break;
        }
        default:
          break;
      }
    }
  }

  private buildGeminiChunk(
    part?: {
      text?: string;
      thought?: boolean;
      thoughtSignature?: string;
      functionCall?: unknown;
    },
    responseId?: string,
    model?: string,
    finishReason?: string,
    usageMetadata?: GenerateContentResponseUsageMetadata,
  ): GenerateContentResponse {
    const response = new GenerateContentResponse();
    response.responseId = responseId;
    response.createTime = Date.now().toString();
    response.modelVersion = model || this.contentGeneratorConfig.model;
    response.promptFeedback = { safetyRatings: [] };

    const candidateParts = part ? [part as unknown as Part] : [];
    const mappedFinishReason =
      finishReason !== undefined
        ? this.converter.mapAnthropicFinishReasonToGemini(finishReason)
        : undefined;
    response.candidates = [
      {
        content: {
          parts: candidateParts,
          role: 'model' as const,
        },
        index: 0,
        safetyRatings: [],
        ...(mappedFinishReason ? { finishReason: mappedFinishReason } : {}),
      },
    ];

    if (usageMetadata) {
      response.usageMetadata = usageMetadata;
    }

    return response;
  }
}
@@ -1,377 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { CallableTool, Content, Tool } from '@google/genai';
import { FinishReason } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';

// Mock schema conversion so we can force edge cases (e.g. a missing `type`).
vi.mock('../../utils/schemaConverter.js', () => ({
  convertSchema: vi.fn((schema: unknown) => schema),
}));

import { convertSchema } from '../../utils/schemaConverter.js';
import { AnthropicContentConverter } from './converter.js';

describe('AnthropicContentConverter', () => {
  let converter: AnthropicContentConverter;

  beforeEach(() => {
    vi.clearAllMocks();
    converter = new AnthropicContentConverter('test-model', 'auto');
  });

  describe('convertGeminiRequestToAnthropic', () => {
    it('extracts systemInstruction text from string', () => {
      const { system } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'hi',
        config: { systemInstruction: 'sys' },
      });

      expect(system).toBe('sys');
    });

    it('extracts systemInstruction text from parts and joins with newlines', () => {
      const { system } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'hi',
        config: {
          systemInstruction: {
            role: 'system',
            parts: [{ text: 'a' }, { text: 'b' }],
          } as unknown as Content,
        },
      });

      expect(system).toBe('a\nb');
    });

    it('converts a plain string content into a user message', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: 'Hello',
      });

      expect(messages).toEqual([
        { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
      ]);
    });

    it('converts user content parts into a user message with text blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [{ text: 'Hello' }, { text: 'World' }],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Hello' },
            { type: 'text', text: 'World' },
          ],
        },
      ]);
    });

    it('converts assistant thought parts into Anthropic thinking blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'model',
            parts: [
              { text: 'internal', thought: true, thoughtSignature: 'sig' },
              { text: 'visible' },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'assistant',
          content: [
            { type: 'thinking', thinking: 'internal', signature: 'sig' },
            { type: 'text', text: 'visible' },
          ],
        },
      ]);
    });

    it('converts functionCall parts from model role into tool_use blocks', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'model',
            parts: [
              { text: 'preface' },
              {
                functionCall: {
                  id: 'call-1',
                  name: 'tool_name',
                  args: { a: 1 },
                },
              },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'assistant',
          content: [
            { type: 'text', text: 'preface' },
            {
              type: 'tool_use',
              id: 'call-1',
              name: 'tool_name',
              input: { a: 1 },
            },
          ],
        },
      ]);
    });

    it('converts functionResponse parts into user tool_result messages', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [
              {
                functionResponse: {
                  id: 'call-1',
                  name: 'tool_name',
                  response: { output: 'ok' },
                },
              },
            ],
          },
        ],
      });

      expect(messages).toEqual([
        {
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: 'call-1',
              content: 'ok',
            },
          ],
        },
      ]);
    });

    it('extracts function response error field when present', () => {
      const { messages } = converter.convertGeminiRequestToAnthropic({
        model: 'models/test',
        contents: [
          {
            role: 'user',
            parts: [
              {
                functionResponse: {
                  id: 'call-1',
                  name: 'tool_name',
                  response: { error: 'boom' },
                },
              },
            ],
          },
        ],
      });

      expect(messages[0]).toEqual({
        role: 'user',
        content: [
          {
            type: 'tool_result',
            tool_use_id: 'call-1',
            content: 'boom',
          },
        ],
      });
    });
  });

  describe('convertGeminiToolsToAnthropic', () => {
    it('converts Tool.functionDeclarations to Anthropic tools and runs schema conversion', async () => {
      const tools = [
        {
          functionDeclarations: [
            {
              name: 'get_weather',
              description: 'Get weather',
              parametersJsonSchema: {
                type: 'object',
                properties: { location: { type: 'string' } },
                required: ['location'],
              },
            },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        name: 'get_weather',
        description: 'Get weather',
        input_schema: {
          type: 'object',
          properties: { location: { type: 'string' } },
          required: ['location'],
        },
      });

      expect(vi.mocked(convertSchema)).toHaveBeenCalledTimes(1);
    });

    it('resolves CallableTool.tool() and converts its functionDeclarations', async () => {
      const callable = [
        {
          tool: async () =>
            ({
              functionDeclarations: [
                {
                  name: 'dynamic_tool',
                  description: 'resolved tool',
                  parametersJsonSchema: { type: 'object', properties: {} },
                },
              ],
            }) as unknown as Tool,
        },
      ] as CallableTool[];

      const result = await converter.convertGeminiToolsToAnthropic(callable);

      expect(result).toHaveLength(1);
      expect(result[0].name).toBe('dynamic_tool');
    });

    it('defaults missing parameters to an empty object schema', async () => {
      const tools = [
        {
          functionDeclarations: [
            { name: 'no_params', description: 'no params' },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);

      expect(result).toHaveLength(1);
      expect(result[0]).toEqual({
        name: 'no_params',
        description: 'no params',
        input_schema: { type: 'object', properties: {} },
      });
    });

    it('forces input_schema.type to "object" when schema conversion yields no type', async () => {
      vi.mocked(convertSchema).mockImplementationOnce(() => ({
        properties: {},
      }));
      const tools = [
        {
          functionDeclarations: [
            {
              name: 'edge',
              description: 'edge',
              parametersJsonSchema: { type: 'object', properties: {} },
            },
          ],
        },
      ] as Tool[];

      const result = await converter.convertGeminiToolsToAnthropic(tools);
      expect(result[0]?.input_schema?.type).toBe('object');
    });
  });

  describe('convertAnthropicResponseToGemini', () => {
    it('converts text, tool_use, thinking, and redacted_thinking blocks', () => {
      const response = converter.convertAnthropicResponseToGemini({
        id: 'msg-1',
        model: 'claude-test',
        stop_reason: 'end_turn',
        content: [
          { type: 'thinking', thinking: 'thought', signature: 'sig' },
          { type: 'text', text: 'hello' },
          { type: 'tool_use', id: 't1', name: 'tool', input: { x: 1 } },
          { type: 'redacted_thinking' },
        ],
        usage: { input_tokens: 3, output_tokens: 5 },
      } as unknown as Anthropic.Message);

      expect(response.responseId).toBe('msg-1');
      expect(response.modelVersion).toBe('claude-test');
      expect(response.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
      expect(response.usageMetadata).toEqual({
        promptTokenCount: 3,
        candidatesTokenCount: 5,
        totalTokenCount: 8,
      });

      const parts = response.candidates?.[0]?.content?.parts || [];
      expect(parts).toEqual([
        { text: 'thought', thought: true, thoughtSignature: 'sig' },
        { text: 'hello' },
        { functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
        { text: '', thought: true },
      ]);
    });

    it('handles tool_use input that is a JSON string', () => {
      const response = converter.convertAnthropicResponseToGemini({
        id: 'msg-1',
        model: 'claude-test',
        stop_reason: null,
        content: [
          { type: 'tool_use', id: 't1', name: 'tool', input: '{"x":1}' },
        ],
      } as unknown as Anthropic.Message);

      const parts = response.candidates?.[0]?.content?.parts || [];
      expect(parts).toEqual([
        { functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
      ]);
    });
  });

  describe('mapAnthropicFinishReasonToGemini', () => {
    it('maps known reasons', () => {
      expect(converter.mapAnthropicFinishReasonToGemini('end_turn')).toBe(
        FinishReason.STOP,
      );
      expect(converter.mapAnthropicFinishReasonToGemini('max_tokens')).toBe(
        FinishReason.MAX_TOKENS,
      );
      expect(converter.mapAnthropicFinishReasonToGemini('content_filter')).toBe(
        FinishReason.SAFETY,
      );
    });

    it('returns undefined for null/empty', () => {
      expect(converter.mapAnthropicFinishReasonToGemini(null)).toBeUndefined();
      expect(converter.mapAnthropicFinishReasonToGemini('')).toBeUndefined();
    });
  });
});
@@ -1,448 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  Candidate,
  CallableTool,
  Content,
  ContentListUnion,
  ContentUnion,
  FunctionCall,
  FunctionResponse,
  GenerateContentParameters,
  Part,
  PartUnion,
  Tool,
  ToolListUnion,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import {
  convertSchema,
  type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';

type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;

type ThoughtPart = { text: string; signature?: string };

interface ParsedParts {
  thoughtParts: ThoughtPart[];
  contentParts: string[];
  functionCalls: FunctionCall[];
  functionResponses: FunctionResponse[];
}

export class AnthropicContentConverter {
  private model: string;
  private schemaCompliance: SchemaComplianceMode;

  constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
    this.model = model;
    this.schemaCompliance = schemaCompliance;
  }

  convertGeminiRequestToAnthropic(request: GenerateContentParameters): {
    system?: string;
    messages: AnthropicMessageParam[];
  } {
    const messages: AnthropicMessageParam[] = [];

    const system = this.extractTextFromContentUnion(
      request.config?.systemInstruction,
    );

    this.processContents(request.contents, messages);

    return {
      system: system || undefined,
      messages,
    };
  }

  async convertGeminiToolsToAnthropic(
    geminiTools: ToolListUnion,
  ): Promise<AnthropicToolParam[]> {
    const tools: AnthropicToolParam[] = [];

    for (const tool of geminiTools) {
      let actualTool: Tool;

      if ('tool' in tool) {
        actualTool = await (tool as CallableTool).tool();
      } else {
        actualTool = tool as Tool;
      }

      if (!actualTool.functionDeclarations) {
        continue;
      }

      for (const func of actualTool.functionDeclarations) {
        if (!func.name) continue;

        let inputSchema: Record<string, unknown> | undefined;
        if (func.parametersJsonSchema) {
          inputSchema = {
            ...(func.parametersJsonSchema as Record<string, unknown>),
          };
        } else if (func.parameters) {
          inputSchema = func.parameters as Record<string, unknown>;
        }

        if (!inputSchema) {
          inputSchema = { type: 'object', properties: {} };
        }

        inputSchema = convertSchema(inputSchema, this.schemaCompliance);
        if (typeof inputSchema['type'] !== 'string') {
          inputSchema['type'] = 'object';
        }

        tools.push({
          name: func.name,
          description: func.description,
          input_schema: inputSchema as Anthropic.Tool.InputSchema,
        });
      }
    }

    return tools;
  }

  convertAnthropicResponseToGemini(
    response: Anthropic.Message,
  ): GenerateContentResponse {
    const geminiResponse = new GenerateContentResponse();
    const parts: Part[] = [];

    for (const block of response.content || []) {
      const blockType = String((block as { type?: string })['type'] || '');
      if (blockType === 'text') {
        const text =
          typeof (block as { text?: string }).text === 'string'
            ? (block as { text?: string }).text
            : '';
        if (text) {
          parts.push({ text });
        }
      } else if (blockType === 'tool_use') {
        const toolUse = block as {
          id?: string;
          name?: string;
          input?: unknown;
        };
        parts.push({
          functionCall: {
            id: typeof toolUse.id === 'string' ? toolUse.id : undefined,
            name: typeof toolUse.name === 'string' ? toolUse.name : undefined,
            args: this.safeInputToArgs(toolUse.input),
          },
        });
      } else if (blockType === 'thinking') {
        const thinking =
          typeof (block as { thinking?: string }).thinking === 'string'
            ? (block as { thinking?: string }).thinking
            : '';
        const signature =
          typeof (block as { signature?: string }).signature === 'string'
            ? (block as { signature?: string }).signature
            : '';
        if (thinking || signature) {
          const thoughtPart: Part = {
            text: thinking,
            thought: true,
            thoughtSignature: signature,
          };
          parts.push(thoughtPart);
        }
      } else if (blockType === 'redacted_thinking') {
        parts.push({ text: '', thought: true });
      }
    }

    const candidate: Candidate = {
      content: {
        parts,
        role: 'model' as const,
      },
      index: 0,
      safetyRatings: [],
    };

    const finishReason = this.mapAnthropicFinishReasonToGemini(
      response.stop_reason,
    );
    if (finishReason) {
      candidate.finishReason = finishReason;
    }

    geminiResponse.candidates = [candidate];
    geminiResponse.responseId = response.id;
    geminiResponse.createTime = Date.now().toString();
    geminiResponse.modelVersion = response.model || this.model;
    geminiResponse.promptFeedback = { safetyRatings: [] };

    if (response.usage) {
      const promptTokens = response.usage.input_tokens || 0;
      const completionTokens = response.usage.output_tokens || 0;
      geminiResponse.usageMetadata = {
        promptTokenCount: promptTokens,
        candidatesTokenCount: completionTokens,
        totalTokenCount: promptTokens + completionTokens,
      };
    }

    return geminiResponse;
  }

  private processContents(
    contents: ContentListUnion,
    messages: AnthropicMessageParam[],
  ): void {
    if (Array.isArray(contents)) {
      for (const content of contents) {
        this.processContent(content, messages);
      }
    } else if (contents) {
      this.processContent(contents, messages);
    }
  }

  private processContent(
    content: ContentUnion | PartUnion,
    messages: AnthropicMessageParam[],
  ): void {
    if (typeof content === 'string') {
      messages.push({
        role: 'user',
        content: [{ type: 'text', text: content }],
      });
      return;
    }

    if (!this.isContentObject(content)) return;

    const parsed = this.parseParts(content.parts || []);

    if (parsed.functionResponses.length > 0) {
      for (const response of parsed.functionResponses) {
        messages.push({
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: response.id || '',
              content: this.extractFunctionResponseContent(response.response),
            },
          ],
        });
      }
      return;
    }

    if (content.role === 'model' && parsed.functionCalls.length > 0) {
      const thinkingBlocks: AnthropicContentBlockParam[] =
        parsed.thoughtParts.map((part) => {
          const thinkingBlock: unknown = {
            type: 'thinking',
            thinking: part.text,
          };
          if (part.signature) {
            (thinkingBlock as { signature?: string }).signature =
              part.signature;
          }
          return thinkingBlock as AnthropicContentBlockParam;
        });
      const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
        (call, index) => ({
          type: 'tool_use',
          id: call.id || `tool_${index}`,
          name: call.name || '',
          input: (call.args as Record<string, unknown>) || {},
        }),
      );

      const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
        (text) => ({
          type: 'text' as const,
          text,
        }),
      );

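      // Order blocks as thinking, then text, then tool_use in the assistant turn.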
      messages.push({
        role: 'assistant',
        content: [...thinkingBlocks, ...textBlocks, ...toolUses],
      });
      return;
    }

    const role = content.role === 'model' ? 'assistant' : 'user';
    const thinkingBlocks: AnthropicContentBlockParam[] =
      role === 'assistant'
        ? parsed.thoughtParts.map((part) => {
            const thinkingBlock: unknown = {
              type: 'thinking',
              thinking: part.text,
            };
            if (part.signature) {
              (thinkingBlock as { signature?: string }).signature =
                part.signature;
            }
            return thinkingBlock as AnthropicContentBlockParam;
          })
        : [];
    const textBlocks: AnthropicContentBlockParam[] = [
      ...thinkingBlocks,
      ...parsed.contentParts.map((text) => ({
        type: 'text' as const,
        text,
      })),
    ];
    if (textBlocks.length > 0) {
      messages.push({ role, content: textBlocks });
    }
  }

  private parseParts(parts: Part[]): ParsedParts {
    const thoughtParts: ThoughtPart[] = [];
    const contentParts: string[] = [];
    const functionCalls: FunctionCall[] = [];
    const functionResponses: FunctionResponse[] = [];

    for (const part of parts) {
      if (typeof part === 'string') {
        contentParts.push(part);
      } else if (
        'text' in part &&
        part.text &&
        !('thought' in part && part.thought)
      ) {
        contentParts.push(part.text);
      } else if ('text' in part && 'thought' in part && part.thought) {
        thoughtParts.push({
          text: part.text || '',
          signature:
            'thoughtSignature' in part &&
            typeof part.thoughtSignature === 'string'
              ? part.thoughtSignature
              : undefined,
        });
      } else if ('functionCall' in part && part.functionCall) {
        functionCalls.push(part.functionCall);
      } else if ('functionResponse' in part && part.functionResponse) {
        functionResponses.push(part.functionResponse);
      }
    }

    return {
      thoughtParts,
      contentParts,
      functionCalls,
      functionResponses,
    };
  }

  private extractTextFromContentUnion(contentUnion: unknown): string {
    if (typeof contentUnion === 'string') {
      return contentUnion;
    }

    if (Array.isArray(contentUnion)) {
      return contentUnion
        .map((item) => this.extractTextFromContentUnion(item))
        .filter(Boolean)
        .join('\n');
    }

    if (typeof contentUnion === 'object' && contentUnion !== null) {
      if ('parts' in contentUnion) {
        const content = contentUnion as Content;
        return (
          content.parts
            ?.map((part: Part) => {
              if (typeof part === 'string') return part;
              if ('text' in part) return part.text || '';
              return '';
            })
            .filter(Boolean)
            .join('\n') || ''
        );
      }
    }

    return '';
  }

  private extractFunctionResponseContent(response: unknown): string {
    if (response === null || response === undefined) {
      return '';
    }

    if (typeof response === 'string') {
      return response;
    }

    if (typeof response === 'object') {
      const responseObject = response as Record<string, unknown>;
      const output = responseObject['output'];
      if (typeof output === 'string') {
        return output;
      }

      const error = responseObject['error'];
      if (typeof error === 'string') {
        return error;
      }
    }

    try {
      const serialized = JSON.stringify(response);
      return serialized ?? String(response);
    } catch {
      return String(response);
    }
  }

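  // Some providers deliver tool_use input as a JSON string rather than an object.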
  private safeInputToArgs(input: unknown): Record<string, unknown> {
    if (input && typeof input === 'object') {
      return input as Record<string, unknown>;
    }
    if (typeof input === 'string') {
      return safeJsonParse(input, {});
    }
    return {};
  }

  mapAnthropicFinishReasonToGemini(
    reason?: string | null,
  ): FinishReason | undefined {
    if (!reason) return undefined;
    const mapping: Record<string, FinishReason> = {
      end_turn: FinishReason.STOP,
      stop_sequence: FinishReason.STOP,
      tool_use: FinishReason.STOP,
      max_tokens: FinishReason.MAX_TOKENS,
      content_filter: FinishReason.SAFETY,
    };
    return mapping[reason] || FinishReason.FINISH_REASON_UNSPECIFIED;
  }

  private isContentObject(
    content: unknown,
  ): content is { role: string; parts: Part[] } {
    return (
      typeof content === 'object' &&
      content !== null &&
      'role' in content &&
      'parts' in content &&
      Array.isArray((content as Record<string, unknown>)['parts'])
    );
  }
}
@@ -1,21 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type {
  ContentGenerator,
  ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { AnthropicContentGenerator } from './anthropicContentGenerator.js';

export { AnthropicContentGenerator } from './anthropicContentGenerator.js';

export function createAnthropicContentGenerator(
  contentGeneratorConfig: ContentGeneratorConfig,
  cliConfig: Config,
): ContentGenerator {
  return new AnthropicContentGenerator(contentGeneratorConfig, cliConfig);
}
@@ -8,7 +8,7 @@ import { describe, it, expect, vi } from 'vitest';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
import { LoggingContentGenerator } from './geminiContentGenerator/loggingContentGenerator.js';

vi.mock('@google/genai');

@@ -14,7 +14,6 @@ import type {
} from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';

/**
 * Interface abstracting the core functionalities for generating content and counting tokens.
@@ -38,11 +37,10 @@ export interface ContentGenerator {
}

export enum AuthType {
  USE_GEMINI = 'gemini-api-key',
  USE_VERTEX_AI = 'vertex-ai',
  USE_OPENAI = 'openai',
  QWEN_OAUTH = 'qwen-oauth',
  USE_GEMINI = 'gemini',
  USE_VERTEX_AI = 'vertex-ai',
  USE_ANTHROPIC = 'anthropic',
}

export type ContentGeneratorConfig = {
@@ -65,12 +63,9 @@ export type ContentGeneratorConfig = {
    temperature?: number;
    max_tokens?: number;
  };
  reasoning?:
    | false
    | {
        effort?: 'low' | 'medium' | 'high';
        budget_tokens?: number;
      };
  reasoning?: {
    effort?: 'low' | 'medium' | 'high';
  };
  proxy?: string | undefined;
  userAgent?: string;
  // Schema compliance mode for tool definitions
@@ -82,7 +77,7 @@ export function createContentGeneratorConfig(
  authType: AuthType | undefined,
  generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
  let newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
  const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
    ...(generationConfig || {}),
    authType,
    proxy: config?.getProxy(),
@@ -99,16 +94,8 @@ export function createContentGeneratorConfig(
  }

  if (authType === AuthType.USE_OPENAI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['OPENAI_API_KEY'],
      baseUrl:
        newContentGeneratorConfig.baseUrl || process.env['OPENAI_BASE_URL'],
      model: newContentGeneratorConfig.model || process.env['OPENAI_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('OPENAI_API_KEY environment variable not found.');
      throw new Error('OpenAI API key is required');
    }

    return {
@@ -117,62 +104,10 @@ export function createContentGeneratorConfig(
    } as ContentGeneratorConfig;
  }

  if (authType === AuthType.USE_ANTHROPIC) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey:
        newContentGeneratorConfig.apiKey || process.env['ANTHROPIC_API_KEY'],
      baseUrl:
        newContentGeneratorConfig.baseUrl || process.env['ANTHROPIC_BASE_URL'],
      model: newContentGeneratorConfig.model || process.env['ANTHROPIC_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('ANTHROPIC_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.baseUrl) {
      throw new Error('ANTHROPIC_BASE_URL environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('ANTHROPIC_MODEL environment variable not found.');
    }
  }

  if (authType === AuthType.USE_GEMINI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['GEMINI_API_KEY'],
      model: newContentGeneratorConfig.model || process.env['GEMINI_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('GEMINI_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('GEMINI_MODEL environment variable not found.');
    }
  }

  if (authType === AuthType.USE_VERTEX_AI) {
    newContentGeneratorConfig = {
      ...newContentGeneratorConfig,
      apiKey: newContentGeneratorConfig.apiKey || process.env['GOOGLE_API_KEY'],
      model: newContentGeneratorConfig.model || process.env['GOOGLE_MODEL'],
    };

    if (!newContentGeneratorConfig.apiKey) {
      throw new Error('GOOGLE_API_KEY environment variable not found.');
    }

    if (!newContentGeneratorConfig.model) {
      throw new Error('GOOGLE_MODEL environment variable not found.');
    }
  }

  return newContentGeneratorConfig as ContentGeneratorConfig;
  return {
    ...newContentGeneratorConfig,
    model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
  } as ContentGeneratorConfig;
}

export async function createContentGenerator(
@@ -180,9 +115,19 @@ export async function createContentGenerator(
  gcConfig: Config,
  isInitialAuth?: boolean,
): Promise<ContentGenerator> {
  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    const { createGeminiContentGenerator } = await import(
      './geminiContentGenerator/index.js'
    );
    return createGeminiContentGenerator(config, gcConfig);
  }

  if (config.authType === AuthType.USE_OPENAI) {
    if (!config.apiKey) {
      throw new Error('OPENAI_API_KEY environment variable not found.');
      throw new Error('OpenAI API key is required');
    }

    // Import OpenAIContentGenerator dynamically to avoid circular dependencies
@@ -191,8 +136,7 @@ export async function createContentGenerator(
    );

    // Always use OpenAIContentGenerator, logging is controlled by enableOpenAILogging flag
    const generator = createOpenAIContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
    return createOpenAIContentGenerator(config, gcConfig);
  }

  if (config.authType === AuthType.QWEN_OAUTH) {
@@ -213,8 +157,7 @@ export async function createContentGenerator(
    );

    // Create the content generator with dynamic token management
    const generator = new QwenContentGenerator(qwenClient, config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
    return new QwenContentGenerator(qwenClient, config, gcConfig);
  } catch (error) {
    throw new Error(
      `${error instanceof Error ? error.message : String(error)}`,
@@ -222,30 +165,6 @@ export async function createContentGenerator(
    }
  }

  if (config.authType === AuthType.USE_ANTHROPIC) {
    if (!config.apiKey) {
      throw new Error('ANTHROPIC_API_KEY environment variable not found.');
    }

    const { createAnthropicContentGenerator } = await import(
      './anthropicContentGenerator/index.js'
    );

    const generator = createAnthropicContentGenerator(config, gcConfig);
    return new LoggingContentGenerator(generator, gcConfig);
  }

  if (
    config.authType === AuthType.USE_GEMINI ||
    config.authType === AuthType.USE_VERTEX_AI
  ) {
    const { createGeminiContentGenerator } = await import(
|
||||
'./geminiContentGenerator/index.js'
|
||||
);
|
||||
const generator = createGeminiContentGenerator(config, gcConfig);
|
||||
return new LoggingContentGenerator(generator, gcConfig);
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
|
||||
);
|
||||
|
||||
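Taken together, the hunks above move provider-specific env-var fallbacks out of createContentGeneratorConfig and let the factory delegate logging to the per-provider factories. A minimal usage sketch, assuming only the names visible in this diff (createContentGeneratorConfig, createContentGenerator, AuthType, Config); the buildOpenAIGenerator helper is hypothetical:

// Hypothetical helper, sketched against the signatures shown above.
async function buildOpenAIGenerator(cliConfig: Config) {
  const generatorConfig = createContentGeneratorConfig(
    cliConfig,
    AuthType.USE_OPENAI,
    { apiKey: process.env['OPENAI_API_KEY'] }, // the caller now supplies credentials
  );
  // Throws 'OpenAI API key is required' if no key was resolved.
  return createContentGenerator(generatorConfig, cliConfig);
}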
@@ -720,6 +720,66 @@ describe('GeminiChat', () => {
    );
  });

  it('should handle summarized thinking by conditionally including thoughts in history', async () => {
    // Case 1: useSummarizedThinking is true -> thoughts NOT in history
    vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
      true,
    );
    const stream1 = (async function* () {
      yield {
        candidates: [
          {
            content: {
              role: 'model',
              parts: [{ thought: true, text: 'T1' }, { text: 'A1' }],
            },
            finishReason: 'STOP',
          },
        ],
      } as unknown as GenerateContentResponse;
    })();
    vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
      stream1,
    );

    const res1 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
    for await (const _ of res1);

    const history1 = chat.getHistory();
    expect(history1[1].parts).toEqual([{ text: 'A1' }]);

    // Case 2: useSummarizedThinking is false -> thoughts ARE in history
    chat.clearHistory();
    vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
      false,
    );
    const stream2 = (async function* () {
      yield {
        candidates: [
          {
            content: {
              role: 'model',
              parts: [{ thought: true, text: 'T2' }, { text: 'A2' }],
            },
            finishReason: 'STOP',
          },
        ],
      } as unknown as GenerateContentResponse;
    })();
    vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
      stream2,
    );

    const res2 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p2');
    for await (const _ of res2);

    const history2 = chat.getHistory();
    expect(history2[1].parts).toEqual([
      { text: 'T2', thought: true },
      { text: 'A2' },
    ]);
  });

  it('should keep parts with thoughtSignature when consolidating history', async () => {
    const stream = (async function* () {
      yield {

@@ -559,25 +559,14 @@ export class GeminiChat {
      yield chunk; // Yield every chunk to the UI immediately.
    }

    let thoughtContentPart: Part | undefined;
    const thoughtText = allModelParts
      .filter((part) => part.thought)
      .map((part) => part.text)
      .join('')
      .trim();

    if (thoughtText !== '') {
      thoughtContentPart = {
        text: thoughtText,
        thought: true,
      };
    }

    const thoughtSignature = allModelParts.filter(
      (part) => part.thoughtSignature && part.thought,
    )?.[0]?.thoughtSignature;
    if (thoughtContentPart && thoughtSignature) {
      thoughtContentPart.thoughtSignature = thoughtSignature;
    }
    let thoughtText = '';
    // Only include thoughts if not using summarized thinking.
    if (!this.config.getContentGenerator().useSummarizedThinking()) {
      thoughtText = allModelParts
        .filter((part) => part.thought)
        .map((part) => part.text)
        .join('')
        .trim();
    }

    const contentParts = allModelParts.filter((part) => !part.thought);
@@ -603,11 +592,11 @@ export class GeminiChat {
      .trim();

    // Record assistant turn with raw Content and metadata
    if (thoughtContentPart || contentText || hasToolCall || usageMetadata) {
    if (thoughtText || contentText || hasToolCall || usageMetadata) {
      this.chatRecordingService?.recordAssistantTurn({
        model,
        message: [
          ...(thoughtContentPart ? [thoughtContentPart] : []),
          ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
          ...(contentText ? [{ text: contentText }] : []),
          ...(hasToolCall
            ? contentParts
@@ -643,7 +632,7 @@ export class GeminiChat {
    this.history.push({
      role: 'model',
      parts: [
        ...(thoughtContentPart ? [thoughtContentPart] : []),
        ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
        ...consolidatedHistoryParts,
      ],
    });
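The GeminiChat change above gates thought text on the generator's useSummarizedThinking() flag. As a standalone sketch of that filter (assuming the Part shape from @google/genai, with optional thought and text fields):

// Sketch, not the shipped implementation: collapse thought parts to a single
// string, but only when the generator exposes full (non-summarized) thinking.
function extractThoughtText(parts: Part[], useSummarizedThinking: boolean): string {
  if (useSummarizedThinking) return ''; // summarized thoughts stay out of history
  return parts
    .filter((part) => part.thought)
    .map((part) => part.text ?? '')
    .join('')
    .trim();
}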
@@ -39,7 +39,7 @@ export class GeminiContentGenerator implements ContentGenerator {
    this.contentGeneratorConfig = contentGeneratorConfig;
  }

  private buildGenerateContentConfig(
  private buildSamplingParameters(
    request: GenerateContentParameters,
  ): GenerateContentConfig {
    const configSamplingParams = this.contentGeneratorConfig?.samplingParams;
@@ -84,7 +84,17 @@ export class GeminiContentGenerator implements ContentGenerator {
        'frequencyPenalty',
      ),
      thinkingConfig: getParameterValue(
        this.buildThinkingConfig(),
        this.contentGeneratorConfig?.reasoning
          ? {
              includeThoughts: true,
              thinkingLevel: (this.contentGeneratorConfig.reasoning.effort ===
              'low'
                ? 'LOW'
                : this.contentGeneratorConfig.reasoning.effort === 'high'
                  ? 'HIGH'
                  : 'THINKING_LEVEL_UNSPECIFIED') as ThinkingLevel,
            }
          : undefined,
        'thinkingConfig',
        {
          includeThoughts: true,
@@ -94,40 +104,13 @@ export class GeminiContentGenerator implements ContentGenerator {
    };
  }

  private buildThinkingConfig():
    | { includeThoughts: boolean; thinkingLevel?: ThinkingLevel }
    | undefined {
    const reasoning = this.contentGeneratorConfig?.reasoning;

    if (reasoning === false) {
      return { includeThoughts: false };
    }

    if (reasoning) {
      const thinkingLevel = (
        reasoning.effort === 'low'
          ? 'LOW'
          : reasoning.effort === 'high'
            ? 'HIGH'
            : 'THINKING_LEVEL_UNSPECIFIED'
      ) as ThinkingLevel;

      return {
        includeThoughts: true,
        thinkingLevel,
      };
    }

    return undefined;
  }

  async generateContent(
    request: GenerateContentParameters,
    _userPromptId: string,
  ): Promise<GenerateContentResponse> {
    const finalRequest = {
      ...request,
      config: this.buildGenerateContentConfig(request),
      config: this.buildSamplingParameters(request),
    };
    return this.googleGenAI.models.generateContent(finalRequest);
  }
@@ -138,7 +121,7 @@ export class GeminiContentGenerator implements ContentGenerator {
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const finalRequest = {
      ...request,
      config: this.buildGenerateContentConfig(request),
      config: this.buildSamplingParameters(request),
    };
    return this.googleGenAI.models.generateContentStream(finalRequest);
  }
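Both the inlined ternary and the removed buildThinkingConfig() helper encode the same effort-to-level mapping. A hedged sketch of that mapping in isolation, with ThinkingLevel narrowed to its string literals purely for illustration:

type IllustrativeThinkingLevel = 'LOW' | 'HIGH' | 'THINKING_LEVEL_UNSPECIFIED';

function toThinkingLevel(effort?: 'low' | 'medium' | 'high'): IllustrativeThinkingLevel {
  if (effort === 'low') return 'LOW';
  if (effort === 'high') return 'HIGH';
  return 'THINKING_LEVEL_UNSPECIFIED'; // 'medium' and undefined both fall through
}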
@@ -7,6 +7,7 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { createGeminiContentGenerator } from './index.js';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import type { Config } from '../../config/config.js';
import { AuthType } from '../contentGenerator.js';

@@ -14,6 +15,10 @@ vi.mock('./geminiContentGenerator.js', () => ({
  GeminiContentGenerator: vi.fn().mockImplementation(() => ({})),
}));

vi.mock('./loggingContentGenerator.js', () => ({
  LoggingContentGenerator: vi.fn().mockImplementation((wrapped) => wrapped),
}));

describe('createGeminiContentGenerator', () => {
  let mockConfig: Config;

@@ -26,7 +31,7 @@ describe('createGeminiContentGenerator', () => {
    } as unknown as Config;
  });

  it('should create a GeminiContentGenerator', () => {
  it('should create a GeminiContentGenerator wrapped in LoggingContentGenerator', () => {
    const config = {
      model: 'gemini-1.5-flash',
      apiKey: 'test-key',
@@ -36,6 +41,7 @@ describe('createGeminiContentGenerator', () => {
    const generator = createGeminiContentGenerator(config, mockConfig);

    expect(GeminiContentGenerator).toHaveBeenCalled();
    expect(LoggingContentGenerator).toHaveBeenCalled();
    expect(generator).toBeDefined();
  });
});

@@ -11,8 +11,10 @@ import type {
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { InstallationManager } from '../../utils/installationManager.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';

export { GeminiContentGenerator } from './geminiContentGenerator.js';
export { LoggingContentGenerator } from './loggingContentGenerator.js';

/**
 * Create a Gemini content generator.
@@ -49,5 +51,5 @@ export function createGeminiContentGenerator(
    config,
  );

  return geminiContentGenerator;
  return new LoggingContentGenerator(geminiContentGenerator, gcConfig);
}
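With this change the factory hands back the generator already wrapped in the logging decorator, so call sites stop wrapping it themselves. A sketch of the hand-off, using only names that appear in this diff:

const generator = createGeminiContentGenerator(contentGeneratorConfig, gcConfig);
// generator is a LoggingContentGenerator; the undecorated instance stays reachable:
const inner = (generator as LoggingContentGenerator).getWrapped();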
@@ -4,22 +4,20 @@
 * SPDX-License-Identifier: Apache-2.0
 */

import {
import type {
  Content,
  CountTokensParameters,
  CountTokensResponse,
  EmbedContentParameters,
  EmbedContentResponse,
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
  GenerateContentResponse,
  type Content,
  type CountTokensParameters,
  type CountTokensResponse,
  type EmbedContentParameters,
  type EmbedContentResponse,
  type GenerateContentParameters,
  type GenerateContentResponseUsageMetadata,
  type ContentListUnion,
  type ContentUnion,
  type Part,
  type PartUnion,
  type FinishReason,
  ContentListUnion,
  ContentUnion,
  Part,
  PartUnion,
} from '@google/genai';
import type OpenAI from 'openai';
import {
  ApiRequestEvent,
  ApiResponseEvent,
@@ -33,8 +31,6 @@ import {
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';

interface StructuredError {
  status: number;
@@ -44,19 +40,10 @@ interface StructuredError {
 * A decorator that wraps a ContentGenerator to add logging to API calls.
 */
export class LoggingContentGenerator implements ContentGenerator {
  private openaiLogger?: OpenAILogger;
  private schemaCompliance?: 'auto' | 'openapi_30';

  constructor(
    private readonly wrapped: ContentGenerator,
    private readonly config: Config,
  ) {
    const generatorConfig = this.config.getContentGeneratorConfig();
    if (generatorConfig?.enableOpenAILogging) {
      this.openaiLogger = new OpenAILogger(generatorConfig.openAILoggingDir);
      this.schemaCompliance = generatorConfig.schemaCompliance;
    }
  }
  ) {}

  getWrapped(): ContentGenerator {
    return this.wrapped;
@@ -104,31 +91,21 @@ export class LoggingContentGenerator implements ContentGenerator {
    prompt_id: string,
  ): void {
    const errorMessage = error instanceof Error ? error.message : String(error);
    const errorType =
      (error as { type?: string })?.type ||
      (error instanceof Error ? error.name : 'unknown');
    const errorResponseId =
      (error as { requestID?: string; request_id?: string })?.requestID ||
      (error as { requestID?: string; request_id?: string })?.request_id ||
      responseId;
    const errorStatus =
      (error as { code?: string | number; status?: number })?.code ??
      (error as { status?: number })?.status ??
      (isStructuredError(error)
        ? (error as StructuredError).status
        : undefined);
    const errorType = error instanceof Error ? error.name : 'unknown';

    logApiError(
      this.config,
      new ApiErrorEvent(
        errorResponseId,
        responseId,
        model,
        errorMessage,
        durationMs,
        prompt_id,
        this.config.getContentGeneratorConfig()?.authType,
        errorType,
        errorStatus,
        isStructuredError(error)
          ? (error as StructuredError).status
          : undefined,
      ),
    );
  }
@@ -139,7 +116,6 @@ export class LoggingContentGenerator implements ContentGenerator {
  ): Promise<GenerateContentResponse> {
    const startTime = Date.now();
    this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
    const openaiRequest = await this.buildOpenAIRequestForLogging(req);
    try {
      const response = await this.wrapped.generateContent(req, userPromptId);
      const durationMs = Date.now() - startTime;
@@ -151,12 +127,10 @@ export class LoggingContentGenerator implements ContentGenerator {
        response.usageMetadata,
        JSON.stringify(response),
      );
      await this.logOpenAIInteraction(openaiRequest, response);
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }
  }
@@ -167,7 +141,6 @@ export class LoggingContentGenerator implements ContentGenerator {
  ): Promise<AsyncGenerator<GenerateContentResponse>> {
    const startTime = Date.now();
    this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
    const openaiRequest = await this.buildOpenAIRequestForLogging(req);

    let stream: AsyncGenerator<GenerateContentResponse>;
    try {
@@ -175,7 +148,6 @@ export class LoggingContentGenerator implements ContentGenerator {
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(undefined, durationMs, error, req.model, userPromptId);
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }

@@ -184,7 +156,6 @@ export class LoggingContentGenerator implements ContentGenerator {
      startTime,
      userPromptId,
      req.model,
      openaiRequest,
    );
  }

@@ -193,7 +164,6 @@ export class LoggingContentGenerator implements ContentGenerator {
    startTime: number,
    userPromptId: string,
    model: string,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): AsyncGenerator<GenerateContentResponse> {
    const responses: GenerateContentResponse[] = [];

@@ -216,9 +186,6 @@ export class LoggingContentGenerator implements ContentGenerator {
        lastUsageMetadata,
        JSON.stringify(responses),
      );
      const consolidatedResponse =
        this.consolidateGeminiResponsesForLogging(responses);
      await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
    } catch (error) {
      const durationMs = Date.now() - startTime;
      this._logApiError(
@@ -228,182 +195,10 @@ export class LoggingContentGenerator implements ContentGenerator {
        responses[0]?.modelVersion || model,
        userPromptId,
      );
      await this.logOpenAIInteraction(openaiRequest, undefined, error);
      throw error;
    }
  }

  private async buildOpenAIRequestForLogging(
    request: GenerateContentParameters,
  ): Promise<OpenAI.Chat.ChatCompletionCreateParams | undefined> {
    if (!this.openaiLogger) {
      return undefined;
    }

    const converter = new OpenAIContentConverter(
      request.model,
      this.schemaCompliance,
    );
    const messages = converter.convertGeminiRequestToOpenAI(request, {
      cleanOrphanToolCalls: false,
    });

    const openaiRequest: OpenAI.Chat.ChatCompletionCreateParams = {
      model: request.model,
      messages,
    };

    if (request.config?.tools) {
      openaiRequest.tools = await converter.convertGeminiToolsToOpenAI(
        request.config.tools,
      );
    }

    if (request.config?.temperature !== undefined) {
      openaiRequest.temperature = request.config.temperature;
    }
    if (request.config?.topP !== undefined) {
      openaiRequest.top_p = request.config.topP;
    }
    if (request.config?.maxOutputTokens !== undefined) {
      openaiRequest.max_tokens = request.config.maxOutputTokens;
    }
    if (request.config?.presencePenalty !== undefined) {
      openaiRequest.presence_penalty = request.config.presencePenalty;
    }
    if (request.config?.frequencyPenalty !== undefined) {
      openaiRequest.frequency_penalty = request.config.frequencyPenalty;
    }

    return openaiRequest;
  }

  private async logOpenAIInteraction(
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams | undefined,
    response?: GenerateContentResponse,
    error?: unknown,
  ): Promise<void> {
    if (!this.openaiLogger || !openaiRequest) {
      return;
    }

    const openaiResponse = response
      ? this.convertGeminiResponseToOpenAIForLogging(response, openaiRequest)
      : undefined;

    await this.openaiLogger.logInteraction(
      openaiRequest,
      openaiResponse,
      error instanceof Error
        ? error
        : error
          ? new Error(String(error))
          : undefined,
    );
  }

  private convertGeminiResponseToOpenAIForLogging(
    response: GenerateContentResponse,
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
  ): OpenAI.Chat.ChatCompletion {
    const converter = new OpenAIContentConverter(
      openaiRequest.model,
      this.schemaCompliance,
    );

    return converter.convertGeminiResponseToOpenAI(response);
  }

  private consolidateGeminiResponsesForLogging(
    responses: GenerateContentResponse[],
  ): GenerateContentResponse | undefined {
    if (responses.length === 0) {
      return undefined;
    }

    const consolidated = new GenerateContentResponse();
    const combinedParts: Part[] = [];
    const functionCallIndex = new Map<string, number>();
    let finishReason: FinishReason | undefined;
    let usageMetadata: GenerateContentResponseUsageMetadata | undefined;

    for (const response of responses) {
      if (response.usageMetadata) {
        usageMetadata = response.usageMetadata;
      }

      const candidate = response.candidates?.[0];
      if (candidate?.finishReason) {
        finishReason = candidate.finishReason;
      }

      const parts = candidate?.content?.parts ?? [];
      for (const part of parts as Part[]) {
        if (typeof part === 'string') {
          combinedParts.push({ text: part });
          continue;
        }

        if ('text' in part) {
          if (part.text) {
            combinedParts.push({
              text: part.text,
              ...(part.thought ? { thought: true } : {}),
              ...(part.thoughtSignature
                ? { thoughtSignature: part.thoughtSignature }
                : {}),
            });
          }
          continue;
        }

        if ('functionCall' in part && part.functionCall) {
          const callKey =
            part.functionCall.id || part.functionCall.name || 'tool_call';
          const existingIndex = functionCallIndex.get(callKey);
          const functionPart = { functionCall: part.functionCall };
          if (existingIndex !== undefined) {
            combinedParts[existingIndex] = functionPart;
          } else {
            functionCallIndex.set(callKey, combinedParts.length);
            combinedParts.push(functionPart);
          }
          continue;
        }

        if ('functionResponse' in part && part.functionResponse) {
          combinedParts.push({ functionResponse: part.functionResponse });
          continue;
        }

        combinedParts.push(part);
      }
    }

    const lastResponse = responses[responses.length - 1];
    const lastCandidate = lastResponse.candidates?.[0];

    consolidated.responseId = lastResponse.responseId;
    consolidated.createTime = lastResponse.createTime;
    consolidated.modelVersion = lastResponse.modelVersion;
    consolidated.promptFeedback = lastResponse.promptFeedback;
    consolidated.usageMetadata = usageMetadata;

    consolidated.candidates = [
      {
        content: {
          role: lastCandidate?.content?.role || 'model',
          parts: combinedParts,
        },
        ...(finishReason ? { finishReason } : {}),
        index: 0,
        safetyRatings: lastCandidate?.safetyRatings || [],
      },
    ];

    return consolidated;
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    return this.wrapped.countTokens(req);
  }
@@ -1,7 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

export { LoggingContentGenerator } from './loggingContentGenerator.js';
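The removed consolidation helper above deduplicates streamed functionCall parts by call id so the last (complete) arguments win. The core of that algorithm, as a standalone hedged sketch against the Part type from @google/genai:

// Sketch of the dedup strategy from consolidateGeminiResponsesForLogging:
// later chunks carrying the same call id overwrite the earlier, partial entry.
function mergeFunctionCalls(parts: Part[]): Part[] {
  const merged: Part[] = [];
  const indexByKey = new Map<string, number>();
  for (const part of parts) {
    if ('functionCall' in part && part.functionCall) {
      const key = part.functionCall.id || part.functionCall.name || 'tool_call';
      const existing = indexByKey.get(key);
      if (existing !== undefined) {
        merged[existing] = part; // replace the partial call in place
        continue;
      }
      indexByKey.set(key, merged.length);
    }
    merged.push(part);
  }
  return merged;
}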
@@ -1,371 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type {
  GenerateContentParameters,
  GenerateContentResponseUsageMetadata,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { LoggingContentGenerator } from './index.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import {
  logApiRequest,
  logApiResponse,
  logApiError,
} from '../../telemetry/loggers.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type OpenAI from 'openai';

vi.mock('../../telemetry/loggers.js', () => ({
  logApiRequest: vi.fn(),
  logApiResponse: vi.fn(),
  logApiError: vi.fn(),
}));

vi.mock('../../utils/openaiLogger.js', () => ({
  OpenAILogger: vi.fn().mockImplementation(() => ({
    logInteraction: vi.fn().mockResolvedValue(undefined),
  })),
}));

const convertGeminiRequestToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiRequestToOpenAI')
  .mockReturnValue([{ role: 'user', content: 'converted' }]);
const convertGeminiToolsToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiToolsToOpenAI')
  .mockResolvedValue([{ type: 'function', function: { name: 'tool' } }]);
const convertGeminiResponseToOpenAISpy = vi
  .spyOn(OpenAIContentConverter.prototype, 'convertGeminiResponseToOpenAI')
  .mockReturnValue({
    id: 'openai-response',
    object: 'chat.completion',
    created: 123456789,
    model: 'test-model',
    choices: [],
  } as OpenAI.Chat.ChatCompletion);

const createConfig = (overrides: Record<string, unknown> = {}): Config =>
  ({
    getContentGeneratorConfig: () => ({
      authType: 'openai',
      enableOpenAILogging: false,
      ...overrides,
    }),
  }) as Config;

const createWrappedGenerator = (
  generateContent: ContentGenerator['generateContent'],
  generateContentStream: ContentGenerator['generateContentStream'],
): ContentGenerator =>
  ({
    generateContent,
    generateContentStream,
    countTokens: vi.fn(),
    embedContent: vi.fn(),
    useSummarizedThinking: vi.fn().mockReturnValue(false),
  }) as ContentGenerator;

const createResponse = (
  responseId: string,
  modelVersion: string,
  parts: Array<Record<string, unknown>>,
  usageMetadata?: GenerateContentResponseUsageMetadata,
  finishReason?: string,
): GenerateContentResponse => {
  const response = new GenerateContentResponse();
  response.responseId = responseId;
  response.modelVersion = modelVersion;
  response.usageMetadata = usageMetadata;
  response.candidates = [
    {
      content: {
        role: 'model',
        parts: parts as never[],
      },
      finishReason: finishReason as never,
      index: 0,
      safetyRatings: [],
    },
  ];
  return response;
};

describe('LoggingContentGenerator', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  afterEach(() => {
    convertGeminiRequestToOpenAISpy.mockClear();
    convertGeminiToolsToOpenAISpy.mockClear();
    convertGeminiResponseToOpenAISpy.mockClear();
  });

  it('logs request/response, normalizes thought parts, and logs OpenAI interaction', async () => {
    const wrapped = createWrappedGenerator(
      vi.fn().mockResolvedValue(
        createResponse(
          'resp-1',
          'model-v2',
          [{ text: 'ok' }],
          {
            promptTokenCount: 3,
            candidatesTokenCount: 5,
            totalTokenCount: 8,
          },
          'STOP',
        ),
      ),
      vi.fn(),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({
        enableOpenAILogging: true,
        openAILoggingDir: 'logs',
        schemaCompliance: 'openapi_30',
      }),
    );

    const request = {
      model: 'test-model',
      contents: [
        {
          role: 'user',
          parts: [
            { text: 'Hello', thought: 'internal' },
            {
              functionCall: { id: 'call-1', name: 'tool', args: '{}' },
              thought: 'strip-me',
            },
            null,
          ],
        },
      ],
      config: {
        temperature: 0.3,
        topP: 0.9,
        maxOutputTokens: 256,
        presencePenalty: 0.2,
        frequencyPenalty: 0.1,
        tools: [
          {
            functionDeclarations: [
              { name: 'tool', description: 'desc', parameters: {} },
            ],
          },
        ],
      },
    } as unknown as GenerateContentParameters;

    const response = await generator.generateContent(request, 'prompt-1');

    expect(response.responseId).toBe('resp-1');
    expect(logApiRequest).toHaveBeenCalledTimes(1);
    const [, requestEvent] = vi.mocked(logApiRequest).mock.calls[0];
    const loggedContents = JSON.parse(requestEvent.request_text || '[]');
    expect(loggedContents[0].parts[0]).toEqual({
      text: 'Hello\n[Thought: internal]',
    });
    expect(loggedContents[0].parts[1]).toEqual({
      functionCall: { id: 'call-1', name: 'tool', args: '{}' },
    });

    expect(logApiResponse).toHaveBeenCalledTimes(1);
    const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
    expect(responseEvent.response_id).toBe('resp-1');
    expect(responseEvent.model).toBe('model-v2');
    expect(responseEvent.prompt_id).toBe('prompt-1');
    expect(responseEvent.input_token_count).toBe(3);

    expect(convertGeminiRequestToOpenAISpy).toHaveBeenCalledTimes(1);
    expect(convertGeminiToolsToOpenAISpy).toHaveBeenCalledTimes(1);
    expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);

    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
    const [openaiRequest, openaiResponse, openaiError] =
      openaiLoggerInstance.logInteraction.mock.calls[0];
    expect(openaiRequest).toEqual(
      expect.objectContaining({
        model: 'test-model',
        messages: [{ role: 'user', content: 'converted' }],
        tools: [{ type: 'function', function: { name: 'tool' } }],
        temperature: 0.3,
        top_p: 0.9,
        max_tokens: 256,
        presence_penalty: 0.2,
        frequency_penalty: 0.1,
      }),
    );
    expect(openaiResponse).toEqual({
      id: 'openai-response',
      object: 'chat.completion',
      created: 123456789,
      model: 'test-model',
      choices: [],
    });
    expect(openaiError).toBeUndefined();
  });

  it('logs errors with status code and request id, then rethrows', async () => {
    const error = Object.assign(new Error('boom'), {
      code: 429,
      request_id: 'req-99',
      type: 'rate_limit',
    });
    const wrapped = createWrappedGenerator(
      vi.fn().mockRejectedValue(error),
      vi.fn(),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    await expect(
      generator.generateContent(request, 'prompt-2'),
    ).rejects.toThrow('boom');

    expect(logApiError).toHaveBeenCalledTimes(1);
    const [, errorEvent] = vi.mocked(logApiError).mock.calls[0];
    expect(errorEvent.response_id).toBe('req-99');
    expect(errorEvent.status_code).toBe(429);
    expect(errorEvent.error_type).toBe('rate_limit');
    expect(errorEvent.prompt_id).toBe('prompt-2');

    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    const [, , loggedError] = openaiLoggerInstance.logInteraction.mock.calls[0];
    expect(loggedError).toBeInstanceOf(Error);
    expect((loggedError as Error).message).toBe('boom');
  });

  it('logs streaming responses and consolidates tool calls', async () => {
    const usage1 = {
      promptTokenCount: 1,
    } as GenerateContentResponseUsageMetadata;
    const usage2 = {
      promptTokenCount: 2,
      candidatesTokenCount: 4,
      totalTokenCount: 6,
    } as GenerateContentResponseUsageMetadata;

    const response1 = createResponse(
      'resp-1',
      'model-stream',
      [
        { text: 'Hello' },
        { functionCall: { id: 'call-1', name: 'tool', args: '{}' } },
      ],
      usage1,
    );
    const response2 = createResponse(
      'resp-2',
      'model-stream',
      [
        { text: ' world' },
        { functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
        { functionResponse: { name: 'tool', response: { output: 'ok' } } },
      ],
      usage2,
      'STOP',
    );

    const wrapped = createWrappedGenerator(
      vi.fn(),
      vi.fn().mockResolvedValue(
        (async function* () {
          yield response1;
          yield response2;
        })(),
      ),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    const stream = await generator.generateContentStream(request, 'prompt-3');
    const seen: GenerateContentResponse[] = [];
    for await (const item of stream) {
      seen.push(item);
    }
    expect(seen).toHaveLength(2);

    expect(logApiResponse).toHaveBeenCalledTimes(1);
    const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
    expect(responseEvent.response_id).toBe('resp-1');
    expect(responseEvent.input_token_count).toBe(2);

    expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
    const [consolidatedResponse] =
      convertGeminiResponseToOpenAISpy.mock.calls[0];
    const consolidatedParts =
      consolidatedResponse.candidates?.[0]?.content?.parts || [];
    expect(consolidatedParts).toEqual([
      { text: 'Hello' },
      { functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
      { text: ' world' },
      { functionResponse: { name: 'tool', response: { output: 'ok' } } },
    ]);
    expect(consolidatedResponse.usageMetadata).toBe(usage2);
    expect(consolidatedResponse.responseId).toBe('resp-2');
    expect(consolidatedResponse.candidates?.[0]?.finishReason).toBe('STOP');
  });

  it('logs stream errors and skips response logging', async () => {
    const response1 = createResponse('resp-1', 'model-stream', [
      { text: 'partial' },
    ]);
    const streamError = new Error('stream-fail');
    const wrapped = createWrappedGenerator(
      vi.fn(),
      vi.fn().mockResolvedValue(
        (async function* () {
          yield response1;
          throw streamError;
        })(),
      ),
    );
    const generator = new LoggingContentGenerator(
      wrapped,
      createConfig({ enableOpenAILogging: true }),
    );

    const request = {
      model: 'test-model',
      contents: 'Hello',
    } as unknown as GenerateContentParameters;

    const stream = await generator.generateContentStream(request, 'prompt-4');
    await expect(async () => {
      for await (const _item of stream) {
        // Consume stream to trigger error.
      }
    }).rejects.toThrow('stream-fail');

    expect(logApiResponse).not.toHaveBeenCalled();
    expect(logApiError).toHaveBeenCalledTimes(1);
    const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
      ?.value as { logInteraction: ReturnType<typeof vi.fn> };
    expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
  });
});
@@ -236,9 +236,8 @@ export class OpenAIContentConverter {
   */
  convertGeminiRequestToOpenAI(
    request: GenerateContentParameters,
    options: { cleanOrphanToolCalls: boolean } = { cleanOrphanToolCalls: true },
  ): OpenAI.Chat.ChatCompletionMessageParam[] {
    let messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
    const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];

    // Handle system instruction from config
    this.addSystemInstructionMessage(request, messages);
@@ -247,89 +246,11 @@ export class OpenAIContentConverter {
    this.processContents(request.contents, messages);

    // Clean up orphaned tool calls and merge consecutive assistant messages
    if (options.cleanOrphanToolCalls) {
      messages = this.cleanOrphanedToolCalls(messages);
    }
    messages = this.mergeConsecutiveAssistantMessages(messages);
    const cleanedMessages = this.cleanOrphanedToolCalls(messages);
    const mergedMessages =
      this.mergeConsecutiveAssistantMessages(cleanedMessages);

    return messages;
  }

  /**
   * Convert Gemini response to OpenAI completion format (for logging).
   */
  convertGeminiResponseToOpenAI(
    response: GenerateContentResponse,
  ): OpenAI.Chat.ChatCompletion {
    const candidate = response.candidates?.[0];
    const parts = (candidate?.content?.parts || []) as Part[];
    const parsedParts = this.parseParts(parts);

    const message: ExtendedCompletionMessage = {
      role: 'assistant',
      content: parsedParts.contentParts.join('') || null,
      refusal: null,
    };

    const reasoningContent = parsedParts.thoughtParts.join('');
    if (reasoningContent) {
      message.reasoning_content = reasoningContent;
    }

    if (parsedParts.functionCalls.length > 0) {
      message.tool_calls = parsedParts.functionCalls.map((call, index) => ({
        id: call.id || `call_${index}`,
        type: 'function' as const,
        function: {
          name: call.name || '',
          arguments: JSON.stringify(call.args || {}),
        },
      }));
    }

    const finishReason = this.mapGeminiFinishReasonToOpenAI(
      candidate?.finishReason,
    );

    const usageMetadata = response.usageMetadata;
    const usage: OpenAI.CompletionUsage = {
      prompt_tokens: usageMetadata?.promptTokenCount || 0,
      completion_tokens: usageMetadata?.candidatesTokenCount || 0,
      total_tokens: usageMetadata?.totalTokenCount || 0,
    };

    if (usageMetadata?.cachedContentTokenCount !== undefined) {
      (
        usage as OpenAI.CompletionUsage & {
          prompt_tokens_details?: { cached_tokens?: number };
        }
      ).prompt_tokens_details = {
        cached_tokens: usageMetadata.cachedContentTokenCount,
      };
    }

    const createdMs = response.createTime
      ? Number(response.createTime)
      : Date.now();
    const createdSeconds = Number.isFinite(createdMs)
      ? Math.floor(createdMs / 1000)
      : Math.floor(Date.now() / 1000);

    return {
      id: response.responseId || `gemini-${Date.now()}`,
      object: 'chat.completion',
      created: createdSeconds,
      model: response.modelVersion || this.model,
      choices: [
        {
          index: 0,
          message,
          finish_reason: finishReason,
          logprobs: null,
        },
      ],
      usage,
    };
    return mergedMessages;
  }

  /**
@@ -915,6 +836,84 @@ export class OpenAIContentConverter {
    return response;
  }

  /**
   * Convert Gemini response format to OpenAI chat completion format for logging
   */
  convertGeminiResponseToOpenAI(
    response: GenerateContentResponse,
  ): OpenAI.Chat.ChatCompletion {
    const candidate = response.candidates?.[0];
    const content = candidate?.content;

    let messageContent: string | null = null;
    const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];

    if (content?.parts) {
      const textParts: string[] = [];

      for (const part of content.parts) {
        if ('text' in part && part.text) {
          textParts.push(part.text);
        } else if ('functionCall' in part && part.functionCall) {
          toolCalls.push({
            id: part.functionCall.id || `call_${toolCalls.length}`,
            type: 'function' as const,
            function: {
              name: part.functionCall.name || '',
              arguments: JSON.stringify(part.functionCall.args || {}),
            },
          });
        }
      }

      messageContent = textParts.join('').trimEnd();
    }

    const choice: OpenAI.Chat.ChatCompletion.Choice = {
      index: 0,
      message: {
        role: 'assistant',
        content: messageContent,
        refusal: null,
      },
      finish_reason: this.mapGeminiFinishReasonToOpenAI(
        candidate?.finishReason,
      ) as OpenAI.Chat.ChatCompletion.Choice['finish_reason'],
      logprobs: null,
    };

    if (toolCalls.length > 0) {
      choice.message.tool_calls = toolCalls;
    }

    const openaiResponse: OpenAI.Chat.ChatCompletion = {
      id: response.responseId || `chatcmpl-${Date.now()}`,
      object: 'chat.completion',
      created: response.createTime
        ? Number(response.createTime)
        : Math.floor(Date.now() / 1000),
      model: this.model,
      choices: [choice],
    };

    // Add usage metadata if available
    if (response.usageMetadata) {
      openaiResponse.usage = {
        prompt_tokens: response.usageMetadata.promptTokenCount || 0,
        completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
        total_tokens: response.usageMetadata.totalTokenCount || 0,
      };

      if (response.usageMetadata.cachedContentTokenCount) {
        openaiResponse.usage.prompt_tokens_details = {
          cached_tokens: response.usageMetadata.cachedContentTokenCount,
        };
      }
    }

    return openaiResponse;
  }

  /**
   * Map OpenAI finish reasons to Gemini finish reasons
   */
@@ -932,24 +931,29 @@ export class OpenAIContentConverter {
    return mapping[openaiReason] || FinishReason.FINISH_REASON_UNSPECIFIED;
  }

  private mapGeminiFinishReasonToOpenAI(
    geminiReason?: FinishReason,
  ): 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' {
    if (!geminiReason) {
      return 'stop';
    }
  /**
   * Map Gemini finish reasons to OpenAI finish reasons
   */
  private mapGeminiFinishReasonToOpenAI(geminiReason?: unknown): string {
    if (!geminiReason) return 'stop';

    switch (geminiReason) {
      case FinishReason.STOP:
      case 'STOP':
      case 1: // FinishReason.STOP
        return 'stop';
      case FinishReason.MAX_TOKENS:
      case 'MAX_TOKENS':
      case 2: // FinishReason.MAX_TOKENS
        return 'length';
      case FinishReason.SAFETY:
      case 'SAFETY':
      case 3: // FinishReason.SAFETY
        return 'content_filter';
      case 'RECITATION':
      case 4: // FinishReason.RECITATION
        return 'content_filter';
      case 'OTHER':
      case 5: // FinishReason.OTHER
        return 'stop';
      default:
        if (geminiReason === ('RECITATION' as FinishReason)) {
          return 'content_filter';
        }
        return 'stop';
    }
  }
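Both versions of the Gemini-to-OpenAI finish-reason mapping collapse onto the same small table. A compact, string-keyed sketch of that table for illustration (the enum and numeric cases in the diff resolve to the same strings; unknown reasons default to 'stop'):

// Illustrative lookup equivalent to the switch above.
const GEMINI_TO_OPENAI_FINISH: Record<string, string> = {
  STOP: 'stop',
  MAX_TOKENS: 'length',
  SAFETY: 'content_filter',
  RECITATION: 'content_filter',
  OTHER: 'stop',
};

function mapFinishReason(reason?: string): string {
  return (reason && GEMINI_TO_OPENAI_FINISH[reason]) || 'stop';
}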
@@ -7,7 +7,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { GenerateContentParameters } from '@google/genai';
import { EnhancedErrorHandler } from './errorHandler.js';
import type { RequestContext } from './errorHandler.js';
import type { RequestContext } from './telemetryService.js';

describe('EnhancedErrorHandler', () => {
  let errorHandler: EnhancedErrorHandler;

@@ -5,15 +5,7 @@
 */

import type { GenerateContentParameters } from '@google/genai';

export interface RequestContext {
  userPromptId: string;
  model: string;
  authType: string;
  startTime: number;
  duration: number;
  isStreaming: boolean;
}
import type { RequestContext } from './telemetryService.js';

export interface ErrorHandler {
  handle(

@@ -91,4 +91,11 @@ export function determineProvider(
  return new DefaultOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
}

// Services
export {
  type TelemetryService,
  type RequestContext,
  DefaultTelemetryService,
} from './telemetryService.js';

export { type ErrorHandler, EnhancedErrorHandler } from './errorHandler.js';
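RequestContext now lives in telemetryService.ts and is re-exported from the barrel above. For reference, a context object conforming to the interface shown earlier in this diff (values are illustrative):

const context: RequestContext = {
  userPromptId: 'prompt-1',
  model: 'test-model',
  authType: 'openai',
  startTime: Date.now(),
  duration: 0, // updated when the request completes
  isStreaming: false,
};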
@@ -11,6 +11,7 @@ import type {
} from '@google/genai';
import type { PipelineConfig } from './pipeline.js';
import { ContentGenerationPipeline } from './pipeline.js';
import { DefaultTelemetryService } from './telemetryService.js';
import { EnhancedErrorHandler } from './errorHandler.js';
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
@@ -28,6 +29,11 @@ export class OpenAIContentGenerator implements ContentGenerator {
      cliConfig,
      provider,
      contentGeneratorConfig,
      telemetryService: new DefaultTelemetryService(
        cliConfig,
        contentGeneratorConfig.enableOpenAILogging,
        contentGeneratorConfig.openAILoggingDir,
      ),
      errorHandler: new EnhancedErrorHandler(
        (error: unknown, request: GenerateContentParameters) =>
          this.shouldSuppressErrorLogging(error, request),

@@ -15,6 +15,7 @@ import { OpenAIContentConverter } from './converter.js';
import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig, AuthType } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import type { TelemetryService } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';

// Mock dependencies
@@ -27,6 +28,7 @@ describe('ContentGenerationPipeline', () => {
  let mockProvider: OpenAICompatibleProvider;
  let mockClient: OpenAI;
  let mockConverter: OpenAIContentConverter;
  let mockTelemetryService: TelemetryService;
  let mockErrorHandler: ErrorHandler;
  let mockContentGeneratorConfig: ContentGeneratorConfig;
  let mockCliConfig: Config;
@@ -61,6 +63,13 @@ describe('ContentGenerationPipeline', () => {
      getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
    };

    // Mock telemetry service
    mockTelemetryService = {
      logSuccess: vi.fn().mockResolvedValue(undefined),
      logError: vi.fn().mockResolvedValue(undefined),
      logStreamingSuccess: vi.fn().mockResolvedValue(undefined),
    };

    // Mock error handler
    mockErrorHandler = {
      handle: vi.fn().mockImplementation((error: unknown) => {
@@ -90,6 +99,7 @@ describe('ContentGenerationPipeline', () => {
      cliConfig: mockCliConfig,
      provider: mockProvider,
      contentGeneratorConfig: mockContentGeneratorConfig,
      telemetryService: mockTelemetryService,
      errorHandler: mockErrorHandler,
    };

@@ -162,6 +172,17 @@ describe('ContentGenerationPipeline', () => {
    expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
      mockOpenAIResponse,
    );
    expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: false,
      }),
      mockGeminiResponse,
      expect.any(Object),
      mockOpenAIResponse,
    );
  });

  it('should handle tools in request', async () => {
@@ -247,6 +268,16 @@ describe('ContentGenerationPipeline', () => {
      'API Error',
    );

    expect(mockTelemetryService.logError).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: false,
      }),
      testError,
      expect.any(Object),
    );
    expect(mockErrorHandler.handle).toHaveBeenCalledWith(
      testError,
      expect.any(Object),
@@ -345,6 +376,17 @@ describe('ContentGenerationPipeline', () => {
        signal: undefined,
      }),
    );
    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      [mockGeminiResponse1, mockGeminiResponse2],
      expect.any(Object),
      [mockChunk1, mockChunk2],
    );
  });

  it('should filter empty responses', async () => {
@@ -448,6 +490,16 @@ describe('ContentGenerationPipeline', () => {

    expect(results).toHaveLength(0); // No results due to error
    expect(mockConverter.resetStreamingToolCalls).toHaveBeenCalledTimes(2); // Once at start, once on error
    expect(mockTelemetryService.logError).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      testError,
      expect.any(Object),
    );
    expect(mockErrorHandler.handle).toHaveBeenCalledWith(
      testError,
      expect.any(Object),
@@ -598,6 +650,18 @@ describe('ContentGenerationPipeline', () => {
      candidatesTokenCount: 20,
      totalTokenCount: 30,
    });

    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      results,
      expect.any(Object),
      [mockChunk1, mockChunk2, mockChunk3],
    );
  });

  it('should handle ideal case where last chunk has both finishReason and usageMetadata', async () => {
@@ -789,6 +853,18 @@ describe('ContentGenerationPipeline', () => {
      candidatesTokenCount: 20,
      totalTokenCount: 30,
    });

    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
      }),
      results,
      expect.any(Object),
      [mockChunk1, mockChunk2, mockChunk3],
    );
  });

  it('should handle providers that send finishReason and valid usage in same chunk', async () => {
@@ -1042,6 +1118,19 @@ describe('ContentGenerationPipeline', () => {
    await pipeline.execute(request, userPromptId);

    // Assert
    expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: false,
        startTime: expect.any(Number),
        duration: expect.any(Number),
      }),
      expect.any(Object),
      expect.any(Object),
      expect.any(Object),
    );
  });

  it('should create context with correct properties for streaming request', async () => {
@@ -1084,6 +1173,19 @@ describe('ContentGenerationPipeline', () => {
    }

    // Assert
    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        userPromptId,
        model: 'test-model',
        authType: 'openai',
        isStreaming: true,
        startTime: expect.any(Number),
        duration: expect.any(Number),
      }),
      expect.any(Array),
      expect.any(Object),
      expect.any(Array),
    );
  });

  it('should collect all OpenAI chunks for logging even when Gemini responses are filtered', async () => {
@@ -1227,6 +1329,22 @@ describe('ContentGenerationPipeline', () => {
    // Should only yield the final response (empty ones are filtered)
    expect(responses).toHaveLength(1);
    expect(responses[0]).toBe(finalGeminiResponse);

    // Verify telemetry was called with ALL OpenAI chunks, including the filtered ones
    expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
      expect.objectContaining({
        model: 'test-model',
        duration: expect.any(Number),
        userPromptId: 'test-prompt-id',
        authType: 'openai',
      }),
      [finalGeminiResponse], // Only the non-empty Gemini response
      expect.objectContaining({
        model: 'test-model',
        messages: [{ role: 'user', content: 'test' }],
      }),
      [partialToolCallChunk1, partialToolCallChunk2, finishChunk], // ALL OpenAI chunks
    );
  });
});
});

@@ -13,12 +13,14 @@ import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import { OpenAIContentConverter } from './converter.js';
import type { ErrorHandler, RequestContext } from './errorHandler.js';
import type { TelemetryService, RequestContext } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';

export interface PipelineConfig {
  cliConfig: Config;
  provider: OpenAICompatibleProvider;
  contentGeneratorConfig: ContentGeneratorConfig;
  telemetryService: TelemetryService;
  errorHandler: ErrorHandler;
}

@@ -44,7 +46,7 @@ export class ContentGenerationPipeline {
      request,
      userPromptId,
      false,
      async (openaiRequest) => {
      async (openaiRequest, context) => {
        const openaiResponse = (await this.client.chat.completions.create(
          openaiRequest,
          {
@@ -55,6 +57,14 @@ export class ContentGenerationPipeline {
        const geminiResponse =
          this.converter.convertOpenAIResponseToGemini(openaiResponse);

        // Log success
        await this.config.telemetryService.logSuccess(
          context,
          geminiResponse,
          openaiRequest,
          openaiResponse,
        );

        return geminiResponse;
      },
    );
@@ -78,7 +88,12 @@ export class ContentGenerationPipeline {
        )) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;

        // Stage 2: Process stream with conversion and logging
        return this.processStreamWithLogging(stream, context, request);
        return this.processStreamWithLogging(
          stream,
          context,
          openaiRequest,
          request,
        );
      },
    );
  }
@@ -95,9 +110,11 @@ export class ContentGenerationPipeline {
  private async *processStreamWithLogging(
    stream: AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
    context: RequestContext,
    openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
    request: GenerateContentParameters,
  ): AsyncGenerator<GenerateContentResponse> {
    const collectedGeminiResponses: GenerateContentResponse[] = [];
    const collectedOpenAIChunks: OpenAI.Chat.ChatCompletionChunk[] = [];

    // Reset streaming tool calls to prevent data pollution from previous streams
    this.converter.resetStreamingToolCalls();
@@ -108,6 +125,9 @@ export class ContentGenerationPipeline {
    try {
      // Stage 2a: Convert and yield each chunk while preserving original
      for await (const chunk of stream) {
        // Always collect OpenAI chunks for logging, regardless of Gemini conversion result
        collectedOpenAIChunks.push(chunk);

        const response = this.converter.convertOpenAIChunkToGemini(chunk);

        // Stage 2b: Filter empty responses to avoid downstream issues
@@ -144,8 +164,15 @@ export class ContentGenerationPipeline {
        yield pendingFinishResponse;
      }

      // Stage 2e: Stream completed successfully
      // Stage 2e: Stream completed successfully - perform logging with original OpenAI chunks
      context.duration = Date.now() - context.startTime;

      await this.config.telemetryService.logStreamingSuccess(
        context,
        collectedGeminiResponses,
        openaiRequest,
        collectedOpenAIChunks,
      );
    } catch (error) {
      // Clear streaming tool calls on error to prevent data pollution
      this.converter.resetStreamingToolCalls();
@@ -231,7 +258,7 @@ export class ContentGenerationPipeline {
|
||||
const baseRequest: OpenAI.Chat.ChatCompletionCreateParams = {
|
||||
model: this.contentGeneratorConfig.model,
|
||||
messages,
|
||||
...this.buildGenerateContentConfig(request),
|
||||
...this.buildSamplingParameters(request),
|
||||
};
|
||||
|
||||
// Add streaming options if present
|
||||
@@ -253,7 +280,7 @@ export class ContentGenerationPipeline {
|
||||
return this.config.provider.buildRequest(baseRequest, userPromptId);
|
||||
}
|
||||
|
||||
private buildGenerateContentConfig(
|
||||
private buildSamplingParameters(
|
||||
request: GenerateContentParameters,
|
||||
): Record<string, unknown> {
|
||||
const defaultSamplingParams =
|
||||
@@ -289,7 +316,7 @@ export class ContentGenerationPipeline {
|
||||
return value !== undefined ? { [key]: value } : {};
|
||||
};
|
||||
|
||||
const params: Record<string, unknown> = {
|
||||
const params = {
|
||||
// Parameters with request fallback but no defaults
|
||||
...addParameterIfDefined('temperature', 'temperature', 'temperature'),
|
||||
...addParameterIfDefined('top_p', 'top_p', 'topP'),
|
||||
@@ -310,24 +337,11 @@ export class ContentGenerationPipeline {
|
||||
'frequency_penalty',
|
||||
'frequencyPenalty',
|
||||
),
|
||||
...this.buildReasoningConfig(),
|
||||
};
|
||||
|
||||
return params;
|
||||
}
|
||||
|
||||
private buildReasoningConfig(): Record<string, unknown> {
|
||||
const reasoning = this.contentGeneratorConfig.reasoning;
|
||||
|
||||
if (reasoning === false) {
|
||||
return {};
|
||||
}
|
||||
|
||||
return {
|
||||
reasoning_effort: reasoning?.effort ?? 'medium',
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Common error handling wrapper for execute methods
|
||||
*/
|
||||
@@ -355,7 +369,13 @@ export class ContentGenerationPipeline {
|
||||
return result;
|
||||
} catch (error) {
|
||||
// Use shared error handling logic
|
||||
return await this.handleError(error, context, request);
|
||||
return await this.handleError(
|
||||
error,
|
||||
context,
|
||||
request,
|
||||
userPromptId,
|
||||
isStreaming,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -367,8 +387,37 @@ export class ContentGenerationPipeline {
|
||||
error: unknown,
|
||||
context: RequestContext,
|
||||
request: GenerateContentParameters,
|
||||
userPromptId?: string,
|
||||
isStreaming?: boolean,
|
||||
): Promise<never> {
|
||||
context.duration = Date.now() - context.startTime;
|
||||
|
||||
// Build request for logging (may fail, but we still want to log the error)
|
||||
let openaiRequest: OpenAI.Chat.ChatCompletionCreateParams;
|
||||
try {
|
||||
if (userPromptId !== undefined && isStreaming !== undefined) {
|
||||
openaiRequest = await this.buildRequest(
|
||||
request,
|
||||
userPromptId,
|
||||
isStreaming,
|
||||
);
|
||||
} else {
|
||||
// For processStreamWithLogging, we don't have userPromptId/isStreaming,
|
||||
// so create a minimal request
|
||||
openaiRequest = {
|
||||
model: this.contentGeneratorConfig.model,
|
||||
messages: [],
|
||||
};
|
||||
}
|
||||
} catch (_buildError) {
|
||||
// If we can't build the request, create a minimal one for logging
|
||||
openaiRequest = {
|
||||
model: this.contentGeneratorConfig.model,
|
||||
messages: [],
|
||||
};
|
||||
}
|
||||
|
||||
await this.config.telemetryService.logError(context, error, openaiRequest);
|
||||
this.config.errorHandler.handle(error, context, request);
|
||||
}
|
||||
|
||||
|
||||
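For reference, the buildReasoningConfig helper that the hunk above drops mapped the pipeline's reasoning setting onto a single reasoning_effort parameter. A minimal standalone sketch of that mapping, assuming the shapes visible in this diff (the effort values themselves are an assumption, not confirmed here):

// Sketch only; `ReasoningSetting` is a hypothetical stand-in for the config type.
type ReasoningSetting = false | { effort?: string } | undefined;

function reasoningParams(reasoning: ReasoningSetting): Record<string, unknown> {
  if (reasoning === false) {
    return {}; // reasoning explicitly disabled: send no parameter
  }
  return { reasoning_effort: reasoning?.effort ?? 'medium' };
}

// reasoningParams(false)              -> {}
// reasoningParams(undefined)          -> { reasoning_effort: 'medium' }
// reasoningParams({ effort: 'high' }) -> { reasoning_effort: 'high' }
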
@@ -39,8 +39,7 @@ export class DashScopeOpenAICompatibleProvider
    return (
      authType === AuthType.QWEN_OAUTH ||
      baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' ||
      baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1' ||
      !baseUrl
      baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
    );
  }

@@ -145,7 +144,9 @@ export class DashScopeOpenAICompatibleProvider

  getDefaultGenerationConfig(): GenerateContentConfig {
    return {
      temperature: 0.3,
      temperature: 0.7,
      topP: 0.8,
      topK: 20,
    };
  }

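With the change above, a missing baseUrl no longer falls through to DashScope; only Qwen OAuth or an explicit DashScope endpoint matches. A condensed sketch of the resulting predicate, with AuthType.QWEN_OAUTH represented by a plain string constant for illustration:

// Hypothetical standalone version of the matching logic shown above.
const QWEN_OAUTH = 'qwen-oauth';

function matchesDashScope(authType: string, baseUrl?: string): boolean {
  return (
    authType === QWEN_OAUTH ||
    baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' ||
    baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
  );
}

// matchesDashScope('qwen-oauth', undefined) -> true
// matchesDashScope('openai', undefined)     -> false (previously true via !baseUrl)
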
File diff suppressed because it is too large
@@ -0,0 +1,275 @@
/**
 * @license
 * Copyright 2025 Qwen
 * SPDX-License-Identifier: Apache-2.0
 */

import type { Config } from '../../config/config.js';
import { logApiError, logApiResponse } from '../../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../../telemetry/types.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type { GenerateContentResponse } from '@google/genai';
import type OpenAI from 'openai';
import type { ExtendedCompletionChunkDelta } from './converter.js';

export interface RequestContext {
  userPromptId: string;
  model: string;
  authType: string;
  startTime: number;
  duration: number;
  isStreaming: boolean;
}

export interface TelemetryService {
  logSuccess(
    context: RequestContext,
    response: GenerateContentResponse,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiResponse?: OpenAI.Chat.ChatCompletion,
  ): Promise<void>;

  logError(
    context: RequestContext,
    error: unknown,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): Promise<void>;

  logStreamingSuccess(
    context: RequestContext,
    responses: GenerateContentResponse[],
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
  ): Promise<void>;
}

export class DefaultTelemetryService implements TelemetryService {
  private logger: OpenAILogger;

  constructor(
    private config: Config,
    private enableOpenAILogging: boolean = false,
    openAILoggingDir?: string,
  ) {
    // Always create a new logger instance to ensure correct working directory
    // If no custom directory is provided, undefined will use the default path
    this.logger = new OpenAILogger(openAILoggingDir);
  }

  async logSuccess(
    context: RequestContext,
    response: GenerateContentResponse,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiResponse?: OpenAI.Chat.ChatCompletion,
  ): Promise<void> {
    // Log API response event for UI telemetry
    const responseEvent = new ApiResponseEvent(
      response.responseId || 'unknown',
      context.model,
      context.duration,
      context.userPromptId,
      context.authType,
      response.usageMetadata,
    );

    logApiResponse(this.config, responseEvent);

    // Log interaction if enabled
    if (this.enableOpenAILogging && openaiRequest && openaiResponse) {
      await this.logger.logInteraction(openaiRequest, openaiResponse);
    }
  }

  async logError(
    context: RequestContext,
    error: unknown,
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): Promise<void> {
    const errorMessage = error instanceof Error ? error.message : String(error);

    // Log API error event for UI telemetry
    const errorEvent = new ApiErrorEvent(
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (error as any)?.requestID || 'unknown',
      context.model,
      errorMessage,
      context.duration,
      context.userPromptId,
      context.authType,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (error as any)?.type,
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (error as any)?.code,
    );
    logApiError(this.config, errorEvent);

    // Log error interaction if enabled
    if (this.enableOpenAILogging && openaiRequest) {
      await this.logger.logInteraction(
        openaiRequest,
        undefined,
        error as Error,
      );
    }
  }

  async logStreamingSuccess(
    context: RequestContext,
    responses: GenerateContentResponse[],
    openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
  ): Promise<void> {
    // Get final usage metadata from the last response that has it
    const finalUsageMetadata = responses
      .slice()
      .reverse()
      .find((r) => r.usageMetadata)?.usageMetadata;

    // Log API response event for UI telemetry
    const responseEvent = new ApiResponseEvent(
      responses[responses.length - 1]?.responseId || 'unknown',
      context.model,
      context.duration,
      context.userPromptId,
      context.authType,
      finalUsageMetadata,
    );

    logApiResponse(this.config, responseEvent);

    // Log interaction if enabled - combine chunks only when needed
    if (
      this.enableOpenAILogging &&
      openaiRequest &&
      openaiChunks &&
      openaiChunks.length > 0
    ) {
      const combinedResponse = this.combineOpenAIChunksForLogging(openaiChunks);
      await this.logger.logInteraction(openaiRequest, combinedResponse);
    }
  }

  /**
   * Combine OpenAI chunks for logging purposes
   * This method consolidates all OpenAI stream chunks into a single ChatCompletion response
   * for telemetry and logging purposes, avoiding unnecessary format conversions
   */
  private combineOpenAIChunksForLogging(
    chunks: OpenAI.Chat.ChatCompletionChunk[],
  ): OpenAI.Chat.ChatCompletion {
    if (chunks.length === 0) {
      throw new Error('No chunks to combine');
    }

    const firstChunk = chunks[0];

    // Combine all content from chunks
    let combinedContent = '';
    const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
    let finishReason:
      | 'stop'
      | 'length'
      | 'tool_calls'
      | 'content_filter'
      | 'function_call'
      | null = null;
    let combinedReasoning = '';
    let usage:
      | {
          prompt_tokens: number;
          completion_tokens: number;
          total_tokens: number;
        }
      | undefined;

    for (const chunk of chunks) {
      const choice = chunk.choices?.[0];
      if (choice) {
        // Combine reasoning content
        const reasoningContent = (choice.delta as ExtendedCompletionChunkDelta)
          ?.reasoning_content;
        if (reasoningContent) {
          combinedReasoning += reasoningContent;
        }
        // Combine text content
        if (choice.delta?.content) {
          combinedContent += choice.delta.content;
        }

        // Collect tool calls
        if (choice.delta?.tool_calls) {
          for (const toolCall of choice.delta.tool_calls) {
            if (toolCall.index !== undefined) {
              if (!toolCalls[toolCall.index]) {
                toolCalls[toolCall.index] = {
                  id: toolCall.id || '',
                  type: toolCall.type || 'function',
                  function: { name: '', arguments: '' },
                };
              }

              if (toolCall.function?.name) {
                toolCalls[toolCall.index].function.name +=
                  toolCall.function.name;
              }
              if (toolCall.function?.arguments) {
                toolCalls[toolCall.index].function.arguments +=
                  toolCall.function.arguments;
              }
            }
          }
        }

        // Get finish reason from the last chunk
        if (choice.finish_reason) {
          finishReason = choice.finish_reason;
        }
      }

      // Get usage from the last chunk that has it
      if (chunk.usage) {
        usage = chunk.usage;
      }
    }

    // Create the combined ChatCompletion response
    const message: OpenAI.Chat.ChatCompletionMessage = {
      role: 'assistant',
      content: combinedContent || null,
      refusal: null,
    };
    if (combinedReasoning) {
      // Attach reasoning content if any thought tokens were streamed
      (message as { reasoning_content?: string }).reasoning_content =
        combinedReasoning;
    }

    // Add tool calls if any
    if (toolCalls.length > 0) {
      message.tool_calls = toolCalls.filter((tc) => tc.id); // Filter out empty tool calls
    }

    const combinedResponse: OpenAI.Chat.ChatCompletion = {
      id: firstChunk.id,
      object: 'chat.completion',
      created: firstChunk.created,
      model: firstChunk.model,
      choices: [
        {
          index: 0,
          message,
          finish_reason: finishReason || 'stop',
          logprobs: null,
        },
      ],
      usage: usage || {
        prompt_tokens: 0,
        completion_tokens: 0,
        total_tokens: 0,
      },
      system_fingerprint: firstChunk.system_fingerprint,
    };

    return combinedResponse;
  }
}
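Because the pipeline depends only on the TelemetryService interface above, tests (like the mockTelemetryService assertions earlier in this diff) can substitute a stub. A minimal no-op sketch, assuming only the interface and RequestContext defined in this file:

import type { GenerateContentResponse } from '@google/genai';
import type OpenAI from 'openai';

// Hypothetical no-op implementation for tests; nothing here is logged anywhere.
class NoopTelemetryService implements TelemetryService {
  async logSuccess(
    _context: RequestContext,
    _response: GenerateContentResponse,
    _openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    _openaiResponse?: OpenAI.Chat.ChatCompletion,
  ): Promise<void> {}

  async logError(
    _context: RequestContext,
    _error: unknown,
    _openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
  ): Promise<void> {}

  async logStreamingSuccess(
    _context: RequestContext,
    _responses: GenerateContentResponse[],
    _openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
    _openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
  ): Promise<void> {}
}
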
@@ -264,7 +264,7 @@ describe('loggers', () => {
        'event.timestamp': '2025-01-01T00:00:00.000Z',
        prompt_length: 11,
        prompt_id: 'prompt-id-9',
        auth_type: 'gemini',
        auth_type: 'gemini-api-key',
      },
    });
  });
@@ -333,7 +333,7 @@ describe('loggers', () => {
        total_token_count: 0,
        response_text: 'test-response',
        prompt_id: 'prompt-id-1',
        auth_type: 'gemini',
        auth_type: 'gemini-api-key',
      },
    });

@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/sdk",
  "version": "0.1.0",
  "version": "0.6.0-nightly.20251225.9f65bd3b",
  "description": "TypeScript SDK for programmatic access to qwen-code CLI",
  "main": "./dist/index.cjs",
  "module": "./dist/index.mjs",

@@ -272,6 +272,8 @@ export class Query implements AsyncIterable<SDKMessage> {
    // Get only successfully connected SDK servers for CLI
    const sdkMcpServersForCli = this.getSdkMcpServersForCli();
    const mcpServersForCli = this.getMcpServersForCli();
    logger.debug('SDK MCP servers for CLI:', sdkMcpServersForCli);
    logger.debug('External MCP servers for CLI:', mcpServersForCli);

    await this.sendControlRequest(ControlRequestType.INITIALIZE, {
      hooks: null,
@@ -627,11 +629,6 @@ export class Query implements AsyncIterable<SDKMessage> {
      return Promise.reject(new Error('Query is closed'));
    }

    if (subtype !== ControlRequestType.INITIALIZE) {
      // Ensure all other control requests get processed after initialization
      await this.initialized;
    }

    const requestId = randomUUID();

    const request: CLIControlRequest = {

@@ -1,6 +1,6 @@
{
  "name": "@qwen-code/qwen-code-test-utils",
  "version": "0.6.0-nightly.20251226.17eb20c1",
  "version": "0.6.0-nightly.20251225.9f65bd3b",
  "private": true,
  "main": "src/index.ts",
  "license": "Apache-2.0",

@@ -2,7 +2,7 @@
  "name": "qwen-code-vscode-ide-companion",
  "displayName": "Qwen Code Companion",
  "description": "Enable Qwen Code with direct access to your VS Code workspace.",
  "version": "0.6.0-nightly.20251226.17eb20c1",
  "version": "0.6.0-nightly.20251225.9f65bd3b",
  "publisher": "qwenlm",
  "icon": "assets/icon.png",
  "repository": {

@@ -50,9 +50,6 @@ vi.mock('vscode', () => ({
    registerTextDocumentContentProvider: vi.fn(),
    onDidChangeWorkspaceFolders: vi.fn(),
    onDidGrantWorkspaceTrust: vi.fn(),
    registerFileSystemProvider: vi.fn(() => ({
      dispose: vi.fn(),
    })),
  },
  commands: {
    registerCommand: vi.fn(),

@@ -16,7 +16,6 @@ import {
} from '@qwen-code/qwen-code-core/src/ide/detect-ide.js';
import { WebViewProvider } from './webview/WebViewProvider.js';
import { registerNewCommands } from './commands/index.js';
import { ReadonlyFileSystemProvider } from './services/readonlyFileSystemProvider.js';

const CLI_IDE_COMPANION_IDENTIFIER = 'qwenlm.qwen-code-vscode-ide-companion';
const INFO_MESSAGE_SHOWN_KEY = 'qwenCodeInfoMessageShown';
@@ -111,19 +110,6 @@ export async function activate(context: vscode.ExtensionContext) {

  checkForUpdates(context, log);

  // Create and register readonly file system provider
  // The provider registers itself as a singleton in the constructor
  const readonlyProvider = new ReadonlyFileSystemProvider();
  context.subscriptions.push(
    vscode.workspace.registerFileSystemProvider(
      ReadonlyFileSystemProvider.getScheme(),
      readonlyProvider,
      { isCaseSensitive: true, isReadonly: true },
    ),
    readonlyProvider,
  );
  log('Readonly file system provider registered');

  const diffContentProvider = new DiffContentProvider();
  const diffManager = new DiffManager(
    log,

@@ -38,10 +38,6 @@ vi.mock('node:os', async (importOriginal) => {
  };
});

vi.mock('@qwen-code/qwen-code-core/src/ide/detect-ide.js', () => ({
  detectIdeFromEnv: vi.fn(() => ({ name: 'vscode', displayName: 'VS Code' })),
}));

const vscodeMock = vi.hoisted(() => ({
  workspace: {
    workspaceFolders: [

@@ -146,8 +146,6 @@ export class AcpConnection {
      console.error(
        `[ACP qwen] Process exited with code: ${code}, signal: ${signal}`,
      );
      // Clear pending requests when process exits
      this.pendingRequests.clear();
    });

    // Wait for process to start

@@ -8,7 +8,6 @@ import type {
  AcpSessionUpdate,
  AcpPermissionRequest,
  AuthenticateUpdateNotification,
  ModelInfo,
} from '../types/acpTypes.js';
import type { ApprovalModeValue } from '../types/approvalModeValueTypes.js';
import { QwenSessionReader, type QwenSession } from './qwenSessionReader.js';
@@ -18,7 +17,6 @@ import type {
  PlanEntry,
  ToolCallUpdateData,
  QwenAgentCallbacks,
  UsageStatsPayload,
} from '../types/chatTypes.js';
import {
  QwenConnectionHandler,
@@ -26,7 +24,6 @@ import {
} from '../services/qwenConnectionHandler.js';
import { QwenSessionUpdateHandler } from './qwenSessionUpdateHandler.js';
import { authMethod } from '../types/acpTypes.js';
import { extractModelInfoFromNewSessionResult } from '../utils/acpModelInfo.js';
import { isAuthenticationRequiredError } from '../utils/authErrors.js';
import { handleAuthenticateUpdate } from '../utils/authNotificationHandler.js';

@@ -198,16 +195,12 @@ export class QwenAgentManager {
    options?: AgentConnectOptions,
  ): Promise<QwenConnectionResult> {
    this.currentWorkingDir = workingDir;
    const res = await this.connectionHandler.connect(
    return this.connectionHandler.connect(
      this.connection,
      workingDir,
      cliEntryPath,
      options,
    );
    if (res.modelInfo && this.callbacks.onModelInfo) {
      this.callbacks.onModelInfo(res.modelInfo);
    }
    return res;
  }

  /**
@@ -1098,10 +1091,9 @@ export class QwenAgentManager {

    this.sessionCreateInFlight = (async () => {
      try {
        let newSessionResult: unknown;
        // Try to create a new ACP session. If Qwen asks for auth, let it handle authentication.
        try {
          newSessionResult = await this.connection.newSession(workingDir);
          await this.connection.newSession(workingDir);
        } catch (err) {
          const requiresAuth = isAuthenticationRequiredError(err);

@@ -1123,7 +1115,7 @@ export class QwenAgentManager {
            );
            // Add a slight delay to ensure auth state is settled
            await new Promise((resolve) => setTimeout(resolve, 300));
            newSessionResult = await this.connection.newSession(workingDir);
            await this.connection.newSession(workingDir);
          } catch (reauthErr) {
            console.error(
              '[QwenAgentManager] Re-authentication failed:',
@@ -1135,13 +1127,6 @@ export class QwenAgentManager {
            throw err;
          }
        }

        const modelInfo =
          extractModelInfoFromNewSessionResult(newSessionResult);
        if (modelInfo && this.callbacks.onModelInfo) {
          this.callbacks.onModelInfo(modelInfo);
        }

        const newSessionId = this.connection.currentSessionId;
        console.log(
          '[QwenAgentManager] New session created with ID:',
@@ -1272,22 +1257,6 @@ export class QwenAgentManager {
    this.sessionUpdateHandler.updateCallbacks(this.callbacks);
  }

  /**
   * Register callback for usage metadata updates
   */
  onUsageUpdate(callback: (stats: UsageStatsPayload) => void): void {
    this.callbacks.onUsageUpdate = callback;
    this.sessionUpdateHandler.updateCallbacks(this.callbacks);
  }

  /**
   * Register callback for model info updates
   */
  onModelInfo(callback: (info: ModelInfo) => void): void {
    this.callbacks.onModelInfo = callback;
    this.sessionUpdateHandler.updateCallbacks(this.callbacks);
  }

  /**
   * Disconnect
   */

@@ -13,13 +13,10 @@
import type { AcpConnection } from './acpConnection.js';
import { isAuthenticationRequiredError } from '../utils/authErrors.js';
import { authMethod } from '../types/acpTypes.js';
import { extractModelInfoFromNewSessionResult } from '../utils/acpModelInfo.js';
import type { ModelInfo } from '../types/acpTypes.js';

export interface QwenConnectionResult {
  sessionCreated: boolean;
  requiresAuth: boolean;
  modelInfo?: ModelInfo;
}

/**
@@ -47,7 +44,6 @@ export class QwenConnectionHandler {
    const autoAuthenticate = options?.autoAuthenticate ?? true;
    let sessionCreated = false;
    let requiresAuth = false;
    let modelInfo: ModelInfo | undefined;

    // Build extra CLI arguments (only essential parameters)
    const extraArgs: string[] = [];
@@ -70,15 +66,13 @@ export class QwenConnectionHandler {
      console.log(
        '[QwenAgentManager] Creating new session (letting CLI handle authentication)...',
      );
      const newSessionResult = await this.newSessionWithRetry(
      await this.newSessionWithRetry(
        connection,
        workingDir,
        3,
        authMethod,
        autoAuthenticate,
      );
      modelInfo =
        extractModelInfoFromNewSessionResult(newSessionResult) || undefined;
      console.log('[QwenAgentManager] New session created successfully');
      sessionCreated = true;
    } catch (sessionError) {
@@ -105,7 +99,7 @@ export class QwenConnectionHandler {
    console.log(`\n========================================`);
    console.log(`[QwenAgentManager] ✅ CONNECT() COMPLETED SUCCESSFULLY`);
    console.log(`========================================\n`);
    return { sessionCreated, requiresAuth, modelInfo };
    return { sessionCreated, requiresAuth };
  }

  /**
@@ -121,15 +115,15 @@ export class QwenConnectionHandler {
    maxRetries: number,
    authMethod: string,
    autoAuthenticate: boolean,
  ): Promise<unknown> {
  ): Promise<void> {
    for (let attempt = 1; attempt <= maxRetries; attempt++) {
      try {
        console.log(
          `[QwenAgentManager] Creating session (attempt ${attempt}/${maxRetries})...`,
        );
        const res = await connection.newSession(workingDir);
        await connection.newSession(workingDir);
        console.log('[QwenAgentManager] Session created successfully');
        return res;
        return;
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error);
@@ -161,11 +155,11 @@ export class QwenConnectionHandler {
            '[QwenAgentManager] newSessionWithRetry Authentication successful',
          );
          // Retry immediately after successful auth
          const res = await connection.newSession(workingDir);
          await connection.newSession(workingDir);
          console.log(
            '[QwenAgentManager] Session created successfully after auth',
          );
          return res;
          return;
        } catch (authErr) {
          console.error(
            '[QwenAgentManager] Re-authentication failed:',
@@ -186,7 +180,5 @@ export class QwenConnectionHandler {
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }

    throw new Error('Session creation failed unexpectedly');
  }
}

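The newSessionWithRetry change above only narrows the return type to void; the retry-then-delay loop is unchanged. As a self-contained sketch of the same pattern, with the delay schedule being illustrative rather than the handler's actual values:

// Illustrative retry helper mirroring the pattern in newSessionWithRetry.
async function withRetry<T>(
  attemptFn: () => Promise<T>,
  maxRetries: number,
  delayMs: number,
): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await attemptFn();
    } catch (error) {
      lastError = error;
      if (attempt < maxRetries) {
        // Wait before the next attempt, as the handler does between session retries.
        await new Promise((resolve) => setTimeout(resolve, delayMs));
      }
    }
  }
  throw lastError;
}
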
@@ -10,12 +10,9 @@
 * Handles session updates from ACP and dispatches them to appropriate callbacks
 */

import type { AcpSessionUpdate, SessionUpdateMeta } from '../types/acpTypes.js';
import type { AcpSessionUpdate } from '../types/acpTypes.js';
import type { ApprovalModeValue } from '../types/approvalModeValueTypes.js';
import type {
  QwenAgentCallbacks,
  UsageStatsPayload,
} from '../types/chatTypes.js';
import type { QwenAgentCallbacks } from '../types/chatTypes.js';

/**
 * Qwen Session Update Handler class
@@ -60,7 +57,6 @@ export class QwenSessionUpdateHandler {
        if (update.content?.text && this.callbacks.onStreamChunk) {
          this.callbacks.onStreamChunk(update.content.text);
        }
        this.emitUsageMeta(update._meta);
        break;

      case 'agent_thought_chunk':
@@ -75,7 +71,6 @@ export class QwenSessionUpdateHandler {
            this.callbacks.onStreamChunk(update.content.text);
          }
        }
        this.emitUsageMeta(update._meta);
        break;

      case 'tool_call': {
@@ -165,17 +160,4 @@ export class QwenSessionUpdateHandler {
        break;
    }
  }

  private emitUsageMeta(meta?: SessionUpdateMeta): void {
    if (!meta || !this.callbacks.onUsageUpdate) {
      return;
    }

    const payload: UsageStatsPayload = {
      usage: meta.usage || undefined,
      durationMs: meta.durationMs ?? undefined,
    };

    this.callbacks.onUsageUpdate(payload);
  }
}

@@ -1,204 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */

import * as vscode from 'vscode';

/**
 * Readonly file system provider for temporary files
 * Uses custom URI scheme to create readonly documents in VS Code
 */
export class ReadonlyFileSystemProvider
  implements vscode.FileSystemProvider, vscode.Disposable
{
  private static readonly scheme = 'qwen-readonly';
  private static instance: ReadonlyFileSystemProvider | null = null;

  private readonly files = new Map<string, Uint8Array>();
  private readonly emitter = new vscode.EventEmitter<
    vscode.FileChangeEvent[]
  >();
  private readonly disposables: vscode.Disposable[] = [];

  readonly onDidChangeFile = this.emitter.event;

  constructor() {
    // Ensure only one instance exists
    if (ReadonlyFileSystemProvider.instance !== null) {
      console.warn(
        '[ReadonlyFileSystemProvider] Instance already exists, replacing with new instance',
      );
    }
    this.disposables.push(this.emitter);
    // Register as global singleton
    ReadonlyFileSystemProvider.instance = this;
  }

  static getScheme(): string {
    return ReadonlyFileSystemProvider.scheme;
  }

  /**
   * Get the global singleton instance
   * Returns null if not initialized yet
   */
  static getInstance(): ReadonlyFileSystemProvider | null {
    return ReadonlyFileSystemProvider.instance;
  }

  /**
   * Create a URI for a readonly temporary file (static version)
   */
  static createUri(fileName: string, content: string): vscode.Uri {
    // For tool-call related filenames, keep the URI stable so repeated clicks focus the same document.
    // Note: toolCallId can include underscores (e.g. "call_..."), so match everything after the prefix.
    const isToolCallFile =
      /^(bash-input|bash-output|execute-input|execute-output)-.+$/.test(
        fileName,
      );

    if (isToolCallFile) {
      return vscode.Uri.from({
        scheme: ReadonlyFileSystemProvider.scheme,
        path: `/${fileName}`,
      });
    }

    // For other cases, keep the original approach with timestamp to avoid collisions.
    const timestamp = Date.now();
    const hash = Buffer.from(content.substring(0, 100)).toString('base64url');
    const uniqueId = `${timestamp}-${hash.substring(0, 8)}`;
    return vscode.Uri.from({
      scheme: ReadonlyFileSystemProvider.scheme,
      path: `/${fileName}-${uniqueId}`,
    });
  }

  /**
   * Create a URI for a readonly temporary file (instance method)
   */
  createUri(fileName: string, content: string): vscode.Uri {
    return ReadonlyFileSystemProvider.createUri(fileName, content);
  }

  /**
   * Set content for a URI
   */
  setContent(uri: vscode.Uri, content: string): void {
    const buffer = Buffer.from(content, 'utf8');
    const key = uri.toString();
    const existed = this.files.has(key);
    this.files.set(key, buffer);
    this.emitter.fire([
      {
        type: existed
          ? vscode.FileChangeType.Changed
          : vscode.FileChangeType.Created,
        uri,
      },
    ]);
  }

  /**
   * Get content for a URI
   */
  getContent(uri: vscode.Uri): string | undefined {
    const buffer = this.files.get(uri.toString());
    return buffer ? Buffer.from(buffer).toString('utf8') : undefined;
  }

  // FileSystemProvider implementation

  watch(): vscode.Disposable {
    // No watching needed for readonly files
    return new vscode.Disposable(() => {});
  }

  stat(uri: vscode.Uri): vscode.FileStat {
    const buffer = this.files.get(uri.toString());
    if (!buffer) {
      throw vscode.FileSystemError.FileNotFound(uri);
    }

    return {
      type: vscode.FileType.File,
      ctime: Date.now(),
      mtime: Date.now(),
      size: buffer.byteLength,
    };
  }

  readDirectory(): Array<[string, vscode.FileType]> {
    // Not needed for our use case
    return [];
  }

  createDirectory(): void {
    throw vscode.FileSystemError.NoPermissions('Readonly file system');
  }

  readFile(uri: vscode.Uri): Uint8Array {
    const buffer = this.files.get(uri.toString());
    if (!buffer) {
      throw vscode.FileSystemError.FileNotFound(uri);
    }
    return buffer;
  }

  writeFile(
    uri: vscode.Uri,
    content: Uint8Array,
    options: { create: boolean; overwrite: boolean },
  ): void {
    // Check if file exists
    const exists = this.files.has(uri.toString());

    // For readonly files, only allow creation, not modification
    if (exists && !options.overwrite) {
      throw vscode.FileSystemError.FileExists(uri);
    }
    if (!exists && !options.create) {
      throw vscode.FileSystemError.FileNotFound(uri);
    }

    this.files.set(uri.toString(), content);
    this.emitter.fire([
      {
        type: exists
          ? vscode.FileChangeType.Changed
          : vscode.FileChangeType.Created,
        uri,
      },
    ]);
  }

  delete(uri: vscode.Uri): void {
    if (!this.files.has(uri.toString())) {
      throw vscode.FileSystemError.FileNotFound(uri);
    }
    this.files.delete(uri.toString());
    this.emitter.fire([{ type: vscode.FileChangeType.Deleted, uri }]);
  }

  rename(): void {
    throw vscode.FileSystemError.NoPermissions('Readonly file system');
  }

  /**
   * Clear all cached files
   */
  clear(): void {
    this.files.clear();
  }

  dispose(): void {
    this.clear();
    this.disposables.forEach((d) => d.dispose());
    // Clear global instance on dispose
    if (ReadonlyFileSystemProvider.instance === this) {
      ReadonlyFileSystemProvider.instance = null;
    }
  }
}
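For reference, the now-removed provider above was consumed roughly as in this sketch; it only restates the API visible in the deleted file (URIs under the qwen-readonly scheme, setContent before opening), and the file name is hypothetical:

// Hypothetical consumer of the removed ReadonlyFileSystemProvider.
import * as vscode from 'vscode';

async function showReadonlyOutput(
  provider: ReadonlyFileSystemProvider,
  text: string,
): Promise<void> {
  // Stable URI for a tool-call file; repeated calls reuse the same document.
  const uri = ReadonlyFileSystemProvider.createUri('bash-output-call_123', text);
  provider.setContent(uri, text);
  // VS Code routes the read back through provider.readFile() via the custom scheme.
  const doc = await vscode.workspace.openTextDocument(uri);
  await vscode.window.showTextDocument(doc, { preview: true });
}
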
@@ -48,35 +48,6 @@ export interface ContentBlock {
  uri?: string;
}

export interface UsageMetadata {
  promptTokens?: number | null;
  completionTokens?: number | null;
  thoughtsTokens?: number | null;
  totalTokens?: number | null;
  cachedTokens?: number | null;
}

export interface SessionUpdateMeta {
  usage?: UsageMetadata | null;
  durationMs?: number | null;
}

export type AcpMeta = Record<string, unknown>;
export type ModelId = string;

export interface ModelInfo {
  _meta?: AcpMeta | null;
  description?: string | null;
  modelId: ModelId;
  name: string;
}

export interface SessionModelState {
  _meta?: AcpMeta | null;
  availableModels: ModelInfo[];
  currentModelId: ModelId;
}

export interface UserMessageChunkUpdate extends BaseSessionUpdate {
  update: {
    sessionUpdate: 'user_message_chunk';
@@ -88,7 +59,6 @@ export interface AgentMessageChunkUpdate extends BaseSessionUpdate {
  update: {
    sessionUpdate: 'agent_message_chunk';
    content: ContentBlock;
    _meta?: SessionUpdateMeta;
  };
}

@@ -96,7 +66,6 @@ export interface AgentThoughtChunkUpdate extends BaseSessionUpdate {
  update: {
    sessionUpdate: 'agent_thought_chunk';
    content: ContentBlock;
    _meta?: SessionUpdateMeta;
  };
}

@@ -3,7 +3,7 @@
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */
import type { AcpPermissionRequest, ModelInfo } from './acpTypes.js';
import type { AcpPermissionRequest } from './acpTypes.js';
import type { ApprovalModeValue } from './approvalModeValueTypes.js';

export interface ChatMessage {
@@ -28,18 +28,6 @@ export interface ToolCallUpdateData {
  locations?: Array<{ path: string; line?: number | null }>;
}

export interface UsageStatsPayload {
  usage?: {
    promptTokens?: number | null;
    completionTokens?: number | null;
    thoughtsTokens?: number | null;
    totalTokens?: number | null;
    cachedTokens?: number | null;
  } | null;
  durationMs?: number | null;
  tokenLimit?: number | null;
}

export interface QwenAgentCallbacks {
  onMessage?: (message: ChatMessage) => void;
  onStreamChunk?: (chunk: string) => void;
@@ -57,8 +45,6 @@ export interface QwenAgentCallbacks {
    }>;
  }) => void;
  onModeChanged?: (modeId: ApprovalModeValue) => void;
  onUsageUpdate?: (stats: UsageStatsPayload) => void;
  onModelInfo?: (info: ModelInfo) => void;
}

export interface ToolCallUpdate {

@@ -1,77 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */

import { describe, expect, it } from 'vitest';
import { extractModelInfoFromNewSessionResult } from './acpModelInfo.js';

describe('extractModelInfoFromNewSessionResult', () => {
  it('extracts from NewSessionResponse.models (SessionModelState)', () => {
    expect(
      extractModelInfoFromNewSessionResult({
        sessionId: 's',
        models: {
          currentModelId: 'qwen3-coder-plus',
          availableModels: [
            {
              modelId: 'qwen3-coder-plus',
              name: 'Qwen3 Coder Plus',
              description: null,
              _meta: { contextLimit: 123 },
            },
          ],
        },
      }),
    ).toEqual({
      modelId: 'qwen3-coder-plus',
      name: 'Qwen3 Coder Plus',
      description: null,
      _meta: { contextLimit: 123 },
    });
  });

  it('skips invalid model entries and returns first valid one', () => {
    expect(
      extractModelInfoFromNewSessionResult({
        models: {
          currentModelId: 'ok',
          availableModels: [
            { name: '', modelId: '' },
            { name: 'Ok', modelId: 'ok', _meta: { contextLimit: null } },
          ],
        },
      }),
    ).toEqual({ name: 'Ok', modelId: 'ok', _meta: { contextLimit: null } });
  });

  it('falls back to single `model` object', () => {
    expect(
      extractModelInfoFromNewSessionResult({
        model: {
          name: 'Single',
          modelId: 'single',
          _meta: { contextLimit: 999 },
        },
      }),
    ).toEqual({
      name: 'Single',
      modelId: 'single',
      _meta: { contextLimit: 999 },
    });
  });

  it('falls back to legacy `modelInfo`', () => {
    expect(
      extractModelInfoFromNewSessionResult({
        modelInfo: { name: 'legacy' },
      }),
    ).toEqual({ name: 'legacy', modelId: 'legacy' });
  });

  it('returns null when missing', () => {
    expect(extractModelInfoFromNewSessionResult({})).toBeNull();
    expect(extractModelInfoFromNewSessionResult(null)).toBeNull();
  });
});
@@ -1,135 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */

import type { AcpMeta, ModelInfo } from '../types/acpTypes.js';

const asMeta = (value: unknown): AcpMeta | null | undefined => {
  if (value === null) {
    return null;
  }
  if (value && typeof value === 'object' && !Array.isArray(value)) {
    return value as AcpMeta;
  }
  return undefined;
};

const normalizeModelInfo = (value: unknown): ModelInfo | null => {
  if (!value || typeof value !== 'object') {
    return null;
  }

  const obj = value as Record<string, unknown>;
  const nameRaw = obj['name'];
  const modelIdRaw = obj['modelId'];
  const descriptionRaw = obj['description'];

  const name = typeof nameRaw === 'string' ? nameRaw.trim() : '';
  const modelId =
    typeof modelIdRaw === 'string' && modelIdRaw.trim().length > 0
      ? modelIdRaw.trim()
      : name;

  if (!modelId || modelId.trim().length === 0 || !name) {
    return null;
  }

  const description =
    typeof descriptionRaw === 'string' || descriptionRaw === null
      ? descriptionRaw
      : undefined;

  const metaFromWire = asMeta(obj['_meta']);

  // Back-compat: older implementations used `contextLimit` at the top-level.
  const legacyContextLimit = obj['contextLimit'];
  const contextLimit =
    typeof legacyContextLimit === 'number' || legacyContextLimit === null
      ? legacyContextLimit
      : undefined;

  let mergedMeta: AcpMeta | null | undefined = metaFromWire;
  if (typeof contextLimit !== 'undefined') {
    if (mergedMeta === null) {
      mergedMeta = { contextLimit };
    } else if (typeof mergedMeta === 'undefined') {
      mergedMeta = { contextLimit };
    } else {
      mergedMeta = { ...mergedMeta, contextLimit };
    }
  }

  return {
    modelId,
    name,
    ...(typeof description !== 'undefined' ? { description } : {}),
    ...(typeof mergedMeta !== 'undefined' ? { _meta: mergedMeta } : {}),
  };
};

/**
 * Extract model info from ACP `session/new` result.
 *
 * Per Agent Client Protocol draft schema, NewSessionResponse includes `models`.
 * We also accept legacy shapes for compatibility.
 */
export const extractModelInfoFromNewSessionResult = (
  result: unknown,
): ModelInfo | null => {
  if (!result || typeof result !== 'object') {
    return null;
  }

  const obj = result as Record<string, unknown>;

  const models = obj['models'];

  // ACP draft: NewSessionResponse.models is a SessionModelState object.
  if (models && typeof models === 'object' && !Array.isArray(models)) {
    const state = models as Record<string, unknown>;
    const availableModels = state['availableModels'];
    const currentModelId = state['currentModelId'];
    if (Array.isArray(availableModels)) {
      const normalizedModels = availableModels
        .map(normalizeModelInfo)
        .filter((m): m is ModelInfo => Boolean(m));
      if (normalizedModels.length > 0) {
        if (typeof currentModelId === 'string' && currentModelId.length > 0) {
          const selected = normalizedModels.find(
            (m) => m.modelId === currentModelId,
          );
          if (selected) {
            return selected;
          }
        }
        return normalizedModels[0];
      }
    }
  }

  // Legacy: some implementations returned `models` as a raw array.
  if (Array.isArray(models)) {
    for (const entry of models) {
      const normalized = normalizeModelInfo(entry);
      if (normalized) {
        return normalized;
      }
    }
  }

  // Some implementations may return a single model object.
  const model = normalizeModelInfo(obj['model']);
  if (model) {
    return model;
  }

  // Legacy: modelInfo on initialize; allow as a fallback.
  const legacy = normalizeModelInfo(obj['modelInfo']);
  if (legacy) {
    return legacy;
  }

  return null;
};
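Before its removal, the helper above resolved the current model from a session/new result as in this usage sketch; the payload mirrors the deleted test's first fixture:

// Example input mirroring the deleted test fixture.
const info = extractModelInfoFromNewSessionResult({
  sessionId: 's',
  models: {
    currentModelId: 'qwen3-coder-plus',
    availableModels: [
      {
        modelId: 'qwen3-coder-plus',
        name: 'Qwen3 Coder Plus',
        description: null,
        _meta: { contextLimit: 123 },
      },
    ],
  },
});
// info -> { modelId: 'qwen3-coder-plus', name: 'Qwen3 Coder Plus',
//           description: null, _meta: { contextLimit: 123 } }
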
@@ -53,40 +53,11 @@ export function findLeftGroupOfChatWebview(): vscode.ViewColumn | undefined {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for a condition to become true, driven by tab-group change events.
|
||||
* Falls back to a timeout to avoid hanging forever.
|
||||
*/
|
||||
function waitForTabGroupsCondition(
|
||||
condition: () => boolean,
|
||||
timeout: number = 2000,
|
||||
): Promise<boolean> {
|
||||
if (condition()) {
|
||||
return Promise.resolve(true);
|
||||
}
|
||||
|
||||
return new Promise<boolean>((resolve) => {
|
||||
const subscription = vscode.window.tabGroups.onDidChangeTabGroups(() => {
|
||||
if (!condition()) {
|
||||
return;
|
||||
}
|
||||
clearTimeout(timeoutHandle);
|
||||
subscription.dispose();
|
||||
resolve(true);
|
||||
});
|
||||
|
||||
const timeoutHandle = setTimeout(() => {
|
||||
subscription.dispose();
|
||||
resolve(false);
|
||||
}, timeout);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensure there is an editor group directly to the left of the Qwen chat webview.
|
||||
* - If one exists, return its ViewColumn.
|
||||
* - If none exists, focus the chat panel and create a new group on its left,
|
||||
* then return the new group's ViewColumn.
|
||||
* then return the new group's ViewColumn (which equals the chat's previous column).
|
||||
* - If the chat webview cannot be located, returns undefined.
|
||||
*/
|
||||
export async function ensureLeftGroupOfChatWebview(): Promise<
|
||||
@@ -116,7 +87,7 @@ export async function ensureLeftGroupOfChatWebview(): Promise<
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const initialGroupCount = vscode.window.tabGroups.all.length;
|
||||
const previousChatColumn = webviewGroup.viewColumn;
|
||||
|
||||
// Make the chat group active by revealing the panel
|
||||
try {
|
||||
@@ -133,22 +104,6 @@ export async function ensureLeftGroupOfChatWebview(): Promise<
|
||||
return undefined;
|
||||
}
|
||||
|
||||
// Wait for the new group to actually be created (check that group count increased)
|
||||
const groupCreated = await waitForTabGroupsCondition(
|
||||
() => vscode.window.tabGroups.all.length > initialGroupCount,
|
||||
1000, // 1 second timeout
|
||||
);
|
||||
|
||||
if (!groupCreated) {
|
||||
// Fallback if group creation didn't complete in time
|
||||
return vscode.ViewColumn.One;
|
||||
}
|
||||
|
||||
// After creating a new group to the left, the new group takes ViewColumn.One
|
||||
// and all existing groups shift right. So the new left group is always ViewColumn.One.
|
||||
// However, to be safe, let's query for it again.
|
||||
const newLeftGroup = findLeftGroupOfChatWebview();
|
||||
|
||||
// Restore focus to chat (optional), so we don't disturb user focus
|
||||
try {
|
||||
await vscode.commands.executeCommand(openChatCommand);
|
||||
@@ -156,7 +111,6 @@ export async function ensureLeftGroupOfChatWebview(): Promise<
|
||||
// Ignore
|
||||
}
|
||||
|
||||
// If we successfully found the new left group, return it
|
||||
// Otherwise, fallback to ViewColumn.One (the newly created group should be first)
|
||||
return newLeftGroup ?? vscode.ViewColumn.One;
|
||||
// The new left group's column equals the chat's previous column
|
||||
return previousChatColumn;
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ import type { TextMessage } from './hooks/message/useMessageHandling.js';
|
||||
import type { ToolCallData } from './components/messages/toolcalls/ToolCall.js';
|
||||
import { PermissionDrawer } from './components/PermissionDrawer/PermissionDrawer.js';
|
||||
import { ToolCall } from './components/messages/toolcalls/ToolCall.js';
|
||||
import { hasToolCallOutput } from './utils/utils.js';
|
||||
import { hasToolCallOutput } from './components/messages/toolcalls/shared/utils.js';
|
||||
import { EmptyState } from './components/layout/EmptyState.js';
|
||||
import { Onboarding } from './components/layout/Onboarding.js';
|
||||
import { type CompletionItem } from '../types/completionItemTypes.js';
|
||||
@@ -45,12 +45,7 @@ import { SessionSelector } from './components/layout/SessionSelector.js';
|
||||
import { FileIcon, UserIcon } from './components/icons/index.js';
|
||||
import { ApprovalMode, NEXT_APPROVAL_MODE } from '../types/acpTypes.js';
|
||||
import type { ApprovalModeValue } from '../types/approvalModeValueTypes.js';
|
||||
import type { PlanEntry, UsageStatsPayload } from '../types/chatTypes.js';
|
||||
import type { ModelInfo } from '../types/acpTypes.js';
|
||||
import {
|
||||
DEFAULT_TOKEN_LIMIT,
|
||||
tokenLimit,
|
||||
} from '@qwen-code/qwen-code-core/src/core/tokenLimits.js';
|
||||
import type { PlanEntry } from '../types/chatTypes.js';
|
||||
|
||||
export const App: React.FC = () => {
|
||||
const vscode = useVSCode();
|
||||
@@ -75,8 +70,6 @@ export const App: React.FC = () => {
|
||||
const [planEntries, setPlanEntries] = useState<PlanEntry[]>([]);
|
||||
const [isAuthenticated, setIsAuthenticated] = useState<boolean | null>(null);
|
||||
const [isLoading, setIsLoading] = useState<boolean>(true); // Track if we're still initializing/loading
|
||||
const [modelInfo, setModelInfo] = useState<ModelInfo | null>(null);
|
||||
const [usageStats, setUsageStats] = useState<UsageStatsPayload | null>(null);
|
||||
const messagesEndRef = useRef<HTMLDivElement>(
|
||||
null,
|
||||
) as React.RefObject<HTMLDivElement>;
|
||||
@@ -167,48 +160,6 @@ export const App: React.FC = () => {
|
||||
|
||||
const completion = useCompletionTrigger(inputFieldRef, getCompletionItems);
|
||||
|
||||
const contextUsage = useMemo(() => {
|
||||
if (!usageStats && !modelInfo) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const modelName =
|
||||
modelInfo?.modelId && typeof modelInfo.modelId === 'string'
|
||||
? modelInfo.modelId
|
||||
: modelInfo?.name && typeof modelInfo.name === 'string'
|
||||
? modelInfo.name
|
||||
: undefined;
|
||||
|
||||
const derivedLimit =
|
||||
modelName && modelName.length > 0 ? tokenLimit(modelName) : undefined;
|
||||
|
||||
const metaLimitRaw = modelInfo?._meta?.['contextLimit'];
|
||||
const metaLimit =
|
||||
typeof metaLimitRaw === 'number' || metaLimitRaw === null
|
||||
? metaLimitRaw
|
||||
: undefined;
|
||||
|
||||
const limit =
|
||||
usageStats?.tokenLimit ??
|
||||
metaLimit ??
|
||||
derivedLimit ??
|
||||
DEFAULT_TOKEN_LIMIT;
|
||||
|
||||
const used = usageStats?.usage?.promptTokens ?? 0;
|
||||
if (typeof limit !== 'number' || limit <= 0 || used < 0) {
|
||||
return null;
|
||||
}
|
||||
const percentLeft = Math.max(
|
||||
0,
|
||||
Math.min(100, Math.round(((limit - used) / limit) * 100)),
|
||||
);
|
||||
return {
|
||||
percentLeft,
|
||||
usedTokens: used,
|
||||
tokenLimit: limit,
|
||||
};
|
||||
}, [usageStats, modelInfo]);
|
||||
|
||||
// Track a lightweight signature of workspace files to detect content changes even when length is unchanged
|
||||
const workspaceFilesSignature = useMemo(
|
||||
() =>
|
||||
@@ -297,10 +248,6 @@ export const App: React.FC = () => {
|
||||
setInputText,
|
||||
setEditMode,
|
||||
setIsAuthenticated,
|
||||
setUsageStats: (stats) => setUsageStats(stats ?? null),
|
||||
setModelInfo: (info) => {
|
||||
setModelInfo(info);
|
||||
},
|
||||
});
|
||||
|
||||
// Auto-scroll handling: keep the view pinned to bottom when new content arrives,
|
||||
@@ -813,7 +760,6 @@ export const App: React.FC = () => {
|
||||
activeFileName={fileContext.activeFileName}
|
||||
activeSelection={fileContext.activeSelection}
|
||||
skipAutoActiveContext={skipAutoActiveContext}
|
||||
contextUsage={contextUsage}
|
||||
onInputChange={setInputText}
|
||||
onCompositionStart={() => setIsComposing(true)}
|
||||
onCompositionEnd={() => setIsComposing(false)}
|
||||
|
||||
@@ -118,20 +118,6 @@ export class WebViewProvider {
|
||||
});
|
||||
});
|
||||
|
||||
this.agentManager.onUsageUpdate((stats) => {
|
||||
this.sendMessageToWebView({
|
||||
type: 'usageStats',
|
||||
data: stats,
|
||||
});
|
||||
});
|
||||
|
||||
this.agentManager.onModelInfo((info) => {
|
||||
this.sendMessageToWebView({
|
||||
type: 'modelInfo',
|
||||
data: info,
|
||||
});
|
||||
});
|
||||
|
||||
// Setup end-turn handler from ACP stopReason notifications
|
||||
this.agentManager.onEndTurn((reason) => {
|
||||
// Ensure WebView exits streaming state even if no explicit streamEnd was emitted elsewhere
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen Team
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type React from 'react';
|
||||
|
||||
interface TooltipProps {
|
||||
children: React.ReactNode;
|
||||
content: React.ReactNode;
|
||||
position?: 'top' | 'bottom' | 'left' | 'right';
|
||||
}
|
||||
|
||||
export const Tooltip: React.FC<TooltipProps> = ({
|
||||
children,
|
||||
content,
|
||||
position = 'top',
|
||||
}) => (
|
||||
<div className="relative inline-block">
|
||||
<div className="group relative">
|
||||
{children}
|
||||
<div
|
||||
className={`
|
||||
absolute z-50 px-2 py-1 text-xs rounded-md shadow-lg
|
||||
bg-[var(--app-primary-background)] border border-[var(--app-input-border)]
|
||||
text-[var(--app-primary-foreground)] whitespace-nowrap
|
||||
opacity-0 group-hover:opacity-100 transition-opacity duration-150
|
||||
-translate-x-1/2 left-1/2
|
||||
${
|
||||
position === 'top'
|
||||
? '-translate-y-1 bottom-full mb-1'
|
||||
: position === 'bottom'
|
||||
? 'translate-y-1 top-full mt-1'
|
||||
: position === 'left'
|
||||
? '-translate-x-full left-0 translate-y-[-50%] top-1/2'
|
||||
: 'translate-x-0 right-0 translate-y-[-50%] top-1/2'
|
||||
}
|
||||
pointer-events-none
|
||||
`}
|
||||
>
|
||||
{content}
|
||||
<div
|
||||
className={`
|
||||
absolute w-2 h-2 bg-[var(--app-primary-background)] border-l border-b border-[var(--app-input-border)]
|
||||
-rotate-45
|
||||
${
|
||||
position === 'top'
|
||||
? 'top-full left-1/2 -translate-x-1/2 -translate-y-1/2'
|
||||
: position === 'bottom'
|
||||
? 'bottom-full left-1/2 -translate-x-1/2 translate-y-1/2'
|
||||
: position === 'left'
|
||||
? 'right-full top-1/2 translate-x-1/2 -translate-y-1/2'
|
||||
: 'left-full top-1/2 -translate-x-1/2 -translate-y-1/2'
|
||||
}
|
||||
`}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
@@ -1,88 +0,0 @@
/**
 * @license
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 */

import type React from 'react';
import { Tooltip } from '../Tooltip.js';

interface ContextUsage {
  percentLeft: number;
  usedTokens: number;
  tokenLimit: number;
}

interface ContextIndicatorProps {
  contextUsage: ContextUsage | null;
}

export const ContextIndicator: React.FC<ContextIndicatorProps> = ({
  contextUsage,
}) => {
  if (!contextUsage) {
    return null;
  }

  // Calculate used percentage for the progress indicator
  // contextUsage.percentLeft is the percentage remaining, so 100 - percentLeft = percent used
  const percentUsed = 100 - contextUsage.percentLeft;
  const percentFormatted = Math.max(0, Math.min(100, Math.round(percentUsed)));
  const radius = 9;
  const circumference = 2 * Math.PI * radius;
  // To show the used portion, we need to offset the unused portion
  // If 20% is used, we want to show 20% filled, so offset the remaining 80%
  const dashOffset = ((100 - percentUsed) / 100) * circumference;
  const formatNumber = (value: number) => {
    if (value >= 1000) {
      return `${(Math.round((value / 1000) * 10) / 10).toFixed(1)}k`;
    }
    return Math.round(value).toLocaleString();
  };

  // Create tooltip content with proper formatting
  const tooltipContent = (
    <div className="flex flex-col gap-1">
      <div className="font-medium">
        {percentFormatted}% • {formatNumber(contextUsage.usedTokens)} /{' '}
        {formatNumber(contextUsage.tokenLimit)} context used
      </div>
    </div>
  );

  return (
    <Tooltip content={tooltipContent} position="top">
      <button
        className="btn-icon-compact"
        aria-label={`${percentFormatted}% • ${formatNumber(contextUsage.usedTokens)} / ${formatNumber(contextUsage.tokenLimit)} context used`}
      >
        <svg viewBox="0 0 24 24" aria-hidden="true" role="presentation">
          <circle
            className="context-indicator__track"
            cx="12"
            cy="12"
            r={radius}
            fill="none"
            stroke="currentColor"
            opacity="0.2"
          />
          <circle
            className="context-indicator__progress"
            cx="12"
            cy="12"
            r={radius}
            fill="none"
            stroke="currentColor"
            strokeWidth="2"
            strokeDasharray={circumference}
            strokeDashoffset={dashOffset}
            style={{
              transform: 'rotate(-90deg)',
              transformOrigin: '50% 50%',
            }}
          />
        </svg>
      </button>
    </Tooltip>
  );
};
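The indicator draws two concentric circles: a faint full track and a progress arc whose visible length is driven entirely by strokeDasharray and strokeDashoffset: a dash as long as the full circumference, offset so only the used fraction remains visible, rotated -90 degrees so the arc starts at 12 o'clock. A standalone sketch of that arithmetic, with hypothetical numbers:

// Ring math from ContextIndicator above, extracted for illustration.
const radius = 9;
const circumference = 2 * Math.PI * radius; // ≈ 56.55 SVG user units

function dashOffsetFor(percentLeft: number): number {
  const percentUsed = 100 - percentLeft;
  // The offset hides the unused part of the dash, leaving percentUsed visible.
  return ((100 - percentUsed) / 100) * circumference;
}

// Hypothetical input: 75% of the context window left → 25% of the ring filled.
console.log(dashOffsetFor(75).toFixed(2)); // "42.41" (75% of 56.55 stays hidden)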
@@ -21,7 +21,6 @@ import { CompletionMenu } from '../layout/CompletionMenu.js';
import type { CompletionItem } from '../../../types/completionItemTypes.js';
import { getApprovalModeInfoFromString } from '../../../types/acpTypes.js';
import type { ApprovalModeValue } from '../../../types/approvalModeValueTypes.js';
import { ContextIndicator } from './ContextIndicator.js';

interface InputFormProps {
  inputText: string;
@@ -37,11 +36,6 @@
  activeSelection: { startLine: number; endLine: number } | null;
  // Whether to auto-load the active editor selection/path into context
  skipAutoActiveContext: boolean;
  contextUsage: {
    percentLeft: number;
    usedTokens: number;
    tokenLimit: number;
  } | null;
  onInputChange: (text: string) => void;
  onCompositionStart: () => void;
  onCompositionEnd: () => void;
@@ -102,7 +96,6 @@ export const InputForm: React.FC<InputFormProps> = ({
  activeFileName,
  activeSelection,
  skipAutoActiveContext,
  contextUsage,
  onInputChange,
  onCompositionStart,
  onCompositionEnd,
@@ -247,9 +240,6 @@ export const InputForm: React.FC<InputFormProps> = ({
        {/* Spacer */}
        <div className="flex-1 min-w-0" />

        {/* Context usage indicator */}
        <ContextIndicator contextUsage={contextUsage} />

        {/* @yiliang114. closed temporarily */}
        {/* Thinking button */}
        {/* <button

@@ -3,10 +3,10 @@
 * Copyright 2025 Qwen Team
 * SPDX-License-Identifier: Apache-2.0
 *
 * Bash tool call styles - Enhanced styling with semantic class names
 * Execute tool call styles - Enhanced styling with semantic class names
 */

/* Root container for bash tool call output */
/* Root container for execute tool call output */
.bash-toolcall-card {
  border: 0.5px solid var(--app-input-border);
  border-radius: 5px;
@@ -100,9 +100,3 @@
.bash-toolcall-error-content {
  color: #c74e39;
}

/* Row with copy button */
.bash-toolcall-row-with-copy {
  position: relative;
  grid-template-columns: max-content 1fr max-content;
}

@@ -9,10 +9,9 @@
import type React from 'react';
import type { BaseToolCallProps } from '../shared/types.js';
import { ToolCallContainer } from '../shared/LayoutComponents.js';
import { safeTitle, groupContent } from '../../../../utils/utils.js';
import { safeTitle, groupContent } from '../shared/utils.js';
import { useVSCode } from '../../../../hooks/useVSCode.js';
import { createAndOpenTempFile } from '../../../../utils/diffUtils.js';
import { CopyButton } from '../shared/copyUtils.js';
import { createAndOpenTempFile } from '../../../../utils/tempFileManager.js';
import './Bash.css';

/**
@@ -38,14 +37,19 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {

  // Handle click on IN section
  const handleInClick = () => {
    createAndOpenTempFile(vscode, inputCommand, `bash-input-${toolCallId}`);
    createAndOpenTempFile(
      vscode.postMessage,
      inputCommand,
      'bash-input',
      '.sh',
    );
  };

  // Handle click on OUT section
  const handleOutClick = () => {
    if (textOutputs.length > 0) {
      const output = textOutputs.join('\n');
      createAndOpenTempFile(vscode, output, `bash-output-${toolCallId}`);
      createAndOpenTempFile(vscode.postMessage, output, 'bash-output', '.txt');
    }
  };

@@ -80,7 +84,7 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
      <div className="bash-toolcall-content">
        {/* IN row */}
        <div
          className="bash-toolcall-row bash-toolcall-row-with-copy group"
          className="bash-toolcall-row"
          onClick={handleInClick}
          style={{ cursor: 'pointer' }}
        >
@@ -88,7 +92,6 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
          <div className="bash-toolcall-row-content">
            <pre className="bash-toolcall-pre">{inputCommand}</pre>
          </div>
          <CopyButton text={inputCommand} />
        </div>

        {/* ERROR row */}
@@ -128,7 +131,7 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
      <div className="bash-toolcall-content">
        {/* IN row */}
        <div
          className="bash-toolcall-row bash-toolcall-row-with-copy group"
          className="bash-toolcall-row"
          onClick={handleInClick}
          style={{ cursor: 'pointer' }}
        >
@@ -136,7 +139,6 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
          <div className="bash-toolcall-row-content">
            <pre className="bash-toolcall-pre">{inputCommand}</pre>
          </div>
          <CopyButton text={inputCommand} />
        </div>

        {/* OUT row */}

@@ -11,7 +11,7 @@ import type { BaseToolCallProps } from '../shared/types.js';
import {
  groupContent,
  mapToolStatusToContainerStatus,
} from '../../../../utils/utils.js';
} from '../shared/utils.js';
import { FileLink } from '../../../layout/FileLink.js';
import type { ToolCallContainerProps } from '../shared/LayoutComponents.js';

@@ -61,7 +61,11 @@
/* Truncated content styling */
.execute-toolcall-row-content:not(.execute-toolcall-full) {
  max-height: 60px;
  mask-image: linear-gradient(to bottom, var(--app-primary-background) 40px, transparent 60px);
  mask-image: linear-gradient(
    to bottom,
    var(--app-primary-background) 40px,
    transparent 60px
  );
  overflow: hidden;
}

@@ -83,6 +87,7 @@

/* Output content with subtle styling */
.execute-toolcall-output-subtle {
  background-color: var(--app-code-background);
  white-space: pre;
  overflow-x: auto;
  max-width: 100%;
@@ -95,9 +100,3 @@
.execute-toolcall-error-content {
  color: #c74e39;
}

/* Row with copy button */
.execute-toolcall-row-with-copy {
  position: relative;
  grid-template-columns: max-content 1fr max-content;
}

@@ -8,12 +8,9 @@

import type React from 'react';
import type { BaseToolCallProps } from '../shared/types.js';
import { safeTitle, groupContent } from '../../../../utils/utils.js';
import { safeTitle, groupContent } from '../shared/utils.js';
import './Execute.css';
import type { ToolCallContainerProps } from '../shared/LayoutComponents.js';
import { useVSCode } from '../../../../hooks/useVSCode.js';
import { createAndOpenTempFile } from '../../../../utils/diffUtils.js';
import { CopyButton } from '../shared/copyUtils.js';

export const ToolCallContainer: React.FC<ToolCallContainerProps> = ({
  label,
@@ -51,7 +48,6 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
  const commandText = safeTitle(
    (rawInput as Record<string, unknown>)?.description || title,
  );
  const vscode = useVSCode();

  // Group content by type
  const { textOutputs, errors } = groupContent(content);
@@ -65,19 +61,6 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
    inputCommand = rawInput;
  }

  // Handle click on IN section
  const handleInClick = () => {
    createAndOpenTempFile(vscode, inputCommand, `execute-input-${toolCallId}`);
  };

  // Handle click on OUT section
  const handleOutClick = () => {
    if (textOutputs.length > 0) {
      const output = textOutputs.join('\n');
      createAndOpenTempFile(vscode, output, `execute-output-${toolCallId}`);
    }
  };

  // Map tool status to container status for proper bullet coloring
  const containerStatus:
    | 'success'
@@ -109,16 +92,11 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
    <div className="execute-toolcall-card">
      <div className="execute-toolcall-content">
        {/* IN row */}
        <div
          className="execute-toolcall-row execute-toolcall-row-with-copy group"
          onClick={handleInClick}
          style={{ cursor: 'pointer' }}
        >
        <div className="execute-toolcall-row">
          <div className="execute-toolcall-label">IN</div>
          <div className="execute-toolcall-row-content">
            <pre className="execute-toolcall-pre">{inputCommand}</pre>
          </div>
          <CopyButton text={inputCommand} />
        </div>

        {/* ERROR row */}
@@ -157,24 +135,15 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
    <div className="execute-toolcall-card">
      <div className="execute-toolcall-content">
        {/* IN row */}
        <div
          className="execute-toolcall-row execute-toolcall-row-with-copy group"
          onClick={handleInClick}
          style={{ cursor: 'pointer' }}
        >
        <div className="execute-toolcall-row">
          <div className="execute-toolcall-label">IN</div>
          <div className="execute-toolcall-row-content">
            <pre className="execute-toolcall-pre">{inputCommand}</pre>
          </div>
          <CopyButton text={inputCommand} />
        </div>

        {/* OUT row */}
        <div
          className="execute-toolcall-row"
          onClick={handleOutClick}
          style={{ cursor: 'pointer' }}
        >
        <div className="execute-toolcall-row">
          <div className="execute-toolcall-label">OUT</div>
          <div className="execute-toolcall-row-content">
            <div className="execute-toolcall-output-subtle">
@@ -195,11 +164,7 @@ export const ExecuteToolCall: React.FC<BaseToolCallProps> = ({ toolCall }) => {
      status={containerStatus}
      toolCallId={toolCallId}
    >
      <div
        className="inline-flex text-[var(--app-secondary-foreground)] text-[0.85em] opacity-70 mt-[2px] mb-[2px] flex-row items-start w-full gap-1"
        onClick={handleInClick}
        style={{ cursor: 'pointer' }}
      >
      <div className="inline-flex text-[var(--app-secondary-foreground)] text-[0.85em] opacity-70 mt-[2px] mb-[2px] flex-row items-start w-full gap-1">
        <span className="flex-shrink-0 relative top-[-0.1em]">⎿</span>
        <span className="flex-shrink-0 w-full">{commandText}</span>
      </div>

@@ -14,7 +14,7 @@ import {
  ToolCallRow,
  LocationsList,
} from './shared/LayoutComponents.js';
import { safeTitle, groupContent } from '../../../utils/utils.js';
import { safeTitle, groupContent } from './shared/utils.js';

/**
 * Generic tool call component that can display any tool call type

@@ -12,7 +12,7 @@ import type { BaseToolCallProps } from '../shared/types.js';
import {
  groupContent,
  mapToolStatusToContainerStatus,
} from '../../../../utils/utils.js';
} from '../shared/utils.js';
import { FileLink } from '../../../layout/FileLink.js';
import { useVSCode } from '../../../../hooks/useVSCode.js';
import { handleOpenDiff } from '../../../../utils/diffUtils.js';

@@ -13,7 +13,7 @@ import {
  safeTitle,
  groupContent,
  mapToolStatusToContainerStatus,
} from '../../../../utils/utils.js';
} from '../shared/utils.js';

/**
 * Specialized component for Search tool calls
@@ -195,7 +195,7 @@ export const SearchToolCall: React.FC<BaseToolCallProps> = ({
      isLast={isLast}
    >
      <div className="flex flex-col">
        {textOutputs.map((text: string, index: number) => (
        {textOutputs.map((text, index) => (
          <div
            key={index}
            className="inline-flex text-[var(--app-secondary-foreground)] text-[0.85em] opacity-70 mt-[2px] mb-[2px] flex-row items-start w-full gap-1"

@@ -13,7 +13,7 @@ import {
  ToolCallCard,
  ToolCallRow,
} from '../shared/LayoutComponents.js';
import { groupContent } from '../../../../utils/utils.js';
import { groupContent } from '../shared/utils.js';

/**
 * Specialized component for Think tool calls

@@ -9,7 +9,7 @@
import type React from 'react';
import type { BaseToolCallProps } from '../shared/types.js';
import type { ToolCallContainerProps } from '../shared/LayoutComponents.js';
import { groupContent, safeTitle } from '../../../../utils/utils.js';
import { groupContent, safeTitle } from '../shared/utils.js';
import { CheckboxDisplay } from './CheckboxDisplay.js';
import type { PlanEntry } from '../../../../../types/chatTypes.js';

@@ -12,7 +12,7 @@ import { ToolCallContainer } from '../shared/LayoutComponents.js';
import {
  groupContent,
  mapToolStatusToContainerStatus,
} from '../../../../utils/utils.js';
} from '../shared/utils.js';
import { FileLink } from '../../../layout/FileLink.js';

/**

@@ -8,7 +8,7 @@

import type React from 'react';
import type { BaseToolCallProps } from './shared/types.js';
import { shouldShowToolCall } from '../../../utils/utils.js';
import { shouldShowToolCall } from './shared/utils.js';
import { GenericToolCall } from './GenericToolCall.js';
import { ReadToolCall } from './Read/ReadToolCall.js';
import { WriteToolCall } from './Write/WriteToolCall.js';
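Most of the remaining hunks are mechanical import moves: helpers such as safeTitle, groupContent, mapToolStatusToContainerStatus, and shouldShowToolCall now come from a shared/utils.js module next to the tool call components instead of the deep utils/utils.js path. The diff shows only the new import sites, not the new module itself; if the move were a pure re-export, a sketch could look like this (an assumption, not the file's actual contents):

// shared/utils.ts, sketch only. The diff never shows this file, so a
// re-export barrel is an assumption; the relative path back to
// utils/utils.js depends on where the module actually sits.
export {
  safeTitle,
  groupContent,
  mapToolStatusToContainerStatus,
  shouldShowToolCall,
} from '../../utils/utils.js';

A re-export barrel would keep every call site working while shortening the import paths; the module may equally well host the implementations directly.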
Some files were not shown because too many files have changed in this diff.
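One behavioral change buried in the Bash hunks is worth calling out: createAndOpenTempFile moves from utils/diffUtils.js, where it was called with the vscode API object, the content, and a name like `bash-input-${toolCallId}`, to utils/tempFileManager.js, called with vscode.postMessage, the content, a base name, and an explicit extension. A hedged sketch of the new helper, inferred from its call sites alone; the real implementation may differ:

// Inferred from call sites in this diff; the signature and message shape
// are assumptions, not the actual utils/tempFileManager.js implementation.
type PostMessage = (message: unknown) => void;

export function createAndOpenTempFile(
  postMessage: PostMessage,
  content: string,
  baseName: string, // e.g. 'bash-input'
  extension: string, // e.g. '.sh' or '.txt'
): void {
  // A webview cannot touch the filesystem directly, so it asks the
  // extension host to create and open the temp file on its behalf.
  postMessage({
    command: 'createAndOpenTempFile', // hypothetical message name
    content,
    baseName,
    extension,
  });
}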