Compare commits


2 Commits

Author        SHA1        Message                                                                   Date
mingholy.lmh  c81c24d45d  chore: improve release-sdk workflow                                       2025-12-25 10:46:57 +08:00
mingholy.lmh  4407597794  chore: skip bumping sdk version when release nightly/preview or dry run  2025-12-24 18:12:23 +08:00
50 changed files with 2138 additions and 3084 deletions

View File

@@ -91,6 +91,8 @@ jobs:
with:
node-version-file: '.nvmrc'
cache: 'npm'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Install Dependencies'
run: |-
@@ -126,6 +128,14 @@ jobs:
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
MANUAL_VERSION: '${{ inputs.version }}'
- name: 'Set SDK package version (local only)'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Ensure the package version matches the computed release version.
# This is required for nightly/preview because npm does not allow re-publishing the same version.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Build CLI Bundle'
run: |
npm run build
@@ -158,7 +168,21 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
- name: 'Create and switch to a release branch'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'release_branch'
env:
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
@@ -167,50 +191,22 @@ jobs:
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"
- name: 'Update package version'
env:
RELEASE_VERSION: '${{ steps.version.outputs.RELEASE_VERSION }}'
run: |-
# Use npm workspaces so the root lockfile is updated consistently.
npm version -w @qwen-code/sdk "${RELEASE_VERSION}" --no-git-tag-version --allow-same-version
- name: 'Commit and Conditionally Push package version'
- name: 'Commit and Push package version (stable only)'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
IS_DRY_RUN: '${{ steps.vars.outputs.is_dry_run }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
run: |-
# Only persist version bumps after a successful publish.
git add packages/sdk-typescript/package.json package-lock.json
if git diff --staged --quiet; then
echo "No version changes to commit"
else
git commit -m "chore(release): sdk-typescript ${RELEASE_TAG}"
fi
if [[ "${IS_DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi
- name: 'Build SDK'
working-directory: 'packages/sdk-typescript'
run: |-
npm run build
- name: 'Configure npm for publishing'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version-file: '.nvmrc'
registry-url: 'https://registry.npmjs.org'
scope: '@qwen-code'
- name: 'Publish @qwen-code/sdk'
working-directory: 'packages/sdk-typescript'
run: |-
npm publish --access public --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
env:
NODE_AUTH_TOKEN: '${{ secrets.NPM_TOKEN }}'
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
- name: 'Create GitHub Release and Tag'
if: |-
@@ -220,16 +216,29 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
REF: '${{ github.event.inputs.ref || github.sha }}'
run: |-
# For stable releases, use the release branch; for nightly/preview, use the current ref
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
TARGET="${REF}"
PRERELEASE_FLAG="--prerelease"
else
TARGET="${RELEASE_BRANCH}"
PRERELEASE_FLAG=""
fi
gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "$RELEASE_BRANCH" \
--target "${TARGET}" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--generate-notes
--generate-notes \
${PRERELEASE_FLAG}
- name: 'Create PR to merge release branch into main'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
id: 'pr'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
@@ -251,7 +260,7 @@ jobs:
- name: 'Wait for CI checks to complete'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
@@ -262,7 +271,7 @@ jobs:
- name: 'Enable auto-merge for release PR'
if: |-
${{ steps.vars.outputs.is_dry_run == 'false' }}
${{ steps.vars.outputs.is_dry_run == 'false' && steps.vars.outputs.is_nightly == 'false' && steps.vars.outputs.is_preview == 'false' }}
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
PR_URL: '${{ steps.pr.outputs.PR_URL }}'
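
The publish steps in this workflow toggle --dry-run with the inline expression steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || ''. GitHub Actions expressions use JavaScript-style && and || operators that return an operand rather than a boolean, so the construct behaves as a ternary. A minimal TypeScript sketch of the same evaluation (illustrative only; the variable name is a stand-in for the step output):

// `a && b || c` yields `b` when `a` is truthy, otherwise `c` -- the same
// operand-returning semantics the workflow expression relies on.
const isDryRun = 'true'; // stand-in for steps.vars.outputs.is_dry_run
const dryRunFlag = (isDryRun === 'true' && '--dry-run') || '';
// Caveat: the idiom only acts as a ternary while the middle operand is
// truthy; '--dry-run' is a non-empty string, so it is safe here.
console.log(`npm publish --access public ${dryRunFlag}`.trim());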

View File

@@ -202,8 +202,8 @@ describe('file-system', () => {
const readAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const editAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'edit_file',
const writeAttempt = toolLogs.find(
(log) => log.toolRequest.name === 'write_file',
);
const successfulReplace = toolLogs.find(
(log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
@@ -226,15 +226,15 @@ describe('file-system', () => {
// CRITICAL: Verify that no matter what the model did, it never successfully
// wrote or replaced anything.
if (editAttempt) {
if (writeAttempt) {
console.error(
'A edit_file attempt was made when no file should be written.',
'A write_file attempt was made when no file should be written.',
);
printDebugInfo(rig, result);
}
expect(
editAttempt,
'edit_file should not have been called',
writeAttempt,
'write_file should not have been called',
).toBeUndefined();
if (successfulReplace) {

View File

@@ -952,8 +952,7 @@ describe('Permission Control (E2E)', () => {
TEST_TIMEOUT,
);
// FIXME: This test is flaky and sometimes fails with no tool calls.
it.skip(
it(
'should allow read-only tools without restrictions',
async () => {
// Create test files for the model to read

package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"workspaces": [
"packages/*"
],
@@ -134,36 +134,6 @@
"node": ">=6.0.0"
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.36.3",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.36.3.tgz",
"integrity": "sha512-+c0mMLxL/17yFZ4P5+U6bTWiCSFZUKJddrv01ud2aFBWnTPLdRncYV76D3q1tqfnL7aCnhRtykFnoCFzvr4U3Q==",
"license": "MIT",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
"version": "18.19.130",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
"integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/@asamuzakjp/css-color": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz",
@@ -3852,16 +3822,6 @@
"undici-types": "~6.21.0"
}
},
"node_modules/@types/node-fetch": {
"version": "2.6.13",
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
"integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
"license": "MIT",
"dependencies": {
"@types/node": "*",
"form-data": "^4.0.4"
}
},
"node_modules/@types/normalize-package-data": {
"version": "2.4.4",
"resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz",
@@ -4860,6 +4820,7 @@
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dev": true,
"license": "MIT",
"dependencies": {
"event-target-shim": "^5.0.0"
@@ -4946,18 +4907,6 @@
"node": ">= 14"
}
},
"node_modules/agentkeepalive": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
"integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
"license": "MIT",
"dependencies": {
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@@ -5529,6 +5478,7 @@
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"dev": true,
"license": "MIT"
},
"node_modules/atomically": {
@@ -6487,6 +6437,7 @@
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dev": true,
"license": "MIT",
"dependencies": {
"delayed-stream": "~1.0.0"
@@ -7112,6 +7063,7 @@
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.4.0"
@@ -7624,6 +7576,7 @@
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
@@ -8153,6 +8106,7 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
@@ -8698,6 +8652,7 @@
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"dev": true,
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
@@ -8710,16 +8665,11 @@
"node": ">= 6"
}
},
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
"license": "MIT"
},
"node_modules/form-data/node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
@@ -8728,28 +8678,6 @@
"node": ">= 0.6"
}
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"license": "MIT",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/formdata-node/node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/formdata-polyfill": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
@@ -9334,6 +9262,7 @@
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-symbols": "^1.0.3"
@@ -9512,15 +9441,6 @@
"node": ">=16.17.0"
}
},
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"license": "MIT",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/husky": {
"version": "9.1.7",
"resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz",
@@ -12020,48 +11940,6 @@
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/node-fetch/node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"license": "MIT"
},
"node_modules/node-fetch/node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"license": "BSD-2-Clause"
},
"node_modules/node-fetch/node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/node-pty": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-pty/-/node-pty-1.0.0.tgz",
@@ -17316,7 +17194,7 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
@@ -17953,10 +17831,9 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",
@@ -18593,7 +18470,7 @@
},
"packages/sdk-typescript": {
"name": "@qwen-code/sdk",
"version": "0.6.0-preview.1",
"version": "0.1.0",
"license": "Apache-2.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
@@ -21413,7 +21290,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@@ -21425,7 +21302,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"engines": {
"node": ">=20.0.0"
},
@@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-preview.1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
},
"scripts": {
"start": "cross-env node scripts/start.js",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"description": "Qwen Code",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0-preview.1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.6.0"
},
"dependencies": {
"@google/genai": "1.30.0",

View File

@@ -26,20 +26,6 @@ export function validateAuthMethod(authMethod: string): string | null {
return null;
}
if (authMethod === AuthType.USE_ANTHROPIC) {
const hasApiKey = process.env['ANTHROPIC_API_KEY'];
if (!hasApiKey) {
return 'ANTHROPIC_API_KEY environment variable not found.';
}
const hasBaseUrl = process.env['ANTHROPIC_BASE_URL'];
if (!hasBaseUrl) {
return 'ANTHROPIC_BASE_URL environment variable not found.';
}
return null;
}
if (authMethod === AuthType.USE_GEMINI) {
const hasApiKey = process.env['GEMINI_API_KEY'];
if (!hasApiKey) {

View File

@@ -2114,14 +2114,7 @@ describe('loadCliConfig model selection', () => {
});
it('always prefers model from argvs', async () => {
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{
@@ -2141,14 +2134,7 @@ describe('loadCliConfig model selection', () => {
});
it('selects the model from argvs if provided', async () => {
process.argv = [
'node',
'script.js',
'--auth-type',
'openai',
'--model',
'qwen3-coder-plus',
];
process.argv = ['node', 'script.js', '--model', 'qwen3-coder-plus'];
const argv = await parseArguments({} as Settings);
const config = await loadCliConfig(
{

View File

@@ -468,7 +468,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
type: 'string',
choices: [
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.QWEN_OAUTH,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
@@ -877,30 +876,11 @@ export async function loadCliConfig(
);
}
const selectedAuthType =
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType;
const apiKey =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey
: '') || '';
const baseUrl =
(selectedAuthType === AuthType.USE_OPENAI
? argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl
: '') || '';
const resolvedModel =
argv.model ||
(selectedAuthType === AuthType.USE_OPENAI
? process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name
: '') ||
'';
process.env['OPENAI_MODEL'] ||
process.env['QWEN_MODEL'] ||
settings.model?.name;
const sandboxConfig = await loadSandboxConfig(settings, argv);
const screenReader =
@@ -987,15 +967,23 @@ export async function loadCliConfig(
extensions: allExtensions,
blockedMcpServers,
noBrowser: !!process.env['NO_BROWSER'],
authType: selectedAuthType,
authType:
(argv.authType as AuthType | undefined) ||
settings.security?.auth?.selectedType,
inputFormat,
outputFormat,
includePartialMessages,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,
apiKey,
baseUrl,
apiKey:
argv.openaiApiKey ||
process.env['OPENAI_API_KEY'] ||
settings.security?.auth?.apiKey,
baseUrl:
argv.openaiBaseUrl ||
process.env['OPENAI_BASE_URL'] ||
settings.security?.auth?.baseUrl,
enableOpenAILogging:
(typeof argv.openaiLogging === 'undefined'
? settings.model?.enableOpenAILogging

View File

@@ -228,7 +228,6 @@ export const useAuthCommand = (
![
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].includes(defaultAuthType as AuthType)
@@ -241,7 +240,6 @@ export const useAuthCommand = (
validValues: [
AuthType.QWEN_OAUTH,
AuthType.USE_OPENAI,
AuthType.USE_ANTHROPIC,
AuthType.USE_GEMINI,
AuthType.USE_VERTEX_AI,
].join(', '),

View File

@@ -526,15 +526,10 @@ export const useGeminiStream = (
return currentThoughtBuffer;
}
let newThoughtBuffer = currentThoughtBuffer + thoughtText;
const pendingType = pendingHistoryItemRef.current?.type;
const isPendingThought =
pendingType === 'gemini_thought' ||
pendingType === 'gemini_thought_content';
const newThoughtBuffer = currentThoughtBuffer + thoughtText;
// If we're not already showing a thought, start a new one
if (!isPendingThought) {
if (pendingHistoryItemRef.current?.type !== 'gemini_thought') {
// If there's a pending non-thought item, finalize it first
if (pendingHistoryItemRef.current) {
addItem(pendingHistoryItemRef.current, userMessageTimestamp);
@@ -542,37 +537,11 @@ export const useGeminiStream = (
setPendingHistoryItem({ type: 'gemini_thought', text: '' });
}
// Split large thought messages for better rendering performance (same rationale
// as regular content streaming). This helps avoid terminal flicker caused by
// constantly re-rendering an ever-growing "pending" block.
const splitPoint = findLastSafeSplitPoint(newThoughtBuffer);
const nextPendingType: 'gemini_thought' | 'gemini_thought_content' =
isPendingThought && pendingType === 'gemini_thought_content'
? 'gemini_thought_content'
: 'gemini_thought';
if (splitPoint === newThoughtBuffer.length) {
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: nextPendingType,
text: newThoughtBuffer,
});
} else {
const beforeText = newThoughtBuffer.substring(0, splitPoint);
const afterText = newThoughtBuffer.substring(splitPoint);
addItem(
{
type: nextPendingType,
text: beforeText,
},
userMessageTimestamp,
);
setPendingHistoryItem({
type: 'gemini_thought_content',
text: afterText,
});
newThoughtBuffer = afterText;
}
// Update the existing thought message with accumulated content
setPendingHistoryItem({
type: 'gemini_thought',
text: newThoughtBuffer,
});
// Also update the thought state for the loading indicator
mergeThought(eventValue);

View File

@@ -60,11 +60,6 @@ export function getOpenAIAvailableModelFromEnv(): AvailableModel | null {
return id ? { id, label: id } : null;
}
export function getAnthropicAvailableModelFromEnv(): AvailableModel | null {
const id = process.env['ANTHROPIC_MODEL']?.trim();
return id ? { id, label: id } : null;
}
export function getAvailableModelsForAuthType(
authType: AuthType,
): AvailableModel[] {
@@ -75,10 +70,6 @@ export function getAvailableModelsForAuthType(
const openAIModel = getOpenAIAvailableModelFromEnv();
return openAIModel ? [openAIModel] : [];
}
case AuthType.USE_ANTHROPIC: {
const anthropicModel = getAnthropicAvailableModelFromEnv();
return anthropicModel ? [anthropicModel] : [];
}
default:
// For other auth types, return empty array for now
// This can be expanded later according to the design doc

View File

@@ -20,11 +20,6 @@ const makeConfig = (tools: Record<string, AnyDeclarativeTool>) =>
getToolRegistry: () => ({
getTool: (name: string) => tools[name],
}),
getContentGenerator: () => ({
// Default to showing full thinking content during resume unless explicitly
// summarized; tests don't care about summarized thinking behavior.
useSummarizedThinking: () => false,
}),
}) as unknown as Config;
describe('resumeHistoryUtils', () => {

View File

@@ -204,11 +204,7 @@ function convertToHistoryItems(
const parts = record.message?.parts as Part[] | undefined;
// Extract thought content
const thoughtText = !config
.getContentGenerator()
.useSummarizedThinking()
? extractThoughtTextFromParts(parts)
: '';
const thoughtText = extractThoughtTextFromParts(parts);
// Extract text content (non-function-call, non-thought)
const text = extractTextFromParts(parts);

View File

@@ -153,8 +153,7 @@ export async function getExtendedSystemInfo(
// Get base URL if using OpenAI auth
const baseUrl =
baseInfo.selectedAuthType === AuthType.USE_OPENAI ||
baseInfo.selectedAuthType === AuthType.USE_ANTHROPIC
baseInfo.selectedAuthType === AuthType.USE_OPENAI
? context.services.config?.getContentGeneratorConfig()?.baseUrl
: undefined;

View File

@@ -19,9 +19,6 @@ describe('validateNonInterActiveAuth', () => {
let originalEnvVertexAi: string | undefined;
let originalEnvGcp: string | undefined;
let originalEnvOpenAiApiKey: string | undefined;
let originalEnvQwenOauth: string | undefined;
let originalEnvGoogleApiKey: string | undefined;
let originalEnvAnthropicApiKey: string | undefined;
let consoleErrorSpy: ReturnType<typeof vi.spyOn>;
let processExitSpy: ReturnType<typeof vi.spyOn<[code?: number], never>>;
let refreshAuthMock: ReturnType<typeof vi.fn>;
@@ -32,16 +29,10 @@ describe('validateNonInterActiveAuth', () => {
originalEnvVertexAi = process.env['GOOGLE_GENAI_USE_VERTEXAI'];
originalEnvGcp = process.env['GOOGLE_GENAI_USE_GCA'];
originalEnvOpenAiApiKey = process.env['OPENAI_API_KEY'];
originalEnvQwenOauth = process.env['QWEN_OAUTH'];
originalEnvGoogleApiKey = process.env['GOOGLE_API_KEY'];
originalEnvAnthropicApiKey = process.env['ANTHROPIC_API_KEY'];
delete process.env['GEMINI_API_KEY'];
delete process.env['GOOGLE_GENAI_USE_VERTEXAI'];
delete process.env['GOOGLE_GENAI_USE_GCA'];
delete process.env['OPENAI_API_KEY'];
delete process.env['QWEN_OAUTH'];
delete process.env['GOOGLE_API_KEY'];
delete process.env['ANTHROPIC_API_KEY'];
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => {
throw new Error(`process.exit(${code}) called`);
@@ -89,21 +80,6 @@ describe('validateNonInterActiveAuth', () => {
} else {
delete process.env['OPENAI_API_KEY'];
}
if (originalEnvQwenOauth !== undefined) {
process.env['QWEN_OAUTH'] = originalEnvQwenOauth;
} else {
delete process.env['QWEN_OAUTH'];
}
if (originalEnvGoogleApiKey !== undefined) {
process.env['GOOGLE_API_KEY'] = originalEnvGoogleApiKey;
} else {
delete process.env['GOOGLE_API_KEY'];
}
if (originalEnvAnthropicApiKey !== undefined) {
process.env['ANTHROPIC_API_KEY'] = originalEnvAnthropicApiKey;
} else {
delete process.env['ANTHROPIC_API_KEY'];
}
vi.restoreAllMocks();
});

View File

@@ -27,9 +27,6 @@ function getAuthTypeFromEnv(): AuthType | undefined {
if (process.env['GOOGLE_API_KEY']) {
return AuthType.USE_VERTEX_AI;
}
if (process.env['ANTHROPIC_API_KEY']) {
return AuthType.USE_ANTHROPIC;
}
return undefined;
}

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"description": "Qwen Code Core",
"repository": {
"type": "git",
@@ -23,7 +23,6 @@
"scripts/postinstall.js"
],
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
"@google/genai": "1.30.0",
"@modelcontextprotocol/sdk": "^1.25.1",
"@opentelemetry/api": "^1.9.0",

View File

@@ -16,6 +16,7 @@ import {
QwenLogger,
} from '../telemetry/index.js';
import type { ContentGeneratorConfig } from '../core/contentGenerator.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import {
AuthType,
createContentGeneratorConfig,
@@ -272,7 +273,7 @@ describe('Server Config (config.ts)', () => {
authType,
{
model: MODEL,
baseUrl: undefined,
baseUrl: DEFAULT_DASHSCOPE_BASE_URL,
},
);
// Verify that contentGeneratorConfig is updated

View File

@@ -96,6 +96,7 @@ import {
} from './constants.js';
import { DEFAULT_QWEN_EMBEDDING_MODEL, DEFAULT_QWEN_MODEL } from './models.js';
import { Storage } from './storage.js';
import { DEFAULT_DASHSCOPE_BASE_URL } from '../core/openaiContentGenerator/constants.js';
import { ChatRecordingService } from '../services/chatRecordingService.js';
import {
SessionService,
@@ -573,7 +574,7 @@ export class Config {
this._generationConfig = {
model: params.model,
...(params.generationConfig || {}),
baseUrl: params.generationConfig?.baseUrl,
baseUrl: params.generationConfig?.baseUrl || DEFAULT_DASHSCOPE_BASE_URL,
};
this.contentGeneratorConfig = this
._generationConfig as ContentGeneratorConfig;

View File

@@ -1,500 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import type {
CountTokensParameters,
GenerateContentParameters,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
// Mock the request tokenizer module BEFORE importing the class that uses it.
const mockTokenizer = {
calculateTokens: vi.fn(),
dispose: vi.fn(),
};
vi.mock('../../utils/request-tokenizer/index.js', () => ({
getDefaultTokenizer: vi.fn(() => mockTokenizer),
DefaultRequestTokenizer: vi.fn(() => mockTokenizer),
disposeDefaultTokenizer: vi.fn(),
}));
type AnthropicCreateArgs = [unknown, { signal?: AbortSignal }?];
const anthropicMockState: {
constructorOptions?: Record<string, unknown>;
lastCreateArgs?: AnthropicCreateArgs;
createImpl: ReturnType<typeof vi.fn>;
} = {
constructorOptions: undefined,
lastCreateArgs: undefined,
createImpl: vi.fn(),
};
vi.mock('@anthropic-ai/sdk', () => {
class AnthropicMock {
messages: { create: (...args: AnthropicCreateArgs) => unknown };
constructor(options: Record<string, unknown>) {
anthropicMockState.constructorOptions = options;
this.messages = {
create: (...args: AnthropicCreateArgs) => {
anthropicMockState.lastCreateArgs = args;
return anthropicMockState.createImpl(...args);
},
};
}
}
return {
default: AnthropicMock,
__anthropicState: anthropicMockState,
};
});
// Now import the modules that depend on the mocked modules.
import type { Config } from '../../config/config.js';
const importGenerator = async (): Promise<{
AnthropicContentGenerator: typeof import('./anthropicContentGenerator.js').AnthropicContentGenerator;
}> => import('./anthropicContentGenerator.js');
const importConverter = async (): Promise<{
AnthropicContentConverter: typeof import('./converter.js').AnthropicContentConverter;
}> => import('./converter.js');
describe('AnthropicContentGenerator', () => {
let mockConfig: Config;
let anthropicState: {
constructorOptions?: Record<string, unknown>;
lastCreateArgs?: AnthropicCreateArgs;
createImpl: ReturnType<typeof vi.fn>;
};
beforeEach(async () => {
vi.clearAllMocks();
vi.resetModules();
mockTokenizer.calculateTokens.mockResolvedValue({
totalTokens: 50,
breakdown: {
textTokens: 50,
imageTokens: 0,
audioTokens: 0,
otherTokens: 0,
},
processingTime: 1,
});
anthropicState = anthropicMockState;
anthropicState.createImpl.mockReset();
anthropicState.lastCreateArgs = undefined;
anthropicState.constructorOptions = undefined;
mockConfig = {
getCliVersion: vi.fn().mockReturnValue('1.2.3'),
} as unknown as Config;
});
afterEach(() => {
vi.restoreAllMocks();
});
it('passes a QwenCode User-Agent header to the Anthropic SDK', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['User-Agent']).toContain('QwenCode/1.2.3');
expect(headers['User-Agent']).toContain(
`(${process.platform}; ${process.arch})`,
);
});
it('adds the effort beta header when reasoning.effort is set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: { effort: 'medium' },
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).toContain('effort-2025-11-24');
});
it('does not add the effort beta header when reasoning.effort is not set', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).not.toContain('effort-2025-11-24');
});
it('omits the anthropic beta header when reasoning is disabled', async () => {
const { AnthropicContentGenerator } = await importGenerator();
void new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
reasoning: false,
},
mockConfig,
);
const headers = (anthropicState.constructorOptions?.['defaultHeaders'] ||
{}) as Record<string, string>;
expect(headers['anthropic-beta']).toBeUndefined();
});
describe('generateContent', () => {
it('builds request with config sampling params (config overrides request) and thinking budget', async () => {
const { AnthropicContentConverter } = await importConverter();
const { AnthropicContentGenerator } = await importGenerator();
const convertResponseSpy = vi
.spyOn(
AnthropicContentConverter.prototype,
'convertAnthropicResponseToGemini',
)
.mockReturnValue(
(() => {
const r = new GenerateContentResponse();
r.responseId = 'gemini-1';
return r;
})(),
);
anthropicState.createImpl.mockResolvedValue({
id: 'anthropic-1',
model: 'claude-test',
content: [{ type: 'text', text: 'hi' }],
});
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
baseUrl: 'https://example.invalid',
timeout: 10_000,
maxRetries: 2,
samplingParams: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
top_k: 20,
},
schemaCompliance: 'auto',
reasoning: { effort: 'high', budget_tokens: 1000 },
},
mockConfig,
);
const abortController = new AbortController();
const request: GenerateContentParameters = {
model: 'models/ignored',
contents: 'Hello',
config: {
temperature: 0.1,
maxOutputTokens: 200,
topP: 0.5,
topK: 5,
abortSignal: abortController.signal,
},
};
const result = await generator.generateContent(request);
expect(result.responseId).toBe('gemini-1');
expect(anthropicState.lastCreateArgs).toBeDefined();
const [anthropicRequest, options] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(options?.signal).toBe(abortController.signal);
expect(anthropicRequest).toEqual(
expect.objectContaining({
model: 'claude-test',
max_tokens: 1000,
temperature: 0.7,
top_p: 0.9,
top_k: 20,
thinking: { type: 'enabled', budget_tokens: 1000 },
output_config: { effort: 'high' },
}),
);
expect(convertResponseSpy).toHaveBeenCalledTimes(1);
});
it('omits thinking when request.config.thinkingConfig.includeThoughts is false', async () => {
const { AnthropicContentGenerator } = await importGenerator();
anthropicState.createImpl.mockResolvedValue({
id: 'anthropic-1',
model: 'claude-test',
content: [{ type: 'text', text: 'hi' }],
});
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: { max_tokens: 500 },
schemaCompliance: 'auto',
reasoning: { effort: 'high' },
},
mockConfig,
);
await generator.generateContent({
model: 'models/ignored',
contents: 'Hello',
config: { thinkingConfig: { includeThoughts: false } },
} as unknown as GenerateContentParameters);
const [anthropicRequest] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(anthropicRequest).toEqual(
expect.not.objectContaining({ thinking: expect.anything() }),
);
});
});
describe('countTokens', () => {
it('counts tokens using the request tokenizer', async () => {
const { AnthropicContentGenerator } = await importGenerator();
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const request: CountTokensParameters = {
contents: [{ role: 'user', parts: [{ text: 'Hello world' }] }],
model: 'claude-test',
};
const result = await generator.countTokens(request);
expect(mockTokenizer.calculateTokens).toHaveBeenCalledWith(request, {
textEncoding: 'cl100k_base',
});
expect(result.totalTokens).toBe(50);
});
it('falls back to character approximation when tokenizer throws', async () => {
const { AnthropicContentGenerator } = await importGenerator();
mockTokenizer.calculateTokens.mockRejectedValueOnce(new Error('boom'));
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: {},
schemaCompliance: 'auto',
},
mockConfig,
);
const request: CountTokensParameters = {
contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
model: 'claude-test',
};
const content = JSON.stringify(request.contents);
const expected = Math.ceil(content.length / 4);
const result = await generator.countTokens(request);
expect(result.totalTokens).toBe(expected);
});
});
describe('generateContentStream', () => {
it('requests stream=true and converts streamed events into Gemini chunks', async () => {
const { AnthropicContentGenerator } = await importGenerator();
anthropicState.createImpl.mockResolvedValue(
(async function* () {
yield {
type: 'message_start',
message: {
id: 'msg-1',
model: 'claude-test',
usage: { cache_read_input_tokens: 2, input_tokens: 3 },
},
};
yield {
type: 'content_block_start',
index: 0,
content_block: { type: 'text' },
};
yield {
type: 'content_block_delta',
index: 0,
delta: { type: 'text_delta', text: 'Hello' },
};
yield { type: 'content_block_stop', index: 0 };
yield {
type: 'content_block_start',
index: 1,
content_block: { type: 'thinking', signature: '' },
};
yield {
type: 'content_block_delta',
index: 1,
delta: { type: 'thinking_delta', thinking: 'Think' },
};
yield {
type: 'content_block_delta',
index: 1,
delta: { type: 'signature_delta', signature: 'abc' },
};
yield { type: 'content_block_stop', index: 1 };
yield {
type: 'content_block_start',
index: 2,
content_block: {
type: 'tool_use',
id: 't1',
name: 'tool',
input: {},
},
};
yield {
type: 'content_block_delta',
index: 2,
delta: { type: 'input_json_delta', partial_json: '{"x":' },
};
yield {
type: 'content_block_delta',
index: 2,
delta: { type: 'input_json_delta', partial_json: '1}' },
};
yield { type: 'content_block_stop', index: 2 };
yield {
type: 'message_delta',
delta: { stop_reason: 'end_turn' },
usage: {
output_tokens: 5,
input_tokens: 7,
cache_read_input_tokens: 2,
},
};
yield { type: 'message_stop' };
})(),
);
const generator = new AnthropicContentGenerator(
{
model: 'claude-test',
apiKey: 'test-key',
timeout: 10_000,
maxRetries: 2,
samplingParams: { max_tokens: 123 },
schemaCompliance: 'auto',
},
mockConfig,
);
const stream = await generator.generateContentStream({
model: 'models/ignored',
contents: 'Hello',
} as unknown as GenerateContentParameters);
const chunks: GenerateContentResponse[] = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
const [anthropicRequest] =
anthropicState.lastCreateArgs as AnthropicCreateArgs;
expect(anthropicRequest).toEqual(
expect.objectContaining({ stream: true }),
);
// Text chunk.
expect(chunks[0]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
text: 'Hello',
});
// Thinking chunk.
expect(chunks[1]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
text: 'Think',
thought: true,
});
// Signature chunk.
expect(chunks[2]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
thought: true,
thoughtSignature: 'abc',
});
// Tool call chunk.
expect(chunks[3]?.candidates?.[0]?.content?.parts?.[0]).toEqual({
functionCall: { id: 't1', name: 'tool', args: { x: 1 } },
});
// Usage/finish chunks exist; check the last one.
const last = chunks[chunks.length - 1]!;
expect(last.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
expect(last.usageMetadata).toEqual({
cachedContentTokenCount: 2,
promptTokenCount: 9, // cached(2) + input(7)
candidatesTokenCount: 5,
totalTokenCount: 14,
});
});
});
});
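
In the streaming test above, the tool-call arguments arrive as two input_json_delta fragments ('{"x":' and '1}'). The generator under test concatenates the fragments per content block and parses the result once the block stops. A minimal TypeScript sketch of that assembly (JSON.parse stands in for the safeJsonParse helper the real code uses):

let inputJson = '';
for (const partialJson of ['{"x":', '1}']) {
  inputJson += partialJson; // fragments are appended in arrival order
}
const args = JSON.parse(inputJson || '{}');
console.log(args); // { x: 1 }, matching the asserted functionCall args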

View File

@@ -1,502 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import Anthropic from '@anthropic-ai/sdk';
import type {
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
Part,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
type Message = Anthropic.Message;
type MessageCreateParamsNonStreaming =
Anthropic.MessageCreateParamsNonStreaming;
type MessageCreateParamsStreaming = Anthropic.MessageCreateParamsStreaming;
type RawMessageStreamEvent = Anthropic.RawMessageStreamEvent;
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import { AnthropicContentConverter } from './converter.js';
type StreamingBlockState = {
type: string;
id?: string;
name?: string;
inputJson: string;
signature: string;
};
type MessageCreateParamsWithThinking = MessageCreateParamsNonStreaming & {
thinking?: { type: 'enabled'; budget_tokens: number };
// Anthropic beta feature: output_config.effort (requires beta header effort-2025-11-24)
// This is not yet represented in the official SDK types we depend on.
output_config?: { effort: 'low' | 'medium' | 'high' };
};
export class AnthropicContentGenerator implements ContentGenerator {
private client: Anthropic;
private converter: AnthropicContentConverter;
constructor(
private contentGeneratorConfig: ContentGeneratorConfig,
private readonly cliConfig: Config,
) {
const defaultHeaders = this.buildHeaders();
const baseURL = contentGeneratorConfig.baseUrl;
this.client = new Anthropic({
apiKey: contentGeneratorConfig.apiKey,
baseURL,
timeout: contentGeneratorConfig.timeout,
maxRetries: contentGeneratorConfig.maxRetries,
defaultHeaders,
});
this.converter = new AnthropicContentConverter(
contentGeneratorConfig.model,
contentGeneratorConfig.schemaCompliance,
);
}
async generateContent(
request: GenerateContentParameters,
): Promise<GenerateContentResponse> {
const anthropicRequest = await this.buildRequest(request);
const response = (await this.client.messages.create(anthropicRequest, {
signal: request.config?.abortSignal,
})) as Message;
return this.converter.convertAnthropicResponseToGemini(response);
}
async generateContentStream(
request: GenerateContentParameters,
): Promise<AsyncGenerator<GenerateContentResponse>> {
const anthropicRequest = await this.buildRequest(request);
const streamingRequest: MessageCreateParamsStreaming & {
thinking?: { type: 'enabled'; budget_tokens: number };
} = {
...anthropicRequest,
stream: true,
};
const stream = (await this.client.messages.create(
streamingRequest as MessageCreateParamsStreaming,
{
signal: request.config?.abortSignal,
},
)) as AsyncIterable<RawMessageStreamEvent>;
return this.processStream(stream);
}
async countTokens(
request: CountTokensParameters,
): Promise<CountTokensResponse> {
try {
const tokenizer = getDefaultTokenizer();
const result = await tokenizer.calculateTokens(request, {
textEncoding: 'cl100k_base',
});
return {
totalTokens: result.totalTokens,
};
} catch (error) {
console.warn(
'Failed to calculate tokens with tokenizer, ' +
'falling back to simple method:',
error,
);
const content = JSON.stringify(request.contents);
const totalTokens = Math.ceil(content.length / 4);
return {
totalTokens,
};
}
}
async embedContent(
_request: EmbedContentParameters,
): Promise<EmbedContentResponse> {
throw new Error('Anthropic does not support embeddings.');
}
useSummarizedThinking(): boolean {
return false;
}
private buildHeaders(): Record<string, string> {
const version = this.cliConfig.getCliVersion() || 'unknown';
const userAgent = `QwenCode/${version} (${process.platform}; ${process.arch})`;
const betas: string[] = [];
const reasoning = this.contentGeneratorConfig.reasoning;
// Interleaved thinking is used when we send the `thinking` field.
if (reasoning !== false) {
betas.push('interleaved-thinking-2025-05-14');
}
// Effort (beta) is enabled when reasoning.effort is set.
if (reasoning !== false && reasoning?.effort !== undefined) {
betas.push('effort-2025-11-24');
}
const headers: Record<string, string> = {
'User-Agent': userAgent,
};
if (betas.length) {
headers['anthropic-beta'] = betas.join(',');
}
return headers;
}
private async buildRequest(
request: GenerateContentParameters,
): Promise<MessageCreateParamsWithThinking> {
const { system, messages } =
this.converter.convertGeminiRequestToAnthropic(request);
const tools = request.config?.tools
? await this.converter.convertGeminiToolsToAnthropic(request.config.tools)
: undefined;
const sampling = this.buildSamplingParameters(request);
const thinking = this.buildThinkingConfig(request);
const outputConfig = this.buildOutputConfig();
return {
model: this.contentGeneratorConfig.model,
system,
messages,
tools,
...sampling,
...(thinking ? { thinking } : {}),
...(outputConfig ? { output_config: outputConfig } : {}),
};
}
private buildSamplingParameters(request: GenerateContentParameters): {
max_tokens: number;
temperature?: number;
top_p?: number;
top_k?: number;
} {
const configSamplingParams = this.contentGeneratorConfig.samplingParams;
const requestConfig = request.config || {};
const getParam = <T>(
configKey: keyof NonNullable<typeof configSamplingParams>,
requestKey?: keyof NonNullable<typeof requestConfig>,
): T | undefined => {
const configValue = configSamplingParams?.[configKey] as T | undefined;
const requestValue = requestKey
? (requestConfig[requestKey] as T | undefined)
: undefined;
return configValue !== undefined ? configValue : requestValue;
};
const maxTokens =
getParam<number>('max_tokens', 'maxOutputTokens') ?? 10_000;
return {
max_tokens: maxTokens,
temperature: getParam<number>('temperature', 'temperature') ?? 1,
top_p: getParam<number>('top_p', 'topP'),
top_k: getParam<number>('top_k', 'topK'),
};
}
private buildThinkingConfig(
request: GenerateContentParameters,
): { type: 'enabled'; budget_tokens: number } | undefined {
if (request.config?.thinkingConfig?.includeThoughts === false) {
return undefined;
}
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false) {
return undefined;
}
if (reasoning?.budget_tokens !== undefined) {
return {
type: 'enabled',
budget_tokens: reasoning.budget_tokens,
};
}
const effort = reasoning?.effort;
// When using interleaved thinking with tools, this budget token limit is the entire context window (200k tokens).
const budgetTokens =
effort === 'low' ? 16_000 : effort === 'high' ? 64_000 : 32_000;
return {
type: 'enabled',
budget_tokens: budgetTokens,
};
}
private buildOutputConfig():
| { effort: 'low' | 'medium' | 'high' }
| undefined {
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false || reasoning === undefined) {
return undefined;
}
if (reasoning.effort === undefined) {
return undefined;
}
return { effort: reasoning.effort };
}
private async *processStream(
stream: AsyncIterable<RawMessageStreamEvent>,
): AsyncGenerator<GenerateContentResponse> {
let messageId: string | undefined;
let model = this.contentGeneratorConfig.model;
let cachedTokens = 0;
let promptTokens = 0;
let completionTokens = 0;
let finishReason: string | undefined;
const blocks = new Map<number, StreamingBlockState>();
const collectedResponses: GenerateContentResponse[] = [];
for await (const event of stream) {
switch (event.type) {
case 'message_start': {
messageId = event.message.id ?? messageId;
model = event.message.model ?? model;
cachedTokens =
event.message.usage?.cache_read_input_tokens ?? cachedTokens;
promptTokens = event.message.usage?.input_tokens ?? promptTokens;
break;
}
case 'content_block_start': {
const index = event.index ?? 0;
const type = String(event.content_block.type || 'text');
const initialInput =
type === 'tool_use' && 'input' in event.content_block
? JSON.stringify(event.content_block.input)
: '';
blocks.set(index, {
type,
id:
'id' in event.content_block ? event.content_block.id : undefined,
name:
'name' in event.content_block
? event.content_block.name
: undefined,
inputJson: initialInput !== '{}' ? initialInput : '',
signature:
type === 'thinking' &&
'signature' in event.content_block &&
typeof event.content_block.signature === 'string'
? event.content_block.signature
: '',
});
break;
}
case 'content_block_delta': {
const index = event.index ?? 0;
const deltaType = (event.delta as { type?: string }).type || '';
const blockState = blocks.get(index);
if (deltaType === 'text_delta') {
const text = 'text' in event.delta ? event.delta.text : '';
if (text) {
const chunk = this.buildGeminiChunk({ text }, messageId, model);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'thinking_delta') {
const thinking =
(event.delta as { thinking?: string }).thinking || '';
if (thinking) {
const chunk = this.buildGeminiChunk(
{ text: thinking, thought: true },
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'signature_delta' && blockState) {
const signature =
(event.delta as { signature?: string }).signature || '';
if (signature) {
blockState.signature += signature;
const chunk = this.buildGeminiChunk(
{ thought: true, thoughtSignature: signature },
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
} else if (deltaType === 'input_json_delta' && blockState) {
const jsonDelta =
(event.delta as { partial_json?: string }).partial_json || '';
if (jsonDelta) {
blockState.inputJson += jsonDelta;
}
}
break;
}
case 'content_block_stop': {
const index = event.index ?? 0;
const blockState = blocks.get(index);
if (blockState?.type === 'tool_use') {
const args = safeJsonParse(blockState.inputJson || '{}', {});
const chunk = this.buildGeminiChunk(
{
functionCall: {
id: blockState.id,
name: blockState.name,
args,
},
},
messageId,
model,
);
collectedResponses.push(chunk);
yield chunk;
}
blocks.delete(index);
break;
}
case 'message_delta': {
const stopReasonValue = event.delta.stop_reason;
if (stopReasonValue) {
finishReason = stopReasonValue;
}
// Some Anthropic-compatible providers may include additional usage fields
// (e.g. `input_tokens`, `cache_read_input_tokens`) even though the official
// Anthropic SDK types only expose `output_tokens` here.
const usageUnknown = event.usage as unknown;
const usageRecord =
usageUnknown && typeof usageUnknown === 'object'
? (usageUnknown as Record<string, unknown>)
: undefined;
if (event.usage?.output_tokens !== undefined) {
completionTokens = event.usage.output_tokens;
}
if (usageRecord?.['input_tokens'] !== undefined) {
const inputTokens = usageRecord['input_tokens'];
if (typeof inputTokens === 'number') {
promptTokens = inputTokens;
}
}
if (usageRecord?.['cache_read_input_tokens'] !== undefined) {
const cacheRead = usageRecord['cache_read_input_tokens'];
if (typeof cacheRead === 'number') {
cachedTokens = cacheRead;
}
}
if (finishReason || event.usage) {
const chunk = this.buildGeminiChunk(
undefined,
messageId,
model,
finishReason,
{
cachedContentTokenCount: cachedTokens,
promptTokenCount: cachedTokens + promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: cachedTokens + promptTokens + completionTokens,
},
);
collectedResponses.push(chunk);
yield chunk;
}
break;
}
case 'message_stop': {
if (promptTokens || completionTokens) {
const chunk = this.buildGeminiChunk(
undefined,
messageId,
model,
finishReason,
{
cachedContentTokenCount: cachedTokens,
promptTokenCount: cachedTokens + promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: cachedTokens + promptTokens + completionTokens,
},
);
collectedResponses.push(chunk);
yield chunk;
}
break;
}
default:
break;
}
}
}
private buildGeminiChunk(
part?: {
text?: string;
thought?: boolean;
thoughtSignature?: string;
functionCall?: unknown;
},
responseId?: string,
model?: string,
finishReason?: string,
usageMetadata?: GenerateContentResponseUsageMetadata,
): GenerateContentResponse {
const response = new GenerateContentResponse();
response.responseId = responseId;
response.createTime = Date.now().toString();
response.modelVersion = model || this.contentGeneratorConfig.model;
response.promptFeedback = { safetyRatings: [] };
const candidateParts = part ? [part as unknown as Part] : [];
const mappedFinishReason =
finishReason !== undefined
? this.converter.mapAnthropicFinishReasonToGemini(finishReason)
: undefined;
response.candidates = [
{
content: {
parts: candidateParts,
role: 'model' as const,
},
index: 0,
safetyRatings: [],
...(mappedFinishReason ? { finishReason: mappedFinishReason } : {}),
},
];
if (usageMetadata) {
response.usageMetadata = usageMetadata;
}
return response;
}
}
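
To make the usage accounting in processStream concrete: cache reads are folded back into the Gemini-style prompt count, so with the numbers from the stream test above (cache_read_input_tokens 2, input_tokens 7, output_tokens 5) the final chunk's metadata works out as below. A worked TypeScript sketch:

const cachedTokens = 2; // cache_read_input_tokens
const promptTokens = 7; // input_tokens, treated as excluding cache reads
const completionTokens = 5; // output_tokens
const usageMetadata = {
  cachedContentTokenCount: cachedTokens, // 2
  promptTokenCount: cachedTokens + promptTokens, // 9
  candidatesTokenCount: completionTokens, // 5
  totalTokenCount: cachedTokens + promptTokens + completionTokens, // 14
};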

View File

@@ -1,377 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { CallableTool, Content, Tool } from '@google/genai';
import { FinishReason } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
// Mock schema conversion so we can force edge-cases (e.g. missing `type`).
vi.mock('../../utils/schemaConverter.js', () => ({
convertSchema: vi.fn((schema: unknown) => schema),
}));
import { convertSchema } from '../../utils/schemaConverter.js';
import { AnthropicContentConverter } from './converter.js';
describe('AnthropicContentConverter', () => {
let converter: AnthropicContentConverter;
beforeEach(() => {
vi.clearAllMocks();
converter = new AnthropicContentConverter('test-model', 'auto');
});
describe('convertGeminiRequestToAnthropic', () => {
it('extracts systemInstruction text from string', () => {
const { system } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'hi',
config: { systemInstruction: 'sys' },
});
expect(system).toBe('sys');
});
it('extracts systemInstruction text from parts and joins with newlines', () => {
const { system } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'hi',
config: {
systemInstruction: {
role: 'system',
parts: [{ text: 'a' }, { text: 'b' }],
} as unknown as Content,
},
});
expect(system).toBe('a\nb');
});
it('converts a plain string content into a user message', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: 'Hello',
});
expect(messages).toEqual([
{ role: 'user', content: [{ type: 'text', text: 'Hello' }] },
]);
});
it('converts user content parts into a user message with text blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [{ text: 'Hello' }, { text: 'World' }],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'text', text: 'World' },
],
},
]);
});
it('converts assistant thought parts into Anthropic thinking blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{ text: 'internal', thought: true, thoughtSignature: 'sig' },
{ text: 'visible' },
],
},
],
});
expect(messages).toEqual([
{
role: 'assistant',
content: [
{ type: 'thinking', thinking: 'internal', signature: 'sig' },
{ type: 'text', text: 'visible' },
],
},
]);
});
it('converts functionCall parts from model role into tool_use blocks', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{ text: 'preface' },
{
functionCall: {
id: 'call-1',
name: 'tool_name',
args: { a: 1 },
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'assistant',
content: [
{ type: 'text', text: 'preface' },
{
type: 'tool_use',
id: 'call-1',
name: 'tool_name',
input: { a: 1 },
},
],
},
]);
});
it('converts functionResponse parts into user tool_result messages', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'tool_name',
response: { output: 'ok' },
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: 'ok',
},
],
},
]);
});
it('extracts function response error field when present', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'tool_name',
response: { error: 'boom' },
},
},
],
},
],
});
expect(messages[0]).toEqual({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: 'boom',
},
],
});
});
});
describe('convertGeminiToolsToAnthropic', () => {
it('converts Tool.functionDeclarations to Anthropic tools and runs schema conversion', async () => {
const tools = [
{
functionDeclarations: [
{
name: 'get_weather',
description: 'Get weather',
parametersJsonSchema: {
type: 'object',
properties: { location: { type: 'string' } },
required: ['location'],
},
},
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
name: 'get_weather',
description: 'Get weather',
input_schema: {
type: 'object',
properties: { location: { type: 'string' } },
required: ['location'],
},
});
expect(vi.mocked(convertSchema)).toHaveBeenCalledTimes(1);
});
it('resolves CallableTool.tool() and converts its functionDeclarations', async () => {
const callable = [
{
tool: async () =>
({
functionDeclarations: [
{
name: 'dynamic_tool',
description: 'resolved tool',
parametersJsonSchema: { type: 'object', properties: {} },
},
],
}) as unknown as Tool,
},
] as CallableTool[];
const result = await converter.convertGeminiToolsToAnthropic(callable);
expect(result).toHaveLength(1);
expect(result[0].name).toBe('dynamic_tool');
});
it('defaults missing parameters to an empty object schema', async () => {
const tools = [
{
functionDeclarations: [
{ name: 'no_params', description: 'no params' },
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
name: 'no_params',
description: 'no params',
input_schema: { type: 'object', properties: {} },
});
});
it('forces input_schema.type to "object" when schema conversion yields no type', async () => {
vi.mocked(convertSchema).mockImplementationOnce(() => ({
properties: {},
}));
const tools = [
{
functionDeclarations: [
{
name: 'edge',
description: 'edge',
parametersJsonSchema: { type: 'object', properties: {} },
},
],
},
] as Tool[];
const result = await converter.convertGeminiToolsToAnthropic(tools);
expect(result[0]?.input_schema?.type).toBe('object');
});
});
describe('convertAnthropicResponseToGemini', () => {
it('converts text, tool_use, thinking, and redacted_thinking blocks', () => {
const response = converter.convertAnthropicResponseToGemini({
id: 'msg-1',
model: 'claude-test',
stop_reason: 'end_turn',
content: [
{ type: 'thinking', thinking: 'thought', signature: 'sig' },
{ type: 'text', text: 'hello' },
{ type: 'tool_use', id: 't1', name: 'tool', input: { x: 1 } },
{ type: 'redacted_thinking' },
],
usage: { input_tokens: 3, output_tokens: 5 },
} as unknown as Anthropic.Message);
expect(response.responseId).toBe('msg-1');
expect(response.modelVersion).toBe('claude-test');
expect(response.candidates?.[0]?.finishReason).toBe(FinishReason.STOP);
expect(response.usageMetadata).toEqual({
promptTokenCount: 3,
candidatesTokenCount: 5,
totalTokenCount: 8,
});
const parts = response.candidates?.[0]?.content?.parts || [];
expect(parts).toEqual([
{ text: 'thought', thought: true, thoughtSignature: 'sig' },
{ text: 'hello' },
{ functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
{ text: '', thought: true },
]);
});
it('handles tool_use input that is a JSON string', () => {
const response = converter.convertAnthropicResponseToGemini({
id: 'msg-1',
model: 'claude-test',
stop_reason: null,
content: [
{ type: 'tool_use', id: 't1', name: 'tool', input: '{"x":1}' },
],
} as unknown as Anthropic.Message);
const parts = response.candidates?.[0]?.content?.parts || [];
expect(parts).toEqual([
{ functionCall: { id: 't1', name: 'tool', args: { x: 1 } } },
]);
});
});
describe('mapAnthropicFinishReasonToGemini', () => {
it('maps known reasons', () => {
expect(converter.mapAnthropicFinishReasonToGemini('end_turn')).toBe(
FinishReason.STOP,
);
expect(converter.mapAnthropicFinishReasonToGemini('max_tokens')).toBe(
FinishReason.MAX_TOKENS,
);
expect(converter.mapAnthropicFinishReasonToGemini('content_filter')).toBe(
FinishReason.SAFETY,
);
});
it('returns undefined for null/empty', () => {
expect(converter.mapAnthropicFinishReasonToGemini(null)).toBeUndefined();
expect(converter.mapAnthropicFinishReasonToGemini('')).toBeUndefined();
});
});
});
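
Taken together, these tests pin down a small conversion surface. Illustrative usage, with values lifted from the cases above (not an additional API):

import { AnthropicContentConverter } from './converter.js';

// Convert a minimal Gemini-style request; expected output per the tests above.
const converter = new AnthropicContentConverter('test-model', 'auto');
const { system, messages } = converter.convertGeminiRequestToAnthropic({
  model: 'models/test',
  contents: 'Hello',
  config: { systemInstruction: 'sys' },
});
// system === 'sys'
// messages deep-equals [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }]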

View File

@@ -1,448 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type {
Candidate,
CallableTool,
Content,
ContentListUnion,
ContentUnion,
FunctionCall,
FunctionResponse,
GenerateContentParameters,
Part,
PartUnion,
Tool,
ToolListUnion,
} from '@google/genai';
import { FinishReason, GenerateContentResponse } from '@google/genai';
import type Anthropic from '@anthropic-ai/sdk';
import { safeJsonParse } from '../../utils/safeJsonParse.js';
import {
convertSchema,
type SchemaComplianceMode,
} from '../../utils/schemaConverter.js';
type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;
type ThoughtPart = { text: string; signature?: string };
interface ParsedParts {
thoughtParts: ThoughtPart[];
contentParts: string[];
functionCalls: FunctionCall[];
functionResponses: FunctionResponse[];
}
export class AnthropicContentConverter {
private model: string;
private schemaCompliance: SchemaComplianceMode;
constructor(model: string, schemaCompliance: SchemaComplianceMode = 'auto') {
this.model = model;
this.schemaCompliance = schemaCompliance;
}
convertGeminiRequestToAnthropic(request: GenerateContentParameters): {
system?: string;
messages: AnthropicMessageParam[];
} {
const messages: AnthropicMessageParam[] = [];
const system = this.extractTextFromContentUnion(
request.config?.systemInstruction,
);
this.processContents(request.contents, messages);
return {
system: system || undefined,
messages,
};
}
async convertGeminiToolsToAnthropic(
geminiTools: ToolListUnion,
): Promise<AnthropicToolParam[]> {
const tools: AnthropicToolParam[] = [];
for (const tool of geminiTools) {
let actualTool: Tool;
if ('tool' in tool) {
actualTool = await (tool as CallableTool).tool();
} else {
actualTool = tool as Tool;
}
if (!actualTool.functionDeclarations) {
continue;
}
for (const func of actualTool.functionDeclarations) {
if (!func.name) continue;
let inputSchema: Record<string, unknown> | undefined;
if (func.parametersJsonSchema) {
inputSchema = {
...(func.parametersJsonSchema as Record<string, unknown>),
};
} else if (func.parameters) {
inputSchema = func.parameters as Record<string, unknown>;
}
if (!inputSchema) {
inputSchema = { type: 'object', properties: {} };
}
inputSchema = convertSchema(inputSchema, this.schemaCompliance);
if (typeof inputSchema['type'] !== 'string') {
inputSchema['type'] = 'object';
}
tools.push({
name: func.name,
description: func.description,
input_schema: inputSchema as Anthropic.Tool.InputSchema,
});
}
}
return tools;
}
convertAnthropicResponseToGemini(
response: Anthropic.Message,
): GenerateContentResponse {
const geminiResponse = new GenerateContentResponse();
const parts: Part[] = [];
for (const block of response.content || []) {
const blockType = String((block as { type?: string })['type'] || '');
if (blockType === 'text') {
const text =
typeof (block as { text?: string }).text === 'string'
? (block as { text?: string }).text
: '';
if (text) {
parts.push({ text });
}
} else if (blockType === 'tool_use') {
const toolUse = block as {
id?: string;
name?: string;
input?: unknown;
};
parts.push({
functionCall: {
id: typeof toolUse.id === 'string' ? toolUse.id : undefined,
name: typeof toolUse.name === 'string' ? toolUse.name : undefined,
args: this.safeInputToArgs(toolUse.input),
},
});
} else if (blockType === 'thinking') {
const thinking =
typeof (block as { thinking?: string }).thinking === 'string'
? (block as { thinking?: string }).thinking
: '';
const signature =
typeof (block as { signature?: string }).signature === 'string'
? (block as { signature?: string }).signature
: '';
if (thinking || signature) {
const thoughtPart: Part = {
text: thinking,
thought: true,
thoughtSignature: signature,
};
parts.push(thoughtPart);
}
} else if (blockType === 'redacted_thinking') {
parts.push({ text: '', thought: true });
}
}
const candidate: Candidate = {
content: {
parts,
role: 'model' as const,
},
index: 0,
safetyRatings: [],
};
const finishReason = this.mapAnthropicFinishReasonToGemini(
response.stop_reason,
);
if (finishReason) {
candidate.finishReason = finishReason;
}
geminiResponse.candidates = [candidate];
geminiResponse.responseId = response.id;
geminiResponse.createTime = Date.now().toString();
geminiResponse.modelVersion = response.model || this.model;
geminiResponse.promptFeedback = { safetyRatings: [] };
if (response.usage) {
const promptTokens = response.usage.input_tokens || 0;
const completionTokens = response.usage.output_tokens || 0;
geminiResponse.usageMetadata = {
promptTokenCount: promptTokens,
candidatesTokenCount: completionTokens,
totalTokenCount: promptTokens + completionTokens,
};
}
return geminiResponse;
}
private processContents(
contents: ContentListUnion,
messages: AnthropicMessageParam[],
): void {
if (Array.isArray(contents)) {
for (const content of contents) {
this.processContent(content, messages);
}
} else if (contents) {
this.processContent(contents, messages);
}
}
private processContent(
content: ContentUnion | PartUnion,
messages: AnthropicMessageParam[],
): void {
if (typeof content === 'string') {
messages.push({
role: 'user',
content: [{ type: 'text', text: content }],
});
return;
}
if (!this.isContentObject(content)) return;
const parsed = this.parseParts(content.parts || []);
if (parsed.functionResponses.length > 0) {
for (const response of parsed.functionResponses) {
messages.push({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: response.id || '',
content: this.extractFunctionResponseContent(response.response),
},
],
});
}
return;
}
if (content.role === 'model' && parsed.functionCalls.length > 0) {
const thinkingBlocks: AnthropicContentBlockParam[] =
parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
});
const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
(call, index) => ({
type: 'tool_use',
id: call.id || `tool_${index}`,
name: call.name || '',
input: (call.args as Record<string, unknown>) || {},
}),
);
const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
(text) => ({
type: 'text' as const,
text,
}),
);
messages.push({
role: 'assistant',
content: [...thinkingBlocks, ...textBlocks, ...toolUses],
});
return;
}
const role = content.role === 'model' ? 'assistant' : 'user';
const thinkingBlocks: AnthropicContentBlockParam[] =
role === 'assistant'
? parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
})
: [];
const textBlocks: AnthropicContentBlockParam[] = [
...thinkingBlocks,
...parsed.contentParts.map((text) => ({
type: 'text' as const,
text,
})),
];
if (textBlocks.length > 0) {
messages.push({ role, content: textBlocks });
}
}
private parseParts(parts: Part[]): ParsedParts {
const thoughtParts: ThoughtPart[] = [];
const contentParts: string[] = [];
const functionCalls: FunctionCall[] = [];
const functionResponses: FunctionResponse[] = [];
for (const part of parts) {
if (typeof part === 'string') {
contentParts.push(part);
} else if (
'text' in part &&
part.text &&
!('thought' in part && part.thought)
) {
contentParts.push(part.text);
} else if ('text' in part && 'thought' in part && part.thought) {
thoughtParts.push({
text: part.text || '',
signature:
'thoughtSignature' in part &&
typeof part.thoughtSignature === 'string'
? part.thoughtSignature
: undefined,
});
} else if ('functionCall' in part && part.functionCall) {
functionCalls.push(part.functionCall);
} else if ('functionResponse' in part && part.functionResponse) {
functionResponses.push(part.functionResponse);
}
}
return {
thoughtParts,
contentParts,
functionCalls,
functionResponses,
};
}
private extractTextFromContentUnion(contentUnion: unknown): string {
if (typeof contentUnion === 'string') {
return contentUnion;
}
if (Array.isArray(contentUnion)) {
return contentUnion
.map((item) => this.extractTextFromContentUnion(item))
.filter(Boolean)
.join('\n');
}
if (typeof contentUnion === 'object' && contentUnion !== null) {
if ('parts' in contentUnion) {
const content = contentUnion as Content;
return (
content.parts
?.map((part: Part) => {
if (typeof part === 'string') return part;
if ('text' in part) return part.text || '';
return '';
})
.filter(Boolean)
.join('\n') || ''
);
}
}
return '';
}
private extractFunctionResponseContent(response: unknown): string {
if (response === null || response === undefined) {
return '';
}
if (typeof response === 'string') {
return response;
}
if (typeof response === 'object') {
const responseObject = response as Record<string, unknown>;
const output = responseObject['output'];
if (typeof output === 'string') {
return output;
}
const error = responseObject['error'];
if (typeof error === 'string') {
return error;
}
}
try {
const serialized = JSON.stringify(response);
return serialized ?? String(response);
} catch {
return String(response);
}
}
private safeInputToArgs(input: unknown): Record<string, unknown> {
if (input && typeof input === 'object') {
return input as Record<string, unknown>;
}
if (typeof input === 'string') {
return safeJsonParse(input, {});
}
return {};
}
mapAnthropicFinishReasonToGemini(
reason?: string | null,
): FinishReason | undefined {
if (!reason) return undefined;
const mapping: Record<string, FinishReason> = {
end_turn: FinishReason.STOP,
stop_sequence: FinishReason.STOP,
tool_use: FinishReason.STOP,
max_tokens: FinishReason.MAX_TOKENS,
content_filter: FinishReason.SAFETY,
};
return mapping[reason] || FinishReason.FINISH_REASON_UNSPECIFIED;
}
private isContentObject(
content: unknown,
): content is { role: string; parts: Part[] } {
return (
typeof content === 'object' &&
content !== null &&
'role' in content &&
'parts' in content &&
Array.isArray((content as Record<string, unknown>)['parts'])
);
}
}
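
The response direction works the same way; an illustrative call with values drawn from the tests earlier in this diff:

import type Anthropic from '@anthropic-ai/sdk';
import { AnthropicContentConverter } from './converter.js';

// An `end_turn` Anthropic message maps to FinishReason.STOP, and token usage
// is folded into Gemini-style usageMetadata.
const converter = new AnthropicContentConverter('claude-test');
const res = converter.convertAnthropicResponseToGemini({
  id: 'msg-1',
  model: 'claude-test',
  stop_reason: 'end_turn',
  content: [{ type: 'text', text: 'hello' }],
  usage: { input_tokens: 3, output_tokens: 5 },
} as unknown as Anthropic.Message);
// res.candidates?.[0]?.finishReason === FinishReason.STOP
// res.usageMetadata?.totalTokenCount === 8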

View File

@@ -1,21 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type {
ContentGenerator,
ContentGeneratorConfig,
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { AnthropicContentGenerator } from './anthropicContentGenerator.js';
export { AnthropicContentGenerator } from './anthropicContentGenerator.js';
export function createAnthropicContentGenerator(
contentGeneratorConfig: ContentGeneratorConfig,
cliConfig: Config,
): ContentGenerator {
return new AnthropicContentGenerator(contentGeneratorConfig, cliConfig);
}

View File

@@ -8,7 +8,7 @@ import { describe, it, expect, vi } from 'vitest';
import { createContentGenerator, AuthType } from './contentGenerator.js';
import { GoogleGenAI } from '@google/genai';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
import { LoggingContentGenerator } from './geminiContentGenerator/loggingContentGenerator.js';
vi.mock('@google/genai');

View File

@@ -14,7 +14,6 @@ import type {
} from '@google/genai';
import { DEFAULT_QWEN_MODEL } from '../config/models.js';
import type { Config } from '../config/config.js';
import { LoggingContentGenerator } from './loggingContentGenerator/index.js';
/**
* Interface abstracting the core functionalities for generating content and counting tokens.
@@ -38,11 +37,10 @@ export interface ContentGenerator {
}
export enum AuthType {
USE_GEMINI = 'gemini-api-key',
USE_VERTEX_AI = 'vertex-ai',
USE_OPENAI = 'openai',
QWEN_OAUTH = 'qwen-oauth',
USE_GEMINI = 'gemini',
USE_VERTEX_AI = 'vertex-ai',
USE_ANTHROPIC = 'anthropic',
}
export type ContentGeneratorConfig = {
@@ -65,12 +63,9 @@ export type ContentGeneratorConfig = {
temperature?: number;
max_tokens?: number;
};
reasoning?:
| false
| {
effort?: 'low' | 'medium' | 'high';
budget_tokens?: number;
};
reasoning?: {
effort?: 'low' | 'medium' | 'high';
};
proxy?: string | undefined;
userAgent?: string;
// Schema compliance mode for tool definitions
@@ -82,7 +77,7 @@ export function createContentGeneratorConfig(
authType: AuthType | undefined,
generationConfig?: Partial<ContentGeneratorConfig>,
): ContentGeneratorConfig {
let newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
const newContentGeneratorConfig: Partial<ContentGeneratorConfig> = {
...(generationConfig || {}),
authType,
proxy: config?.getProxy(),
@@ -99,16 +94,8 @@ export function createContentGeneratorConfig(
}
if (authType === AuthType.USE_OPENAI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['OPENAI_API_KEY'],
baseUrl:
newContentGeneratorConfig.baseUrl || process.env['OPENAI_BASE_URL'],
model: newContentGeneratorConfig.model || process.env['OPENAI_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('OPENAI_API_KEY environment variable not found.');
throw new Error('OpenAI API key is required');
}
return {
@@ -117,62 +104,10 @@ export function createContentGeneratorConfig(
} as ContentGeneratorConfig;
}
if (authType === AuthType.USE_ANTHROPIC) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey:
newContentGeneratorConfig.apiKey || process.env['ANTHROPIC_API_KEY'],
baseUrl:
newContentGeneratorConfig.baseUrl || process.env['ANTHROPIC_BASE_URL'],
model: newContentGeneratorConfig.model || process.env['ANTHROPIC_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('ANTHROPIC_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.baseUrl) {
throw new Error('ANTHROPIC_BASE_URL environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('ANTHROPIC_MODEL environment variable not found.');
}
}
if (authType === AuthType.USE_GEMINI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['GEMINI_API_KEY'],
model: newContentGeneratorConfig.model || process.env['GEMINI_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('GEMINI_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('GEMINI_MODEL environment variable not found.');
}
}
if (authType === AuthType.USE_VERTEX_AI) {
newContentGeneratorConfig = {
...newContentGeneratorConfig,
apiKey: newContentGeneratorConfig.apiKey || process.env['GOOGLE_API_KEY'],
model: newContentGeneratorConfig.model || process.env['GOOGLE_MODEL'],
};
if (!newContentGeneratorConfig.apiKey) {
throw new Error('GOOGLE_API_KEY environment variable not found.');
}
if (!newContentGeneratorConfig.model) {
throw new Error('GOOGLE_MODEL environment variable not found.');
}
}
return newContentGeneratorConfig as ContentGeneratorConfig;
return {
...newContentGeneratorConfig,
model: newContentGeneratorConfig?.model || DEFAULT_QWEN_MODEL,
} as ContentGeneratorConfig;
}
export async function createContentGenerator(
@@ -180,9 +115,19 @@ export async function createContentGenerator(
gcConfig: Config,
isInitialAuth?: boolean,
): Promise<ContentGenerator> {
if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI
) {
const { createGeminiContentGenerator } = await import(
'./geminiContentGenerator/index.js'
);
return createGeminiContentGenerator(config, gcConfig);
}
if (config.authType === AuthType.USE_OPENAI) {
if (!config.apiKey) {
throw new Error('OPENAI_API_KEY environment variable not found.');
throw new Error('OpenAI API key is required');
}
// Import OpenAIContentGenerator dynamically to avoid circular dependencies
@@ -191,8 +136,7 @@ export async function createContentGenerator(
);
// Always use OpenAIContentGenerator; logging is controlled by the enableOpenAILogging flag
const generator = createOpenAIContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
return createOpenAIContentGenerator(config, gcConfig);
}
if (config.authType === AuthType.QWEN_OAUTH) {
@@ -213,8 +157,7 @@ export async function createContentGenerator(
);
// Create the content generator with dynamic token management
const generator = new QwenContentGenerator(qwenClient, config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
return new QwenContentGenerator(qwenClient, config, gcConfig);
} catch (error) {
throw new Error(
`${error instanceof Error ? error.message : String(error)}`,
@@ -222,30 +165,6 @@ export async function createContentGenerator(
}
}
if (config.authType === AuthType.USE_ANTHROPIC) {
if (!config.apiKey) {
throw new Error('ANTHROPIC_API_KEY environment variable not found.');
}
const { createAnthropicContentGenerator } = await import(
'./anthropicContentGenerator/index.js'
);
const generator = createAnthropicContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
if (
config.authType === AuthType.USE_GEMINI ||
config.authType === AuthType.USE_VERTEX_AI
) {
const { createGeminiContentGenerator } = await import(
'./geminiContentGenerator/index.js'
);
const generator = createGeminiContentGenerator(config, gcConfig);
return new LoggingContentGenerator(generator, gcConfig);
}
throw new Error(
`Error creating contentGenerator: Unsupported authType: ${config.authType}`,
);
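
The dispatch above pairs each auth type with a lazy dynamic import so a backend is only loaded when selected. A generic sketch of that pattern; dispatch and Factory are illustrative names:

// Each key maps to an async factory; unknown keys fail loudly, mirroring the
// final throw above.
type Factory<T> = () => Promise<T>;

async function dispatch<T>(
  factories: Record<string, Factory<T>>,
  key: string,
): Promise<T> {
  const factory = factories[key];
  if (!factory) {
    throw new Error(`Unsupported authType: ${key}`);
  }
  return factory();
}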

View File

@@ -720,6 +720,66 @@ describe('GeminiChat', () => {
);
});
it('should handle summarized thinking by conditionally including thoughts in history', async () => {
// Case 1: useSummarizedThinking is true -> thoughts NOT in history
vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
true,
);
const stream1 = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ thought: true, text: 'T1' }, { text: 'A1' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream1,
);
const res1 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p1');
for await (const _ of res1);
const history1 = chat.getHistory();
expect(history1[1].parts).toEqual([{ text: 'A1' }]);
// Case 2: useSummarizedThinking is false -> thoughts ARE in history
chat.clearHistory();
vi.mocked(mockContentGenerator.useSummarizedThinking).mockReturnValue(
false,
);
const stream2 = (async function* () {
yield {
candidates: [
{
content: {
role: 'model',
parts: [{ thought: true, text: 'T2' }, { text: 'A2' }],
},
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
stream2,
);
const res2 = await chat.sendMessageStream('m1', { message: 'h1' }, 'p2');
for await (const _ of res2);
const history2 = chat.getHistory();
expect(history2[1].parts).toEqual([
{ text: 'T2', thought: true },
{ text: 'A2' },
]);
});
it('should keep parts with thoughtSignature when consolidating history', async () => {
const stream = (async function* () {
yield {

View File

@@ -559,25 +559,14 @@ export class GeminiChat {
yield chunk; // Yield every chunk to the UI immediately.
}
let thoughtContentPart: Part | undefined;
const thoughtText = allModelParts
.filter((part) => part.thought)
.map((part) => part.text)
.join('')
.trim();
if (thoughtText !== '') {
thoughtContentPart = {
text: thoughtText,
thought: true,
};
const thoughtSignature = allModelParts.filter(
(part) => part.thoughtSignature && part.thought,
)?.[0]?.thoughtSignature;
if (thoughtContentPart && thoughtSignature) {
thoughtContentPart.thoughtSignature = thoughtSignature;
}
let thoughtText = '';
// Only include thoughts if not using summarized thinking.
if (!this.config.getContentGenerator().useSummarizedThinking()) {
thoughtText = allModelParts
.filter((part) => part.thought)
.map((part) => part.text)
.join('')
.trim();
}
const contentParts = allModelParts.filter((part) => !part.thought);
@@ -603,11 +592,11 @@ export class GeminiChat {
.trim();
// Record assistant turn with raw Content and metadata
if (thoughtContentPart || contentText || hasToolCall || usageMetadata) {
if (thoughtText || contentText || hasToolCall || usageMetadata) {
this.chatRecordingService?.recordAssistantTurn({
model,
message: [
...(thoughtContentPart ? [thoughtContentPart] : []),
...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
...(contentText ? [{ text: contentText }] : []),
...(hasToolCall
? contentParts
@@ -643,7 +632,7 @@ export class GeminiChat {
this.history.push({
role: 'model',
parts: [
...(thoughtContentPart ? [thoughtContentPart] : []),
...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
...consolidatedHistoryParts,
],
});
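
The consolidation rule this hunk converges on is compact: join all thought text into one leading part unless thinking is summarized, then append the non-thought parts. A minimal standalone sketch; consolidateModelParts is an illustrative name:

import type { Part } from '@google/genai';

// Join all thought text into a single leading thought part (skipped when the
// generator only emits summarized thinking), then append non-thought parts.
function consolidateModelParts(
  allModelParts: Part[],
  useSummarizedThinking: boolean,
): Part[] {
  const thoughtText = useSummarizedThinking
    ? ''
    : allModelParts
        .filter((part) => part.thought)
        .map((part) => part.text ?? '')
        .join('')
        .trim();
  const contentParts = allModelParts.filter((part) => !part.thought);
  return [
    ...(thoughtText ? [{ text: thoughtText, thought: true }] : []),
    ...contentParts,
  ];
}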

View File

@@ -39,7 +39,7 @@ export class GeminiContentGenerator implements ContentGenerator {
this.contentGeneratorConfig = contentGeneratorConfig;
}
private buildGenerateContentConfig(
private buildSamplingParameters(
request: GenerateContentParameters,
): GenerateContentConfig {
const configSamplingParams = this.contentGeneratorConfig?.samplingParams;
@@ -84,7 +84,17 @@ export class GeminiContentGenerator implements ContentGenerator {
'frequencyPenalty',
),
thinkingConfig: getParameterValue(
this.buildThinkingConfig(),
this.contentGeneratorConfig?.reasoning
? {
includeThoughts: true,
thinkingLevel: (this.contentGeneratorConfig.reasoning.effort ===
'low'
? 'LOW'
: this.contentGeneratorConfig.reasoning.effort === 'high'
? 'HIGH'
: 'THINKING_LEVEL_UNSPECIFIED') as ThinkingLevel,
}
: undefined,
'thinkingConfig',
{
includeThoughts: true,
@@ -94,40 +104,13 @@ export class GeminiContentGenerator implements ContentGenerator {
};
}
private buildThinkingConfig():
| { includeThoughts: boolean; thinkingLevel?: ThinkingLevel }
| undefined {
const reasoning = this.contentGeneratorConfig?.reasoning;
if (reasoning === false) {
return { includeThoughts: false };
}
if (reasoning) {
const thinkingLevel = (
reasoning.effort === 'low'
? 'LOW'
: reasoning.effort === 'high'
? 'HIGH'
: 'THINKING_LEVEL_UNSPECIFIED'
) as ThinkingLevel;
return {
includeThoughts: true,
thinkingLevel,
};
}
return undefined;
}
async generateContent(
request: GenerateContentParameters,
_userPromptId: string,
): Promise<GenerateContentResponse> {
const finalRequest = {
...request,
config: this.buildGenerateContentConfig(request),
config: this.buildSamplingParameters(request),
};
return this.googleGenAI.models.generateContent(finalRequest);
}
@@ -138,7 +121,7 @@ export class GeminiContentGenerator implements ContentGenerator {
): Promise<AsyncGenerator<GenerateContentResponse>> {
const finalRequest = {
...request,
config: this.buildGenerateContentConfig(request),
config: this.buildSamplingParameters(request),
};
return this.googleGenAI.models.generateContentStream(finalRequest);
}
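
The inlined thinkingConfig reduces to a small mapping from reasoning.effort to a thinking level. Distilled below; the local union type merely stands in for the SDK's ThinkingLevel:

type ThinkingLevelLiteral = 'LOW' | 'HIGH' | 'THINKING_LEVEL_UNSPECIFIED';

// 'low' -> LOW, 'high' -> HIGH, anything else (including 'medium' or an
// unset effort) -> THINKING_LEVEL_UNSPECIFIED.
function effortToThinkingLevel(
  effort?: 'low' | 'medium' | 'high',
): ThinkingLevelLiteral {
  if (effort === 'low') return 'LOW';
  if (effort === 'high') return 'HIGH';
  return 'THINKING_LEVEL_UNSPECIFIED';
}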

View File

@@ -7,6 +7,7 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { createGeminiContentGenerator } from './index.js';
import { GeminiContentGenerator } from './geminiContentGenerator.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
import type { Config } from '../../config/config.js';
import { AuthType } from '../contentGenerator.js';
@@ -14,6 +15,10 @@ vi.mock('./geminiContentGenerator.js', () => ({
GeminiContentGenerator: vi.fn().mockImplementation(() => ({})),
}));
vi.mock('./loggingContentGenerator.js', () => ({
LoggingContentGenerator: vi.fn().mockImplementation((wrapped) => wrapped),
}));
describe('createGeminiContentGenerator', () => {
let mockConfig: Config;
@@ -26,7 +31,7 @@ describe('createGeminiContentGenerator', () => {
} as unknown as Config;
});
it('should create a GeminiContentGenerator', () => {
it('should create a GeminiContentGenerator wrapped in LoggingContentGenerator', () => {
const config = {
model: 'gemini-1.5-flash',
apiKey: 'test-key',
@@ -36,6 +41,7 @@ describe('createGeminiContentGenerator', () => {
const generator = createGeminiContentGenerator(config, mockConfig);
expect(GeminiContentGenerator).toHaveBeenCalled();
expect(LoggingContentGenerator).toHaveBeenCalled();
expect(generator).toBeDefined();
});
});

View File

@@ -11,8 +11,10 @@ import type {
} from '../contentGenerator.js';
import type { Config } from '../../config/config.js';
import { InstallationManager } from '../../utils/installationManager.js';
import { LoggingContentGenerator } from './loggingContentGenerator.js';
export { GeminiContentGenerator } from './geminiContentGenerator.js';
export { LoggingContentGenerator } from './loggingContentGenerator.js';
/**
* Create a Gemini content generator.
@@ -49,5 +51,5 @@ export function createGeminiContentGenerator(
config,
);
return geminiContentGenerator;
return new LoggingContentGenerator(geminiContentGenerator, gcConfig);
}
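
The wrapping above is a plain decorator: LoggingContentGenerator holds the wrapped generator and forwards every call, adding telemetry around each one. A stripped-down sketch of the shape (pass-through only; the real class also wraps generateContent, generateContentStream, and embedContent):

import type {
  CountTokensParameters,
  CountTokensResponse,
} from '@google/genai';
import type { ContentGenerator } from '../contentGenerator.js';

// Illustrative skeleton of the decorator shape; not the project's API.
class PassthroughGenerator {
  constructor(private readonly wrapped: ContentGenerator) {}

  getWrapped(): ContentGenerator {
    return this.wrapped;
  }

  async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
    return this.wrapped.countTokens(req); // add before/after hooks here
  }
}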

View File

@@ -4,22 +4,20 @@
* SPDX-License-Identifier: Apache-2.0
*/
import {
import type {
Content,
CountTokensParameters,
CountTokensResponse,
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
GenerateContentResponse,
type Content,
type CountTokensParameters,
type CountTokensResponse,
type EmbedContentParameters,
type EmbedContentResponse,
type GenerateContentParameters,
type GenerateContentResponseUsageMetadata,
type ContentListUnion,
type ContentUnion,
type Part,
type PartUnion,
type FinishReason,
ContentListUnion,
ContentUnion,
Part,
PartUnion,
} from '@google/genai';
import type OpenAI from 'openai';
import {
ApiRequestEvent,
ApiResponseEvent,
@@ -33,8 +31,6 @@ import {
} from '../../telemetry/loggers.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { isStructuredError } from '../../utils/quotaErrorDetection.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
interface StructuredError {
status: number;
@@ -44,19 +40,10 @@ interface StructuredError {
* A decorator that wraps a ContentGenerator to add logging to API calls.
*/
export class LoggingContentGenerator implements ContentGenerator {
private openaiLogger?: OpenAILogger;
private schemaCompliance?: 'auto' | 'openapi_30';
constructor(
private readonly wrapped: ContentGenerator,
private readonly config: Config,
) {
const generatorConfig = this.config.getContentGeneratorConfig();
if (generatorConfig?.enableOpenAILogging) {
this.openaiLogger = new OpenAILogger(generatorConfig.openAILoggingDir);
this.schemaCompliance = generatorConfig.schemaCompliance;
}
}
) {}
getWrapped(): ContentGenerator {
return this.wrapped;
@@ -104,31 +91,21 @@ export class LoggingContentGenerator implements ContentGenerator {
prompt_id: string,
): void {
const errorMessage = error instanceof Error ? error.message : String(error);
const errorType =
(error as { type?: string })?.type ||
(error instanceof Error ? error.name : 'unknown');
const errorResponseId =
(error as { requestID?: string; request_id?: string })?.requestID ||
(error as { requestID?: string; request_id?: string })?.request_id ||
responseId;
const errorStatus =
(error as { code?: string | number; status?: number })?.code ??
(error as { status?: number })?.status ??
(isStructuredError(error)
? (error as StructuredError).status
: undefined);
const errorType = error instanceof Error ? error.name : 'unknown';
logApiError(
this.config,
new ApiErrorEvent(
errorResponseId,
responseId,
model,
errorMessage,
durationMs,
prompt_id,
this.config.getContentGeneratorConfig()?.authType,
errorType,
errorStatus,
isStructuredError(error)
? (error as StructuredError).status
: undefined,
),
);
}
@@ -139,7 +116,6 @@ export class LoggingContentGenerator implements ContentGenerator {
): Promise<GenerateContentResponse> {
const startTime = Date.now();
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
const openaiRequest = await this.buildOpenAIRequestForLogging(req);
try {
const response = await this.wrapped.generateContent(req, userPromptId);
const durationMs = Date.now() - startTime;
@@ -151,12 +127,10 @@ export class LoggingContentGenerator implements ContentGenerator {
response.usageMetadata,
JSON.stringify(response),
);
await this.logOpenAIInteraction(openaiRequest, response);
return response;
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(undefined, durationMs, error, req.model, userPromptId);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
throw error;
}
}
@@ -167,7 +141,6 @@ export class LoggingContentGenerator implements ContentGenerator {
): Promise<AsyncGenerator<GenerateContentResponse>> {
const startTime = Date.now();
this.logApiRequest(this.toContents(req.contents), req.model, userPromptId);
const openaiRequest = await this.buildOpenAIRequestForLogging(req);
let stream: AsyncGenerator<GenerateContentResponse>;
try {
@@ -175,7 +148,6 @@ export class LoggingContentGenerator implements ContentGenerator {
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(undefined, durationMs, error, req.model, userPromptId);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
throw error;
}
@@ -184,7 +156,6 @@ export class LoggingContentGenerator implements ContentGenerator {
startTime,
userPromptId,
req.model,
openaiRequest,
);
}
@@ -193,7 +164,6 @@ export class LoggingContentGenerator implements ContentGenerator {
startTime: number,
userPromptId: string,
model: string,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): AsyncGenerator<GenerateContentResponse> {
const responses: GenerateContentResponse[] = [];
@@ -216,9 +186,6 @@ export class LoggingContentGenerator implements ContentGenerator {
lastUsageMetadata,
JSON.stringify(responses),
);
const consolidatedResponse =
this.consolidateGeminiResponsesForLogging(responses);
await this.logOpenAIInteraction(openaiRequest, consolidatedResponse);
} catch (error) {
const durationMs = Date.now() - startTime;
this._logApiError(
@@ -228,182 +195,10 @@ export class LoggingContentGenerator implements ContentGenerator {
responses[0]?.modelVersion || model,
userPromptId,
);
await this.logOpenAIInteraction(openaiRequest, undefined, error);
throw error;
}
}
private async buildOpenAIRequestForLogging(
request: GenerateContentParameters,
): Promise<OpenAI.Chat.ChatCompletionCreateParams | undefined> {
if (!this.openaiLogger) {
return undefined;
}
const converter = new OpenAIContentConverter(
request.model,
this.schemaCompliance,
);
const messages = converter.convertGeminiRequestToOpenAI(request, {
cleanOrphanToolCalls: false,
});
const openaiRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: request.model,
messages,
};
if (request.config?.tools) {
openaiRequest.tools = await converter.convertGeminiToolsToOpenAI(
request.config.tools,
);
}
if (request.config?.temperature !== undefined) {
openaiRequest.temperature = request.config.temperature;
}
if (request.config?.topP !== undefined) {
openaiRequest.top_p = request.config.topP;
}
if (request.config?.maxOutputTokens !== undefined) {
openaiRequest.max_tokens = request.config.maxOutputTokens;
}
if (request.config?.presencePenalty !== undefined) {
openaiRequest.presence_penalty = request.config.presencePenalty;
}
if (request.config?.frequencyPenalty !== undefined) {
openaiRequest.frequency_penalty = request.config.frequencyPenalty;
}
return openaiRequest;
}
private async logOpenAIInteraction(
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams | undefined,
response?: GenerateContentResponse,
error?: unknown,
): Promise<void> {
if (!this.openaiLogger || !openaiRequest) {
return;
}
const openaiResponse = response
? this.convertGeminiResponseToOpenAIForLogging(response, openaiRequest)
: undefined;
await this.openaiLogger.logInteraction(
openaiRequest,
openaiResponse,
error instanceof Error
? error
: error
? new Error(String(error))
: undefined,
);
}
private convertGeminiResponseToOpenAIForLogging(
response: GenerateContentResponse,
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
): OpenAI.Chat.ChatCompletion {
const converter = new OpenAIContentConverter(
openaiRequest.model,
this.schemaCompliance,
);
return converter.convertGeminiResponseToOpenAI(response);
}
private consolidateGeminiResponsesForLogging(
responses: GenerateContentResponse[],
): GenerateContentResponse | undefined {
if (responses.length === 0) {
return undefined;
}
const consolidated = new GenerateContentResponse();
const combinedParts: Part[] = [];
const functionCallIndex = new Map<string, number>();
let finishReason: FinishReason | undefined;
let usageMetadata: GenerateContentResponseUsageMetadata | undefined;
for (const response of responses) {
if (response.usageMetadata) {
usageMetadata = response.usageMetadata;
}
const candidate = response.candidates?.[0];
if (candidate?.finishReason) {
finishReason = candidate.finishReason;
}
const parts = candidate?.content?.parts ?? [];
for (const part of parts as Part[]) {
if (typeof part === 'string') {
combinedParts.push({ text: part });
continue;
}
if ('text' in part) {
if (part.text) {
combinedParts.push({
text: part.text,
...(part.thought ? { thought: true } : {}),
...(part.thoughtSignature
? { thoughtSignature: part.thoughtSignature }
: {}),
});
}
continue;
}
if ('functionCall' in part && part.functionCall) {
const callKey =
part.functionCall.id || part.functionCall.name || 'tool_call';
const existingIndex = functionCallIndex.get(callKey);
const functionPart = { functionCall: part.functionCall };
if (existingIndex !== undefined) {
combinedParts[existingIndex] = functionPart;
} else {
functionCallIndex.set(callKey, combinedParts.length);
combinedParts.push(functionPart);
}
continue;
}
if ('functionResponse' in part && part.functionResponse) {
combinedParts.push({ functionResponse: part.functionResponse });
continue;
}
combinedParts.push(part);
}
}
const lastResponse = responses[responses.length - 1];
const lastCandidate = lastResponse.candidates?.[0];
consolidated.responseId = lastResponse.responseId;
consolidated.createTime = lastResponse.createTime;
consolidated.modelVersion = lastResponse.modelVersion;
consolidated.promptFeedback = lastResponse.promptFeedback;
consolidated.usageMetadata = usageMetadata;
consolidated.candidates = [
{
content: {
role: lastCandidate?.content?.role || 'model',
parts: combinedParts,
},
...(finishReason ? { finishReason } : {}),
index: 0,
safetyRatings: lastCandidate?.safetyRatings || [],
},
];
return consolidated;
}
async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {
return this.wrapped.countTokens(req);
}
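
One detail of the removed consolidation helper is worth keeping in mind: streamed functionCall parts with the same id (or name) replace earlier occurrences in place instead of appending. Distilled; upsertFunctionCall is an illustrative name:

import type { Part } from '@google/genai';

// Replace an earlier call with the same key in place; otherwise append and
// remember its position for later chunks.
function upsertFunctionCall(
  combined: Part[],
  callIndex: Map<string, number>,
  functionCall: NonNullable<Part['functionCall']>,
): void {
  const key = functionCall.id || functionCall.name || 'tool_call';
  const existing = callIndex.get(key);
  if (existing !== undefined) {
    combined[existing] = { functionCall };
  } else {
    callIndex.set(key, combined.length);
    combined.push({ functionCall });
  }
}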

View File

@@ -1,7 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
export { LoggingContentGenerator } from './loggingContentGenerator.js';

View File

@@ -1,371 +0,0 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type {
GenerateContentParameters,
GenerateContentResponseUsageMetadata,
} from '@google/genai';
import { GenerateContentResponse } from '@google/genai';
import type { Config } from '../../config/config.js';
import type { ContentGenerator } from '../contentGenerator.js';
import { LoggingContentGenerator } from './index.js';
import { OpenAIContentConverter } from '../openaiContentGenerator/converter.js';
import {
logApiRequest,
logApiResponse,
logApiError,
} from '../../telemetry/loggers.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type OpenAI from 'openai';
vi.mock('../../telemetry/loggers.js', () => ({
logApiRequest: vi.fn(),
logApiResponse: vi.fn(),
logApiError: vi.fn(),
}));
vi.mock('../../utils/openaiLogger.js', () => ({
OpenAILogger: vi.fn().mockImplementation(() => ({
logInteraction: vi.fn().mockResolvedValue(undefined),
})),
}));
const convertGeminiRequestToOpenAISpy = vi
.spyOn(OpenAIContentConverter.prototype, 'convertGeminiRequestToOpenAI')
.mockReturnValue([{ role: 'user', content: 'converted' }]);
const convertGeminiToolsToOpenAISpy = vi
.spyOn(OpenAIContentConverter.prototype, 'convertGeminiToolsToOpenAI')
.mockResolvedValue([{ type: 'function', function: { name: 'tool' } }]);
const convertGeminiResponseToOpenAISpy = vi
.spyOn(OpenAIContentConverter.prototype, 'convertGeminiResponseToOpenAI')
.mockReturnValue({
id: 'openai-response',
object: 'chat.completion',
created: 123456789,
model: 'test-model',
choices: [],
} as OpenAI.Chat.ChatCompletion);
const createConfig = (overrides: Record<string, unknown> = {}): Config =>
({
getContentGeneratorConfig: () => ({
authType: 'openai',
enableOpenAILogging: false,
...overrides,
}),
}) as Config;
const createWrappedGenerator = (
generateContent: ContentGenerator['generateContent'],
generateContentStream: ContentGenerator['generateContentStream'],
): ContentGenerator =>
({
generateContent,
generateContentStream,
countTokens: vi.fn(),
embedContent: vi.fn(),
useSummarizedThinking: vi.fn().mockReturnValue(false),
}) as ContentGenerator;
const createResponse = (
responseId: string,
modelVersion: string,
parts: Array<Record<string, unknown>>,
usageMetadata?: GenerateContentResponseUsageMetadata,
finishReason?: string,
): GenerateContentResponse => {
const response = new GenerateContentResponse();
response.responseId = responseId;
response.modelVersion = modelVersion;
response.usageMetadata = usageMetadata;
response.candidates = [
{
content: {
role: 'model',
parts: parts as never[],
},
finishReason: finishReason as never,
index: 0,
safetyRatings: [],
},
];
return response;
};
describe('LoggingContentGenerator', () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
convertGeminiRequestToOpenAISpy.mockClear();
convertGeminiToolsToOpenAISpy.mockClear();
convertGeminiResponseToOpenAISpy.mockClear();
});
it('logs request/response, normalizes thought parts, and logs OpenAI interaction', async () => {
const wrapped = createWrappedGenerator(
vi.fn().mockResolvedValue(
createResponse(
'resp-1',
'model-v2',
[{ text: 'ok' }],
{
promptTokenCount: 3,
candidatesTokenCount: 5,
totalTokenCount: 8,
},
'STOP',
),
),
vi.fn(),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({
enableOpenAILogging: true,
openAILoggingDir: 'logs',
schemaCompliance: 'openapi_30',
}),
);
const request = {
model: 'test-model',
contents: [
{
role: 'user',
parts: [
{ text: 'Hello', thought: 'internal' },
{
functionCall: { id: 'call-1', name: 'tool', args: '{}' },
thought: 'strip-me',
},
null,
],
},
],
config: {
temperature: 0.3,
topP: 0.9,
maxOutputTokens: 256,
presencePenalty: 0.2,
frequencyPenalty: 0.1,
tools: [
{
functionDeclarations: [
{ name: 'tool', description: 'desc', parameters: {} },
],
},
],
},
} as unknown as GenerateContentParameters;
const response = await generator.generateContent(request, 'prompt-1');
expect(response.responseId).toBe('resp-1');
expect(logApiRequest).toHaveBeenCalledTimes(1);
const [, requestEvent] = vi.mocked(logApiRequest).mock.calls[0];
const loggedContents = JSON.parse(requestEvent.request_text || '[]');
expect(loggedContents[0].parts[0]).toEqual({
text: 'Hello\n[Thought: internal]',
});
expect(loggedContents[0].parts[1]).toEqual({
functionCall: { id: 'call-1', name: 'tool', args: '{}' },
});
expect(logApiResponse).toHaveBeenCalledTimes(1);
const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
expect(responseEvent.response_id).toBe('resp-1');
expect(responseEvent.model).toBe('model-v2');
expect(responseEvent.prompt_id).toBe('prompt-1');
expect(responseEvent.input_token_count).toBe(3);
expect(convertGeminiRequestToOpenAISpy).toHaveBeenCalledTimes(1);
expect(convertGeminiToolsToOpenAISpy).toHaveBeenCalledTimes(1);
expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
?.value as { logInteraction: ReturnType<typeof vi.fn> };
expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
const [openaiRequest, openaiResponse, openaiError] =
openaiLoggerInstance.logInteraction.mock.calls[0];
expect(openaiRequest).toEqual(
expect.objectContaining({
model: 'test-model',
messages: [{ role: 'user', content: 'converted' }],
tools: [{ type: 'function', function: { name: 'tool' } }],
temperature: 0.3,
top_p: 0.9,
max_tokens: 256,
presence_penalty: 0.2,
frequency_penalty: 0.1,
}),
);
expect(openaiResponse).toEqual({
id: 'openai-response',
object: 'chat.completion',
created: 123456789,
model: 'test-model',
choices: [],
});
expect(openaiError).toBeUndefined();
});
it('logs errors with status code and request id, then rethrows', async () => {
const error = Object.assign(new Error('boom'), {
code: 429,
request_id: 'req-99',
type: 'rate_limit',
});
const wrapped = createWrappedGenerator(
vi.fn().mockRejectedValue(error),
vi.fn(),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
);
const request = {
model: 'test-model',
contents: 'Hello',
} as unknown as GenerateContentParameters;
await expect(
generator.generateContent(request, 'prompt-2'),
).rejects.toThrow('boom');
expect(logApiError).toHaveBeenCalledTimes(1);
const [, errorEvent] = vi.mocked(logApiError).mock.calls[0];
expect(errorEvent.response_id).toBe('req-99');
expect(errorEvent.status_code).toBe(429);
expect(errorEvent.error_type).toBe('rate_limit');
expect(errorEvent.prompt_id).toBe('prompt-2');
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
?.value as { logInteraction: ReturnType<typeof vi.fn> };
const [, , loggedError] = openaiLoggerInstance.logInteraction.mock.calls[0];
expect(loggedError).toBeInstanceOf(Error);
expect((loggedError as Error).message).toBe('boom');
});
it('logs streaming responses and consolidates tool calls', async () => {
const usage1 = {
promptTokenCount: 1,
} as GenerateContentResponseUsageMetadata;
const usage2 = {
promptTokenCount: 2,
candidatesTokenCount: 4,
totalTokenCount: 6,
} as GenerateContentResponseUsageMetadata;
const response1 = createResponse(
'resp-1',
'model-stream',
[
{ text: 'Hello' },
{ functionCall: { id: 'call-1', name: 'tool', args: '{}' } },
],
usage1,
);
const response2 = createResponse(
'resp-2',
'model-stream',
[
{ text: ' world' },
{ functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
{ functionResponse: { name: 'tool', response: { output: 'ok' } } },
],
usage2,
'STOP',
);
const wrapped = createWrappedGenerator(
vi.fn(),
vi.fn().mockResolvedValue(
(async function* () {
yield response1;
yield response2;
})(),
),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
);
const request = {
model: 'test-model',
contents: 'Hello',
} as unknown as GenerateContentParameters;
const stream = await generator.generateContentStream(request, 'prompt-3');
const seen: GenerateContentResponse[] = [];
for await (const item of stream) {
seen.push(item);
}
expect(seen).toHaveLength(2);
expect(logApiResponse).toHaveBeenCalledTimes(1);
const [, responseEvent] = vi.mocked(logApiResponse).mock.calls[0];
expect(responseEvent.response_id).toBe('resp-1');
expect(responseEvent.input_token_count).toBe(2);
expect(convertGeminiResponseToOpenAISpy).toHaveBeenCalledTimes(1);
const [consolidatedResponse] =
convertGeminiResponseToOpenAISpy.mock.calls[0];
const consolidatedParts =
consolidatedResponse.candidates?.[0]?.content?.parts || [];
expect(consolidatedParts).toEqual([
{ text: 'Hello' },
{ functionCall: { id: 'call-1', name: 'tool', args: '{"x":1}' } },
{ text: ' world' },
{ functionResponse: { name: 'tool', response: { output: 'ok' } } },
]);
expect(consolidatedResponse.usageMetadata).toBe(usage2);
expect(consolidatedResponse.responseId).toBe('resp-2');
expect(consolidatedResponse.candidates?.[0]?.finishReason).toBe('STOP');
});
it('logs stream errors and skips response logging', async () => {
const response1 = createResponse('resp-1', 'model-stream', [
{ text: 'partial' },
]);
const streamError = new Error('stream-fail');
const wrapped = createWrappedGenerator(
vi.fn(),
vi.fn().mockResolvedValue(
(async function* () {
yield response1;
throw streamError;
})(),
),
);
const generator = new LoggingContentGenerator(
wrapped,
createConfig({ enableOpenAILogging: true }),
);
const request = {
model: 'test-model',
contents: 'Hello',
} as unknown as GenerateContentParameters;
const stream = await generator.generateContentStream(request, 'prompt-4');
await expect(async () => {
for await (const _item of stream) {
// Consume stream to trigger error.
}
}).rejects.toThrow('stream-fail');
expect(logApiResponse).not.toHaveBeenCalled();
expect(logApiError).toHaveBeenCalledTimes(1);
const openaiLoggerInstance = vi.mocked(OpenAILogger).mock.results[0]
?.value as { logInteraction: ReturnType<typeof vi.fn> };
expect(openaiLoggerInstance.logInteraction).toHaveBeenCalledTimes(1);
});
});

View File

@@ -236,9 +236,8 @@ export class OpenAIContentConverter {
*/
convertGeminiRequestToOpenAI(
request: GenerateContentParameters,
options: { cleanOrphanToolCalls: boolean } = { cleanOrphanToolCalls: true },
): OpenAI.Chat.ChatCompletionMessageParam[] {
let messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
// Handle system instruction from config
this.addSystemInstructionMessage(request, messages);
@@ -247,89 +246,11 @@ export class OpenAIContentConverter {
this.processContents(request.contents, messages);
// Clean up orphaned tool calls and merge consecutive assistant messages
if (options.cleanOrphanToolCalls) {
messages = this.cleanOrphanedToolCalls(messages);
}
messages = this.mergeConsecutiveAssistantMessages(messages);
const cleanedMessages = this.cleanOrphanedToolCalls(messages);
const mergedMessages =
this.mergeConsecutiveAssistantMessages(cleanedMessages);
return messages;
}
/**
* Convert Gemini response to OpenAI completion format (for logging).
*/
convertGeminiResponseToOpenAI(
response: GenerateContentResponse,
): OpenAI.Chat.ChatCompletion {
const candidate = response.candidates?.[0];
const parts = (candidate?.content?.parts || []) as Part[];
const parsedParts = this.parseParts(parts);
const message: ExtendedCompletionMessage = {
role: 'assistant',
content: parsedParts.contentParts.join('') || null,
refusal: null,
};
const reasoningContent = parsedParts.thoughtParts.join('');
if (reasoningContent) {
message.reasoning_content = reasoningContent;
}
if (parsedParts.functionCalls.length > 0) {
message.tool_calls = parsedParts.functionCalls.map((call, index) => ({
id: call.id || `call_${index}`,
type: 'function' as const,
function: {
name: call.name || '',
arguments: JSON.stringify(call.args || {}),
},
}));
}
const finishReason = this.mapGeminiFinishReasonToOpenAI(
candidate?.finishReason,
);
const usageMetadata = response.usageMetadata;
const usage: OpenAI.CompletionUsage = {
prompt_tokens: usageMetadata?.promptTokenCount || 0,
completion_tokens: usageMetadata?.candidatesTokenCount || 0,
total_tokens: usageMetadata?.totalTokenCount || 0,
};
if (usageMetadata?.cachedContentTokenCount !== undefined) {
(
usage as OpenAI.CompletionUsage & {
prompt_tokens_details?: { cached_tokens?: number };
}
).prompt_tokens_details = {
cached_tokens: usageMetadata.cachedContentTokenCount,
};
}
const createdMs = response.createTime
? Number(response.createTime)
: Date.now();
const createdSeconds = Number.isFinite(createdMs)
? Math.floor(createdMs / 1000)
: Math.floor(Date.now() / 1000);
return {
id: response.responseId || `gemini-${Date.now()}`,
object: 'chat.completion',
created: createdSeconds,
model: response.modelVersion || this.model,
choices: [
{
index: 0,
message,
finish_reason: finishReason,
logprobs: null,
},
],
usage,
};
return mergedMessages;
}
/**
@@ -915,6 +836,84 @@ export class OpenAIContentConverter {
return response;
}
/**
* Convert Gemini response format to OpenAI chat completion format for logging
*/
convertGeminiResponseToOpenAI(
response: GenerateContentResponse,
): OpenAI.Chat.ChatCompletion {
const candidate = response.candidates?.[0];
const content = candidate?.content;
let messageContent: string | null = null;
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
if (content?.parts) {
const textParts: string[] = [];
for (const part of content.parts) {
if ('text' in part && part.text) {
textParts.push(part.text);
} else if ('functionCall' in part && part.functionCall) {
toolCalls.push({
id: part.functionCall.id || `call_${toolCalls.length}`,
type: 'function' as const,
function: {
name: part.functionCall.name || '',
arguments: JSON.stringify(part.functionCall.args || {}),
},
});
}
}
messageContent = textParts.join('').trimEnd();
}
const choice: OpenAI.Chat.ChatCompletion.Choice = {
index: 0,
message: {
role: 'assistant',
content: messageContent,
refusal: null,
},
finish_reason: this.mapGeminiFinishReasonToOpenAI(
candidate?.finishReason,
) as OpenAI.Chat.ChatCompletion.Choice['finish_reason'],
logprobs: null,
};
if (toolCalls.length > 0) {
choice.message.tool_calls = toolCalls;
}
const openaiResponse: OpenAI.Chat.ChatCompletion = {
id: response.responseId || `chatcmpl-${Date.now()}`,
object: 'chat.completion',
created: response.createTime
? Number(response.createTime)
: Math.floor(Date.now() / 1000),
model: this.model,
choices: [choice],
};
// Add usage metadata if available
if (response.usageMetadata) {
openaiResponse.usage = {
prompt_tokens: response.usageMetadata.promptTokenCount || 0,
completion_tokens: response.usageMetadata.candidatesTokenCount || 0,
total_tokens: response.usageMetadata.totalTokenCount || 0,
};
if (response.usageMetadata.cachedContentTokenCount) {
openaiResponse.usage.prompt_tokens_details = {
cached_tokens: response.usageMetadata.cachedContentTokenCount,
};
}
}
return openaiResponse;
}
/**
* Map OpenAI finish reasons to Gemini finish reasons
*/
@@ -932,24 +931,29 @@ export class OpenAIContentConverter {
return mapping[openaiReason] || FinishReason.FINISH_REASON_UNSPECIFIED;
}
private mapGeminiFinishReasonToOpenAI(
geminiReason?: FinishReason,
): 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' {
if (!geminiReason) {
return 'stop';
}
/**
* Map Gemini finish reasons to OpenAI finish reasons
*/
private mapGeminiFinishReasonToOpenAI(geminiReason?: unknown): string {
if (!geminiReason) return 'stop';
switch (geminiReason) {
case FinishReason.STOP:
case 'STOP':
case 1: // FinishReason.STOP
return 'stop';
case FinishReason.MAX_TOKENS:
case 'MAX_TOKENS':
case 2: // FinishReason.MAX_TOKENS
return 'length';
case FinishReason.SAFETY:
case 'SAFETY':
case 3: // FinishReason.SAFETY
return 'content_filter';
case 'RECITATION':
case 4: // FinishReason.RECITATION
return 'content_filter';
case 'OTHER':
case 5: // FinishReason.OTHER
return 'stop';
default:
if (geminiReason === ('RECITATION' as FinishReason)) {
return 'content_filter';
}
return 'stop';
}
}
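
Both variants agree on the effective table: MAX_TOKENS maps to 'length', SAFETY and RECITATION map to 'content_filter', and everything else (including an unset reason) maps to 'stop'. As a standalone sketch:

// Effective finish-reason table shared by the old and new implementations.
function geminiToOpenAIFinishReason(
  reason?: string,
): 'stop' | 'length' | 'content_filter' {
  switch (reason) {
    case 'MAX_TOKENS':
      return 'length';
    case 'SAFETY':
    case 'RECITATION':
      return 'content_filter';
    default:
      return 'stop';
  }
}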

View File

@@ -7,7 +7,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { GenerateContentParameters } from '@google/genai';
import { EnhancedErrorHandler } from './errorHandler.js';
import type { RequestContext } from './errorHandler.js';
import type { RequestContext } from './telemetryService.js';
describe('EnhancedErrorHandler', () => {
let errorHandler: EnhancedErrorHandler;

View File

@@ -5,15 +5,7 @@
*/
import type { GenerateContentParameters } from '@google/genai';
export interface RequestContext {
userPromptId: string;
model: string;
authType: string;
startTime: number;
duration: number;
isStreaming: boolean;
}
import type { RequestContext } from './telemetryService.js';
export interface ErrorHandler {
handle(

View File

@@ -91,4 +91,11 @@ export function determineProvider(
return new DefaultOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
}
// Services
export {
type TelemetryService,
type RequestContext,
DefaultTelemetryService,
} from './telemetryService.js';
export { type ErrorHandler, EnhancedErrorHandler } from './errorHandler.js';
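With the services re-exported from the barrel, downstream code can import the telemetry contract and the error handler from a single entry point instead of deep-importing each module. A hypothetical consumer import (the relative path is illustrative):

import {
  DefaultTelemetryService,
  EnhancedErrorHandler,
  type RequestContext,
  type TelemetryService,
} from './index.js';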

View File

@@ -11,6 +11,7 @@ import type {
} from '@google/genai';
import type { PipelineConfig } from './pipeline.js';
import { ContentGenerationPipeline } from './pipeline.js';
import { DefaultTelemetryService } from './telemetryService.js';
import { EnhancedErrorHandler } from './errorHandler.js';
import { getDefaultTokenizer } from '../../utils/request-tokenizer/index.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
@@ -28,6 +29,11 @@ export class OpenAIContentGenerator implements ContentGenerator {
cliConfig,
provider,
contentGeneratorConfig,
telemetryService: new DefaultTelemetryService(
cliConfig,
contentGeneratorConfig.enableOpenAILogging,
contentGeneratorConfig.openAILoggingDir,
),
errorHandler: new EnhancedErrorHandler(
(error: unknown, request: GenerateContentParameters) =>
this.shouldSuppressErrorLogging(error, request),

View File

@@ -15,6 +15,7 @@ import { OpenAIContentConverter } from './converter.js';
import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig, AuthType } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import type { TelemetryService } from './telemetryService.js';
import type { ErrorHandler } from './errorHandler.js';
// Mock dependencies
@@ -27,6 +28,7 @@ describe('ContentGenerationPipeline', () => {
let mockProvider: OpenAICompatibleProvider;
let mockClient: OpenAI;
let mockConverter: OpenAIContentConverter;
let mockTelemetryService: TelemetryService;
let mockErrorHandler: ErrorHandler;
let mockContentGeneratorConfig: ContentGeneratorConfig;
let mockCliConfig: Config;
@@ -61,6 +63,13 @@ describe('ContentGenerationPipeline', () => {
getDefaultGenerationConfig: vi.fn().mockReturnValue({}),
};
// Mock telemetry service
mockTelemetryService = {
logSuccess: vi.fn().mockResolvedValue(undefined),
logError: vi.fn().mockResolvedValue(undefined),
logStreamingSuccess: vi.fn().mockResolvedValue(undefined),
};
// Mock error handler
mockErrorHandler = {
handle: vi.fn().mockImplementation((error: unknown) => {
@@ -90,6 +99,7 @@ describe('ContentGenerationPipeline', () => {
cliConfig: mockCliConfig,
provider: mockProvider,
contentGeneratorConfig: mockContentGeneratorConfig,
telemetryService: mockTelemetryService,
errorHandler: mockErrorHandler,
};
@@ -162,6 +172,17 @@ describe('ContentGenerationPipeline', () => {
expect(mockConverter.convertOpenAIResponseToGemini).toHaveBeenCalledWith(
mockOpenAIResponse,
);
expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: false,
}),
mockGeminiResponse,
expect.any(Object),
mockOpenAIResponse,
);
});
it('should handle tools in request', async () => {
@@ -247,6 +268,16 @@ describe('ContentGenerationPipeline', () => {
'API Error',
);
expect(mockTelemetryService.logError).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: false,
}),
testError,
expect.any(Object),
);
expect(mockErrorHandler.handle).toHaveBeenCalledWith(
testError,
expect.any(Object),
@@ -345,6 +376,17 @@ describe('ContentGenerationPipeline', () => {
signal: undefined,
}),
);
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
[mockGeminiResponse1, mockGeminiResponse2],
expect.any(Object),
[mockChunk1, mockChunk2],
);
});
it('should filter empty responses', async () => {
@@ -448,6 +490,16 @@ describe('ContentGenerationPipeline', () => {
expect(results).toHaveLength(0); // No results due to error
expect(mockConverter.resetStreamingToolCalls).toHaveBeenCalledTimes(2); // Once at start, once on error
expect(mockTelemetryService.logError).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
testError,
expect.any(Object),
);
expect(mockErrorHandler.handle).toHaveBeenCalledWith(
testError,
expect.any(Object),
@@ -598,6 +650,18 @@ describe('ContentGenerationPipeline', () => {
candidatesTokenCount: 20,
totalTokenCount: 30,
});
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
results,
expect.any(Object),
[mockChunk1, mockChunk2, mockChunk3],
);
});
it('should handle ideal case where last chunk has both finishReason and usageMetadata', async () => {
@@ -789,6 +853,18 @@ describe('ContentGenerationPipeline', () => {
candidatesTokenCount: 20,
totalTokenCount: 30,
});
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
}),
results,
expect.any(Object),
[mockChunk1, mockChunk2, mockChunk3],
);
});
it('should handle providers that send finishReason and valid usage in same chunk', async () => {
@@ -1042,6 +1118,19 @@ describe('ContentGenerationPipeline', () => {
await pipeline.execute(request, userPromptId);
// Assert
expect(mockTelemetryService.logSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: false,
startTime: expect.any(Number),
duration: expect.any(Number),
}),
expect.any(Object),
expect.any(Object),
expect.any(Object),
);
});
it('should create context with correct properties for streaming request', async () => {
@@ -1084,6 +1173,19 @@ describe('ContentGenerationPipeline', () => {
}
// Assert
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
userPromptId,
model: 'test-model',
authType: 'openai',
isStreaming: true,
startTime: expect.any(Number),
duration: expect.any(Number),
}),
expect.any(Array),
expect.any(Object),
expect.any(Array),
);
});
it('should collect all OpenAI chunks for logging even when Gemini responses are filtered', async () => {
@@ -1227,6 +1329,22 @@ describe('ContentGenerationPipeline', () => {
// Should only yield the final response (empty ones are filtered)
expect(responses).toHaveLength(1);
expect(responses[0]).toBe(finalGeminiResponse);
// Verify telemetry was called with ALL OpenAI chunks, including the filtered ones
expect(mockTelemetryService.logStreamingSuccess).toHaveBeenCalledWith(
expect.objectContaining({
model: 'test-model',
duration: expect.any(Number),
userPromptId: 'test-prompt-id',
authType: 'openai',
}),
[finalGeminiResponse], // Only the non-empty Gemini response
expect.objectContaining({
model: 'test-model',
messages: [{ role: 'user', content: 'test' }],
}),
[partialToolCallChunk1, partialToolCallChunk2, finishChunk], // ALL OpenAI chunks
);
});
});
});

View File

@@ -13,12 +13,14 @@ import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig } from '../contentGenerator.js';
import type { OpenAICompatibleProvider } from './provider/index.js';
import { OpenAIContentConverter } from './converter.js';
-import type { ErrorHandler, RequestContext } from './errorHandler.js';
+import type { TelemetryService, RequestContext } from './telemetryService.js';
+import type { ErrorHandler } from './errorHandler.js';
export interface PipelineConfig {
cliConfig: Config;
provider: OpenAICompatibleProvider;
contentGeneratorConfig: ContentGeneratorConfig;
+  telemetryService: TelemetryService;
errorHandler: ErrorHandler;
}
@@ -44,7 +46,7 @@ export class ContentGenerationPipeline {
request,
userPromptId,
false,
-      async (openaiRequest) => {
+      async (openaiRequest, context) => {
const openaiResponse = (await this.client.chat.completions.create(
openaiRequest,
{
@@ -55,6 +57,14 @@ export class ContentGenerationPipeline {
const geminiResponse =
this.converter.convertOpenAIResponseToGemini(openaiResponse);
// Log success
await this.config.telemetryService.logSuccess(
context,
geminiResponse,
openaiRequest,
openaiResponse,
);
return geminiResponse;
},
);
@@ -78,7 +88,12 @@ export class ContentGenerationPipeline {
)) as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>;
// Stage 2: Process stream with conversion and logging
-      return this.processStreamWithLogging(stream, context, request);
+      return this.processStreamWithLogging(
+        stream,
+        context,
+        openaiRequest,
+        request,
+      );
},
);
}
@@ -95,9 +110,11 @@ export class ContentGenerationPipeline {
private async *processStreamWithLogging(
stream: AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
context: RequestContext,
openaiRequest: OpenAI.Chat.ChatCompletionCreateParams,
request: GenerateContentParameters,
): AsyncGenerator<GenerateContentResponse> {
const collectedGeminiResponses: GenerateContentResponse[] = [];
const collectedOpenAIChunks: OpenAI.Chat.ChatCompletionChunk[] = [];
// Reset streaming tool calls to prevent data pollution from previous streams
this.converter.resetStreamingToolCalls();
@@ -108,6 +125,9 @@ export class ContentGenerationPipeline {
try {
// Stage 2a: Convert and yield each chunk while preserving original
for await (const chunk of stream) {
// Always collect OpenAI chunks for logging, regardless of Gemini conversion result
collectedOpenAIChunks.push(chunk);
const response = this.converter.convertOpenAIChunkToGemini(chunk);
// Stage 2b: Filter empty responses to avoid downstream issues
@@ -144,8 +164,15 @@ export class ContentGenerationPipeline {
yield pendingFinishResponse;
}
-    // Stage 2e: Stream completed successfully
+    // Stage 2e: Stream completed successfully - perform logging with original OpenAI chunks
     context.duration = Date.now() - context.startTime;
+    await this.config.telemetryService.logStreamingSuccess(
+      context,
+      collectedGeminiResponses,
+      openaiRequest,
+      collectedOpenAIChunks,
+    );
} catch (error) {
// Clear streaming tool calls on error to prevent data pollution
this.converter.resetStreamingToolCalls();
@@ -231,7 +258,7 @@ export class ContentGenerationPipeline {
const baseRequest: OpenAI.Chat.ChatCompletionCreateParams = {
model: this.contentGeneratorConfig.model,
messages,
-      ...this.buildGenerateContentConfig(request),
+      ...this.buildSamplingParameters(request),
};
// Add streaming options if present
@@ -253,7 +280,7 @@ export class ContentGenerationPipeline {
return this.config.provider.buildRequest(baseRequest, userPromptId);
}
-  private buildGenerateContentConfig(
+  private buildSamplingParameters(
request: GenerateContentParameters,
): Record<string, unknown> {
const defaultSamplingParams =
@@ -289,7 +316,7 @@ export class ContentGenerationPipeline {
return value !== undefined ? { [key]: value } : {};
};
-    const params: Record<string, unknown> = {
+    const params = {
// Parameters with request fallback but no defaults
...addParameterIfDefined('temperature', 'temperature', 'temperature'),
...addParameterIfDefined('top_p', 'top_p', 'topP'),
@@ -310,24 +337,11 @@ export class ContentGenerationPipeline {
'frequency_penalty',
'frequencyPenalty',
),
-      ...this.buildReasoningConfig(),
     };
     return params;
   }
-  private buildReasoningConfig(): Record<string, unknown> {
-    const reasoning = this.contentGeneratorConfig.reasoning;
-    if (reasoning === false) {
-      return {};
-    }
-    return {
-      reasoning_effort: reasoning?.effort ?? 'medium',
-    };
-  }
/**
* Common error handling wrapper for execute methods
*/
@@ -355,7 +369,13 @@ export class ContentGenerationPipeline {
return result;
} catch (error) {
// Use shared error handling logic
-      return await this.handleError(error, context, request);
+      return await this.handleError(
+        error,
+        context,
+        request,
+        userPromptId,
+        isStreaming,
+      );
}
}
@@ -367,8 +387,37 @@ export class ContentGenerationPipeline {
error: unknown,
context: RequestContext,
request: GenerateContentParameters,
userPromptId?: string,
isStreaming?: boolean,
): Promise<never> {
context.duration = Date.now() - context.startTime;
// Build request for logging (may fail, but we still want to log the error)
let openaiRequest: OpenAI.Chat.ChatCompletionCreateParams;
try {
if (userPromptId !== undefined && isStreaming !== undefined) {
openaiRequest = await this.buildRequest(
request,
userPromptId,
isStreaming,
);
} else {
// For processStreamWithLogging, we don't have userPromptId/isStreaming,
// so create a minimal request
openaiRequest = {
model: this.contentGeneratorConfig.model,
messages: [],
};
}
} catch (_buildError) {
// If we can't build the request, create a minimal one for logging
openaiRequest = {
model: this.contentGeneratorConfig.model,
messages: [],
};
}
await this.config.telemetryService.logError(context, error, openaiRequest);
this.config.errorHandler.handle(error, context, request);
}
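The streaming path in this file follows a collect-as-you-yield pattern: every raw provider chunk is stashed before conversion, so the single log call at the end of the stream sees the provider's exact output even when a chunk's Gemini conversion is filtered out. Reduced to a self-contained sketch (generic types; `logStreaming` stands in for the telemetry call):

async function* relayWithLogging<T, R>(
  stream: AsyncIterable<T>,
  convert: (chunk: T) => R | null,
  logStreaming: (raw: T[], converted: R[]) => Promise<void>,
): AsyncGenerator<R> {
  const raw: T[] = [];
  const converted: R[] = [];
  for await (const chunk of stream) {
    raw.push(chunk); // collect before converting, so filtered chunks are still logged
    const out = convert(chunk);
    if (out !== null) {
      converted.push(out);
      yield out;
    }
  }
  await logStreaming(raw, converted); // one log call once the stream completes
}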

View File

@@ -39,8 +39,7 @@ export class DashScopeOpenAICompatibleProvider
return (
authType === AuthType.QWEN_OAUTH ||
baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' ||
-      baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1' ||
-      !baseUrl
+      baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1'
);
}
@@ -145,7 +144,9 @@ export class DashScopeOpenAICompatibleProvider
getDefaultGenerationConfig(): GenerateContentConfig {
return {
-      temperature: 0.3,
+      temperature: 0.7,
+      topP: 0.8,
+      topK: 20,
};
}
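Note the behavioral change in the first hunk above: an unset `baseUrl` no longer falls through to the DashScope provider; only Qwen OAuth or an explicit DashScope endpoint selects it. Restated as a standalone predicate (string literals stand in for the `AuthType` enum; the real method takes full config objects):

const DASHSCOPE_BASE_URLS = new Set([
  'https://dashscope.aliyuncs.com/compatible-mode/v1',
  'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
]);

function isDashScope(authType?: string, baseUrl?: string): boolean {
  return (
    authType === 'qwen-oauth' ||
    (baseUrl !== undefined && DASHSCOPE_BASE_URLS.has(baseUrl))
  );
}

// Previously true via the removed `!baseUrl` arm; false after this change:
console.assert(isDashScope('openai', undefined) === false);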

File diff suppressed because it is too large

View File

@@ -0,0 +1,275 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type { Config } from '../../config/config.js';
import { logApiError, logApiResponse } from '../../telemetry/loggers.js';
import { ApiErrorEvent, ApiResponseEvent } from '../../telemetry/types.js';
import { OpenAILogger } from '../../utils/openaiLogger.js';
import type { GenerateContentResponse } from '@google/genai';
import type OpenAI from 'openai';
import type { ExtendedCompletionChunkDelta } from './converter.js';
export interface RequestContext {
userPromptId: string;
model: string;
authType: string;
startTime: number;
duration: number;
isStreaming: boolean;
}
export interface TelemetryService {
logSuccess(
context: RequestContext,
response: GenerateContentResponse,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiResponse?: OpenAI.Chat.ChatCompletion,
): Promise<void>;
logError(
context: RequestContext,
error: unknown,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): Promise<void>;
logStreamingSuccess(
context: RequestContext,
responses: GenerateContentResponse[],
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
): Promise<void>;
}
export class DefaultTelemetryService implements TelemetryService {
private logger: OpenAILogger;
constructor(
private config: Config,
private enableOpenAILogging: boolean = false,
openAILoggingDir?: string,
) {
// Always create a new logger instance so the working directory is correct.
// If no custom directory is provided, the logger falls back to its default path.
this.logger = new OpenAILogger(openAILoggingDir);
}
async logSuccess(
context: RequestContext,
response: GenerateContentResponse,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiResponse?: OpenAI.Chat.ChatCompletion,
): Promise<void> {
// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
response.responseId || 'unknown',
context.model,
context.duration,
context.userPromptId,
context.authType,
response.usageMetadata,
);
logApiResponse(this.config, responseEvent);
// Log interaction if enabled
if (this.enableOpenAILogging && openaiRequest && openaiResponse) {
await this.logger.logInteraction(openaiRequest, openaiResponse);
}
}
async logError(
context: RequestContext,
error: unknown,
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
): Promise<void> {
const errorMessage = error instanceof Error ? error.message : String(error);
// Log API error event for UI telemetry
const errorEvent = new ApiErrorEvent(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any)?.requestID || 'unknown',
context.model,
errorMessage,
context.duration,
context.userPromptId,
context.authType,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any)?.type,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(error as any)?.code,
);
logApiError(this.config, errorEvent);
// Log error interaction if enabled
if (this.enableOpenAILogging && openaiRequest) {
await this.logger.logInteraction(
openaiRequest,
undefined,
error as Error,
);
}
}
async logStreamingSuccess(
context: RequestContext,
responses: GenerateContentResponse[],
openaiRequest?: OpenAI.Chat.ChatCompletionCreateParams,
openaiChunks?: OpenAI.Chat.ChatCompletionChunk[],
): Promise<void> {
// Get final usage metadata from the last response that has it
const finalUsageMetadata = responses
.slice()
.reverse()
.find((r) => r.usageMetadata)?.usageMetadata;
// Log API response event for UI telemetry
const responseEvent = new ApiResponseEvent(
responses[responses.length - 1]?.responseId || 'unknown',
context.model,
context.duration,
context.userPromptId,
context.authType,
finalUsageMetadata,
);
logApiResponse(this.config, responseEvent);
// Log interaction if enabled - combine chunks only when needed
if (
this.enableOpenAILogging &&
openaiRequest &&
openaiChunks &&
openaiChunks.length > 0
) {
const combinedResponse = this.combineOpenAIChunksForLogging(openaiChunks);
await this.logger.logInteraction(openaiRequest, combinedResponse);
}
}
/**
* Combine OpenAI stream chunks for logging purposes.
* Consolidates all stream chunks into a single ChatCompletion response for
* telemetry and logging, avoiding unnecessary format conversions.
*/
private combineOpenAIChunksForLogging(
chunks: OpenAI.Chat.ChatCompletionChunk[],
): OpenAI.Chat.ChatCompletion {
if (chunks.length === 0) {
throw new Error('No chunks to combine');
}
const firstChunk = chunks[0];
// Combine all content from chunks
let combinedContent = '';
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
let finishReason:
| 'stop'
| 'length'
| 'tool_calls'
| 'content_filter'
| 'function_call'
| null = null;
let combinedReasoning = '';
let usage:
| {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
}
| undefined;
for (const chunk of chunks) {
const choice = chunk.choices?.[0];
if (choice) {
// Combine reasoning content
const reasoningContent = (choice.delta as ExtendedCompletionChunkDelta)
?.reasoning_content;
if (reasoningContent) {
combinedReasoning += reasoningContent;
}
// Combine text content
if (choice.delta?.content) {
combinedContent += choice.delta.content;
}
// Collect tool calls
if (choice.delta?.tool_calls) {
for (const toolCall of choice.delta.tool_calls) {
if (toolCall.index !== undefined) {
if (!toolCalls[toolCall.index]) {
toolCalls[toolCall.index] = {
id: toolCall.id || '',
type: toolCall.type || 'function',
function: { name: '', arguments: '' },
};
}
if (toolCall.function?.name) {
toolCalls[toolCall.index].function.name +=
toolCall.function.name;
}
if (toolCall.function?.arguments) {
toolCalls[toolCall.index].function.arguments +=
toolCall.function.arguments;
}
}
}
}
// Get finish reason from the last chunk
if (choice.finish_reason) {
finishReason = choice.finish_reason;
}
}
// Get usage from the last chunk that has it
if (chunk.usage) {
usage = chunk.usage;
}
}
// Create the combined ChatCompletion response
const message: OpenAI.Chat.ChatCompletionMessage = {
role: 'assistant',
content: combinedContent || null,
refusal: null,
};
if (combinedReasoning) {
// Attach reasoning content if any thought tokens were streamed
(message as { reasoning_content?: string }).reasoning_content =
combinedReasoning;
}
// Add tool calls if any
if (toolCalls.length > 0) {
message.tool_calls = toolCalls.filter((tc) => tc.id); // Filter out empty tool calls
}
const combinedResponse: OpenAI.Chat.ChatCompletion = {
id: firstChunk.id,
object: 'chat.completion',
created: firstChunk.created,
model: firstChunk.model,
choices: [
{
index: 0,
message,
finish_reason: finishReason || 'stop',
logprobs: null,
},
],
usage: usage || {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0,
},
system_fingerprint: firstChunk.system_fingerprint,
};
return combinedResponse;
}
}
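To illustrate what `combineOpenAIChunksForLogging` accumulates: text deltas concatenate in order, the last non-null finish reason wins, and the last chunk carrying usage supplies the totals. A compressed sketch with hand-written minimal chunk shapes (not real API payloads):

type Delta = { content?: string; finish?: 'stop' | 'length' };

const deltas: Delta[] = [
  { content: 'The capital ' },
  { content: 'is Paris.' },
  { finish: 'stop' },
];

let combinedContent = '';
let finishReason: Delta['finish'] | null = null;
for (const d of deltas) {
  if (d.content) combinedContent += d.content; // concatenate text deltas in order
  if (d.finish) finishReason = d.finish; // last finish reason wins
}

console.assert(combinedContent === 'The capital is Paris.');
console.assert(finishReason === 'stop');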

View File

@@ -264,7 +264,7 @@ describe('loggers', () => {
'event.timestamp': '2025-01-01T00:00:00.000Z',
prompt_length: 11,
prompt_id: 'prompt-id-9',
-          auth_type: 'gemini',
+          auth_type: 'gemini-api-key',
},
});
});
@@ -333,7 +333,7 @@ describe('loggers', () => {
total_token_count: 0,
response_text: 'test-response',
prompt_id: 'prompt-id-1',
-          auth_type: 'gemini',
+          auth_type: 'gemini-api-key',
},
});

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/sdk",
"version": "0.6.0-preview.1",
"version": "0.1.0",
"description": "TypeScript SDK for programmatic access to qwen-code CLI",
"main": "./dist/index.cjs",
"module": "./dist/index.mjs",

View File

@@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"private": true,
"main": "src/index.ts",
"license": "Apache-2.0",

View File

@@ -2,7 +2,7 @@
"name": "qwen-code-vscode-ide-companion",
"displayName": "Qwen Code Companion",
"description": "Enable Qwen Code with direct access to your VS Code workspace.",
"version": "0.6.0-preview.1",
"version": "0.6.0",
"publisher": "qwenlm",
"icon": "assets/icon.png",
"repository": {

View File

@@ -37,7 +37,8 @@ if (!versionType) {
run(`npm version ${versionType} --no-git-tag-version --allow-same-version`);
// 3. Get all workspaces and filter out the one we don't want to version.
-const workspacesToExclude = [];
+// We intend to maintain sdk version independently.
+const workspacesToExclude = ['@qwen-code/sdk'];
let lsOutput;
try {
lsOutput = JSON.parse(