Compare commits

5 Commits

Author | SHA1 | Message | Date
DragonnZhang | 064d00021d | refactor(insight): update insight page assets and styles | 2026-01-21 22:29:56 +08:00
DragonnZhang | d8c0b6d07c | feat: add new insight page with Vite setup | 2026-01-21 22:29:56 +08:00
DragonnZhang | efe65ab8c0 | feat(insight): add insight command and server for personalized programming insights | 2026-01-21 22:29:56 +08:00
tanzhenxin | 21b26a400a | Merge pull request #1563 from QwenLM/fix/issue-1549-skip-enoent-import (fix: skip non-existent file imports instead of warning (ENOENT)) | 2026-01-21 20:11:00 +08:00
LaZzyMan | 1562780393 | fix: skip non-existent file imports instead of warning (ENOENT) | 2026-01-21 14:13:20 +08:00
43 changed files with 35014 additions and 1110 deletions

.gitignore (vendored, 2 lines changed)
View File

@@ -12,7 +12,7 @@
!.gemini/config.yaml
!.gemini/commands/
# Note: .qwen-clipboard/ is NOT in gitignore so Gemini can access pasted images
# Note: .gemini-clipboard/ is NOT in gitignore so Gemini can access pasted images
# Dependency directory
node_modules

View File

@@ -26,6 +26,7 @@ export default tseslint.config(
'dist/**',
'docs-site/.next/**',
'docs-site/out/**',
'packages/cli/src/services/insight-page/**',
],
},
eslint.configs.recommended,

View File

@@ -125,7 +125,7 @@
"lint-staged": {
"*.{js,jsx,ts,tsx}": [
"prettier --write",
"eslint --fix --max-warnings 0"
"eslint --fix --max-warnings 0 --no-warn-ignored"
],
"*.{json,md}": [
"prettier --write"

View File

@@ -39,6 +39,7 @@ import { themeCommand } from '../ui/commands/themeCommand.js';
import { toolsCommand } from '../ui/commands/toolsCommand.js';
import { vimCommand } from '../ui/commands/vimCommand.js';
import { setupGithubCommand } from '../ui/commands/setupGithubCommand.js';
import { insightCommand } from '../ui/commands/insightCommand.js';
/**
* Loads the core, hard-coded slash commands that are an integral part
@@ -88,6 +89,7 @@ export class BuiltinCommandLoader implements ICommandLoader {
vimCommand,
setupGithubCommand,
terminalSetupCommand,
insightCommand,
];
return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null);

View File

@@ -0,0 +1,120 @@
# Qwen Code Insights Page
A React-based visualization dashboard for displaying coding activity insights and statistics.
## Development
This application consists of two parts:
1. **Backend (Express Server)**: Serves API endpoints and processes chat history data
2. **Frontend (Vite + React)**: Development server with HMR
### Running in Development Mode
You need to run both the backend and frontend servers:
**Terminal 1 - Backend Server (Port 3001):**
```bash
pnpm dev:server
```
**Terminal 2 - Frontend Dev Server (Port 3000):**
```bash
pnpm dev
```
Then open <http://localhost:3000> in your browser.
The Vite dev server will proxy `/api` requests to the backend server at port 3001.
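For reference, this proxying is configured in the app's `vite.config.ts`, which appears in full later in this diff:

```ts
// vite.config.ts (excerpt): forward /api requests to the Express backend
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';

export default defineConfig({
  plugins: [react()],
  server: {
    port: 3000,
    proxy: {
      '/api': {
        target: 'http://localhost:3001',
        changeOrigin: true,
      },
    },
  },
});
```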
### Building for Production
```bash
pnpm build
```
This compiles TypeScript and builds the React application. The output is written to the `views/` directory (the `outDir` configured in `vite.config.ts`).
In production, the Express server serves both the static files and API endpoints from a single port.
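A condensed sketch of that single-port setup, based on `insightServer.ts` (shown in full later in this diff); the real `/api/insights` handler body is elided here:

```ts
import express from 'express';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';

const __dirname = dirname(fileURLToPath(import.meta.url));
const app = express();

// Built frontend assets (Vite's outDir is `views`)
app.use(
  '/assets',
  express.static(path.join(__dirname, 'insight-page', 'views', 'assets')),
);
app.get('/', (_req, res) => {
  res.sendFile(path.join(__dirname, 'insight-page', 'views', 'index.html'));
});
// API endpoint served from the same port
app.get('/api/insights', (_req, res) => {
  res.json({ ok: true }); // placeholder; the real handler computes insights
});

app.listen(Number(process.env['PORT'] ?? 3001));
```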
## Architecture
- **Frontend**: React + TypeScript + Vite + Chart.js
- **Backend**: Express + Node.js
- **Data Source**: JSONL chat history files from `~/.qwen/projects/*/chats/`
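The server only reads a few fields from each record. Here is a partial sketch of that shape, inferred from `insightServer.ts` (the canonical `ChatRecord` type lives in `@qwen-code/qwen-code-core` and may carry more fields):

```ts
// Inferred, partial record shape; not the full ChatRecord definition.
interface ChatRecordSketch {
  sessionId: string;
  timestamp: string; // ISO 8601; one record per exchange
  usageMetadata?: {
    promptTokenCount?: number;
    candidatesTokenCount?: number;
    totalTokenCount?: number;
  };
}
```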
## Original Vite Template Info
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## React Compiler
The React Compiler is not enabled in this template because of its impact on dev and build performance. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
```js
export default defineConfig([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Remove tseslint.configs.recommended and replace with this
tseslint.configs.recommendedTypeChecked,
// Alternatively, use this for stricter rules
tseslint.configs.strictTypeChecked,
// Optionally, add this for stylistic rules
tseslint.configs.stylisticTypeChecked,
// Other configs...
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
]);
```
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
```js
// eslint.config.js
import reactX from 'eslint-plugin-react-x';
import reactDom from 'eslint-plugin-react-dom';
export default defineConfig([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Enable lint rules for React
reactX.configs['recommended-typescript'],
// Enable lint rules for React DOM
reactDom.configs.recommended,
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
]);
```

View File

@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/qwen.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Qwen Code Insight</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

View File

@@ -0,0 +1,42 @@
{
"name": "insight-page",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"dev:server": "BASE_DIR=$HOME/.qwen/projects PORT=3001 tsx ../insightServer.ts",
"build": "tsc -b && vite build",
"lint": "eslint .",
"preview": "vite preview"
},
"dependencies": {
"@uiw/react-heat-map": "^2.3.3",
"chart.js": "^4.5.1",
"html2canvas": "^1.4.1",
"react": "^19.2.0",
"react-dom": "^19.2.0"
},
"devDependencies": {
"@eslint/js": "^9.39.1",
"@types/node": "^24.10.1",
"@types/react": "^19.2.5",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.1",
"autoprefixer": "^10.4.20",
"eslint": "^9.39.1",
"eslint-plugin-react-hooks": "^7.0.1",
"eslint-plugin-react-refresh": "^0.4.24",
"globals": "^16.5.0",
"postcss": "^8.4.49",
"tailwindcss": "^3.4.17",
"typescript": "~5.9.3",
"typescript-eslint": "^8.46.4",
"vite": "npm:rolldown-vite@7.2.5"
},
"pnpm": {
"overrides": {
"vite": "npm:rolldown-vite@7.2.5"
}
}
}

File diff suppressed because it is too large.

View File

@@ -0,0 +1,6 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
};

Binary file not shown (new image, 79 KiB).

View File

@@ -0,0 +1,395 @@
import { useEffect, useRef, useState, type CSSProperties } from 'react';
import {
Chart,
LineController,
LineElement,
BarController,
BarElement,
CategoryScale,
LinearScale,
PointElement,
Legend,
Title,
Tooltip,
} from 'chart.js';
import type { ChartConfiguration } from 'chart.js';
import HeatMap from '@uiw/react-heat-map';
import html2canvas from 'html2canvas';
// Register Chart.js components
Chart.register(
LineController,
LineElement,
BarController,
BarElement,
CategoryScale,
LinearScale,
PointElement,
Legend,
Title,
Tooltip,
);
interface UsageMetadata {
input: number;
output: number;
total: number;
}
interface InsightData {
heatmap: { [date: string]: number };
tokenUsage: { [date: string]: UsageMetadata };
currentStreak: number;
longestStreak: number;
longestWorkDate: string | null;
longestWorkDuration: number;
activeHours: { [hour: number]: number };
latestActiveTime: string | null;
achievements: Array<{
id: string;
name: string;
description: string;
}>;
}
function App() {
const [insights, setInsights] = useState<InsightData | null>(null);
const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null);
const hourChartRef = useRef<HTMLCanvasElement>(null);
const hourChartInstance = useRef<Chart | null>(null);
const containerRef = useRef<HTMLDivElement>(null);
// Load insights data
useEffect(() => {
const loadInsights = async () => {
try {
setLoading(true);
const response = await fetch('/api/insights');
if (!response.ok) {
throw new Error('Failed to fetch insights');
}
const data: InsightData = await response.json();
setInsights(data);
setError(null);
} catch (err) {
setError((err as Error).message);
setInsights(null);
} finally {
setLoading(false);
}
};
loadInsights();
}, []);
// Create hour chart when insights change
useEffect(() => {
if (!insights || !hourChartRef.current) return;
// Destroy existing chart if it exists
if (hourChartInstance.current) {
hourChartInstance.current.destroy();
}
const labels = Array.from({ length: 24 }, (_, i) => `${i}:00`);
const data = labels.map((_, i) => insights.activeHours[i] || 0);
const ctx = hourChartRef.current.getContext('2d');
if (!ctx) return;
hourChartInstance.current = new Chart(ctx, {
type: 'bar',
data: {
labels,
datasets: [
{
label: 'Activity per Hour',
data,
backgroundColor: 'rgba(52, 152, 219, 0.7)',
borderColor: 'rgba(52, 152, 219, 1)',
borderWidth: 1,
},
],
},
options: {
indexAxis: 'y',
responsive: true,
maintainAspectRatio: false,
scales: {
x: {
beginAtZero: true,
},
},
plugins: {
legend: {
display: false,
},
},
} as ChartConfiguration['options'],
});
}, [insights]);
const handleExport = async () => {
if (!containerRef.current) return;
try {
const button = document.getElementById('export-btn') as HTMLButtonElement;
button.style.display = 'none';
const canvas = await html2canvas(containerRef.current, {
scale: 2,
useCORS: true,
logging: false,
});
const imgData = canvas.toDataURL('image/png');
const link = document.createElement('a');
link.href = imgData;
link.download = `qwen-insights-${new Date().toISOString().slice(0, 10)}.png`;
link.click();
button.style.display = 'block';
} catch (err) {
console.error('Error capturing image:', err);
alert('Failed to export image. Please try again.');
}
};
if (loading) {
return (
<div className="flex min-h-screen items-center justify-center bg-gradient-to-br from-slate-50 via-white to-slate-100">
<div className="glass-card px-8 py-6 text-center">
<h2 className="text-xl font-semibold text-slate-900">
Loading insights...
</h2>
<p className="mt-2 text-sm text-slate-600">
Fetching your coding patterns
</p>
</div>
</div>
);
}
if (error || !insights) {
return (
<div className="flex min-h-screen items-center justify-center bg-gradient-to-br from-slate-50 via-white to-slate-100">
<div className="glass-card px-8 py-6 text-center">
<h2 className="text-xl font-semibold text-rose-700">
Error loading insights
</h2>
<p className="mt-2 text-sm text-slate-600">
{error || 'Please try again later.'}
</p>
</div>
</div>
);
}
// Prepare heatmap data for react-heat-map
const heatmapData = Object.entries(insights.heatmap).map(([date, count]) => ({
date,
count,
}));
const cardClass = 'glass-card p-6';
const sectionTitleClass =
'text-lg font-semibold tracking-tight text-slate-900';
const captionClass = 'text-sm font-medium text-slate-500';
return (
<div className="min-h-screen" ref={containerRef}>
<div className="mx-auto max-w-6xl px-6 py-10 md:py-12">
<header className="mb-8 space-y-3 text-center">
<p className="text-xs font-semibold uppercase tracking-[0.2em] text-slate-500">
Insights
</p>
<h1 className="text-3xl font-semibold text-slate-900 md:text-4xl">
Qwen Code Insights
</h1>
<p className="text-sm text-slate-600">
Your personalized coding journey and patterns
</p>
</header>
<div className="grid gap-4 md:grid-cols-3 md:gap-6">
<div className={`${cardClass} h-full`}>
<div className="flex items-start justify-between">
<div>
<p className={captionClass}>Current Streak</p>
<p className="mt-1 text-4xl font-bold text-slate-900">
{insights.currentStreak}
<span className="ml-2 text-base font-semibold text-slate-500">
days
</span>
</p>
</div>
<span className="rounded-full bg-emerald-50 px-4 py-2 text-sm font-semibold text-emerald-700">
Longest {insights.longestStreak}d
</span>
</div>
</div>
<div className={`${cardClass} h-full`}>
<div className="flex items-center justify-between">
<h3 className={sectionTitleClass}>Active Hours</h3>
<span className="rounded-full bg-slate-100 px-3 py-1 text-xs font-semibold text-slate-600">
24h
</span>
</div>
<div className="mt-4 h-56 w-full">
<canvas ref={hourChartRef}></canvas>
</div>
</div>
<div className={`${cardClass} h-full space-y-3`}>
<h3 className={sectionTitleClass}>Work Session</h3>
<div className="grid grid-cols-2 gap-3 text-sm text-slate-700">
<div className="rounded-xl bg-slate-50 px-3 py-2">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Longest
</p>
<p className="mt-1 text-lg font-semibold text-slate-900">
{insights.longestWorkDuration}m
</p>
</div>
<div className="rounded-xl bg-slate-50 px-3 py-2">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Date
</p>
<p className="mt-1 text-lg font-semibold text-slate-900">
{insights.longestWorkDate || '-'}
</p>
</div>
<div className="col-span-2 rounded-xl bg-slate-50 px-3 py-2">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Last Active
</p>
<p className="mt-1 text-lg font-semibold text-slate-900">
{insights.latestActiveTime || '-'}
</p>
</div>
</div>
</div>
</div>
<div className={`${cardClass} mt-4 space-y-4 md:mt-6`}>
<div className="flex items-center justify-between">
<h3 className={sectionTitleClass}>Activity Heatmap</h3>
<span className="text-xs font-semibold text-slate-500">
Past year
</span>
</div>
<div className="overflow-x-auto">
<div className="min-w-[720px] rounded-xl border border-slate-100 bg-white/70 p-4 shadow-inner shadow-slate-100">
<HeatMap
value={heatmapData}
width={1000}
style={{ color: '#0f172a' } satisfies CSSProperties}
startDate={
new Date(new Date().setFullYear(new Date().getFullYear() - 1))
}
endDate={new Date()}
rectSize={14}
legendCellSize={12}
rectProps={{
rx: 2,
}}
panelColors={{
0: '#e2e8f0',
2: '#a5d8ff',
4: '#74c0fc',
10: '#339af0',
20: '#1c7ed6',
}}
/>
</div>
</div>
</div>
<div className={`${cardClass} mt-4 md:mt-6`}>
<div className="space-y-3">
<h3 className={sectionTitleClass}>Token Usage</h3>
<div className="grid grid-cols-3 gap-3">
<div className="rounded-xl bg-slate-50 px-4 py-3">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Input
</p>
<p className="mt-1 text-2xl font-bold text-slate-900">
{Object.values(insights.tokenUsage)
.reduce((acc, usage) => acc + usage.input, 0)
.toLocaleString()}
</p>
</div>
<div className="rounded-xl bg-slate-50 px-4 py-3">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Output
</p>
<p className="mt-1 text-2xl font-bold text-slate-900">
{Object.values(insights.tokenUsage)
.reduce((acc, usage) => acc + usage.output, 0)
.toLocaleString()}
</p>
</div>
<div className="rounded-xl bg-slate-50 px-4 py-3">
<p className="text-xs font-semibold uppercase tracking-wide text-slate-500">
Total
</p>
<p className="mt-1 text-2xl font-bold text-slate-900">
{Object.values(insights.tokenUsage)
.reduce((acc, usage) => acc + usage.total, 0)
.toLocaleString()}
</p>
</div>
</div>
</div>
</div>
<div className={`${cardClass} mt-4 space-y-4 md:mt-6`}>
<div className="flex items-center justify-between">
<h3 className={sectionTitleClass}>Achievements</h3>
<span className="text-xs font-semibold text-slate-500">
{insights.achievements.length} total
</span>
</div>
{insights.achievements.length === 0 ? (
<p className="text-sm text-slate-600">
No achievements yet. Keep coding!
</p>
) : (
<div className="divide-y divide-slate-200">
{insights.achievements.map((achievement) => (
<div
key={achievement.id}
className="flex flex-col gap-1 py-3 text-left"
>
<span className="text-base font-semibold text-slate-900">
{achievement.name}
</span>
<p className="text-sm text-slate-600">
{achievement.description}
</p>
</div>
))}
</div>
)}
</div>
<div className="mt-6 flex justify-center">
<button
id="export-btn"
className="group inline-flex items-center gap-2 rounded-full bg-slate-900 px-5 py-3 text-sm font-semibold text-white shadow-soft transition focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-slate-400 hover:-translate-y-[1px] hover:shadow-lg active:translate-y-[1px]"
onClick={handleExport}
>
Export as Image
<span className="text-slate-200 transition group-hover:translate-x-0.5">
</span>
</button>
</div>
</div>
</div>
);
}
export default App;

View File

@@ -0,0 +1,15 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer base {
body {
@apply min-h-screen bg-gradient-to-br from-slate-50 via-white to-slate-100 text-slate-900 antialiased;
}
}
@layer components {
.glass-card {
@apply rounded-2xl border border-slate-200 bg-white/80 shadow-soft backdrop-blur;
}
}

View File

@@ -0,0 +1,10 @@
import { StrictMode } from 'react';
import { createRoot } from 'react-dom/client';
import './index.css';
import App from './App.tsx';
createRoot(document.getElementById('root')!).render(
<StrictMode>
<App />
</StrictMode>,
);

View File

@@ -0,0 +1,18 @@
import type { Config } from 'tailwindcss';
const config: Config = {
content: ['./index.html', './src/**/*.{ts,tsx}'],
theme: {
extend: {
boxShadow: {
soft: '0 10px 40px rgba(15, 23, 42, 0.08)',
},
borderRadius: {
xl: '1.25rem',
},
},
},
plugins: [],
};
export default config;

View File

@@ -0,0 +1,28 @@
{
"compilerOptions": {
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
"target": "ES2022",
"useDefineForClassFields": true,
"lib": ["ES2022", "DOM", "DOM.Iterable"],
"module": "ESNext",
"types": ["vite/client"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "react-jsx",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["src"]
}

View File

@@ -0,0 +1,7 @@
{
"files": [],
"references": [
{ "path": "./tsconfig.app.json" },
{ "path": "./tsconfig.node.json" }
]
}

View File

@@ -0,0 +1,26 @@
{
"compilerOptions": {
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
"target": "ES2023",
"lib": ["ES2023"],
"module": "ESNext",
"types": ["node"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["vite.config.ts"]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,14 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/qwen.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>insight-page</title>
<script type="module" crossorigin src="/assets/index-D7obW1Jn.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-CV6J1oXz.css">
</head>
<body>
<div id="root"></div>
</body>
</html>

Binary file not shown (new image, 79 KiB).

View File

@@ -0,0 +1,19 @@
import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
// https://vite.dev/config/
export default defineConfig({
plugins: [react()],
build: {
outDir: 'views',
},
server: {
port: 3000,
proxy: {
'/api': {
target: 'http://localhost:3001',
changeOrigin: true,
},
},
},
});

View File

@@ -0,0 +1,404 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import express from 'express';
import fs from 'fs/promises';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';
import type { ChatRecord } from '@qwen-code/qwen-code-core';
import { read } from '@qwen-code/qwen-code-core/src/utils/jsonl-utils.js';
interface StreakData {
currentStreak: number;
longestStreak: number;
dates: string[];
}
// For heat map data
interface HeatMapData {
[date: string]: number;
}
// For token usage data
interface TokenUsageData {
[date: string]: {
input: number;
output: number;
total: number;
};
}
// For achievement data
interface AchievementData {
id: string;
name: string;
description: string;
}
// For the final insight data
interface InsightData {
heatmap: HeatMapData;
tokenUsage: TokenUsageData;
currentStreak: number;
longestStreak: number;
longestWorkDate: string | null;
longestWorkDuration: number; // in minutes
activeHours: { [hour: number]: number };
latestActiveTime: string | null;
achievements: AchievementData[];
}
function debugLog(message: string) {
const timestamp = new Date().toISOString();
const logMessage = `[${timestamp}] ${message}`;
console.log(logMessage);
}
debugLog('Insight server starting...');
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const app = express();
const PORT = process.env['PORT'];
const BASE_DIR = process.env['BASE_DIR'];
if (!BASE_DIR) {
debugLog('BASE_DIR environment variable is required');
process.exit(1);
}
// Serve static assets from the views/assets directory
app.use(
'/assets',
express.static(path.join(__dirname, 'insight-page', 'views', 'assets')),
);
app.get('/', (_req, res) => {
res.sendFile(path.join(__dirname, 'insight-page', 'views', 'index.html'));
});
// API endpoint to get insight data
app.get('/api/insights', async (_req, res) => {
try {
debugLog('Received request for insights data');
const insights = await generateInsights(BASE_DIR);
res.json(insights);
} catch (error) {
debugLog(`Error generating insights: ${error}`);
res.status(500).json({ error: 'Failed to generate insights' });
}
});
// Process chat files from all projects in the base directory and generate insights
async function generateInsights(baseDir: string): Promise<InsightData> {
// Initialize data structures
const heatmap: HeatMapData = {};
const tokenUsage: TokenUsageData = {};
const activeHours: { [hour: number]: number } = {};
const sessionStartTimes: { [sessionId: string]: Date } = {};
const sessionEndTimes: { [sessionId: string]: Date } = {};
try {
// Get all project directories in the base directory
const projectDirs = await fs.readdir(baseDir);
// Process each project directory
for (const projectDir of projectDirs) {
const projectPath = path.join(baseDir, projectDir);
const stats = await fs.stat(projectPath);
// Only process if it's a directory
if (stats.isDirectory()) {
const chatsDir = path.join(projectPath, 'chats');
let chatFiles: string[] = [];
try {
// Get all chat files in the chats directory
const files = await fs.readdir(chatsDir);
chatFiles = files.filter((file) => file.endsWith('.jsonl'));
} catch (error) {
if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
debugLog(
`Error reading chats directory for project ${projectDir}: ${error}`,
);
}
// Continue to next project if chats directory doesn't exist
continue;
}
// Process each chat file in this project
for (const file of chatFiles) {
const filePath = path.join(chatsDir, file);
const records = await read<ChatRecord>(filePath);
// Process each record
for (const record of records) {
const timestamp = new Date(record.timestamp);
const dateKey = formatDate(timestamp);
const hour = timestamp.getHours();
// Update heatmap (count of interactions per day)
heatmap[dateKey] = (heatmap[dateKey] || 0) + 1;
// Update active hours
activeHours[hour] = (activeHours[hour] || 0) + 1;
// Update token usage
if (record.usageMetadata) {
const usage = tokenUsage[dateKey] || {
input: 0,
output: 0,
total: 0,
};
usage.input += record.usageMetadata.promptTokenCount || 0;
usage.output += record.usageMetadata.candidatesTokenCount || 0;
usage.total += record.usageMetadata.totalTokenCount || 0;
tokenUsage[dateKey] = usage;
}
// Track session times
if (!sessionStartTimes[record.sessionId]) {
sessionStartTimes[record.sessionId] = timestamp;
}
sessionEndTimes[record.sessionId] = timestamp;
}
}
}
}
} catch (error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
// Base directory doesn't exist, return empty insights
debugLog(`Base directory does not exist: ${baseDir}`);
} else {
debugLog(`Error reading base directory: ${error}`);
}
}
// Calculate streak data
const streakData = calculateStreaks(Object.keys(heatmap));
// Calculate longest work session
let longestWorkDuration = 0;
let longestWorkDate: string | null = null;
for (const sessionId in sessionStartTimes) {
const start = sessionStartTimes[sessionId];
const end = sessionEndTimes[sessionId];
const durationMinutes = Math.round(
(end.getTime() - start.getTime()) / (1000 * 60),
);
if (durationMinutes > longestWorkDuration) {
longestWorkDuration = durationMinutes;
longestWorkDate = formatDate(start);
}
}
// Determine the most recent active day. Heatmap keys are date-only
// (YYYY-MM-DD), so report the date itself; formatting a clock time from
// these keys would always yield midnight.
let latestActiveTime: string | null = null;
let latestTimestamp = new Date(0);
for (const dateStr in heatmap) {
const date = new Date(dateStr);
if (date > latestTimestamp) {
latestTimestamp = date;
latestActiveTime = dateStr;
}
}
// Calculate achievements
const achievements = calculateAchievements(activeHours, heatmap, tokenUsage);
return {
heatmap,
tokenUsage,
currentStreak: streakData.currentStreak,
longestStreak: streakData.longestStreak,
longestWorkDate,
longestWorkDuration,
activeHours,
latestActiveTime,
achievements,
};
}
// Helper function to format date as YYYY-MM-DD
function formatDate(date: Date): string {
return date.toISOString().split('T')[0];
}
// Calculate streaks from activity dates
function calculateStreaks(dates: string[]): StreakData {
if (dates.length === 0) {
return { currentStreak: 0, longestStreak: 0, dates: [] };
}
// Convert string dates to Date objects and sort them
const dateObjects = dates.map((dateStr) => new Date(dateStr));
dateObjects.sort((a, b) => a.getTime() - b.getTime());
let currentStreak = 1;
let maxStreak = 1;
let currentDate = new Date(dateObjects[0]);
currentDate.setHours(0, 0, 0, 0); // Normalize to start of day
for (let i = 1; i < dateObjects.length; i++) {
const nextDate = new Date(dateObjects[i]);
nextDate.setHours(0, 0, 0, 0); // Normalize to start of day
// Calculate difference in days
const diffDays = Math.floor(
(nextDate.getTime() - currentDate.getTime()) / (1000 * 60 * 60 * 24),
);
if (diffDays === 1) {
// Consecutive day
currentStreak++;
maxStreak = Math.max(maxStreak, currentStreak);
} else if (diffDays > 1) {
// Gap in streak
currentStreak = 1;
}
// If diffDays === 0, same day, so streak continues
currentDate = nextDate;
}
// The streak only counts as current if the last activity was today or
// yesterday; otherwise it has lapsed and is reset.
const today = new Date();
today.setHours(0, 0, 0, 0);
const yesterday = new Date(today);
yesterday.setDate(yesterday.getDate() - 1);
if (
currentDate.getTime() !== today.getTime() &&
currentDate.getTime() !== yesterday.getTime()
) {
currentStreak = 0;
}
return {
currentStreak,
longestStreak: maxStreak,
dates,
};
}
// Calculate achievements based on user behavior
function calculateAchievements(
activeHours: { [hour: number]: number },
heatmap: HeatMapData,
_tokenUsage: TokenUsageData,
): AchievementData[] {
const achievements: AchievementData[] = [];
// Total activities
const totalActivities = Object.values(heatmap).reduce(
(sum, count) => sum + count,
0,
);
// Total tokens used - commented out since it's not currently used
// const totalTokens = Object.values(tokenUsage).reduce((sum, usage) => sum + usage.total, 0);
// Total sessions
const totalSessions = Object.keys(heatmap).length;
// Calculate percentage of activity per hour
const totalHourlyActivity = Object.values(activeHours).reduce(
(sum, count) => sum + count,
0,
);
if (totalHourlyActivity > 0) {
// Midnight debugger: 20% of sessions happen between 12AM-5AM
const midnightActivity =
(activeHours[0] || 0) +
(activeHours[1] || 0) +
(activeHours[2] || 0) +
(activeHours[3] || 0) +
(activeHours[4] || 0) +
(activeHours[5] || 0);
if (midnightActivity / totalHourlyActivity >= 0.2) {
achievements.push({
id: 'midnight-debugger',
name: 'Midnight Debugger',
description: '20% of your sessions happen between 12AM-5AM',
});
}
// Morning coder: 20% of sessions happen between 6AM-9AM
const morningActivity =
(activeHours[6] || 0) +
(activeHours[7] || 0) +
(activeHours[8] || 0) +
(activeHours[9] || 0);
if (morningActivity / totalHourlyActivity >= 0.2) {
achievements.push({
id: 'morning-coder',
name: 'Morning Coder',
description: '20% of your sessions happen between 6AM-9AM',
});
}
}
// Patient king: average conversation length >= 10 exchanges
if (totalSessions > 0) {
const avgExchanges = totalActivities / totalSessions;
if (avgExchanges >= 10) {
achievements.push({
id: 'patient-king',
name: 'Patient King',
description: 'Your average conversation length is 10+ exchanges',
});
}
}
// Quick finisher: sessions average 2 exchanges or fewer.
// Per-session exchange counts aren't tracked here, so the overall
// average is used as a proxy.
if (totalSessions > 0) {
const avgPerSession = totalActivities / totalSessions;
if (avgPerSession <= 2) {
achievements.push({
id: 'quick-finisher',
name: 'Quick Finisher',
description: '70% of your sessions end in 2 exchanges or fewer',
});
}
}
// Explorer: for users with insufficient data or default
if (achievements.length === 0) {
achievements.push({
id: 'explorer',
name: 'Explorer',
description: 'Getting started with Qwen Code',
});
}
return achievements;
}
// Start the server
app.listen(PORT, () => {
debugLog(`Server running at http://localhost:${PORT}/`);
debugLog(`Analyzing projects in: ${BASE_DIR}`);
debugLog('Server is running. Press Ctrl+C to stop.');
});

View File

@@ -0,0 +1,190 @@
/**
* @license
* Copyright 2025 Qwen Code
* SPDX-License-Identifier: Apache-2.0
*/
import type { CommandContext, SlashCommand } from './types.js';
import { CommandKind } from './types.js';
import { MessageType } from '../types.js';
import { t } from '../../i18n/index.js';
import { spawn } from 'child_process';
import { join } from 'path';
import os from 'os';
import { registerCleanup } from '../../utils/cleanup.js';
import net from 'net';
// Track the insight server subprocess so we can terminate it on quit
let insightServerProcess: import('child_process').ChildProcess | null = null;
// Find an available port starting from a default port
async function findAvailablePort(startingPort: number = 3000): Promise<number> {
return new Promise((resolve, reject) => {
let port = startingPort;
const checkPort = () => {
const server = net.createServer();
server.listen(port, () => {
server.once('close', () => {
resolve(port);
});
server.close();
});
server.on('error', (err: NodeJS.ErrnoException) => {
if (err.code === 'EADDRINUSE') {
port++; // Try next port
checkPort();
} else {
reject(err);
}
});
};
checkPort();
});
}
export const insightCommand: SlashCommand = {
name: 'insight',
get description() {
return t(
'generate personalized programming insights from your chat history',
);
},
kind: CommandKind.BUILT_IN,
action: async (context: CommandContext) => {
try {
context.ui.setDebugMessage(t('Starting insight server...'));
// If there's an existing insight server process, terminate it first
if (insightServerProcess && !insightServerProcess.killed) {
insightServerProcess.kill();
insightServerProcess = null;
}
// Find an available port
const availablePort = await findAvailablePort(3000);
const projectsDir = join(os.homedir(), '.qwen', 'projects');
// Path to the insight server script
const insightScriptPath = join(
process.cwd(),
'packages',
'cli',
'src',
'services',
'insightServer.ts',
);
// Spawn the insight server process
const serverProcess = spawn('npx', ['tsx', insightScriptPath], {
stdio: 'pipe',
env: {
...process.env,
NODE_ENV: 'production',
BASE_DIR: projectsDir,
PORT: String(availablePort),
},
});
// Store the server process for cleanup
insightServerProcess = serverProcess;
// Register cleanup function to terminate the server process on quit
registerCleanup(() => {
if (insightServerProcess && !insightServerProcess.killed) {
insightServerProcess.kill();
insightServerProcess = null;
}
});
serverProcess.stderr.on('data', (data) => {
// Forward error output to parent process stderr
process.stderr.write(`Insight server error: ${data}`);
context.ui.addItem(
{
type: MessageType.ERROR,
text: `Insight server error: ${data.toString()}`,
},
Date.now(),
);
});
serverProcess.on('close', (code) => {
console.log(`Insight server process exited with code ${code}`);
context.ui.setDebugMessage(t('Insight server stopped.'));
// Reset the reference when the process closes
if (insightServerProcess === serverProcess) {
insightServerProcess = null;
}
});
const url = `http://localhost:${availablePort}`;
// Open browser automatically
const openBrowser = async () => {
try {
const { exec } = await import('child_process');
const { promisify } = await import('util');
const execAsync = promisify(exec);
switch (process.platform) {
case 'darwin': // macOS
await execAsync(`open ${url}`);
break;
case 'win32': // Windows
await execAsync(`start ${url}`);
break;
default: // Linux and others
await execAsync(`xdg-open ${url}`);
}
context.ui.addItem(
{
type: MessageType.INFO,
text: `Insight server started. Visit: ${url}`,
},
Date.now(),
);
} catch (err) {
console.error('Failed to open browser automatically:', err);
context.ui.addItem(
{
type: MessageType.INFO,
text: `Insight server started. Please visit: ${url}`,
},
Date.now(),
);
}
};
// Wait for the server to start (give it some time to bind to the port)
setTimeout(openBrowser, 1000);
// Inform the user that the server is running
context.ui.addItem(
{
type: MessageType.INFO,
text: t(
'Insight server started. Check your browser for the visualization.',
),
},
Date.now(),
);
} catch (error) {
context.ui.addItem(
{
type: MessageType.ERROR,
text: t('Failed to start insight server: {{error}}', {
error: (error as Error).message,
}),
},
Date.now(),
);
}
},
};

View File

@@ -376,7 +376,7 @@ describe('InputPrompt', () => {
it('should handle Ctrl+V when clipboard has an image', async () => {
vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true);
vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue(
'/test/.qwen-clipboard/clipboard-123.png',
'/test/.gemini-clipboard/clipboard-123.png',
);
const { stdin, unmount } = renderWithProviders(
@@ -436,7 +436,7 @@ describe('InputPrompt', () => {
it('should insert image path at cursor position with proper spacing', async () => {
const imagePath = path.join(
'test',
'.qwen-clipboard',
'.gemini-clipboard',
'clipboard-456.png',
);
vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true);

View File

@@ -52,7 +52,7 @@ export async function saveClipboardImage(
// Create a temporary directory for clipboard images within the target directory
// This avoids security restrictions on paths outside the target directory
const baseDir = targetDir || process.cwd();
const tempDir = path.join(baseDir, '.qwen-clipboard');
const tempDir = path.join(baseDir, '.gemini-clipboard');
await fs.mkdir(tempDir, { recursive: true });
// Generate a unique filename with timestamp
@@ -130,7 +130,7 @@ export async function cleanupOldClipboardImages(
): Promise<void> {
try {
const baseDir = targetDir || process.cwd();
const tempDir = path.join(baseDir, '.qwen-clipboard');
const tempDir = path.join(baseDir, '.gemini-clipboard');
const files = await fs.readdir(tempDir);
const oneHourAgo = Date.now() - 60 * 60 * 1000;

View File

@@ -208,238 +208,6 @@ describe('AnthropicContentConverter', () => {
],
});
});
it('converts function response with inlineData image parts into tool_result with images', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'Read',
response: { output: 'Image content' },
parts: [
{
inlineData: {
mimeType: 'image/png',
data: 'base64encodeddata',
},
},
],
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: [
{ type: 'text', text: 'Image content' },
{
type: 'image',
source: {
type: 'base64',
media_type: 'image/png',
data: 'base64encodeddata',
},
},
],
},
],
},
]);
});
it('renders non-image inlineData as a text block (avoids invalid image media_type)', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'Read',
response: { output: 'Audio content' },
parts: [
{
inlineData: {
mimeType: 'audio/mpeg',
data: 'base64encodedaudiodata',
},
},
],
},
},
],
},
],
});
expect(messages).toHaveLength(1);
expect(messages[0]?.role).toBe('user');
const toolResult = messages[0]?.content?.[0] as {
type: string;
content: Array<{ type: string; text?: string }>;
};
expect(toolResult.type).toBe('tool_result');
expect(Array.isArray(toolResult.content)).toBe(true);
expect(toolResult.content[0]).toEqual({
type: 'text',
text: 'Audio content',
});
expect(toolResult.content[1]?.type).toBe('text');
expect(toolResult.content[1]?.text).toContain(
'Unsupported inline media type for Anthropic',
);
expect(toolResult.content[1]?.text).toContain('audio/mpeg');
});
it('converts fileData with PDF into document block', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call-1',
name: 'Read',
response: { output: 'PDF content' },
parts: [
{
fileData: {
mimeType: 'application/pdf',
fileUri: 'pdfbase64data',
},
},
],
},
},
],
},
],
});
expect(messages).toEqual([
{
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: [
{ type: 'text', text: 'PDF content' },
{
type: 'document',
source: {
type: 'base64',
media_type: 'application/pdf',
data: 'pdfbase64data',
},
},
],
},
],
},
]);
});
it('associates each image with its preceding functionResponse', () => {
const { messages } = converter.convertGeminiRequestToAnthropic({
model: 'models/test',
contents: [
{
role: 'user',
parts: [
// Tool 1 with image 1
{
functionResponse: {
id: 'call-1',
name: 'Read',
response: { output: 'File 1' },
parts: [
{
inlineData: {
mimeType: 'image/png',
data: 'image1data',
},
},
],
},
},
// Tool 2 with image 2
{
functionResponse: {
id: 'call-2',
name: 'Read',
response: { output: 'File 2' },
parts: [
{
inlineData: {
mimeType: 'image/jpeg',
data: 'image2data',
},
},
],
},
},
],
},
],
});
// Multiple tool_result blocks are emitted in order
expect(messages).toHaveLength(1);
expect(messages[0]).toEqual({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: 'call-1',
content: [
{ type: 'text', text: 'File 1' },
{
type: 'image',
source: {
type: 'base64',
media_type: 'image/png',
data: 'image1data',
},
},
],
},
{
type: 'tool_result',
tool_use_id: 'call-2',
content: [
{ type: 'text', text: 'File 2' },
{
type: 'image',
source: {
type: 'base64',
media_type: 'image/jpeg',
data: 'image2data',
},
},
],
},
],
});
});
});
describe('convertGeminiToolsToAnthropic', () => {

View File

@@ -10,6 +10,7 @@ import type {
Content,
ContentListUnion,
ContentUnion,
FunctionCall,
FunctionResponse,
GenerateContentParameters,
Part,
@@ -29,6 +30,15 @@ type AnthropicMessageParam = Anthropic.MessageParam;
type AnthropicToolParam = Anthropic.Tool;
type AnthropicContentBlockParam = Anthropic.ContentBlockParam;
type ThoughtPart = { text: string; signature?: string };
interface ParsedParts {
thoughtParts: ThoughtPart[];
contentParts: string[];
functionCalls: FunctionCall[];
functionResponses: FunctionResponse[];
}
export class AnthropicContentConverter {
private model: string;
private schemaCompliance: SchemaComplianceMode;
@@ -218,161 +228,127 @@ export class AnthropicContentConverter {
}
if (!this.isContentObject(content)) return;
const parts = content.parts || [];
const parsed = this.parseParts(content.parts || []);
if (parsed.functionResponses.length > 0) {
for (const response of parsed.functionResponses) {
messages.push({
role: 'user',
content: [
{
type: 'tool_result',
tool_use_id: response.id || '',
content: this.extractFunctionResponseContent(response.response),
},
],
});
}
return;
}
if (content.role === 'model' && parsed.functionCalls.length > 0) {
const thinkingBlocks: AnthropicContentBlockParam[] =
parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
});
const toolUses: AnthropicContentBlockParam[] = parsed.functionCalls.map(
(call, index) => ({
type: 'tool_use',
id: call.id || `tool_${index}`,
name: call.name || '',
input: (call.args as Record<string, unknown>) || {},
}),
);
const textBlocks: AnthropicContentBlockParam[] = parsed.contentParts.map(
(text) => ({
type: 'text' as const,
text,
}),
);
messages.push({
role: 'assistant',
content: [...thinkingBlocks, ...textBlocks, ...toolUses],
});
return;
}
const role = content.role === 'model' ? 'assistant' : 'user';
const contentBlocks: AnthropicContentBlockParam[] = [];
let toolCallIndex = 0;
const thinkingBlocks: AnthropicContentBlockParam[] =
role === 'assistant'
? parsed.thoughtParts.map((part) => {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text,
};
if (part.signature) {
(thinkingBlock as { signature?: string }).signature =
part.signature;
}
return thinkingBlock as AnthropicContentBlockParam;
})
: [];
const textBlocks: AnthropicContentBlockParam[] = [
...thinkingBlocks,
...parsed.contentParts.map((text) => ({
type: 'text' as const,
text,
})),
];
if (textBlocks.length > 0) {
messages.push({ role, content: textBlocks });
}
}
private parseParts(parts: Part[]): ParsedParts {
const thoughtParts: ThoughtPart[] = [];
const contentParts: string[] = [];
const functionCalls: FunctionCall[] = [];
const functionResponses: FunctionResponse[] = [];
for (const part of parts) {
if (typeof part === 'string') {
contentBlocks.push({ type: 'text', text: part });
continue;
}
if ('text' in part && 'thought' in part && part.thought) {
if (role === 'assistant') {
const thinkingBlock: unknown = {
type: 'thinking',
thinking: part.text || '',
};
if (
contentParts.push(part);
} else if (
'text' in part &&
part.text &&
!('thought' in part && part.thought)
) {
contentParts.push(part.text);
} else if ('text' in part && 'thought' in part && part.thought) {
thoughtParts.push({
text: part.text || '',
signature:
'thoughtSignature' in part &&
typeof part.thoughtSignature === 'string'
) {
(thinkingBlock as { signature?: string }).signature =
part.thoughtSignature;
}
contentBlocks.push(thinkingBlock as AnthropicContentBlockParam);
}
? part.thoughtSignature
: undefined,
});
} else if ('functionCall' in part && part.functionCall) {
functionCalls.push(part.functionCall);
} else if ('functionResponse' in part && part.functionResponse) {
functionResponses.push(part.functionResponse);
}
if ('text' in part && part.text && !('thought' in part && part.thought)) {
contentBlocks.push({ type: 'text', text: part.text });
}
const mediaBlock = this.createMediaBlockFromPart(part);
if (mediaBlock) {
contentBlocks.push(mediaBlock);
}
if ('functionCall' in part && part.functionCall) {
if (role === 'assistant') {
contentBlocks.push({
type: 'tool_use',
id: part.functionCall.id || `tool_${toolCallIndex}`,
name: part.functionCall.name || '',
input: (part.functionCall.args as Record<string, unknown>) || {},
});
toolCallIndex += 1;
}
}
if (part.functionResponse) {
const toolResultBlock = this.createToolResultBlock(
part.functionResponse,
);
if (toolResultBlock && role === 'user') {
contentBlocks.push(toolResultBlock);
}
}
}
if (contentBlocks.length > 0) {
messages.push({ role, content: contentBlocks });
}
}
private createToolResultBlock(
response: FunctionResponse,
): Anthropic.ToolResultBlockParam | null {
const textContent = this.extractFunctionResponseContent(response.response);
type ToolResultContent = Anthropic.ToolResultBlockParam['content'];
const partBlocks: AnthropicContentBlockParam[] = [];
for (const part of response.parts || []) {
const block = this.createMediaBlockFromPart(part);
if (block) {
partBlocks.push(block);
}
}
let content: ToolResultContent;
if (partBlocks.length > 0) {
const blocks: AnthropicContentBlockParam[] = [];
if (textContent) {
blocks.push({ type: 'text', text: textContent });
}
blocks.push(...partBlocks);
content = blocks as unknown as ToolResultContent;
} else {
content = textContent;
}
return {
type: 'tool_result',
tool_use_id: response.id || '',
content,
thoughtParts,
contentParts,
functionCalls,
functionResponses,
};
}
private createMediaBlockFromPart(
part: Part,
): AnthropicContentBlockParam | null {
if (part.inlineData?.mimeType && part.inlineData?.data) {
if (!this.isSupportedAnthropicImageMimeType(part.inlineData.mimeType)) {
const displayName = part.inlineData.displayName ?? '';
return {
type: 'text',
text: `Unsupported inline media type for Anthropic: ${part.inlineData.mimeType}${displayName}.`,
};
}
return {
type: 'image',
source: {
type: 'base64',
media_type: part.inlineData.mimeType as
| 'image/jpeg'
| 'image/png'
| 'image/gif'
| 'image/webp',
data: part.inlineData.data,
},
};
}
if (part.fileData?.mimeType && part.fileData?.fileUri) {
if (part.fileData.mimeType !== 'application/pdf') {
const displayName = part.fileData.displayName ?? '';
return {
type: 'text',
text: `Unsupported file media for Anthropic: ${part.fileData.mimeType}${displayName}`,
};
}
return {
type: 'document',
source: {
type: 'base64',
media_type: part.fileData.mimeType as 'application/pdf',
data: part.fileData.fileUri,
},
};
}
return null;
}
private isSupportedAnthropicImageMimeType(
mimeType: string,
): mimeType is 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp' {
return (
mimeType === 'image/jpeg' ||
mimeType === 'image/png' ||
mimeType === 'image/gif' ||
mimeType === 'image/webp'
);
}
private extractTextFromContentUnion(contentUnion: unknown): string {
if (typeof contentUnion === 'string') {
return contentUnion;

View File

@@ -800,11 +800,11 @@ describe('convertToFunctionResponse', () => {
name: toolName,
id: callId,
response: {
output: '',
output: 'Binary content of type image/png was processed.',
},
parts: [{ inlineData: { mimeType: 'image/png', data: 'base64...' } }],
},
},
llmContent,
]);
});
@@ -819,15 +819,11 @@ describe('convertToFunctionResponse', () => {
name: toolName,
id: callId,
response: {
output: '',
output: 'Binary content of type application/pdf was processed.',
},
parts: [
{
fileData: { mimeType: 'application/pdf', fileUri: 'gs://...' },
},
],
},
},
llmContent,
]);
});
@@ -861,13 +857,11 @@ describe('convertToFunctionResponse', () => {
name: toolName,
id: callId,
response: {
output: '',
output: 'Binary content of type image/gif was processed.',
},
parts: [
{ inlineData: { mimeType: 'image/gif', data: 'gifdata...' } },
],
},
},
...llmContent,
]);
});

View File

@@ -30,12 +30,7 @@ import {
ToolOutputTruncatedEvent,
InputFormat,
} from '../index.js';
import type {
FunctionResponse,
FunctionResponsePart,
Part,
PartListUnion,
} from '@google/genai';
import type { Part, PartListUnion } from '@google/genai';
import { getResponseTextFromParts } from '../utils/generateContentResponseUtilities.js';
import type { ModifyContext } from '../tools/modifiable-tool.js';
import {
@@ -156,17 +151,13 @@ function createFunctionResponsePart(
callId: string,
toolName: string,
output: string,
mediaParts?: FunctionResponsePart[],
): Part {
const functionResponse: FunctionResponse = {
id: callId,
name: toolName,
response: { output },
...(mediaParts && mediaParts.length > 0 ? { parts: mediaParts } : {}),
};
return {
functionResponse,
functionResponse: {
id: callId,
name: toolName,
response: { output },
},
};
}
@@ -207,21 +198,16 @@ export function convertToFunctionResponse(
}
if (contentToProcess.inlineData || contentToProcess.fileData) {
const mediaParts: FunctionResponsePart[] = [];
if (contentToProcess.inlineData) {
mediaParts.push({ inlineData: contentToProcess.inlineData });
}
if (contentToProcess.fileData) {
mediaParts.push({ fileData: contentToProcess.fileData });
}
const mimeType =
contentToProcess.inlineData?.mimeType ||
contentToProcess.fileData?.mimeType ||
'unknown';
const functionResponse = createFunctionResponsePart(
callId,
toolName,
'',
mediaParts,
`Binary content of type ${mimeType} was processed.`,
);
return [functionResponse];
return [functionResponse, contentToProcess];
}
if (contentToProcess.text !== undefined) {

View File

@@ -309,13 +309,11 @@ describe('executeToolCall', () => {
name: 'testTool',
id: 'call6',
response: {
output: '',
output: 'Binary content of type image/png was processed.',
},
parts: [
{ inlineData: { mimeType: 'image/png', data: 'base64data' } },
],
},
},
imageDataPart,
],
});
});

View File

@@ -122,13 +122,7 @@ describe('OpenAIContentConverter', () => {
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
}>;
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('Raw output text');
expect(toolMessage?.content).toBe('Raw output text');
});
it('should prioritize error field when present', () => {
@@ -140,13 +134,7 @@ describe('OpenAIContentConverter', () => {
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
}>;
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('Command failed');
expect(toolMessage?.content).toBe('Command failed');
});
it('should stringify non-string responses', () => {
@@ -158,318 +146,7 @@ describe('OpenAIContentConverter', () => {
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
}>;
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('{"data":{"value":42}}');
});
it('should convert function responses with inlineData to tool message with embedded image_url', () => {
const request: GenerateContentParameters = {
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{
functionCall: {
id: 'call_1',
name: 'Read',
args: {},
},
},
],
},
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'Read',
response: { output: 'Image content' },
parts: [
{
inlineData: {
mimeType: 'image/png',
data: 'base64encodedimagedata',
},
},
],
},
},
],
},
],
};
const messages = converter.convertGeminiRequestToOpenAI(request);
// Should have tool message with both text and image content
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect((toolMessage as { tool_call_id?: string }).tool_call_id).toBe(
'call_1',
);
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
image_url?: { url: string };
}>;
expect(contentArray).toHaveLength(2);
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('Image content');
expect(contentArray[1].type).toBe('image_url');
expect(contentArray[1].image_url?.url).toBe(
'data:image/png;base64,base64encodedimagedata',
);
// No separate user message should be created
const userMessage = messages.find((message) => message.role === 'user');
expect(userMessage).toBeUndefined();
});
it('should convert function responses with fileData to tool message with embedded input_file', () => {
const request: GenerateContentParameters = {
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{
functionCall: {
id: 'call_1',
name: 'Read',
args: {},
},
},
],
},
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'Read',
response: { output: 'File content' },
parts: [
{
fileData: {
mimeType: 'image/jpeg',
fileUri: 'base64imagedata',
},
},
],
},
},
],
},
],
};
const messages = converter.convertGeminiRequestToOpenAI(request);
// Should have tool message with both text and file content
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
file?: { filename: string; file_data: string };
}>;
expect(contentArray).toHaveLength(2);
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('File content');
expect(contentArray[1].type).toBe('file');
expect(contentArray[1].file?.filename).toBe('file'); // Default filename when displayName not provided
expect(contentArray[1].file?.file_data).toBe(
'data:image/jpeg;base64,base64imagedata',
);
// No separate user message should be created
const userMessage = messages.find((message) => message.role === 'user');
expect(userMessage).toBeUndefined();
});
it('should convert PDF fileData to tool message with embedded input_file', () => {
const request: GenerateContentParameters = {
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{
functionCall: {
id: 'call_1',
name: 'Read',
args: {},
},
},
],
},
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'Read',
response: { output: 'PDF content' },
parts: [
{
fileData: {
mimeType: 'application/pdf',
fileUri: 'base64pdfdata',
displayName: 'document.pdf',
},
},
],
},
},
],
},
],
};
const messages = converter.convertGeminiRequestToOpenAI(request);
// Should have tool message with both text and file content
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
file?: { filename: string; file_data: string };
}>;
expect(contentArray).toHaveLength(2);
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('PDF content');
expect(contentArray[1].type).toBe('file');
expect(contentArray[1].file?.filename).toBe('document.pdf');
expect(contentArray[1].file?.file_data).toBe(
'data:application/pdf;base64,base64pdfdata',
);
// No separate user message should be created
const userMessage = messages.find((message) => message.role === 'user');
expect(userMessage).toBeUndefined();
});
it('should convert audio parts to tool message with embedded input_audio', () => {
const request: GenerateContentParameters = {
model: 'models/test',
contents: [
{
role: 'model',
parts: [
{
functionCall: {
id: 'call_1',
name: 'Record',
args: {},
},
},
],
},
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'Record',
response: { output: 'Audio recorded' },
parts: [
{
inlineData: {
mimeType: 'audio/wav',
data: 'audiobase64data',
},
},
],
},
},
],
},
],
};
const messages = converter.convertGeminiRequestToOpenAI(request);
// Should have tool message with both text and audio content
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
input_audio?: { data: string; format: string };
}>;
expect(contentArray).toHaveLength(2);
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('Audio recorded');
expect(contentArray[1].type).toBe('input_audio');
expect(contentArray[1].input_audio?.data).toBe('audiobase64data');
expect(contentArray[1].input_audio?.format).toBe('wav');
// No separate user message should be created
const userMessage = messages.find((message) => message.role === 'user');
expect(userMessage).toBeUndefined();
});
it('should create tool message with text-only content when no media parts', () => {
const request = createRequestWithFunctionResponse({
output: 'Plain text output',
});
const messages = converter.convertGeminiRequestToOpenAI(request);
const toolMessage = messages.find((message) => message.role === 'tool');
expect(toolMessage).toBeDefined();
expect(Array.isArray(toolMessage?.content)).toBe(true);
const contentArray = toolMessage?.content as Array<{
type: string;
text?: string;
}>;
expect(contentArray).toHaveLength(1);
expect(contentArray[0].type).toBe('text');
expect(contentArray[0].text).toBe('Plain text output');
// No user message should be created when there's no media
const userMessage = messages.find((message) => message.role === 'user');
expect(userMessage).toBeUndefined();
});
it('should skip empty function responses with no media and no text', () => {
const request: GenerateContentParameters = {
model: 'models/test',
contents: [
{
role: 'user',
parts: [
{
functionResponse: {
id: 'call_1',
name: 'Empty',
response: { output: '' },
},
},
],
},
],
};
const messages = converter.convertGeminiRequestToOpenAI(request);
// Should have no messages for empty response
expect(messages).toHaveLength(0);
});
});
@@ -503,35 +180,6 @@ describe('OpenAIContentConverter', () => {
);
});
it('should convert reasoning to a thought part for non-streaming responses', () => {
const response = converter.convertOpenAIResponseToGemini({
object: 'chat.completion',
id: 'chatcmpl-2',
created: 123,
model: 'gpt-test',
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'final answer',
reasoning: 'chain-of-thought',
},
finish_reason: 'stop',
logprobs: null,
},
],
} as unknown as OpenAI.Chat.ChatCompletion);
const parts = response.candidates?.[0]?.content?.parts;
expect(parts?.[0]).toEqual(
expect.objectContaining({ thought: true, text: 'chain-of-thought' }),
);
expect(parts?.[1]).toEqual(
expect.objectContaining({ text: 'final answer' }),
);
});
it('should convert streaming reasoning_content delta to a thought part', () => {
const chunk = converter.convertOpenAIChunkToGemini({
object: 'chat.completion.chunk',
@@ -560,34 +208,6 @@ describe('OpenAIContentConverter', () => {
);
});
it('should convert streaming reasoning delta to a thought part', () => {
const chunk = converter.convertOpenAIChunkToGemini({
object: 'chat.completion.chunk',
id: 'chunk-1b',
created: 456,
choices: [
{
index: 0,
delta: {
content: 'visible text',
reasoning: 'thinking...',
},
finish_reason: 'stop',
logprobs: null,
},
],
model: 'gpt-test',
} as unknown as OpenAI.Chat.ChatCompletionChunk);
const parts = chunk.candidates?.[0]?.content?.parts;
expect(parts?.[0]).toEqual(
expect.objectContaining({ thought: true, text: 'thinking...' }),
);
expect(parts?.[1]).toEqual(
expect.objectContaining({ text: 'visible text' }),
);
});
it('should not throw when streaming chunk has no delta', () => {
const chunk = converter.convertOpenAIChunkToGemini({
object: 'chat.completion.chunk',
@@ -964,7 +584,11 @@ describe('OpenAIContentConverter', () => {
expect(messages).toHaveLength(1);
expect(messages[0].role).toBe('assistant');
expect(messages[0].content).toBe('First partSecond part');
const content = messages[0]
.content as OpenAI.Chat.ChatCompletionContentPart[];
expect(content).toHaveLength(2);
expect(content[0]).toEqual({ type: 'text', text: 'First part' });
expect(content[1]).toEqual({ type: 'text', text: 'Second part' });
});
it('should merge multiple consecutive assistant messages', () => {
@@ -990,7 +614,9 @@ describe('OpenAIContentConverter', () => {
expect(messages).toHaveLength(1);
expect(messages[0].role).toBe('assistant');
expect(messages[0].content).toBe('Part 1Part 2Part 3');
const content = messages[0]
.content as OpenAI.Chat.ChatCompletionContentPart[];
expect(content).toHaveLength(3);
});
it('should merge tool_calls from consecutive assistant messages', () => {
@@ -1048,9 +674,7 @@ describe('OpenAIContentConverter', () => {
],
};
const messages = converter.convertGeminiRequestToOpenAI(request, {
cleanOrphanToolCalls: false,
});
const messages = converter.convertGeminiRequestToOpenAI(request);
// Should have: assistant (tool_call_1), tool (result_1), assistant (tool_call_2), tool (result_2)
expect(messages).toHaveLength(4);
@@ -1105,7 +729,10 @@ describe('OpenAIContentConverter', () => {
const messages = converter.convertGeminiRequestToOpenAI(request);
expect(messages).toHaveLength(1);
expect(messages[0].content).toBe('Text partAnother text');
const content = messages[0]
.content as OpenAI.Chat.ChatCompletionContentPart[];
expect(Array.isArray(content)).toBe(true);
expect(content).toHaveLength(2);
});
it('should merge empty content correctly', () => {
@@ -1131,7 +758,11 @@ describe('OpenAIContentConverter', () => {
// Empty messages should be filtered out
expect(messages).toHaveLength(1);
expect(messages[0].content).toBe('FirstSecond');
const content = messages[0]
.content as OpenAI.Chat.ChatCompletionContentPart[];
expect(content).toHaveLength(2);
expect(content[0]).toEqual({ type: 'text', text: 'First' });
expect(content[1]).toEqual({ type: 'text', text: 'Second' });
});
});
});
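Taken together, these tests pin down the message shape the converter targets for tool results that carry media. A minimal sketch of that shape, assuming the types from the `openai` package (the cast mirrors the converter's own, since the official tool-message type only admits plain text parts):

```typescript
import type OpenAI from 'openai';

// Illustrative values only: a tool result carrying text plus an embedded PDF,
// as asserted above. Raw base64 payloads are wrapped into data URLs.
const toolMessage: OpenAI.Chat.ChatCompletionToolMessageParam = {
  role: 'tool',
  tool_call_id: 'call_1',
  content: [
    { type: 'text', text: 'PDF content' },
    {
      type: 'file',
      file: {
        filename: 'document.pdf',
        file_data: 'data:application/pdf;base64,base64pdfdata',
      },
    },
    // Some OpenAI-compatible APIs accept rich content in tool messages,
    // hence the cast below.
  ] as unknown as string,
};
```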

View File

@@ -11,6 +11,7 @@ import type {
Tool,
ToolListUnion,
CallableTool,
FunctionCall,
FunctionResponse,
ContentListUnion,
ContentUnion,
@@ -46,13 +47,11 @@ type ExtendedChatCompletionMessageParam =
export interface ExtendedCompletionMessage
extends OpenAI.Chat.ChatCompletionMessage {
reasoning_content?: string | null;
reasoning?: string | null;
}
export interface ExtendedCompletionChunkDelta
extends OpenAI.Chat.ChatCompletionChunk.Choice.Delta {
reasoning_content?: string | null;
reasoning?: string | null;
}
/**
@@ -64,17 +63,21 @@ export interface ToolCallAccumulator {
arguments: string;
}
type OpenAIContentPart =
| OpenAI.Chat.ChatCompletionContentPartText
| OpenAI.Chat.ChatCompletionContentPartImage
| OpenAI.Chat.ChatCompletionContentPartInputAudio
| {
type: 'file';
file: {
filename: string;
file_data: string;
};
};
/**
* Parsed parts from Gemini content, categorized by type
*/
interface ParsedParts {
thoughtParts: string[];
contentParts: string[];
functionCalls: FunctionCall[];
functionResponses: FunctionResponse[];
mediaParts: Array<{
type: 'image' | 'audio' | 'file';
data: string;
mimeType: string;
fileUri?: string;
}>;
}
/**
* Converter class for transforming data between Gemini and OpenAI formats
@@ -268,48 +271,28 @@ export class OpenAIContentConverter {
): OpenAI.Chat.ChatCompletion {
const candidate = response.candidates?.[0];
const parts = (candidate?.content?.parts || []) as Part[];
// Parse parts inline
const thoughtParts: string[] = [];
const contentParts: string[] = [];
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
let toolCallIndex = 0;
for (const part of parts) {
if (typeof part === 'string') {
contentParts.push(part);
} else if ('text' in part && part.text) {
if ('thought' in part && part.thought) {
thoughtParts.push(part.text);
} else {
contentParts.push(part.text);
}
} else if ('functionCall' in part && part.functionCall) {
toolCalls.push({
id: part.functionCall.id || `call_${toolCallIndex}`,
type: 'function' as const,
function: {
name: part.functionCall.name || '',
arguments: JSON.stringify(part.functionCall.args || {}),
},
});
toolCallIndex += 1;
}
}
const parsedParts = this.parseParts(parts);
const message: ExtendedCompletionMessage = {
role: 'assistant',
content: contentParts.join('') || null,
content: parsedParts.contentParts.join('') || null,
refusal: null,
};
const reasoningContent = thoughtParts.join('');
const reasoningContent = parsedParts.thoughtParts.join('');
if (reasoningContent) {
message.reasoning_content = reasoningContent;
}
if (toolCalls.length > 0) {
message.tool_calls = toolCalls;
if (parsedParts.functionCalls.length > 0) {
message.tool_calls = parsedParts.functionCalls.map((call, index) => ({
id: call.id || `call_${index}`,
type: 'function' as const,
function: {
name: call.name || '',
arguments: JSON.stringify(call.args || {}),
},
}));
}
const finishReason = this.mapGeminiFinishReasonToOpenAI(
@@ -407,82 +390,40 @@ export class OpenAIContentConverter {
}
if (!this.isContentObject(content)) return;
const parts = content.parts || [];
const role = content.role === 'model' ? 'assistant' : 'user';
const contentParts: OpenAIContentPart[] = [];
const reasoningParts: string[] = [];
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
let toolCallIndex = 0;
const parsedParts = this.parseParts(content.parts || []);
for (const part of parts) {
if (typeof part === 'string') {
contentParts.push({ type: 'text' as const, text: part });
continue;
}
if ('text' in part && 'thought' in part && part.thought) {
if (role === 'assistant' && part.text) {
reasoningParts.push(part.text);
}
}
if ('text' in part && part.text && !('thought' in part && part.thought)) {
contentParts.push({ type: 'text' as const, text: part.text });
}
const mediaPart = this.createMediaContentPart(part);
if (mediaPart && role === 'user') {
contentParts.push(mediaPart);
}
if ('functionCall' in part && part.functionCall && role === 'assistant') {
toolCalls.push({
id: part.functionCall.id || `call_${toolCallIndex}`,
type: 'function' as const,
function: {
name: part.functionCall.name || '',
arguments: JSON.stringify(part.functionCall.args || {}),
},
// Handle function responses (tool results) first
if (parsedParts.functionResponses.length > 0) {
for (const funcResponse of parsedParts.functionResponses) {
messages.push({
role: 'tool' as const,
tool_call_id: funcResponse.id || '',
content: this.extractFunctionResponseContent(funcResponse.response),
});
toolCallIndex += 1;
}
if (part.functionResponse && role === 'user') {
// Create tool message for the function response (with embedded media)
const toolMessage = this.createToolMessage(part.functionResponse);
if (toolMessage) {
messages.push(toolMessage);
}
}
return;
}
if (role === 'assistant') {
if (
contentParts.length === 0 &&
toolCalls.length === 0 &&
reasoningParts.length === 0
) {
return;
}
// Handle model messages with function calls
if (content.role === 'model' && parsedParts.functionCalls.length > 0) {
const toolCalls = parsedParts.functionCalls.map((fc, index) => ({
id: fc.id || `call_${index}`,
type: 'function' as const,
function: {
name: fc.name || '',
arguments: JSON.stringify(fc.args || {}),
},
}));
const assistantTextContent = contentParts
.filter(
(part): part is OpenAI.Chat.ChatCompletionContentPartText =>
part.type === 'text',
)
.map((part) => part.text)
.join('');
const assistantMessage: ExtendedChatCompletionAssistantMessageParam = {
role: 'assistant',
content: assistantTextContent || null,
role: 'assistant' as const,
content: parsedParts.contentParts.join('') || null,
tool_calls: toolCalls,
};
if (toolCalls.length > 0) {
assistantMessage.tool_calls = toolCalls;
}
const reasoningContent = reasoningParts.join('');
// Only include reasoning_content if it has actual content
const reasoningContent = parsedParts.thoughtParts.join('');
if (reasoningContent) {
assistantMessage.reasoning_content = reasoningContent;
}
@@ -491,15 +432,79 @@ export class OpenAIContentConverter {
return;
}
if (contentParts.length > 0) {
messages.push({
role: 'user',
content:
contentParts as unknown as OpenAI.Chat.ChatCompletionContentPart[],
});
// Handle regular messages with multimodal content
const role = content.role === 'model' ? 'assistant' : 'user';
const openAIMessage = this.createMultimodalMessage(role, parsedParts);
if (openAIMessage) {
messages.push(openAIMessage);
}
}
/**
* Parse Gemini parts into categorized components
*/
private parseParts(parts: Part[]): ParsedParts {
const thoughtParts: string[] = [];
const contentParts: string[] = [];
const functionCalls: FunctionCall[] = [];
const functionResponses: FunctionResponse[] = [];
const mediaParts: Array<{
type: 'image' | 'audio' | 'file';
data: string;
mimeType: string;
fileUri?: string;
}> = [];
for (const part of parts) {
if (typeof part === 'string') {
contentParts.push(part);
} else if (
'text' in part &&
part.text &&
!('thought' in part && part.thought)
) {
contentParts.push(part.text);
} else if (
'text' in part &&
part.text &&
'thought' in part &&
part.thought
) {
thoughtParts.push(part.text);
} else if ('functionCall' in part && part.functionCall) {
functionCalls.push(part.functionCall);
} else if ('functionResponse' in part && part.functionResponse) {
functionResponses.push(part.functionResponse);
} else if ('inlineData' in part && part.inlineData) {
const { data, mimeType } = part.inlineData;
if (data && mimeType) {
const mediaType = this.getMediaType(mimeType);
mediaParts.push({ type: mediaType, data, mimeType });
}
} else if ('fileData' in part && part.fileData) {
const { fileUri, mimeType } = part.fileData;
if (fileUri && mimeType) {
const mediaType = this.getMediaType(mimeType);
mediaParts.push({
type: mediaType,
data: '',
mimeType,
fileUri,
});
}
}
}
return {
thoughtParts,
contentParts,
functionCalls,
functionResponses,
mediaParts,
};
}
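For orientation, a hedged sketch of how a mixed part list fans out through this categorization (the `Part` type is assumed to come from `@google/genai`; all values are placeholders):

```typescript
import type { Part } from '@google/genai';

// Hypothetical input covering the main branches above.
const parts: Part[] = [
  { text: 'thinking...', thought: true }, // -> thoughtParts
  { text: 'visible text' }, // -> contentParts
  { functionCall: { id: 'call_1', name: 'Read', args: {} } }, // -> functionCalls
  { inlineData: { mimeType: 'image/png', data: 'iVBORw0...' } }, // -> mediaParts
];

// Expected ParsedParts result:
// {
//   thoughtParts: ['thinking...'],
//   contentParts: ['visible text'],
//   functionCalls: [{ id: 'call_1', name: 'Read', args: {} }],
//   functionResponses: [],
//   mediaParts: [{ type: 'image', data: 'iVBORw0...', mimeType: 'image/png' }],
// }
```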
private extractFunctionResponseContent(response: unknown): string {
if (response === null || response === undefined) {
return '';
@@ -530,96 +535,6 @@ export class OpenAIContentConverter {
}
}
/**
* Create a tool message from function response (with embedded media parts)
*/
private createToolMessage(
response: FunctionResponse,
): OpenAI.Chat.ChatCompletionToolMessageParam | null {
const textContent = this.extractFunctionResponseContent(response.response);
const contentParts: OpenAIContentPart[] = [];
// Add text content first if present
if (textContent) {
contentParts.push({ type: 'text' as const, text: textContent });
}
// Add media parts from function response
for (const part of response.parts || []) {
const mediaPart = this.createMediaContentPart(part);
if (mediaPart) {
contentParts.push(mediaPart);
}
}
// Tool messages require content, so skip if empty
if (contentParts.length === 0) {
return null;
}
// Cast to OpenAI type - some OpenAI-compatible APIs support richer content in tool messages
return {
role: 'tool' as const,
tool_call_id: response.id || '',
content: contentParts as unknown as
| string
| OpenAI.Chat.ChatCompletionContentPartText[],
};
}
/**
* Create OpenAI media content part from Gemini part
*/
private createMediaContentPart(part: Part): OpenAIContentPart | null {
if (part.inlineData?.mimeType && part.inlineData?.data) {
const mediaType = this.getMediaType(part.inlineData.mimeType);
if (mediaType === 'image') {
const dataUrl = `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`;
return {
type: 'image_url' as const,
image_url: { url: dataUrl },
};
}
if (mediaType === 'audio') {
const format = this.getAudioFormat(part.inlineData.mimeType);
if (format) {
return {
type: 'input_audio' as const,
input_audio: {
data: part.inlineData.data,
format,
},
};
}
}
}
if (part.fileData?.mimeType && part.fileData?.fileUri) {
const filename = part.fileData.displayName || 'file';
const fileUri = part.fileData.fileUri;
if (fileUri.startsWith('data:')) {
return {
type: 'file' as const,
file: {
filename,
file_data: fileUri,
},
};
}
return {
type: 'file' as const,
file: {
filename,
file_data: `data:${part.fileData.mimeType};base64,${fileUri}`,
},
};
}
return null;
}
/**
* Determine media type from MIME type
*/
@@ -629,6 +544,85 @@ export class OpenAIContentConverter {
return 'file';
}
/**
* Create multimodal OpenAI message from parsed parts
*/
private createMultimodalMessage(
role: 'user' | 'assistant',
parsedParts: Pick<
ParsedParts,
'contentParts' | 'mediaParts' | 'thoughtParts'
>,
): ExtendedChatCompletionMessageParam | null {
const { contentParts, mediaParts, thoughtParts } = parsedParts;
const reasoningContent = thoughtParts.join('');
const content = contentParts.map((text) => ({
type: 'text' as const,
text,
}));
// If no media parts, return simple text message
if (mediaParts.length === 0) {
if (content.length === 0) return null;
const message: ExtendedChatCompletionMessageParam = { role, content };
// Only include reasoning_content if it has actual content
if (reasoningContent) {
(
message as ExtendedChatCompletionAssistantMessageParam
).reasoning_content = reasoningContent;
}
return message;
}
// For assistant messages with media, convert to text only
// since OpenAI assistant messages don't support media content arrays
if (role === 'assistant') {
return content.length > 0
? { role: 'assistant' as const, content }
: null;
}
const contentArray: OpenAI.Chat.ChatCompletionContentPart[] = [...content];
// Add media content
for (const mediaPart of mediaParts) {
if (mediaPart.type === 'image') {
if (mediaPart.fileUri) {
// For file URIs, use the URI directly
contentArray.push({
type: 'image_url' as const,
image_url: { url: mediaPart.fileUri },
});
} else if (mediaPart.data) {
// For inline data, create data URL
const dataUrl = `data:${mediaPart.mimeType};base64,${mediaPart.data}`;
contentArray.push({
type: 'image_url' as const,
image_url: { url: dataUrl },
});
}
} else if (mediaPart.type === 'audio' && mediaPart.data) {
// Convert audio format from MIME type
const format = this.getAudioFormat(mediaPart.mimeType);
if (format) {
contentArray.push({
type: 'input_audio' as const,
input_audio: {
data: mediaPart.data,
format: format as 'wav' | 'mp3',
},
});
}
}
// Note: File type is not directly supported in OpenAI's current API
// Could be extended in the future or handled as text description
}
return contentArray.length > 0
? { role: 'user' as const, content: contentArray }
: null;
}
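Concretely, a user turn with text plus an inline image comes out as a content array like the sketch below (placeholder values; inline data becomes a data URL, while `fileUri` values are passed through as-is). Assistant turns with media, by contrast, keep only their text parts, since OpenAI assistant messages do not accept media content arrays.

```typescript
// Sketch of the user message produced for text plus an inline PNG.
const userMessage = {
  role: 'user' as const,
  content: [
    { type: 'text' as const, text: 'What is in this picture?' },
    {
      type: 'image_url' as const,
      image_url: { url: 'data:image/png;base64,iVBORw0...' },
    },
  ],
};
```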
/**
* Convert MIME type to OpenAI audio format
*/
@@ -699,9 +693,8 @@ export class OpenAIContentConverter {
const parts: Part[] = [];
// Handle reasoning content (thoughts)
const reasoningText =
(choice.message as ExtendedCompletionMessage).reasoning_content ??
(choice.message as ExtendedCompletionMessage).reasoning;
const reasoningText = (choice.message as ExtendedCompletionMessage)
.reasoning_content;
if (reasoningText) {
parts.push({ text: reasoningText, thought: true });
}
@@ -805,9 +798,8 @@ export class OpenAIContentConverter {
if (choice) {
const parts: Part[] = [];
const reasoningText =
(choice.delta as ExtendedCompletionChunkDelta)?.reasoning_content ??
(choice.delta as ExtendedCompletionChunkDelta)?.reasoning;
const reasoningText = (choice.delta as ExtendedCompletionChunkDelta)
?.reasoning_content;
if (reasoningText) {
parts.push({ text: reasoningText, thought: true });
}
@@ -1138,10 +1130,6 @@ export class OpenAIContentConverter {
// If the last message is also an assistant message, merge them
if (lastMessage.role === 'assistant') {
const lastToolCalls =
'tool_calls' in lastMessage ? lastMessage.tool_calls || [] : [];
const currentToolCalls =
'tool_calls' in message ? message.tool_calls || [] : [];
// Combine content
const lastContent = lastMessage.content;
const currentContent = message.content;
@@ -1183,6 +1171,10 @@ export class OpenAIContentConverter {
}
// Combine tool calls
const lastToolCalls =
'tool_calls' in lastMessage ? lastMessage.tool_calls || [] : [];
const currentToolCalls =
'tool_calls' in message ? message.tool_calls || [] : [];
const combinedToolCalls = [...lastToolCalls, ...currentToolCalls];
// Update the last message with combined data

View File

@@ -320,15 +320,13 @@ export class ContentGenerationPipeline {
'frequency_penalty',
'frequencyPenalty',
),
...this.buildReasoningConfig(request),
...this.buildReasoningConfig(),
};
return params;
}
private buildReasoningConfig(
request: GenerateContentParameters,
): Record<string, unknown> {
private buildReasoningConfig(): Record<string, unknown> {
// Reasoning configuration for OpenAI-compatible endpoints is highly fragmented.
// For example, across common providers and models:
//
@@ -338,21 +336,13 @@ export class ContentGenerationPipeline {
// - gpt-5.x series — thinking is enabled by default; can be disabled via `reasoning.effort`
// - qwen3 series — model-dependent; can be manually disabled via `extra_body.enable_thinking`
//
// Given this inconsistency, we avoid mapping values and only pass through the
// configured reasoning object when explicitly enabled. This keeps provider- and
// model-specific semantics intact while honoring request-level opt-out.
// Given this inconsistency, we choose not to set any reasoning config here and
// instead rely on each model's default behavior.
if (request.config?.thinkingConfig?.includeThoughts === false) {
return {};
}
// We plan to introduce provider- and model-specific settings to enable more
// fine-grained control over reasoning configuration.
const reasoning = this.contentGeneratorConfig.reasoning;
if (reasoning === false || reasoning === undefined) {
return {};
}
return { reasoning };
return {};
}
/**

View File

@@ -608,7 +608,7 @@ describe('DashScopeOpenAICompatibleProvider', () => {
});
});
it('should add cache control to last item even if not text for streaming requests', () => {
it('should add empty text item with cache control if last item is not text for streaming requests', () => {
const requestWithNonTextLast: OpenAI.Chat.ChatCompletionCreateParams = {
model: 'qwen-max',
stream: true, // This will trigger cache control on last message
@@ -633,12 +633,12 @@ describe('DashScopeOpenAICompatibleProvider', () => {
const content = result.messages[0]
.content as OpenAI.Chat.ChatCompletionContentPart[];
expect(content).toHaveLength(2);
expect(content).toHaveLength(3);
// Cache control should be added to the last item (image)
expect(content[1]).toEqual({
type: 'image_url',
image_url: { url: 'https://example.com/image.jpg' },
// Should add empty text item with cache control
expect(content[2]).toEqual({
type: 'text',
text: '',
cache_control: { type: 'ephemeral' },
});
});
@@ -709,8 +709,13 @@ describe('DashScopeOpenAICompatibleProvider', () => {
const content = result.messages[0]
.content as OpenAI.Chat.ChatCompletionContentPart[];
// Empty content array should remain empty
expect(content).toEqual([]);
expect(content).toEqual([
{
type: 'text',
text: '',
cache_control: { type: 'ephemeral' },
},
]);
});
});

View File

@@ -257,15 +257,31 @@ export class DashScopeOpenAICompatibleProvider
contentArray: ChatCompletionContentPartWithCache[],
): ChatCompletionContentPartWithCache[] {
if (contentArray.length === 0) {
return contentArray;
return [
{
type: 'text',
text: '',
cache_control: { type: 'ephemeral' },
} as ChatCompletionContentPartTextWithCache,
];
}
// Add cache_control to the last text item
const lastItem = contentArray[contentArray.length - 1];
contentArray[contentArray.length - 1] = {
...lastItem,
cache_control: { type: 'ephemeral' },
} as ChatCompletionContentPartTextWithCache;
if (lastItem.type === 'text') {
// Add cache_control to the last text item
contentArray[contentArray.length - 1] = {
...lastItem,
cache_control: { type: 'ephemeral' },
} as ChatCompletionContentPartTextWithCache;
} else {
// If the last item is not text, add a new text item with cache_control
contentArray.push({
type: 'text',
text: '',
cache_control: { type: 'ephemeral' },
} as ChatCompletionContentPartTextWithCache);
}
return contentArray;
}
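Per the updated tests, the trailing-part handling now behaves as in this sketch (placeholder values):

```typescript
// Before: the last part is an image, which cannot carry the cache marker.
const before = [
  { type: 'text', text: 'Look at this image:' },
  { type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } },
];

// After: an empty text part is appended to hold cache_control, leaving the
// image part untouched.
const after = [
  { type: 'text', text: 'Look at this image:' },
  { type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } },
  { type: 'text', text: '', cache_control: { type: 'ephemeral' } },
];
```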

View File

@@ -283,7 +283,6 @@ describe('ReadFileTool', () => {
inlineData: {
data: pngHeader.toString('base64'),
mimeType: 'image/png',
displayName: 'image.png',
},
});
expect(result.returnDisplay).toBe('Read image file: image.png');
@@ -302,10 +301,9 @@ describe('ReadFileTool', () => {
const result = await invocation.execute(abortSignal);
expect(result.llmContent).toEqual({
fileData: {
fileUri: pdfHeader.toString('base64'),
inlineData: {
data: pdfHeader.toString('base64'),
mimeType: 'application/pdf',
displayName: 'document.pdf',
},
});
expect(result.returnDisplay).toBe('Read pdf file: document.pdf');

View File

@@ -383,7 +383,6 @@ describe('ReadManyFilesTool', () => {
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
]).toString('base64'),
mimeType: 'image/png',
displayName: 'image.png',
},
},
'\n--- End of content ---',
@@ -408,7 +407,6 @@ describe('ReadManyFilesTool', () => {
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
]).toString('base64'),
mimeType: 'image/png',
displayName: 'myExactImage.png',
},
},
'\n--- End of content ---',
@@ -436,34 +434,32 @@ describe('ReadManyFilesTool', () => {
);
});
it('should include PDF files as fileData parts if explicitly requested by extension', async () => {
it('should include PDF files as inlineData parts if explicitly requested by extension', async () => {
createBinaryFile('important.pdf', Buffer.from('%PDF-1.4...'));
const params = { paths: ['*.pdf'] }; // Explicitly requesting .pdf files
const invocation = tool.build(params);
const result = await invocation.execute(new AbortController().signal);
expect(result.llmContent).toEqual([
{
fileData: {
fileUri: Buffer.from('%PDF-1.4...').toString('base64'),
inlineData: {
data: Buffer.from('%PDF-1.4...').toString('base64'),
mimeType: 'application/pdf',
displayName: 'important.pdf',
},
},
'\n--- End of content ---',
]);
});
it('should include PDF files as fileData parts if explicitly requested by name', async () => {
it('should include PDF files as inlineData parts if explicitly requested by name', async () => {
createBinaryFile('report-final.pdf', Buffer.from('%PDF-1.4...'));
const params = { paths: ['report-final.pdf'] };
const invocation = tool.build(params);
const result = await invocation.execute(new AbortController().signal);
expect(result.llmContent).toEqual([
{
fileData: {
fileUri: Buffer.from('%PDF-1.4...').toString('base64'),
inlineData: {
data: Buffer.from('%PDF-1.4...').toString('base64'),
mimeType: 'application/pdf',
displayName: 'report-final.pdf',
},
},
'\n--- End of content ---',

View File

@@ -731,10 +731,6 @@ describe('fileUtils', () => {
expect(
(result.llmContent as { inlineData: { data: string } }).inlineData.data,
).toBe(fakePngData.toString('base64'));
expect(
(result.llmContent as { inlineData: { displayName?: string } })
.inlineData.displayName,
).toBe('image.png');
expect(result.returnDisplay).toContain('Read image file: image.png');
});
@@ -747,20 +743,15 @@ describe('fileUtils', () => {
mockConfig,
);
expect(
(result.llmContent as { fileData: unknown }).fileData,
(result.llmContent as { inlineData: unknown }).inlineData,
).toBeDefined();
expect(
(result.llmContent as { fileData: { mimeType: string } }).fileData
(result.llmContent as { inlineData: { mimeType: string } }).inlineData
.mimeType,
).toBe('application/pdf');
expect(
(result.llmContent as { fileData: { fileUri: string } }).fileData
.fileUri,
(result.llmContent as { inlineData: { data: string } }).inlineData.data,
).toBe(fakePdfData.toString('base64'));
expect(
(result.llmContent as { fileData: { displayName?: string } }).fileData
.displayName,
).toBe('document.pdf');
expect(result.returnDisplay).toContain('Read pdf file: document.pdf');
});

View File

@@ -351,7 +351,6 @@ export async function processSingleFileContent(
.relative(rootDirectory, filePath)
.replace(/\\/g, '/');
const displayName = path.basename(filePath);
switch (fileType) {
case 'binary': {
return {
@@ -457,6 +456,7 @@ export async function processSingleFileContent(
};
}
case 'image':
case 'pdf':
case 'audio':
case 'video': {
const contentBuffer = await fs.promises.readFile(filePath);
@@ -466,21 +466,6 @@ export async function processSingleFileContent(
inlineData: {
data: base64Data,
mimeType: mime.getType(filePath) || 'application/octet-stream',
displayName,
},
},
returnDisplay: `Read ${fileType} file: ${relativePathForDisplay}`,
};
}
case 'pdf': {
const contentBuffer = await fs.promises.readFile(filePath);
const base64Data = contentBuffer.toString('base64');
return {
llmContent: {
fileData: {
fileUri: base64Data,
mimeType: mime.getType(filePath) || 'application/octet-stream',
displayName,
},
},
returnDisplay: `Read ${fileType} file: ${relativePathForDisplay}`,
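With the separate `pdf` case folded into the shared branch, every media type now returns the same `inlineData` envelope. A sketch of the result for a PDF (placeholder values):

```typescript
const result = {
  llmContent: {
    inlineData: {
      data: '<base64 file contents>',
      mimeType: 'application/pdf',
    },
  },
  returnDisplay: 'Read pdf file: docs/document.pdf',
};
```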

View File

@@ -92,11 +92,15 @@ const findCodeBlocks = (
describe('memoryImportProcessor', () => {
beforeEach(() => {
vi.clearAllMocks();
vi.resetAllMocks(); // Use resetAllMocks to clear mock implementations
// Mock console methods
console.warn = vi.fn();
console.error = vi.fn();
console.debug = vi.fn();
// Default mock for lstat (used by findProjectRoot)
mockedFs.lstat.mockRejectedValue(
Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
);
});
afterEach(() => {
@@ -204,20 +208,43 @@ describe('memoryImportProcessor', () => {
);
});
it('should handle file not found errors', async () => {
it('should silently preserve content when file not found (ENOENT)', async () => {
const content = 'Content @./nonexistent.md more content';
const basePath = testPath('test', 'path');
mockedFs.access.mockRejectedValue(new Error('File not found'));
// Mock ENOENT error (file not found)
mockedFs.access.mockRejectedValue(
Object.assign(new Error('ENOENT: no such file or directory'), {
code: 'ENOENT',
}),
);
const result = await processImports(content, basePath, true);
// Content should be preserved as-is when file doesn't exist
expect(result.content).toBe(content);
// No error should be logged for ENOENT
expect(console.error).not.toHaveBeenCalled();
});
it('should log error for non-ENOENT file access errors', async () => {
const content = 'Content @./permission-denied.md more content';
const basePath = testPath('test', 'path');
// Mock a permission denied error (not ENOENT)
mockedFs.access.mockRejectedValue(
Object.assign(new Error('Permission denied'), { code: 'EACCES' }),
);
const result = await processImports(content, basePath, true);
// Should show error comment for non-ENOENT errors
expect(result.content).toContain(
'<!-- Import failed: ./nonexistent.md - File not found -->',
'<!-- Import failed: ./permission-denied.md - Permission denied -->',
);
expect(console.error).toHaveBeenCalledWith(
'[ERROR] [ImportProcessor]',
'Failed to import ./nonexistent.md: File not found',
'Failed to import ./permission-denied.md: Permission denied',
);
});
@@ -448,6 +475,50 @@ describe('memoryImportProcessor', () => {
expect(result.importTree.imports).toBeUndefined();
});
it('should still import valid paths while ignoring non-existent paths', async () => {
const content = '使用 @./valid.md 文件和 @中文路径 注解';
const basePath = testPath('test', 'path');
const importedContent = 'Valid imported content';
// Mock: valid.md exists, 中文路径 doesn't exist
mockedFs.access
.mockResolvedValueOnce(undefined) // ./valid.md exists
.mockRejectedValueOnce(
Object.assign(new Error('ENOENT'), { code: 'ENOENT' }),
); // 中文路径 doesn't exist
mockedFs.readFile.mockResolvedValue(importedContent);
const result = await processImports(content, basePath, true);
// Should import valid.md
expect(result.content).toContain(importedContent);
expect(result.content).toContain('<!-- Imported from: ./valid.md -->');
// The non-existent path should remain as-is
expect(result.content).toContain('@中文路径');
});
it('should import Chinese file names if they exist', async () => {
const content = '导入 @./中文文档.md 文件';
const projectRoot = testPath('test', 'project');
const basePath = testPath(projectRoot, 'src');
const importedContent = '这是中文文档的内容';
mockedFs.access.mockResolvedValue(undefined);
mockedFs.readFile.mockResolvedValue(importedContent);
const result = await processImports(
content,
basePath,
true,
undefined,
projectRoot,
);
// Should successfully import the Chinese-named file
expect(result.content).toContain(importedContent);
expect(result.content).toContain('<!-- Imported from: ./中文文档.md -->');
});
it('should allow imports from parent and subdirectories within project root', async () => {
const content =
'Parent import: @../parent.md Subdir import: @./components/sub.md';

View File

@@ -150,6 +150,18 @@ function isLetter(char: string): boolean {
); // a-z
}
/**
* Checks if an error is a "file not found" error (ENOENT)
*/
function isFileNotFoundError(err: unknown): boolean {
return (
typeof err === 'object' &&
err !== null &&
'code' in err &&
(err as { code: unknown }).code === 'ENOENT'
);
}
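A short sketch of how this guard is meant to be used around the import lookup; the `fs` import and `tryResolveImport` helper are illustrative, not part of the module:

```typescript
import * as fs from 'node:fs/promises';

async function tryResolveImport(fullPath: string): Promise<boolean> {
  try {
    await fs.access(fullPath);
    return true; // real import: read and inline the file
  } catch (err) {
    if (isFileNotFoundError(err)) {
      return false; // not a real import: keep the literal @path text
    }
    throw err; // genuine failure (permissions, I/O): surface it
  }
}
```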
function findCodeRegions(content: string): Array<[number, number]> {
const regions: Array<[number, number]> = [];
const tokens = marked.lexer(content);
@@ -292,7 +304,9 @@ export async function processImports(
depth + 1,
);
} catch (error) {
if (debugMode) {
// If file doesn't exist, silently skip this import (it's not a real import)
// Only log warnings for other types of errors
if (!isFileNotFoundError(error) && debugMode) {
logger.warn(
`Failed to import ${fullPath}: ${hasMessage(error) ? error.message : 'Unknown error'}`,
);
@@ -371,6 +385,12 @@ export async function processImports(
result += `<!-- Imported from: ${importPath} -->\n${imported.content}\n<!-- End of import from: ${importPath} -->`;
imports.push(imported.importTree);
} catch (err: unknown) {
// If file doesn't exist, preserve the original @path text (it's not a real import)
if (isFileNotFoundError(err)) {
result += `@${importPath}`;
continue;
}
// For other errors, log and add error comment
let message = 'Unknown error';
if (hasMessage(err)) {
message = err.message;

View File

@@ -113,7 +113,6 @@ describe('readPathFromWorkspace', () => {
inlineData: {
mimeType: 'image/png',
data: imageData.toString('base64'),
displayName: 'image.png',
},
},
]);
@@ -264,7 +263,6 @@ describe('readPathFromWorkspace', () => {
inlineData: {
mimeType: 'image/png',
data: imageData.toString('base64'),
displayName: 'photo.png',
},
});
});