import { useState, useEffect, useCallback } from 'react';
import useLabStore from '../store/labStore';
import { listOllamaModels, getOllamaModel, setOllamaModel } from '../services/localLLM';
// ── Sub-components ────────────────────────────────────────────────────────────
/**
 * Renders an installation-status label for one offline model (Piper TTS or
 * Whisper STT).
 *
 * NOTE(review): the JSX element markup appears to have been stripped from this
 * file during extraction — the `return (...)` bodies below contain only the
 * badge text. Restore the original tags before shipping.
 *
 * @param {object} props
 * @param {{usesSay?: boolean, available?: boolean, modelsDownloaded?: boolean,
 *          binaryPresent?: boolean}} props.status - per-model status object as
 *   produced by `checkStatus` in SettingsScreen (shape mirrors
 *   `window.electronAPI.download.status()` entries).
 * @returns JSX badge describing the install state.
 */
function StatusBadge({ status }) {
// macOS built-in `say` voice needs no install — always ready.
if (status.usesSay) {
return (
Ready (macOS Voice)
);
}
if (status.available) {
return (
Ready
);
}
// Model files present but the runtime binary is missing — partial install.
if (status.modelsDownloaded && !status.binaryPresent) {
return (
Binary missing
);
}
// Inverse partial state: runtime present, model files missing.
if (!status.modelsDownloaded && status.binaryPresent) {
return (
Model not installed
);
}
// Neither model nor binary present.
return (
Not installed
);
}
/**
 * Renders the state of an in-flight (or finished/failed) model download.
 *
 * NOTE(review): JSX element markup appears stripped here as well — only the
 * rendered text remains inside each `return (...)`.
 *
 * @param {object} props
 * @param {{status?: string, percent?: number, file?: string, error?: string}} [props.progress]
 *   Progress event payload from `window.electronAPI.download.onProgress`;
 *   observed `status` values in this file: 'error', 'exists', 'complete',
 *   'downloading', 'starting'.
 * @returns JSX progress/result message, or null for unknown/absent status.
 */
function ProgressBar({ progress }) {
// Destructure defensively — progress may be undefined before first event.
const { status, percent, file, error } = progress || {};
if (status === 'error') {
return (
Installation failed{error ? `: ${error}` : ''}. Check your internet connection and try again.
);
}
// 'exists' = already on disk; 'complete' = finished just now.
if (status === 'exists' || status === 'complete') {
return (
{file ? `${file} — ` : ''}{status === 'exists' ? 'already installed' : 'done'}
);
}
if (status === 'downloading' || status === 'starting') {
// Percent may be absent on the 'starting' event — show a placeholder.
const pct = typeof percent === 'number' ? percent : null;
return (
{pct !== null ? `${pct}%` : 'Preparing…'}
{file ? ` — ${file}` : ''}
);
}
// No recognized status — render nothing.
return null;
}
// ── Main screen ───────────────────────────────────────────────────────────────
/**
 * Settings screen: manages offline AI model installation (Piper TTS /
 * Whisper STT via Electron IPC), the cloud-vs-local AI source toggle, the
 * Ollama model selector, and theme selection.
 *
 * Relies on `window.electronAPI` (Electron preload bridge) for downloads and
 * persisted config; degrades to a read-only notice in plain browser mode.
 *
 * NOTE(review): as with the components above, the JSX markup in the render
 * section appears stripped — only text/attribute fragments remain.
 */
export default function SettingsScreen() {
const { navigate, useLocalAI, setUseLocalAI, theme, setTheme } = useLabStore();
// Per-model "install in progress" flags, keyed by download type.
const [downloading, setDownloading] = useState({ piper: false, whisper: false });
// Install status per model; shape mirrors StatusBadge's expectations.
const [status, setStatus] = useState({
piper: { available: false, modelsDownloaded: false, binaryPresent: false },
whisper: { available: false, modelsDownloaded: false, binaryPresent: false },
});
// Latest progress event per download type (see ProgressBar).
const [progress, setProgress] = useState({});
const [ollamaModels, setOllamaModels] = useState([]);
// Seed from the service's in-memory value; may be overwritten by persisted config below.
const [selectedModel, setSelectedModel] = useState(getOllamaModel());
// Transient "Saved!" indicator for the model selector.
const [modelSaved, setModelSaved] = useState(false);
// Query the main process for current install status of both models.
// No-op in browser mode (no electronAPI); failures are logged, not thrown.
const checkStatus = useCallback(async () => {
if (!window.electronAPI?.download) return;
try {
const s = await window.electronAPI.download.status();
setStatus({
piper: s.piper || { available: false, modelsDownloaded: false, binaryPresent: false },
whisper: s.whisper || { available: false, modelsDownloaded: false, binaryPresent: false },
});
} catch (err) {
console.warn('Failed to check download status:', err);
}
}, []);
// Mount effect: initial status check, Ollama model list fetch, persisted
// config hydration, and download-progress subscription (with cleanup).
useEffect(() => {
checkStatus();
// NOTE(review): no .catch on this promise chain — a rejection from
// listOllamaModels would be unhandled unless the service never rejects.
listOllamaModels().then(setOllamaModels);
if (window.electronAPI) {
// Hydrate persisted choices; config.get resolves null/undefined when unset.
window.electronAPI.config.get('ollamaModel').then(saved => {
if (saved) { setOllamaModel(saved); setSelectedModel(saved); }
});
window.electronAPI.config.get('useLocalAI').then(saved => {
// Loose != null deliberately matches both null and undefined.
if (saved != null) setUseLocalAI(saved);
});
}
// Register progress listener — capture the returned cleanup fn
let cleanup;
if (window.electronAPI?.download?.onProgress) {
cleanup = window.electronAPI.download.onProgress((data) => {
// data.type is 'piper' or 'whisper'; keep only the latest event per type.
setProgress(prev => ({ ...prev, [data.type]: data }));
if (data.status === 'complete' || data.status === 'error') {
setDownloading(prev => ({ ...prev, [data.type]: false }));
checkStatus();
}
});
}
return () => { if (typeof cleanup === 'function') cleanup(); };
}, [checkStatus, setUseLocalAI]);
// Kick off a model download via IPC; progress arrives through onProgress
// above. The finally clause clears the flag even on early resolve, since
// download.start may resolve before/after the terminal progress event.
const downloadModel = async (type) => {
if (!window.electronAPI?.download) return;
setDownloading(prev => ({ ...prev, [type]: true }));
setProgress(prev => ({ ...prev, [type]: { status: 'starting', percent: 0 } }));
try {
await window.electronAPI.download.start(type);
await checkStatus();
} catch (err) {
console.error('Download failed:', err);
setProgress(prev => ({ ...prev, [type]: { status: 'error', error: err.message } }));
} finally {
setDownloading(prev => ({ ...prev, [type]: false }));
}
};
// Persist the chosen Ollama model (service + local state + Electron config)
// and flash a 2-second "Saved!" confirmation.
const saveOllamaModel = async (model) => {
setOllamaModel(model);
setSelectedModel(model);
if (window.electronAPI) await window.electronAPI.config.set('ollamaModel', model);
setModelSaved(true);
setTimeout(() => setModelSaved(false), 2000);
};
// Toggle cloud vs local AI; persisted only when running inside Electron.
const toggleLocalAI = async (value) => {
setUseLocalAI(value);
if (window.electronAPI) await window.electronAPI.config.set('useLocalAI', value);
};
const isElectron = !!window.electronAPI;
// ── Model card helper ───────────────────────────────────────────────────────
const platform = window.electronAPI?.platform;
// Inline card component for one installable model (piper/whisper).
// NOTE(review): defining a component inside render remounts it on every
// parent render — consider hoisting if input focus/animation glitches appear.
const ModelCard = ({ type, label, subtitle, iconColor, iconBg, icon, size }) => {
const s = status[type];
const isDownloading = downloading[type];
const prog = progress[type];
// macOS TTS is always ready via built-in 'say' — no install needed
const isAutoReady = s.usesSay;
const alreadyInstalled = s.modelsDownloaded && s.binaryPresent;
return (
{/* Binary missing warning */}
{!isAutoReady && s.modelsDownloaded && !s.binaryPresent && (
Model installed but the {type === 'piper' ? 'Piper' : 'Whisper'} runtime is missing.
Click Install to download it automatically.
)}
{isAutoReady
? 'Uses macOS built-in voice synthesis — works offline with no setup required.'
: `Installs an offline ${type === 'piper' ? 'voice synthesis' : 'speech recognition'} engine (${size}). Enables ${type === 'piper' ? 'voice output' : 'voice input'} without internet after installation.`
}
{/* Progress bar (shown while installing) */}
{isDownloading &&
}
{/* Install button — hidden for auto-ready and while installing */}
{!isAutoReady && !isDownloading && (
)}
);
};
// ── Render ──────────────────────────────────────────────────────────────────
return (
Settings
AI Models
{!isElectron && (
Model downloads are only available in the desktop app. You are currently running in browser mode.
)}
{/* TTS */}
}
/>
{/* STT */}
}
/>
{/* AI Source */}
AI Source
{useLocalAI ? 'Using local Ollama model' : 'Using Sarthi Cloud AI (default)'}
Cloud
Local
{useLocalAI && ollamaModels.length === 0 && (
Ollama not detected at localhost:11434. Install Ollama and pull a model to use local AI.
)}
{!useLocalAI && (
Sarthi Cloud uses the full NCERT curriculum prompt and tracks progress. Recommended for most students.
)}
{/* Ollama model selector */}
{useLocalAI && ollamaModels.length > 0 && (
Local AI Model (Ollama)
Choose which model to use for offline chat
{ollamaModels.map(m => (
))}
{modelSaved &&
Saved!
}
)}
{/* Theme */}
Theme
Change the look and feel
{[
{ id: 'indigo', name: 'Nebula', desc: 'Indigo & purple', colors: ['from-indigo-500 to-purple-600', 'from-purple-500 to-indigo-600', 'from-blue-500 to-indigo-500'] },
{ id: 'emerald', name: 'Forest', desc: 'Emerald & teal', colors: ['from-emerald-500 to-teal-600', 'from-teal-500 to-cyan-600', 'from-cyan-500 to-emerald-500'] },
].map(t => (
))}
{/* Info */}
About Offline Setup
-
TTS (macOS): Uses the macOS built-in voice engine — no installation needed, works offline automatically.
-
TTS (Linux): Click Install to download the Piper engine and voice model (~63 MB).
-
STT: Click Install to download the Whisper speech recognition model (~75 MB). Works fully offline after setup.
Models are stored in your user data folder and persist across app updates.
One-time install per device — no internet required after setup.
);
}