import React, { useState, useEffect } from 'react'
import { Button } from '@/components/ui/button'
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogHeader,
  DialogTitle,
} from '@/components/ui/dialog'
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from '@/components/ui/table'
import { Badge } from '@/components/ui/badge'
import { llamaCppApi } from '@/lib/api'
import { RefreshCw, Loader2, AlertCircle } from 'lucide-react'

interface ModelsDialogProps {
  open: boolean
  onOpenChange: (open: boolean) => void
  instanceName: string
  isRunning: boolean
}

interface Model {
  id: string
  object: string
  owned_by: string
  created: number
  in_cache: boolean
  path: string
  status: {
    value: string // "loaded" | "loading" | "unloaded"
    args: string[]
  }
}

// Dot/spinner shown next to a model's status text. The original markup was
// stripped from this file; the elements below are a plausible reconstruction
// using the icons imported above.
const StatusIcon: React.FC<{ status: string }> = ({ status }) => {
  switch (status) {
    case 'loaded':
      return <span className="h-2 w-2 rounded-full bg-green-500" />
    case 'loading':
      return <Loader2 className="h-4 w-4 animate-spin text-muted-foreground" />
    case 'unloaded':
      return <span className="h-2 w-2 rounded-full bg-muted-foreground/40" />
    default:
      return null
  }
}

const ModelsDialog: React.FC<ModelsDialogProps> = ({
  open,
  onOpenChange,
  instanceName,
  isRunning,
}) => {
  const [models, setModels] = useState<Model[]>([])
  const [loading, setLoading] = useState(false)
  const [error, setError] = useState<string | null>(null)
  const [loadingModels, setLoadingModels] = useState<Set<string>>(new Set())

  // Fetch the models list for this instance
  const fetchModels = React.useCallback(async () => {
    if (!instanceName || !isRunning) return

    setLoading(true)
    setError(null)
    try {
      const response = await llamaCppApi.getModels(instanceName)
      setModels(response)
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to fetch models')
    } finally {
      setLoading(false)
    }
  }, [instanceName, isRunning])

  // Fetch models when the dialog opens
  useEffect(() => {
    if (!open || !isRunning) return

    // Initial fetch
    void fetchModels()
  }, [open, isRunning, fetchModels])

  // Auto-refresh only while at least one model is loading
  useEffect(() => {
    if (!open || !isRunning) return

    // Check if any model is in a loading state
    const hasLoadingModel = models.some((m) => m.status.value === 'loading')
    if (!hasLoadingModel) return

    // Poll every 2 seconds while a model is loading
    const interval = setInterval(() => {
      void fetchModels()
    }, 2000)

    return () => clearInterval(interval)
  }, [open, isRunning, models, fetchModels])

  // Load a model, tracking it in loadingModels so its button can be disabled
  const loadModel = async (modelName: string) => {
    setLoadingModels((prev) => new Set(prev).add(modelName))
    setError(null)
    try {
      await llamaCppApi.loadModel(instanceName, modelName)
      // Give the backend a moment to start the load
      await new Promise((resolve) => setTimeout(resolve, 500))
      // Refresh the models list after loading
      await fetchModels()
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to load model')
    } finally {
      setLoadingModels((prev) => {
        const newSet = new Set(prev)
        newSet.delete(modelName)
        return newSet
      })
    }
  }

  // Unload a model, with the same in-flight tracking as loadModel
  const unloadModel = async (modelName: string) => {
    setLoadingModels((prev) => new Set(prev).add(modelName))
    setError(null)
    try {
      await llamaCppApi.unloadModel(instanceName, modelName)
      // Give the backend a moment to process the unload
      await new Promise((resolve) => setTimeout(resolve, 500))
      // Refresh the models list after unloading
      await fetchModels()
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to unload model')
    } finally {
      setLoadingModels((prev) => {
        const newSet = new Set(prev)
        newSet.delete(modelName)
        return newSet
      })
    }
  }

  return (
    <Dialog open={open} onOpenChange={onOpenChange}>
      <DialogContent className="max-w-2xl">
        <DialogHeader>
          <DialogTitle className="flex items-center gap-2">
            Models: {instanceName}
            <Badge variant={isRunning ? 'default' : 'secondary'}>
              {isRunning ? 'Running' : 'Stopped'}
            </Badge>
          </DialogTitle>
          <DialogDescription>
            Manage models in this llama.cpp instance
          </DialogDescription>
        </DialogHeader>
        {/* Error Display */}
        {error && (
          <div className="flex items-center gap-2 text-sm text-destructive">
            <AlertCircle className="h-4 w-4 shrink-0" />
            {error}
          </div>
        )}

        {/* Models Table */}
        {!isRunning ? (
          <div className="py-8 text-center text-sm text-muted-foreground">
            Instance is not running
          </div>
        ) : loading && models.length === 0 ? (
          <div className="flex items-center justify-center gap-2 py-8 text-sm text-muted-foreground">
            <Loader2 className="h-4 w-4 animate-spin" />
            Loading models...
          </div>
        ) : models.length === 0 ? (
          <div className="py-8 text-center text-sm text-muted-foreground">
            No models found
          </div>
        ) : (
          <Table>
            <TableHeader>
              <TableRow>
                <TableHead>Model</TableHead>
                <TableHead>Status</TableHead>
                <TableHead>Actions</TableHead>
              </TableRow>
            </TableHeader>
            <TableBody>
              {models.map((model) => {
                const isLoading = loadingModels.has(model.id)
                const isModelLoading = model.status.value === 'loading'
                return (
                  <TableRow key={model.id}>
                    <TableCell>{model.id}</TableCell>
                    <TableCell>
                      <div className="flex items-center gap-2">
                        <StatusIcon status={model.status.value} />
                        {model.status.value}
                      </div>
                    </TableCell>
                    <TableCell>
                      {model.status.value === 'loaded' ? (
                        <Button
                          variant="outline"
                          size="sm"
                          disabled={isLoading || isModelLoading}
                          onClick={() => void unloadModel(model.id)}
                        >
                          Unload
                        </Button>
                      ) : model.status.value === 'unloaded' ? (
                        <Button
                          size="sm"
                          disabled={isLoading || isModelLoading}
                          onClick={() => void loadModel(model.id)}
                        >
                          Load
                        </Button>
                      ) : (
                        <Button variant="outline" size="sm" disabled>
                          <Loader2 className="h-4 w-4 animate-spin" />
                        </Button>
                      )}
                    </TableCell>
                  </TableRow>
                )
              })}
            </TableBody>
          </Table>
        )}
        {/* Auto-refresh indicator - only shown while models are loading */}
        {isRunning && models.some((m) => m.status.value === 'loading') && (
          <div className="flex items-center gap-2 text-xs text-muted-foreground">
            <RefreshCw className="h-3 w-3 animate-spin" />
            Auto-refreshing while models are loading
          </div>
        )}
      </DialogContent>
    </Dialog>
  )
}

export default ModelsDialog
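
// Usage sketch (illustrative; the surrounding component and its `instance`
// object are hypothetical, not part of this file):
//
//   const [modelsOpen, setModelsOpen] = useState(false)
//
//   <Button onClick={() => setModelsOpen(true)}>Models</Button>
//   <ModelsDialog
//     open={modelsOpen}
//     onOpenChange={setModelsOpen}
//     instanceName={instance.name}
//     isRunning={instance.running}
//   />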