diff --git a/webui/src/components/InstanceCard.tsx b/webui/src/components/InstanceCard.tsx
index a79b116..996ab67 100644
--- a/webui/src/components/InstanceCard.tsx
+++ b/webui/src/components/InstanceCard.tsx
@@ -1,6 +1,7 @@
 // ui/src/components/InstanceCard.tsx
 import { Button } from "@/components/ui/button";
 import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
+import { Badge } from "@/components/ui/badge";
 import type { Instance } from "@/types/instance";
 import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react";
 import LogsDialog from "@/components/LogDialog";
@@ -9,7 +10,7 @@ import HealthBadge from "@/components/HealthBadge";
 import BackendBadge from "@/components/BackendBadge";
 import { useState, useEffect } from "react";
 import { useInstanceHealth } from "@/hooks/useInstanceHealth";
-import { instancesApi, llamaCppApi } from "@/lib/api";
+import { instancesApi, llamaCppApi, type Model } from "@/lib/api";
 
 interface InstanceCardProps {
   instance: Instance;
@@ -29,29 +30,33 @@ function InstanceCard({
   const [isLogsOpen, setIsLogsOpen] = useState(false);
   const [isModelsOpen, setIsModelsOpen] = useState(false);
   const [showAllActions, setShowAllActions] = useState(false);
-  const [modelCount, setModelCount] = useState(0);
+  const [models, setModels] = useState<Model[]>([]);
   const health = useInstanceHealth(instance.name, instance.status);
 
   const running = instance.status === "running";
   const isLlamaCpp = instance.options?.backend_type === "llama_cpp";
 
-  // Fetch model count for llama.cpp instances
+  // Fetch models for llama.cpp instances
   useEffect(() => {
     if (!isLlamaCpp || !running) {
-      setModelCount(0);
+      setModels([]);
       return;
     }
 
     void (async () => {
       try {
-        const models = await llamaCppApi.getModels(instance.name);
-        setModelCount(models.length);
+        const fetchedModels = await llamaCppApi.getModels(instance.name);
+        setModels(fetchedModels);
       } catch {
-        setModelCount(0);
+        setModels([]);
       }
     })();
   }, [instance.name, isLlamaCpp, running]);
 
+  // Calculate model counts
+  const totalModels = models.length;
+  const loadedModels = models.filter(m => m.status.value === "loaded").length;
+
   const handleStart = () => {
     startInstance(instance.name);
   };
@@ -124,6 +129,12 @@ function InstanceCard({
           {running && <HealthBadge health={health} />}
+          {isLlamaCpp && running && totalModels > 0 && (
+            <Badge>
+              <Boxes />
+              {loadedModels}/{totalModels} models
+            </Badge>
+          )}
@@ -174,30 +185,28 @@ function InstanceCard({
       {/* Secondary actions - collapsible */}
       {showAllActions && (
-        <div ...>
+        <div ...>
-          {isLlamaCpp && modelCount > 1 && (
+          {isLlamaCpp && totalModels > 1 && (
           )}
@@ -207,7 +216,6 @@ function InstanceCard({
             onClick={handleExport}
             title="Export instance"
             data-testid="export-instance-button"
-            className="flex-1"
           >
             Export
           </Button>
diff --git a/webui/src/lib/api.ts b/webui/src/lib/api.ts
index aeeb100..d648874 100644
--- a/webui/src/lib/api.ts
+++ b/webui/src/lib/api.ts
@@ -237,19 +237,21 @@ export const llamaCppApi = {
   // POST /llama-cpp/{name}/models/{model}/load
   loadModel: (instanceName: string, modelName: string) =>
-    apiCall<{ status: string; message: string }>(
+    apiCall<{ success: boolean }>(
       `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`,
       {
         method: "POST",
+        body: JSON.stringify({ model: modelName }),
       }
     ),
 
   // POST /llama-cpp/{name}/models/{model}/unload
   unloadModel: (instanceName: string, modelName: string) =>
-    apiCall<{ status: string; message: string }>(
+    apiCall<{ success: boolean }>(
       `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`,
       {
         method: "POST",
+        body: JSON.stringify({ model: modelName }),
       }
     ),
 };
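
Note on the `Model` type: the diff imports `type Model` from `@/lib/api` and filters on `m.status.value === "loaded"`, but the type definition itself is not part of this change. A minimal sketch of the shape InstanceCard relies on follows; everything here other than `status.value` is an assumption, not the real `@/lib/api` definition.

// Hypothetical sketch of the Model shape this diff assumes; only
// status.value is actually read by InstanceCard. The id field is an
// illustrative guess, not the real definition.
export interface Model {
  id: string;                               // assumed identifier field
  status: {
    value: "loaded" | "unloaded" | string;  // "loaded" is the only value the card checks
  };
}

// Derivation used in InstanceCard, for reference:
// const totalModels  = models.length;
// const loadedModels = models.filter(m => m.status.value === "loaded").length;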
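
Note on the `loadModel`/`unloadModel` change: the response type narrows from `{ status: string; message: string }` to `{ success: boolean }`, and both calls now send the model name in the request body in addition to the URL path. Any caller that destructured `status` or `message` must be updated. A sketch of an adjusted caller, where only `llamaCppApi.loadModel` comes from the diff and the handler and error reporting are hypothetical:

// Hypothetical caller updated for the new { success: boolean } response.
import { llamaCppApi } from "@/lib/api";

async function handleLoadModel(instanceName: string, modelName: string): Promise<void> {
  const result = await llamaCppApi.loadModel(instanceName, modelName);
  if (!result.success) {
    // The old { status, message } fields are gone, so report generically.
    console.error(`Failed to load model "${modelName}" on instance "${instanceName}"`);
  }
}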