1 Commit

Author SHA1 Message Date
fd9e651e09 Implement model management for llama.cpp instances 2025-12-18 19:14:20 +01:00
11 changed files with 990 additions and 11 deletions

141
pkg/instance/models.go Normal file
View File

@@ -0,0 +1,141 @@
package instance
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"llamactl/pkg/backends"
"net/http"
"time"
)
// Model represents a model available in a llama.cpp instance
type Model struct {
ID string `json:"id"`
Object string `json:"object"`
OwnedBy string `json:"owned_by"`
Created int64 `json:"created"`
InCache bool `json:"in_cache"`
Path string `json:"path"`
Status ModelStatus `json:"status"`
}
// ModelStatus represents the status of a model in an instance
type ModelStatus struct {
Value string `json:"value"` // "loaded" | "loading" | "unloaded"
Args []string `json:"args"`
}
// IsLlamaCpp checks if this instance is a llama.cpp instance
func (i *Instance) IsLlamaCpp() bool {
opts := i.GetOptions()
if opts == nil {
return false
}
return opts.BackendOptions.BackendType == backends.BackendTypeLlamaCpp
}
// GetModels fetches the models available in this llama.cpp instance
func (i *Instance) GetModels() ([]Model, error) {
if !i.IsLlamaCpp() {
return nil, fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
}
if !i.IsRunning() {
return nil, fmt.Errorf("instance %s is not running", i.Name)
}
var result struct {
Data []Model `json:"data"`
}
if err := i.doRequest("GET", "/models", nil, &result, 10*time.Second); err != nil {
return nil, fmt.Errorf("failed to fetch models: %w", err)
}
return result.Data, nil
}
// LoadModel loads a model in this llama.cpp instance
func (i *Instance) LoadModel(modelName string) error {
if !i.IsLlamaCpp() {
return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
}
if !i.IsRunning() {
return fmt.Errorf("instance %s is not running", i.Name)
}
// Make the load request
reqBody := map[string]string{"model": modelName}
if err := i.doRequest("POST", "/models/load", reqBody, nil, 30*time.Second); err != nil {
return fmt.Errorf("failed to load model: %w", err)
}
return nil
}
// UnloadModel unloads a model from this llama.cpp instance
func (i *Instance) UnloadModel(modelName string) error {
if !i.IsLlamaCpp() {
return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
}
if !i.IsRunning() {
return fmt.Errorf("instance %s is not running", i.Name)
}
// Make the unload request
reqBody := map[string]string{"model": modelName}
if err := i.doRequest("POST", "/models/unload", reqBody, nil, 30*time.Second); err != nil {
return fmt.Errorf("failed to unload model: %w", err)
}
return nil
}
// doRequest makes an HTTP request to this instance's backend
func (i *Instance) doRequest(method, path string, reqBody, respBody any, timeout time.Duration) error {
url := fmt.Sprintf("http://%s:%d%s", i.GetHost(), i.GetPort(), path)
var bodyReader io.Reader
if reqBody != nil {
bodyBytes, err := json.Marshal(reqBody)
if err != nil {
return fmt.Errorf("failed to marshal request body: %w", err)
}
bodyReader = bytes.NewReader(bodyBytes)
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, method, url, bodyReader)
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
if reqBody != nil {
req.Header.Set("Content-Type", "application/json")
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
return fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes))
}
if respBody != nil {
if err := json.NewDecoder(resp.Body).Decode(respBody); err != nil {
return fmt.Errorf("failed to decode response: %w", err)
}
}
return nil
}

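For reference, the wire format `LoadModel` produces via `doRequest` is just a JSON body with a single `model` field. A minimal standalone sketch of the same request, assuming an instance listening on localhost:8080 and a hypothetical model name:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Hypothetical address; doRequest derives the real one from GetHost()/GetPort().
	base := "http://localhost:8080"

	// Same shape as LoadModel above: POST /models/load with {"model": "..."}.
	body, err := json.Marshal(map[string]string{"model": "qwen-7b"}) // model name is made up
	if err != nil {
		panic(err)
	}

	client := &http.Client{Timeout: 30 * time.Second} // mirrors the 30s load timeout
	resp, err := client.Post(base+"/models/load", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // non-200 is surfaced as an error by doRequest
}
```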
View File

@@ -24,6 +24,8 @@ type InstanceManager interface {
EvictLRUInstance() error
RestartInstance(name string) (*instance.Instance, error)
GetInstanceLogs(name string, numLines int) (string, error)
ResolveInstance(modelName string) (string, error)
RefreshModelRegistry(inst *instance.Instance) error
Shutdown()
}
@@ -34,6 +36,7 @@ type instanceManager struct {
db database.InstanceStore
remote *remoteManager
lifecycle *lifecycleManager
models *modelRegistry
// Configuration
globalConfig *config.AppConfig
@@ -60,12 +63,16 @@ func New(globalConfig *config.AppConfig, db database.InstanceStore) InstanceMana
// Initialize remote manager
remote := newRemoteManager(globalConfig.Nodes, 30*time.Second)
// Initialize model registry
models := newModelRegistry()
// Create manager instance
im := &instanceManager{
registry: registry,
ports: ports,
db: db,
remote: remote,
models: models,
globalConfig: globalConfig,
}
@@ -142,9 +149,27 @@ func (im *instanceManager) loadInstances() error {
// Auto-start instances that have auto-restart enabled
go im.autoStartInstances()
// Discover models from all running llama.cpp instances
go im.discoverAllModels()
return nil
}
// discoverAllModels discovers and registers models for all running llama.cpp instances
func (im *instanceManager) discoverAllModels() {
instances := im.registry.listRunning()
for _, inst := range instances {
if !inst.IsLlamaCpp() {
continue
}
if err := im.RefreshModelRegistry(inst); err != nil {
log.Printf("Failed to discover models for instance %s: %v", inst.Name, err)
}
}
}
// loadInstance loads a single persisted instance and adds it to the registry
func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error {
name := persistedInst.Name

View File

@@ -0,0 +1,79 @@
package manager
import (
"fmt"
"llamactl/pkg/instance"
"sync"
)
// modelRegistry maintains a global mapping of model names to instance names
// for llama.cpp instances. Model names must be globally unique across all instances.
type modelRegistry struct {
mu sync.RWMutex
modelToInstance map[string]string // model name → instance name
instanceModels map[string][]string // instance name → model names
}
// newModelRegistry creates a new model registry
func newModelRegistry() *modelRegistry {
return &modelRegistry{
modelToInstance: make(map[string]string),
instanceModels: make(map[string][]string),
}
}
// registerModels registers models from an instance to the registry.
// Skips models that conflict with other instances and returns a list of conflicts.
func (mr *modelRegistry) registerModels(instanceName string, models []instance.Model) []string {
mr.mu.Lock()
defer mr.mu.Unlock()
// Unregister any existing models for this instance first
mr.removeModels(instanceName)
// Register models, skipping conflicts
var modelNames []string
var conflicts []string
for _, model := range models {
// Check if this model conflicts with another instance
if existingInstance, exists := mr.modelToInstance[model.ID]; exists && existingInstance != instanceName {
conflicts = append(conflicts, fmt.Sprintf("%s (already in %s)", model.ID, existingInstance))
continue // Skip this model
}
// Register the model
mr.modelToInstance[model.ID] = instanceName
modelNames = append(modelNames, model.ID)
}
mr.instanceModels[instanceName] = modelNames
return conflicts
}
// unregisterModels removes all models for an instance
func (mr *modelRegistry) unregisterModels(instanceName string) {
mr.mu.Lock()
defer mr.mu.Unlock()
mr.removeModels(instanceName)
}
// removeModels removes all models for an instance (caller must hold lock)
func (mr *modelRegistry) removeModels(instanceName string) {
if models, exists := mr.instanceModels[instanceName]; exists {
for _, modelName := range models {
delete(mr.modelToInstance, modelName)
}
delete(mr.instanceModels, instanceName)
}
}
// getModelInstance returns the instance name that hosts the given model
func (mr *modelRegistry) getModelInstance(modelName string) (string, bool) {
mr.mu.RLock()
defer mr.mu.RUnlock()
instanceName, exists := mr.modelToInstance[modelName]
return instanceName, exists
}

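The intended lifecycle is register on start, look up on request, unregister on stop, with first-registrant-wins on name conflicts. A sketch of that flow; the instance and model names are made up, and since the registry is unexported this would have to live inside package manager (e.g. as a test):

```go
package manager

import (
	"fmt"
	"llamactl/pkg/instance"
)

// ExampleModelRegistry is illustrative only, not part of this commit.
func ExampleModelRegistry() {
	mr := newModelRegistry()

	// instance-a registers two models.
	mr.registerModels("instance-a", []instance.Model{{ID: "llama-3-8b"}, {ID: "qwen-7b"}})

	// instance-b tries to claim an ID already owned by instance-a: it is skipped.
	conflicts := mr.registerModels("instance-b", []instance.Model{{ID: "qwen-7b"}})
	fmt.Println(conflicts) // [qwen-7b (already in instance-a)]

	// Lookups resolve to the first registrant.
	owner, ok := mr.getModelInstance("qwen-7b")
	fmt.Println(owner, ok) // instance-a true

	// When instance-a stops, its names are freed for reuse.
	mr.unregisterModels("instance-a")
	_, ok = mr.getModelInstance("llama-3-8b")
	fmt.Println(ok) // false
}
```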
View File

@@ -337,6 +337,9 @@ func (im *instanceManager) DeleteInstance(name string) error {
// Release port (use ReleaseByInstance for proper cleanup)
im.ports.releaseByInstance(name)
// Unregister models when instance is deleted
im.onInstanceStopped(name)
// Remove from registry
if err := im.registry.remove(name); err != nil {
return fmt.Errorf("failed to remove instance from registry: %w", err)
@@ -396,6 +399,9 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
log.Printf("Warning: failed to persist instance %s: %v", name, err)
}
// Discover and register models for llama.cpp instances
go im.onInstanceStarted(name)
return inst, nil
}
@@ -455,6 +461,9 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error)
log.Printf("Warning: failed to persist instance %s: %v", name, err)
}
// Unregister models when instance stops
im.onInstanceStopped(name)
return inst, nil
}
@@ -535,3 +544,73 @@ func (im *instanceManager) setPortInOptions(options *instance.Options, port int)
func (im *instanceManager) EvictLRUInstance() error {
return im.lifecycle.evictLRU()
}
// ResolveInstance resolves a model name to an instance name.
// Precedence: instance name > model registry
func (im *instanceManager) ResolveInstance(modelName string) (string, error) {
// Check if it's an instance name first
if _, err := im.GetInstance(modelName); err == nil {
return modelName, nil
}
// Check if it's a model name in the registry
if instanceName, exists := im.models.getModelInstance(modelName); exists {
return instanceName, nil
}
return "", fmt.Errorf("model or instance '%s' not found", modelName)
}
// RefreshModelRegistry refreshes the model registry for the given instance
func (im *instanceManager) RefreshModelRegistry(inst *instance.Instance) error {
if !inst.IsRunning() {
return fmt.Errorf("instance %s is not running", inst.Name)
}
// Fetch models from instance and register them
models, err := inst.GetModels()
if err != nil {
return fmt.Errorf("failed to fetch models: %w", err)
}
// Register models, skipping conflicts
conflicts := im.models.registerModels(inst.Name, models)
if len(conflicts) > 0 {
log.Printf("Warning: Model name conflicts for instance %s (skipped): %v", inst.Name, conflicts)
}
// Check if instance name shadows any model names
if otherInstance, exists := im.models.getModelInstance(inst.Name); exists && otherInstance != inst.Name {
log.Printf("Warning: Instance name '%s' shadows model name from instance '%s'", inst.Name, otherInstance)
}
return nil
}
// onInstanceStarted is called when an instance successfully starts and becomes healthy
func (im *instanceManager) onInstanceStarted(name string) {
inst, err := im.GetInstance(name)
if err != nil {
log.Printf("Failed to get instance %s for model discovery: %v", name, err)
return
}
// Only discover models for llama.cpp instances
if !inst.IsLlamaCpp() {
return
}
if err := inst.WaitForHealthy(30); err != nil {
log.Printf("Instance %s not healthy, skipping model discovery: %v", name, err)
return
}
if err := im.RefreshModelRegistry(inst); err != nil {
log.Printf("Failed to discover models for instance %s: %v", name, err)
}
}
// onInstanceStopped is called when an instance stops or is deleted
func (im *instanceManager) onInstanceStopped(name string) {
im.models.unregisterModels(name)
}

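Resolution precedence in practice: an exact instance name short-circuits the lookup, and only unmatched names fall through to the model registry, so an instance named like a model shadows that model (hence the warning logged above). A hedged sketch of a caller; `routeByModel` is illustrative, not part of this commit:

```go
package manager

import "log"

// routeByModel shows the precedence ResolveInstance applies:
// instance name first, then the model registry, else an error.
func routeByModel(im InstanceManager, modelName string) {
	target, err := im.ResolveInstance(modelName)
	if err != nil {
		// e.g. "model or instance 'unknown' not found"
		log.Printf("resolve failed: %v", err)
		return
	}
	// With the registry example above, "qwen-7b" resolves to the instance
	// hosting it, while an exact instance name resolves to itself.
	log.Printf("routing request to instance %s", target)
}
```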
View File

@@ -5,9 +5,12 @@ import (
"fmt"
"llamactl/pkg/backends"
"llamactl/pkg/instance"
"log"
"net/http"
"os/exec"
"strings"
"github.com/go-chi/chi/v5"
)
// ParseCommandRequest represents the request body for backend command parsing
@@ -306,3 +309,115 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc {
func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc {
return h.executeLlamaServerCommand("--list-devices", "Failed to list devices")
}
// LlamaCppListModels godoc
// @Summary List models in a llama.cpp instance
// @Description Returns a list of models available in the specified llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produce json
// @Param name path string true "Instance Name"
// @Success 200 {object} map[string]any "Models list response"
// @Failure 400 {string} string "Invalid instance"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models [get]
func (h *Handler) LlamaCppListModels() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
inst, err := h.getInstance(r)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
return
}
models, err := inst.GetModels()
if err != nil {
writeError(w, http.StatusBadRequest, "get_models_failed", err.Error())
return
}
response := map[string]any{
"object": "list",
"data": models,
}
writeJSON(w, http.StatusOK, response)
}
}
// LlamaCppLoadModel godoc
// @Summary Load a model in a llama.cpp instance
// @Description Loads the specified model in the given llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produce json
// @Param name path string true "Instance Name"
// @Param model path string true "Model Name"
// @Success 200 {object} map[string]string "Success message"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models/{model}/load [post]
func (h *Handler) LlamaCppLoadModel() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
inst, err := h.getInstance(r)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
return
}
modelName := chi.URLParam(r, "model")
if err := inst.LoadModel(modelName); err != nil {
writeError(w, http.StatusBadRequest, "load_model_failed", err.Error())
return
}
// Refresh the model registry
if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil {
log.Printf("Warning: failed to refresh model registry after load: %v", err)
}
writeJSON(w, http.StatusOK, map[string]string{
"status": "success",
"message": fmt.Sprintf("Model %s loaded successfully", modelName),
})
}
}
// LlamaCppUnloadModel godoc
// @Summary Unload a model in a llama.cpp instance
// @Description Unloads the specified model from the given llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produce json
// @Param name path string true "Instance Name"
// @Param model path string true "Model Name"
// @Success 200 {object} map[string]string "Success message"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post]
func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
inst, err := h.getInstance(r)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
return
}
modelName := chi.URLParam(r, "model")
if err := inst.UnloadModel(modelName); err != nil {
writeError(w, http.StatusBadRequest, "unload_model_failed", err.Error())
return
}
// Refresh the model registry
if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil {
log.Printf("Warning: failed to refresh model registry after unload: %v", err)
}
writeJSON(w, http.StatusOK, map[string]string{
"status": "success",
"message": fmt.Sprintf("Model %s unloaded successfully", modelName),
})
}
}

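Calling the new endpoints from outside, as a minimal sketch; host, instance, and model names here are hypothetical, and authentication (ApiKeyAuth per the annotations above) is omitted but required against a real deployment:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:8080/api/v1"
	inst := url.PathEscape("my-llama") // hypothetical instance name
	model := url.PathEscape("qwen-7b") // hypothetical model name

	// POST /api/v1/llama-cpp/{name}/models/{model}/load — no request body needed.
	endpoint := fmt.Sprintf("%s/llama-cpp/%s/models/%s/load", base, inst, model)
	resp, err := http.Post(endpoint, "application/json", nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// On success the handler returns {"status":"success","message":"Model ... loaded successfully"}.
	fmt.Println("status:", resp.StatusCode)
}
```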
View File

@@ -3,6 +3,7 @@ package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"llamactl/pkg/instance"
"llamactl/pkg/validation"
@@ -40,14 +41,41 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
return
}
openaiInstances := make([]OpenAIInstance, len(instances))
for i, inst := range instances {
openaiInstances[i] = OpenAIInstance{
var openaiInstances []OpenAIInstance
// For each llama.cpp instance, try to fetch models and add them as separate entries
for _, inst := range instances {
if inst.IsLlamaCpp() && inst.IsRunning() {
// Try to fetch models from the instance
models, err := inst.GetModels()
if err != nil {
fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err)
continue
}
for _, model := range models {
openaiInstances = append(openaiInstances, OpenAIInstance{
ID: model.ID,
Object: "model",
Created: model.Created,
OwnedBy: inst.Name,
})
}
if len(models) > 1 {
// Skip adding the instance name if multiple models are present
continue
}
}
// Add instance name as a single entry (for non-llama.cpp instances, stopped instances, or when at most one model is exposed)
openaiInstances = append(openaiInstances, OpenAIInstance{
ID: inst.Name,
Object: "model",
Created: inst.Created,
OwnedBy: "llamactl",
}
})
}
openaiResponse := OpenAIListInstancesResponse{
@@ -89,12 +117,19 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
modelName, ok := requestBody["model"].(string)
if !ok || modelName == "" {
writeError(w, http.StatusBadRequest, "invalid_request", "Instance name is required")
writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required")
return
}
// Resolve model name to instance name (checks instance names first, then model registry)
instanceName, err := h.InstanceManager.ResolveInstance(modelName)
if err != nil {
writeError(w, http.StatusBadRequest, "model_not_found", err.Error())
return
}
// Validate instance name at the entry point
validatedName, err := validation.ValidateInstanceName(modelName)
validatedName, err := validation.ValidateInstanceName(instanceName)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid_instance_name", err.Error())
return

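End to end, an OpenAI-style client no longer needs to know instance names: passing any registered model name is enough, and the proxy resolves it to the hosting instance. A sketch, assuming the proxy is mounted at /v1/chat/completions (the exact mount path is not shown in this commit) and a hypothetical model name:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// "model" may carry either an instance name or a model name;
	// the proxy resolves it via ResolveInstance before forwarding.
	body := []byte(`{"model":"qwen-7b","messages":[{"role":"user","content":"Hello"}]}`)

	resp, err := http.Post("http://localhost:8080/v1/chat/completions",
		"application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```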
View File

@@ -73,6 +73,13 @@ func SetupRouter(handler *Handler) *chi.Mux {
})
})
// Llama.cpp instance-specific endpoints
r.Route("/llama-cpp/{name}", func(r chi.Router) {
r.Get("/models", handler.LlamaCppListModels())
r.Post("/models/{model}/load", handler.LlamaCppLoadModel())
r.Post("/models/{model}/unload", handler.LlamaCppUnloadModel())
})
// Node management endpoints
r.Route("/nodes", func(r chi.Router) {
r.Get("/", handler.ListNodes()) // List all nodes

View File

@@ -2,13 +2,14 @@
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import type { Instance } from "@/types/instance";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download } from "lucide-react";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react";
import LogsDialog from "@/components/LogDialog";
import ModelsDialog from "@/components/ModelsDialog";
import HealthBadge from "@/components/HealthBadge";
import BackendBadge from "@/components/BackendBadge";
import { useState } from "react";
import { useState, useEffect } from "react";
import { useInstanceHealth } from "@/hooks/useInstanceHealth";
import { instancesApi } from "@/lib/api";
import { instancesApi, llamaCppApi } from "@/lib/api";
interface InstanceCardProps {
instance: Instance;
@@ -26,9 +27,31 @@ function InstanceCard({
editInstance,
}: InstanceCardProps) {
const [isLogsOpen, setIsLogsOpen] = useState(false);
const [isModelsOpen, setIsModelsOpen] = useState(false);
const [showAllActions, setShowAllActions] = useState(false);
const [modelCount, setModelCount] = useState(0);
const health = useInstanceHealth(instance.name, instance.status);
const running = instance.status === "running";
const isLlamaCpp = instance.options?.backend_type === "llama_cpp";
// Fetch model count for llama.cpp instances
useEffect(() => {
if (!isLlamaCpp || !running) {
setModelCount(0);
return;
}
void (async () => {
try {
const models = await llamaCppApi.getModels(instance.name);
setModelCount(models.length);
} catch {
setModelCount(0);
}
})();
}, [instance.name, isLlamaCpp, running]);
const handleStart = () => {
startInstance(instance.name);
};
@@ -53,6 +76,10 @@ function InstanceCard({
setIsLogsOpen(true);
};
const handleModels = () => {
setIsModelsOpen(true);
};
const handleExport = () => {
void (async () => {
try {
@@ -83,8 +110,6 @@ function InstanceCard({
})();
};
const running = instance.status === "running";
return (
<>
<Card className="hover:shadow-md transition-shadow">
@@ -162,6 +187,20 @@ function InstanceCard({
Logs
</Button>
{isLlamaCpp && modelCount > 1 && (
<Button
size="sm"
variant="outline"
onClick={handleModels}
title="Manage models"
data-testid="manage-models-button"
className="flex-1"
>
<Boxes className="h-4 w-4 mr-1" />
Models ({modelCount})
</Button>
)}
<Button
size="sm"
variant="outline"
@@ -195,6 +234,13 @@ function InstanceCard({
instanceName={instance.name}
isRunning={running}
/>
<ModelsDialog
open={isModelsOpen}
onOpenChange={setIsModelsOpen}
instanceName={instance.name}
isRunning={running}
/>
</>
);
}

View File

@@ -0,0 +1,287 @@
import React, { useState, useEffect } from 'react'
import { Button } from '@/components/ui/button'
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
} from '@/components/ui/dialog'
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from '@/components/ui/table'
import { Badge } from '@/components/ui/badge'
import { llamaCppApi } from '@/lib/api'
import { RefreshCw, Loader2, AlertCircle } from 'lucide-react'
interface ModelsDialogProps {
open: boolean
onOpenChange: (open: boolean) => void
instanceName: string
isRunning: boolean
}
interface Model {
id: string
object: string
owned_by: string
created: number
in_cache: boolean
path: string
status: {
value: string // "loaded" | "loading" | "unloaded"
args: string[]
}
}
const StatusIcon: React.FC<{ status: string }> = ({ status }) => {
switch (status) {
case 'loaded':
return (
<div className="h-2 w-2 rounded-full bg-green-500" />
)
case 'loading':
return (
<Loader2
className="h-3 w-3 animate-spin text-yellow-500"
/>
)
case 'unloaded':
return (
<div className="h-2 w-2 rounded-full bg-gray-400" />
)
default:
return null
}
}
const ModelsDialog: React.FC<ModelsDialogProps> = ({
open,
onOpenChange,
instanceName,
isRunning,
}) => {
const [models, setModels] = useState<Model[]>([])
const [loading, setLoading] = useState(false)
const [error, setError] = useState<string | null>(null)
const [loadingModels, setLoadingModels] = useState<Set<string>>(new Set())
// Fetch models function
const fetchModels = React.useCallback(async () => {
if (!instanceName || !isRunning) return
setLoading(true)
setError(null)
try {
const response = await llamaCppApi.getModels(instanceName)
setModels(response)
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to fetch models')
} finally {
setLoading(false)
}
}, [instanceName, isRunning])
// Poll for models while dialog is open
useEffect(() => {
if (!open || !isRunning) return
// Initial fetch
void fetchModels()
// Poll every 2 seconds
const interval = setInterval(() => {
void fetchModels()
}, 2000)
return () => clearInterval(interval)
}, [open, isRunning, fetchModels])
// Load model
const loadModel = async (modelName: string) => {
setLoadingModels((prev) => new Set(prev).add(modelName))
setError(null)
try {
await llamaCppApi.loadModel(instanceName, modelName)
// Polling will pick up the change
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to load model')
} finally {
setLoadingModels((prev) => {
const newSet = new Set(prev)
newSet.delete(modelName)
return newSet
})
}
}
// Unload model
const unloadModel = async (modelName: string) => {
setLoadingModels((prev) => new Set(prev).add(modelName))
setError(null)
try {
await llamaCppApi.unloadModel(instanceName, modelName)
// Polling will pick up the change
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to unload model')
} finally {
setLoadingModels((prev) => {
const newSet = new Set(prev)
newSet.delete(modelName)
return newSet
})
}
}
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-4xl max-w-[calc(100%-2rem)] max-h-[80vh] flex flex-col">
<DialogHeader>
<div className="flex items-center justify-between">
<div>
<DialogTitle className="flex items-center gap-2">
Models: {instanceName}
<Badge variant={isRunning ? 'default' : 'secondary'}>
{isRunning ? 'Running' : 'Stopped'}
</Badge>
</DialogTitle>
<DialogDescription>
Manage models in this llama.cpp instance
</DialogDescription>
</div>
<Button
variant="outline"
size="sm"
onClick={() => void fetchModels()}
disabled={loading || !isRunning}
>
{loading ? (
<Loader2 className="h-4 w-4 animate-spin" />
) : (
<RefreshCw className="h-4 w-4" />
)}
</Button>
</div>
</DialogHeader>
{/* Error Display */}
{error && (
<div className="flex items-center gap-2 p-3 bg-destructive/10 border border-destructive/20 rounded-lg">
<AlertCircle className="h-4 w-4 text-destructive" />
<span className="text-sm text-destructive">{error}</span>
</div>
)}
{/* Models Table */}
<div className="flex-1 flex flex-col min-h-0 overflow-auto">
{!isRunning ? (
<div className="flex items-center justify-center h-full text-muted-foreground">
Instance is not running
</div>
) : loading && models.length === 0 ? (
<div className="flex items-center justify-center h-full">
<Loader2 className="h-6 w-6 animate-spin text-muted-foreground" />
<span className="ml-2 text-muted-foreground">
Loading models...
</span>
</div>
) : models.length === 0 ? (
<div className="flex items-center justify-center h-full text-muted-foreground">
No models found
</div>
) : (
<Table>
<TableHeader>
<TableRow>
<TableHead>Model</TableHead>
<TableHead>Status</TableHead>
<TableHead className="text-right">Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{models.map((model) => {
const isLoading = loadingModels.has(model.id)
const isModelLoading = model.status.value === 'loading'
return (
<TableRow key={model.id}>
<TableCell className="font-mono text-sm">
{model.id}
</TableCell>
<TableCell>
<div className="flex items-center gap-2">
<StatusIcon status={model.status.value} />
<span className="text-sm capitalize">
{model.status.value}
</span>
</div>
</TableCell>
<TableCell className="text-right">
{model.status.value === 'loaded' ? (
<Button
size="sm"
variant="outline"
onClick={() => void unloadModel(model.id)}
disabled={!isRunning || isLoading || isModelLoading}
>
{isLoading ? (
<>
<Loader2 className="h-3 w-3 animate-spin mr-1" />
Unloading...
</>
) : (
'Unload'
)}
</Button>
) : model.status.value === 'unloaded' ? (
<Button
size="sm"
variant="default"
onClick={() => void loadModel(model.id)}
disabled={!isRunning || isLoading || isModelLoading}
>
{isLoading ? (
<>
<Loader2 className="h-3 w-3 animate-spin mr-1" />
Loading...
</>
) : (
'Load'
)}
</Button>
) : (
<Button size="sm" variant="ghost" disabled>
Loading...
</Button>
)}
</TableCell>
</TableRow>
)
})}
</TableBody>
</Table>
)}
</div>
{/* Auto-refresh indicator */}
{isRunning && (
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<div className="w-2 h-2 bg-green-500 rounded-full animate-pulse"></div>
Auto-refreshing every 2 seconds
</div>
)}
</DialogContent>
</Dialog>
)
}
export default ModelsDialog

View File

@@ -0,0 +1,117 @@
import * as React from "react"
import { cn } from "@/lib/utils"
const Table = React.forwardRef<
HTMLTableElement,
React.HTMLAttributes<HTMLTableElement>
>(({ className, ...props }, ref) => (
<div className="relative w-full overflow-auto">
<table
ref={ref}
className={cn("w-full caption-bottom text-sm", className)}
{...props}
/>
</div>
))
Table.displayName = "Table"
const TableHeader = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
))
TableHeader.displayName = "TableHeader"
const TableBody = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tbody
ref={ref}
className={cn("[&_tr:last-child]:border-0", className)}
{...props}
/>
))
TableBody.displayName = "TableBody"
const TableFooter = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tfoot
ref={ref}
className={cn(
"border-t bg-muted/50 font-medium [&>tr]:last:border-b-0",
className
)}
{...props}
/>
))
TableFooter.displayName = "TableFooter"
const TableRow = React.forwardRef<
HTMLTableRowElement,
React.HTMLAttributes<HTMLTableRowElement>
>(({ className, ...props }, ref) => (
<tr
ref={ref}
className={cn(
"border-b transition-colors hover:bg-muted/50 data-[state=selected]:bg-muted",
className
)}
{...props}
/>
))
TableRow.displayName = "TableRow"
const TableHead = React.forwardRef<
HTMLTableCellElement,
React.ThHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<th
ref={ref}
className={cn(
"h-12 px-4 text-left align-middle font-medium text-muted-foreground [&:has([role=checkbox])]:pr-0",
className
)}
{...props}
/>
))
TableHead.displayName = "TableHead"
const TableCell = React.forwardRef<
HTMLTableCellElement,
React.TdHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<td
ref={ref}
className={cn("p-4 align-middle [&:has([role=checkbox])]:pr-0", className)}
{...props}
/>
))
TableCell.displayName = "TableCell"
const TableCaption = React.forwardRef<
HTMLTableCaptionElement,
React.HTMLAttributes<HTMLTableCaptionElement>
>(({ className, ...props }, ref) => (
<caption
ref={ref}
className={cn("mt-4 text-sm text-muted-foreground", className)}
{...props}
/>
))
TableCaption.displayName = "TableCaption"
export {
Table,
TableHeader,
TableBody,
TableFooter,
TableHead,
TableRow,
TableCell,
TableCaption,
}

View File

@@ -205,3 +205,51 @@ export const apiKeysApi = {
getPermissions: (id: number) =>
apiCall<KeyPermissionResponse[]>(`/auth/keys/${id}/permissions`),
};
// Llama.cpp model management types
export interface Model {
id: string;
object: string;
owned_by: string;
created: number;
in_cache: boolean;
path: string;
status: {
value: string; // "loaded" | "loading" | "unloaded"
args: string[];
};
}
export interface ModelsListResponse {
object: string;
data: Model[];
}
// Llama.cpp model management API functions
export const llamaCppApi = {
// GET /llama-cpp/{name}/models
getModels: async (instanceName: string): Promise<Model[]> => {
const response = await apiCall<ModelsListResponse>(
`/llama-cpp/${encodeURIComponent(instanceName)}/models`
);
return response.data;
},
// POST /llama-cpp/{name}/models/{model}/load
loadModel: (instanceName: string, modelName: string) =>
apiCall<{ status: string; message: string }>(
`/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`,
{
method: "POST",
}
),
// POST /llama-cpp/{name}/models/{model}/unload
unloadModel: (instanceName: string, modelName: string) =>
apiCall<{ status: string; message: string }>(
`/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`,
{
method: "POST",
}
),
};