From 7f5292412c6f02bcb2a120aed7ac76f0f59e8509 Mon Sep 17 00:00:00 2001 From: LordMathis Date: Thu, 18 Dec 2025 19:14:20 +0100 Subject: [PATCH 1/8] Implement model management for llama.cpp instances --- pkg/instance/models.go | 141 +++++++++++++ pkg/manager/manager.go | 25 +++ pkg/manager/model_registry.go | 79 +++++++ pkg/manager/operations.go | 79 +++++++ pkg/server/handlers_backends.go | 115 +++++++++++ pkg/server/handlers_openai.go | 47 ++++- pkg/server/routes.go | 7 + webui/src/components/InstanceCard.tsx | 56 ++++- webui/src/components/ModelsDialog.tsx | 287 ++++++++++++++++++++++++++ webui/src/components/ui/table.tsx | 117 +++++++++++ webui/src/lib/api.ts | 48 +++++ 11 files changed, 990 insertions(+), 11 deletions(-) create mode 100644 pkg/instance/models.go create mode 100644 pkg/manager/model_registry.go create mode 100644 webui/src/components/ModelsDialog.tsx create mode 100644 webui/src/components/ui/table.tsx diff --git a/pkg/instance/models.go b/pkg/instance/models.go new file mode 100644 index 0000000..f911f18 --- /dev/null +++ b/pkg/instance/models.go @@ -0,0 +1,141 @@ +package instance + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "llamactl/pkg/backends" + "net/http" + "time" +) + +// Model represents a model available in a llama.cpp instance +type Model struct { + ID string `json:"id"` + Object string `json:"object"` + OwnedBy string `json:"owned_by"` + Created int64 `json:"created"` + InCache bool `json:"in_cache"` + Path string `json:"path"` + Status ModelStatus `json:"status"` +} + +// ModelStatus represents the status of a model in an instance +type ModelStatus struct { + Value string `json:"value"` // "loaded" | "loading" | "unloaded" + Args []string `json:"args"` +} + +// IsLlamaCpp checks if this instance is a llama.cpp instance +func (i *Instance) IsLlamaCpp() bool { + opts := i.GetOptions() + if opts == nil { + return false + } + return opts.BackendOptions.BackendType == backends.BackendTypeLlamaCpp +} + +// GetModels fetches the models available in this llama.cpp instance +func (i *Instance) GetModels() ([]Model, error) { + if !i.IsLlamaCpp() { + return nil, fmt.Errorf("instance %s is not a llama.cpp instance", i.Name) + } + + if !i.IsRunning() { + return nil, fmt.Errorf("instance %s is not running", i.Name) + } + + var result struct { + Data []Model `json:"data"` + } + if err := i.doRequest("GET", "/models", nil, &result, 10*time.Second); err != nil { + return nil, fmt.Errorf("failed to fetch models: %w", err) + } + + return result.Data, nil +} + +// LoadModel loads a model in this llama.cpp instance +func (i *Instance) LoadModel(modelName string) error { + if !i.IsLlamaCpp() { + return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name) + } + + if !i.IsRunning() { + return fmt.Errorf("instance %s is not running", i.Name) + } + + // Make the load request + reqBody := map[string]string{"model": modelName} + if err := i.doRequest("POST", "/models/load", reqBody, nil, 30*time.Second); err != nil { + return fmt.Errorf("failed to load model: %w", err) + } + + return nil +} + +// UnloadModel unloads a model from this llama.cpp instance +func (i *Instance) UnloadModel(modelName string) error { + if !i.IsLlamaCpp() { + return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name) + } + + if !i.IsRunning() { + return fmt.Errorf("instance %s is not running", i.Name) + } + + // Make the unload request + reqBody := map[string]string{"model": modelName} + if err := i.doRequest("POST", "/models/unload", reqBody, nil, 30*time.Second); 
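The GetModels helper above decodes llama.cpp's /models response through the json tags on Model and ModelStatus. A minimal standalone sketch of that wire shape follows; the structs are re-declared locally so the snippet runs on its own, and the sample payload (model ID, path, timestamp, args) is purely illustrative rather than taken from the patch.

package main

import (
	"encoding/json"
	"fmt"
)

// modelStatus and model mirror the json tags declared in pkg/instance/models.go.
type modelStatus struct {
	Value string   `json:"value"` // "loaded" | "loading" | "unloaded"
	Args  []string `json:"args"`
}

type model struct {
	ID      string      `json:"id"`
	Object  string      `json:"object"`
	OwnedBy string      `json:"owned_by"`
	Created int64       `json:"created"`
	InCache bool        `json:"in_cache"`
	Path    string      `json:"path"`
	Status  modelStatus `json:"status"`
}

func main() {
	// Hypothetical response body from GET /models on a llama.cpp instance.
	raw := `{"data":[{"id":"qwen2.5-7b-instruct","object":"model","owned_by":"llama.cpp",
		"created":1734540000,"in_cache":true,"path":"/models/qwen2.5-7b-instruct.gguf",
		"status":{"value":"loaded","args":["-ngl","99"]}}]}`

	var resp struct {
		Data []model `json:"data"`
	}
	if err := json.Unmarshal([]byte(raw), &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Data[0].ID, resp.Data[0].Status.Value) // qwen2.5-7b-instruct loaded
}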
err != nil { + return fmt.Errorf("failed to unload model: %w", err) + } + + return nil +} + +// doRequest makes an HTTP request to this instance's backend +func (i *Instance) doRequest(method, path string, reqBody, respBody any, timeout time.Duration) error { + url := fmt.Sprintf("http://%s:%d%s", i.GetHost(), i.GetPort(), path) + + var bodyReader io.Reader + if reqBody != nil { + bodyBytes, err := json.Marshal(reqBody) + if err != nil { + return fmt.Errorf("failed to marshal request body: %w", err) + } + bodyReader = bytes.NewReader(bodyBytes) + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, method, url, bodyReader) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + + if reqBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes)) + } + + if respBody != nil { + if err := json.NewDecoder(resp.Body).Decode(respBody); err != nil { + return fmt.Errorf("failed to decode response: %w", err) + } + } + + return nil +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 47e554b..73e747e 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -24,6 +24,8 @@ type InstanceManager interface { EvictLRUInstance() error RestartInstance(name string) (*instance.Instance, error) GetInstanceLogs(name string, numLines int) (string, error) + ResolveInstance(modelName string) (string, error) + RefreshModelRegistry(inst *instance.Instance) error Shutdown() } @@ -34,6 +36,7 @@ type instanceManager struct { db database.InstanceStore remote *remoteManager lifecycle *lifecycleManager + models *modelRegistry // Configuration globalConfig *config.AppConfig @@ -60,12 +63,16 @@ func New(globalConfig *config.AppConfig, db database.InstanceStore) InstanceMana // Initialize remote manager remote := newRemoteManager(globalConfig.Nodes, 30*time.Second) + // Initialize model registry + models := newModelRegistry() + // Create manager instance im := &instanceManager{ registry: registry, ports: ports, db: db, remote: remote, + models: models, globalConfig: globalConfig, } @@ -142,9 +149,27 @@ func (im *instanceManager) loadInstances() error { // Auto-start instances that have auto-restart enabled go im.autoStartInstances() + // Discover models from all running llama.cpp instances + go im.discoverAllModels() + return nil } +// discoverAllModels discovers and registers models for all running llama.cpp instances +func (im *instanceManager) discoverAllModels() { + instances := im.registry.listRunning() + + for _, inst := range instances { + if !inst.IsLlamaCpp() { + continue + } + + if err := im.RefreshModelRegistry(inst); err != nil { + log.Printf("Failed to discover models for instance %s: %v", inst.Name, err) + } + } +} + // loadInstance loads a single persisted instance and adds it to the registry func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error { name := persistedInst.Name diff --git a/pkg/manager/model_registry.go b/pkg/manager/model_registry.go new file mode 100644 index 0000000..0515c91 --- /dev/null +++ b/pkg/manager/model_registry.go @@ -0,0 +1,79 @@ +package manager + +import ( + "fmt" + "llamactl/pkg/instance" + "sync" +) + +// modelRegistry maintains a global mapping of model 
names to instance names +// for llama.cpp instances. Model names must be globally unique across all instances. +type modelRegistry struct { + mu sync.RWMutex + modelToInstance map[string]string // model name → instance name + instanceModels map[string][]string // instance name → model names +} + +// newModelRegistry creates a new model registry +func newModelRegistry() *modelRegistry { + return &modelRegistry{ + modelToInstance: make(map[string]string), + instanceModels: make(map[string][]string), + } +} + +// registerModels registers models from an instance to the registry. +// Skips models that conflict with other instances and returns a list of conflicts. +func (mr *modelRegistry) registerModels(instanceName string, models []instance.Model) []string { + mr.mu.Lock() + defer mr.mu.Unlock() + + // Unregister any existing models for this instance first + mr.removeModels(instanceName) + + // Register models, skipping conflicts + var modelNames []string + var conflicts []string + + for _, model := range models { + // Check if this model conflicts with another instance + if existingInstance, exists := mr.modelToInstance[model.ID]; exists && existingInstance != instanceName { + conflicts = append(conflicts, fmt.Sprintf("%s (already in %s)", model.ID, existingInstance)) + continue // Skip this model + } + + // Register the model + mr.modelToInstance[model.ID] = instanceName + modelNames = append(modelNames, model.ID) + } + + mr.instanceModels[instanceName] = modelNames + + return conflicts +} + +// unregisterModels removes all models for an instance +func (mr *modelRegistry) unregisterModels(instanceName string) { + mr.mu.Lock() + defer mr.mu.Unlock() + mr.removeModels(instanceName) +} + +// removeModels removes all models for an instance (caller must hold lock) +func (mr *modelRegistry) removeModels(instanceName string) { + if models, exists := mr.instanceModels[instanceName]; exists { + for _, modelName := range models { + delete(mr.modelToInstance, modelName) + } + delete(mr.instanceModels, instanceName) + } +} + +// getModelInstance returns the instance name that hosts the given model +func (mr *modelRegistry) getModelInstance(modelName string) (string, bool) { + mr.mu.RLock() + defer mr.mu.RUnlock() + + instanceName, exists := mr.modelToInstance[modelName] + return instanceName, exists +} diff --git a/pkg/manager/operations.go b/pkg/manager/operations.go index 2cfbebf..6ff50f1 100644 --- a/pkg/manager/operations.go +++ b/pkg/manager/operations.go @@ -337,6 +337,9 @@ func (im *instanceManager) DeleteInstance(name string) error { // Release port (use ReleaseByInstance for proper cleanup) im.ports.releaseByInstance(name) + // Unregister models when instance is deleted + im.onInstanceStopped(name) + // Remove from registry if err := im.registry.remove(name); err != nil { return fmt.Errorf("failed to remove instance from registry: %w", err) @@ -396,6 +399,9 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error log.Printf("Warning: failed to persist instance %s: %v", name, err) } + // Discover and register models for llama.cpp instances + go im.onInstanceStarted(name) + return inst, nil } @@ -455,6 +461,9 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error) log.Printf("Warning: failed to persist instance %s: %v", name, err) } + // Unregister models when instance stops + im.onInstanceStopped(name) + return inst, nil } @@ -535,3 +544,73 @@ func (im *instanceManager) setPortInOptions(options *instance.Options, port int) func (im 
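A standalone sketch of the bookkeeping the registry above performs and of the lookup precedence ResolveInstance applies further below: a requested name is treated as an instance name first and only then looked up in the model-to-instance map, and a model ID already owned by another instance is skipped and reported as a conflict. The instance and model names here are placeholders, and note that this registry is removed again in patch 2 of the series.

package main

import "fmt"

type registry struct {
	modelToInstance map[string]string   // model ID → instance name
	instanceModels  map[string][]string // instance name → model IDs
}

func newRegistry() *registry {
	return &registry{
		modelToInstance: make(map[string]string),
		instanceModels:  make(map[string][]string),
	}
}

// register mirrors registerModels: the first instance to register a model ID keeps it.
func (r *registry) register(instanceName string, modelIDs []string) (conflicts []string) {
	var kept []string
	for _, id := range modelIDs {
		if owner, ok := r.modelToInstance[id]; ok && owner != instanceName {
			conflicts = append(conflicts, fmt.Sprintf("%s (already in %s)", id, owner))
			continue
		}
		r.modelToInstance[id] = instanceName
		kept = append(kept, id)
	}
	r.instanceModels[instanceName] = kept
	return conflicts
}

// resolve mirrors ResolveInstance's precedence: instance name > model registry.
func (r *registry) resolve(name string, isInstance func(string) bool) (string, bool) {
	if isInstance(name) {
		return name, true
	}
	owner, ok := r.modelToInstance[name]
	return owner, ok
}

func main() {
	r := newRegistry()
	fmt.Println(r.register("gpu-0", []string{"llama-3-8b", "qwen2.5-7b"})) // []
	fmt.Println(r.register("gpu-1", []string{"qwen2.5-7b"}))               // [qwen2.5-7b (already in gpu-0)]

	isInstance := func(n string) bool { return n == "gpu-0" || n == "gpu-1" }
	fmt.Println(r.resolve("gpu-0", isInstance))      // gpu-0 true
	fmt.Println(r.resolve("llama-3-8b", isInstance)) // gpu-0 true
}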
*instanceManager) EvictLRUInstance() error { return im.lifecycle.evictLRU() } + +// ResolveInstance resolves a model name to an instance name. +// Precedence: instance name > model registry +func (im *instanceManager) ResolveInstance(modelName string) (string, error) { + // Check if it's an instance name first + if _, err := im.GetInstance(modelName); err == nil { + return modelName, nil + } + + // Check if it's a model name in the registry + if instanceName, exists := im.models.getModelInstance(modelName); exists { + return instanceName, nil + } + + return "", fmt.Errorf("model or instance '%s' not found", modelName) +} + +// RefreshModelRegistry refreshes the model registry for the given instance +func (im *instanceManager) RefreshModelRegistry(inst *instance.Instance) error { + if !inst.IsRunning() { + return fmt.Errorf("instance %s is not running", inst.Name) + } + + // Fetch models from instance and register them + models, err := inst.GetModels() + if err != nil { + return fmt.Errorf("failed to fetch models: %w", err) + } + + // Register models, skipping conflicts + conflicts := im.models.registerModels(inst.Name, models) + if len(conflicts) > 0 { + log.Printf("Warning: Model name conflicts for instance %s (skipped): %v", inst.Name, conflicts) + } + + // Check if instance name shadows any model names + if otherInstance, exists := im.models.getModelInstance(inst.Name); exists && otherInstance != inst.Name { + log.Printf("Warning: Instance name '%s' shadows model name from instance '%s'", inst.Name, otherInstance) + } + + return nil +} + +// onInstanceStarted is called when an instance successfully starts and becomes healthy +func (im *instanceManager) onInstanceStarted(name string) { + inst, err := im.GetInstance(name) + if err != nil { + log.Printf("Failed to get instance %s for model discovery: %v", name, err) + return + } + + // Only discover models for llama.cpp instances + if !inst.IsLlamaCpp() { + return + } + + if err := inst.WaitForHealthy(30); err != nil { + log.Printf("Instance %s not healthy, skipping model discovery: %v", name, err) + return + } + + if err := im.RefreshModelRegistry(inst); err != nil { + log.Printf("Failed to discover models for instance %s: %v", name, err) + } +} + +// onInstanceStopped is called when an instance stops or is deleted +func (im *instanceManager) onInstanceStopped(name string) { + im.models.unregisterModels(name) +} diff --git a/pkg/server/handlers_backends.go b/pkg/server/handlers_backends.go index 065b24e..f912a8a 100644 --- a/pkg/server/handlers_backends.go +++ b/pkg/server/handlers_backends.go @@ -5,9 +5,12 @@ import ( "fmt" "llamactl/pkg/backends" "llamactl/pkg/instance" + "log" "net/http" "os/exec" "strings" + + "github.com/go-chi/chi/v5" ) // ParseCommandRequest represents the request body for backend command parsing @@ -306,3 +309,115 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc { func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc { return h.executeLlamaServerCommand("--list-devices", "Failed to list devices") } + +// LlamaCppListModels godoc +// @Summary List models in a llama.cpp instance +// @Description Returns a list of models available in the specified llama.cpp instance +// @Tags Llama.cpp +// @Security ApiKeyAuth +// @Produces json +// @Param name path string true "Instance Name" +// @Success 200 {object} map[string]any "Models list response" +// @Failure 400 {string} string "Invalid instance" +// @Failure 500 {string} string "Internal Server Error" +// @Router 
/api/v1/llama-cpp/{name}/models [get] +func (h *Handler) LlamaCppListModels() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + inst, err := h.getInstance(r) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_instance", err.Error()) + return + } + + models, err := inst.GetModels() + if err != nil { + writeError(w, http.StatusBadRequest, "get_models_failed", err.Error()) + return + } + + response := map[string]any{ + "object": "list", + "data": models, + } + + writeJSON(w, http.StatusOK, response) + } +} + +// LlamaCppLoadModel godoc +// @Summary Load a model in a llama.cpp instance +// @Description Loads the specified model in the given llama.cpp instance +// @Tags Llama.cpp +// @Security ApiKeyAuth +// @Produces json +// @Param name path string true "Instance Name" +// @Param model path string true "Model Name" +// @Success 200 {object} map[string]string "Success message" +// @Failure 400 {string} string "Invalid request" +// @Failure 500 {string} string "Internal Server Error" +// @Router /api/v1/llama-cpp/{name}/models/{model}/load [post] +func (h *Handler) LlamaCppLoadModel() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + inst, err := h.getInstance(r) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_instance", err.Error()) + return + } + + modelName := chi.URLParam(r, "model") + + if err := inst.LoadModel(modelName); err != nil { + writeError(w, http.StatusBadRequest, "load_model_failed", err.Error()) + return + } + + // Refresh the model registry + if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil { + log.Printf("Warning: failed to refresh model registry after load: %v", err) + } + + writeJSON(w, http.StatusOK, map[string]string{ + "status": "success", + "message": fmt.Sprintf("Model %s loaded successfully", modelName), + }) + } +} + +// LlamaCppUnloadModel godoc +// @Summary Unload a model in a llama.cpp instance +// @Description Unloads the specified model in the given llama.cpp instance +// @Tags Llama.cpp +// @Security ApiKeyAuth +// @Produces json +// @Param name path string true "Instance Name" +// @Param model path string true "Model Name" +// @Success 200 {object} map[string]string "Success message" +// @Failure 400 {string} string "Invalid request" +// @Failure 500 {string} string "Internal Server Error" +// @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post] +func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + inst, err := h.getInstance(r) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid_instance", err.Error()) + return + } + + modelName := chi.URLParam(r, "model") + + if err := inst.UnloadModel(modelName); err != nil { + writeError(w, http.StatusBadRequest, "unload_model_failed", err.Error()) + return + } + + // Refresh the model registry + if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil { + log.Printf("Warning: failed to refresh model registry after unload: %v", err) + } + + writeJSON(w, http.StatusOK, map[string]string{ + "status": "success", + "message": fmt.Sprintf("Model %s unloaded successfully", modelName), + }) + } +} diff --git a/pkg/server/handlers_openai.go b/pkg/server/handlers_openai.go index a7ad635..2240020 100644 --- a/pkg/server/handlers_openai.go +++ b/pkg/server/handlers_openai.go @@ -3,6 +3,7 @@ package server import ( "bytes" "encoding/json" + "fmt" "io" "llamactl/pkg/instance" "llamactl/pkg/validation" @@ -40,14 +41,41 @@ func (h *Handler) 
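A client-side sketch against the management endpoints documented above and mounted under /llama-cpp/{name} in routes.go further below, using only the Go standard library. The base URL, instance name, model name and bearer token are placeholders; the Authorization header assumes API-key auth is enabled on the management API.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// call issues a single authenticated request against the llamactl management API.
func call(method, path, token string) (string, error) {
	req, err := http.NewRequest(method, "http://localhost:8080/api/v1"+path, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func must(body string, err error) string {
	if err != nil {
		panic(err)
	}
	return body
}

func main() {
	token := "your-management-key"                      // placeholder
	inst, model := "main", url.PathEscape("qwen2.5-7b") // placeholders

	// List the models served by the llama.cpp instance "main".
	fmt.Println(must(call(http.MethodGet, "/llama-cpp/"+inst+"/models", token)))

	// Load, then unload, a model by name.
	fmt.Println(must(call(http.MethodPost, "/llama-cpp/"+inst+"/models/"+model+"/load", token)))
	fmt.Println(must(call(http.MethodPost, "/llama-cpp/"+inst+"/models/"+model+"/unload", token)))
}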
OpenAIListInstances() http.HandlerFunc { return } - openaiInstances := make([]OpenAIInstance, len(instances)) - for i, inst := range instances { - openaiInstances[i] = OpenAIInstance{ + var openaiInstances []OpenAIInstance + + // For each llama.cpp instance, try to fetch models and add them as separate entries + for _, inst := range instances { + + if inst.IsLlamaCpp() && inst.IsRunning() { + // Try to fetch models from the instance + models, err := inst.GetModels() + if err != nil { + fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err) + continue + } + + for _, model := range models { + openaiInstances = append(openaiInstances, OpenAIInstance{ + ID: model.ID, + Object: "model", + Created: model.Created, + OwnedBy: inst.Name, + }) + } + + if len(models) > 1 { + // Skip adding the instance name if multiple models are present + continue + } + } + + // Add instance name as single entry (for non-llama.cpp or if model fetch failed) + openaiInstances = append(openaiInstances, OpenAIInstance{ ID: inst.Name, Object: "model", Created: inst.Created, OwnedBy: "llamactl", - } + }) } openaiResponse := OpenAIListInstancesResponse{ @@ -89,12 +117,19 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { modelName, ok := requestBody["model"].(string) if !ok || modelName == "" { - writeError(w, http.StatusBadRequest, "invalid_request", "Instance name is required") + writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required") + return + } + + // Resolve model name to instance name (checks instance names first, then model registry) + instanceName, err := h.InstanceManager.ResolveInstance(modelName) + if err != nil { + writeError(w, http.StatusBadRequest, "model_not_found", err.Error()) return } // Validate instance name at the entry point - validatedName, err := validation.ValidateInstanceName(modelName) + validatedName, err := validation.ValidateInstanceName(instanceName) if err != nil { writeError(w, http.StatusBadRequest, "invalid_instance_name", err.Error()) return diff --git a/pkg/server/routes.go b/pkg/server/routes.go index 60f4487..abd694e 100644 --- a/pkg/server/routes.go +++ b/pkg/server/routes.go @@ -70,6 +70,13 @@ func SetupRouter(handler *Handler) *chi.Mux { }) }) + // Llama.cpp instance-specific endpoints + r.Route("/llama-cpp/{name}", func(r chi.Router) { + r.Get("/models", handler.LlamaCppListModels()) + r.Post("/models/{model}/load", handler.LlamaCppLoadModel()) + r.Post("/models/{model}/unload", handler.LlamaCppUnloadModel()) + }) + // Node management endpoints r.Route("/nodes", func(r chi.Router) { r.Get("/", handler.ListNodes()) // List all nodes diff --git a/webui/src/components/InstanceCard.tsx b/webui/src/components/InstanceCard.tsx index d889655..a79b116 100644 --- a/webui/src/components/InstanceCard.tsx +++ b/webui/src/components/InstanceCard.tsx @@ -2,13 +2,14 @@ import { Button } from "@/components/ui/button"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; import type { Instance } from "@/types/instance"; -import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download } from "lucide-react"; +import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react"; import LogsDialog from "@/components/LogDialog"; +import ModelsDialog from "@/components/ModelsDialog"; import HealthBadge from "@/components/HealthBadge"; import BackendBadge from "@/components/BackendBadge"; -import { useState } from "react"; +import { useState, useEffect } from "react"; import { 
useInstanceHealth } from "@/hooks/useInstanceHealth"; -import { instancesApi } from "@/lib/api"; +import { instancesApi, llamaCppApi } from "@/lib/api"; interface InstanceCardProps { instance: Instance; @@ -26,9 +27,31 @@ function InstanceCard({ editInstance, }: InstanceCardProps) { const [isLogsOpen, setIsLogsOpen] = useState(false); + const [isModelsOpen, setIsModelsOpen] = useState(false); const [showAllActions, setShowAllActions] = useState(false); + const [modelCount, setModelCount] = useState(0); const health = useInstanceHealth(instance.name, instance.status); + const running = instance.status === "running"; + const isLlamaCpp = instance.options?.backend_type === "llama_cpp"; + + // Fetch model count for llama.cpp instances + useEffect(() => { + if (!isLlamaCpp || !running) { + setModelCount(0); + return; + } + + void (async () => { + try { + const models = await llamaCppApi.getModels(instance.name); + setModelCount(models.length); + } catch { + setModelCount(0); + } + })(); + }, [instance.name, isLlamaCpp, running]); + const handleStart = () => { startInstance(instance.name); }; @@ -53,6 +76,10 @@ function InstanceCard({ setIsLogsOpen(true); }; + const handleModels = () => { + setIsModelsOpen(true); + }; + const handleExport = () => { void (async () => { try { @@ -83,8 +110,6 @@ function InstanceCard({ })(); }; - const running = instance.status === "running"; - return ( <> @@ -162,6 +187,20 @@ function InstanceCard({ Logs + {isLlamaCpp && modelCount > 1 && ( + + )} + + + + + {/* Error Display */} + {error && ( +
+ + {error} +
+ )} + + {/* Models Table */} +
+ {!isRunning ? ( +
+ Instance is not running +
+ ) : loading && models.length === 0 ? ( +
+ + + Loading models... + +
+ ) : models.length === 0 ? ( +
+ No models found +
+ ) : ( + + + + Model + Status + Actions + + + + {models.map((model) => { + const isLoading = loadingModels.has(model.id) + const isModelLoading = model.status.value === 'loading' + + return ( + + + {model.id} + + +
+ + + {model.status.value} + +
+
+ + {model.status.value === 'loaded' ? ( + + ) : model.status.value === 'unloaded' ? ( + + ) : ( + + )} + +
+ ) + })} +
+
+ )} +
+ + {/* Auto-refresh indicator */} + {isRunning && ( +
+
+ Auto-refreshing every 2 seconds +
+ )} + + + ) +} + +export default ModelsDialog diff --git a/webui/src/components/ui/table.tsx b/webui/src/components/ui/table.tsx new file mode 100644 index 0000000..7f3502f --- /dev/null +++ b/webui/src/components/ui/table.tsx @@ -0,0 +1,117 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +const Table = React.forwardRef< + HTMLTableElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+ + +)) +Table.displayName = "Table" + +const TableHeader = React.forwardRef< + HTMLTableSectionElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( + +)) +TableHeader.displayName = "TableHeader" + +const TableBody = React.forwardRef< + HTMLTableSectionElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( + +)) +TableBody.displayName = "TableBody" + +const TableFooter = React.forwardRef< + HTMLTableSectionElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( + tr]:last:border-b-0", + className + )} + {...props} + /> +)) +TableFooter.displayName = "TableFooter" + +const TableRow = React.forwardRef< + HTMLTableRowElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( + +)) +TableRow.displayName = "TableRow" + +const TableHead = React.forwardRef< + HTMLTableCellElement, + React.ThHTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +TableHead.displayName = "TableHead" + +const TableCell = React.forwardRef< + HTMLTableCellElement, + React.TdHTMLAttributes +>(({ className, ...props }, ref) => ( + +)) +TableCell.displayName = "TableCell" + +const TableCaption = React.forwardRef< + HTMLTableCaptionElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +TableCaption.displayName = "TableCaption" + +export { + Table, + TableHeader, + TableBody, + TableFooter, + TableHead, + TableRow, + TableCell, + TableCaption, +} diff --git a/webui/src/lib/api.ts b/webui/src/lib/api.ts index ddcf384..aeeb100 100644 --- a/webui/src/lib/api.ts +++ b/webui/src/lib/api.ts @@ -205,3 +205,51 @@ export const apiKeysApi = { getPermissions: (id: number) => apiCall(`/auth/keys/${id}/permissions`), }; + +// Llama.cpp model management types +export interface Model { + id: string; + object: string; + owned_by: string; + created: number; + in_cache: boolean; + path: string; + status: { + value: string; // "loaded" | "loading" | "unloaded" + args: string[]; + }; +} + +export interface ModelsListResponse { + object: string; + data: Model[]; +} + +// Llama.cpp model management API functions +export const llamaCppApi = { + // GET /llama-cpp/{name}/models + getModels: async (instanceName: string): Promise => { + const response = await apiCall( + `/llama-cpp/${encodeURIComponent(instanceName)}/models` + ); + return response.data; + }, + + // POST /llama-cpp/{name}/models/{model}/load + loadModel: (instanceName: string, modelName: string) => + apiCall<{ status: string; message: string }>( + `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`, + { + method: "POST", + } + ), + + // POST /llama-cpp/{name}/models/{model}/unload + unloadModel: (instanceName: string, modelName: string) => + apiCall<{ status: string; message: string }>( + `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`, + { + method: "POST", + } + ), +}; From 41d904475c46b73a43c10ee6454b5684b5f8bbab Mon Sep 17 00:00:00 2001 From: LordMathis Date: Sun, 21 Dec 2025 20:48:22 +0100 Subject: [PATCH 2/8] Remove model registry --- pkg/manager/manager.go | 27 +---------- pkg/manager/model_registry.go | 79 ------------------------------- pkg/manager/operations.go | 83 +-------------------------------- pkg/server/handlers.go | 2 +- pkg/server/handlers_backends.go | 11 ----- test_llm.py | 74 +++++++++++++++++++++++++++++ 6 files changed, 78 insertions(+), 198 deletions(-) delete mode 100644 pkg/manager/model_registry.go create mode 100644 test_llm.py diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 73e747e..5b55e7d 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -19,13 +19,11 @@ type InstanceManager interface { UpdateInstance(name string, options *instance.Options) (*instance.Instance, error) DeleteInstance(name string) error StartInstance(name string) (*instance.Instance, error) - IsMaxRunningInstancesReached() bool + AtMaxRunning() bool StopInstance(name string) (*instance.Instance, error) EvictLRUInstance() error RestartInstance(name string) (*instance.Instance, error) GetInstanceLogs(name string, numLines int) (string, error) - ResolveInstance(modelName string) (string, error) - RefreshModelRegistry(inst *instance.Instance) error Shutdown() } @@ -36,7 +34,6 @@ type instanceManager struct { db database.InstanceStore remote *remoteManager lifecycle *lifecycleManager - models *modelRegistry // Configuration globalConfig *config.AppConfig @@ -63,16 +60,12 @@ func New(globalConfig *config.AppConfig, db database.InstanceStore) InstanceMana // Initialize remote manager remote := newRemoteManager(globalConfig.Nodes, 30*time.Second) - // Initialize model registry - models := newModelRegistry() - // Create manager instance im := &instanceManager{ registry: registry, ports: ports, db: db, remote: 
remote, - models: models, globalConfig: globalConfig, } @@ -149,27 +142,9 @@ func (im *instanceManager) loadInstances() error { // Auto-start instances that have auto-restart enabled go im.autoStartInstances() - // Discover models from all running llama.cpp instances - go im.discoverAllModels() - return nil } -// discoverAllModels discovers and registers models for all running llama.cpp instances -func (im *instanceManager) discoverAllModels() { - instances := im.registry.listRunning() - - for _, inst := range instances { - if !inst.IsLlamaCpp() { - continue - } - - if err := im.RefreshModelRegistry(inst); err != nil { - log.Printf("Failed to discover models for instance %s: %v", inst.Name, err) - } - } -} - // loadInstance loads a single persisted instance and adds it to the registry func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error { name := persistedInst.Name diff --git a/pkg/manager/model_registry.go b/pkg/manager/model_registry.go deleted file mode 100644 index 0515c91..0000000 --- a/pkg/manager/model_registry.go +++ /dev/null @@ -1,79 +0,0 @@ -package manager - -import ( - "fmt" - "llamactl/pkg/instance" - "sync" -) - -// modelRegistry maintains a global mapping of model names to instance names -// for llama.cpp instances. Model names must be globally unique across all instances. -type modelRegistry struct { - mu sync.RWMutex - modelToInstance map[string]string // model name → instance name - instanceModels map[string][]string // instance name → model names -} - -// newModelRegistry creates a new model registry -func newModelRegistry() *modelRegistry { - return &modelRegistry{ - modelToInstance: make(map[string]string), - instanceModels: make(map[string][]string), - } -} - -// registerModels registers models from an instance to the registry. -// Skips models that conflict with other instances and returns a list of conflicts. 
-func (mr *modelRegistry) registerModels(instanceName string, models []instance.Model) []string { - mr.mu.Lock() - defer mr.mu.Unlock() - - // Unregister any existing models for this instance first - mr.removeModels(instanceName) - - // Register models, skipping conflicts - var modelNames []string - var conflicts []string - - for _, model := range models { - // Check if this model conflicts with another instance - if existingInstance, exists := mr.modelToInstance[model.ID]; exists && existingInstance != instanceName { - conflicts = append(conflicts, fmt.Sprintf("%s (already in %s)", model.ID, existingInstance)) - continue // Skip this model - } - - // Register the model - mr.modelToInstance[model.ID] = instanceName - modelNames = append(modelNames, model.ID) - } - - mr.instanceModels[instanceName] = modelNames - - return conflicts -} - -// unregisterModels removes all models for an instance -func (mr *modelRegistry) unregisterModels(instanceName string) { - mr.mu.Lock() - defer mr.mu.Unlock() - mr.removeModels(instanceName) -} - -// removeModels removes all models for an instance (caller must hold lock) -func (mr *modelRegistry) removeModels(instanceName string) { - if models, exists := mr.instanceModels[instanceName]; exists { - for _, modelName := range models { - delete(mr.modelToInstance, modelName) - } - delete(mr.instanceModels, instanceName) - } -} - -// getModelInstance returns the instance name that hosts the given model -func (mr *modelRegistry) getModelInstance(modelName string) (string, bool) { - mr.mu.RLock() - defer mr.mu.RUnlock() - - instanceName, exists := mr.modelToInstance[modelName] - return instanceName, exists -} diff --git a/pkg/manager/operations.go b/pkg/manager/operations.go index 6ff50f1..54ee5d4 100644 --- a/pkg/manager/operations.go +++ b/pkg/manager/operations.go @@ -337,9 +337,6 @@ func (im *instanceManager) DeleteInstance(name string) error { // Release port (use ReleaseByInstance for proper cleanup) im.ports.releaseByInstance(name) - // Unregister models when instance is deleted - im.onInstanceStopped(name) - // Remove from registry if err := im.registry.remove(name); err != nil { return fmt.Errorf("failed to remove instance from registry: %w", err) @@ -386,7 +383,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error } // Check max running instances limit for local instances only - if im.IsMaxRunningInstancesReached() { + if im.AtMaxRunning() { return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.globalConfig.Instances.MaxRunningInstances)) } @@ -399,13 +396,10 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error log.Printf("Warning: failed to persist instance %s: %v", name, err) } - // Discover and register models for llama.cpp instances - go im.onInstanceStarted(name) - return inst, nil } -func (im *instanceManager) IsMaxRunningInstancesReached() bool { +func (im *instanceManager) AtMaxRunning() bool { if im.globalConfig.Instances.MaxRunningInstances == -1 { return false } @@ -461,9 +455,6 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error) log.Printf("Warning: failed to persist instance %s: %v", name, err) } - // Unregister models when instance stops - im.onInstanceStopped(name) - return inst, nil } @@ -544,73 +535,3 @@ func (im *instanceManager) setPortInOptions(options *instance.Options, port int) func (im *instanceManager) EvictLRUInstance() error { return im.lifecycle.evictLRU() } - -// ResolveInstance 
resolves a model name to an instance name. -// Precedence: instance name > model registry -func (im *instanceManager) ResolveInstance(modelName string) (string, error) { - // Check if it's an instance name first - if _, err := im.GetInstance(modelName); err == nil { - return modelName, nil - } - - // Check if it's a model name in the registry - if instanceName, exists := im.models.getModelInstance(modelName); exists { - return instanceName, nil - } - - return "", fmt.Errorf("model or instance '%s' not found", modelName) -} - -// RefreshModelRegistry refreshes the model registry for the given instance -func (im *instanceManager) RefreshModelRegistry(inst *instance.Instance) error { - if !inst.IsRunning() { - return fmt.Errorf("instance %s is not running", inst.Name) - } - - // Fetch models from instance and register them - models, err := inst.GetModels() - if err != nil { - return fmt.Errorf("failed to fetch models: %w", err) - } - - // Register models, skipping conflicts - conflicts := im.models.registerModels(inst.Name, models) - if len(conflicts) > 0 { - log.Printf("Warning: Model name conflicts for instance %s (skipped): %v", inst.Name, conflicts) - } - - // Check if instance name shadows any model names - if otherInstance, exists := im.models.getModelInstance(inst.Name); exists && otherInstance != inst.Name { - log.Printf("Warning: Instance name '%s' shadows model name from instance '%s'", inst.Name, otherInstance) - } - - return nil -} - -// onInstanceStarted is called when an instance successfully starts and becomes healthy -func (im *instanceManager) onInstanceStarted(name string) { - inst, err := im.GetInstance(name) - if err != nil { - log.Printf("Failed to get instance %s for model discovery: %v", name, err) - return - } - - // Only discover models for llama.cpp instances - if !inst.IsLlamaCpp() { - return - } - - if err := inst.WaitForHealthy(30); err != nil { - log.Printf("Instance %s not healthy, skipping model discovery: %v", name, err) - return - } - - if err := im.RefreshModelRegistry(inst); err != nil { - log.Printf("Failed to discover models for instance %s: %v", name, err) - } -} - -// onInstanceStopped is called when an instance stops or is deleted -func (im *instanceManager) onInstanceStopped(name string) { - im.models.unregisterModels(name) -} diff --git a/pkg/server/handlers.go b/pkg/server/handlers.go index 3e232ee..ce72173 100644 --- a/pkg/server/handlers.go +++ b/pkg/server/handlers.go @@ -96,7 +96,7 @@ func (h *Handler) ensureInstanceRunning(inst *instance.Instance) error { return fmt.Errorf("instance is not running and on-demand start is not enabled") } - if h.InstanceManager.IsMaxRunningInstancesReached() { + if h.InstanceManager.AtMaxRunning() { if h.cfg.Instances.EnableLRUEviction { err := h.InstanceManager.EvictLRUInstance() if err != nil { diff --git a/pkg/server/handlers_backends.go b/pkg/server/handlers_backends.go index f912a8a..2cc9304 100644 --- a/pkg/server/handlers_backends.go +++ b/pkg/server/handlers_backends.go @@ -5,7 +5,6 @@ import ( "fmt" "llamactl/pkg/backends" "llamactl/pkg/instance" - "log" "net/http" "os/exec" "strings" @@ -371,11 +370,6 @@ func (h *Handler) LlamaCppLoadModel() http.HandlerFunc { return } - // Refresh the model registry - if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil { - log.Printf("Warning: failed to refresh model registry after load: %v", err) - } - writeJSON(w, http.StatusOK, map[string]string{ "status": "success", "message": fmt.Sprintf("Model %s loaded successfully", modelName), @@ -410,11 +404,6 
@@ func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc { return } - // Refresh the model registry - if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil { - log.Printf("Warning: failed to refresh model registry after unload: %v", err) - } - writeJSON(w, http.StatusOK, map[string]string{ "status": "success", "message": fmt.Sprintf("Model %s unloaded successfully", modelName), diff --git a/test_llm.py b/test_llm.py new file mode 100644 index 0000000..944d3e7 --- /dev/null +++ b/test_llm.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +""" +Simple Python script to interact with local LLM server's OpenAI-compatible API +""" + +import requests + +# Local LLM server configuration +LLM_SERVER_URL = "http://localhost:8080/v1/chat/completions" +MODEL_NAME = "proxy-test" # Default model name, can be changed based on your setup + +def send_message(message, model=MODEL_NAME, temperature=0.7, max_tokens=1000): + """ + Send a message to local LLM server API + + Args: + message (str): The message to send + model (str): Model name (depends on your LLM server setup) + temperature (float): Controls randomness (0.0 to 1.0) + max_tokens (int): Maximum tokens in response + + Returns: + str: The AI response or error message + """ + + headers = { + "Content-Type": "application/json", + "Authorization": "Bearer test-inf" + } + + data = { + "model": model, + "messages": [ + { + "role": "user", + "content": message + } + ], + "temperature": temperature, + "max_tokens": max_tokens, + "stream": False + } + + response = requests.post(LLM_SERVER_URL, headers=headers, json=data, timeout=60) + response.raise_for_status() + + result = response.json() + return result["choices"][0]["message"]["content"] + +def main(): + """Run in interactive mode for continuous conversation""" + print("Local LLM Chat Client") + print("-" * 40) + + while True: + try: + user_input = input("\nYou: ").strip() + + if not user_input: + continue + + print("AI: ", end="", flush=True) + response = send_message(user_input) + print(response) + + except KeyboardInterrupt: + print("\nGoodbye!") + break + except EOFError: + print("\nGoodbye!") + break + +if __name__ == "__main__": + main() \ No newline at end of file From ee122d669c3f917af5c56f1372fad0fd06736336 Mon Sep 17 00:00:00 2001 From: LordMathis Date: Sun, 21 Dec 2025 23:32:33 +0100 Subject: [PATCH 3/8] Support llama.cpp router mode for openai endpoints --- pkg/backends/backend.go | 1 + pkg/instance/instance.go | 9 ++ pkg/instance/models.go | 141 -------------------------------- pkg/server/handlers_backends.go | 113 ++++++++++++++++++------- pkg/server/handlers_openai.go | 93 ++++++++++++++++++--- 5 files changed, 174 insertions(+), 183 deletions(-) delete mode 100644 pkg/instance/models.go diff --git a/pkg/backends/backend.go b/pkg/backends/backend.go index ad138a5..2bb0fa8 100644 --- a/pkg/backends/backend.go +++ b/pkg/backends/backend.go @@ -14,6 +14,7 @@ const ( BackendTypeLlamaCpp BackendType = "llama_cpp" BackendTypeMlxLm BackendType = "mlx_lm" BackendTypeVllm BackendType = "vllm" + BackendTypeUnknown BackendType = "unknown" ) type backend interface { diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go index a8faa1b..6149a4a 100644 --- a/pkg/instance/instance.go +++ b/pkg/instance/instance.go @@ -7,6 +7,7 @@ import ( "net/http" "time" + "llamactl/pkg/backends" "llamactl/pkg/config" ) @@ -117,6 +118,14 @@ func (i *Instance) WaitForHealthy(timeout int) error { return i.process.waitForHealthy(timeout) } +func (i *Instance) GetBackendType() backends.BackendType { + 
opts := i.GetOptions() + if opts == nil { + return backends.BackendTypeUnknown + } + return opts.BackendOptions.BackendType +} + // GetOptions returns the current options func (i *Instance) GetOptions() *Options { if i.options == nil { diff --git a/pkg/instance/models.go b/pkg/instance/models.go deleted file mode 100644 index f911f18..0000000 --- a/pkg/instance/models.go +++ /dev/null @@ -1,141 +0,0 @@ -package instance - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "llamactl/pkg/backends" - "net/http" - "time" -) - -// Model represents a model available in a llama.cpp instance -type Model struct { - ID string `json:"id"` - Object string `json:"object"` - OwnedBy string `json:"owned_by"` - Created int64 `json:"created"` - InCache bool `json:"in_cache"` - Path string `json:"path"` - Status ModelStatus `json:"status"` -} - -// ModelStatus represents the status of a model in an instance -type ModelStatus struct { - Value string `json:"value"` // "loaded" | "loading" | "unloaded" - Args []string `json:"args"` -} - -// IsLlamaCpp checks if this instance is a llama.cpp instance -func (i *Instance) IsLlamaCpp() bool { - opts := i.GetOptions() - if opts == nil { - return false - } - return opts.BackendOptions.BackendType == backends.BackendTypeLlamaCpp -} - -// GetModels fetches the models available in this llama.cpp instance -func (i *Instance) GetModels() ([]Model, error) { - if !i.IsLlamaCpp() { - return nil, fmt.Errorf("instance %s is not a llama.cpp instance", i.Name) - } - - if !i.IsRunning() { - return nil, fmt.Errorf("instance %s is not running", i.Name) - } - - var result struct { - Data []Model `json:"data"` - } - if err := i.doRequest("GET", "/models", nil, &result, 10*time.Second); err != nil { - return nil, fmt.Errorf("failed to fetch models: %w", err) - } - - return result.Data, nil -} - -// LoadModel loads a model in this llama.cpp instance -func (i *Instance) LoadModel(modelName string) error { - if !i.IsLlamaCpp() { - return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name) - } - - if !i.IsRunning() { - return fmt.Errorf("instance %s is not running", i.Name) - } - - // Make the load request - reqBody := map[string]string{"model": modelName} - if err := i.doRequest("POST", "/models/load", reqBody, nil, 30*time.Second); err != nil { - return fmt.Errorf("failed to load model: %w", err) - } - - return nil -} - -// UnloadModel unloads a model from this llama.cpp instance -func (i *Instance) UnloadModel(modelName string) error { - if !i.IsLlamaCpp() { - return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name) - } - - if !i.IsRunning() { - return fmt.Errorf("instance %s is not running", i.Name) - } - - // Make the unload request - reqBody := map[string]string{"model": modelName} - if err := i.doRequest("POST", "/models/unload", reqBody, nil, 30*time.Second); err != nil { - return fmt.Errorf("failed to unload model: %w", err) - } - - return nil -} - -// doRequest makes an HTTP request to this instance's backend -func (i *Instance) doRequest(method, path string, reqBody, respBody any, timeout time.Duration) error { - url := fmt.Sprintf("http://%s:%d%s", i.GetHost(), i.GetPort(), path) - - var bodyReader io.Reader - if reqBody != nil { - bodyBytes, err := json.Marshal(reqBody) - if err != nil { - return fmt.Errorf("failed to marshal request body: %w", err) - } - bodyReader = bytes.NewReader(bodyBytes) - } - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, method, 
url, bodyReader) - if err != nil { - return fmt.Errorf("failed to create request: %w", err) - } - - if reqBody != nil { - req.Header.Set("Content-Type", "application/json") - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - bodyBytes, _ := io.ReadAll(resp.Body) - return fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes)) - } - - if respBody != nil { - if err := json.NewDecoder(resp.Body).Decode(respBody); err != nil { - return fmt.Errorf("failed to decode response: %w", err) - } - } - - return nil -} diff --git a/pkg/server/handlers_backends.go b/pkg/server/handlers_backends.go index 2cc9304..8fc9ff6 100644 --- a/pkg/server/handlers_backends.go +++ b/pkg/server/handlers_backends.go @@ -8,8 +8,6 @@ import ( "net/http" "os/exec" "strings" - - "github.com/go-chi/chi/v5" ) // ParseCommandRequest represents the request body for backend command parsing @@ -322,24 +320,41 @@ func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc { // @Router /api/v1/llama-cpp/{name}/models [get] func (h *Handler) LlamaCppListModels() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - inst, err := h.getInstance(r) + inst, err := h.validateLlamaCppInstance(r) if err != nil { - writeError(w, http.StatusBadRequest, "invalid_instance", err.Error()) + writeError(w, http.StatusBadRequest, "invalid instance", err.Error()) return } - models, err := inst.GetModels() - if err != nil { - writeError(w, http.StatusBadRequest, "get_models_failed", err.Error()) + // Check instance permissions + if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil { + writeError(w, http.StatusForbidden, "permission_denied", err.Error()) return } - response := map[string]any{ - "object": "list", - "data": models, + // Check if instance is shutting down before autostart logic + if inst.GetStatus() == instance.ShuttingDown { + writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down") + return } - writeJSON(w, http.StatusOK, response) + if !inst.IsRemote() && !inst.IsRunning() { + err := h.ensureInstanceRunning(inst) + if err != nil { + writeError(w, http.StatusInternalServerError, "instance start failed", err.Error()) + return + } + } + + // Modify request path to /models for proxying + r.URL.Path = "/models" + + // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state + err = inst.ServeHTTP(w, r) + if err != nil { + // Error is already handled in ServeHTTP (response written) + return + } } } @@ -357,23 +372,41 @@ func (h *Handler) LlamaCppListModels() http.HandlerFunc { // @Router /api/v1/llama-cpp/{name}/models/{model}/load [post] func (h *Handler) LlamaCppLoadModel() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - inst, err := h.getInstance(r) + inst, err := h.validateLlamaCppInstance(r) if err != nil { - writeError(w, http.StatusBadRequest, "invalid_instance", err.Error()) + writeError(w, http.StatusBadRequest, "invalid instance", err.Error()) return } - modelName := chi.URLParam(r, "model") - - if err := inst.LoadModel(modelName); err != nil { - writeError(w, http.StatusBadRequest, "load_model_failed", err.Error()) + // Check instance permissions + if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil { + writeError(w, http.StatusForbidden, "permission_denied", err.Error()) return } - writeJSON(w, http.StatusOK, map[string]string{ - 
"status": "success", - "message": fmt.Sprintf("Model %s loaded successfully", modelName), - }) + // Check if instance is shutting down before autostart logic + if inst.GetStatus() == instance.ShuttingDown { + writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down") + return + } + + if !inst.IsRemote() && !inst.IsRunning() { + err := h.ensureInstanceRunning(inst) + if err != nil { + writeError(w, http.StatusInternalServerError, "instance start failed", err.Error()) + return + } + } + + // Modify request path to /models/load for proxying + r.URL.Path = "/models/load" + + // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state + err = inst.ServeHTTP(w, r) + if err != nil { + // Error is already handled in ServeHTTP (response written) + return + } } } @@ -391,22 +424,40 @@ func (h *Handler) LlamaCppLoadModel() http.HandlerFunc { // @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post] func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - inst, err := h.getInstance(r) + inst, err := h.validateLlamaCppInstance(r) if err != nil { - writeError(w, http.StatusBadRequest, "invalid_instance", err.Error()) + writeError(w, http.StatusBadRequest, "invalid instance", err.Error()) return } - modelName := chi.URLParam(r, "model") - - if err := inst.UnloadModel(modelName); err != nil { - writeError(w, http.StatusBadRequest, "unload_model_failed", err.Error()) + // Check instance permissions + if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil { + writeError(w, http.StatusForbidden, "permission_denied", err.Error()) return } - writeJSON(w, http.StatusOK, map[string]string{ - "status": "success", - "message": fmt.Sprintf("Model %s unloaded successfully", modelName), - }) + // Check if instance is shutting down before autostart logic + if inst.GetStatus() == instance.ShuttingDown { + writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down") + return + } + + if !inst.IsRemote() && !inst.IsRunning() { + err := h.ensureInstanceRunning(inst) + if err != nil { + writeError(w, http.StatusInternalServerError, "instance start failed", err.Error()) + return + } + } + + // Modify request path to /models/unload for proxying + r.URL.Path = "/models/unload" + + // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state + err = inst.ServeHTTP(w, r) + if err != nil { + // Error is already handled in ServeHTTP (response written) + return + } } } diff --git a/pkg/server/handlers_openai.go b/pkg/server/handlers_openai.go index 2240020..06f06b7 100644 --- a/pkg/server/handlers_openai.go +++ b/pkg/server/handlers_openai.go @@ -5,9 +5,11 @@ import ( "encoding/json" "fmt" "io" + "llamactl/pkg/backends" "llamactl/pkg/instance" "llamactl/pkg/validation" "net/http" + "strings" ) // OpenAIListInstancesResponse represents the response structure for listing instances (models) in OpenAI-compatible format @@ -24,6 +26,53 @@ type OpenAIInstance struct { OwnedBy string `json:"owned_by"` } +// LlamaCppModel represents a model available in a llama.cpp instance +type LlamaCppModel struct { + ID string `json:"id"` + Object string `json:"object"` + OwnedBy string `json:"owned_by"` + Created int64 `json:"created"` + InCache bool `json:"in_cache"` + Path string `json:"path"` + Status LlamaCppModelStatus `json:"status"` +} + +// LlamaCppModelStatus represents the status of a model in a llama.cpp 
instance +type LlamaCppModelStatus struct { + Value string `json:"value"` // "loaded" | "loading" | "unloaded" + Args []string `json:"args"` +} + +// fetchLlamaCppModels fetches models from a llama.cpp instance using the proxy +func fetchLlamaCppModels(inst *instance.Instance) ([]LlamaCppModel, error) { + // Create a request to the instance's /models endpoint + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s:%d/models", inst.GetHost(), inst.GetPort()), nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + // Use a custom response writer to capture the response + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + bodyBytes, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes)) + } + + var result struct { + Data []LlamaCppModel `json:"data"` + } + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return result.Data, nil +} + // OpenAIListInstances godoc // @Summary List instances in OpenAI-compatible format // @Description Returns a list of instances in a format compatible with OpenAI API @@ -46,9 +95,9 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc { // For each llama.cpp instance, try to fetch models and add them as separate entries for _, inst := range instances { - if inst.IsLlamaCpp() && inst.IsRunning() { + if inst.GetBackendType() == backends.BackendTypeLlamaCpp && inst.IsRunning() { // Try to fetch models from the instance - models, err := inst.GetModels() + models, err := fetchLlamaCppModels(inst) if err != nil { fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err) continue @@ -56,9 +105,9 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc { for _, model := range models { openaiInstances = append(openaiInstances, OpenAIInstance{ - ID: model.ID, + ID: inst.Name + "/" + model.ID, Object: "model", - Created: model.Created, + Created: inst.Created, OwnedBy: inst.Name, }) } @@ -115,17 +164,24 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { return } - modelName, ok := requestBody["model"].(string) - if !ok || modelName == "" { + reqModelName, ok := requestBody["model"].(string) + if !ok || reqModelName == "" { writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required") return } - // Resolve model name to instance name (checks instance names first, then model registry) - instanceName, err := h.InstanceManager.ResolveInstance(modelName) - if err != nil { - writeError(w, http.StatusBadRequest, "model_not_found", err.Error()) - return + // Parse instance name and model name from / format + var instanceName string + var modelName string + + // Check if model name contains "/" + if idx := strings.Index(reqModelName, "/"); idx != -1 { + // Split into instance and model parts + instanceName = reqModelName[:idx] + modelName = reqModelName[idx+1:] + } else { + instanceName = reqModelName + modelName = reqModelName } // Validate instance name at the entry point @@ -154,6 +210,11 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { return } + if inst.IsRemote() { + // Don't replace model name for remote instances + modelName = reqModelName + } + if !inst.IsRemote() && !inst.IsRunning() { err := h.ensureInstanceRunning(inst) if err != nil { @@ -162,6 +223,16 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { } } + // Update the request body with 
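A compact sketch of the routing convention this proxy change introduces: clients address a model as instance/model, and a bare name is treated as both the instance and the model, which keeps single-model instances working unchanged. The names in the example are placeholders.

package main

import (
	"fmt"
	"strings"
)

// splitModel mirrors the parsing in OpenAIProxy: everything before the first
// "/" selects the llamactl instance, the remainder is forwarded to llama.cpp
// as the model name.
func splitModel(requested string) (instanceName, modelName string) {
	if idx := strings.Index(requested, "/"); idx != -1 {
		return requested[:idx], requested[idx+1:]
	}
	return requested, requested
}

func main() {
	fmt.Println(splitModel("gpu-0/qwen2.5-7b-instruct")) // gpu-0 qwen2.5-7b-instruct
	fmt.Println(splitModel("my-instance"))               // my-instance my-instance
	// A model ID that itself contains "/" keeps everything after the first
	// separator: instance "gpu-0", model "org/model".
	fmt.Println(splitModel("gpu-0/org/model"))
}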
just the model name + requestBody["model"] = modelName + + // Re-marshal the updated body + bodyBytes, err = json.Marshal(requestBody) + if err != nil { + writeError(w, http.StatusInternalServerError, "marshal_error", "Failed to update request body") + return + } + // Recreate the request body from the bytes we read r.Body = io.NopCloser(bytes.NewReader(bodyBytes)) r.ContentLength = int64(len(bodyBytes)) From 5062c882de48156f755d53f5eb90a8936408785f Mon Sep 17 00:00:00 2001 From: LordMathis Date: Mon, 22 Dec 2025 15:20:22 +0100 Subject: [PATCH 4/8] Update dependencies --- webui/package-lock.json | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/webui/package-lock.json b/webui/package-lock.json index 077797a..35ffc7f 100644 --- a/webui/package-lock.json +++ b/webui/package-lock.json @@ -161,7 +161,6 @@ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", @@ -511,7 +510,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -558,7 +556,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" } @@ -2537,7 +2534,8 @@ "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@types/babel__core": { "version": "7.20.5", @@ -2621,7 +2619,6 @@ "integrity": "sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA==", "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -2632,7 +2629,6 @@ "integrity": "sha512-tBFxBp9Nfyy5rsmefN+WXc1JeW/j2BpBHFdLZbEVfs9wn3E3NRFxwV0pJg8M1qQAexFpvz73hJXFofV0ZAu92A==", "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "csstype": "^3.0.2" } @@ -2643,7 +2639,6 @@ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "devOptional": true, "license": "MIT", - "peer": true, "peerDependencies": { "@types/react": "^19.2.0" } @@ -2693,7 +2688,6 @@ "integrity": "sha512-6/cmF2piao+f6wSxUsJLZjck7OQsYyRtcOZS02k7XINSNlz93v6emM8WutDQSXnroG2xwYlEVHJI+cPA7CPM3Q==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", @@ -3042,7 +3036,6 @@ "integrity": "sha512-F9jI5rSstNknPlTlPN2gcc4gpbaagowuRzw/OJzl368dvPun668Q182S8Q8P9PITgGCl5LAKXpzuue106eM4wA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/utils": "4.0.8", "fflate": "^0.8.2", @@ -3079,7 +3072,6 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -3130,6 +3122,7 @@ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -3401,7 +3394,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "caniuse-lite": "^1.0.30001726", "electron-to-chromium": "^1.5.173", @@ -3834,7 +3826,8 @@ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", 
"integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/dunder-proto": { "version": "1.0.1", @@ -4138,7 +4131,6 @@ "integrity": "sha512-BhHmn2yNOFA9H9JmmIVKJmd288g9hrVRDkdoIgRCRuSySRUHH7r/DI6aAXW9T1WwUuY3DFgrcaqB+deURBLR5g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -5357,7 +5349,6 @@ "integrity": "sha512-GtldT42B8+jefDUC4yUKAvsaOrH7PDHmZxZXNgF2xMmymjUbRYJvpAybZAKEmXDGTM0mCsz8duOa4vTm5AY2Kg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@acemir/cssom": "^0.9.28", "@asamuzakjp/dom-selector": "^6.7.6", @@ -5768,6 +5759,7 @@ "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", "dev": true, "license": "MIT", + "peer": true, "bin": { "lz-string": "bin/bin.js" } @@ -6153,7 +6145,6 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -6190,7 +6181,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -6216,6 +6206,7 @@ "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", @@ -6231,6 +6222,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -6272,7 +6264,6 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.0.tgz", "integrity": "sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==", "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } @@ -6282,7 +6273,6 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.0.tgz", "integrity": "sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==", "license": "MIT", - "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -6295,7 +6285,8 @@ "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/react-refresh": { "version": "0.18.0", @@ -7249,7 +7240,6 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -7397,7 +7387,6 @@ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", "integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", @@ -7473,7 +7462,6 @@ "integrity": "sha512-urzu3NCEV0Qa0Y2PwvBtRgmNtxhj5t5ULw7cuKhIHh3OrkKTLlut0lnBOv9qe5OvbkMH2g38G7KPDCTpIytBVg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/expect": "4.0.8", "@vitest/mocker": "4.0.8", @@ -7802,7 +7790,6 @@ 
"resolved": "https://registry.npmjs.org/zod/-/zod-4.2.0.tgz", "integrity": "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } From d9d7b6d814a15c6b73ba5293d9ad5ab00329608e Mon Sep 17 00:00:00 2001 From: LordMathis Date: Mon, 22 Dec 2025 18:19:18 +0100 Subject: [PATCH 5/8] Allow empty backend options --- pkg/backends/backend.go | 21 ++++++++++++--------- pkg/backends/llama.go | 18 +++++++++++++++++- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/pkg/backends/backend.go b/pkg/backends/backend.go index 2bb0fa8..6e50565 100644 --- a/pkg/backends/backend.go +++ b/pkg/backends/backend.go @@ -56,13 +56,15 @@ func (o *Options) UnmarshalJSON(data []byte) error { } // Create backend from constructor map - if o.BackendOptions != nil { - constructor, exists := backendConstructors[o.BackendType] - if !exists { - return fmt.Errorf("unsupported backend type: %s", o.BackendType) - } + constructor, exists := backendConstructors[o.BackendType] + if !exists { + return fmt.Errorf("unsupported backend type: %s", o.BackendType) + } - backend := constructor() + backend := constructor() + + // If backend_options is provided, unmarshal into the backend + if o.BackendOptions != nil { optionsData, err := json.Marshal(o.BackendOptions) if err != nil { return fmt.Errorf("failed to marshal backend options: %w", err) @@ -71,10 +73,11 @@ func (o *Options) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(optionsData, backend); err != nil { return fmt.Errorf("failed to unmarshal backend options: %w", err) } - - // Store in the appropriate typed field for backward compatibility - o.setBackendOptions(backend) } + // If backend_options is nil or empty, backend remains as empty struct (for router mode) + + // Store in the appropriate typed field + o.setBackendOptions(backend) return nil } diff --git a/pkg/backends/llama.go b/pkg/backends/llama.go index 246f0fe..fb94684 100644 --- a/pkg/backends/llama.go +++ b/pkg/backends/llama.go @@ -327,20 +327,30 @@ func (o *LlamaServerOptions) UnmarshalJSON(data []byte) error { } func (o *LlamaServerOptions) GetPort() int { + if o == nil { + return 0 + } return o.Port } func (o *LlamaServerOptions) SetPort(port int) { + if o == nil { + return + } o.Port = port } func (o *LlamaServerOptions) GetHost() string { + if o == nil { + return "localhost" + } return o.Host } func (o *LlamaServerOptions) Validate() error { + // Allow nil options for router mode where llama.cpp manages models dynamically if o == nil { - return validation.ValidationError(fmt.Errorf("llama server options cannot be nil for llama.cpp backend")) + return nil } // Use reflection to check all string fields for injection patterns @@ -370,6 +380,9 @@ func (o *LlamaServerOptions) Validate() error { // BuildCommandArgs converts InstanceOptions to command line arguments func (o *LlamaServerOptions) BuildCommandArgs() []string { + if o == nil { + return []string{} + } // Llama uses multiple flags for arrays by default (not comma-separated) // Use package-level llamaMultiValuedFlags variable args := BuildCommandArgs(o, llamaMultiValuedFlags) @@ -381,6 +394,9 @@ func (o *LlamaServerOptions) BuildCommandArgs() []string { } func (o *LlamaServerOptions) BuildDockerArgs() []string { + if o == nil { + return []string{} + } // For llama, Docker args are the same as normal args return o.BuildCommandArgs() } From 99eba3daa9e037b0c587aa65a7e313c2a4c686dd Mon Sep 17 
00:00:00 2001 From: LordMathis Date: Mon, 22 Dec 2025 20:10:28 +0100 Subject: [PATCH 6/8] Update test client --- test_client.py | 1 - test_llm.py | 74 -------------------------------------------------- 2 files changed, 75 deletions(-) delete mode 100644 test_llm.py diff --git a/test_client.py b/test_client.py index e151a23..ea0e685 100644 --- a/test_client.py +++ b/test_client.py @@ -4,7 +4,6 @@ Simple Python script to interact with local LLM server's OpenAI-compatible API """ import requests -import json import sys # Local LLM server configuration diff --git a/test_llm.py b/test_llm.py deleted file mode 100644 index 944d3e7..0000000 --- a/test_llm.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -""" -Simple Python script to interact with local LLM server's OpenAI-compatible API -""" - -import requests - -# Local LLM server configuration -LLM_SERVER_URL = "http://localhost:8080/v1/chat/completions" -MODEL_NAME = "proxy-test" # Default model name, can be changed based on your setup - -def send_message(message, model=MODEL_NAME, temperature=0.7, max_tokens=1000): - """ - Send a message to local LLM server API - - Args: - message (str): The message to send - model (str): Model name (depends on your LLM server setup) - temperature (float): Controls randomness (0.0 to 1.0) - max_tokens (int): Maximum tokens in response - - Returns: - str: The AI response or error message - """ - - headers = { - "Content-Type": "application/json", - "Authorization": "Bearer test-inf" - } - - data = { - "model": model, - "messages": [ - { - "role": "user", - "content": message - } - ], - "temperature": temperature, - "max_tokens": max_tokens, - "stream": False - } - - response = requests.post(LLM_SERVER_URL, headers=headers, json=data, timeout=60) - response.raise_for_status() - - result = response.json() - return result["choices"][0]["message"]["content"] - -def main(): - """Run in interactive mode for continuous conversation""" - print("Local LLM Chat Client") - print("-" * 40) - - while True: - try: - user_input = input("\nYou: ").strip() - - if not user_input: - continue - - print("AI: ", end="", flush=True) - response = send_message(user_input) - print(response) - - except KeyboardInterrupt: - print("\nGoodbye!") - break - except EOFError: - print("\nGoodbye!") - break - -if __name__ == "__main__": - main() \ No newline at end of file From 761cdfe7d8889e457dedb4780c843d8868b80210 Mon Sep 17 00:00:00 2001 From: LordMathis Date: Mon, 22 Dec 2025 20:11:16 +0100 Subject: [PATCH 7/8] Improve InstanceCard to display models for llama.cpp instances --- webui/src/components/InstanceCard.tsx | 34 +++++++++++++++++---------- webui/src/lib/api.ts | 6 +++-- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/webui/src/components/InstanceCard.tsx b/webui/src/components/InstanceCard.tsx index a79b116..996ab67 100644 --- a/webui/src/components/InstanceCard.tsx +++ b/webui/src/components/InstanceCard.tsx @@ -1,6 +1,7 @@ // ui/src/components/InstanceCard.tsx import { Button } from "@/components/ui/button"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Badge } from "@/components/ui/badge"; import type { Instance } from "@/types/instance"; import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react"; import LogsDialog from "@/components/LogDialog"; @@ -9,7 +10,7 @@ import HealthBadge from "@/components/HealthBadge"; import BackendBadge from "@/components/BackendBadge"; import { useState, useEffect } from "react"; import { 
useInstanceHealth } from "@/hooks/useInstanceHealth";
-import { instancesApi, llamaCppApi } from "@/lib/api";
+import { instancesApi, llamaCppApi, type Model } from "@/lib/api";
 
 interface InstanceCardProps {
   instance: Instance;
@@ -29,29 +30,33 @@ function InstanceCard({
   const [isLogsOpen, setIsLogsOpen] = useState(false);
   const [isModelsOpen, setIsModelsOpen] = useState(false);
   const [showAllActions, setShowAllActions] = useState(false);
-  const [modelCount, setModelCount] = useState(0);
+  const [models, setModels] = useState<Model[]>([]);
   const health = useInstanceHealth(instance.name, instance.status);
 
   const running = instance.status === "running";
   const isLlamaCpp = instance.options?.backend_type === "llama_cpp";
 
-  // Fetch model count for llama.cpp instances
+  // Fetch models for llama.cpp instances
   useEffect(() => {
     if (!isLlamaCpp || !running) {
-      setModelCount(0);
+      setModels([]);
       return;
     }
 
     void (async () => {
       try {
-        const models = await llamaCppApi.getModels(instance.name);
-        setModelCount(models.length);
+        const fetchedModels = await llamaCppApi.getModels(instance.name);
+        setModels(fetchedModels);
       } catch {
-        setModelCount(0);
+        setModels([]);
      }
     })();
   }, [instance.name, isLlamaCpp, running]);
 
+  // Calculate model counts
+  const totalModels = models.length;
+  const loadedModels = models.filter(m => m.status.value === "loaded").length;
+
   const handleStart = () => {
     startInstance(instance.name);
   };
@@ -124,6 +129,12 @@ function InstanceCard({
           {running && <HealthBadge health={health} />}
+          {isLlamaCpp && running && totalModels > 0 && (
+            <Badge>
+              {loadedModels}/{totalModels} models
+            </Badge>
+          )}
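
The badge above derives its counts from the Model objects returned by llamaCppApi.getModels(). A minimal sketch of the shape the card relies on, for illustration only (the authoritative Model type is the one exported from webui/src/lib/api.ts; only the fields read here are shown, and the names mirror the JSON tags used by the llama.cpp status struct in this patch set):

    // Assumed shape, mirroring the llama.cpp /models status values.
    interface ModelSketch {
      id: string;
      status: {
        value: "loaded" | "loading" | "unloaded";
        args: string[];
      };
    }

    // The card's derived count boils down to:
    const loadedCount = (models: ModelSketch[]): number =>
      models.filter((m) => m.status.value === "loaded").length;
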
@@ -174,30 +185,28 @@ function InstanceCard({ {/* Secondary actions - collapsible */} {showAllActions && ( -
+
- {isLlamaCpp && modelCount > 1 && ( + {isLlamaCpp && totalModels > 1 && ( )} @@ -207,7 +216,6 @@ function InstanceCard({ onClick={handleExport} title="Export instance" data-testid="export-instance-button" - className="flex-1" > Export diff --git a/webui/src/lib/api.ts b/webui/src/lib/api.ts index aeeb100..d648874 100644 --- a/webui/src/lib/api.ts +++ b/webui/src/lib/api.ts @@ -237,19 +237,21 @@ export const llamaCppApi = { // POST /llama-cpp/{name}/models/{model}/load loadModel: (instanceName: string, modelName: string) => - apiCall<{ status: string; message: string }>( + apiCall<{ success: boolean }>( `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`, { method: "POST", + body: JSON.stringify({ model: modelName }), } ), // POST /llama-cpp/{name}/models/{model}/unload unloadModel: (instanceName: string, modelName: string) => - apiCall<{ status: string; message: string }>( + apiCall<{ success: boolean }>( `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`, { method: "POST", + body: JSON.stringify({ model: modelName }), } ), }; From 3c95e76137b6ffedcce64e8cf31a08c98ccade38 Mon Sep 17 00:00:00 2001 From: LordMathis Date: Mon, 22 Dec 2025 20:33:29 +0100 Subject: [PATCH 8/8] Poll models during loading --- webui/src/components/ModelsDialog.tsx | 38 +++++++++++++++++++-------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/webui/src/components/ModelsDialog.tsx b/webui/src/components/ModelsDialog.tsx index d5ba733..c1218ca 100644 --- a/webui/src/components/ModelsDialog.tsx +++ b/webui/src/components/ModelsDialog.tsx @@ -88,20 +88,30 @@ const ModelsDialog: React.FC = ({ } }, [instanceName, isRunning]) - // Poll for models while dialog is open + // Fetch models when dialog opens useEffect(() => { if (!open || !isRunning) return // Initial fetch void fetchModels() + }, [open, isRunning, fetchModels]) - // Poll every 2 seconds + // Auto-refresh only when models are loading + useEffect(() => { + if (!open || !isRunning) return + + // Check if any model is in loading state + const hasLoadingModel = models.some(m => m.status.value === 'loading') + + if (!hasLoadingModel) return + + // Poll every 2 seconds when there's a loading model const interval = setInterval(() => { void fetchModels() }, 2000) return () => clearInterval(interval) - }, [open, isRunning, fetchModels]) + }, [open, isRunning, models, fetchModels]) // Load model const loadModel = async (modelName: string) => { @@ -110,7 +120,10 @@ const ModelsDialog: React.FC = ({ try { await llamaCppApi.loadModel(instanceName, modelName) - // Polling will pick up the change + // Wait a bit for the backend to process the load + await new Promise(resolve => setTimeout(resolve, 500)) + // Refresh models list after loading + await fetchModels() } catch (err) { setError(err instanceof Error ? err.message : 'Failed to load model') } finally { @@ -129,7 +142,10 @@ const ModelsDialog: React.FC = ({ try { await llamaCppApi.unloadModel(instanceName, modelName) - // Polling will pick up the change + // Wait a bit for the backend to process the unload + await new Promise(resolve => setTimeout(resolve, 500)) + // Refresh models list after unloading + await fetchModels() } catch (err) { setError(err instanceof Error ? err.message : 'Failed to unload model') } finally { @@ -230,7 +246,7 @@ const ModelsDialog: React.FC = ({
- {/* Auto-refresh indicator */} - {isRunning && ( + {/* Auto-refresh indicator - only shown when models are loading */} + {isRunning && models.some(m => m.status.value === 'loading') && (
-
- Auto-refreshing every 2 seconds +
+ Auto-refreshing while models are loading
)}
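
Taken together, these patches let an OpenAI-compatible client target a specific model on a specific llama.cpp instance: the listing handler advertises IDs as instance-name/model-id, and OpenAIProxy splits on the first "/" and forwards only the bare model name (leaving the ID untouched for remote instances). A rough client-side sketch in TypeScript; the base URL, the optional auth header, and the my-coder/qwen2.5-7b names are placeholders, not values taken from these patches:

    // Illustrative only: adjust BASE_URL and auth to your llamactl deployment.
    const BASE_URL = "http://localhost:8080";

    async function chatWithModel(instanceName: string, modelId: string, prompt: string) {
      const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          // Add an Authorization header here if the deployment requires API keys.
        },
        body: JSON.stringify({
          // "instance/model": the proxy resolves the instance from the prefix
          // and rewrites the body to carry just the model name.
          model: `${instanceName}/${modelId}`,
          messages: [{ role: "user", content: prompt }],
        }),
      });
      if (!res.ok) throw new Error(`chat request failed: ${res.status}`);
      return res.json();
    }

    // e.g. void chatWithModel("my-coder", "qwen2.5-7b", "Hello!");
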