diff --git a/pkg/backends/backend.go b/pkg/backends/backend.go index ad138a5..6e50565 100644 --- a/pkg/backends/backend.go +++ b/pkg/backends/backend.go @@ -14,6 +14,7 @@ const ( BackendTypeLlamaCpp BackendType = "llama_cpp" BackendTypeMlxLm BackendType = "mlx_lm" BackendTypeVllm BackendType = "vllm" + BackendTypeUnknown BackendType = "unknown" ) type backend interface { @@ -55,13 +56,15 @@ func (o *Options) UnmarshalJSON(data []byte) error { } // Create backend from constructor map - if o.BackendOptions != nil { - constructor, exists := backendConstructors[o.BackendType] - if !exists { - return fmt.Errorf("unsupported backend type: %s", o.BackendType) - } + constructor, exists := backendConstructors[o.BackendType] + if !exists { + return fmt.Errorf("unsupported backend type: %s", o.BackendType) + } - backend := constructor() + backend := constructor() + + // If backend_options is provided, unmarshal into the backend + if o.BackendOptions != nil { optionsData, err := json.Marshal(o.BackendOptions) if err != nil { return fmt.Errorf("failed to marshal backend options: %w", err) @@ -70,10 +73,11 @@ func (o *Options) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(optionsData, backend); err != nil { return fmt.Errorf("failed to unmarshal backend options: %w", err) } - - // Store in the appropriate typed field for backward compatibility - o.setBackendOptions(backend) } + // If backend_options is nil or empty, backend remains as empty struct (for router mode) + + // Store in the appropriate typed field + o.setBackendOptions(backend) return nil } diff --git a/pkg/backends/llama.go b/pkg/backends/llama.go index 246f0fe..fb94684 100644 --- a/pkg/backends/llama.go +++ b/pkg/backends/llama.go @@ -327,20 +327,30 @@ func (o *LlamaServerOptions) UnmarshalJSON(data []byte) error { } func (o *LlamaServerOptions) GetPort() int { + if o == nil { + return 0 + } return o.Port } func (o *LlamaServerOptions) SetPort(port int) { + if o == nil { + return + } o.Port = port } func (o *LlamaServerOptions) GetHost() string { + if o == nil { + return "localhost" + } return o.Host } func (o *LlamaServerOptions) Validate() error { + // Allow nil options for router mode where llama.cpp manages models dynamically if o == nil { - return validation.ValidationError(fmt.Errorf("llama server options cannot be nil for llama.cpp backend")) + return nil } // Use reflection to check all string fields for injection patterns @@ -370,6 +380,9 @@ func (o *LlamaServerOptions) Validate() error { // BuildCommandArgs converts InstanceOptions to command line arguments func (o *LlamaServerOptions) BuildCommandArgs() []string { + if o == nil { + return []string{} + } // Llama uses multiple flags for arrays by default (not comma-separated) // Use package-level llamaMultiValuedFlags variable args := BuildCommandArgs(o, llamaMultiValuedFlags) @@ -381,6 +394,9 @@ func (o *LlamaServerOptions) BuildCommandArgs() []string { } func (o *LlamaServerOptions) BuildDockerArgs() []string { + if o == nil { + return []string{} + } // For llama, Docker args are the same as normal args return o.BuildCommandArgs() } diff --git a/pkg/instance/instance.go b/pkg/instance/instance.go index a8faa1b..6149a4a 100644 --- a/pkg/instance/instance.go +++ b/pkg/instance/instance.go @@ -7,6 +7,7 @@ import ( "net/http" "time" + "llamactl/pkg/backends" "llamactl/pkg/config" ) @@ -117,6 +118,14 @@ func (i *Instance) WaitForHealthy(timeout int) error { return i.process.waitForHealthy(timeout) } +func (i *Instance) GetBackendType() backends.BackendType { + opts := 
i.GetOptions() + if opts == nil { + return backends.BackendTypeUnknown + } + return opts.BackendOptions.BackendType +} + // GetOptions returns the current options func (i *Instance) GetOptions() *Options { if i.options == nil { diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 47e554b..5b55e7d 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -19,7 +19,7 @@ type InstanceManager interface { UpdateInstance(name string, options *instance.Options) (*instance.Instance, error) DeleteInstance(name string) error StartInstance(name string) (*instance.Instance, error) - IsMaxRunningInstancesReached() bool + AtMaxRunning() bool StopInstance(name string) (*instance.Instance, error) EvictLRUInstance() error RestartInstance(name string) (*instance.Instance, error) diff --git a/pkg/manager/operations.go b/pkg/manager/operations.go index 2cfbebf..54ee5d4 100644 --- a/pkg/manager/operations.go +++ b/pkg/manager/operations.go @@ -383,7 +383,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error } // Check max running instances limit for local instances only - if im.IsMaxRunningInstancesReached() { + if im.AtMaxRunning() { return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.globalConfig.Instances.MaxRunningInstances)) } @@ -399,7 +399,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error return inst, nil } -func (im *instanceManager) IsMaxRunningInstancesReached() bool { +func (im *instanceManager) AtMaxRunning() bool { if im.globalConfig.Instances.MaxRunningInstances == -1 { return false } diff --git a/pkg/server/handlers.go b/pkg/server/handlers.go index 3e232ee..ce72173 100644 --- a/pkg/server/handlers.go +++ b/pkg/server/handlers.go @@ -96,7 +96,7 @@ func (h *Handler) ensureInstanceRunning(inst *instance.Instance) error { return fmt.Errorf("instance is not running and on-demand start is not enabled") } - if h.InstanceManager.IsMaxRunningInstancesReached() { + if h.InstanceManager.AtMaxRunning() { if h.cfg.Instances.EnableLRUEviction { err := h.InstanceManager.EvictLRUInstance() if err != nil { diff --git a/pkg/server/handlers_backends.go b/pkg/server/handlers_backends.go index 065b24e..8fc9ff6 100644 --- a/pkg/server/handlers_backends.go +++ b/pkg/server/handlers_backends.go @@ -306,3 +306,158 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc { func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc { return h.executeLlamaServerCommand("--list-devices", "Failed to list devices") } + +// LlamaCppListModels godoc +// @Summary List models in a llama.cpp instance +// @Description Returns a list of models available in the specified llama.cpp instance +// @Tags Llama.cpp +// @Security ApiKeyAuth +// @Produces json +// @Param name path string true "Instance Name" +// @Success 200 {object} map[string]any "Models list response" +// @Failure 400 {string} string "Invalid instance" +// @Failure 500 {string} string "Internal Server Error" +// @Router /api/v1/llama-cpp/{name}/models [get] +func (h *Handler) LlamaCppListModels() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + inst, err := h.validateLlamaCppInstance(r) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid instance", err.Error()) + return + } + + // Check instance permissions + if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil { + writeError(w, http.StatusForbidden, "permission_denied", err.Error()) + 
return + } + + // Check if instance is shutting down before autostart logic + if inst.GetStatus() == instance.ShuttingDown { + writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down") + return + } + + if !inst.IsRemote() && !inst.IsRunning() { + err := h.ensureInstanceRunning(inst) + if err != nil { + writeError(w, http.StatusInternalServerError, "instance start failed", err.Error()) + return + } + } + + // Modify request path to /models for proxying + r.URL.Path = "/models" + + // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state + err = inst.ServeHTTP(w, r) + if err != nil { + // Error is already handled in ServeHTTP (response written) + return + } + } +} + +// LlamaCppLoadModel godoc +// @Summary Load a model in a llama.cpp instance +// @Description Loads the specified model in the given llama.cpp instance +// @Tags Llama.cpp +// @Security ApiKeyAuth +// @Produces json +// @Param name path string true "Instance Name" +// @Param model path string true "Model Name" +// @Success 200 {object} map[string]string "Success message" +// @Failure 400 {string} string "Invalid request" +// @Failure 500 {string} string "Internal Server Error" +// @Router /api/v1/llama-cpp/{name}/models/{model}/load [post] +func (h *Handler) LlamaCppLoadModel() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + inst, err := h.validateLlamaCppInstance(r) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid instance", err.Error()) + return + } + + // Check instance permissions + if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil { + writeError(w, http.StatusForbidden, "permission_denied", err.Error()) + return + } + + // Check if instance is shutting down before autostart logic + if inst.GetStatus() == instance.ShuttingDown { + writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down") + return + } + + if !inst.IsRemote() && !inst.IsRunning() { + err := h.ensureInstanceRunning(inst) + if err != nil { + writeError(w, http.StatusInternalServerError, "instance start failed", err.Error()) + return + } + } + + // Modify request path to /models/load for proxying + r.URL.Path = "/models/load" + + // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state + err = inst.ServeHTTP(w, r) + if err != nil { + // Error is already handled in ServeHTTP (response written) + return + } + } +} + +// LlamaCppUnloadModel godoc +// @Summary Unload a model in a llama.cpp instance +// @Description Unloads the specified model in the given llama.cpp instance +// @Tags Llama.cpp +// @Security ApiKeyAuth +// @Produces json +// @Param name path string true "Instance Name" +// @Param model path string true "Model Name" +// @Success 200 {object} map[string]string "Success message" +// @Failure 400 {string} string "Invalid request" +// @Failure 500 {string} string "Internal Server Error" +// @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post] +func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + inst, err := h.validateLlamaCppInstance(r) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid instance", err.Error()) + return + } + + // Check instance permissions + if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil { + writeError(w, http.StatusForbidden, "permission_denied", err.Error()) + return + } + + // Check 
if instance is shutting down before autostart logic + if inst.GetStatus() == instance.ShuttingDown { + writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down") + return + } + + if !inst.IsRemote() && !inst.IsRunning() { + err := h.ensureInstanceRunning(inst) + if err != nil { + writeError(w, http.StatusInternalServerError, "instance start failed", err.Error()) + return + } + } + + // Modify request path to /models/unload for proxying + r.URL.Path = "/models/unload" + + // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state + err = inst.ServeHTTP(w, r) + if err != nil { + // Error is already handled in ServeHTTP (response written) + return + } + } +} diff --git a/pkg/server/handlers_openai.go b/pkg/server/handlers_openai.go index a7ad635..06f06b7 100644 --- a/pkg/server/handlers_openai.go +++ b/pkg/server/handlers_openai.go @@ -3,10 +3,13 @@ package server import ( "bytes" "encoding/json" + "fmt" "io" + "llamactl/pkg/backends" "llamactl/pkg/instance" "llamactl/pkg/validation" "net/http" + "strings" ) // OpenAIListInstancesResponse represents the response structure for listing instances (models) in OpenAI-compatible format @@ -23,6 +26,53 @@ type OpenAIInstance struct { OwnedBy string `json:"owned_by"` } +// LlamaCppModel represents a model available in a llama.cpp instance +type LlamaCppModel struct { + ID string `json:"id"` + Object string `json:"object"` + OwnedBy string `json:"owned_by"` + Created int64 `json:"created"` + InCache bool `json:"in_cache"` + Path string `json:"path"` + Status LlamaCppModelStatus `json:"status"` +} + +// LlamaCppModelStatus represents the status of a model in a llama.cpp instance +type LlamaCppModelStatus struct { + Value string `json:"value"` // "loaded" | "loading" | "unloaded" + Args []string `json:"args"` +} + +// fetchLlamaCppModels fetches models from a llama.cpp instance using the proxy +func fetchLlamaCppModels(inst *instance.Instance) ([]LlamaCppModel, error) { + // Create a request to the instance's /models endpoint + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s:%d/models", inst.GetHost(), inst.GetPort()), nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + // Use a custom response writer to capture the response + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + bodyBytes, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes)) + } + + var result struct { + Data []LlamaCppModel `json:"data"` + } + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return result.Data, nil +} + // OpenAIListInstances godoc // @Summary List instances in OpenAI-compatible format // @Description Returns a list of instances in a format compatible with OpenAI API @@ -40,14 +90,41 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc { return } - openaiInstances := make([]OpenAIInstance, len(instances)) - for i, inst := range instances { - openaiInstances[i] = OpenAIInstance{ + var openaiInstances []OpenAIInstance + + // For each llama.cpp instance, try to fetch models and add them as separate entries + for _, inst := range instances { + + if inst.GetBackendType() == backends.BackendTypeLlamaCpp && inst.IsRunning() { + // Try to fetch models from the instance + models, err := fetchLlamaCppModels(inst) + 
if err != nil { + fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err) + continue + } + + for _, model := range models { + openaiInstances = append(openaiInstances, OpenAIInstance{ + ID: inst.Name + "/" + model.ID, + Object: "model", + Created: inst.Created, + OwnedBy: inst.Name, + }) + } + + if len(models) > 1 { + // Skip adding the instance name if multiple models are present + continue + } + } + + // Add instance name as single entry (for non-llama.cpp or if model fetch failed) + openaiInstances = append(openaiInstances, OpenAIInstance{ ID: inst.Name, Object: "model", Created: inst.Created, OwnedBy: "llamactl", - } + }) } openaiResponse := OpenAIListInstancesResponse{ @@ -87,14 +164,28 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { return } - modelName, ok := requestBody["model"].(string) - if !ok || modelName == "" { - writeError(w, http.StatusBadRequest, "invalid_request", "Instance name is required") + reqModelName, ok := requestBody["model"].(string) + if !ok || reqModelName == "" { + writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required") return } + // Parse instance name and model name from / format + var instanceName string + var modelName string + + // Check if model name contains "/" + if idx := strings.Index(reqModelName, "/"); idx != -1 { + // Split into instance and model parts + instanceName = reqModelName[:idx] + modelName = reqModelName[idx+1:] + } else { + instanceName = reqModelName + modelName = reqModelName + } + // Validate instance name at the entry point - validatedName, err := validation.ValidateInstanceName(modelName) + validatedName, err := validation.ValidateInstanceName(instanceName) if err != nil { writeError(w, http.StatusBadRequest, "invalid_instance_name", err.Error()) return @@ -119,6 +210,11 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { return } + if inst.IsRemote() { + // Don't replace model name for remote instances + modelName = reqModelName + } + if !inst.IsRemote() && !inst.IsRunning() { err := h.ensureInstanceRunning(inst) if err != nil { @@ -127,6 +223,16 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc { } } + // Update the request body with just the model name + requestBody["model"] = modelName + + // Re-marshal the updated body + bodyBytes, err = json.Marshal(requestBody) + if err != nil { + writeError(w, http.StatusInternalServerError, "marshal_error", "Failed to update request body") + return + } + // Recreate the request body from the bytes we read r.Body = io.NopCloser(bytes.NewReader(bodyBytes)) r.ContentLength = int64(len(bodyBytes)) diff --git a/pkg/server/routes.go b/pkg/server/routes.go index 60f4487..abd694e 100644 --- a/pkg/server/routes.go +++ b/pkg/server/routes.go @@ -70,6 +70,13 @@ func SetupRouter(handler *Handler) *chi.Mux { }) }) + // Llama.cpp instance-specific endpoints + r.Route("/llama-cpp/{name}", func(r chi.Router) { + r.Get("/models", handler.LlamaCppListModels()) + r.Post("/models/{model}/load", handler.LlamaCppLoadModel()) + r.Post("/models/{model}/unload", handler.LlamaCppUnloadModel()) + }) + // Node management endpoints r.Route("/nodes", func(r chi.Router) { r.Get("/", handler.ListNodes()) // List all nodes diff --git a/test_client.py b/test_client.py index e151a23..ea0e685 100644 --- a/test_client.py +++ b/test_client.py @@ -4,7 +4,6 @@ Simple Python script to interact with local LLM server's OpenAI-compatible API """ import requests -import json import sys # Local LLM server configuration diff --git a/webui/package-lock.json 
b/webui/package-lock.json index 077797a..35ffc7f 100644 --- a/webui/package-lock.json +++ b/webui/package-lock.json @@ -161,7 +161,6 @@ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", @@ -511,7 +510,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -558,7 +556,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" } @@ -2537,7 +2534,8 @@ "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@types/babel__core": { "version": "7.20.5", @@ -2621,7 +2619,6 @@ "integrity": "sha512-gWEkeiyYE4vqjON/+Obqcoeffmk0NF15WSBwSs7zwVA2bAbTaE0SJ7P0WNGoJn8uE7fiaV5a7dKYIJriEqOrmA==", "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -2632,7 +2629,6 @@ "integrity": "sha512-tBFxBp9Nfyy5rsmefN+WXc1JeW/j2BpBHFdLZbEVfs9wn3E3NRFxwV0pJg8M1qQAexFpvz73hJXFofV0ZAu92A==", "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "csstype": "^3.0.2" } @@ -2643,7 +2639,6 @@ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "devOptional": true, "license": "MIT", - "peer": true, "peerDependencies": { "@types/react": "^19.2.0" } @@ -2693,7 +2688,6 @@ "integrity": "sha512-6/cmF2piao+f6wSxUsJLZjck7OQsYyRtcOZS02k7XINSNlz93v6emM8WutDQSXnroG2xwYlEVHJI+cPA7CPM3Q==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.50.0", "@typescript-eslint/types": "8.50.0", @@ -3042,7 +3036,6 @@ "integrity": "sha512-F9jI5rSstNknPlTlPN2gcc4gpbaagowuRzw/OJzl368dvPun668Q182S8Q8P9PITgGCl5LAKXpzuue106eM4wA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/utils": "4.0.8", "fflate": "^0.8.2", @@ -3079,7 +3072,6 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -3130,6 +3122,7 @@ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -3401,7 +3394,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "caniuse-lite": "^1.0.30001726", "electron-to-chromium": "^1.5.173", @@ -3834,7 +3826,8 @@ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/dunder-proto": { "version": "1.0.1", @@ -4138,7 +4131,6 @@ "integrity": "sha512-BhHmn2yNOFA9H9JmmIVKJmd288g9hrVRDkdoIgRCRuSySRUHH7r/DI6aAXW9T1WwUuY3DFgrcaqB+deURBLR5g==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -5357,7 +5349,6 @@ "integrity": "sha512-GtldT42B8+jefDUC4yUKAvsaOrH7PDHmZxZXNgF2xMmymjUbRYJvpAybZAKEmXDGTM0mCsz8duOa4vTm5AY2Kg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { 
"@acemir/cssom": "^0.9.28", "@asamuzakjp/dom-selector": "^6.7.6", @@ -5768,6 +5759,7 @@ "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", "dev": true, "license": "MIT", + "peer": true, "bin": { "lz-string": "bin/bin.js" } @@ -6153,7 +6145,6 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -6190,7 +6181,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -6216,6 +6206,7 @@ "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", @@ -6231,6 +6222,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -6272,7 +6264,6 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.0.tgz", "integrity": "sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==", "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } @@ -6282,7 +6273,6 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.0.tgz", "integrity": "sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==", "license": "MIT", - "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -6295,7 +6285,8 @@ "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/react-refresh": { "version": "0.18.0", @@ -7249,7 +7240,6 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -7397,7 +7387,6 @@ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", "integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", @@ -7473,7 +7462,6 @@ "integrity": "sha512-urzu3NCEV0Qa0Y2PwvBtRgmNtxhj5t5ULw7cuKhIHh3OrkKTLlut0lnBOv9qe5OvbkMH2g38G7KPDCTpIytBVg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/expect": "4.0.8", "@vitest/mocker": "4.0.8", @@ -7802,7 +7790,6 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-4.2.0.tgz", "integrity": "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/webui/src/components/InstanceCard.tsx b/webui/src/components/InstanceCard.tsx index d889655..996ab67 100644 --- a/webui/src/components/InstanceCard.tsx +++ b/webui/src/components/InstanceCard.tsx @@ -1,14 +1,16 @@ // ui/src/components/InstanceCard.tsx import { Button } from "@/components/ui/button"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Badge } from "@/components/ui/badge"; import 
type { Instance } from "@/types/instance"; -import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download } from "lucide-react"; +import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react"; import LogsDialog from "@/components/LogDialog"; +import ModelsDialog from "@/components/ModelsDialog"; import HealthBadge from "@/components/HealthBadge"; import BackendBadge from "@/components/BackendBadge"; -import { useState } from "react"; +import { useState, useEffect } from "react"; import { useInstanceHealth } from "@/hooks/useInstanceHealth"; -import { instancesApi } from "@/lib/api"; +import { instancesApi, llamaCppApi, type Model } from "@/lib/api"; interface InstanceCardProps { instance: Instance; @@ -26,9 +28,35 @@ function InstanceCard({ editInstance, }: InstanceCardProps) { const [isLogsOpen, setIsLogsOpen] = useState(false); + const [isModelsOpen, setIsModelsOpen] = useState(false); const [showAllActions, setShowAllActions] = useState(false); + const [models, setModels] = useState([]); const health = useInstanceHealth(instance.name, instance.status); + const running = instance.status === "running"; + const isLlamaCpp = instance.options?.backend_type === "llama_cpp"; + + // Fetch models for llama.cpp instances + useEffect(() => { + if (!isLlamaCpp || !running) { + setModels([]); + return; + } + + void (async () => { + try { + const fetchedModels = await llamaCppApi.getModels(instance.name); + setModels(fetchedModels); + } catch { + setModels([]); + } + })(); + }, [instance.name, isLlamaCpp, running]); + + // Calculate model counts + const totalModels = models.length; + const loadedModels = models.filter(m => m.status.value === "loaded").length; + const handleStart = () => { startInstance(instance.name); }; @@ -53,6 +81,10 @@ function InstanceCard({ setIsLogsOpen(true); }; + const handleModels = () => { + setIsModelsOpen(true); + }; + const handleExport = () => { void (async () => { try { @@ -83,8 +115,6 @@ function InstanceCard({ })(); }; - const running = instance.status === "running"; - return ( <> @@ -99,6 +129,12 @@ function InstanceCard({
             {running && <HealthBadge health={health} />}
+            {isLlamaCpp && running && totalModels > 0 && (
+              <Badge variant="outline" className="flex items-center gap-1">
+                <Boxes className="h-3 w-3" />
+                {loadedModels}/{totalModels} models
+              </Badge>
+            )}
@@ -149,26 +185,37 @@ function InstanceCard({ {/* Secondary actions - collapsible */} {showAllActions && ( -
+
+ {isLlamaCpp && totalModels > 1 && ( + + )} + +
+ + + {/* Error Display */} + {error && ( +
+ + {error} +
+ )} + + {/* Models Table */} +
+ {!isRunning ? ( +
+ Instance is not running +
+ ) : loading && models.length === 0 ? ( +
+ + + Loading models... + +
+ ) : models.length === 0 ? ( +
+ No models found +
+ ) : ( + + + + Model + Status + Actions + + + + {models.map((model) => { + const isLoading = loadingModels.has(model.id) + const isModelLoading = model.status.value === 'loading' + + return ( + + + {model.id} + + +
+ + + {model.status.value} + +
+
+ + {model.status.value === 'loaded' ? ( + + ) : model.status.value === 'unloaded' ? ( + + ) : ( + + )} + +
+ ) + })} +
+
+ )} +
+ + {/* Auto-refresh indicator - only shown when models are loading */} + {isRunning && models.some(m => m.status.value === 'loading') && ( +
+
+ Auto-refreshing while models are loading +
+ )} + + + ) +} + +export default ModelsDialog diff --git a/webui/src/components/ui/table.tsx b/webui/src/components/ui/table.tsx new file mode 100644 index 0000000..7f3502f --- /dev/null +++ b/webui/src/components/ui/table.tsx @@ -0,0 +1,117 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +const Table = React.forwardRef< + HTMLTableElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+  <div className="relative w-full overflow-auto">
+    <table
+      ref={ref}
+      className={cn("w-full caption-bottom text-sm", className)}
+      {...props}
+    />
+  </div>
+))
+Table.displayName = "Table"
+
+const TableHeader = React.forwardRef<
+  HTMLTableSectionElement,
+  React.HTMLAttributes<HTMLTableSectionElement>
+>(({ className, ...props }, ref) => (
+  <thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
+))
+TableHeader.displayName = "TableHeader"
+
+const TableBody = React.forwardRef<
+  HTMLTableSectionElement,
+  React.HTMLAttributes<HTMLTableSectionElement>
+>(({ className, ...props }, ref) => (
+  <tbody
+    ref={ref}
+    className={cn("[&_tr:last-child]:border-0", className)}
+    {...props}
+  />
+))
+TableBody.displayName = "TableBody"
+
+const TableFooter = React.forwardRef<
+  HTMLTableSectionElement,
+  React.HTMLAttributes<HTMLTableSectionElement>
+>(({ className, ...props }, ref) => (
+  <tfoot
+    ref={ref}
+    className={cn(
+      "border-t bg-muted/50 font-medium [&>tr]:last:border-b-0",
+      className
+    )}
+    {...props}
+  />
+))
+TableFooter.displayName = "TableFooter"
+
+const TableRow = React.forwardRef<
+  HTMLTableRowElement,
+  React.HTMLAttributes<HTMLTableRowElement>
+>(({ className, ...props }, ref) => (
+  <tr
+    ref={ref}
+    className={cn(
+      "border-b transition-colors hover:bg-muted/50 data-[state=selected]:bg-muted",
+      className
+    )}
+    {...props}
+  />
+))
+TableRow.displayName = "TableRow"
+
+const TableHead = React.forwardRef<
+  HTMLTableCellElement,
+  React.ThHTMLAttributes<HTMLTableCellElement>
+>(({ className, ...props }, ref) => (
+  <th
+    ref={ref}
+    className={cn(
+      "h-12 px-4 text-left align-middle font-medium text-muted-foreground [&:has([role=checkbox])]:pr-0",
+      className
+    )}
+    {...props}
+  />
+))
+TableHead.displayName = "TableHead"
+
+const TableCell = React.forwardRef<
+  HTMLTableCellElement,
+  React.TdHTMLAttributes<HTMLTableCellElement>
+>(({ className, ...props }, ref) => (
+  <td
+    ref={ref}
+    className={cn("p-4 align-middle [&:has([role=checkbox])]:pr-0", className)}
+    {...props}
+  />
+))
+TableCell.displayName = "TableCell"
+
+const TableCaption = React.forwardRef<
+  HTMLTableCaptionElement,
+  React.HTMLAttributes<HTMLTableCaptionElement>
+>(({ className, ...props }, ref) => (
+  <caption
+    ref={ref}
+    className={cn("mt-4 text-sm text-muted-foreground", className)}
+    {...props}
+  />
+))
+TableCaption.displayName = "TableCaption"
+
+export {
+  Table,
+  TableHeader,
+  TableBody,
+  TableFooter,
+  TableHead,
+  TableRow,
+  TableCell,
+  TableCaption,
+}
diff --git a/webui/src/lib/api.ts b/webui/src/lib/api.ts
index ddcf384..d648874 100644
--- a/webui/src/lib/api.ts
+++ b/webui/src/lib/api.ts
@@ -205,3 +205,53 @@ export const apiKeysApi = {
   getPermissions: (id: number) =>
     apiCall(`/auth/keys/${id}/permissions`),
 };
+
+// Llama.cpp model management types
+export interface Model {
+  id: string;
+  object: string;
+  owned_by: string;
+  created: number;
+  in_cache: boolean;
+  path: string;
+  status: {
+    value: string; // "loaded" | "loading" | "unloaded"
+    args: string[];
+  };
+}
+
+export interface ModelsListResponse {
+  object: string;
+  data: Model[];
+}
+
+// Llama.cpp model management API functions
+export const llamaCppApi = {
+  // GET /llama-cpp/{name}/models
+  getModels: async (instanceName: string): Promise<Model[]> => {
+    const response = await apiCall<ModelsListResponse>(
+      `/llama-cpp/${encodeURIComponent(instanceName)}/models`
+    );
+    return response.data;
+  },
+
+  // POST /llama-cpp/{name}/models/{model}/load
+  loadModel: (instanceName: string, modelName: string) =>
+    apiCall<{ success: boolean }>(
+      `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`,
+      {
+        method: "POST",
+        body: JSON.stringify({ model: modelName }),
+      }
+    ),
+
+  // POST /llama-cpp/{name}/models/{model}/unload
+  unloadModel: (instanceName: string, modelName: string) =>
+    apiCall<{ success: boolean }>(
+      `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`,
+      {
+        method: "POST",
+        body: JSON.stringify({ model: modelName }),
+      }
+    ),
+};
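For reviewers, a minimal usage sketch of the `llamaCppApi` helpers added in `webui/src/lib/api.ts` above. This is illustration only, not part of the patch: the instance name "my-llama" and the `loadFirstUnloadedModel` helper are made-up placeholders, and `apiCall` is assumed to handle the API base URL and auth headers as it does elsewhere in the webui.

```typescript
import { llamaCppApi, type Model } from "@/lib/api";

// List the models served by a llama.cpp instance and load the first one
// that is currently unloaded.
async function loadFirstUnloadedModel(instanceName: string): Promise<void> {
  // GET /llama-cpp/{name}/models
  const models: Model[] = await llamaCppApi.getModels(instanceName);
  for (const m of models) {
    console.log(`${m.id}: ${m.status.value}`);
  }

  const unloaded = models.find((m) => m.status.value === "unloaded");
  if (unloaded) {
    // POST /llama-cpp/{name}/models/{model}/load
    await llamaCppApi.loadModel(instanceName, unloaded.id);
  }
}

// "my-llama" is a hypothetical instance name.
void loadFirstUnloadedModel("my-llama");
```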