Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-12-23 17:44:24 +00:00)
Implement model management for llama.cpp instances
pkg/instance/models.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package instance

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "llamactl/pkg/backends"
    "net/http"
    "time"
)

// Model represents a model available in a llama.cpp instance
type Model struct {
    ID      string      `json:"id"`
    Object  string      `json:"object"`
    OwnedBy string      `json:"owned_by"`
    Created int64       `json:"created"`
    InCache bool        `json:"in_cache"`
    Path    string      `json:"path"`
    Status  ModelStatus `json:"status"`
}

// ModelStatus represents the status of a model in an instance
type ModelStatus struct {
    Value string   `json:"value"` // "loaded" | "loading" | "unloaded"
    Args  []string `json:"args"`
}

// IsLlamaCpp checks if this instance is a llama.cpp instance
func (i *Instance) IsLlamaCpp() bool {
    opts := i.GetOptions()
    if opts == nil {
        return false
    }
    return opts.BackendOptions.BackendType == backends.BackendTypeLlamaCpp
}

// GetModels fetches the models available in this llama.cpp instance
func (i *Instance) GetModels() ([]Model, error) {
    if !i.IsLlamaCpp() {
        return nil, fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
    }

    if !i.IsRunning() {
        return nil, fmt.Errorf("instance %s is not running", i.Name)
    }

    var result struct {
        Data []Model `json:"data"`
    }
    if err := i.doRequest("GET", "/models", nil, &result, 10*time.Second); err != nil {
        return nil, fmt.Errorf("failed to fetch models: %w", err)
    }

    return result.Data, nil
}

// LoadModel loads a model in this llama.cpp instance
func (i *Instance) LoadModel(modelName string) error {
    if !i.IsLlamaCpp() {
        return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
    }

    if !i.IsRunning() {
        return fmt.Errorf("instance %s is not running", i.Name)
    }

    // Make the load request
    reqBody := map[string]string{"model": modelName}
    if err := i.doRequest("POST", "/models/load", reqBody, nil, 30*time.Second); err != nil {
        return fmt.Errorf("failed to load model: %w", err)
    }

    return nil
}

// UnloadModel unloads a model from this llama.cpp instance
func (i *Instance) UnloadModel(modelName string) error {
    if !i.IsLlamaCpp() {
        return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
    }

    if !i.IsRunning() {
        return fmt.Errorf("instance %s is not running", i.Name)
    }

    // Make the unload request
    reqBody := map[string]string{"model": modelName}
    if err := i.doRequest("POST", "/models/unload", reqBody, nil, 30*time.Second); err != nil {
        return fmt.Errorf("failed to unload model: %w", err)
    }

    return nil
}

// doRequest makes an HTTP request to this instance's backend
func (i *Instance) doRequest(method, path string, reqBody, respBody any, timeout time.Duration) error {
    url := fmt.Sprintf("http://%s:%d%s", i.GetHost(), i.GetPort(), path)

    var bodyReader io.Reader
    if reqBody != nil {
        bodyBytes, err := json.Marshal(reqBody)
        if err != nil {
            return fmt.Errorf("failed to marshal request body: %w", err)
        }
        bodyReader = bytes.NewReader(bodyBytes)
    }

    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()

    req, err := http.NewRequestWithContext(ctx, method, url, bodyReader)
    if err != nil {
        return fmt.Errorf("failed to create request: %w", err)
    }

    if reqBody != nil {
        req.Header.Set("Content-Type", "application/json")
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        bodyBytes, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes))
    }

    if respBody != nil {
        if err := json.NewDecoder(resp.Body).Decode(respBody); err != nil {
            return fmt.Errorf("failed to decode response: %w", err)
        }
    }

    return nil
}
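A minimal caller-side sketch (not part of the commit) of how these new Instance helpers might be used; it assumes "inst" is a running llama.cpp instance obtained from the manager, and the helper name listAndLoad is hypothetical:

package example

import (
    "fmt"

    "llamactl/pkg/instance"
)

// listAndLoad lists the models reported by the instance and loads the first one.
func listAndLoad(inst *instance.Instance) error {
    models, err := inst.GetModels() // GET /models on the instance backend
    if err != nil {
        return err
    }
    for _, m := range models {
        fmt.Printf("%s (status=%s, in_cache=%v)\n", m.ID, m.Status.Value, m.InCache)
    }
    if len(models) == 0 {
        return nil
    }
    // POST /models/load with {"model": "<id>"}
    return inst.LoadModel(models[0].ID)
}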
@@ -24,6 +24,8 @@ type InstanceManager interface {
    EvictLRUInstance() error
    RestartInstance(name string) (*instance.Instance, error)
    GetInstanceLogs(name string, numLines int) (string, error)
    ResolveInstance(modelName string) (string, error)
    RefreshModelRegistry(inst *instance.Instance) error
    Shutdown()
}

@@ -34,6 +36,7 @@ type instanceManager struct {
    db        database.InstanceStore
    remote    *remoteManager
    lifecycle *lifecycleManager
    models    *modelRegistry

    // Configuration
    globalConfig *config.AppConfig

@@ -60,12 +63,16 @@ func New(globalConfig *config.AppConfig, db database.InstanceStore) InstanceManager
    // Initialize remote manager
    remote := newRemoteManager(globalConfig.Nodes, 30*time.Second)

    // Initialize model registry
    models := newModelRegistry()

    // Create manager instance
    im := &instanceManager{
        registry:     registry,
        ports:        ports,
        db:           db,
        remote:       remote,
        models:       models,
        globalConfig: globalConfig,
    }

@@ -142,9 +149,27 @@ func (im *instanceManager) loadInstances() error {
    // Auto-start instances that have auto-restart enabled
    go im.autoStartInstances()

    // Discover models from all running llama.cpp instances
    go im.discoverAllModels()

    return nil
}

// discoverAllModels discovers and registers models for all running llama.cpp instances
func (im *instanceManager) discoverAllModels() {
    instances := im.registry.listRunning()

    for _, inst := range instances {
        if !inst.IsLlamaCpp() {
            continue
        }

        if err := im.RefreshModelRegistry(inst); err != nil {
            log.Printf("Failed to discover models for instance %s: %v", inst.Name, err)
        }
    }
}

// loadInstance loads a single persisted instance and adds it to the registry
func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error {
    name := persistedInst.Name
pkg/manager/model_registry.go (new file, 79 lines)
@@ -0,0 +1,79 @@
package manager

import (
    "fmt"
    "llamactl/pkg/instance"
    "sync"
)

// modelRegistry maintains a global mapping of model names to instance names
// for llama.cpp instances. Model names must be globally unique across all instances.
type modelRegistry struct {
    mu              sync.RWMutex
    modelToInstance map[string]string   // model name → instance name
    instanceModels  map[string][]string // instance name → model names
}

// newModelRegistry creates a new model registry
func newModelRegistry() *modelRegistry {
    return &modelRegistry{
        modelToInstance: make(map[string]string),
        instanceModels:  make(map[string][]string),
    }
}

// registerModels registers models from an instance to the registry.
// Skips models that conflict with other instances and returns a list of conflicts.
func (mr *modelRegistry) registerModels(instanceName string, models []instance.Model) []string {
    mr.mu.Lock()
    defer mr.mu.Unlock()

    // Unregister any existing models for this instance first
    mr.removeModels(instanceName)

    // Register models, skipping conflicts
    var modelNames []string
    var conflicts []string

    for _, model := range models {
        // Check if this model conflicts with another instance
        if existingInstance, exists := mr.modelToInstance[model.ID]; exists && existingInstance != instanceName {
            conflicts = append(conflicts, fmt.Sprintf("%s (already in %s)", model.ID, existingInstance))
            continue // Skip this model
        }

        // Register the model
        mr.modelToInstance[model.ID] = instanceName
        modelNames = append(modelNames, model.ID)
    }

    mr.instanceModels[instanceName] = modelNames

    return conflicts
}

// unregisterModels removes all models for an instance
func (mr *modelRegistry) unregisterModels(instanceName string) {
    mr.mu.Lock()
    defer mr.mu.Unlock()
    mr.removeModels(instanceName)
}

// removeModels removes all models for an instance (caller must hold lock)
func (mr *modelRegistry) removeModels(instanceName string) {
    if models, exists := mr.instanceModels[instanceName]; exists {
        for _, modelName := range models {
            delete(mr.modelToInstance, modelName)
        }
        delete(mr.instanceModels, instanceName)
    }
}

// getModelInstance returns the instance name that hosts the given model
func (mr *modelRegistry) getModelInstance(modelName string) (string, bool) {
    mr.mu.RLock()
    defer mr.mu.RUnlock()

    instanceName, exists := mr.modelToInstance[modelName]
    return instanceName, exists
}
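A small test-style sketch (not part of the commit) of the conflict rule: the first instance to register a model ID keeps it, later registrations of the same ID are skipped and reported. Because the registry is unexported it would have to live in package manager; the instance and model names are made up for illustration:

package manager

import (
    "fmt"

    "llamactl/pkg/instance"
)

func exampleRegistry() {
    mr := newModelRegistry()

    // First instance registers two models.
    mr.registerModels("node-a", []instance.Model{{ID: "qwen-7b"}, {ID: "phi-4"}})

    // Second instance tries to register an ID that node-a already owns.
    conflicts := mr.registerModels("node-b", []instance.Model{{ID: "qwen-7b"}, {ID: "mistral-7b"}})
    fmt.Println(conflicts) // ["qwen-7b (already in node-a)"]

    owner, ok := mr.getModelInstance("mistral-7b")
    fmt.Println(owner, ok) // "node-b" true

    // Stopping node-a frees its model names.
    mr.unregisterModels("node-a")
    _, ok = mr.getModelInstance("phi-4")
    fmt.Println(ok) // false
}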
@@ -337,6 +337,9 @@ func (im *instanceManager) DeleteInstance(name string) error {
    // Release port (use ReleaseByInstance for proper cleanup)
    im.ports.releaseByInstance(name)

    // Unregister models when instance is deleted
    im.onInstanceStopped(name)

    // Remove from registry
    if err := im.registry.remove(name); err != nil {
        return fmt.Errorf("failed to remove instance from registry: %w", err)

@@ -396,6 +399,9 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
        log.Printf("Warning: failed to persist instance %s: %v", name, err)
    }

    // Discover and register models for llama.cpp instances
    go im.onInstanceStarted(name)

    return inst, nil
}

@@ -455,6 +461,9 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error)
        log.Printf("Warning: failed to persist instance %s: %v", name, err)
    }

    // Unregister models when instance stops
    im.onInstanceStopped(name)

    return inst, nil
}

@@ -535,3 +544,73 @@ func (im *instanceManager) setPortInOptions(options *instance.Options, port int)
func (im *instanceManager) EvictLRUInstance() error {
    return im.lifecycle.evictLRU()
}

// ResolveInstance resolves a model name to an instance name.
// Precedence: instance name > model registry
func (im *instanceManager) ResolveInstance(modelName string) (string, error) {
    // Check if it's an instance name first
    if _, err := im.GetInstance(modelName); err == nil {
        return modelName, nil
    }

    // Check if it's a model name in the registry
    if instanceName, exists := im.models.getModelInstance(modelName); exists {
        return instanceName, nil
    }

    return "", fmt.Errorf("model or instance '%s' not found", modelName)
}

// RefreshModelRegistry refreshes the model registry for the given instance
func (im *instanceManager) RefreshModelRegistry(inst *instance.Instance) error {
    if !inst.IsRunning() {
        return fmt.Errorf("instance %s is not running", inst.Name)
    }

    // Fetch models from instance and register them
    models, err := inst.GetModels()
    if err != nil {
        return fmt.Errorf("failed to fetch models: %w", err)
    }

    // Register models, skipping conflicts
    conflicts := im.models.registerModels(inst.Name, models)
    if len(conflicts) > 0 {
        log.Printf("Warning: Model name conflicts for instance %s (skipped): %v", inst.Name, conflicts)
    }

    // Check if instance name shadows any model names
    if otherInstance, exists := im.models.getModelInstance(inst.Name); exists && otherInstance != inst.Name {
        log.Printf("Warning: Instance name '%s' shadows model name from instance '%s'", inst.Name, otherInstance)
    }

    return nil
}

// onInstanceStarted is called when an instance successfully starts and becomes healthy
func (im *instanceManager) onInstanceStarted(name string) {
    inst, err := im.GetInstance(name)
    if err != nil {
        log.Printf("Failed to get instance %s for model discovery: %v", name, err)
        return
    }

    // Only discover models for llama.cpp instances
    if !inst.IsLlamaCpp() {
        return
    }

    if err := inst.WaitForHealthy(30); err != nil {
        log.Printf("Instance %s not healthy, skipping model discovery: %v", name, err)
        return
    }

    if err := im.RefreshModelRegistry(inst); err != nil {
        log.Printf("Failed to discover models for instance %s: %v", name, err)
    }
}

// onInstanceStopped is called when an instance stops or is deleted
func (im *instanceManager) onInstanceStopped(name string) {
    im.models.unregisterModels(name)
}
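A brief sketch (not part of the commit) of the resolution precedence described above. It assumes an InstanceManager that manages an instance named "my-llama" which currently serves a model with ID "qwen-7b"; both names and the import path llamactl/pkg/manager are assumptions for illustration:

package example

import (
    "fmt"

    "llamactl/pkg/manager"
)

func resolveExamples(im manager.InstanceManager) {
    a, _ := im.ResolveInstance("my-llama") // instance name wins: "my-llama"
    b, _ := im.ResolveInstance("qwen-7b")  // falls back to the model registry: "my-llama"
    _, err := im.ResolveInstance("nope")   // neither: model or instance 'nope' not found
    fmt.Println(a, b, err)
}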
@@ -5,9 +5,12 @@ import (
    "fmt"
    "llamactl/pkg/backends"
    "llamactl/pkg/instance"
    "log"
    "net/http"
    "os/exec"
    "strings"

    "github.com/go-chi/chi/v5"
)

// ParseCommandRequest represents the request body for backend command parsing

@@ -306,3 +309,115 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc {
func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc {
    return h.executeLlamaServerCommand("--list-devices", "Failed to list devices")
}

// LlamaCppListModels godoc
// @Summary List models in a llama.cpp instance
// @Description Returns a list of models available in the specified llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produces json
// @Param name path string true "Instance Name"
// @Success 200 {object} map[string]any "Models list response"
// @Failure 400 {string} string "Invalid instance"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models [get]
func (h *Handler) LlamaCppListModels() http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        inst, err := h.getInstance(r)
        if err != nil {
            writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
            return
        }

        models, err := inst.GetModels()
        if err != nil {
            writeError(w, http.StatusBadRequest, "get_models_failed", err.Error())
            return
        }

        response := map[string]any{
            "object": "list",
            "data":   models,
        }

        writeJSON(w, http.StatusOK, response)
    }
}

// LlamaCppLoadModel godoc
// @Summary Load a model in a llama.cpp instance
// @Description Loads the specified model in the given llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produces json
// @Param name path string true "Instance Name"
// @Param model path string true "Model Name"
// @Success 200 {object} map[string]string "Success message"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models/{model}/load [post]
func (h *Handler) LlamaCppLoadModel() http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        inst, err := h.getInstance(r)
        if err != nil {
            writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
            return
        }

        modelName := chi.URLParam(r, "model")

        if err := inst.LoadModel(modelName); err != nil {
            writeError(w, http.StatusBadRequest, "load_model_failed", err.Error())
            return
        }

        // Refresh the model registry
        if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil {
            log.Printf("Warning: failed to refresh model registry after load: %v", err)
        }

        writeJSON(w, http.StatusOK, map[string]string{
            "status":  "success",
            "message": fmt.Sprintf("Model %s loaded successfully", modelName),
        })
    }
}

// LlamaCppUnloadModel godoc
// @Summary Unload a model in a llama.cpp instance
// @Description Unloads the specified model in the given llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produces json
// @Param name path string true "Instance Name"
// @Param model path string true "Model Name"
// @Success 200 {object} map[string]string "Success message"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post]
func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        inst, err := h.getInstance(r)
        if err != nil {
            writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
            return
        }

        modelName := chi.URLParam(r, "model")

        if err := inst.UnloadModel(modelName); err != nil {
            writeError(w, http.StatusBadRequest, "unload_model_failed", err.Error())
            return
        }

        // Refresh the model registry
        if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil {
            log.Printf("Warning: failed to refresh model registry after unload: %v", err)
        }

        writeJSON(w, http.StatusOK, map[string]string{
            "status":  "success",
            "message": fmt.Sprintf("Model %s unloaded successfully", modelName),
        })
    }
}
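These handlers are wired to routes in the router change further below. A rough client sketch (not part of the commit) for exercising them; the base URL, port, instance name "my-llama", and model ID "qwen-7b" are assumptions, and authentication (ApiKeyAuth) is omitted for brevity:

package example

import (
    "fmt"
    "io"
    "net/http"
)

// listAndLoadOverHTTP lists an instance's models and then requests a load.
func listAndLoadOverHTTP() error {
    resp, err := http.Get("http://localhost:8080/api/v1/llama-cpp/my-llama/models")
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return err
    }
    fmt.Printf("%s\n", body) // expected shape: {"object":"list","data":[...]}

    // Loading a model is a POST to /models/{model}/load.
    loadResp, err := http.Post("http://localhost:8080/api/v1/llama-cpp/my-llama/models/qwen-7b/load", "application/json", nil)
    if err != nil {
        return err
    }
    return loadResp.Body.Close()
}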
@@ -3,6 +3,7 @@ package server
import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "llamactl/pkg/instance"
    "llamactl/pkg/validation"

@@ -40,14 +41,41 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
        return
    }

-   openaiInstances := make([]OpenAIInstance, len(instances))
-   for i, inst := range instances {
-       openaiInstances[i] = OpenAIInstance{
+   var openaiInstances []OpenAIInstance
+
+   // For each llama.cpp instance, try to fetch models and add them as separate entries
+   for _, inst := range instances {
+
+       if inst.IsLlamaCpp() && inst.IsRunning() {
+           // Try to fetch models from the instance
+           models, err := inst.GetModels()
+           if err != nil {
+               fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err)
+               continue
+           }
+
+           for _, model := range models {
+               openaiInstances = append(openaiInstances, OpenAIInstance{
+                   ID:      model.ID,
+                   Object:  "model",
+                   Created: model.Created,
+                   OwnedBy: inst.Name,
+               })
+           }
+
+           if len(models) > 1 {
+               // Skip adding the instance name if multiple models are present
+               continue
+           }
+       }
+
+       // Add instance name as single entry (for non-llama.cpp or if model fetch failed)
+       openaiInstances = append(openaiInstances, OpenAIInstance{
            ID:      inst.Name,
            Object:  "model",
            Created: inst.Created,
            OwnedBy: "llamactl",
-       }
+       })
    }

    openaiResponse := OpenAIListInstancesResponse{

@@ -89,12 +117,19 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {

    modelName, ok := requestBody["model"].(string)
    if !ok || modelName == "" {
-       writeError(w, http.StatusBadRequest, "invalid_request", "Instance name is required")
+       writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required")
        return
    }

+   // Resolve model name to instance name (checks instance names first, then model registry)
+   instanceName, err := h.InstanceManager.ResolveInstance(modelName)
+   if err != nil {
+       writeError(w, http.StatusBadRequest, "model_not_found", err.Error())
+       return
+   }
+
    // Validate instance name at the entry point
-   validatedName, err := validation.ValidateInstanceName(modelName)
+   validatedName, err := validation.ValidateInstanceName(instanceName)
    if err != nil {
        writeError(w, http.StatusBadRequest, "invalid_instance_name", err.Error())
        return
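The user-visible effect of the proxy change is that the "model" field of an OpenAI-compatible request may now be a registered model ID as well as an instance name. A hedged request sketch (not part of the commit); the endpoint path, host, and model ID are assumptions:

package example

import (
    "bytes"
    "net/http"
)

// chatWithModel sends a completion request addressed by model ID; llamactl
// resolves it to the hosting instance via the model registry.
func chatWithModel() error {
    payload := []byte(`{"model":"qwen-7b","messages":[{"role":"user","content":"hi"}]}`)
    resp, err := http.Post("http://localhost:8080/v1/chat/completions", "application/json", bytes.NewReader(payload))
    if err != nil {
        return err
    }
    return resp.Body.Close()
}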
@@ -73,6 +73,13 @@ func SetupRouter(handler *Handler) *chi.Mux {
        })
    })

    // Llama.cpp instance-specific endpoints
    r.Route("/llama-cpp/{name}", func(r chi.Router) {
        r.Get("/models", handler.LlamaCppListModels())
        r.Post("/models/{model}/load", handler.LlamaCppLoadModel())
        r.Post("/models/{model}/unload", handler.LlamaCppUnloadModel())
    })

    // Node management endpoints
    r.Route("/nodes", func(r chi.Router) {
        r.Get("/", handler.ListNodes()) // List all nodes