diff --git a/pkg/server/handlers.go b/pkg/server/handlers.go
index 98514ec..8f0b509 100644
--- a/pkg/server/handlers.go
+++ b/pkg/server/handlers.go
@@ -207,7 +207,7 @@ func (h *Handler) GetInstance() http.HandlerFunc {
 
 		inst, err := h.InstanceManager.GetInstance(name)
 		if err != nil {
-			http.Error(w, "Failed to get instance: "+err.Error(), http.StatusInternalServerError)
+			http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
 			return
 		}
 
@@ -480,29 +480,15 @@ func (h *Handler) ProxyToInstance() http.HandlerFunc {
 
 		// Strip the "/api/v1/instances//proxy" prefix from the request URL
 		prefix := fmt.Sprintf("/api/v1/instances/%s/proxy", name)
-		proxyPath := r.URL.Path[len(prefix):]
-
-		// Ensure the proxy path starts with "/"
-		if !strings.HasPrefix(proxyPath, "/") {
-			proxyPath = "/" + proxyPath
-		}
+		r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix)
 
 		// Update the last request time for the instance
 		inst.UpdateLastRequestTime()
 
-		// Modify the request to remove the proxy prefix
-		originalPath := r.URL.Path
-		r.URL.Path = proxyPath
-
 		// Set forwarded headers
 		r.Header.Set("X-Forwarded-Host", r.Header.Get("Host"))
 		r.Header.Set("X-Forwarded-Proto", "http")
 
-		// Restore original path for logging purposes
-		defer func() {
-			r.URL.Path = originalPath
-		}()
-
 		// Forward the request using the cached proxy
 		proxy.ServeHTTP(w, r)
 	}
@@ -585,12 +571,13 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
 		// Route to the appropriate inst based on instance name
 		inst, err := h.InstanceManager.GetInstance(modelName)
 		if err != nil {
-			http.Error(w, "Failed to get instance: "+err.Error(), http.StatusInternalServerError)
+			http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
 			return
 		}
 		if !inst.IsRunning() {
-			allowOnDemand := inst.GetOptions() != nil && inst.GetOptions().OnDemandStart != nil && *inst.GetOptions().OnDemandStart
+			options := inst.GetOptions()
+			allowOnDemand := options != nil && options.OnDemandStart != nil && *options.OnDemandStart
 			if !allowOnDemand {
 				http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
 				return
 			}
@@ -639,6 +626,84 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
 	}
 }
+func (h *Handler) LlamaCppProxy(onDemandStart bool) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+
+		// Get the instance name from the URL parameter
+		name := chi.URLParam(r, "name")
+		if name == "" {
+			http.Error(w, "Instance name cannot be empty", http.StatusBadRequest)
+			return
+		}
+
+		// Route to the appropriate inst based on instance name
+		inst, err := h.InstanceManager.GetInstance(name)
+		if err != nil {
+			http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		options := inst.GetOptions()
+		if options == nil {
+			http.Error(w, "Cannot obtain instance options", http.StatusInternalServerError)
+			return
+		}
+
+		if options.BackendType != backends.BackendTypeLlamaCpp {
+			http.Error(w, "Instance is not a llama.cpp server", http.StatusBadRequest)
+			return
+		}
+
+		if !inst.IsRunning() {
+
+			if !(onDemandStart && options.OnDemandStart != nil && *options.OnDemandStart) {
+				http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
+				return
+			}
+
+			if h.InstanceManager.IsMaxRunningInstancesReached() {
+				if h.cfg.Instances.EnableLRUEviction {
+					err := h.InstanceManager.EvictLRUInstance()
+					if err != nil {
+						http.Error(w, "Cannot start instance, failed to evict instance: "+err.Error(), http.StatusInternalServerError)
+						return
+					}
+				} else {
+					http.Error(w, "Cannot start instance, maximum number of instances reached", http.StatusConflict)
+					return
+				}
+			}
+
+			// On-demand start is enabled, so start the instance
+			if _, err := h.InstanceManager.StartInstance(name); err != nil {
+				http.Error(w, "Failed to start instance: "+err.Error(), http.StatusInternalServerError)
+				return
+			}
+
+			// Wait for the instance to become healthy before proceeding (uses the configured on-demand start timeout)
+			if err := inst.WaitForHealthy(h.cfg.Instances.OnDemandStartTimeout); err != nil {
+				http.Error(w, "Instance failed to become healthy: "+err.Error(), http.StatusServiceUnavailable)
+				return
+			}
+		}
+
+		proxy, err := inst.GetProxy()
+		if err != nil {
+			http.Error(w, "Failed to get proxy: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Strip the "/llama-cpp/{name}" prefix from the request URL
+		prefix := fmt.Sprintf("/llama-cpp/%s", name)
+		r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix)
+
+		// Update the last request time for the instance
+		inst.UpdateLastRequestTime()
+
+		proxy.ServeHTTP(w, r)
+	}
+}
+
 // ParseCommandRequest represents the request body for command parsing
 type ParseCommandRequest struct {
 	Command string `json:"command"`
 }
@@ -719,21 +784,21 @@ func (h *Handler) ParseMlxCommand() http.HandlerFunc {
 			writeError(w, http.StatusBadRequest, "invalid_request", "Invalid JSON body")
 			return
 		}
-		
+
 		if strings.TrimSpace(req.Command) == "" {
 			writeError(w, http.StatusBadRequest, "invalid_command", "Command cannot be empty")
 			return
 		}
-		
+
 		mlxOptions, err := mlx.ParseMlxCommand(req.Command)
 		if err != nil {
			writeError(w, http.StatusBadRequest, "parse_error", err.Error())
 			return
 		}
-		
+
 		// Currently only support mlx_lm backend type
 		backendType := backends.BackendTypeMlxLm
-		
+
 		options := &instance.CreateInstanceOptions{
 			BackendType:      backendType,
 			MlxServerOptions: mlxOptions,
diff --git a/pkg/server/routes.go b/pkg/server/routes.go
index 02cfa22..8d5068b 100644
--- a/pkg/server/routes.go
+++ b/pkg/server/routes.go
@@ -103,6 +103,51 @@ func SetupRouter(handler *Handler) *chi.Mux {
 	})
 
+	r.Route("/llama-cpp/{name}", func(r chi.Router) {
+
+		// Public Routes
+		// Allow the llama-cpp server to serve its own WebUI if it is running.
+		// Don't auto-start the server since this route can be accessed without an API key
+		r.Get("/", handler.LlamaCppProxy(false))
+
+		// Private Routes
+		r.Group(func(r chi.Router) {
+
+			if authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
+				r.Use(authMiddleware.AuthMiddleware(KeyTypeInference))
+			}
+
+			// This handler auto-starts the server if it's not running
+			llamaCppHandler := handler.LlamaCppProxy(true)
+
+			// llama.cpp server-specific proxy endpoints
+			r.Get("/props", llamaCppHandler)
+			// The /slots endpoint is secured (see: https://github.com/ggml-org/llama.cpp/pull/15630)
+			r.Get("/slots", llamaCppHandler)
+			r.Post("/apply-template", llamaCppHandler)
+			r.Post("/completion", llamaCppHandler)
+			r.Post("/detokenize", llamaCppHandler)
+			r.Post("/embeddings", llamaCppHandler)
+			r.Post("/infill", llamaCppHandler)
+			r.Post("/metrics", llamaCppHandler)
+			r.Post("/props", llamaCppHandler)
+			r.Post("/reranking", llamaCppHandler)
+			r.Post("/tokenize", llamaCppHandler)
+
+			// OpenAI-compatible proxy endpoint
+			// Handles all POST requests to /v1/*, including:
+			// - /v1/completions
+			// - /v1/chat/completions
+			// - /v1/embeddings
+			// - /v1/rerank
+			// - /v1/reranking
+			// llamaCppHandler is used here because some users of the llama.cpp endpoints
+			// depend on the "model" field being optional, while handler.OpenAIProxy requires it.
+			r.Post("/v1/*", llamaCppHandler)
+		})
+
+	})
+
 	// Serve WebUI files
 	if err := webui.SetupWebUI(r); err != nil {
 		fmt.Printf("Failed to set up WebUI: %v\n", err)
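
Example request against the new /llama-cpp/{name} passthrough route (an illustrative sketch, not part of the diff): the base URL, the instance name "my-model", and the Bearer-style Authorization header below are assumptions for demonstration; the actual inference-auth scheme depends on how handler.cfg.Auth.RequireInferenceAuth and the auth middleware are configured. Because the request is routed by the {name} path segment and the "/llama-cpp/my-model" prefix is stripped before proxying, the JSON body can omit the "model" field that handler.OpenAIProxy would otherwise require.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical values: base URL, instance name, and API key are
	// illustrative only and are not defined by the diff above.
	base := "http://localhost:8080"
	body := []byte(`{"messages":[{"role":"user","content":"Hello"}]}`)

	// No "model" field is needed: the target instance is selected by the
	// {name} path segment and the prefix is stripped before proxying.
	req, err := http.NewRequest(http.MethodPost,
		base+"/llama-cpp/my-model/v1/chat/completions", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Assumed auth scheme; only relevant when RequireInferenceAuth is enabled.
	req.Header.Set("Authorization", "Bearer <inference-api-key>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}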