Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-06 00:54:23 +00:00)
Added support for proxying llama.cpp native API endpoints via /llama-cpp/{name}/
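
For reference, a minimal Go client sketch of how a caller might exercise the new route once an instance exists; the host/port, instance name ("my-model"), request body, and Bearer-style auth header are placeholders, not part of this commit:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholders: llamactl on localhost:8080, an instance named "my-model",
	// and a Bearer-style inference key. Adjust to your deployment.
	body := bytes.NewBufferString(`{"prompt": "Hello", "n_predict": 16}`)
	req, err := http.NewRequest("POST", "http://localhost:8080/llama-cpp/my-model/completion", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <inference-api-key>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
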
@@ -480,29 +480,15 @@ func (h *Handler) ProxyToInstance() http.HandlerFunc {
 
 		// Strip the "/api/v1/instances/<name>/proxy" prefix from the request URL
 		prefix := fmt.Sprintf("/api/v1/instances/%s/proxy", name)
-		proxyPath := r.URL.Path[len(prefix):]
-
-		// Ensure the proxy path starts with "/"
-		if !strings.HasPrefix(proxyPath, "/") {
-			proxyPath = "/" + proxyPath
-		}
+		r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix)
 
 		// Update the last request time for the instance
 		inst.UpdateLastRequestTime()
 
-		// Modify the request to remove the proxy prefix
-		originalPath := r.URL.Path
-		r.URL.Path = proxyPath
-
 		// Set forwarded headers
 		r.Header.Set("X-Forwarded-Host", r.Header.Get("Host"))
 		r.Header.Set("X-Forwarded-Proto", "http")
 
-		// Restore original path for logging purposes
-		defer func() {
-			r.URL.Path = originalPath
-		}()
-
 		// Forward the request using the cached proxy
 		proxy.ServeHTTP(w, r)
 	}
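
A short standalone sketch (not from the commit) of why strings.TrimPrefix replaces the manual slicing above: it leaves the path untouched when the prefix is absent, whereas path[len(prefix):] would slice garbage or panic on a path shorter than the prefix.

package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := "/api/v1/instances/my-model/proxy"

	// Typical case: both approaches yield "/health".
	full := prefix + "/health"
	fmt.Println(full[len(prefix):])               // "/health"
	fmt.Println(strings.TrimPrefix(full, prefix)) // "/health"

	// Prefix absent: TrimPrefix is a no-op, while slicing by len(prefix)
	// would return garbage or panic if the path is shorter than the prefix.
	other := "/different/path"
	fmt.Println(strings.TrimPrefix(other, prefix)) // "/different/path"
}
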
@@ -585,12 +571,13 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
 		// Route to the appropriate inst based on instance name
 		inst, err := h.InstanceManager.GetInstance(modelName)
 		if err != nil {
-			http.Error(w, "Failed to get instance: "+err.Error(), http.StatusInternalServerError)
+			http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
 			return
 		}
 
 		if !inst.IsRunning() {
-			allowOnDemand := inst.GetOptions() != nil && inst.GetOptions().OnDemandStart != nil && *inst.GetOptions().OnDemandStart
+			options := inst.GetOptions()
+			allowOnDemand := options != nil && options.OnDemandStart != nil && *options.OnDemandStart
 			if !allowOnDemand {
 				http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
 				return
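
The hoisted options local keeps the nil-guarded check on the optional OnDemandStart flag in one place. A standalone sketch of that pattern, with an illustrative Options type rather than llamactl's:

package main

import "fmt"

// Options is illustrative only; OnDemandStart is a *bool so "unset"
// can be distinguished from an explicit false.
type Options struct {
	OnDemandStart *bool
}

// allowOnDemand mirrors the guarded check in the diff: each condition
// protects the next dereference (nil options, nil flag, then its value).
func allowOnDemand(options *Options) bool {
	return options != nil && options.OnDemandStart != nil && *options.OnDemandStart
}

func main() {
	yes := true
	fmt.Println(allowOnDemand(nil))                           // false
	fmt.Println(allowOnDemand(&Options{}))                    // false (flag unset)
	fmt.Println(allowOnDemand(&Options{OnDemandStart: &yes})) // true
}
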
@@ -639,6 +626,84 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
 	}
 }
 
+func (h *Handler) LlamaCppProxy(onDemandStart bool) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+
+		// Get the instance name from the URL parameter
+		name := chi.URLParam(r, "name")
+		if name == "" {
+			http.Error(w, "Instance name cannot be empty", http.StatusBadRequest)
+			return
+		}
+
+		// Route to the appropriate inst based on instance name
+		inst, err := h.InstanceManager.GetInstance(name)
+		if err != nil {
+			http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		options := inst.GetOptions()
+		if options == nil {
+			http.Error(w, "Cannot obtain Instance's options", http.StatusInternalServerError)
+			return
+		}
+
+		if options.BackendType != backends.BackendTypeLlamaCpp {
+			http.Error(w, "Instance is not a llama.cpp server.", http.StatusBadRequest)
+			return
+		}
+
+		if !inst.IsRunning() {
+
+			if !(onDemandStart && options.OnDemandStart != nil && *options.OnDemandStart) {
+				http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
+				return
+			}
+
+			if h.InstanceManager.IsMaxRunningInstancesReached() {
+				if h.cfg.Instances.EnableLRUEviction {
+					err := h.InstanceManager.EvictLRUInstance()
+					if err != nil {
+						http.Error(w, "Cannot start Instance, failed to evict instance "+err.Error(), http.StatusInternalServerError)
+						return
+					}
+				} else {
+					http.Error(w, "Cannot start Instance, maximum number of instances reached", http.StatusConflict)
+					return
+				}
+			}
+
+			// If on-demand start is enabled, start the instance
+			if _, err := h.InstanceManager.StartInstance(name); err != nil {
+				http.Error(w, "Failed to start instance: "+err.Error(), http.StatusInternalServerError)
+				return
+			}
+
+			// Wait for the instance to become healthy before proceeding
+			if err := inst.WaitForHealthy(h.cfg.Instances.OnDemandStartTimeout); err != nil { // 2 minutes timeout
+				http.Error(w, "Instance failed to become healthy: "+err.Error(), http.StatusServiceUnavailable)
+				return
+			}
+		}
+
+		proxy, err := inst.GetProxy()
+		if err != nil {
+			http.Error(w, "Failed to get proxy: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		// Strip the "/llama-cpp/<name>" prefix from the request URL
+		prefix := fmt.Sprintf("/llama-cpp/%s", name)
+		r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix)
+
+		// Update the last request time for the instance
+		inst.UpdateLastRequestTime()
+
+		proxy.ServeHTTP(w, r)
+	}
+}
+
 // ParseCommandRequest represents the request body for command parsing
 type ParseCommandRequest struct {
 	Command string `json:"command"`
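
For context, a minimal sketch of the kind of prefix-stripping reverse proxy LlamaCppProxy wires up; the upstream address and hard-coded instance name are assumptions, and inst.GetProxy()'s real implementation may differ:

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
)

func main() {
	// Assumed upstream: a llama-server listening locally on 8081.
	upstream, err := url.Parse("http://127.0.0.1:8081")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(upstream)

	// Strip "/llama-cpp/<name>" before handing the request to the proxy,
	// mirroring what LlamaCppProxy does above for the instance "my-model".
	http.HandleFunc("/llama-cpp/my-model/", func(w http.ResponseWriter, r *http.Request) {
		r.URL.Path = strings.TrimPrefix(r.URL.Path, "/llama-cpp/my-model")
		proxy.ServeHTTP(w, r)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
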
@@ -103,6 +103,51 @@ func SetupRouter(handler *Handler) *chi.Mux {
 
 		})
 
+		r.Route("/llama-cpp/{name}", func(r chi.Router) {
+
+			// Public Routes
+			// Allow llama-cpp server to serve its own WebUI if it is running.
+			// Don't auto start the server since it can be accessed without an API key
+			r.Get("/", handler.LlamaCppProxy(false))
+
+			// Private Routes
+			r.Group(func(r chi.Router) {
+
+				if authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
+					r.Use(authMiddleware.AuthMiddleware(KeyTypeInference))
+				}
+
+				// This handler auto start the server if it's not running
+				llamaCppHandler := handler.LlamaCppProxy(true)
+
+				// llama.cpp server specific proxy endpoints
+				r.Get("/props", llamaCppHandler)
+				// /slots endpoint is secured (see: https://github.com/ggml-org/llama.cpp/pull/15630)
+				r.Get("/slots", llamaCppHandler)
+				r.Post("/apply-template", llamaCppHandler)
+				r.Post("/completion", llamaCppHandler)
+				r.Post("/detokenize", llamaCppHandler)
+				r.Post("/embeddings", llamaCppHandler)
+				r.Post("/infill", llamaCppHandler)
+				r.Post("/metrics", llamaCppHandler)
+				r.Post("/props", llamaCppHandler)
+				r.Post("/reranking", llamaCppHandler)
+				r.Post("/tokenize", llamaCppHandler)
+
+				// OpenAI-compatible proxy endpoint
+				// Handles all POST requests to /v1/*, including:
+				// - /v1/completions
+				// - /v1/chat/completions
+				// - /v1/embeddings
+				// - /v1/rerank
+				// - /v1/reranking
+				// llamaCppHandler is used here because some users of llama.cpp endpoints depend
+				// on "model" field being optional, and handler.OpenAIProxy requires it.
+				r.Post("/v1/*", llamaCppHandler)
+			})
+
+		})
+
 	// Serve WebUI files
 	if err := webui.SetupWebUI(r); err != nil {
 		fmt.Printf("Failed to set up WebUI: %v\n", err)
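
A stripped-down chi sketch of how the public root route and the authenticated group above compose; the stand-in handlers and auth middleware are placeholders, not llamactl's:

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()

	r.Route("/llama-cpp/{name}", func(r chi.Router) {
		// Public: no auth middleware applied to the WebUI root.
		r.Get("/", func(w http.ResponseWriter, req *http.Request) {
			w.Write([]byte("webui for " + chi.URLParam(req, "name")))
		})

		// Private: everything registered inside the group inherits its middleware.
		r.Group(func(r chi.Router) {
			r.Use(func(next http.Handler) http.Handler { // stand-in for the auth middleware
				return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
					if req.Header.Get("Authorization") == "" {
						http.Error(w, "unauthorized", http.StatusUnauthorized)
						return
					}
					next.ServeHTTP(w, req)
				})
			})
			r.Post("/v1/*", func(w http.ResponseWriter, req *http.Request) {
				w.Write([]byte("proxied " + req.URL.Path))
			})
		})
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
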