Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-06 00:54:23 +00:00)

Merge branch 'main' into feat/multi-host
@@ -197,6 +197,7 @@ server:
   host: "0.0.0.0" # Server host to bind to
   port: 8080 # Server port to bind to
   allowed_origins: ["*"] # Allowed CORS origins (default: all)
+  allowed_headers: ["*"] # Allowed CORS headers (default: all)
   enable_swagger: false # Enable Swagger UI for API docs

 backends:
@@ -17,6 +17,7 @@ server:
   host: "0.0.0.0" # Server host to bind to
   port: 8080 # Server port to bind to
   allowed_origins: ["*"] # Allowed CORS origins (default: all)
+  allowed_headers: ["*"] # Allowed CORS headers (default: all)
   enable_swagger: false # Enable Swagger UI for API docs

 backends:
@@ -104,6 +105,7 @@ server:
   host: "0.0.0.0" # Server host to bind to (default: "0.0.0.0")
   port: 8080 # Server port to bind to (default: 8080)
   allowed_origins: ["*"] # CORS allowed origins (default: ["*"])
+  allowed_headers: ["*"] # CORS allowed headers (default: ["*"])
   enable_swagger: false # Enable Swagger UI (default: false)
 ```

llamactl.yaml (new file, +5 lines)
@@ -0,0 +1,5 @@
+auth:
+  management_keys:
+    - test-mgmt
+  inference_keys:
+    - test-inf
@@ -58,6 +58,9 @@ type ServerConfig struct {
     // Allowed origins for CORS (e.g., "http://localhost:3000")
     AllowedOrigins []string `yaml:"allowed_origins"`

+    // Allowed headers for CORS (e.g., "Accept", "Authorization", "Content-Type", "X-CSRF-Token")
+    AllowedHeaders []string `yaml:"allowed_headers"`
+
     // Enable Swagger UI for API documentation
     EnableSwagger bool `yaml:"enable_swagger"`

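To illustrate how the new field maps from YAML, here is a minimal, self-contained sketch. It assumes the config is parsed with a YAML library such as gopkg.in/yaml.v3 (the struct tags above suggest this, but the parsing code is not part of this diff), and the struct below is a stripped-down stand-in for ServerConfig:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3" // assumed YAML library; only the struct tags appear in this diff
)

// serverConfig is a stripped-down stand-in for ServerConfig, keeping the two CORS fields.
type serverConfig struct {
	AllowedOrigins []string `yaml:"allowed_origins"`
	AllowedHeaders []string `yaml:"allowed_headers"`
}

func main() {
	raw := []byte(`
allowed_origins: ["http://localhost:3000"]
allowed_headers: ["Accept", "Authorization", "Content-Type"]
`)
	var cfg serverConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.AllowedOrigins) // [http://localhost:3000]
	fmt.Println(cfg.AllowedHeaders) // [Accept Authorization Content-Type]
}
```

If allowed_headers is omitted, the LoadConfig defaults in the next hunk fall back to ["*"].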
@@ -143,6 +146,7 @@ func LoadConfig(configPath string) (AppConfig, error) {
             Host: "0.0.0.0",
             Port: 8080,
             AllowedOrigins: []string{"*"}, // Default to allow all origins
+            AllowedHeaders: []string{"*"}, // Default to allow all headers
             EnableSwagger: false,
         },
         Backends: BackendConfig{
@@ -314,19 +314,32 @@ func (im *instanceManager) loadInstance(name, path string) error {
 }

 // autoStartInstances starts instances that were running when persisted and have auto-restart enabled
+// For instances with auto-restart disabled, it sets their status to Stopped
 func (im *instanceManager) autoStartInstances() {
     im.mu.RLock()
     var instancesToStart []*instance.Process
+    var instancesToStop []*instance.Process
     for _, inst := range im.instances {
         if inst.IsRunning() && // Was running when persisted
             inst.GetOptions() != nil &&
-            inst.GetOptions().AutoRestart != nil &&
-            *inst.GetOptions().AutoRestart {
+            inst.GetOptions().AutoRestart != nil {
+            if *inst.GetOptions().AutoRestart {
                 instancesToStart = append(instancesToStart, inst)
+            } else {
+                // Instance was running but auto-restart is disabled, mark as stopped
+                instancesToStop = append(instancesToStop, inst)
+            }
         }
     }
     im.mu.RUnlock()

+    // Stop instances that have auto-restart disabled
+    for _, inst := range instancesToStop {
+        log.Printf("Instance %s was running but auto-restart is disabled, setting status to stopped", inst.Name)
+        inst.SetStatus(instance.Stopped)
+    }
+
+    // Start instances that have auto-restart enabled
     for _, inst := range instancesToStart {
         log.Printf("Auto-starting instance %s", inst.Name)
         // Reset running state before starting (since Start() expects stopped instance)
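The branch added above distinguishes three states of the AutoRestart option. A small, self-contained illustration of that *bool pattern (the function name here is illustrative, not part of the codebase):

```go
package main

import "fmt"

// describe mirrors how autoStartInstances treats the AutoRestart *bool:
// nil is ignored, true is queued for auto-start, false is marked Stopped.
func describe(autoRestart *bool) string {
	switch {
	case autoRestart == nil:
		return "unset: ignored by autoStartInstances"
	case *autoRestart:
		return "true: queued for auto-start"
	default:
		return "false: status set to Stopped"
	}
}

func main() {
	enabled, disabled := true, false
	fmt.Println(describe(nil))       // unset: ignored by autoStartInstances
	fmt.Println(describe(&enabled))  // true: queued for auto-start
	fmt.Println(describe(&disabled)) // false: status set to Stopped
}
```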
@@ -209,3 +209,66 @@ func createTestManager() manager.InstanceManager {
     }
     return manager.NewInstanceManager(backendConfig, cfg, nil)
 }
+
+func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
+    tempDir := t.TempDir()
+
+    backendConfig := config.BackendConfig{
+        LlamaCpp: config.BackendSettings{
+            Command: "llama-server",
+        },
+    }
+
+    cfg := config.InstancesConfig{
+        PortRange: [2]int{8000, 9000},
+        InstancesDir: tempDir,
+        MaxInstances: 10,
+        TimeoutCheckInterval: 5,
+    }
+
+    // Create first manager and instance with auto-restart disabled
+    manager1 := manager.NewInstanceManager(backendConfig, cfg)
+
+    autoRestart := false
+    options := &instance.CreateInstanceOptions{
+        BackendType: backends.BackendTypeLlamaCpp,
+        AutoRestart: &autoRestart,
+        LlamaServerOptions: &llamacpp.LlamaServerOptions{
+            Model: "/path/to/model.gguf",
+            Port: 8080,
+        },
+    }
+
+    inst, err := manager1.CreateInstance("test-instance", options)
+    if err != nil {
+        t.Fatalf("CreateInstance failed: %v", err)
+    }
+
+    // Simulate instance being in running state when persisted
+    // (this would happen if the instance was running when llamactl was stopped)
+    inst.SetStatus(instance.Running)
+
+    // Shutdown first manager
+    manager1.Shutdown()
+
+    // Create second manager (simulating restart of llamactl)
+    manager2 := manager.NewInstanceManager(backendConfig, cfg)
+
+    // Get the loaded instance
+    loadedInst, err := manager2.GetInstance("test-instance")
+    if err != nil {
+        t.Fatalf("GetInstance failed: %v", err)
+    }
+
+    // The instance should be marked as Stopped, not Running
+    // because auto-restart is disabled
+    if loadedInst.IsRunning() {
+        t.Errorf("Expected instance with auto-restart disabled to be stopped after manager restart, but it was running")
+    }
+
+    if loadedInst.GetStatus() != instance.Stopped {
+        t.Errorf("Expected instance status to be Stopped, got %v", loadedInst.GetStatus())
+    }
+
+    manager2.Shutdown()
+}
@@ -2,6 +2,7 @@ package server

 import (
     "encoding/json"
+    "fmt"
     "llamactl/pkg/backends"
     "llamactl/pkg/backends/llamacpp"
     "llamactl/pkg/backends/mlx"
@@ -10,6 +11,8 @@ import (
     "net/http"
     "os/exec"
     "strings"
+
+    "github.com/go-chi/chi/v5"
 )

 // ParseCommandRequest represents the request body for command parsing
@@ -17,6 +20,84 @@ type ParseCommandRequest struct {
     Command string `json:"command"`
 }

+func (h *Handler) LlamaCppProxy(onDemandStart bool) http.HandlerFunc {
+    return func(w http.ResponseWriter, r *http.Request) {
+
+        // Get the instance name from the URL parameter
+        name := chi.URLParam(r, "name")
+        if name == "" {
+            http.Error(w, "Instance name cannot be empty", http.StatusBadRequest)
+            return
+        }
+
+        // Route to the appropriate inst based on instance name
+        inst, err := h.InstanceManager.GetInstance(name)
+        if err != nil {
+            http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
+            return
+        }
+
+        options := inst.GetOptions()
+        if options == nil {
+            http.Error(w, "Cannot obtain Instance's options", http.StatusInternalServerError)
+            return
+        }
+
+        if options.BackendType != backends.BackendTypeLlamaCpp {
+            http.Error(w, "Instance is not a llama.cpp server.", http.StatusBadRequest)
+            return
+        }
+
+        if !inst.IsRunning() {
+
+            if !(onDemandStart && options.OnDemandStart != nil && *options.OnDemandStart) {
+                http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
+                return
+            }
+
+            if h.InstanceManager.IsMaxRunningInstancesReached() {
+                if h.cfg.Instances.EnableLRUEviction {
+                    err := h.InstanceManager.EvictLRUInstance()
+                    if err != nil {
+                        http.Error(w, "Cannot start Instance, failed to evict instance "+err.Error(), http.StatusInternalServerError)
+                        return
+                    }
+                } else {
+                    http.Error(w, "Cannot start Instance, maximum number of instances reached", http.StatusConflict)
+                    return
+                }
+            }
+
+            // If on-demand start is enabled, start the instance
+            if _, err := h.InstanceManager.StartInstance(name); err != nil {
+                http.Error(w, "Failed to start instance: "+err.Error(), http.StatusInternalServerError)
+                return
+            }
+
+            // Wait for the instance to become healthy before proceeding
+            if err := inst.WaitForHealthy(h.cfg.Instances.OnDemandStartTimeout); err != nil { // 2 minutes timeout
+                http.Error(w, "Instance failed to become healthy: "+err.Error(), http.StatusServiceUnavailable)
+                return
+            }
+        }
+
+        proxy, err := inst.GetProxy()
+        if err != nil {
+            http.Error(w, "Failed to get proxy: "+err.Error(), http.StatusInternalServerError)
+            return
+        }
+
+        // Strip the "/llama-cpp/<name>" prefix from the request URL
+        prefix := fmt.Sprintf("/llama-cpp/%s", name)
+        r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix)
+
+        // Update the last request time for the instance
+        inst.UpdateLastRequestTime()
+
+        proxy.ServeHTTP(w, r)
+    }
+}
+
 // ParseLlamaCommand godoc
 // @Summary Parse llama-server command
 // @Description Parses a llama-server command string into instance options
@@ -102,7 +102,7 @@ func (h *Handler) GetInstance() http.HandlerFunc {

         inst, err := h.InstanceManager.GetInstance(name)
         if err != nil {
-            http.Error(w, "Failed to get instance: "+err.Error(), http.StatusInternalServerError)
+            http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
             return
         }

@@ -361,12 +361,6 @@ func (h *Handler) ProxyToInstance() http.HandlerFunc {
             return
         }

-        // Check if this is a remote instance
-        if inst.IsRemote() {
-            h.RemoteInstanceProxy(w, r, name, inst)
-            return
-        }
-
         if !inst.IsRunning() {
             http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
             return
@@ -381,29 +375,15 @@ func (h *Handler) ProxyToInstance() http.HandlerFunc {

         // Strip the "/api/v1/instances/<name>/proxy" prefix from the request URL
         prefix := fmt.Sprintf("/api/v1/instances/%s/proxy", name)
-        proxyPath := r.URL.Path[len(prefix):]
-
-        // Ensure the proxy path starts with "/"
-        if !strings.HasPrefix(proxyPath, "/") {
-            proxyPath = "/" + proxyPath
-        }
+        r.URL.Path = strings.TrimPrefix(r.URL.Path, prefix)

         // Update the last request time for the instance
         inst.UpdateLastRequestTime()

-        // Modify the request to remove the proxy prefix
-        originalPath := r.URL.Path
-        r.URL.Path = proxyPath
-
         // Set forwarded headers
         r.Header.Set("X-Forwarded-Host", r.Header.Get("Host"))
         r.Header.Set("X-Forwarded-Proto", "http")

-        // Restore original path for logging purposes
-        defer func() {
-            r.URL.Path = originalPath
-        }()
-
         // Forward the request using the cached proxy
         proxy.ServeHTTP(w, r)
     }
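The removed fix-up above guarded against a proxy path missing its leading slash; with strings.TrimPrefix, the leading slash of the remaining sub-path is preserved, as this self-contained snippet shows (the instance name is a placeholder). Note that a request to exactly the prefix now trims to an empty path:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := fmt.Sprintf("/api/v1/instances/%s/proxy", "my-instance")

	// A request below the prefix keeps its leading "/" after trimming.
	fmt.Println(strings.TrimPrefix("/api/v1/instances/my-instance/proxy/v1/models", prefix)) // "/v1/models"

	// A request to exactly the prefix trims to the empty string.
	fmt.Println(strings.TrimPrefix("/api/v1/instances/my-instance/proxy", prefix)) // ""
}
```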
@@ -87,7 +87,7 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
         // Route to the appropriate inst based on instance name
         inst, err := h.InstanceManager.GetInstance(modelName)
         if err != nil {
-            http.Error(w, "Failed to get instance: "+err.Error(), http.StatusInternalServerError)
+            http.Error(w, "Invalid instance: "+err.Error(), http.StatusBadRequest)
             return
         }

@@ -98,7 +98,8 @@
         }

         if !inst.IsRunning() {
-            allowOnDemand := inst.GetOptions() != nil && inst.GetOptions().OnDemandStart != nil && *inst.GetOptions().OnDemandStart
+            options := inst.GetOptions()
+            allowOnDemand := options != nil && options.OnDemandStart != nil && *options.OnDemandStart
             if !allowOnDemand {
                 http.Error(w, "Instance is not running", http.StatusServiceUnavailable)
                 return
@@ -20,7 +20,7 @@ func SetupRouter(handler *Handler) *chi.Mux {
     r.Use(cors.Handler(cors.Options{
         AllowedOrigins: handler.cfg.Server.AllowedOrigins,
         AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
-        AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
+        AllowedHeaders: handler.cfg.Server.AllowedHeaders,
         ExposedHeaders: []string{"Link"},
         AllowCredentials: false,
         MaxAge: 300,
@@ -112,6 +112,51 @@ func SetupRouter(handler *Handler) *chi.Mux {

     })
+
+    r.Route("/llama-cpp/{name}", func(r chi.Router) {
+
+        // Public Routes
+        // Allow llama-cpp server to serve its own WebUI if it is running.
+        // Don't auto start the server since it can be accessed without an API key
+        r.Get("/", handler.LlamaCppProxy(false))
+
+        // Private Routes
+        r.Group(func(r chi.Router) {
+
+            if authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
+                r.Use(authMiddleware.AuthMiddleware(KeyTypeInference))
+            }
+
+            // This handler auto start the server if it's not running
+            llamaCppHandler := handler.LlamaCppProxy(true)
+
+            // llama.cpp server specific proxy endpoints
+            r.Get("/props", llamaCppHandler)
+            // /slots endpoint is secured (see: https://github.com/ggml-org/llama.cpp/pull/15630)
+            r.Get("/slots", llamaCppHandler)
+            r.Post("/apply-template", llamaCppHandler)
+            r.Post("/completion", llamaCppHandler)
+            r.Post("/detokenize", llamaCppHandler)
+            r.Post("/embeddings", llamaCppHandler)
+            r.Post("/infill", llamaCppHandler)
+            r.Post("/metrics", llamaCppHandler)
+            r.Post("/props", llamaCppHandler)
+            r.Post("/reranking", llamaCppHandler)
+            r.Post("/tokenize", llamaCppHandler)
+
+            // OpenAI-compatible proxy endpoint
+            // Handles all POST requests to /v1/*, including:
+            // - /v1/completions
+            // - /v1/chat/completions
+            // - /v1/embeddings
+            // - /v1/rerank
+            // - /v1/reranking
+            // llamaCppHandler is used here because some users of llama.cpp endpoints depend
+            // on "model" field being optional, and handler.OpenAIProxy requires it.
+            r.Post("/v1/*", llamaCppHandler)
+        })
+
+    })

     // Serve WebUI files
     if err := webui.SetupWebUI(r); err != nil {
         fmt.Printf("Failed to set up WebUI: %v\n", err)
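For reference, a hedged usage sketch of the new /llama-cpp/{name} group. It assumes a llamactl server on the default host/port from the config hunks above, an instance named "my-model" (a placeholder), and that the inference key (for example test-inf from the llamactl.yaml added in this commit) is sent as a Bearer token; check the project's auth middleware for the exact header it expects:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Instance name and key are placeholders; the route shape comes from the
	// r.Route("/llama-cpp/{name}", ...) block above.
	url := "http://localhost:8080/llama-cpp/my-model/v1/chat/completions"
	body := `{"messages": [{"role": "user", "content": "Hello"}]}` // "model" may be omitted on this route

	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Assumption: inference keys are presented as Bearer tokens.
	req.Header.Set("Authorization", "Bearer test-inf")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(out))
}
```

Because the private group wires up LlamaCppProxy(true), such a request can start the instance on demand, provided on_demand_start is enabled for it.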
@@ -11,11 +11,13 @@ describe('API Error Handling', () => {
   })

   it('converts HTTP errors to meaningful messages', async () => {
-    mockFetch.mockResolvedValue({
+    const mockResponse = {
       ok: false,
       status: 409,
-      text: () => Promise.resolve('Instance already exists')
-    })
+      text: () => Promise.resolve('Instance already exists'),
+      clone: function() { return this }
+    }
+    mockFetch.mockResolvedValue(mockResponse)

     await expect(instancesApi.create('existing', {}))
       .rejects
@@ -23,11 +25,13 @@ describe('API Error Handling', () => {
   })

   it('handles empty error responses gracefully', async () => {
-    mockFetch.mockResolvedValue({
+    const mockResponse = {
       ok: false,
       status: 500,
-      text: () => Promise.resolve('')
-    })
+      text: () => Promise.resolve(''),
+      clone: function() { return this }
+    }
+    mockFetch.mockResolvedValue(mockResponse)

     await expect(instancesApi.list())
       .rejects
@@ -49,12 +49,9 @@ async function apiCall<T>(
   } else {
     // Handle empty responses for JSON endpoints
     const contentLength = response.headers.get('content-length');
-    if (contentLength === '0' || contentLength === null) {
-      const text = await response.text();
-      if (text.trim() === '') {
+    if (contentLength === '0') {
       return {} as T; // Return empty object for empty JSON responses
     }
-    }
     const data = await response.json() as T;
     return data;
   }
@@ -26,7 +26,8 @@ export async function handleApiError(response: Response): Promise<void> {
   }

   if (!response.ok) {
-    const errorMessage = await parseErrorResponse(response)
+    // Clone the response before reading to avoid consuming the body stream
+    const errorMessage = await parseErrorResponse(response.clone())
     throw new Error(errorMessage)
   }
 }
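The same read-once pitfall exists on the Go side of a proxy stack: an http.Response body is a stream that can only be consumed once. The following self-contained snippet is offered purely as an analogy to the fetch Response.clone() fix above, showing the usual Go workaround of buffering the body and restoring it before another reader touches it:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A throwaway server that answers with the same kind of error the tests above mock.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Instance already exists", http.StatusConflict)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// First read consumes the stream...
	raw, _ := io.ReadAll(resp.Body)
	// ...so restore it before handing the response to another reader.
	resp.Body = io.NopCloser(bytes.NewReader(raw))

	again, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(raw) == string(again)) // 409 true
}
```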