29 Commits

Author SHA1 Message Date
34edb8a2e5 Merge pull request #78 from lordmathis/feat/inflight-requests
feat: Wait for inflight requests to finish before shutting down an instance
2025-10-30 18:08:55 +01:00
560850f86d Add shutdown state checks in HTTP handlers 2025-10-30 18:00:59 +01:00
c340439306 Add support for 'shutting_down' state in HealthBadge and health service 2025-10-29 00:09:18 +01:00
77c0e22fd0 Use instance's ServeHTTP in handlers 2025-10-29 00:01:29 +01:00
d65c5ab717 Wait for inflight requests before stopping 2025-10-29 00:00:56 +01:00
2b94244c8a Replace GetProxy with ServeHttp in instance 2025-10-29 00:00:02 +01:00
2e5644db53 Implement inflight request tracking 2025-10-28 23:59:02 +01:00
7ee22fee51 Implement shutting down status 2025-10-28 23:53:11 +01:00
e5baedb776 Merge pull request #76 from lordmathis/feat/import-export
feat: Add support for instance import and export on frontend
2025-10-27 20:46:48 +01:00
e6205b930e Document import and export features 2025-10-27 20:44:28 +01:00
f9eb424690 Fix concurrent map write issue in MarshalJSON by initializing BackendOptions 2025-10-27 20:36:42 +01:00
5b84b64623 Fix some typescript issues 2025-10-27 20:36:31 +01:00
7813a5f2be Move import instance configuration to InstanceDialog component 2025-10-27 20:17:18 +01:00
a00c9b82a6 Add import functionality for instance configuration from JSON file 2025-10-27 20:11:22 +01:00
cbfa6bd48f Fix export functionality to exclude computed field from JSON output 2025-10-27 19:59:43 +01:00
bee0f72c10 Add export functionality to InstanceCard component 2025-10-27 19:55:07 +01:00
a5d8f541f0 Merge pull request #75 from lordmathis/fix/delete-instance
fix: Prevent restarting instance from getting deleted
2025-10-27 19:27:58 +01:00
dfcc16083c Update test configuration to use 'sh -c "sleep 999999"' command 2025-10-27 19:25:13 +01:00
6ec2919049 Fix instance start simulation in TestUpdateInstance 2025-10-27 19:14:54 +01:00
d6a6f377fc Fix logger race condition 2025-10-27 19:06:06 +01:00
cd9a71d9fc Update test configuration to use 'yes' command instead of 'sleep' 2025-10-27 18:54:20 +01:00
2c4cc5a69a Fix manager tests 2025-10-27 18:47:17 +01:00
b1fc1d2dc8 Add InstancesDir to test configuration for instance management 2025-10-27 18:38:23 +01:00
08c47a16a0 Fix operations tests 2025-10-27 18:35:16 +01:00
219db7abce Move port range validation to config 2025-10-27 18:23:49 +01:00
14131a6274 Remove redundant code 2025-10-27 18:18:25 +01:00
e65f4f1641 Remove unsupported error wrapping from log.Printf 2025-10-27 18:01:58 +01:00
5ef0654cdd Use %w for error wrapping in log messages across multiple files 2025-10-27 17:54:39 +01:00
1814772fa2 Fix instance deletion check to account for restarting status 2025-10-27 17:42:27 +01:00
30 changed files with 356 additions and 267 deletions
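
The thread running through these commits is a drain-before-stop pattern: the proxy counts requests in flight, and stop() waits for that count to reach zero before signalling the process. The sketch below is a minimal, standalone rendering of that idea; the type and method names are illustrative, not the project's exact API, and the real code additionally flips the instance into a shutting_down state so handlers reject new work while the drain runs.

package sketch

import (
	"log"
	"net/http"
	"net/http/httputil"
	"sync/atomic"
	"time"
)

// proxy tracks how many requests it is currently serving.
type proxy struct {
	inflight atomic.Int32
	rp       *httputil.ReverseProxy
}

// serveHTTP counts the request in and out around the reverse proxy call.
func (p *proxy) serveHTTP(w http.ResponseWriter, r *http.Request) {
	p.inflight.Add(1)
	defer p.inflight.Add(-1)
	p.rp.ServeHTTP(w, r)
}

// drain polls until no requests are in flight or the deadline passes,
// mirroring the 30-second wait added to process.stop() in the diffs below.
func (p *proxy) drain(timeout time.Duration) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if p.inflight.Load() == 0 {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	log.Println("drain timed out; stopping with requests still in flight")
}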

View File

@@ -5,6 +5,7 @@ import (
"llamactl/pkg/config"
"llamactl/pkg/manager"
"llamactl/pkg/server"
"log"
"net/http"
"os"
"os/signal"
@@ -38,8 +39,7 @@ func main() {
configPath := os.Getenv("LLAMACTL_CONFIG_PATH")
cfg, err := config.LoadConfig(configPath)
if err != nil {
fmt.Printf("Error loading config: %v\n", err)
fmt.Println("Using default configuration.")
log.Printf("Error loading config: %v\nUsing default configuration.", err)
}
// Set version information
@@ -50,13 +50,11 @@ func main() {
// Create the data directory if it doesn't exist
if cfg.Instances.AutoCreateDirs {
if err := os.MkdirAll(cfg.Instances.InstancesDir, 0755); err != nil {
fmt.Printf("Error creating config directory %s: %v\n", cfg.Instances.InstancesDir, err)
fmt.Println("Persistence will not be available.")
log.Printf("Error creating config directory %s: %v\nPersistence will not be available.", cfg.Instances.InstancesDir, err)
}
if err := os.MkdirAll(cfg.Instances.LogsDir, 0755); err != nil {
fmt.Printf("Error creating log directory %s: %v\n", cfg.Instances.LogsDir, err)
fmt.Println("Instance logs will not be available.")
log.Printf("Error creating log directory %s: %v\nInstance logs will not be available.", cfg.Instances.LogsDir, err)
}
}
@@ -81,7 +79,7 @@ func main() {
go func() {
fmt.Printf("Llamactl server listening on %s:%d\n", cfg.Server.Host, cfg.Server.Port)
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
fmt.Printf("Error starting server: %v\n", err)
log.Printf("Error starting server: %v\n", err)
}
}()
@@ -90,7 +88,7 @@ func main() {
fmt.Println("Shutting down server...")
if err := server.Close(); err != nil {
fmt.Printf("Error shutting down server: %v\n", err)
log.Printf("Error shutting down server: %v\n", err)
} else {
fmt.Println("Server shut down gracefully.")
}

View File

@@ -42,6 +42,7 @@ Each instance is displayed as a card showing:
![Create Instance Screenshot](images/create_instance.png)
1. Click the **"Create Instance"** button on the dashboard
2. *Optional*: Click **"Import"** in the dialog header to load a previously exported configuration
2. Enter a unique **Name** for your instance (only required field)
3. **Select Target Node**: Choose which node to deploy the instance to from the dropdown
4. **Choose Backend Type**:
@@ -219,6 +220,12 @@ curl -X PUT http://localhost:8080/api/v1/instances/{name} \
Configuration changes require restarting the instance to take effect.
## Export Instance
**Via Web UI**
1. Click the **"More actions"** button (three dots) on an instance card
2. Click **"Export"** to download the instance configuration as a JSON file
## View Logs
**Via Web UI**

View File

@@ -93,6 +93,8 @@ func (o *Options) MarshalJSON() ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("failed to marshal backend options: %w", err)
}
// Create a new map to avoid concurrent map writes
aux.BackendOptions = make(map[string]any)
if err := json.Unmarshal(optionsData, &aux.BackendOptions); err != nil {
return nil, fmt.Errorf("failed to unmarshal backend options to map: %w", err)
}
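
The single added line above is the whole fix; as a hedged aside, here is why the fresh map matters, using illustrative names rather than the project's actual Options struct. json.Unmarshal writes keys into whatever map it is handed, so a map shared across concurrent MarshalJSON calls becomes a concurrent write target.

package sketch

import (
	"encoding/json"
	"fmt"
)

// backendOptionsToMap converts an arbitrary backend options struct into a
// generic map. Allocating the destination map inside the function means each
// caller writes into its own map, so concurrent calls cannot race on it.
func backendOptionsToMap(backend any) (map[string]any, error) {
	data, err := json.Marshal(backend)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal backend options: %w", err)
	}
	out := make(map[string]any) // fresh map per call, never shared
	if err := json.Unmarshal(data, &out); err != nil {
		return nil, fmt.Errorf("failed to unmarshal backend options to map: %w", err)
	}
	return out, nil
}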

View File

@@ -1,6 +1,7 @@
package config
import (
"fmt"
"log"
"os"
"path/filepath"
@@ -231,6 +232,11 @@ func LoadConfig(configPath string) (AppConfig, error) {
cfg.Instances.LogsDir = filepath.Join(cfg.Instances.DataDir, "logs")
}
// Validate port range
if cfg.Instances.PortRange[0] <= 0 || cfg.Instances.PortRange[1] <= 0 || cfg.Instances.PortRange[0] >= cfg.Instances.PortRange[1] {
return AppConfig{}, fmt.Errorf("invalid port range: %v", cfg.Instances.PortRange)
}
return cfg, nil
}

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"llamactl/pkg/config"
"log"
"net/http/httputil"
"net/http"
"time"
)
@@ -182,15 +182,6 @@ func (i *Instance) GetPort() int {
return i.options.GetPort()
}
// GetProxy returns the reverse proxy for this instance
func (i *Instance) GetProxy() (*httputil.ReverseProxy, error) {
if i.proxy == nil {
return nil, fmt.Errorf("instance %s has no proxy component", i.Name)
}
return i.proxy.get()
}
func (i *Instance) IsRemote() bool {
opts := i.GetOptions()
if opts == nil {
@@ -242,6 +233,22 @@ func (i *Instance) ShouldTimeout() bool {
return i.proxy.shouldTimeout()
}
// GetInflightRequests returns the current number of inflight requests
func (i *Instance) GetInflightRequests() int32 {
if i.proxy == nil {
return 0
}
return i.proxy.getInflightRequests()
}
// ServeHTTP serves HTTP requests through the proxy with request tracking and shutdown handling
func (i *Instance) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
if i.proxy == nil {
return fmt.Errorf("instance %s has no proxy component", i.Name)
}
return i.proxy.serveHTTP(w, r)
}
func (i *Instance) getCommand() string {
opts := i.GetOptions()
if opts == nil {

View File

@@ -171,64 +171,6 @@ func TestSetOptions(t *testing.T) {
}
}
func TestGetProxy(t *testing.T) {
globalConfig := &config.AppConfig{
Backends: config.BackendConfig{
LlamaCpp: config.BackendSettings{
Command: "llama-server",
Args: []string{},
},
MLX: config.BackendSettings{
Command: "mlx_lm.server",
Args: []string{},
},
VLLM: config.BackendSettings{
Command: "vllm",
Args: []string{"serve"},
},
},
Instances: config.InstancesConfig{
LogsDir: "/tmp/test",
},
Nodes: map[string]config.NodeConfig{},
LocalNode: "main",
}
options := &instance.Options{
Nodes: map[string]struct{}{"main": {}},
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Host: "localhost",
Port: 8080,
},
},
}
// Mock onStatusChange function
mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
inst := instance.New("test-instance", globalConfig, options, mockOnStatusChange)
// Get proxy for the first time
proxy1, err := inst.GetProxy()
if err != nil {
t.Fatalf("GetProxy failed: %v", err)
}
if proxy1 == nil {
t.Error("Expected proxy to be created")
}
// Get proxy again - should return cached version
proxy2, err := inst.GetProxy()
if err != nil {
t.Fatalf("GetProxy failed: %v", err)
}
if proxy1 != proxy2 {
t.Error("Expected cached proxy to be returned")
}
}
func TestMarshalJSON(t *testing.T) {
globalConfig := &config.AppConfig{
Backends: config.BackendConfig{
@@ -613,11 +555,6 @@ func TestRemoteInstanceOperations(t *testing.T) {
t.Error("Expected error when restarting remote instance")
}
// GetProxy should fail for remote instance
if _, err := inst.GetProxy(); err != nil {
t.Error("Expected no error when getting proxy for remote instance")
}
// GetLogs should fail for remote instance
if _, err := inst.GetLogs(10); err == nil {
t.Error("Expected error when getting logs for remote instance")

View File

@@ -7,13 +7,14 @@ import (
"os"
"strings"
"sync"
"sync/atomic"
"time"
)
type logger struct {
name string
logDir string
logFile *os.File
logFile atomic.Pointer[os.File]
logFilePath string
mu sync.RWMutex
}
@@ -47,11 +48,11 @@ func (i *logger) create() error {
return fmt.Errorf("failed to create stdout log file: %w", err)
}
i.logFile = logFile
i.logFile.Store(logFile)
// Write a startup marker to both files
timestamp := time.Now().Format("2006-01-02 15:04:05")
fmt.Fprintf(i.logFile, "\n=== Instance %s started at %s ===\n", i.name, timestamp)
fmt.Fprintf(logFile, "\n=== Instance %s started at %s ===\n", i.name, timestamp)
return nil
}
@@ -102,11 +103,12 @@ func (i *logger) close() {
i.mu.Lock()
defer i.mu.Unlock()
if i.logFile != nil {
logFile := i.logFile.Swap(nil)
if logFile != nil {
timestamp := time.Now().Format("2006-01-02 15:04:05")
fmt.Fprintf(i.logFile, "=== Instance %s stopped at %s ===\n\n", i.name, timestamp)
i.logFile.Close()
i.logFile = nil
fmt.Fprintf(logFile, "=== Instance %s stopped at %s ===\n\n", i.name, timestamp)
logFile.Sync() // Ensure all buffered data is written to disk
logFile.Close()
}
}
@@ -117,9 +119,9 @@ func (i *logger) readOutput(reader io.ReadCloser) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
line := scanner.Text()
if i.logFile != nil {
fmt.Fprintln(i.logFile, line)
i.logFile.Sync() // Ensure data is written to disk
// Use atomic load to avoid lock contention on every line
if logFile := i.logFile.Load(); logFile != nil {
fmt.Fprintln(logFile, line)
}
}
}
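
Stepping back from the hunks above, the logger race fix boils down to holding the log file behind an atomic pointer: the per-line writer does a lock-free Load, and close() wins or loses a single Swap. A generic sketch of that pattern follows; the names here are made up, and it assumes Go 1.19+ for atomic.Pointer.

package sketch

import (
	"fmt"
	"os"
	"sync/atomic"
)

// sink holds the current log file behind an atomic pointer so the per-line
// writer never takes a lock and close() runs its cleanup at most once.
type sink struct {
	file atomic.Pointer[os.File]
}

func (s *sink) open(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	s.file.Store(f)
	return nil
}

// writeLine is the hot path: a lock-free Load, then write only if still open.
func (s *sink) writeLine(line string) {
	if f := s.file.Load(); f != nil {
		fmt.Fprintln(f, line)
	}
}

// close swaps the pointer to nil first, so whichever caller wins the Swap
// is the only one that flushes and closes the file.
func (s *sink) close() {
	if f := s.file.Swap(nil); f != nil {
		f.Sync()
		f.Close()
	}
}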

View File

@@ -132,14 +132,28 @@ func (p *process) stop() error {
p.restartCancel = nil
}
// Set status to stopped first to signal intentional stop
p.instance.SetStatus(Stopped)
// Set status to ShuttingDown first to reject new requests
p.instance.SetStatus(ShuttingDown)
// Get the monitor done channel before releasing the lock
monitorDone := p.monitorDone
p.mu.Unlock()
// Wait for inflight requests to complete (max 30 seconds)
log.Printf("Instance %s shutting down, waiting for inflight requests to complete...", p.instance.Name)
deadline := time.Now().Add(30 * time.Second)
for time.Now().Before(deadline) {
inflight := p.instance.GetInflightRequests()
if inflight == 0 {
break
}
time.Sleep(100 * time.Millisecond)
}
// Now set status to stopped to signal intentional stop
p.instance.SetStatus(Stopped)
// Stop the process with SIGINT if cmd exists
if p.cmd != nil && p.cmd.Process != nil {
if err := p.cmd.Process.Signal(syscall.SIGINT); err != nil {
@@ -156,6 +170,7 @@ func (p *process) stop() error {
select {
case <-monitorDone:
// Process exited normally
log.Printf("Instance %s shut down gracefully", p.instance.Name)
case <-time.After(30 * time.Second):
// Force kill if it doesn't exit within 30 seconds
if p.cmd != nil && p.cmd.Process != nil {

View File

@@ -37,8 +37,9 @@ type proxy struct {
proxyOnce sync.Once
proxyErr error
lastRequestTime atomic.Int64
timeProvider TimeProvider
lastRequestTime atomic.Int64
inflightRequests atomic.Int32
timeProvider TimeProvider
}
// newProxy creates a new Proxy for the given instance
@@ -153,6 +154,23 @@ func (p *proxy) build() (*httputil.ReverseProxy, error) {
return proxy, nil
}
// serveHTTP handles HTTP requests with inflight tracking
func (p *proxy) serveHTTP(w http.ResponseWriter, r *http.Request) error {
// Get the reverse proxy
reverseProxy, err := p.get()
if err != nil {
return err
}
// Track inflight requests
p.incInflightRequests()
defer p.decInflightRequests()
// Serve the request
reverseProxy.ServeHTTP(w, r)
return nil
}
// clear resets the proxy, allowing it to be recreated when options change.
func (p *proxy) clear() {
p.mu.Lock()
@@ -160,7 +178,7 @@ func (p *proxy) clear() {
p.proxy = nil
p.proxyErr = nil
p.proxyOnce = sync.Once{} // Reset Once for next GetProxy call
p.proxyOnce = sync.Once{}
}
// updateLastRequestTime updates the last request access time for the instance
@@ -199,3 +217,18 @@ func (p *proxy) shouldTimeout() bool {
func (p *proxy) setTimeProvider(tp TimeProvider) {
p.timeProvider = tp
}
// incInflightRequests increments the inflight request counter
func (p *proxy) incInflightRequests() {
p.inflightRequests.Add(1)
}
// decInflightRequests decrements the inflight request counter
func (p *proxy) decInflightRequests() {
p.inflightRequests.Add(-1)
}
// getInflightRequests returns the current number of inflight requests
func (p *proxy) getInflightRequests() int32 {
return p.inflightRequests.Load()
}

View File

@@ -14,20 +14,23 @@ const (
Running
Failed
Restarting
ShuttingDown
)
var nameToStatus = map[string]Status{
"stopped": Stopped,
"running": Running,
"failed": Failed,
"restarting": Restarting,
"stopped": Stopped,
"running": Running,
"failed": Failed,
"restarting": Restarting,
"shutting_down": ShuttingDown,
}
var statusToName = map[Status]string{
Stopped: "stopped",
Running: "running",
Failed: "failed",
Restarting: "restarting",
Stopped: "stopped",
Running: "running",
Failed: "failed",
Restarting: "restarting",
ShuttingDown: "shutting_down",
}
// Status enum JSON marshaling methods

View File

@@ -10,7 +10,7 @@ import (
)
func TestInstanceTimeoutLogic(t *testing.T) {
testManager := createTestManager()
testManager := createTestManager(t)
defer testManager.Shutdown()
idleTimeout := 1 // 1 minute
@@ -42,7 +42,7 @@ func TestInstanceTimeoutLogic(t *testing.T) {
}
func TestInstanceWithoutTimeoutNeverExpires(t *testing.T) {
testManager := createTestManager()
testManager := createTestManager(t)
defer testManager.Shutdown()
noTimeoutInst := createInstanceWithTimeout(t, testManager, "no-timeout-test", "/path/to/model.gguf", nil)
@@ -64,7 +64,7 @@ func TestInstanceWithoutTimeoutNeverExpires(t *testing.T) {
}
func TestEvictLRUInstance_Success(t *testing.T) {
manager := createTestManager()
manager := createTestManager(t)
defer manager.Shutdown()
// Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
@@ -121,7 +121,7 @@ func TestEvictLRUInstance_Success(t *testing.T) {
}
func TestEvictLRUInstance_NoRunningInstances(t *testing.T) {
manager := createTestManager()
manager := createTestManager(t)
defer manager.Shutdown()
err := manager.EvictLRUInstance()
@@ -134,7 +134,7 @@ func TestEvictLRUInstance_NoRunningInstances(t *testing.T) {
}
func TestEvictLRUInstance_OnlyEvictsTimeoutEnabledInstances(t *testing.T) {
manager := createTestManager()
manager := createTestManager(t)
defer manager.Shutdown()
// Create mix of instances: some with timeout enabled, some disabled

View File

@@ -54,16 +54,10 @@ func New(globalConfig *config.AppConfig) InstanceManager {
// Initialize port allocator
portRange := globalConfig.Instances.PortRange
ports, err := newPortAllocator(portRange[0], portRange[1])
if err != nil {
log.Fatalf("Failed to create port allocator: %v", err)
}
ports := newPortAllocator(portRange[0], portRange[1])
// Initialize persistence
persistence, err := newInstancePersister(globalConfig.Instances.InstancesDir)
if err != nil {
log.Fatalf("Failed to create instance persister: %v", err)
}
persistence := newInstancePersister(globalConfig.Instances.InstancesDir)
// Initialize remote manager
remote := newRemoteManager(globalConfig.Nodes, 30*time.Second)
@@ -116,7 +110,7 @@ func (im *instanceManager) Shutdown() {
defer wg.Done()
fmt.Printf("Stopping instance %s...\n", inst.Name)
if err := inst.Stop(); err != nil {
fmt.Printf("Error stopping instance %s: %v\n", inst.Name, err)
log.Printf("Error stopping instance %s: %v\n", inst.Name, err)
}
}(inst)
}

View File

@@ -85,7 +85,7 @@ func TestDeleteInstance_RemovesPersistenceFile(t *testing.T) {
}
func TestConcurrentAccess(t *testing.T) {
mgr := createTestManager()
mgr := createTestManager(t)
defer mgr.Shutdown()
// Test concurrent operations
@@ -113,7 +113,7 @@ func TestConcurrentAccess(t *testing.T) {
}
// Concurrent list operations
for i := 0; i < 3; i++ {
for range 3 {
wg.Add(1)
go func() {
defer wg.Done()
@@ -134,16 +134,17 @@ func TestConcurrentAccess(t *testing.T) {
// Helper functions for test configuration
func createTestAppConfig(instancesDir string) *config.AppConfig {
// Use 'sleep' as a test command instead of 'llama-server'
// This allows tests to run in CI environments without requiring actual LLM binaries
// The sleep command will be invoked with model paths and other args, which it ignores
// Use 'sh -c "sleep 999999"' as a test command instead of 'llama-server'
// The shell ignores all additional arguments passed after the command
return &config.AppConfig{
Backends: config.BackendConfig{
LlamaCpp: config.BackendSettings{
Command: "sleep",
Command: "sh",
Args: []string{"-c", "sleep 999999"},
},
MLX: config.BackendSettings{
Command: "sleep",
Command: "sh",
Args: []string{"-c", "sleep 999999"},
},
},
Instances: config.InstancesConfig{
@@ -162,28 +163,8 @@ func createTestAppConfig(instancesDir string) *config.AppConfig {
}
}
func createTestManager() manager.InstanceManager {
appConfig := &config.AppConfig{
Backends: config.BackendConfig{
LlamaCpp: config.BackendSettings{
Command: "sleep",
},
MLX: config.BackendSettings{
Command: "sleep",
},
},
Instances: config.InstancesConfig{
PortRange: [2]int{8000, 9000},
LogsDir: "/tmp/test",
MaxInstances: 10,
MaxRunningInstances: 10,
DefaultAutoRestart: true,
DefaultMaxRestarts: 3,
DefaultRestartDelay: 5,
TimeoutCheckInterval: 5,
},
LocalNode: "main",
Nodes: map[string]config.NodeConfig{},
}
func createTestManager(t *testing.T) manager.InstanceManager {
tempDir := t.TempDir()
appConfig := createTestAppConfig(tempDir)
return manager.New(appConfig)
}

View File

@@ -330,7 +330,8 @@ func (im *instanceManager) DeleteInstance(name string) error {
lock.Lock()
defer im.unlockAndCleanup(name)
if inst.IsRunning() {
status := inst.GetStatus()
if status == instance.Running || status == instance.Restarting {
return fmt.Errorf("instance with name %s is still running, stop it before deleting", name)
}

View File

@@ -10,7 +10,7 @@ import (
)
func TestCreateInstance_FailsWithDuplicateName(t *testing.T) {
mngr := createTestManager()
mngr := createTestManager(t)
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
@@ -36,6 +36,7 @@ func TestCreateInstance_FailsWithDuplicateName(t *testing.T) {
}
func TestCreateInstance_FailsWhenMaxInstancesReached(t *testing.T) {
tempDir := t.TempDir()
appConfig := &config.AppConfig{
Backends: config.BackendConfig{
LlamaCpp: config.BackendSettings{
@@ -44,6 +45,7 @@ func TestCreateInstance_FailsWhenMaxInstancesReached(t *testing.T) {
},
Instances: config.InstancesConfig{
PortRange: [2]int{8000, 9000},
InstancesDir: tempDir,
MaxInstances: 1, // Very low limit for testing
TimeoutCheckInterval: 5,
},
@@ -77,7 +79,7 @@ func TestCreateInstance_FailsWhenMaxInstancesReached(t *testing.T) {
}
func TestCreateInstance_FailsWithPortConflict(t *testing.T) {
manager := createTestManager()
manager := createTestManager(t)
options1 := &instance.Options{
BackendOptions: backends.Options{
@@ -115,7 +117,7 @@ func TestCreateInstance_FailsWithPortConflict(t *testing.T) {
}
func TestInstanceOperations_FailWithNonExistentInstance(t *testing.T) {
manager := createTestManager()
manager := createTestManager(t)
options := &instance.Options{
BackendOptions: backends.Options{
@@ -143,7 +145,7 @@ func TestInstanceOperations_FailWithNonExistentInstance(t *testing.T) {
}
func TestDeleteInstance_RunningInstanceFails(t *testing.T) {
mgr := createTestManager()
mgr := createTestManager(t)
defer mgr.Shutdown()
options := &instance.Options{
@@ -155,15 +157,13 @@ func TestDeleteInstance_RunningInstanceFails(t *testing.T) {
},
}
_, err := mgr.CreateInstance("test-instance", options)
inst, err := mgr.CreateInstance("test-instance", options)
if err != nil {
t.Fatalf("CreateInstance failed: %v", err)
}
_, err = mgr.StartInstance("test-instance")
if err != nil {
t.Fatalf("StartInstance failed: %v", err)
}
// Simulate starting the instance
inst.SetStatus(instance.Running)
// Should fail to delete running instance
err = mgr.DeleteInstance("test-instance")
@@ -173,7 +173,7 @@ func TestDeleteInstance_RunningInstanceFails(t *testing.T) {
}
func TestUpdateInstance(t *testing.T) {
mgr := createTestManager()
mgr := createTestManager(t)
defer mgr.Shutdown()
options := &instance.Options{
@@ -186,14 +186,14 @@ func TestUpdateInstance(t *testing.T) {
},
}
_, err := mgr.CreateInstance("test-instance", options)
inst, err := mgr.CreateInstance("test-instance", options)
if err != nil {
t.Fatalf("CreateInstance failed: %v", err)
}
_, err = mgr.StartInstance("test-instance")
if err != nil {
t.Fatalf("StartInstance failed: %v", err)
// Start the instance (will use 'yes' command from test config)
if err := inst.Start(); err != nil {
t.Fatalf("Failed to start instance: %v", err)
}
// Update running instance with new model
@@ -212,9 +212,9 @@ func TestUpdateInstance(t *testing.T) {
t.Fatalf("UpdateInstance failed: %v", err)
}
// Should still be running after update
// Should be running after update (was running before, should be restarted)
if !updated.IsRunning() {
t.Error("Instance should be running after update")
t.Errorf("Instance should be running after update, got: %v", updated.GetStatus())
}
if updated.GetOptions().BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
@@ -223,7 +223,7 @@ func TestUpdateInstance(t *testing.T) {
}
func TestUpdateInstance_ReleasesOldPort(t *testing.T) {
mgr := createTestManager()
mgr := createTestManager(t)
defer mgr.Shutdown()
options := &instance.Options{

View File

@@ -15,35 +15,18 @@ import (
type instancePersister struct {
mu sync.Mutex
instancesDir string
enabled bool
}
// newInstancePersister creates a new instance persister.
// If instancesDir is empty, persistence is disabled.
func newInstancePersister(instancesDir string) (*instancePersister, error) {
if instancesDir == "" {
return &instancePersister{
enabled: false,
}, nil
}
// Ensure the instances directory exists
if err := os.MkdirAll(instancesDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create instances directory: %w", err)
}
func newInstancePersister(instancesDir string) *instancePersister {
return &instancePersister{
instancesDir: instancesDir,
enabled: true,
}, nil
}
}
// Save persists an instance to disk with atomic write
func (p *instancePersister) save(inst *instance.Instance) error {
if !p.enabled {
return nil
}
if inst == nil {
return fmt.Errorf("cannot save nil instance")
}
@@ -103,10 +86,6 @@ func (p *instancePersister) save(inst *instance.Instance) error {
// Delete removes an instance's persistence file from disk.
func (p *instancePersister) delete(name string) error {
if !p.enabled {
return nil
}
validatedName, err := p.validateInstanceName(name)
if err != nil {
return err
@@ -131,10 +110,6 @@ func (p *instancePersister) delete(name string) error {
// LoadAll loads all persisted instances from disk.
// Returns a slice of instances and any errors encountered during loading.
func (p *instancePersister) loadAll() ([]*instance.Instance, error) {
if !p.enabled {
return nil, nil
}
p.mu.Lock()
defer p.mu.Unlock()

View File

@@ -24,15 +24,7 @@ type portAllocator struct {
}
// newPortAllocator creates a new port allocator for the given port range.
// Returns an error if the port range is invalid.
func newPortAllocator(minPort, maxPort int) (*portAllocator, error) {
if minPort <= 0 || maxPort <= 0 {
return nil, fmt.Errorf("invalid port range: min=%d, max=%d (must be > 0)", minPort, maxPort)
}
if minPort > maxPort {
return nil, fmt.Errorf("invalid port range: min=%d > max=%d", minPort, maxPort)
}
func newPortAllocator(minPort, maxPort int) *portAllocator {
rangeSize := maxPort - minPort + 1
bitmapSize := (rangeSize + 63) / 64 // Round up to nearest uint64
@@ -42,7 +34,7 @@ func newPortAllocator(minPort, maxPort int) (*portAllocator, error) {
minPort: minPort,
maxPort: maxPort,
rangeSize: rangeSize,
}, nil
}
}
// allocate finds and allocates the first available port for the given instance.

View File

@@ -66,17 +66,16 @@ func (h *Handler) LlamaCppUIProxy() http.HandlerFunc {
return
}
proxy, err := inst.GetProxy()
if err != nil {
writeError(w, http.StatusInternalServerError, "failed to get proxy", err.Error())
return
}
if !inst.IsRemote() {
h.stripLlamaCppPrefix(r, inst.Name)
}
proxy.ServeHTTP(w, r)
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}
@@ -110,6 +109,12 @@ func (h *Handler) LlamaCppProxy() http.HandlerFunc {
return
}
// Check if instance is shutting down before autostart logic
if inst.GetStatus() == instance.ShuttingDown {
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
return
}
if !inst.IsRemote() && !inst.IsRunning() {
err := h.ensureInstanceRunning(inst)
if err != nil {
@@ -118,17 +123,16 @@ func (h *Handler) LlamaCppProxy() http.HandlerFunc {
}
}
proxy, err := inst.GetProxy()
if err != nil {
writeError(w, http.StatusInternalServerError, "failed to get proxy", err.Error())
return
}
if !inst.IsRemote() {
h.stripLlamaCppPrefix(r, inst.Name)
}
proxy.ServeHTTP(w, r)
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}

View File

@@ -332,12 +332,6 @@ func (h *Handler) InstanceProxy() http.HandlerFunc {
return
}
proxy, err := inst.GetProxy()
if err != nil {
writeError(w, http.StatusInternalServerError, "proxy_failed", "Failed to get proxy: "+err.Error())
return
}
if !inst.IsRemote() {
// Strip the "/api/v1/instances/<name>/proxy" prefix from the request URL
prefix := fmt.Sprintf("/api/v1/instances/%s/proxy", inst.Name)
@@ -348,6 +342,11 @@ func (h *Handler) InstanceProxy() http.HandlerFunc {
r.Header.Set("X-Forwarded-Host", r.Header.Get("Host"))
r.Header.Set("X-Forwarded-Proto", "http")
proxy.ServeHTTP(w, r)
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"io"
"llamactl/pkg/instance"
"llamactl/pkg/validation"
"net/http"
)
@@ -106,6 +107,12 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
return
}
// Check if instance is shutting down before autostart logic
if inst.GetStatus() == instance.ShuttingDown {
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
return
}
if !inst.IsRemote() && !inst.IsRunning() {
err := h.ensureInstanceRunning(inst)
if err != nil {
@@ -114,16 +121,15 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
}
}
proxy, err := inst.GetProxy()
if err != nil {
writeError(w, http.StatusInternalServerError, "proxy_failed", err.Error())
return
}
// Recreate the request body from the bytes we read
r.Body = io.NopCloser(bytes.NewReader(bodyBytes))
r.ContentLength = int64(len(bodyBytes))
proxy.ServeHTTP(w, r)
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}

View File

@@ -1,7 +1,7 @@
package server
import (
"fmt"
"log"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
@@ -159,7 +159,7 @@ func SetupRouter(handler *Handler) *chi.Mux {
// Serve WebUI files
if err := webui.SetupWebUI(r); err != nil {
fmt.Printf("Failed to set up WebUI: %v\n", err)
log.Printf("Failed to set up WebUI: %v\n", err)
}
return r

View File

@@ -12,13 +12,13 @@ interface BackendFormFieldProps {
const BackendFormField: React.FC<BackendFormFieldProps> = ({ fieldKey, value, onChange }) => {
// Get configuration for basic fields, or use field name for advanced fields
const config = basicBackendFieldsConfig[fieldKey as string] || { label: fieldKey }
const config = basicBackendFieldsConfig[fieldKey] || { label: fieldKey }
// Get type from Zod schema
const fieldType = getBackendFieldType(fieldKey)
const handleChange = (newValue: string | number | boolean | string[] | undefined) => {
onChange(fieldKey as string, newValue)
onChange(fieldKey, newValue)
}
const renderField = () => {

View File

@@ -21,6 +21,8 @@ const HealthBadge: React.FC<HealthBadgeProps> = ({ health }) => {
return <Loader2 className="h-3 w-3 animate-spin" />;
case "restarting":
return <Loader2 className="h-3 w-3 animate-spin" />;
case "shutting_down":
return <Loader2 className="h-3 w-3 animate-spin" />;
case "stopped":
return <Clock className="h-3 w-3" />;
case "failed":
@@ -36,6 +38,8 @@ const HealthBadge: React.FC<HealthBadgeProps> = ({ health }) => {
return "outline";
case "restarting":
return "outline";
case "shutting_down":
return "outline";
case "stopped":
return "secondary";
case "failed":
@@ -51,6 +55,8 @@ const HealthBadge: React.FC<HealthBadgeProps> = ({ health }) => {
return "Starting";
case "restarting":
return "Restarting";
case "shutting_down":
return "Shutting Down";
case "stopped":
return "Stopped";
case "failed":

View File

@@ -2,12 +2,13 @@
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import type { Instance } from "@/types/instance";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal } from "lucide-react";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download } from "lucide-react";
import LogsDialog from "@/components/LogDialog";
import HealthBadge from "@/components/HealthBadge";
import BackendBadge from "@/components/BackendBadge";
import { useState } from "react";
import { useInstanceHealth } from "@/hooks/useInstanceHealth";
import { instancesApi } from "@/lib/api";
interface InstanceCardProps {
instance: Instance;
@@ -52,6 +53,40 @@ function InstanceCard({
setIsLogsOpen(true);
};
const handleExport = () => {
void (async () => {
try {
// Fetch the most up-to-date instance data from the backend
const instanceData = await instancesApi.get(instance.name);
// Remove docker_enabled as it's a computed field, not persisted to disk
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const { docker_enabled, ...persistedData } = instanceData;
// Convert to JSON string with pretty formatting (matching backend format)
const jsonString = JSON.stringify(persistedData, null, 2);
// Create a blob and download link
const blob = new Blob([jsonString], { type: "application/json" });
const url = URL.createObjectURL(blob);
const link = document.createElement("a");
link.href = url;
link.download = `${instance.name}.json`;
// Trigger download
document.body.appendChild(link);
link.click();
// Cleanup
document.body.removeChild(link);
URL.revokeObjectURL(url);
} catch (error) {
console.error("Failed to export instance:", error);
alert(`Failed to export instance: ${error instanceof Error ? error.message : "Unknown error"}`);
}
})();
};
const running = instance.status === "running";
return (
@@ -131,6 +166,18 @@ function InstanceCard({
Logs
</Button>
<Button
size="sm"
variant="outline"
onClick={handleExport}
title="Export instance"
data-testid="export-instance-button"
className="flex-1"
>
<Download className="h-4 w-4 mr-1" />
Export
</Button>
<Button
size="sm"
variant="destructive"

View File

@@ -1,4 +1,4 @@
import React, { useState, useEffect } from "react";
import React, { useState, useEffect, useRef } from "react";
import { Button } from "@/components/ui/button";
import {
Dialog,
@@ -9,9 +9,11 @@ import {
DialogTitle,
} from "@/components/ui/dialog";
import { BackendType, type CreateInstanceOptions, type Instance } from "@/types/instance";
import type { BackendOptions } from "@/schemas/instanceOptions";
import ParseCommandDialog from "@/components/ParseCommandDialog";
import InstanceSettingsCard from "@/components/instance/InstanceSettingsCard";
import BackendConfigurationCard from "@/components/instance/BackendConfigurationCard";
import { Upload } from "lucide-react";
interface InstanceDialogProps {
open: boolean;
@@ -32,6 +34,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
const [formData, setFormData] = useState<CreateInstanceOptions>({});
const [nameError, setNameError] = useState("");
const [showParseDialog, setShowParseDialog] = useState(false);
const fileInputRef = useRef<HTMLInputElement>(null);
// Reset form when dialog opens/closes or when instance changes
@@ -54,13 +57,13 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
}
}, [open, instance]);
const handleFieldChange = (key: keyof CreateInstanceOptions, value: any) => {
const handleFieldChange = (key: keyof CreateInstanceOptions, value: unknown) => {
setFormData((prev) => {
// If backend_type is changing, clear backend_options
if (key === 'backend_type' && prev.backend_type !== value) {
return {
...prev,
[key]: value,
backend_type: value as CreateInstanceOptions['backend_type'],
backend_options: {}, // Clear backend options when backend type changes
};
}
@@ -68,17 +71,17 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
return {
...prev,
[key]: value,
};
} as CreateInstanceOptions;
});
};
const handleBackendFieldChange = (key: string, value: any) => {
const handleBackendFieldChange = (key: string, value: unknown) => {
setFormData((prev) => ({
...prev,
backend_options: {
...prev.backend_options,
[key]: value,
} as any,
} as BackendOptions,
}));
};
@@ -104,11 +107,13 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
}
// Clean up undefined values to avoid sending empty fields
const cleanOptions: CreateInstanceOptions = {};
const cleanOptions: CreateInstanceOptions = {} as CreateInstanceOptions;
Object.entries(formData).forEach(([key, value]) => {
const typedKey = key as keyof CreateInstanceOptions;
if (key === 'backend_options' && value && typeof value === 'object' && !Array.isArray(value)) {
// Handle backend_options specially - clean nested object
const cleanBackendOptions: any = {};
const cleanBackendOptions: Record<string, unknown> = {};
Object.entries(value).forEach(([backendKey, backendValue]) => {
if (backendValue !== undefined && backendValue !== null && (typeof backendValue !== 'string' || backendValue.trim() !== "")) {
// Handle arrays - don't include empty arrays
@@ -121,7 +126,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
// Only include backend_options if it has content
if (Object.keys(cleanBackendOptions).length > 0) {
(cleanOptions as any)[key] = cleanBackendOptions;
(cleanOptions as Record<string, unknown>)[typedKey] = cleanBackendOptions as BackendOptions;
}
} else if (value !== undefined && value !== null) {
// Skip empty strings
@@ -132,7 +137,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
if (Array.isArray(value) && value.length === 0) {
return;
}
(cleanOptions as any)[key] = value;
(cleanOptions as Record<string, unknown>)[typedKey] = value;
}
});
@@ -153,6 +158,49 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
setShowParseDialog(false);
};
const handleImportFile = () => {
fileInputRef.current?.click();
};
const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
const file = event.target.files?.[0];
if (!file) return;
const reader = new FileReader();
reader.onload = (e) => {
try {
const content = e.target?.result as string;
const importedData = JSON.parse(content) as { name?: string; options?: CreateInstanceOptions };
// Validate that it's an instance export
if (!importedData.name || !importedData.options) {
alert('Invalid instance file: Missing required fields (name, options)');
return;
}
// Set the instance name (only for new instances, not editing)
if (!isEditing && typeof importedData.name === 'string') {
handleNameChange(importedData.name);
}
// Populate all the options from the imported file
if (importedData.options) {
setFormData(prev => ({
...prev,
...importedData.options,
}));
}
// Reset the file input
event.target.value = '';
} catch (error) {
console.error('Failed to parse instance file:', error);
alert(`Failed to parse instance file: ${error instanceof Error ? error.message : 'Invalid JSON'}`);
}
};
reader.readAsText(file);
};
// Save button label logic
let saveButtonLabel = "Create Instance";
@@ -168,14 +216,38 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-[600px] max-h-[80vh] overflow-hidden flex flex-col">
<DialogHeader>
<DialogTitle>
{isEditing ? "Edit Instance" : "Create New Instance"}
</DialogTitle>
<DialogDescription>
{isEditing
? "Modify the instance configuration below."
: "Configure your new llama-server instance below."}
</DialogDescription>
<div className="flex items-center justify-between">
<div className="flex-1">
<DialogTitle>
{isEditing ? "Edit Instance" : "Create New Instance"}
</DialogTitle>
<DialogDescription>
{isEditing
? "Modify the instance configuration below."
: "Configure your new llama-server instance below."}
</DialogDescription>
</div>
{!isEditing && (
<Button
type="button"
variant="ghost"
size="sm"
onClick={handleImportFile}
title="Import instance configuration from JSON file"
className="ml-2"
>
<Upload className="h-4 w-4 mr-2" />
Import
</Button>
)}
</div>
<input
ref={fileInputRef}
type="file"
accept=".json"
onChange={handleFileChange}
className="hidden"
/>
</DialogHeader>
<div className="flex-1 overflow-y-auto">

View File

@@ -56,9 +56,9 @@ function InstanceList({ editInstance }: InstanceListProps) {
<MemoizedInstanceCard
key={instance.name}
instance={instance}
startInstance={startInstance}
stopInstance={stopInstance}
deleteInstance={deleteInstance}
startInstance={() => { void startInstance(instance.name) }}
stopInstance={() => { void stopInstance(instance.name) }}
deleteInstance={() => { void deleteInstance(instance.name) }}
editInstance={editInstance}
/>
))}

View File

@@ -54,7 +54,7 @@ const ParseCommandDialog: React.FC<ParseCommandDialogProps> = ({
options = await backendsApi.vllm.parseCommand(command);
break;
default:
throw new Error(`Unsupported backend type: ${backendType}`);
throw new Error(`Unsupported backend type: ${String(backendType)}`);
}
onParsed(options);

View File

@@ -18,7 +18,7 @@ export function useInstanceHealth(instanceName: string, instanceStatus: Instance
// Trigger health check when instance status changes to active states
useEffect(() => {
if (instanceStatus === 'running' || instanceStatus === 'restarting') {
if (instanceStatus === 'running' || instanceStatus === 'restarting' || instanceStatus === 'shutting_down') {
healthService.refreshHealth(instanceName).catch(error => {
console.error(`Failed to refresh health for ${instanceName}:`, error)
})

View File

@@ -5,11 +5,12 @@ type HealthCallback = (health: HealthStatus) => void
// Polling intervals based on health state (in milliseconds)
const POLLING_INTERVALS: Record<HealthState, number> = {
'starting': 5000, // 5 seconds - frequent during startup
'restarting': 5000, // 5 seconds - restart in progress
'ready': 60000, // 60 seconds - stable state
'stopped': 0, // No polling
'failed': 0, // No polling
'starting': 5000, // 5 seconds - frequent during startup
'restarting': 5000, // 5 seconds - restart in progress
'shutting_down': 3000, // 3 seconds - monitor shutdown progress
'ready': 60000, // 60 seconds - stable state
'stopped': 0, // No polling
'failed': 0, // No polling
}
class HealthService {
@@ -96,6 +97,7 @@ class HealthService {
case 'running': return 'starting' // Should not happen as we check HTTP for running
case 'failed': return 'failed'
case 'restarting': return 'restarting'
case 'shutting_down': return 'shutting_down'
}
}

View File

@@ -11,9 +11,9 @@ export const BackendType = {
export type BackendTypeValue = typeof BackendType[keyof typeof BackendType]
export type InstanceStatus = 'running' | 'stopped' | 'failed' | 'restarting'
export type InstanceStatus = 'running' | 'stopped' | 'failed' | 'restarting' | 'shutting_down'
export type HealthState = 'stopped' | 'starting' | 'ready' | 'failed' | 'restarting'
export type HealthState = 'stopped' | 'starting' | 'ready' | 'failed' | 'restarting' | 'shutting_down'
export interface HealthStatus {
state: HealthState