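// Tests for the instance package: construction defaults, option updates
// (including node preservation), JSON marshalling and unmarshalling, proxy
// caching and invalidation, remote-instance restrictions, and idle-timeout
// behaviour.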
package instance_test

import (
	"encoding/json"
	"llamactl/pkg/backends"
	"llamactl/pkg/config"
	"llamactl/pkg/instance"
	"llamactl/pkg/testutil"
	"testing"
	"time"
)

func TestNewInstance(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
		MLX: config.BackendSettings{
			Command: "mlx_lm.server",
			Args:    []string{},
		},
		VLLM: config.BackendSettings{
			Command: "vllm",
			Args:    []string{"serve"},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir:             "/tmp/test",
		DefaultAutoRestart:  true,
		DefaultMaxRestarts:  3,
		DefaultRestartDelay: 5,
	}

	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
			Port:  8080,
		},
	}

	// Mock onStatusChange function
	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}

	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)

	if inst.Name != "test-instance" {
		t.Errorf("Expected name 'test-instance', got %q", inst.Name)
	}
	if inst.IsRunning() {
		t.Error("New instance should not be running")
	}

	// Check that options were properly set with defaults applied
	opts := inst.GetOptions()
	if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
		t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
	}
	if inst.GetPort() != 8080 {
		t.Errorf("Expected port 8080, got %d", inst.GetPort())
	}

	// Check that defaults were applied
	if opts.AutoRestart == nil || !*opts.AutoRestart {
		t.Error("Expected AutoRestart to be true (default)")
	}
	if opts.MaxRestarts == nil || *opts.MaxRestarts != 3 {
		t.Errorf("Expected MaxRestarts to be 3 (default), got %v", opts.MaxRestarts)
	}
	if opts.RestartDelay == nil || *opts.RestartDelay != 5 {
		t.Errorf("Expected RestartDelay to be 5 (default), got %v", opts.RestartDelay)
	}
}

func TestNewInstance_WithRestartOptions(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
		MLX: config.BackendSettings{
			Command: "mlx_lm.server",
			Args:    []string{},
		},
		VLLM: config.BackendSettings{
			Command: "vllm",
			Args:    []string{"serve"},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir:             "/tmp/test",
		DefaultAutoRestart:  true,
		DefaultMaxRestarts:  3,
		DefaultRestartDelay: 5,
	}

	// Override some defaults
	autoRestart := false
	maxRestarts := 10
	restartDelay := 15

	options := &instance.Options{
		AutoRestart:  &autoRestart,
		MaxRestarts:  &maxRestarts,
		RestartDelay: &restartDelay,
		BackendType:  backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
		},
	}

	// Mock onStatusChange function
	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}

	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
	opts := inst.GetOptions()

	// Check that explicit values override defaults
	if opts.AutoRestart == nil || *opts.AutoRestart {
		t.Error("Expected AutoRestart to be false (overridden)")
	}
	if opts.MaxRestarts == nil || *opts.MaxRestarts != 10 {
		t.Errorf("Expected MaxRestarts to be 10 (overridden), got %v", opts.MaxRestarts)
	}
	if opts.RestartDelay == nil || *opts.RestartDelay != 15 {
		t.Errorf("Expected RestartDelay to be 15 (overridden), got %v", opts.RestartDelay)
	}
}

func TestSetOptions(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
		MLX: config.BackendSettings{
			Command: "mlx_lm.server",
			Args:    []string{},
		},
		VLLM: config.BackendSettings{
			Command: "vllm",
			Args:    []string{"serve"},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir:             "/tmp/test",
		DefaultAutoRestart:  true,
		DefaultMaxRestarts:  3,
		DefaultRestartDelay: 5,
	}

	initialOptions := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
			Port:  8080,
		},
	}

	// Mock onStatusChange function
	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}

	inst := instance.New("test-instance", backendConfig, globalSettings, initialOptions, "main", mockOnStatusChange)

	// Update options
	newOptions := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/new-model.gguf",
			Port:  8081,
		},
	}

	inst.SetOptions(newOptions)
	opts := inst.GetOptions()

	if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
		t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
	}
	if inst.GetPort() != 8081 {
		t.Errorf("Expected updated port 8081, got %d", inst.GetPort())
	}

	// Check that defaults are still applied
	if opts.AutoRestart == nil || !*opts.AutoRestart {
		t.Error("Expected AutoRestart to be true (default)")
	}
}

func TestSetOptions_PreservesNodes(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir:             "/tmp/test",
		DefaultAutoRestart:  true,
		DefaultMaxRestarts:  3,
		DefaultRestartDelay: 5,
	}

	// Create instance with initial nodes
	initialOptions := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		Nodes:       map[string]struct{}{"worker1": {}},
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
			Port:  8080,
		},
	}

	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
	inst := instance.New("test-instance", backendConfig, globalSettings, initialOptions, "main", mockOnStatusChange)

	// Try to update with different nodes
	updatedOptions := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		Nodes:       map[string]struct{}{"worker2": {}}, // Attempt to change node
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/new-model.gguf",
			Port:  8081,
		},
	}

	inst.SetOptions(updatedOptions)
	opts := inst.GetOptions()

	// Nodes should remain unchanged
	if _, exists := opts.Nodes["worker1"]; len(opts.Nodes) != 1 || !exists {
		t.Errorf("Expected nodes to contain 'worker1', got %v", opts.Nodes)
	}

	// Other options should be updated
	if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
		t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
	}
}

func TestGetProxy(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
		MLX: config.BackendSettings{
			Command: "mlx_lm.server",
			Args:    []string{},
		},
		VLLM: config.BackendSettings{
			Command: "vllm",
			Args:    []string{"serve"},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir: "/tmp/test",
	}

	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Host: "localhost",
			Port: 8080,
		},
	}

	// Mock onStatusChange function
	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}

	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)

	// Get proxy for the first time
	proxy1, err := inst.GetProxy()
	if err != nil {
		t.Fatalf("GetProxy failed: %v", err)
	}
	if proxy1 == nil {
		t.Error("Expected proxy to be created")
	}

	// Get proxy again - should return cached version
	proxy2, err := inst.GetProxy()
	if err != nil {
		t.Fatalf("GetProxy failed: %v", err)
	}
	if proxy1 != proxy2 {
		t.Error("Expected cached proxy to be returned")
	}
}

func TestMarshalJSON(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
		MLX: config.BackendSettings{
			Command: "mlx_lm.server",
			Args:    []string{},
		},
		VLLM: config.BackendSettings{
			Command: "vllm",
			Args:    []string{"serve"},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir:             "/tmp/test",
		DefaultAutoRestart:  true,
		DefaultMaxRestarts:  3,
		DefaultRestartDelay: 5,
	}

	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
			Port:  8080,
		},
	}

	// Mock onStatusChange function
	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}

	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)

	data, err := json.Marshal(inst)
	if err != nil {
		t.Fatalf("JSON marshal failed: %v", err)
	}

	// Check that JSON contains expected fields
	var result map[string]any
	err = json.Unmarshal(data, &result)
	if err != nil {
		t.Fatalf("JSON unmarshal failed: %v", err)
	}

	if result["name"] != "test-instance" {
		t.Errorf("Expected name 'test-instance', got %v", result["name"])
	}
	if result["status"] != "stopped" {
		t.Errorf("Expected status 'stopped', got %v", result["status"])
	}

	// Check that options are included
	optionsData, ok := result["options"]
	if !ok {
		t.Error("Expected options to be included in JSON")
	}
	optionsMap, ok := optionsData.(map[string]any)
	if !ok {
		t.Error("Expected options to be a map")
	}

	// Check backend type
	if optionsMap["backend_type"] != string(backends.BackendTypeLlamaCpp) {
		t.Errorf("Expected backend_type '%s', got %v", backends.BackendTypeLlamaCpp, optionsMap["backend_type"])
	}

	// Check backend options
	backendOptionsData, ok := optionsMap["backend_options"]
	if !ok {
		t.Error("Expected backend_options to be included in JSON")
	}
	backendOptionsMap, ok := backendOptionsData.(map[string]any)
	if !ok {
		t.Error("Expected backend_options to be a map")
	}
	if backendOptionsMap["model"] != "/path/to/model.gguf" {
		t.Errorf("Expected model '/path/to/model.gguf', got %v", backendOptionsMap["model"])
	}
	if backendOptionsMap["port"] != float64(8080) {
		t.Errorf("Expected port 8080, got %v", backendOptionsMap["port"])
	}
}

func TestUnmarshalJSON(t *testing.T) {
	jsonData := `{
		"name": "test-instance",
		"status": "running",
		"options": {
			"auto_restart": false,
			"max_restarts": 5,
			"backend_type": "llama_cpp",
			"backend_options": {
				"model": "/path/to/model.gguf",
				"port": 8080
			}
		}
	}`

	var inst instance.Instance
	err := json.Unmarshal([]byte(jsonData), &inst)
	if err != nil {
		t.Fatalf("JSON unmarshal failed: %v", err)
	}

	if inst.Name != "test-instance" {
		t.Errorf("Expected name 'test-instance', got %q", inst.Name)
	}
	if !inst.IsRunning() {
		t.Error("Expected status to be running")
	}

	opts := inst.GetOptions()
	if opts == nil {
		t.Fatal("Expected options to be set")
	}
	if opts.BackendType != backends.BackendTypeLlamaCpp {
		t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendType)
	}
	if opts.LlamaServerOptions == nil {
		t.Fatal("Expected LlamaServerOptions to be set")
	}
	if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
		t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
	}
	if inst.GetPort() != 8080 {
		t.Errorf("Expected port 8080, got %d", inst.GetPort())
	}
	if opts.AutoRestart == nil || *opts.AutoRestart {
		t.Error("Expected AutoRestart to be false")
	}
	if opts.MaxRestarts == nil || *opts.MaxRestarts != 5 {
		t.Errorf("Expected MaxRestarts to be 5, got %v", opts.MaxRestarts)
	}
}

func TestCreateOptionsValidation(t *testing.T) {
	tests := []struct {
		name          string
		maxRestarts   *int
		restartDelay  *int
		expectedMax   int
		expectedDelay int
	}{
		{
			name:          "valid positive values",
			maxRestarts:   testutil.IntPtr(10),
			restartDelay:  testutil.IntPtr(30),
			expectedMax:   10,
			expectedDelay: 30,
		},
		{
			name:          "zero values",
			maxRestarts:   testutil.IntPtr(0),
			restartDelay:  testutil.IntPtr(0),
			expectedMax:   0,
			expectedDelay: 0,
		},
		{
			name:          "negative values should be corrected",
			maxRestarts:   testutil.IntPtr(-5),
			restartDelay:  testutil.IntPtr(-10),
			expectedMax:   0,
			expectedDelay: 0,
		},
	}

	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{
			Command: "llama-server",
			Args:    []string{},
		},
		MLX: config.BackendSettings{
			Command: "mlx_lm.server",
			Args:    []string{},
		},
		VLLM: config.BackendSettings{
			Command: "vllm",
			Args:    []string{"serve"},
		},
	}

	globalSettings := &config.InstancesConfig{
		LogsDir: "/tmp/test",
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			options := &instance.Options{
				MaxRestarts:  tt.maxRestarts,
				RestartDelay: tt.restartDelay,
				BackendType:  backends.BackendTypeLlamaCpp,
				LlamaServerOptions: &backends.LlamaServerOptions{
					Model: "/path/to/model.gguf",
				},
			}

			// Mock onStatusChange function
			mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}

			inst := instance.New("test", backendConfig, globalSettings, options, "main", mockOnStatusChange)
			opts := inst.GetOptions()

			if opts.MaxRestarts == nil {
				t.Error("Expected MaxRestarts to be set")
			} else if *opts.MaxRestarts != tt.expectedMax {
				t.Errorf("Expected MaxRestarts %d, got %d", tt.expectedMax, *opts.MaxRestarts)
			}

			if opts.RestartDelay == nil {
				t.Error("Expected RestartDelay to be set")
			} else if *opts.RestartDelay != tt.expectedDelay {
				t.Errorf("Expected RestartDelay %d, got %d", tt.expectedDelay, *opts.RestartDelay)
			}
		})
	}
}

func TestStatusChangeCallback(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
		},
	}

	var callbackOldStatus, callbackNewStatus instance.Status
	callbackCalled := false

	onStatusChange := func(oldStatus, newStatus instance.Status) {
		callbackOldStatus = oldStatus
		callbackNewStatus = newStatus
		callbackCalled = true
	}

	inst := instance.New("test", backendConfig, globalSettings, options, "main", onStatusChange)

	inst.SetStatus(instance.Running)

	if !callbackCalled {
		t.Error("Expected status change callback to be called")
	}
	if callbackOldStatus != instance.Stopped {
		t.Errorf("Expected old status Stopped, got %v", callbackOldStatus)
	}
	if callbackNewStatus != instance.Running {
		t.Errorf("Expected new status Running, got %v", callbackNewStatus)
	}
}

func TestSetOptions_NodesPreserved(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}

	tests := []struct {
		name          string
		initialNodes  map[string]struct{}
		updateNodes   map[string]struct{}
		expectedNodes map[string]struct{}
	}{
		{
			name:          "nil nodes preserved as nil",
			initialNodes:  nil,
			updateNodes:   map[string]struct{}{"worker1": {}},
			expectedNodes: nil,
		},
		{
			name:          "empty nodes preserved as empty",
			initialNodes:  map[string]struct{}{},
			updateNodes:   map[string]struct{}{"worker1": {}},
			expectedNodes: map[string]struct{}{},
		},
		{
			name:          "existing nodes preserved",
			initialNodes:  map[string]struct{}{"worker1": {}, "worker2": {}},
			updateNodes:   map[string]struct{}{"worker3": {}},
			expectedNodes: map[string]struct{}{"worker1": {}, "worker2": {}},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			options := &instance.Options{
				BackendType: backends.BackendTypeLlamaCpp,
				Nodes:       tt.initialNodes,
				LlamaServerOptions: &backends.LlamaServerOptions{
					Model: "/path/to/model.gguf",
				},
			}

			inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)

			// Attempt to update nodes (should be ignored)
			updateOptions := &instance.Options{
				BackendType: backends.BackendTypeLlamaCpp,
				Nodes:       tt.updateNodes,
				LlamaServerOptions: &backends.LlamaServerOptions{
					Model: "/path/to/new-model.gguf",
				},
			}
			inst.SetOptions(updateOptions)

			opts := inst.GetOptions()

			// Verify nodes are preserved
			if len(opts.Nodes) != len(tt.expectedNodes) {
				t.Errorf("Expected %d nodes, got %d", len(tt.expectedNodes), len(opts.Nodes))
			}
			for node := range tt.expectedNodes {
				if _, exists := opts.Nodes[node]; !exists {
					t.Errorf("Expected node %s to exist", node)
				}
			}

			// Verify other options were updated
			if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
				t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
			}
		})
	}
}

func TestProcessErrorCases(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
		},
	}

	inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)

	// Stop when not running should return error
	err := inst.Stop()
	if err == nil {
		t.Error("Expected error when stopping non-running instance")
	}

	// Simulate running state
	inst.SetStatus(instance.Running)

	// Start when already running should return error
	err = inst.Start()
	if err == nil {
		t.Error("Expected error when starting already running instance")
	}
}

func TestRemoteInstanceOperations(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		Nodes:       map[string]struct{}{"remote-node": {}}, // Remote instance
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
		},
	}

	inst := instance.New("remote-test", backendConfig, globalSettings, options, "main", nil)

	if !inst.IsRemote() {
		t.Error("Expected instance to be remote")
	}

	// Start should fail for remote instance
	if err := inst.Start(); err == nil {
		t.Error("Expected error when starting remote instance")
	}

	// Stop should fail for remote instance
	if err := inst.Stop(); err == nil {
		t.Error("Expected error when stopping remote instance")
	}

	// Restart should fail for remote instance
	if err := inst.Restart(); err == nil {
		t.Error("Expected error when restarting remote instance")
	}

	// GetProxy should fail for remote instance
	if _, err := inst.GetProxy(); err == nil {
		t.Error("Expected error when getting proxy for remote instance")
	}

	// GetLogs should fail for remote instance
	if _, err := inst.GetLogs(10); err == nil {
		t.Error("Expected error when getting logs for remote instance")
	}
}

func TestProxyClearOnOptionsChange(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Host: "localhost",
			Port: 8080,
		},
	}

	inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)

	// Get initial proxy
	proxy1, err := inst.GetProxy()
	if err != nil {
		t.Fatalf("Failed to get initial proxy: %v", err)
	}

	// Update options (should clear proxy)
	newOptions := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Host: "localhost",
			Port: 8081, // Different port
		},
	}
	inst.SetOptions(newOptions)

	// Get proxy again - should be recreated with new port
	proxy2, err := inst.GetProxy()
	if err != nil {
		t.Fatalf("Failed to get proxy after options change: %v", err)
	}

	// Proxies should be different instances (recreated)
	if proxy1 == proxy2 {
		t.Error("Expected proxy to be recreated after options change")
	}
}

func TestIdleTimeout(t *testing.T) {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}

	t.Run("not running never times out", func(t *testing.T) {
		timeout := 1
		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
			BackendType: backends.BackendTypeLlamaCpp,
			IdleTimeout: &timeout,
			LlamaServerOptions: &backends.LlamaServerOptions{
				Model: "/path/to/model.gguf",
			},
		}, "main", nil)

		if inst.ShouldTimeout() {
			t.Error("Non-running instance should never timeout")
		}
	})

	t.Run("no timeout configured", func(t *testing.T) {
		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
			BackendType: backends.BackendTypeLlamaCpp,
			IdleTimeout: nil, // No timeout
			LlamaServerOptions: &backends.LlamaServerOptions{
				Model: "/path/to/model.gguf",
			},
		}, "main", nil)
		inst.SetStatus(instance.Running)

		if inst.ShouldTimeout() {
			t.Error("Instance with no timeout configured should not timeout")
		}
	})

	t.Run("timeout exceeded", func(t *testing.T) {
		timeout := 1 // 1 minute
		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
			BackendType: backends.BackendTypeLlamaCpp,
			IdleTimeout: &timeout,
			LlamaServerOptions: &backends.LlamaServerOptions{
				Model: "/path/to/model.gguf",
			},
		}, "main", nil)
		inst.SetStatus(instance.Running)

		// Use mock time provider
		mockTime := &mockTimeProvider{currentTime: time.Now().Unix()}
		inst.SetTimeProvider(mockTime)

		// Set last request time to now
		inst.UpdateLastRequestTime()

		// Advance time by 2 minutes (exceeds 1 minute timeout)
		mockTime.currentTime = time.Now().Add(2 * time.Minute).Unix()

		if !inst.ShouldTimeout() {
			t.Error("Instance should timeout when idle time exceeds configured timeout")
		}
	})
}

// mockTimeProvider for timeout testing
type mockTimeProvider struct {
	currentTime int64 // Unix timestamp
}

func (m *mockTimeProvider) Now() time.Time {
	return time.Unix(m.currentTime, 0)
}