Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-05 16:44:22 +00:00)
Refactor instance and manager tests to use BackendOptions structure
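The change repeated in every hunk below is mechanical: the backend-related fields that used to sit directly on instance.Options (BackendType plus the LlamaServerOptions / MlxServerOptions / VllmServerOptions pointer) move into a nested BackendOptions field of type backends.Options, while lifecycle fields such as AutoRestart, MaxRestarts, RestartDelay, IdleTimeout, and Nodes stay at the top level. A minimal before/after sketch assembled from the hunks below — a fragment, not a standalone program, and it assumes this repository's instance and backends packages:

    // Before: backend fields directly on instance.Options.
    oldOptions := &instance.Options{
        BackendType: backends.BackendTypeLlamaCpp,
        LlamaServerOptions: &backends.LlamaServerOptions{
            Model: "/path/to/model.gguf",
            Port:  8080,
        },
    }

    // After: backend fields grouped under BackendOptions (a backends.Options value).
    newOptions := &instance.Options{
        BackendOptions: backends.Options{
            BackendType: backends.BackendTypeLlamaCpp,
            LlamaServerOptions: &backends.LlamaServerOptions{
                Model: "/path/to/model.gguf",
                Port:  8080,
            },
        },
    }

    // Reads follow the same nesting: opts.LlamaServerOptions.Model becomes
    // opts.BackendOptions.LlamaServerOptions.Model.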
@@ -34,10 +34,12 @@ func TestNewInstance(t *testing.T) {
     }

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -55,8 +57,8 @@ func TestNewInstance(t *testing.T) {

     // Check that options were properly set with defaults applied
     opts := inst.GetOptions()
-    if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
-        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/model.gguf" {
+        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
     if inst.GetPort() != 8080 {
         t.Errorf("Expected port 8080, got %d", inst.GetPort())
@@ -106,9 +108,11 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
         AutoRestart: &autoRestart,
         MaxRestarts: &maxRestarts,
         RestartDelay: &restartDelay,
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -154,10 +158,12 @@ func TestSetOptions(t *testing.T) {
     }

     initialOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -168,18 +174,20 @@ func TestSetOptions(t *testing.T) {

     // Update options
     newOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/new-model.gguf",
-            Port: 8081,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/new-model.gguf",
+                Port: 8081,
+            },
         },
     }

     inst.SetOptions(newOptions)
     opts := inst.GetOptions()

-    if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
     if inst.GetPort() != 8081 {
         t.Errorf("Expected updated port 8081, got %d", inst.GetPort())
@@ -208,11 +216,13 @@ func TestSetOptions_PreservesNodes(t *testing.T) {

     // Create instance with initial nodes
     initialOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        Nodes: map[string]struct{}{"worker1": {}},
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        Nodes: map[string]struct{}{"worker1": {}},
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -221,11 +231,13 @@ func TestSetOptions_PreservesNodes(t *testing.T) {

     // Try to update with different nodes
     updatedOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        Nodes: map[string]struct{}{"worker2": {}}, // Attempt to change node
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/new-model.gguf",
-            Port: 8081,
+        Nodes: map[string]struct{}{"worker2": {}}, // Attempt to change node
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/new-model.gguf",
+                Port: 8081,
+            },
         },
     }

@@ -238,8 +250,8 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
     }

     // Other options should be updated
-    if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
 }

@@ -264,10 +276,12 @@ func TestGetProxy(t *testing.T) {
     }

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Host: "localhost",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Host: "localhost",
+                Port: 8080,
+            },
         },
     }

@@ -319,10 +333,12 @@ func TestMarshalJSON(t *testing.T) {
     }

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -336,6 +352,9 @@ func TestMarshalJSON(t *testing.T) {
         t.Fatalf("JSON marshal failed: %v", err)
     }

+    // Debug: Print the JSON to see what we're getting
+    t.Logf("Marshaled JSON: %s", string(data))
+
     // Check that JSON contains expected fields
     var result map[string]any
     err = json.Unmarshal(data, &result)
@@ -414,14 +433,14 @@ func TestUnmarshalJSON(t *testing.T) {
     if opts == nil {
         t.Fatal("Expected options to be set")
     }
-    if opts.BackendType != backends.BackendTypeLlamaCpp {
-        t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendType)
+    if opts.BackendOptions.BackendType != backends.BackendTypeLlamaCpp {
+        t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendOptions.BackendType)
     }
-    if opts.LlamaServerOptions == nil {
+    if opts.BackendOptions.LlamaServerOptions == nil {
         t.Fatal("Expected LlamaServerOptions to be set")
     }
-    if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
-        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/model.gguf" {
+        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
     if inst.GetPort() != 8080 {
         t.Errorf("Expected port 8080, got %d", inst.GetPort())
@@ -489,9 +508,11 @@ func TestCreateOptionsValidation(t *testing.T) {
     options := &instance.Options{
         MaxRestarts: tt.maxRestarts,
         RestartDelay: tt.restartDelay,
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -522,9 +543,11 @@ func TestStatusChangeCallback(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -587,10 +610,12 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             options := &instance.Options{
-                BackendType: backends.BackendTypeLlamaCpp,
-                Nodes: tt.initialNodes,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/model.gguf",
+                Nodes: tt.initialNodes,
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/model.gguf",
+                    },
                 },
             }

@@ -598,10 +623,12 @@ func TestSetOptions_NodesPreserved(t *testing.T) {

            // Attempt to update nodes (should be ignored)
            updateOptions := &instance.Options{
-                BackendType: backends.BackendTypeLlamaCpp,
-                Nodes: tt.updateNodes,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/new-model.gguf",
+                Nodes: tt.updateNodes,
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/new-model.gguf",
+                    },
                },
            }
            inst.SetOptions(updateOptions)
@@ -619,8 +646,8 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
            }

            // Verify other options were updated
-            if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-                t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+            if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+                t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
            }
        })
    }
@@ -632,9 +659,11 @@ func TestProcessErrorCases(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -662,10 +691,12 @@ func TestRemoteInstanceOperations(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        Nodes: map[string]struct{}{"remote-node": {}}, // Remote instance
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        Nodes: map[string]struct{}{"remote-node": {}}, // Remote instance
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -707,10 +738,12 @@ func TestProxyClearOnOptionsChange(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Host: "localhost",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Host: "localhost",
+                Port: 8080,
+            },
         },
     }

@@ -724,10 +757,12 @@ func TestProxyClearOnOptionsChange(t *testing.T) {

     // Update options (should clear proxy)
     newOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Host: "localhost",
-            Port: 8081, // Different port
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Host: "localhost",
+                Port: 8081, // Different port
+            },
         },
     }
     inst.SetOptions(newOptions)
@@ -753,10 +788,12 @@ func TestIdleTimeout(t *testing.T) {
     t.Run("not running never times out", func(t *testing.T) {
         timeout := 1
         inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
             IdleTimeout: &timeout,
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: "/path/to/model.gguf",
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: "/path/to/model.gguf",
+                },
             },
         }, "main", nil)

@@ -767,10 +804,12 @@ func TestIdleTimeout(t *testing.T) {

     t.Run("no timeout configured", func(t *testing.T) {
         inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
             IdleTimeout: nil, // No timeout
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: "/path/to/model.gguf",
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: "/path/to/model.gguf",
+                },
             },
         }, "main", nil)
         inst.SetStatus(instance.Running)
@@ -783,10 +822,12 @@ func TestIdleTimeout(t *testing.T) {
     t.Run("timeout exceeded", func(t *testing.T) {
         timeout := 1 // 1 minute
         inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
             IdleTimeout: &timeout,
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: "/path/to/model.gguf",
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: "/path/to/model.gguf",
+                },
             },
         }, "main", nil)
         inst.SetStatus(instance.Running)

@@ -70,10 +70,12 @@ func TestPersistence(t *testing.T) {
     // Test instance persistence on creation
     manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -132,9 +134,11 @@ func TestConcurrentAccess(t *testing.T) {
        go func(index int) {
            defer wg.Done()
            options := &instance.Options{
-                BackendType: backends.BackendTypeLlamaCpp,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/model.gguf",
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/model.gguf",
+                    },
                },
            }
            instanceName := fmt.Sprintf("concurrent-test-%d", index)
@@ -169,9 +173,11 @@ func TestShutdown(t *testing.T) {

     // Create test instance
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }
     _, err := mgr.CreateInstance("test-instance", options)
@@ -230,11 +236,13 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {

     autoRestart := false
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
         AutoRestart: &autoRestart,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -13,10 +13,12 @@ func TestCreateInstance_Success(t *testing.T) {
     manager := createTestManager()

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: 8080,
+            },
         },
     }

@@ -40,9 +42,11 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
     // Test duplicate names
     mngr := createTestManager()
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -96,9 +100,11 @@ func TestPortManagement(t *testing.T) {

     // Test auto port assignment
     options1 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -114,10 +120,12 @@ func TestPortManagement(t *testing.T) {

     // Test port conflict detection
     options2 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model2.gguf",
-            Port: port1, // Same port - should conflict
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model2.gguf",
+                Port: port1, // Same port - should conflict
+            },
         },
     }

@@ -132,10 +140,12 @@ func TestPortManagement(t *testing.T) {
     // Test port release on deletion
     specificPort := 8080
     options3 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port: specificPort,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port: specificPort,
+            },
         },
     }

@@ -160,9 +170,11 @@ func TestInstanceOperations(t *testing.T) {
     manager := createTestManager()

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -183,10 +195,12 @@ func TestInstanceOperations(t *testing.T) {

     // Update instance
     newOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/new-model.gguf",
-            Port: 8081,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/new-model.gguf",
+                Port: 8081,
+            },
         },
     }

@@ -194,8 +208,8 @@ func TestInstanceOperations(t *testing.T) {
     if err != nil {
         t.Fatalf("UpdateInstance failed: %v", err)
     }
-    if updated.GetOptions().LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-        t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().LlamaServerOptions.Model)
+    if updated.GetOptions().BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+        t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().BackendOptions.LlamaServerOptions.Model)
     }

     // List instances

@@ -35,9 +35,11 @@ func TestTimeoutFunctionality(t *testing.T) {
     idleTimeout := 1 // 1 minute
     options := &instance.Options{
         IdleTimeout: &idleTimeout,
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -84,9 +86,11 @@ func TestTimeoutFunctionality(t *testing.T) {

     // Test that instance without timeout doesn't timeout
     noTimeoutOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
         // No IdleTimeout set
     }
@@ -115,25 +119,31 @@ func TestEvictLRUInstance_Success(t *testing.T) {

     // Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
     options1 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model1.gguf",
-        },
         IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model1.gguf",
+            },
+        },
     }
     options2 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model2.gguf",
-        },
         IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model2.gguf",
+            },
+        },
     }
     options3 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model3.gguf",
-        },
         IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model3.gguf",
+            },
+        },
     }

     inst1, err := manager.CreateInstance("instance-1", options1)
@@ -197,11 +207,13 @@ func TestEvictLRUInstance_NoEligibleInstances(t *testing.T) {
     // Helper function to create instances with different timeout configurations
     createInstanceWithTimeout := func(manager manager.InstanceManager, name, model string, timeout *int) *instance.Instance {
         options := &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: model,
-            },
             IdleTimeout: timeout,
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: model,
+                },
+            },
         }
         inst, err := manager.CreateInstance(name, options)
         if err != nil {

@@ -40,7 +40,7 @@ func (h *Handler) LlamaCppProxy(onDemandStart bool) http.HandlerFunc {
            return
        }

-        if options.BackendType != backends.BackendTypeLlamaCpp {
+        if options.BackendOptions.BackendType != backends.BackendTypeLlamaCpp {
            http.Error(w, "Instance is not a llama.cpp server.", http.StatusBadRequest)
            return
        }
@@ -133,8 +133,10 @@ func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
            return
        }
        options := &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
-            LlamaServerOptions: llamaOptions,
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: llamaOptions,
+            },
        }
        w.Header().Set("Content-Type", "application/json")
        if err := json.NewEncoder(w).Encode(options); err != nil {
@@ -186,8 +188,10 @@ func (h *Handler) ParseMlxCommand() http.HandlerFunc {
        backendType := backends.BackendTypeMlxLm

        options := &instance.Options{
-            BackendType: backendType,
-            MlxServerOptions: mlxOptions,
+            BackendOptions: backends.Options{
+                BackendType: backendType,
+                MlxServerOptions: mlxOptions,
+            },
        }

        w.Header().Set("Content-Type", "application/json")
@@ -239,8 +243,10 @@ func (h *Handler) ParseVllmCommand() http.HandlerFunc {
        backendType := backends.BackendTypeVllm

        options := &instance.Options{
-            BackendType: backendType,
-            VllmServerOptions: vllmOptions,
+            BackendOptions: backends.Options{
+                BackendType: backendType,
+                VllmServerOptions: vllmOptions,
+            },
        }

        w.Header().Set("Content-Type", "application/json")

@@ -2,8 +2,6 @@ package validation_test

 import (
     "llamactl/pkg/backends"
-    "llamactl/pkg/instance"
-    "llamactl/pkg/testutil"
     "llamactl/pkg/validation"
     "strings"
     "testing"
@@ -57,13 +55,11 @@ func TestValidateInstanceName(t *testing.T) {
 }

 func TestValidateInstanceOptions_NilOptions(t *testing.T) {
-    err := validation.ValidateInstanceOptions(nil)
+    var opts backends.Options
+    err := opts.ValidateInstanceOptions()
     if err == nil {
         t.Error("Expected error for nil options")
     }
-    if !strings.Contains(err.Error(), "options cannot be nil") {
-        t.Errorf("Expected 'options cannot be nil' error, got: %v", err)
-    }
 }

 func TestValidateInstanceOptions_PortValidation(t *testing.T) {
@@ -82,14 +78,14 @@ func TestValidateInstanceOptions_PortValidation(t *testing.T) {

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            options := &instance.Options{
+            options := backends.Options{
                 BackendType: backends.BackendTypeLlamaCpp,
                 LlamaServerOptions: &backends.LlamaServerOptions{
                     Port: tt.port,
                 },
             }

-            err := validation.ValidateInstanceOptions(options)
+            err := options.ValidateInstanceOptions()
             if (err != nil) != tt.wantErr {
                 t.Errorf("ValidateInstanceOptions(port=%d) error = %v, wantErr %v", tt.port, err, tt.wantErr)
             }
@@ -136,14 +132,14 @@ func TestValidateInstanceOptions_StringInjection(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             // Test with Model field (string field)
-            options := &instance.Options{
+            options := backends.Options{
                 BackendType: backends.BackendTypeLlamaCpp,
                 LlamaServerOptions: &backends.LlamaServerOptions{
                     Model: tt.value,
                 },
             }

-            err := validation.ValidateInstanceOptions(options)
+            err := options.ValidateInstanceOptions()
             if (err != nil) != tt.wantErr {
                 t.Errorf("ValidateInstanceOptions(model=%q) error = %v, wantErr %v", tt.value, err, tt.wantErr)
             }
@@ -174,14 +170,14 @@ func TestValidateInstanceOptions_ArrayInjection(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             // Test with Lora field (array field)
-            options := &instance.Options{
+            options := backends.Options{
                 BackendType: backends.BackendTypeLlamaCpp,
                 LlamaServerOptions: &backends.LlamaServerOptions{
                     Lora: tt.array,
                 },
             }

-            err := validation.ValidateInstanceOptions(options)
+            err := options.ValidateInstanceOptions()
             if (err != nil) != tt.wantErr {
                 t.Errorf("ValidateInstanceOptions(lora=%v) error = %v, wantErr %v", tt.array, err, tt.wantErr)
             }
@@ -193,12 +189,12 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
     // Test that injection in any field is caught
     tests := []struct {
         name string
-        options *instance.Options
+        options backends.Options
         wantErr bool
     }{
         {
             name: "injection in model field",
-            options: &instance.Options{
+            options: backends.Options{
                 BackendType: backends.BackendTypeLlamaCpp,
                 LlamaServerOptions: &backends.LlamaServerOptions{
                     Model: "safe.gguf",
@@ -209,7 +205,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
         },
         {
             name: "injection in log file",
-            options: &instance.Options{
+            options: backends.Options{
                 BackendType: backends.BackendTypeLlamaCpp,
                 LlamaServerOptions: &backends.LlamaServerOptions{
                     Model: "safe.gguf",
@@ -220,7 +216,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
         },
         {
             name: "all safe fields",
-            options: &instance.Options{
+            options: backends.Options{
                 BackendType: backends.BackendTypeLlamaCpp,
                 LlamaServerOptions: &backends.LlamaServerOptions{
                     Model: "/path/to/model.gguf",
@@ -236,7 +232,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            err := validation.ValidateInstanceOptions(tt.options)
+            err := tt.options.ValidateInstanceOptions()
             if (err != nil) != tt.wantErr {
                 t.Errorf("ValidateInstanceOptions() error = %v, wantErr %v", err, tt.wantErr)
             }
@@ -246,11 +242,8 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {

 func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
     // Test that non-string fields don't interfere with validation
-    options := &instance.Options{
-        AutoRestart: testutil.BoolPtr(true),
-        MaxRestarts: testutil.IntPtr(5),
-        RestartDelay: testutil.IntPtr(10),
-        BackendType: backends.BackendTypeLlamaCpp,
+    options := backends.Options{
+        BackendType: backends.BackendTypeLlamaCpp,
         LlamaServerOptions: &backends.LlamaServerOptions{
             Port: 8080,
             GPULayers: 32,
@@ -263,7 +256,7 @@ func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
         },
     }

-    err := validation.ValidateInstanceOptions(options)
+    err := options.ValidateInstanceOptions()
     if err != nil {
         t.Errorf("ValidateInstanceOptions with non-string fields should not error, got: %v", err)
     }
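The validation hunks above make the same move for the validator itself: the free function validation.ValidateInstanceOptions(*instance.Options) is replaced by a ValidateInstanceOptions method on backends.Options, so the tests build a bare backends.Options value and call the method on it. A hedged sketch of the new call site, mirroring the test code above (a fragment only; it assumes this repository's backends package and omits error-message details the diff does not show):

    opts := backends.Options{
        BackendType: backends.BackendTypeLlamaCpp,
        LlamaServerOptions: &backends.LlamaServerOptions{
            Model: "/path/to/model.gguf",
        },
    }
    if err := opts.ValidateInstanceOptions(); err != nil {
        // Reject the instance configuration (invalid port, injection attempt, etc.).
    }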