Refactor instance and manager tests to use BackendOptions structure

2025-10-19 18:07:14 +02:00
parent 55f671c354
commit 9da2433a7c
6 changed files with 251 additions and 177 deletions
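The shape of the refactor, as a reading aid: BackendType and the per-backend server options move out of instance.Options into a nested backends.Options value, and option validation moves with them (last file below). A minimal sketch of the two structs as implied by the hunks — field names come from the diff; everything else here is assumption:

// Sketch reconstructed from this diff, not the real sources.
// package backends
type Options struct {
    BackendType        BackendType // e.g. backends.BackendTypeLlamaCpp
    LlamaServerOptions *LlamaServerOptions
    MlxServerOptions   *MlxServerOptions
    VllmServerOptions  *VllmServerOptions
}

// package instance — lifecycle settings stay at the top level.
type Options struct {
    AutoRestart    *bool
    MaxRestarts    *int
    RestartDelay   *int
    IdleTimeout    *int
    Nodes          map[string]struct{}
    BackendOptions backends.Options // was: flattened BackendType + *ServerOptions fields
}

For callers the migration is mechanical: opts.LlamaServerOptions.Model becomes opts.BackendOptions.LlamaServerOptions.Model, and likewise for BackendType.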

View File

@@ -34,11 +34,13 @@ func TestNewInstance(t *testing.T) {
}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
},
}
// Mock onStatusChange function
@@ -55,8 +57,8 @@ func TestNewInstance(t *testing.T) {
// Check that options were properly set with defaults applied
opts := inst.GetOptions()
- if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
- t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+ if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/model.gguf" {
+ t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
}
if inst.GetPort() != 8080 {
t.Errorf("Expected port 8080, got %d", inst.GetPort())
@@ -106,10 +108,12 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
AutoRestart: &autoRestart,
MaxRestarts: &maxRestarts,
RestartDelay: &restartDelay,
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
// Mock onStatusChange function
@@ -154,11 +158,13 @@ func TestSetOptions(t *testing.T) {
}
initialOptions := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
},
}
// Mock onStatusChange function
@@ -168,18 +174,20 @@ func TestSetOptions(t *testing.T) {
// Update options
newOptions := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/new-model.gguf",
Port: 8081,
},
},
}
inst.SetOptions(newOptions)
opts := inst.GetOptions()
- if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
- t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+ if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+ t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
}
if inst.GetPort() != 8081 {
t.Errorf("Expected updated port 8081, got %d", inst.GetPort())
@@ -208,12 +216,14 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
// Create instance with initial nodes
initialOptions := &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
Nodes: map[string]struct{}{"worker1": {}},
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ Port: 8080,
+ },
+ },
}
mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
@@ -221,12 +231,14 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
// Try to update with different nodes
updatedOptions := &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
Nodes: map[string]struct{}{"worker2": {}}, // Attempt to change node
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/new-model.gguf",
+ Port: 8081,
+ },
+ },
}
inst.SetOptions(updatedOptions)
@@ -238,8 +250,8 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
}
// Other options should be updated
- if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
- t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+ if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+ t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
}
}
@@ -264,11 +276,13 @@ func TestGetProxy(t *testing.T) {
}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Host: "localhost",
Port: 8080,
},
},
}
// Mock onStatusChange function
@@ -319,11 +333,13 @@ func TestMarshalJSON(t *testing.T) {
}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
},
}
// Mock onStatusChange function
@@ -336,6 +352,9 @@ func TestMarshalJSON(t *testing.T) {
t.Fatalf("JSON marshal failed: %v", err)
}
// Debug: Print the JSON to see what we're getting
t.Logf("Marshaled JSON: %s", string(data))
// Check that JSON contains expected fields
var result map[string]any
err = json.Unmarshal(data, &result)
@@ -414,14 +433,14 @@ func TestUnmarshalJSON(t *testing.T) {
if opts == nil {
t.Fatal("Expected options to be set")
}
- if opts.BackendType != backends.BackendTypeLlamaCpp {
- t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendType)
+ if opts.BackendOptions.BackendType != backends.BackendTypeLlamaCpp {
+ t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendOptions.BackendType)
}
- if opts.LlamaServerOptions == nil {
+ if opts.BackendOptions.LlamaServerOptions == nil {
t.Fatal("Expected LlamaServerOptions to be set")
}
- if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
- t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+ if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/model.gguf" {
+ t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
}
if inst.GetPort() != 8080 {
t.Errorf("Expected port 8080, got %d", inst.GetPort())
@@ -489,10 +508,12 @@ func TestCreateOptionsValidation(t *testing.T) {
options := &instance.Options{
MaxRestarts: tt.maxRestarts,
RestartDelay: tt.restartDelay,
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
// Mock onStatusChange function
@@ -522,10 +543,12 @@ func TestStatusChangeCallback(t *testing.T) {
}
globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
var callbackOldStatus, callbackNewStatus instance.Status
@@ -587,22 +610,26 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
options := &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
Nodes: tt.initialNodes,
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ },
+ },
}
inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)
// Attempt to update nodes (should be ignored)
updateOptions := &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
Nodes: tt.updateNodes,
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/new-model.gguf",
+ },
+ },
}
inst.SetOptions(updateOptions)
@@ -619,8 +646,8 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
}
// Verify other options were updated
- if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
- t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+ if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+ t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
}
})
}
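Both node-preservation tests (TestSetOptions_PreservesNodes above and this table-driven variant) pin the same rule: SetOptions may replace anything except the instance's node assignment. A hypothetical sketch of that rule — the real SetOptions is not part of this diff, and these field names are assumed:

// Hypothetical sketch; not from the diff.
func (i *Instance) SetOptions(next *Options) {
    if next == nil {
        return
    }
    next.Nodes = i.options.Nodes // nodes are fixed at creation; the caller's value is ignored
    i.options = next
    i.proxy = nil // see TestProxyClearOnOptionsChange below: option changes invalidate the proxy
}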
@@ -632,10 +659,12 @@ func TestProcessErrorCases(t *testing.T) {
}
globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)
@@ -662,11 +691,13 @@ func TestRemoteInstanceOperations(t *testing.T) {
}
globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
options := &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
Nodes: map[string]struct{}{"remote-node": {}}, // Remote instance
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ },
+ },
}
inst := instance.New("remote-test", backendConfig, globalSettings, options, "main", nil)
@@ -707,11 +738,13 @@ func TestProxyClearOnOptionsChange(t *testing.T) {
}
globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Host: "localhost",
Port: 8080,
},
},
}
inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)
@@ -724,11 +757,13 @@ func TestProxyClearOnOptionsChange(t *testing.T) {
// Update options (should clear proxy)
newOptions := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Host: "localhost",
Port: 8081, // Different port
},
},
}
inst.SetOptions(newOptions)
@@ -753,11 +788,13 @@ func TestIdleTimeout(t *testing.T) {
t.Run("not running never times out", func(t *testing.T) {
timeout := 1
inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
IdleTimeout: &timeout,
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ },
+ },
}, "main", nil)
if inst.ShouldTimeout() {
@@ -767,11 +804,13 @@ func TestIdleTimeout(t *testing.T) {
t.Run("no timeout configured", func(t *testing.T) {
inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
IdleTimeout: nil, // No timeout
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ },
+ },
}, "main", nil)
inst.SetStatus(instance.Running)
@@ -783,11 +822,13 @@ func TestIdleTimeout(t *testing.T) {
t.Run("timeout exceeded", func(t *testing.T) {
timeout := 1 // 1 minute
inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
IdleTimeout: &timeout,
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ },
+ },
}, "main", nil)
inst.SetStatus(instance.Running)
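Taken together, the three subtests fix the timeout contract: a non-running instance never times out, a nil timeout never times out, and a running instance with a positive timeout (in minutes) times out once it has been idle long enough. A hedged sketch of logic satisfying exactly that contract — everything beyond the ShouldTimeout name is assumed:

// Hypothetical implementation of the contract the subtests describe.
func (i *Instance) ShouldTimeout() bool {
    if i.GetStatus() != Running {
        return false // "not running never times out"
    }
    t := i.options.IdleTimeout
    if t == nil || *t <= 0 {
        return false // "no timeout configured"
    }
    return time.Since(i.lastRequestTime) > time.Duration(*t)*time.Minute
}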

View File

@@ -70,11 +70,13 @@ func TestPersistence(t *testing.T) {
// Test instance persistence on creation
manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
},
}
_, err := manager1.CreateInstance("test-instance", options)
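The rest of TestPersistence (not shown in this hunk) presumably reloads the instance through a second manager over the same config; the nested BackendOptions must round-trip. A hypothetical check in that spirit — GetInstance and the reload behavior are assumptions, not taken from this diff:

// Hypothetical round trip; the reload API is assumed.
manager2 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
inst, err := manager2.GetInstance("test-instance")
if err != nil {
    t.Fatalf("instance did not survive the reload: %v", err)
}
if inst.GetOptions().BackendOptions.LlamaServerOptions.Port != 8080 {
    t.Error("persisted backend options should round-trip intact")
}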
@@ -132,10 +134,12 @@ func TestConcurrentAccess(t *testing.T) {
go func(index int) {
defer wg.Done()
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
instanceName := fmt.Sprintf("concurrent-test-%d", index)
if _, err := mgr.CreateInstance(instanceName, options); err != nil {
@@ -169,10 +173,12 @@ func TestShutdown(t *testing.T) {
// Create test instance
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
_, err := mgr.CreateInstance("test-instance", options)
if err != nil {
@@ -230,12 +236,14 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
autoRestart := false
options := &instance.Options{
- BackendType: backends.BackendTypeLlamaCpp,
AutoRestart: &autoRestart,
+ BackendOptions: backends.Options{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &backends.LlamaServerOptions{
+ Model: "/path/to/model.gguf",
+ Port: 8080,
+ },
+ },
}
inst, err := manager1.CreateInstance("test-instance", options)

View File

@@ -13,11 +13,13 @@ func TestCreateInstance_Success(t *testing.T) {
manager := createTestManager()
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
},
}
inst, err := manager.CreateInstance("test-instance", options)
@@ -40,10 +42,12 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
// Test duplicate names
mngr := createTestManager()
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
_, err := mngr.CreateInstance("test-instance", options)
@@ -96,10 +100,12 @@ func TestPortManagement(t *testing.T) {
// Test auto port assignment
options1 := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
inst1, err := manager.CreateInstance("instance1", options1)
@@ -114,11 +120,13 @@ func TestPortManagement(t *testing.T) {
// Test port conflict detection
options2 := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model2.gguf",
Port: port1, // Same port - should conflict
},
},
}
_, err = manager.CreateInstance("instance2", options2)
@@ -132,11 +140,13 @@ func TestPortManagement(t *testing.T) {
// Test port release on deletion
specificPort := 8080
options3 := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: specificPort,
},
},
}
_, err = manager.CreateInstance("port-test", options3)
@@ -160,10 +170,12 @@ func TestInstanceOperations(t *testing.T) {
manager := createTestManager()
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
// Create instance
@@ -183,19 +195,21 @@ func TestInstanceOperations(t *testing.T) {
// Update instance
newOptions := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/new-model.gguf",
Port: 8081,
},
},
}
updated, err := manager.UpdateInstance("test-instance", newOptions)
if err != nil {
t.Fatalf("UpdateInstance failed: %v", err)
}
- if updated.GetOptions().LlamaServerOptions.Model != "/path/to/new-model.gguf" {
- t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().LlamaServerOptions.Model)
+ if updated.GetOptions().BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+ t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().BackendOptions.LlamaServerOptions.Model)
}
// List instances
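TestPortManagement pins three behaviors: auto-assignment when no port is given, rejection of a port that is already taken, and release of the port when the instance is deleted. A self-contained sketch of bookkeeping with those properties — the manager's real allocator is not in this diff:

package main

import "fmt"

type portRegistry struct {
    inUse map[int]bool
    next  int
}

func (p *portRegistry) assign(requested int) (int, error) {
    if requested == 0 { // no port requested: auto-assign the next free one
        for p.inUse[p.next] {
            p.next++
        }
        requested = p.next
    }
    if p.inUse[requested] {
        return 0, fmt.Errorf("port %d already in use", requested) // conflict
    }
    p.inUse[requested] = true
    return requested, nil
}

// release frees a port again, e.g. when the owning instance is deleted.
func (p *portRegistry) release(port int) { delete(p.inUse, port) }

func main() {
    reg := &portRegistry{inUse: map[int]bool{}, next: 8000}
    a, _ := reg.assign(0)   // auto-assigned
    _, err := reg.assign(a) // same port again: conflict
    reg.release(a)          // deletion releases it for reuse
    fmt.Println(a, err)
}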

View File

@@ -35,10 +35,12 @@ func TestTimeoutFunctionality(t *testing.T) {
idleTimeout := 1 // 1 minute
options := &instance.Options{
IdleTimeout: &idleTimeout,
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
}
inst, err := testManager.CreateInstance("timeout-test", options)
@@ -84,10 +86,12 @@ func TestTimeoutFunctionality(t *testing.T) {
// Test that instance without timeout doesn't timeout
noTimeoutOptions := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
},
// No IdleTimeout set
}
@@ -115,25 +119,31 @@ func TestEvictLRUInstance_Success(t *testing.T) {
// Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
options1 := &instance.Options{
+ IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model1.gguf",
},
- IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
},
}
options2 := &instance.Options{
+ IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model2.gguf",
},
- IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
},
}
options3 := &instance.Options{
+ IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model3.gguf",
},
- IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
},
}
inst1, err := manager.CreateInstance("instance-1", options1)
@@ -197,11 +207,13 @@ func TestEvictLRUInstance_NoEligibleInstances(t *testing.T) {
// Helper function to create instances with different timeout configurations
createInstanceWithTimeout := func(manager manager.InstanceManager, name, model string, timeout *int) *instance.Instance {
options := &instance.Options{
+ IdleTimeout: timeout,
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: model,
},
- IdleTimeout: timeout,
},
}
inst, err := manager.CreateInstance(name, options)
if err != nil {
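The eviction tests establish two rules: only instances with an idle timeout configured are eligible (hence TestEvictLRUInstance_NoEligibleInstances), and among eligible instances the least recently used one goes first. A hedged, self-contained sketch — the candidate type and all names are hypothetical:

package evict

import (
    "errors"
    "time"
)

// candidate stands in for the real instance type (hypothetical).
type candidate struct {
    name        string
    idleTimeout *int
    lastUsed    time.Time
}

// pickLRUVictim: least recently used among instances with a timeout configured.
func pickLRUVictim(all []candidate) (string, error) {
    victim := -1
    for i, c := range all {
        if c.idleTimeout == nil || *c.idleTimeout <= 0 {
            continue // never eligible, however idle it is
        }
        if victim == -1 || c.lastUsed.Before(all[victim].lastUsed) {
            victim = i
        }
    }
    if victim == -1 {
        return "", errors.New("no eligible instances for eviction")
    }
    return all[victim].name, nil
}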

View File

@@ -40,7 +40,7 @@ func (h *Handler) LlamaCppProxy(onDemandStart bool) http.HandlerFunc {
return
}
- if options.BackendType != backends.BackendTypeLlamaCpp {
+ if options.BackendOptions.BackendType != backends.BackendTypeLlamaCpp {
http.Error(w, "Instance is not a llama.cpp server.", http.StatusBadRequest)
return
}
@@ -133,8 +133,10 @@ func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
return
}
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: llamaOptions,
},
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(options); err != nil {
@@ -186,8 +188,10 @@ func (h *Handler) ParseMlxCommand() http.HandlerFunc {
backendType := backends.BackendTypeMlxLm
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backendType,
MlxServerOptions: mlxOptions,
},
}
w.Header().Set("Content-Type", "application/json")
@@ -239,8 +243,10 @@ func (h *Handler) ParseVllmCommand() http.HandlerFunc {
backendType := backends.BackendTypeVllm
options := &instance.Options{
BackendOptions: backends.Options{
BackendType: backendType,
VllmServerOptions: vllmOptions,
},
}
w.Header().Set("Content-Type", "application/json")

View File

@@ -2,8 +2,6 @@ package validation_test
import (
"llamactl/pkg/backends"
"llamactl/pkg/instance"
"llamactl/pkg/testutil"
"llamactl/pkg/validation"
"strings"
"testing"
@@ -57,13 +55,11 @@ func TestValidateInstanceName(t *testing.T) {
}
func TestValidateInstanceOptions_NilOptions(t *testing.T) {
- err := validation.ValidateInstanceOptions(nil)
+ var opts backends.Options
+ err := opts.ValidateInstanceOptions()
if err == nil {
t.Error("Expected error for nil options")
}
- if !strings.Contains(err.Error(), "options cannot be nil") {
- t.Errorf("Expected 'options cannot be nil' error, got: %v", err)
- }
}
func TestValidateInstanceOptions_PortValidation(t *testing.T) {
@@ -82,14 +78,14 @@ func TestValidateInstanceOptions_PortValidation(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- options := &instance.Options{
+ options := backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Port: tt.port,
},
}
- err := validation.ValidateInstanceOptions(options)
+ err := options.ValidateInstanceOptions()
if (err != nil) != tt.wantErr {
t.Errorf("ValidateInstanceOptions(port=%d) error = %v, wantErr %v", tt.port, err, tt.wantErr)
}
@@ -136,14 +132,14 @@ func TestValidateInstanceOptions_StringInjection(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Test with Model field (string field)
- options := &instance.Options{
+ options := backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: tt.value,
},
}
- err := validation.ValidateInstanceOptions(options)
+ err := options.ValidateInstanceOptions()
if (err != nil) != tt.wantErr {
t.Errorf("ValidateInstanceOptions(model=%q) error = %v, wantErr %v", tt.value, err, tt.wantErr)
}
@@ -174,14 +170,14 @@ func TestValidateInstanceOptions_ArrayInjection(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Test with Lora field (array field)
- options := &instance.Options{
+ options := backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Lora: tt.array,
},
}
- err := validation.ValidateInstanceOptions(options)
+ err := options.ValidateInstanceOptions()
if (err != nil) != tt.wantErr {
t.Errorf("ValidateInstanceOptions(lora=%v) error = %v, wantErr %v", tt.array, err, tt.wantErr)
}
@@ -193,12 +189,12 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
// Test that injection in any field is caught
tests := []struct {
name string
- options *instance.Options
+ options backends.Options
wantErr bool
}{
{
name: "injection in model field",
- options: &instance.Options{
+ options: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "safe.gguf",
@@ -209,7 +205,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
},
{
name: "injection in log file",
- options: &instance.Options{
+ options: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "safe.gguf",
@@ -220,7 +216,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
},
{
name: "all safe fields",
- options: &instance.Options{
+ options: backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Model: "/path/to/model.gguf",
@@ -236,7 +232,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- err := validation.ValidateInstanceOptions(tt.options)
+ err := tt.options.ValidateInstanceOptions()
if (err != nil) != tt.wantErr {
t.Errorf("ValidateInstanceOptions() error = %v, wantErr %v", err, tt.wantErr)
}
@@ -246,10 +242,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
// Test that non-string fields don't interfere with validation
- options := &instance.Options{
- AutoRestart: testutil.BoolPtr(true),
- MaxRestarts: testutil.IntPtr(5),
- RestartDelay: testutil.IntPtr(10),
+ options := backends.Options{
BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &backends.LlamaServerOptions{
Port: 8080,
@@ -263,7 +256,7 @@ func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
},
}
- err := validation.ValidateInstanceOptions(options)
+ err := options.ValidateInstanceOptions()
if err != nil {
t.Errorf("ValidateInstanceOptions with non-string fields should not error, got: %v", err)
}
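The signature change is the point of this file: validation.ValidateInstanceOptions(*instance.Options) becomes a method on backends.Options, so the zero value can be validated directly and the validation tests no longer import instance or testutil. A hedged sketch of what the relocated method plausibly does, inferred only from the test names above (port range, injection checks on string and array fields); the real body is not in this diff:

// Hypothetical sketch; messages and exact checks are assumed.
func (o Options) ValidateInstanceOptions() error {
    switch o.BackendType {
    case BackendTypeLlamaCpp:
        if o.LlamaServerOptions == nil {
            return errors.New("llama server options required")
        }
        if p := o.LlamaServerOptions.Port; p < 0 || p > 65535 {
            return fmt.Errorf("invalid port: %d", p)
        }
        // shell-injection scans over Model, LogFile, Lora, ... would follow here
        return nil
    default:
        return fmt.Errorf("unknown or unset backend type %q", o.BackendType)
    }
}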