Mirror of https://github.com/lordmathis/llamactl.git, synced 2025-11-05 16:44:22 +00:00
Refactor instance and manager tests to use BackendOptions structure
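The change is purely structural: backend-specific settings move off the top level of instance.Options into a single nested BackendOptions field of type backends.Options. A minimal sketch of the two shapes follows, using only names that appear in this diff; the backends.BackendType type name, the trimmed field lists, and the OptionsOld/OptionsNew/BackendOptions identifiers are illustrative assumptions, not the repository's exact definitions.

package sketch

// Stand-ins for the real llamactl types; field lists are trimmed to what
// this diff actually exercises (Model, Host, Port, GPULayers, Lora).
type BackendType string

type LlamaServerOptions struct {
    Model     string
    Host      string
    Port      int
    GPULayers int
    Lora      []string
}
type MlxServerOptions struct{}
type VllmServerOptions struct{}

// Old shape: backend type and per-backend options sit directly on instance.Options.
type OptionsOld struct {
    BackendType        BackendType
    LlamaServerOptions *LlamaServerOptions
    // lifecycle fields: AutoRestart, MaxRestarts, RestartDelay, IdleTimeout, Nodes, ...
}

// backends.Options groups the backend type with every per-backend option struct.
type BackendOptions struct {
    BackendType        BackendType
    LlamaServerOptions *LlamaServerOptions
    MlxServerOptions   *MlxServerOptions
    VllmServerOptions  *VllmServerOptions
}

// New shape: instance.Options carries one nested BackendOptions value;
// lifecycle fields are unchanged.
type OptionsNew struct {
    BackendOptions BackendOptions
    // AutoRestart, MaxRestarts, RestartDelay, IdleTimeout, Nodes, ...
}

Call sites follow the nesting: opts.LlamaServerOptions.Model becomes opts.BackendOptions.LlamaServerOptions.Model, and the test hunks below apply that rewrite mechanically.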
@@ -34,10 +34,12 @@ func TestNewInstance(t *testing.T) {
     }

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -55,8 +57,8 @@ func TestNewInstance(t *testing.T) {

     // Check that options were properly set with defaults applied
     opts := inst.GetOptions()
-    if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
-        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/model.gguf" {
+        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
     if inst.GetPort() != 8080 {
         t.Errorf("Expected port 8080, got %d", inst.GetPort())
@@ -106,9 +108,11 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
         AutoRestart:  &autoRestart,
         MaxRestarts:  &maxRestarts,
         RestartDelay: &restartDelay,
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -154,10 +158,12 @@ func TestSetOptions(t *testing.T) {
     }

     initialOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -168,18 +174,20 @@ func TestSetOptions(t *testing.T) {

     // Update options
     newOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/new-model.gguf",
-            Port:  8081,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/new-model.gguf",
+                Port:  8081,
+            },
         },
     }

     inst.SetOptions(newOptions)
     opts := inst.GetOptions()

-    if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
     if inst.GetPort() != 8081 {
         t.Errorf("Expected updated port 8081, got %d", inst.GetPort())
@@ -208,11 +216,13 @@ func TestSetOptions_PreservesNodes(t *testing.T) {

     // Create instance with initial nodes
     initialOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        Nodes:       map[string]struct{}{"worker1": {}},
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        Nodes: map[string]struct{}{"worker1": {}},
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -221,11 +231,13 @@ func TestSetOptions_PreservesNodes(t *testing.T) {

     // Try to update with different nodes
     updatedOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        Nodes:       map[string]struct{}{"worker2": {}}, // Attempt to change node
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/new-model.gguf",
-            Port:  8081,
+        Nodes: map[string]struct{}{"worker2": {}}, // Attempt to change node
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/new-model.gguf",
+                Port:  8081,
+            },
         },
     }

@@ -238,8 +250,8 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
     }

     // Other options should be updated
-    if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+        t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
 }

@@ -264,10 +276,12 @@ func TestGetProxy(t *testing.T) {
     }

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Host: "localhost",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Host: "localhost",
+                Port: 8080,
+            },
         },
     }

@@ -319,10 +333,12 @@ func TestMarshalJSON(t *testing.T) {
     }

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -336,6 +352,9 @@ func TestMarshalJSON(t *testing.T) {
         t.Fatalf("JSON marshal failed: %v", err)
     }

+    // Debug: Print the JSON to see what we're getting
+    t.Logf("Marshaled JSON: %s", string(data))
+
     // Check that JSON contains expected fields
     var result map[string]any
     err = json.Unmarshal(data, &result)
@@ -414,14 +433,14 @@ func TestUnmarshalJSON(t *testing.T) {
     if opts == nil {
         t.Fatal("Expected options to be set")
     }
-    if opts.BackendType != backends.BackendTypeLlamaCpp {
-        t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendType)
+    if opts.BackendOptions.BackendType != backends.BackendTypeLlamaCpp {
+        t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendOptions.BackendType)
     }
-    if opts.LlamaServerOptions == nil {
+    if opts.BackendOptions.LlamaServerOptions == nil {
         t.Fatal("Expected LlamaServerOptions to be set")
     }
-    if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
-        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+    if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/model.gguf" {
+        t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
     }
     if inst.GetPort() != 8080 {
         t.Errorf("Expected port 8080, got %d", inst.GetPort())
@@ -489,9 +508,11 @@ func TestCreateOptionsValidation(t *testing.T) {
             options := &instance.Options{
                 MaxRestarts:  tt.maxRestarts,
                 RestartDelay: tt.restartDelay,
-                BackendType: backends.BackendTypeLlamaCpp,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/model.gguf",
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/model.gguf",
+                    },
                 },
             }

@@ -522,9 +543,11 @@ func TestStatusChangeCallback(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -587,10 +610,12 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             options := &instance.Options{
-                BackendType: backends.BackendTypeLlamaCpp,
-                Nodes:       tt.initialNodes,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/model.gguf",
+                Nodes: tt.initialNodes,
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/model.gguf",
+                    },
                 },
             }

@@ -598,10 +623,12 @@ func TestSetOptions_NodesPreserved(t *testing.T) {

             // Attempt to update nodes (should be ignored)
             updateOptions := &instance.Options{
-                BackendType: backends.BackendTypeLlamaCpp,
-                Nodes:       tt.updateNodes,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/new-model.gguf",
+                Nodes: tt.updateNodes,
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/new-model.gguf",
+                    },
                 },
             }
             inst.SetOptions(updateOptions)
@@ -619,8 +646,8 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
             }

             // Verify other options were updated
-            if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-                t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+            if opts.BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+                t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.BackendOptions.LlamaServerOptions.Model)
             }
         })
     }
@@ -632,9 +659,11 @@ func TestProcessErrorCases(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -662,10 +691,12 @@ func TestRemoteInstanceOperations(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
    options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        Nodes:       map[string]struct{}{"remote-node": {}}, // Remote instance
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        Nodes: map[string]struct{}{"remote-node": {}}, // Remote instance
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -707,10 +738,12 @@ func TestProxyClearOnOptionsChange(t *testing.T) {
     }
     globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Host: "localhost",
-            Port: 8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Host: "localhost",
+                Port: 8080,
+            },
         },
     }

@@ -724,10 +757,12 @@ func TestProxyClearOnOptionsChange(t *testing.T) {

     // Update options (should clear proxy)
     newOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Host: "localhost",
-            Port: 8081, // Different port
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Host: "localhost",
+                Port: 8081, // Different port
+            },
         },
     }
     inst.SetOptions(newOptions)
@@ -753,10 +788,12 @@ func TestIdleTimeout(t *testing.T) {
     t.Run("not running never times out", func(t *testing.T) {
         timeout := 1
         inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
             IdleTimeout: &timeout,
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: "/path/to/model.gguf",
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: "/path/to/model.gguf",
+                },
             },
         }, "main", nil)

@@ -767,10 +804,12 @@ func TestIdleTimeout(t *testing.T) {

     t.Run("no timeout configured", func(t *testing.T) {
         inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
             IdleTimeout: nil, // No timeout
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: "/path/to/model.gguf",
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: "/path/to/model.gguf",
+                },
             },
         }, "main", nil)
         inst.SetStatus(instance.Running)
@@ -783,10 +822,12 @@ func TestIdleTimeout(t *testing.T) {
     t.Run("timeout exceeded", func(t *testing.T) {
         timeout := 1 // 1 minute
         inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
             IdleTimeout: &timeout,
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: "/path/to/model.gguf",
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: "/path/to/model.gguf",
+                },
             },
         }, "main", nil)
         inst.SetStatus(instance.Running)

@@ -70,10 +70,12 @@ func TestPersistence(t *testing.T) {
     // Test instance persistence on creation
     manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -132,9 +134,11 @@ func TestConcurrentAccess(t *testing.T) {
         go func(index int) {
             defer wg.Done()
             options := &instance.Options{
-                BackendType: backends.BackendTypeLlamaCpp,
-                LlamaServerOptions: &backends.LlamaServerOptions{
-                    Model: "/path/to/model.gguf",
+                BackendOptions: backends.Options{
+                    BackendType: backends.BackendTypeLlamaCpp,
+                    LlamaServerOptions: &backends.LlamaServerOptions{
+                        Model: "/path/to/model.gguf",
+                    },
                 },
             }
             instanceName := fmt.Sprintf("concurrent-test-%d", index)
@@ -169,9 +173,11 @@ func TestShutdown(t *testing.T) {

     // Create test instance
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }
     _, err := mgr.CreateInstance("test-instance", options)
@@ -230,11 +236,13 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {

     autoRestart := false
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
         AutoRestart: &autoRestart,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -13,10 +13,12 @@ func TestCreateInstance_Success(t *testing.T) {
     manager := createTestManager()

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  8080,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  8080,
+            },
         },
     }

@@ -40,9 +42,11 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
     // Test duplicate names
     mngr := createTestManager()
     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -96,9 +100,11 @@ func TestPortManagement(t *testing.T) {

     // Test auto port assignment
     options1 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -114,10 +120,12 @@ func TestPortManagement(t *testing.T) {

     // Test port conflict detection
     options2 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model2.gguf",
-            Port:  port1, // Same port - should conflict
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model2.gguf",
+                Port:  port1, // Same port - should conflict
+            },
         },
     }

@@ -132,10 +140,12 @@ func TestPortManagement(t *testing.T) {
     // Test port release on deletion
     specificPort := 8080
     options3 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
-            Port:  specificPort,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+                Port:  specificPort,
+            },
         },
     }

@@ -160,9 +170,11 @@ func TestInstanceOperations(t *testing.T) {
     manager := createTestManager()

     options := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -183,10 +195,12 @@ func TestInstanceOperations(t *testing.T) {

     // Update instance
     newOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/new-model.gguf",
-            Port:  8081,
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/new-model.gguf",
+                Port:  8081,
+            },
         },
     }

@@ -194,8 +208,8 @@ func TestInstanceOperations(t *testing.T) {
     if err != nil {
         t.Fatalf("UpdateInstance failed: %v", err)
     }
-    if updated.GetOptions().LlamaServerOptions.Model != "/path/to/new-model.gguf" {
-        t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().LlamaServerOptions.Model)
+    if updated.GetOptions().BackendOptions.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+        t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().BackendOptions.LlamaServerOptions.Model)
     }

     // List instances

@@ -35,9 +35,11 @@ func TestTimeoutFunctionality(t *testing.T) {
     idleTimeout := 1 // 1 minute
     options := &instance.Options{
         IdleTimeout: &idleTimeout,
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
     }

@@ -84,9 +86,11 @@ func TestTimeoutFunctionality(t *testing.T) {

     // Test that instance without timeout doesn't timeout
     noTimeoutOptions := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model.gguf",
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model.gguf",
+            },
         },
         // No IdleTimeout set
     }
@@ -115,25 +119,31 @@ func TestEvictLRUInstance_Success(t *testing.T) {

     // Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
     options1 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model1.gguf",
-        },
         IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model1.gguf",
+            },
+        },
     }
     options2 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model2.gguf",
-        },
         IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model2.gguf",
+            },
+        },
     }
     options3 := &instance.Options{
-        BackendType: backends.BackendTypeLlamaCpp,
-        LlamaServerOptions: &backends.LlamaServerOptions{
-            Model: "/path/to/model3.gguf",
-        },
         IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
+        BackendOptions: backends.Options{
+            BackendType: backends.BackendTypeLlamaCpp,
+            LlamaServerOptions: &backends.LlamaServerOptions{
+                Model: "/path/to/model3.gguf",
+            },
+        },
     }

     inst1, err := manager.CreateInstance("instance-1", options1)
@@ -197,11 +207,13 @@ func TestEvictLRUInstance_NoEligibleInstances(t *testing.T) {
     // Helper function to create instances with different timeout configurations
     createInstanceWithTimeout := func(manager manager.InstanceManager, name, model string, timeout *int) *instance.Instance {
         options := &instance.Options{
-            BackendType: backends.BackendTypeLlamaCpp,
-            LlamaServerOptions: &backends.LlamaServerOptions{
-                Model: model,
-            },
             IdleTimeout: timeout,
+            BackendOptions: backends.Options{
+                BackendType: backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: &backends.LlamaServerOptions{
+                    Model: model,
+                },
+            },
         }
         inst, err := manager.CreateInstance(name, options)
         if err != nil {

@@ -40,7 +40,7 @@ func (h *Handler) LlamaCppProxy(onDemandStart bool) http.HandlerFunc {
             return
         }

-        if options.BackendType != backends.BackendTypeLlamaCpp {
+        if options.BackendOptions.BackendType != backends.BackendTypeLlamaCpp {
             http.Error(w, "Instance is not a llama.cpp server.", http.StatusBadRequest)
             return
         }
@@ -133,8 +133,10 @@ func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
             return
         }
         options := &instance.Options{
-            BackendType:        backends.BackendTypeLlamaCpp,
-            LlamaServerOptions: llamaOptions,
+            BackendOptions: backends.Options{
+                BackendType:        backends.BackendTypeLlamaCpp,
+                LlamaServerOptions: llamaOptions,
+            },
         }
         w.Header().Set("Content-Type", "application/json")
         if err := json.NewEncoder(w).Encode(options); err != nil {
@@ -186,8 +188,10 @@ func (h *Handler) ParseMlxCommand() http.HandlerFunc {
         backendType := backends.BackendTypeMlxLm

         options := &instance.Options{
-            BackendType:      backendType,
-            MlxServerOptions: mlxOptions,
+            BackendOptions: backends.Options{
+                BackendType:      backendType,
+                MlxServerOptions: mlxOptions,
+            },
         }

         w.Header().Set("Content-Type", "application/json")
@@ -239,8 +243,10 @@ func (h *Handler) ParseVllmCommand() http.HandlerFunc {
         backendType := backends.BackendTypeVllm

         options := &instance.Options{
-            BackendType:       backendType,
-            VllmServerOptions: vllmOptions,
+            BackendOptions: backends.Options{
+                BackendType:       backendType,
+                VllmServerOptions: vllmOptions,
+            },
         }

         w.Header().Set("Content-Type", "application/json")

|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"llamactl/pkg/backends"
|
"llamactl/pkg/backends"
|
||||||
"llamactl/pkg/instance"
|
|
||||||
"llamactl/pkg/testutil"
|
|
||||||
"llamactl/pkg/validation"
|
"llamactl/pkg/validation"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -57,13 +55,11 @@ func TestValidateInstanceName(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestValidateInstanceOptions_NilOptions(t *testing.T) {
|
func TestValidateInstanceOptions_NilOptions(t *testing.T) {
|
||||||
err := validation.ValidateInstanceOptions(nil)
|
var opts backends.Options
|
||||||
|
err := opts.ValidateInstanceOptions()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Error("Expected error for nil options")
|
t.Error("Expected error for nil options")
|
||||||
}
|
}
|
||||||
if !strings.Contains(err.Error(), "options cannot be nil") {
|
|
||||||
t.Errorf("Expected 'options cannot be nil' error, got: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidateInstanceOptions_PortValidation(t *testing.T) {
|
func TestValidateInstanceOptions_PortValidation(t *testing.T) {
|
||||||
@@ -82,14 +78,14 @@ func TestValidateInstanceOptions_PortValidation(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
options := &instance.Options{
|
options := backends.Options{
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Port: tt.port,
|
Port: tt.port,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := validation.ValidateInstanceOptions(options)
|
err := options.ValidateInstanceOptions()
|
||||||
if (err != nil) != tt.wantErr {
|
if (err != nil) != tt.wantErr {
|
||||||
t.Errorf("ValidateInstanceOptions(port=%d) error = %v, wantErr %v", tt.port, err, tt.wantErr)
|
t.Errorf("ValidateInstanceOptions(port=%d) error = %v, wantErr %v", tt.port, err, tt.wantErr)
|
||||||
}
|
}
|
||||||
@@ -136,14 +132,14 @@ func TestValidateInstanceOptions_StringInjection(t *testing.T) {
|
|||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
// Test with Model field (string field)
|
// Test with Model field (string field)
|
||||||
options := &instance.Options{
|
options := backends.Options{
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Model: tt.value,
|
Model: tt.value,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := validation.ValidateInstanceOptions(options)
|
err := options.ValidateInstanceOptions()
|
||||||
if (err != nil) != tt.wantErr {
|
if (err != nil) != tt.wantErr {
|
||||||
t.Errorf("ValidateInstanceOptions(model=%q) error = %v, wantErr %v", tt.value, err, tt.wantErr)
|
t.Errorf("ValidateInstanceOptions(model=%q) error = %v, wantErr %v", tt.value, err, tt.wantErr)
|
||||||
}
|
}
|
||||||
@@ -174,14 +170,14 @@ func TestValidateInstanceOptions_ArrayInjection(t *testing.T) {
|
|||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
// Test with Lora field (array field)
|
// Test with Lora field (array field)
|
||||||
options := &instance.Options{
|
options := backends.Options{
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Lora: tt.array,
|
Lora: tt.array,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := validation.ValidateInstanceOptions(options)
|
err := options.ValidateInstanceOptions()
|
||||||
if (err != nil) != tt.wantErr {
|
if (err != nil) != tt.wantErr {
|
||||||
t.Errorf("ValidateInstanceOptions(lora=%v) error = %v, wantErr %v", tt.array, err, tt.wantErr)
|
t.Errorf("ValidateInstanceOptions(lora=%v) error = %v, wantErr %v", tt.array, err, tt.wantErr)
|
||||||
}
|
}
|
||||||
@@ -193,12 +189,12 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
|
|||||||
// Test that injection in any field is caught
|
// Test that injection in any field is caught
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
options *instance.Options
|
options backends.Options
|
||||||
wantErr bool
|
wantErr bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "injection in model field",
|
name: "injection in model field",
|
||||||
options: &instance.Options{
|
options: backends.Options{
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Model: "safe.gguf",
|
Model: "safe.gguf",
|
||||||
@@ -209,7 +205,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "injection in log file",
|
name: "injection in log file",
|
||||||
options: &instance.Options{
|
options: backends.Options{
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Model: "safe.gguf",
|
Model: "safe.gguf",
|
||||||
@@ -220,7 +216,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "all safe fields",
|
name: "all safe fields",
|
||||||
options: &instance.Options{
|
options: backends.Options{
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Model: "/path/to/model.gguf",
|
Model: "/path/to/model.gguf",
|
||||||
@@ -236,7 +232,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
err := validation.ValidateInstanceOptions(tt.options)
|
err := tt.options.ValidateInstanceOptions()
|
||||||
if (err != nil) != tt.wantErr {
|
if (err != nil) != tt.wantErr {
|
||||||
t.Errorf("ValidateInstanceOptions() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("ValidateInstanceOptions() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
}
|
}
|
||||||
@@ -246,11 +242,8 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
|
|||||||
|
|
||||||
func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
|
func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
|
||||||
// Test that non-string fields don't interfere with validation
|
// Test that non-string fields don't interfere with validation
|
||||||
options := &instance.Options{
|
options := backends.Options{
|
||||||
AutoRestart: testutil.BoolPtr(true),
|
BackendType: backends.BackendTypeLlamaCpp,
|
||||||
MaxRestarts: testutil.IntPtr(5),
|
|
||||||
RestartDelay: testutil.IntPtr(10),
|
|
||||||
BackendType: backends.BackendTypeLlamaCpp,
|
|
||||||
LlamaServerOptions: &backends.LlamaServerOptions{
|
LlamaServerOptions: &backends.LlamaServerOptions{
|
||||||
Port: 8080,
|
Port: 8080,
|
||||||
GPULayers: 32,
|
GPULayers: 32,
|
||||||
@@ -263,7 +256,7 @@ func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := validation.ValidateInstanceOptions(options)
|
err := options.ValidateInstanceOptions()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("ValidateInstanceOptions with non-string fields should not error, got: %v", err)
|
t.Errorf("ValidateInstanceOptions with non-string fields should not error, got: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
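The last group of hunks also changes how options are validated: the package-level validation.ValidateInstanceOptions call that took an *instance.Options is replaced by a ValidateInstanceOptions method on backends.Options. A hedged usage sketch of the new call shape, using only identifiers visible in this diff (the method's internal checks and error messages are not shown here and are assumptions):

    opts := backends.Options{
        BackendType: backends.BackendTypeLlamaCpp,
        LlamaServerOptions: &backends.LlamaServerOptions{
            Model: "/path/to/model.gguf",
        },
    }
    if err := opts.ValidateInstanceOptions(); err != nil {
        // reject the create/update request with the validation error
    }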