Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-07 01:24:27 +00:00)
Flatten backends package structure
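The change is mechanical: the llamacpp subpackage is folded into pkg/backends, so every test file drops the llamactl/pkg/backends/llamacpp import and constructs its server options from the backends package directly. A minimal before/after sketch of the call site, inferred from the hunks below (the field values are just the test fixtures):

	// Before: the options type lived in the llamacpp subpackage.
	import "llamactl/pkg/backends/llamacpp"

	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &llamacpp.LlamaServerOptions{
			Model: "/path/to/model.gguf",
		},
	}

	// After: the flattened backends package provides the same struct;
	// only the package qualifier changes, the fields are untouched.
	import "llamactl/pkg/backends"

	options := &instance.Options{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &backends.LlamaServerOptions{
			Model: "/path/to/model.gguf",
		},
	}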
@@ -3,7 +3,6 @@ package manager_test
 import (
 	"fmt"
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -72,7 +71,7 @@ func TestPersistence(t *testing.T) {
 	manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port: 8080,
 		},
@@ -134,7 +133,7 @@ func TestConcurrentAccess(t *testing.T) {
 			defer wg.Done()
 			options := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}
@@ -171,7 +170,7 @@ func TestShutdown(t *testing.T) {
 	// Create test instance
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -233,7 +232,7 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
 		AutoRestart: &autoRestart,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port: 8080,
 		},

@@ -2,7 +2,6 @@ package manager_test

 import (
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -15,7 +14,7 @@ func TestCreateInstance_Success(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port: 8080,
 		},
@@ -42,7 +41,7 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
 	mngr := createTestManager()
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -98,7 +97,7 @@ func TestPortManagement(t *testing.T) {
 	// Test auto port assignment
 	options1 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -116,7 +115,7 @@ func TestPortManagement(t *testing.T) {
 	// Test port conflict detection
 	options2 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model2.gguf",
 			Port: port1, // Same port - should conflict
 		},
@@ -134,7 +133,7 @@ func TestPortManagement(t *testing.T) {
 	specificPort := 8080
 	options3 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port: specificPort,
 		},
@@ -162,7 +161,7 @@ func TestInstanceOperations(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -185,7 +184,7 @@ func TestInstanceOperations(t *testing.T) {
 	// Update instance
 	newOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/new-model.gguf",
 			Port: 8081,
 		},

@@ -2,7 +2,6 @@ package manager_test

 import (
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -37,7 +36,7 @@ func TestTimeoutFunctionality(t *testing.T) {
 	options := &instance.Options{
 		IdleTimeout: &idleTimeout,
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -86,7 +85,7 @@ func TestTimeoutFunctionality(t *testing.T) {
 	// Test that instance without timeout doesn't timeout
 	noTimeoutOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 		// No IdleTimeout set
@@ -117,21 +116,21 @@ func TestEvictLRUInstance_Success(t *testing.T) {
 	// Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
 	options1 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model1.gguf",
 		},
 		IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
 	}
 	options2 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model2.gguf",
 		},
 		IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
 	}
 	options3 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model3.gguf",
 		},
 		IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
@@ -199,7 +198,7 @@ func TestEvictLRUInstance_NoEligibleInstances(t *testing.T) {
 	createInstanceWithTimeout := func(manager manager.InstanceManager, name, model string, timeout *int) *instance.Instance {
 		options := &instance.Options{
 			BackendType: backends.BackendTypeLlamaCpp,
-			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			LlamaServerOptions: &backends.LlamaServerOptions{
 				Model: model,
 			},
 			IdleTimeout: timeout,
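A side note on an idiom the eviction tests rely on: instance.Options takes IdleTimeout as a *int, and Go cannot take the address of a literal, so the tests use an immediately invoked closure to obtain one. An equivalent helper would read the same way; the helper name here is illustrative, not from the repository:

	// intPtr returns a pointer to v, replacing the tests' inline
	// func() *int { timeout := 1; return &timeout }() pattern.
	func intPtr(v int) *int { return &v }

	// Per the test comments, any value > 0 enables the idle timeout,
	// which is what makes an instance eligible for LRU eviction.
	options1.IdleTimeout = intPtr(1)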