Mirror of https://github.com/lordmathis/llamactl.git, synced 2025-11-06 00:54:23 +00:00
Fix tests
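This commit rewrites the test fixtures from the old flat executable fields (LlamaExecutable, MLXLMExecutable) on config.BackendConfig to per-backend config.BackendSettings values (LlamaCpp, MLX, and in some tests VLLM). The hunk numbering below restarts several times, so the changes appear to span several test files whose paths are not shown in this view. The config type definitions themselves are not part of the diff; the following is only a sketch of the shape the test literals imply, with field names taken from the hunks and types inferred from the values (the real definitions in the llamactl config package may differ):

// Sketch only: inferred from the struct literals in the hunks below,
// not copied from the actual llamactl config package.
package config

// BackendSettings describes how to launch one backend server.
type BackendSettings struct {
	Command string   // executable name, e.g. "llama-server" or "mlx_lm.server"
	Args    []string // default arguments, e.g. []string{"serve"} for vLLM
}

// BackendConfig groups the per-backend settings referenced by the tests.
type BackendConfig struct {
	LlamaCpp BackendSettings
	MLX      BackendSettings
	VLLM     BackendSettings
}
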
@@ -12,8 +12,18 @@ import (
 
 func TestNewInstance(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+			Args:    []string{},
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+			Args:    []string{},
+		},
+		VLLM: config.BackendSettings{
+			Command: "vllm",
+			Args:    []string{"serve"},
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -66,8 +76,18 @@ func TestNewInstance(t *testing.T) {
 
 func TestNewInstance_WithRestartOptions(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+			Args:    []string{},
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+			Args:    []string{},
+		},
+		VLLM: config.BackendSettings{
+			Command: "vllm",
+			Args:    []string{"serve"},
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -112,8 +132,18 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
 
 func TestSetOptions(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+			Args:    []string{},
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+			Args:    []string{},
+		},
+		VLLM: config.BackendSettings{
+			Command: "vllm",
+			Args:    []string{"serve"},
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -163,8 +193,18 @@ func TestSetOptions(t *testing.T) {
 
 func TestGetProxy(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+			Args:    []string{},
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+			Args:    []string{},
+		},
+		VLLM: config.BackendSettings{
+			Command: "vllm",
+			Args:    []string{"serve"},
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -205,8 +245,18 @@ func TestGetProxy(t *testing.T) {
 
 func TestMarshalJSON(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+			Args:    []string{},
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+			Args:    []string{},
+		},
+		VLLM: config.BackendSettings{
+			Command: "vllm",
+			Args:    []string{"serve"},
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -364,8 +414,18 @@ func TestCreateInstanceOptionsValidation(t *testing.T) {
 	}
 
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+			Args:    []string{},
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+			Args:    []string{},
+		},
+		VLLM: config.BackendSettings{
+			Command: "vllm",
+			Args:    []string{"serve"},
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{

@@ -34,8 +34,12 @@ func (m *MockTimeProvider) SetTime(t time.Time) {
 
 func TestUpdateLastRequestTime(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -60,8 +64,12 @@ func TestUpdateLastRequestTime(t *testing.T) {
 
 func TestShouldTimeout_NotRunning(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -90,8 +98,12 @@ func TestShouldTimeout_NotRunning(t *testing.T) {
 
 func TestShouldTimeout_NoTimeoutConfigured(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -133,8 +145,12 @@ func TestShouldTimeout_NoTimeoutConfigured(t *testing.T) {
 
 func TestShouldTimeout_WithinTimeLimit(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -167,8 +183,12 @@ func TestShouldTimeout_WithinTimeLimit(t *testing.T) {
 
 func TestShouldTimeout_ExceedsTimeLimit(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{
@@ -207,8 +227,12 @@ func TestShouldTimeout_ExceedsTimeLimit(t *testing.T) {
 
 func TestTimeoutConfiguration_Validation(t *testing.T) {
 	backendConfig := &config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	globalSettings := &config.InstancesConfig{

@@ -16,8 +16,12 @@ import (
 
 func TestNewInstanceManager(t *testing.T) {
 	backendConfig := config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	cfg := config.InstancesConfig{
@@ -49,8 +53,12 @@ func TestPersistence(t *testing.T) {
 	tempDir := t.TempDir()
 
 	backendConfig := config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	cfg := config.InstancesConfig{
@@ -182,8 +190,12 @@ func TestShutdown(t *testing.T) {
 // Helper function to create a test manager with standard config
 func createTestManager() manager.InstanceManager {
 	backendConfig := config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 
 	cfg := config.InstancesConfig{

@@ -63,8 +63,12 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
 
 	// Test max instances limit
 	backendConfig := config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{
+			Command: "llama-server",
+		},
+		MLX: config.BackendSettings{
+			Command: "mlx_lm.server",
+		},
 	}
 	cfg := config.InstancesConfig{
 		PortRange: [2]int{8000, 9000},

@@ -14,8 +14,8 @@ import (
 func TestTimeoutFunctionality(t *testing.T) {
 	// Test timeout checker initialization
 	backendConfig := config.BackendConfig{
-		LlamaExecutable: "llama-server",
-		MLXLMExecutable: "mlx_lm.server",
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+		MLX:      config.BackendSettings{Command: "mlx_lm.server"},
 	}
 	cfg := config.InstancesConfig{
 		PortRange: [2]int{8000, 9000},
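
Every hunk above rebuilds the same backend fixture by hand, so a shared helper in the test packages would keep any future change to the backend config in one spot. The sketch below is purely hypothetical (the helper name newTestBackendConfig, the test package name, and the import path are assumptions, not part of this commit); it only illustrates how the new nested literal composes.

// Hypothetical helper, not part of this commit: builds the backend fixture
// that the tests above construct inline with the new nested settings.
package manager_test

import "llamactl/pkg/config" // assumed import path; adjust to the real one

func newTestBackendConfig() config.BackendConfig {
	return config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server", Args: []string{}},
		MLX:      config.BackendSettings{Command: "mlx_lm.server", Args: []string{}},
		VLLM:     config.BackendSettings{Command: "vllm", Args: []string{"serve"}},
	}
}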