Add tests for status change callback and options preservation
@@ -174,7 +174,7 @@ func (i *Instance) SetOptions(opts *Options) {
 	}
 
 	// Preserve the original nodes to prevent changing instance location
-	if i.options != nil && i.options.get() != nil && i.options.get().Nodes != nil {
+	if i.options != nil && i.options.get() != nil {
 		opts.Nodes = i.options.get().Nodes
 	}
 
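The guard change above means SetOptions now restores the original Nodes whenever existing options are present, not only when the stored Nodes map was non-nil, so nil and empty node sets can no longer be overwritten by an update. A minimal sketch of the resulting behavior, reusing the fixtures from the tests added below (the Example function and the "demo" instance name are illustrative, not part of the commit):

package instance_test

import (
	"fmt"

	"llamactl/pkg/backends"
	"llamactl/pkg/backends/llamacpp"
	"llamactl/pkg/config"
	"llamactl/pkg/instance"
)

// Sketch: an instance created with nil Nodes keeps nil Nodes even after an
// update that tries to move it to "worker1"; the model change still applies.
func ExampleInstance_SetOptions_nodesPreserved() {
	backendConfig := &config.BackendConfig{
		LlamaCpp: config.BackendSettings{Command: "llama-server"},
	}
	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}

	inst := instance.New("demo", backendConfig, globalSettings, &instance.Options{
		BackendType:        backends.BackendTypeLlamaCpp,
		Nodes:              nil, // created without explicit nodes
		LlamaServerOptions: &llamacpp.LlamaServerOptions{Model: "/path/to/model.gguf"},
	}, "main", nil)

	inst.SetOptions(&instance.Options{
		BackendType:        backends.BackendTypeLlamaCpp,
		Nodes:              map[string]struct{}{"worker1": {}}, // attempted relocation
		LlamaServerOptions: &llamacpp.LlamaServerOptions{Model: "/path/to/new-model.gguf"},
	})

	fmt.Println(len(inst.GetOptions().Nodes))
	// Output: 0
}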
@@ -8,6 +8,7 @@ import (
 	"llamactl/pkg/instance"
 	"llamactl/pkg/testutil"
 	"testing"
+	"time"
 )
 
 func TestNewInstance(t *testing.T) {
@@ -515,3 +516,303 @@ func TestCreateOptionsValidation(t *testing.T) {
 		})
 	}
 }
+
+func TestStatusChangeCallback(t *testing.T) {
+	backendConfig := &config.BackendConfig{
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+	}
+	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
+	options := &instance.Options{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			Model: "/path/to/model.gguf",
+		},
+	}
+
+	var callbackOldStatus, callbackNewStatus instance.Status
+	callbackCalled := false
+
+	onStatusChange := func(oldStatus, newStatus instance.Status) {
+		callbackOldStatus = oldStatus
+		callbackNewStatus = newStatus
+		callbackCalled = true
+	}
+
+	inst := instance.New("test", backendConfig, globalSettings, options, "main", onStatusChange)
+
+	inst.SetStatus(instance.Running)
+
+	if !callbackCalled {
+		t.Error("Expected status change callback to be called")
+	}
+	if callbackOldStatus != instance.Stopped {
+		t.Errorf("Expected old status Stopped, got %v", callbackOldStatus)
+	}
+	if callbackNewStatus != instance.Running {
+		t.Errorf("Expected new status Running, got %v", callbackNewStatus)
+	}
+}
+
+func TestSetOptions_NodesPreserved(t *testing.T) {
+	backendConfig := &config.BackendConfig{
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+	}
+	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
+
+	tests := []struct {
+		name          string
+		initialNodes  map[string]struct{}
+		updateNodes   map[string]struct{}
+		expectedNodes map[string]struct{}
+	}{
+		{
+			name:          "nil nodes preserved as nil",
+			initialNodes:  nil,
+			updateNodes:   map[string]struct{}{"worker1": {}},
+			expectedNodes: nil,
+		},
+		{
+			name:          "empty nodes preserved as empty",
+			initialNodes:  map[string]struct{}{},
+			updateNodes:   map[string]struct{}{"worker1": {}},
+			expectedNodes: map[string]struct{}{},
+		},
+		{
+			name:          "existing nodes preserved",
+			initialNodes:  map[string]struct{}{"worker1": {}, "worker2": {}},
+			updateNodes:   map[string]struct{}{"worker3": {}},
+			expectedNodes: map[string]struct{}{"worker1": {}, "worker2": {}},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			options := &instance.Options{
+				BackendType: backends.BackendTypeLlamaCpp,
+				Nodes:       tt.initialNodes,
+				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+					Model: "/path/to/model.gguf",
+				},
+			}
+
+			inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)
+
+			// Attempt to update nodes (should be ignored)
+			updateOptions := &instance.Options{
+				BackendType: backends.BackendTypeLlamaCpp,
+				Nodes:       tt.updateNodes,
+				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+					Model: "/path/to/new-model.gguf",
+				},
+			}
+			inst.SetOptions(updateOptions)
+
+			opts := inst.GetOptions()
+
+			// Verify nodes are preserved
+			if len(opts.Nodes) != len(tt.expectedNodes) {
+				t.Errorf("Expected %d nodes, got %d", len(tt.expectedNodes), len(opts.Nodes))
+			}
+			for node := range tt.expectedNodes {
+				if _, exists := opts.Nodes[node]; !exists {
+					t.Errorf("Expected node %s to exist", node)
+				}
+			}
+
+			// Verify other options were updated
+			if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+				t.Errorf("Expected model to be updated to '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
+			}
+		})
+	}
+}
+
+func TestProcessErrorCases(t *testing.T) {
+	backendConfig := &config.BackendConfig{
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+	}
+	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
+	options := &instance.Options{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			Model: "/path/to/model.gguf",
+		},
+	}
+
+	inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)
+
+	// Stop when not running should return error
+	err := inst.Stop()
+	if err == nil {
+		t.Error("Expected error when stopping non-running instance")
+	}
+
+	// Simulate running state
+	inst.SetStatus(instance.Running)
+
+	// Start when already running should return error
+	err = inst.Start()
+	if err == nil {
+		t.Error("Expected error when starting already running instance")
+	}
+}
+
+func TestRemoteInstanceOperations(t *testing.T) {
+	backendConfig := &config.BackendConfig{
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+	}
+	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
+	options := &instance.Options{
+		BackendType: backends.BackendTypeLlamaCpp,
+		Nodes:       map[string]struct{}{"remote-node": {}}, // Remote instance
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			Model: "/path/to/model.gguf",
+		},
+	}
+
+	inst := instance.New("remote-test", backendConfig, globalSettings, options, "main", nil)
+
+	if !inst.IsRemote() {
+		t.Error("Expected instance to be remote")
+	}
+
+	// Start should fail for remote instance
+	if err := inst.Start(); err == nil {
+		t.Error("Expected error when starting remote instance")
+	}
+
+	// Stop should fail for remote instance
+	if err := inst.Stop(); err == nil {
+		t.Error("Expected error when stopping remote instance")
+	}
+
+	// Restart should fail for remote instance
+	if err := inst.Restart(); err == nil {
+		t.Error("Expected error when restarting remote instance")
+	}
+
+	// GetProxy should fail for remote instance
+	if _, err := inst.GetProxy(); err == nil {
+		t.Error("Expected error when getting proxy for remote instance")
+	}
+
+	// GetLogs should fail for remote instance
+	if _, err := inst.GetLogs(10); err == nil {
+		t.Error("Expected error when getting logs for remote instance")
+	}
+}
+
+func TestProxyClearOnOptionsChange(t *testing.T) {
+	backendConfig := &config.BackendConfig{
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+	}
+	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
+	options := &instance.Options{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			Host: "localhost",
+			Port: 8080,
+		},
+	}
+
+	inst := instance.New("test", backendConfig, globalSettings, options, "main", nil)
+
+	// Get initial proxy
+	proxy1, err := inst.GetProxy()
+	if err != nil {
+		t.Fatalf("Failed to get initial proxy: %v", err)
+	}
+
+	// Update options (should clear proxy)
+	newOptions := &instance.Options{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			Host: "localhost",
+			Port: 8081, // Different port
+		},
+	}
+	inst.SetOptions(newOptions)
+
+	// Get proxy again - should be recreated with new port
+	proxy2, err := inst.GetProxy()
+	if err != nil {
+		t.Fatalf("Failed to get proxy after options change: %v", err)
+	}
+
+	// Proxies should be different instances (recreated)
+	if proxy1 == proxy2 {
+		t.Error("Expected proxy to be recreated after options change")
+	}
+}
+
+func TestIdleTimeout(t *testing.T) {
+	backendConfig := &config.BackendConfig{
+		LlamaCpp: config.BackendSettings{Command: "llama-server"},
+	}
+	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
+
+	t.Run("not running never times out", func(t *testing.T) {
+		timeout := 1
+		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
+			BackendType: backends.BackendTypeLlamaCpp,
+			IdleTimeout: &timeout,
+			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				Model: "/path/to/model.gguf",
+			},
+		}, "main", nil)
+
+		if inst.ShouldTimeout() {
+			t.Error("Non-running instance should never timeout")
+		}
+	})
+
+	t.Run("no timeout configured", func(t *testing.T) {
+		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
+			BackendType: backends.BackendTypeLlamaCpp,
+			IdleTimeout: nil, // No timeout
+			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				Model: "/path/to/model.gguf",
+			},
+		}, "main", nil)
+		inst.SetStatus(instance.Running)
+
+		if inst.ShouldTimeout() {
+			t.Error("Instance with no timeout configured should not timeout")
+		}
+	})
+
+	t.Run("timeout exceeded", func(t *testing.T) {
+		timeout := 1 // 1 minute
+		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
+			BackendType: backends.BackendTypeLlamaCpp,
+			IdleTimeout: &timeout,
+			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				Model: "/path/to/model.gguf",
+			},
+		}, "main", nil)
+		inst.SetStatus(instance.Running)
+
+		// Use mock time provider
+		mockTime := &mockTimeProvider{currentTime: time.Now().Unix()}
+		inst.SetTimeProvider(mockTime)
+
+		// Set last request time to now
+		inst.UpdateLastRequestTime()
+
+		// Advance time by 2 minutes (exceeds 1 minute timeout)
+		mockTime.currentTime = time.Now().Add(2 * time.Minute).Unix()
+
+		if !inst.ShouldTimeout() {
+			t.Error("Instance should timeout when idle time exceeds configured timeout")
+		}
+	})
+}
+
+// mockTimeProvider for timeout testing
+type mockTimeProvider struct {
+	currentTime int64 // Unix timestamp
+}
+
+func (m *mockTimeProvider) Now() time.Time {
+	return time.Unix(m.currentTime, 0)
+}
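A note on the helper above: mockTimeProvider stores a plain int64, which is safe here because each subtest reads and writes currentTime from a single goroutine. The shared helper removed in the next hunk used atomic.Int64 instead; if a test ever advances the clock from another goroutine, a goroutine-safe variant along those lines would be the safer shape (a sketch mirroring the deleted MockTimeProvider, not part of this commit):

package instance_test

import (
	"sync/atomic"
	"time"
)

// atomicMockTimeProvider is a goroutine-safe mock clock: Now and SetTime
// may be called concurrently because the timestamp is stored atomically.
type atomicMockTimeProvider struct {
	currentTime atomic.Int64 // Unix timestamp
}

func (m *atomicMockTimeProvider) Now() time.Time {
	return time.Unix(m.currentTime.Load(), 0)
}

func (m *atomicMockTimeProvider) SetTime(t time.Time) {
	m.currentTime.Store(t.Unix())
}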
@@ -1,274 +0,0 @@
-package instance_test
-
-import (
-	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
-	"llamactl/pkg/config"
-	"llamactl/pkg/instance"
-	"llamactl/pkg/testutil"
-	"sync/atomic"
-	"testing"
-	"time"
-)
-
-// MockTimeProvider implements TimeProvider for testing
-type MockTimeProvider struct {
-	currentTime atomic.Int64 // Unix timestamp
-}
-
-func NewMockTimeProvider(t time.Time) *MockTimeProvider {
-	m := &MockTimeProvider{}
-	m.currentTime.Store(t.Unix())
-	return m
-}
-
-func (m *MockTimeProvider) Now() time.Time {
-	return time.Unix(m.currentTime.Load(), 0)
-}
-
-func (m *MockTimeProvider) SetTime(t time.Time) {
-	m.currentTime.Store(t.Unix())
-}
-
-// Timeout-related tests
-
-func TestUpdateLastRequestTime(t *testing.T) {
-	backendConfig := &config.BackendConfig{
-		LlamaCpp: config.BackendSettings{
-			Command: "llama-server",
-		},
-		MLX: config.BackendSettings{
-			Command: "mlx_lm.server",
-		},
-	}
-
-	globalSettings := &config.InstancesConfig{
-		LogsDir: "/tmp/test",
-	}
-
-	options := &instance.Options{
-		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
-			Model: "/path/to/model.gguf",
-		},
-	}
-
-	// Mock onStatusChange function
-	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
-
-	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
-
-	// Test that UpdateLastRequestTime doesn't panic
-	inst.UpdateLastRequestTime()
-}
-
-func TestShouldTimeout_NotRunning(t *testing.T) {
-	backendConfig := &config.BackendConfig{
-		LlamaCpp: config.BackendSettings{
-			Command: "llama-server",
-		},
-		MLX: config.BackendSettings{
-			Command: "mlx_lm.server",
-		},
-	}
-
-	globalSettings := &config.InstancesConfig{
-		LogsDir: "/tmp/test",
-	}
-
-	idleTimeout := 1 // 1 minute
-	options := &instance.Options{
-		IdleTimeout: &idleTimeout,
-		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
-			Model: "/path/to/model.gguf",
-		},
-	}
-
-	// Mock onStatusChange function
-	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
-
-	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
-
-	// Instance is not running, should not timeout regardless of configuration
-	if inst.ShouldTimeout() {
-		t.Error("Non-running instance should never timeout")
-	}
-}
-
-func TestShouldTimeout_NoTimeoutConfigured(t *testing.T) {
-	backendConfig := &config.BackendConfig{
-		LlamaCpp: config.BackendSettings{
-			Command: "llama-server",
-		},
-		MLX: config.BackendSettings{
-			Command: "mlx_lm.server",
-		},
-	}
-
-	globalSettings := &config.InstancesConfig{
-		LogsDir: "/tmp/test",
-	}
-
-	tests := []struct {
-		name        string
-		idleTimeout *int
-	}{
-		{"nil timeout", nil},
-		{"zero timeout", testutil.IntPtr(0)},
-		{"negative timeout", testutil.IntPtr(-5)},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			// Mock onStatusChange function
-			mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
-
-			options := &instance.Options{
-				IdleTimeout: tt.idleTimeout,
-				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
-					Model: "/path/to/model.gguf",
-				},
-			}
-
-			inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
-			// Simulate running state
-			inst.SetStatus(instance.Running)
-
-			if inst.ShouldTimeout() {
-				t.Errorf("Instance with %s should not timeout", tt.name)
-			}
-		})
-	}
-}
-
-func TestShouldTimeout_WithinTimeLimit(t *testing.T) {
-	backendConfig := &config.BackendConfig{
-		LlamaCpp: config.BackendSettings{
-			Command: "llama-server",
-		},
-		MLX: config.BackendSettings{
-			Command: "mlx_lm.server",
-		},
-	}
-
-	globalSettings := &config.InstancesConfig{
-		LogsDir: "/tmp/test",
-	}
-
-	idleTimeout := 5 // 5 minutes
-	options := &instance.Options{
-		IdleTimeout: &idleTimeout,
-		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
-			Model: "/path/to/model.gguf",
-		},
-	}
-
-	// Mock onStatusChange function
-	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
-
-	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
-	inst.SetStatus(instance.Running)
-
-	// Update last request time to now
-	inst.UpdateLastRequestTime()
-
-	// Should not timeout immediately
-	if inst.ShouldTimeout() {
-		t.Error("Instance should not timeout when last request was recent")
-	}
-}
-
-func TestShouldTimeout_ExceedsTimeLimit(t *testing.T) {
-	backendConfig := &config.BackendConfig{
-		LlamaCpp: config.BackendSettings{
-			Command: "llama-server",
-		},
-		MLX: config.BackendSettings{
-			Command: "mlx_lm.server",
-		},
-	}
-
-	globalSettings := &config.InstancesConfig{
-		LogsDir: "/tmp/test",
-	}
-
-	idleTimeout := 1 // 1 minute
-	options := &instance.Options{
-		IdleTimeout: &idleTimeout,
-		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
-			Model: "/path/to/model.gguf",
-		},
-	}
-
-	// Mock onStatusChange function
-	mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
-
-	inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
-	inst.SetStatus(instance.Running)
-
-	// Use MockTimeProvider to simulate old last request time
-	mockTime := NewMockTimeProvider(time.Now())
-	inst.SetTimeProvider(mockTime)
-
-	// Set last request time to now
-	inst.UpdateLastRequestTime()
-
-	// Advance time by 2 minutes (exceeds 1 minute timeout)
-	mockTime.SetTime(time.Now().Add(2 * time.Minute))
-
-	if !inst.ShouldTimeout() {
-		t.Error("Instance should timeout when last request exceeds idle timeout")
-	}
-}
-
-func TestTimeoutConfiguration_Validation(t *testing.T) {
-	backendConfig := &config.BackendConfig{
-		LlamaCpp: config.BackendSettings{
-			Command: "llama-server",
-		},
-		MLX: config.BackendSettings{
-			Command: "mlx_lm.server",
-		},
-	}
-
-	globalSettings := &config.InstancesConfig{
-		LogsDir: "/tmp/test",
-	}
-
-	tests := []struct {
-		name            string
-		inputTimeout    *int
-		expectedTimeout int
-	}{
-		{"default value when nil", nil, 0},
-		{"positive value", testutil.IntPtr(10), 10},
-		{"zero value", testutil.IntPtr(0), 0},
-		{"negative value gets corrected", testutil.IntPtr(-5), 0},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			options := &instance.Options{
-				IdleTimeout: tt.inputTimeout,
-				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
-					Model: "/path/to/model.gguf",
-				},
-			}
-
-			// Mock onStatusChange function
-			mockOnStatusChange := func(oldStatus, newStatus instance.Status) {}
-
-			inst := instance.New("test-instance", backendConfig, globalSettings, options, "main", mockOnStatusChange)
-			opts := inst.GetOptions()
-
-			if opts.IdleTimeout == nil || *opts.IdleTimeout != tt.expectedTimeout {
-				t.Errorf("Expected IdleTimeout %d, got %v", tt.expectedTimeout, opts.IdleTimeout)
-			}
-		})
-	}
-}