Fix local instance detection

2025-10-16 21:26:04 +02:00
parent cf20f304b3
commit c5097e59be
9 changed files with 46 additions and 29 deletions

View File

@@ -49,6 +49,7 @@ type instanceManager struct {
 	ports           map[int]bool
 	instancesConfig config.InstancesConfig
 	backendsConfig  config.BackendConfig
+	localNodeName   string // Name of the local node
 
 	// Timeout checker
 	timeoutChecker *time.Ticker
@@ -63,7 +64,7 @@ type instanceManager struct {
 }
 
 // NewInstanceManager creates a new instance of InstanceManager.
-func NewInstanceManager(backendsConfig config.BackendConfig, instancesConfig config.InstancesConfig, nodesConfig map[string]config.NodeConfig) InstanceManager {
+func NewInstanceManager(backendsConfig config.BackendConfig, instancesConfig config.InstancesConfig, nodesConfig map[string]config.NodeConfig, localNodeName string) InstanceManager {
 	if instancesConfig.TimeoutCheckInterval <= 0 {
 		instancesConfig.TimeoutCheckInterval = 5 // Default to 5 minutes if not set
 	}
@@ -81,6 +82,7 @@ func NewInstanceManager(backendsConfig config.BackendConfig, instancesConfig con
 		ports:           make(map[int]bool),
 		instancesConfig: instancesConfig,
 		backendsConfig:  backendsConfig,
+		localNodeName:   localNodeName,
 		timeoutChecker:  time.NewTicker(time.Duration(instancesConfig.TimeoutCheckInterval) * time.Minute),
 		shutdownChan:    make(chan struct{}),
@@ -274,7 +276,8 @@ func (im *instanceManager) loadInstance(name, path string) error {
 	options := persistedInstance.GetOptions()
 
 	// Check if this is a remote instance
-	isRemote := options != nil && len(options.Nodes) > 0
+	// An instance is remote if Nodes is specified AND the first node is not the local node
+	isRemote := options != nil && len(options.Nodes) > 0 && options.Nodes[0] != im.localNodeName
 
 	var statusCallback func(oldStatus, newStatus instance.InstanceStatus)
 	if !isRemote {
@@ -285,7 +288,7 @@ func (im *instanceManager) loadInstance(name, path string) error {
 	}
 
 	// Create new inst using NewInstance (handles validation, defaults, setup)
-	inst := instance.NewInstance(name, &im.backendsConfig, &im.instancesConfig, options, statusCallback)
+	inst := instance.NewInstance(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, statusCallback)
 
 	// Restore persisted fields that NewInstance doesn't set
 	inst.Created = persistedInstance.Created

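The behavioral change in this file: previously any instance whose options listed at least one node was treated as remote, even when that node was the manager itself. The new predicate also compares the first listed node against the manager's own localNodeName. A minimal standalone sketch of the rule (the isRemoteInstance helper is illustrative only; the real code inlines the expression):

package main

import "fmt"

// isRemoteInstance mirrors the new check: remote only when at least one
// node is named and the first named node differs from the local node.
func isRemoteInstance(nodes []string, localNodeName string) bool {
	return len(nodes) > 0 && nodes[0] != localNodeName
}

func main() {
	fmt.Println(isRemoteInstance(nil, "main"))                 // false: no nodes, runs locally
	fmt.Println(isRemoteInstance([]string{"main"}, "main"))    // false: pinned to this node
	fmt.Println(isRemoteInstance([]string{"worker1"}, "main")) // true: lives on another node
}
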
View File

@@ -34,7 +34,7 @@ func TestNewInstanceManager(t *testing.T) {
 		TimeoutCheckInterval: 5,
 	}
 
-	mgr := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	mgr := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 	if mgr == nil {
 		t.Fatal("NewInstanceManager returned nil")
 	}
@@ -69,7 +69,7 @@ func TestPersistence(t *testing.T) {
 	}
 
 	// Test instance persistence on creation
-	manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 	options := &instance.CreateInstanceOptions{
 		BackendType: backends.BackendTypeLlamaCpp,
 		LlamaServerOptions: &llamacpp.LlamaServerOptions{
@@ -90,7 +90,7 @@ func TestPersistence(t *testing.T) {
 	}
 
 	// Test loading instances from disk
-	manager2 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	manager2 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 	instances, err := manager2.ListInstances()
 	if err != nil {
 		t.Fatalf("ListInstances failed: %v", err)
@@ -207,7 +207,7 @@ func createTestManager() manager.InstanceManager {
 		DefaultRestartDelay:  5,
 		TimeoutCheckInterval: 5,
 	}
-	return manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	return manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 }
 
 func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
@@ -227,7 +227,7 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
 	}
 
 	// Create first manager and instance with auto-restart disabled
-	manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 
 	autoRestart := false
 	options := &instance.CreateInstanceOptions{
@@ -252,7 +252,7 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
 	manager1.Shutdown()
 
 	// Create second manager (simulating restart of llamactl)
-	manager2 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	manager2 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 
 	// Get the loaded instance
 	loadedInst, err := manager2.GetInstance("test-instance")

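The test changes above only thread the new "main" argument through existing scenarios. A table-driven test aimed at the locality rule itself could look like this sketch (isRemoteInstance again stands in for the inlined expression; no such helper or test exists in the repository):

package manager_test

import "testing"

func isRemoteInstance(nodes []string, localNodeName string) bool {
	return len(nodes) > 0 && nodes[0] != localNodeName
}

func TestIsRemoteInstance(t *testing.T) {
	cases := []struct {
		name  string
		nodes []string
		want  bool
	}{
		{"no nodes is local", nil, false},
		{"first node is the local node", []string{"main"}, false},
		{"first node is remote", []string{"worker1"}, true},
		{"only the first node is consulted", []string{"main", "worker1"}, false},
	}
	for _, c := range cases {
		if got := isRemoteInstance(c.nodes, "main"); got != c.want {
			t.Errorf("%s: got %v, want %v", c.name, got, c.want)
		}
	}
}

The last case highlights a property of the check as written: only Nodes[0] is consulted, so an instance listing the local node first is local regardless of what follows.
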
View File

@@ -99,7 +99,8 @@ func (im *instanceManager) CreateInstance(name string, options *instance.CreateI
 	}
 
 	// Check if this is a remote instance
-	isRemote := len(options.Nodes) > 0
+	// An instance is remote if Nodes is specified AND the first node is not the local node
+	isRemote := len(options.Nodes) > 0 && options.Nodes[0] != im.localNodeName
 
 	var nodeConfig *config.NodeConfig
 	if isRemote {
@@ -119,7 +120,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.CreateI
 		// Create a local stub that preserves the Nodes field for tracking
 		// We keep the original options (with Nodes) so IsRemote() works correctly
-		inst := instance.NewInstance(name, &im.backendsConfig, &im.instancesConfig, options, nil)
+		inst := instance.NewInstance(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, nil)
 
 		// Update the local stub with all remote data (preserving Nodes)
 		im.updateLocalInstanceFromRemote(inst, remoteInst)
 
@@ -152,7 +153,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.CreateI
 		im.onStatusChange(name, oldStatus, newStatus)
 	}
 
-	inst := instance.NewInstance(name, &im.backendsConfig, &im.instancesConfig, options, statusCallback)
+	inst := instance.NewInstance(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, statusCallback)
 	im.instances[inst.Name] = inst
 
 	if err := im.persistInstance(inst); err != nil {

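With the predicate fixed, CreateInstance takes one of two paths: a remote instance becomes a local stub populated with data from the remote node, while a local instance gets a status callback and is persisted. A simplified sketch of that branching (remoteCreate and localCreate are hypothetical stand-ins for the two paths in the real method):

package main

import "fmt"

// remoteCreate stands in for the remote path: create a stub and mirror remote data.
func remoteCreate(name, node string) string {
	return fmt.Sprintf("%s: stub tracking remote node %s", name, node)
}

// localCreate stands in for the local path: spawn the process with a status callback.
func localCreate(name string) string {
	return fmt.Sprintf("%s: local instance with status callback", name)
}

// createInstance mirrors the branching CreateInstance performs after this change.
func createInstance(name string, nodes []string, localNodeName string) string {
	if len(nodes) > 0 && nodes[0] != localNodeName {
		return remoteCreate(name, nodes[0])
	}
	return localCreate(name)
}

func main() {
	for _, nodes := range [][]string{nil, {"main"}, {"worker1"}} {
		fmt.Println(createInstance("test-instance", nodes, "main"))
	}
}
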
View File

@@ -75,7 +75,7 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
 		MaxInstances:         1, // Very low limit for testing
 		TimeoutCheckInterval: 5,
 	}
 
-	limitedManager := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	limitedManager := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 	_, err = limitedManager.CreateInstance("instance1", options)
 	if err != nil {

View File

@@ -23,7 +23,7 @@ func TestTimeoutFunctionality(t *testing.T) {
 		MaxInstances: 5,
 	}
 
-	manager := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{})
+	manager := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 	if manager == nil {
 		t.Fatal("Manager should be initialized with timeout checker")
 	}