Switch manager to global app config

2025-10-25 00:14:12 +02:00
parent eff59a86fd
commit 58f8861d17
4 changed files with 27 additions and 29 deletions
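
In short: manager.New previously took four separate configuration values (backends, instances, nodes, local node name) and instance.New took the local node name as its own parameter; both now work from a single *config.AppConfig, with the local node name derived inside instance.New.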

View File

@@ -58,7 +58,7 @@ func main() {
 	}

 	// Initialize the instance manager
-	instanceManager := manager.New(cfg.Backends, cfg.Instances, cfg.Nodes, cfg.LocalNode)
+	instanceManager := manager.New(&cfg)

 	// Create a new handler with the instance manager
 	handler := server.NewHandler(instanceManager, cfg)
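
The call site now hands the whole config to the manager by pointer. From the fields this commit reads off it (Backends, Instances, Nodes, LocalNode) and the types in the old manager.New signature, the struct presumably looks roughly like the sketch below; the exact definition in the config package is an assumption:

    // Sketch only — shape implied by this diff, not the actual source.
    type AppConfig struct {
        Backends  BackendConfig         // backend settings
        Instances InstancesConfig       // port range, instances dir, limits, timeouts
        Nodes     map[string]NodeConfig // remote nodes, keyed by node name
        LocalNode string                // name of the node this process runs on
    }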

View File

@@ -30,11 +30,12 @@ type Instance struct {
 }

 // New creates a new instance with the given name, log path, options and local node name
-func New(name string, globalConfig *config.AppConfig, opts *Options, localNodeName string, onStatusChange func(oldStatus, newStatus Status)) *Instance {
+func New(name string, globalConfig *config.AppConfig, opts *Options, onStatusChange func(oldStatus, newStatus Status)) *Instance {
 	globalInstanceSettings := &globalConfig.Instances
 	globalBackendSettings := &globalConfig.Backends
 	globalNodesConfig := globalConfig.Nodes
+	localNodeName := globalConfig.LocalNode

 	// Validate and copy options
 	opts.validateAndApplyDefaults(name, globalInstanceSettings)
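
With the local node name now read from globalConfig.LocalNode inside New, callers pass one argument fewer. A minimal call-site sketch, assuming cfg is a loaded config.AppConfig and opts a prepared *Options (both hypothetical names):

    inst := instance.New("demo", &cfg, opts, func(oldStatus, newStatus instance.Status) {
        log.Printf("demo: %v -> %v", oldStatus, newStatus)
    })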

View File

@@ -35,9 +35,7 @@ type instanceManager struct {
 	lifecycle *lifecycleManager

 	// Configuration
-	instancesConfig config.InstancesConfig
-	backendsConfig  config.BackendConfig
-	localNodeName   string // Name of the local node
+	globalConfig *config.AppConfig

 	// Synchronization
 	instanceLocks sync.Map // map[string]*sync.Mutex - per-instance locks for concurrent operations
@@ -45,29 +43,30 @@ type instanceManager struct {
 }

 // New creates a new instance of InstanceManager.
-func New(backendsConfig config.BackendConfig, instancesConfig config.InstancesConfig, nodesConfig map[string]config.NodeConfig, localNodeName string) InstanceManager {
-	if instancesConfig.TimeoutCheckInterval <= 0 {
-		instancesConfig.TimeoutCheckInterval = 5 // Default to 5 minutes if not set
+func New(globalConfig *config.AppConfig) InstanceManager {
+	if globalConfig.Instances.TimeoutCheckInterval <= 0 {
+		globalConfig.Instances.TimeoutCheckInterval = 5 // Default to 5 minutes if not set
 	}

 	// Initialize components
 	registry := newInstanceRegistry()

 	// Initialize port allocator
-	portRange := instancesConfig.PortRange
+	portRange := globalConfig.Instances.PortRange
 	ports, err := newPortAllocator(portRange[0], portRange[1])
 	if err != nil {
 		log.Fatalf("Failed to create port allocator: %v", err)
 	}

 	// Initialize persistence
-	persistence, err := newInstancePersister(instancesConfig.InstancesDir)
+	persistence, err := newInstancePersister(globalConfig.Instances.InstancesDir)
 	if err != nil {
 		log.Fatalf("Failed to create instance persister: %v", err)
 	}

 	// Initialize remote manager
-	remote := newRemoteManager(nodesConfig, 30*time.Second)
+	remote := newRemoteManager(globalConfig.Nodes, 30*time.Second)

 	// Create manager instance
 	im := &instanceManager{
@@ -75,13 +74,11 @@ func New(backendsConfig config.BackendConfig, instancesConfig config.InstancesCo
 		ports:       ports,
 		persistence: persistence,
 		remote:      remote,
-		instancesConfig: instancesConfig,
-		backendsConfig:  backendsConfig,
-		localNodeName:   localNodeName,
+		globalConfig: globalConfig,
 	}

 	// Initialize lifecycle manager (needs reference to manager for Stop/Evict operations)
-	checkInterval := time.Duration(instancesConfig.TimeoutCheckInterval) * time.Minute
+	checkInterval := time.Duration(globalConfig.Instances.TimeoutCheckInterval) * time.Minute
 	im.lifecycle = newLifecycleManager(registry, im, checkInterval, true)

 	// Load existing instances from disk
@@ -165,7 +162,7 @@ func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error
 	var isRemote bool
 	var nodeName string
 	if options != nil {
-		if _, isLocal := options.Nodes[im.localNodeName]; !isLocal && len(options.Nodes) > 0 {
+		if _, isLocal := options.Nodes[im.globalConfig.LocalNode]; !isLocal && len(options.Nodes) > 0 {
 			// Get the first node from the set
 			for node := range options.Nodes {
 				nodeName = node
@@ -184,7 +181,7 @@ func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error
 	}

 	// Create new inst using NewInstance (handles validation, defaults, setup)
-	inst := instance.New(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, statusCallback)
+	inst := instance.New(name, im.globalConfig, options, statusCallback)

 	// Restore persisted fields that NewInstance doesn't set
 	inst.Created = persistedInst.Created
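
One consequence of taking the config by pointer: the 5-minute TimeoutCheckInterval default that New applies is written back through the pointer, so the caller's cfg is mutated. A sketch of the observable effect, reusing the hypothetical cfg from above:

    cfg.Instances.TimeoutCheckInterval = 0 // unset
    _ = manager.New(&cfg)
    // cfg.Instances.TimeoutCheckInterval is now 5: the default escapes into
    // the shared config instead of staying local to the manager.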

View File

@@ -68,7 +68,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 	}

 	// Check if this is a remote instance (local node not in the Nodes set)
-	if _, isLocal := options.Nodes[im.localNodeName]; !isLocal && len(options.Nodes) > 0 {
+	if _, isLocal := options.Nodes[im.globalConfig.LocalNode]; !isLocal && len(options.Nodes) > 0 {
 		// Get the first node from the set
 		var nodeName string
 		for node := range options.Nodes {
@@ -94,7 +94,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options

 		// Create a local stub that preserves the Nodes field for tracking
 		// We keep the original options (with Nodes) so IsRemote() works correctly
-		inst := instance.New(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, nil)
+		inst := instance.New(name, im.globalConfig, options, nil)

 		// Update the local stub with all remote data (preserving Nodes)
 		im.updateLocalInstanceFromRemote(inst, remoteInst)
@@ -129,8 +129,8 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 		}
 	}
 	localInstanceCount := totalInstances - remoteCount
-	if localInstanceCount >= im.instancesConfig.MaxInstances && im.instancesConfig.MaxInstances != -1 {
-		return nil, fmt.Errorf("maximum number of instances (%d) reached", im.instancesConfig.MaxInstances)
+	if localInstanceCount >= im.globalConfig.Instances.MaxInstances && im.globalConfig.Instances.MaxInstances != -1 {
+		return nil, fmt.Errorf("maximum number of instances (%d) reached", im.globalConfig.Instances.MaxInstances)
 	}

 	// Assign and validate port for backend-specific options
@@ -155,7 +155,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 		im.onStatusChange(name, oldStatus, newStatus)
 	}

-	inst := instance.New(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, statusCallback)
+	inst := instance.New(name, im.globalConfig, options, statusCallback)

 	// Add to registry
 	if err := im.registry.add(inst); err != nil {
@@ -384,7 +384,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
 	// Check max running instances limit for local instances only
 	if im.IsMaxRunningInstancesReached() {
-		return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.instancesConfig.MaxRunningInstances))
+		return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.globalConfig.Instances.MaxRunningInstances))
 	}

 	if err := inst.Start(); err != nil {
@@ -400,7 +400,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
 }

 func (im *instanceManager) IsMaxRunningInstancesReached() bool {
-	if im.instancesConfig.MaxRunningInstances == -1 {
+	if im.globalConfig.Instances.MaxRunningInstances == -1 {
 		return false
 	}
@@ -412,7 +412,7 @@ func (im *instanceManager) IsMaxRunningInstancesReached() bool {
 		}
 	}

-	return localRunningCount >= im.instancesConfig.MaxRunningInstances
+	return localRunningCount >= im.globalConfig.Instances.MaxRunningInstances
 }

 // StopInstance stops a running instance and returns it.
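
The running-instance limit keeps its sentinel semantics through the rename: -1 disables the check, and only locally running instances count toward the limit. Distilled into a standalone sketch (illustrative, not the actual source):

    // maxReached mirrors the comparison in IsMaxRunningInstancesReached.
    func maxReached(localRunning, maxRunning int) bool {
        if maxRunning == -1 { // -1 means unlimited
            return false
        }
        return localRunning >= maxRunning
    }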