Mirror of https://github.com/lordmathis/llamactl.git
Unexport member struct methods
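The change is mechanical but broad: methods on the manager's internal components (registry, ports, persistence, lifecycle, remote) lose their exported names because they are only ever called from inside the package. A minimal sketch of the pattern, with illustrative names rather than the repository's actual types:

```go
package manager

import "sync"

// registry is an internal component: its methods are unexported because
// only code inside this package calls them.
type registry struct {
	mu    sync.RWMutex
	items map[string]string
}

func (r *registry) get(name string) (string, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	v, ok := r.items[name]
	return v, ok
}

// Manager is the package's public surface; its exported methods delegate
// to the unexported component methods.
type Manager struct{ reg registry }

func (m *Manager) GetValue(name string) (string, bool) { return m.reg.get(name) }
```

Keeping the public API on one type lets the compiler enforce that nothing outside the package depends on component internals.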
@@ -46,13 +46,13 @@ func NewLifecycleManager(
 }

 // Start begins the timeout checking loop in a goroutine.
-func (l *lifecycleManager) Start() {
+func (l *lifecycleManager) start() {
 	go l.timeoutCheckLoop()
 }

 // Stop gracefully stops the lifecycle manager.
 // This ensures the timeout checker completes before instance cleanup begins.
-func (l *lifecycleManager) Stop() {
+func (l *lifecycleManager) stop() {
 	l.shutdownOnce.Do(func() {
 		close(l.shutdownChan)
 		<-l.shutdownDone // Wait for checker to finish (prevents shutdown race)

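The close/receive pair in the hunk above is the usual Go recipe for a race-free stop: `stop()` signals the checker goroutine and then blocks until that goroutine confirms it has exited. A self-contained sketch of how those fields plausibly fit together (the loop body is simplified):

```go
package manager

import (
	"sync"
	"time"
)

type lifecycleManager struct {
	shutdownChan chan struct{} // closed to tell the loop to exit
	shutdownDone chan struct{} // closed by the loop once it has exited
	shutdownOnce sync.Once     // makes stop() safe to call more than once
}

func (l *lifecycleManager) checkTimeouts() { /* elided in this sketch */ }

// timeoutCheckLoop runs until shutdownChan is closed, then closes
// shutdownDone so stop() knows no check is still in flight.
func (l *lifecycleManager) timeoutCheckLoop() {
	defer close(l.shutdownDone)
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			l.checkTimeouts()
		case <-l.shutdownChan:
			return
		}
	}
}

func (l *lifecycleManager) stop() {
	l.shutdownOnce.Do(func() {
		close(l.shutdownChan)
		<-l.shutdownDone // wait for the checker to finish
	})
}
```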
@@ -77,7 +77,7 @@ func (l *lifecycleManager) timeoutCheckLoop() {
 // checkTimeouts checks all instances for timeout and stops those that have timed out.
 func (l *lifecycleManager) checkTimeouts() {
 	// Get all instances from registry
-	instances := l.registry.List()
+	instances := l.registry.list()

 	var timeoutInstances []string

@@ -89,7 +89,7 @@ func (l *lifecycleManager) checkTimeouts() {
 		}

 		// Only check running instances
-		if !l.registry.IsRunning(inst.Name) {
+		if !l.registry.isRunning(inst.Name) {
			continue
 		}

@@ -111,13 +111,13 @@ func (l *lifecycleManager) checkTimeouts() {

 // EvictLRU finds and stops the least recently used running instance.
 // This is called when max running instances limit is reached.
-func (l *lifecycleManager) EvictLRU() error {
+func (l *lifecycleManager) evictLRU() error {
 	if !l.enableLRU {
 		return fmt.Errorf("LRU eviction is not enabled")
 	}

 	// Get all running instances
-	runningInstances := l.registry.ListRunning()
+	runningInstances := l.registry.listRunning()

 	var lruInstance *instance.Instance

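The elided body of evictLRU presumably scans the running instances for the one with the oldest activity. A sketch of that selection, assuming a LastRequestTime accessor (hypothetical here, not confirmed by the diff):

```go
package manager

import "time"

// inst stands in for the real instance type; LastRequestTime is a
// hypothetical accessor used only for this illustration.
type inst struct {
	Name        string
	lastRequest time.Time
}

func (i *inst) LastRequestTime() time.Time { return i.lastRequest }

// leastRecentlyUsed returns the instance with the oldest activity,
// or nil when the slice is empty.
func leastRecentlyUsed(running []*inst) *inst {
	var lru *inst
	for _, candidate := range running {
		if lru == nil || candidate.LastRequestTime().Before(lru.LastRequestTime()) {
			lru = candidate
		}
	}
	return lru
}
```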
@@ -102,23 +102,23 @@ func New(backendsConfig config.BackendConfig, instancesConfig config.InstancesCo
 	}

 	// Start the lifecycle manager
-	im.lifecycle.Start()
+	im.lifecycle.start()

 	return im
 }

 // persistInstance saves an instance using the persistence component
 func (im *instanceManager) persistInstance(inst *instance.Instance) error {
-	return im.persistence.Save(inst)
+	return im.persistence.save(inst)
 }

 func (im *instanceManager) Shutdown() {
 	im.shutdownOnce.Do(func() {
 		// 1. Stop lifecycle manager (stops timeout checker)
-		im.lifecycle.Stop()
+		im.lifecycle.stop()

 		// 2. Get running instances (no lock needed - registry handles it)
-		running := im.registry.ListRunning()
+		running := im.registry.listRunning()

 		// 3. Stop local instances concurrently
 		var wg sync.WaitGroup

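The `var wg sync.WaitGroup` at the end of the hunk sets up step 3: stopping every local instance in parallel and blocking until all are down before Shutdown returns. The shape of that pattern, sketched against a minimal interface:

```go
package manager

import "sync"

type stopper interface {
	Stop() error
}

// stopAll stops every instance concurrently and returns once all goroutines
// have finished; in real code each error would be logged.
func stopAll(running []stopper) {
	var wg sync.WaitGroup
	for _, s := range running {
		wg.Add(1)
		go func(s stopper) {
			defer wg.Done()
			_ = s.Stop()
		}(s)
	}
	wg.Wait()
}
```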
@@ -143,7 +143,7 @@ func (im *instanceManager) Shutdown() {
 // loadInstances restores all instances from disk using the persistence component
 func (im *instanceManager) loadInstances() error {
 	// Load all instances from persistence
-	instances, err := im.persistence.LoadAll()
+	instances, err := im.persistence.loadAll()
 	if err != nil {
 		return fmt.Errorf("failed to load instances: %w", err)
 	}

@@ -205,21 +205,21 @@ func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error
 	// Handle remote instance mapping
 	if isRemote {
 		// Map instance to node in remote manager
-		if err := im.remote.SetInstanceNode(name, nodeName); err != nil {
+		if err := im.remote.setInstanceNode(name, nodeName); err != nil {
			return fmt.Errorf("failed to set instance node: %w", err)
 		}
 	} else {
 		// Allocate port for local instances
 		if inst.GetPort() > 0 {
			port := inst.GetPort()
-			if err := im.ports.AllocateSpecific(port, name); err != nil {
+			if err := im.ports.allocateSpecific(port, name); err != nil {
				return fmt.Errorf("port conflict: instance %s wants port %d which is already in use: %w", name, port, err)
			}
 		}
 	}

 	// Add instance to registry
-	if err := im.registry.Add(inst); err != nil {
+	if err := im.registry.add(inst); err != nil {
		return fmt.Errorf("failed to add instance to registry: %w", err)
 	}

@@ -229,7 +229,7 @@ func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error
 // autoStartInstances starts instances that were running when persisted and have auto-restart enabled
 // For instances with auto-restart disabled, it sets their status to Stopped
 func (im *instanceManager) autoStartInstances() {
-	instances := im.registry.List()
+	instances := im.registry.list()

 	var instancesToStart []*instance.Instance
 	var instancesToStop []*instance.Instance

@@ -251,7 +251,7 @@ func (im *instanceManager) autoStartInstances() {
 	for _, inst := range instancesToStop {
 		log.Printf("Instance %s was running but auto-restart is disabled, setting status to stopped", inst.Name)
 		inst.SetStatus(instance.Stopped)
-		im.registry.MarkStopped(inst.Name)
+		im.registry.markStopped(inst.Name)
 	}

 	// Start instances that have auto-restart enabled

@@ -259,13 +259,13 @@ func (im *instanceManager) autoStartInstances() {
 		log.Printf("Auto-starting instance %s", inst.Name)
 		// Reset running state before starting (since Start() expects stopped instance)
 		inst.SetStatus(instance.Stopped)
-		im.registry.MarkStopped(inst.Name)
+		im.registry.markStopped(inst.Name)

 		// Check if this is a remote instance
-		if node, exists := im.remote.GetNodeForInstance(inst.Name); exists && node != nil {
+		if node, exists := im.remote.getNodeForInstance(inst.Name); exists && node != nil {
			// Remote instance - use remote manager with context
			ctx := context.Background()
-			if _, err := im.remote.StartInstance(ctx, node, inst.Name); err != nil {
+			if _, err := im.remote.startInstance(ctx, node, inst.Name); err != nil {
				log.Printf("Failed to auto-start remote instance %s: %v", inst.Name, err)
			}
 		} else {

@@ -279,9 +279,9 @@ func (im *instanceManager) autoStartInstances() {

 func (im *instanceManager) onStatusChange(name string, oldStatus, newStatus instance.Status) {
 	if newStatus == instance.Running {
-		im.registry.MarkRunning(name)
+		im.registry.markRunning(name)
 	} else {
-		im.registry.MarkStopped(name)
+		im.registry.markStopped(name)
 	}
 }

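onStatusChange is the glue that keeps the registry's running set consistent without instances importing the registry: each instance is constructed with a callback (the statusCallback passed to instance.New later in this diff). A simplified sketch of that wiring:

```go
package manager

type status int

const (
	stopped status = iota
	running
)

type runningTracker interface {
	markRunning(name string)
	markStopped(name string)
}

// newStatusCallback returns a function an instance can invoke on every
// status transition; the registry never has to poll.
func newStatusCallback(reg runningTracker) func(name string, oldStatus, newStatus status) {
	return func(name string, _, newStatus status) {
		if newStatus == running {
			reg.markRunning(name)
		} else {
			reg.markStopped(name)
		}
	}
}
```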
@@ -293,7 +293,7 @@ func (im *instanceManager) getNodeForInstance(inst *instance.Instance) *config.N
 	}

 	// Check if we have a node mapping in remote manager
-	if nodeConfig, exists := im.remote.GetNodeForInstance(inst.Name); exists {
+	if nodeConfig, exists := im.remote.getNodeForInstance(inst.Name); exists {
		return nodeConfig
 	}

@@ -30,13 +30,13 @@ func (im *instanceManager) updateLocalInstanceFromRemote(localInst *instance.Ins
 // ListInstances returns a list of all instances managed by the instance manager.
 // For remote instances, this fetches the live state from remote nodes and updates local stubs.
 func (im *instanceManager) ListInstances() ([]*instance.Instance, error) {
-	instances := im.registry.List()
+	instances := im.registry.list()

 	// Update remote instances with live state
 	ctx := context.Background()
 	for _, inst := range instances {
 		if node := im.getNodeForInstance(inst); node != nil {
-			remoteInst, err := im.remote.GetInstance(ctx, node, inst.Name)
+			remoteInst, err := im.remote.getInstance(ctx, node, inst.Name)
			if err != nil {
				// Log error but continue with stale data
				// Don't fail the entire list operation due to one remote failure

@@ -69,7 +69,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 	}

 	// Check if instance with this name already exists (must be globally unique)
-	if _, exists := im.registry.Get(name); exists {
+	if _, exists := im.registry.get(name); exists {
		return nil, fmt.Errorf("instance with name %s already exists", name)
 	}

@@ -84,16 +84,16 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options

 		// Create the remote instance on the remote node
 		ctx := context.Background()
-		nodeConfig, exists := im.remote.GetNodeForInstance(nodeName)
+		nodeConfig, exists := im.remote.getNodeForInstance(nodeName)
 		if !exists {
			// Try to set the node if it doesn't exist yet
-			if err := im.remote.SetInstanceNode(name, nodeName); err != nil {
+			if err := im.remote.setInstanceNode(name, nodeName); err != nil {
				return nil, fmt.Errorf("node %s not found", nodeName)
			}
-			nodeConfig, _ = im.remote.GetNodeForInstance(name)
+			nodeConfig, _ = im.remote.getNodeForInstance(name)
 		}

-		remoteInst, err := im.remote.CreateInstance(ctx, nodeConfig, name, options)
+		remoteInst, err := im.remote.createInstance(ctx, nodeConfig, name, options)
 		if err != nil {
			return nil, err
 		}

@@ -106,19 +106,19 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 		im.updateLocalInstanceFromRemote(inst, remoteInst)

 		// Map instance to node
-		if err := im.remote.SetInstanceNode(name, nodeName); err != nil {
+		if err := im.remote.setInstanceNode(name, nodeName); err != nil {
			return nil, fmt.Errorf("failed to map instance to node: %w", err)
 		}

 		// Add to registry (doesn't count towards local limits)
-		if err := im.registry.Add(inst); err != nil {
+		if err := im.registry.add(inst); err != nil {
			return nil, fmt.Errorf("failed to add instance to registry: %w", err)
 		}

 		// Persist the remote instance locally for tracking across restarts
 		if err := im.persistInstance(inst); err != nil {
			// Rollback: remove from registry
-			im.registry.Remove(name)
+			im.registry.remove(name)
			return nil, fmt.Errorf("failed to persist remote instance %s: %w", name, err)
 		}

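The persist step above shows the rollback discipline used throughout CreateInstance: each side effect that succeeds must be undone if a later one fails, so the registry and the on-disk state never diverge. In isolation, the pattern looks like this (interfaces are illustrative):

```go
package manager

import "fmt"

type record struct{ Name string }

type adder interface {
	add(*record) error
	remove(name string) error
}

type saver interface {
	save(*record) error
}

// register adds rec to the registry and persists it, removing the registry
// entry again if persistence fails.
func register(reg adder, st saver, rec *record) error {
	if err := reg.add(rec); err != nil {
		return fmt.Errorf("failed to add instance to registry: %w", err)
	}
	if err := st.save(rec); err != nil {
		_ = reg.remove(rec.Name) // rollback: keep memory and disk consistent
		return fmt.Errorf("failed to persist instance %s: %w", rec.Name, err)
	}
	return nil
}
```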
@@ -127,9 +127,9 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options

 	// Local instance creation
 	// Check max instances limit for local instances only
-	totalInstances := im.registry.Count()
+	totalInstances := im.registry.count()
 	remoteCount := 0
-	for _, inst := range im.registry.List() {
+	for _, inst := range im.registry.list() {
 		if inst.IsRemote() {
			remoteCount++
 		}

@@ -144,14 +144,14 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 	var allocatedPort int
 	if currentPort == 0 {
 		// Allocate a port if not specified
-		allocatedPort, err = im.ports.Allocate(name)
+		allocatedPort, err = im.ports.allocate(name)
 		if err != nil {
			return nil, fmt.Errorf("failed to allocate port: %w", err)
 		}
 		im.setPortInOptions(options, allocatedPort)
 	} else {
 		// Use the specified port
-		if err := im.ports.AllocateSpecific(currentPort, name); err != nil {
+		if err := im.ports.allocateSpecific(currentPort, name); err != nil {
			return nil, fmt.Errorf("port %d is already in use: %w", currentPort, err)
 		}
 		allocatedPort = currentPort

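The two branches above reduce to one invariant: after creation, exactly one port is recorded for the instance, whether the allocator chose it or the caller did. The same logic extracted into a helper (the interface mirrors the allocator methods renamed in this diff):

```go
package manager

import "fmt"

// allocator captures the two allocation methods used here; the diff's
// portAllocator satisfies it after the rename.
type allocator interface {
	allocate(instanceName string) (int, error)
	allocateSpecific(port int, instanceName string) error
}

// resolvePort returns the port an instance should use: auto-allocated when
// requested is 0, otherwise the specific port the caller asked for.
func resolvePort(p allocator, name string, requested int) (int, error) {
	if requested == 0 {
		return p.allocate(name)
	}
	if err := p.allocateSpecific(requested, name); err != nil {
		return 0, fmt.Errorf("port %d is already in use: %w", requested, err)
	}
	return requested, nil
}
```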
@@ -164,9 +164,9 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 	inst := instance.New(name, &im.backendsConfig, &im.instancesConfig, options, im.localNodeName, statusCallback)

 	// Add to registry
-	if err := im.registry.Add(inst); err != nil {
+	if err := im.registry.add(inst); err != nil {
 		// Rollback: release port
-		im.ports.Release(allocatedPort)
+		im.ports.release(allocatedPort)
		return nil, fmt.Errorf("failed to add instance to registry: %w", err)
 	}

@@ -181,7 +181,7 @@ func (im *instanceManager) CreateInstance(name string, options *instance.Options
 // GetInstance retrieves an instance by its name.
 // For remote instances, this fetches the live state from the remote node and updates the local stub.
 func (im *instanceManager) GetInstance(name string) (*instance.Instance, error) {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return nil, fmt.Errorf("instance with name %s not found", name)
 	}

@@ -189,7 +189,7 @@ func (im *instanceManager) GetInstance(name string) (*instance.Instance, error)
 	// Check if instance is remote and fetch live state
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		remoteInst, err := im.remote.GetInstance(ctx, node, name)
+		remoteInst, err := im.remote.getInstance(ctx, node, name)
 		if err != nil {
			return nil, err
 		}

@@ -207,7 +207,7 @@ func (im *instanceManager) GetInstance(name string) (*instance.Instance, error)
 // UpdateInstance updates the options of an existing instance and returns it.
 // If the instance is running, it will be restarted to apply the new options.
 func (im *instanceManager) UpdateInstance(name string, options *instance.Options) (*instance.Instance, error) {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return nil, fmt.Errorf("instance with name %s not found", name)
 	}

@@ -215,7 +215,7 @@ func (im *instanceManager) UpdateInstance(name string, options *instance.Options
 	// Check if instance is remote and delegate to remote operation
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		remoteInst, err := im.remote.UpdateInstance(ctx, node, name, options)
+		remoteInst, err := im.remote.updateInstance(ctx, node, name, options)
 		if err != nil {
			return nil, err
 		}

@@ -253,14 +253,14 @@ func (im *instanceManager) UpdateInstance(name string, options *instance.Options
 		// Port is changing - need to release old and allocate new
 		if newPort == 0 {
			// Auto-allocate new port
-			allocatedPort, err = im.ports.Allocate(name)
+			allocatedPort, err = im.ports.allocate(name)
			if err != nil {
				return nil, fmt.Errorf("failed to allocate new port: %w", err)
			}
			im.setPortInOptions(options, allocatedPort)
 		} else {
			// Use specified port
-			if err := im.ports.AllocateSpecific(newPort, name); err != nil {
+			if err := im.ports.allocateSpecific(newPort, name); err != nil {
				return nil, fmt.Errorf("failed to allocate port %d: %w", newPort, err)
			}
			allocatedPort = newPort

@@ -268,9 +268,9 @@ func (im *instanceManager) UpdateInstance(name string, options *instance.Options

 		// Release old port
 		if oldPort > 0 {
-			if err := im.ports.Release(oldPort); err != nil {
+			if err := im.ports.release(oldPort); err != nil {
				// Rollback new port allocation
-				im.ports.Release(allocatedPort)
+				im.ports.release(allocatedPort)
				return nil, fmt.Errorf("failed to release old port %d: %w", oldPort, err)
			}
 		}

@@ -305,7 +305,7 @@ func (im *instanceManager) UpdateInstance(name string, options *instance.Options

 // DeleteInstance removes stopped instance by its name.
 func (im *instanceManager) DeleteInstance(name string) error {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return fmt.Errorf("instance with name %s not found", name)
 	}

@@ -313,17 +313,17 @@ func (im *instanceManager) DeleteInstance(name string) error {
 	// Check if instance is remote and delegate to remote operation
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		err := im.remote.DeleteInstance(ctx, node, name)
+		err := im.remote.deleteInstance(ctx, node, name)
 		if err != nil {
			return err
 		}

 		// Clean up local tracking
-		im.remote.RemoveInstance(name)
-		im.registry.Remove(name)
+		im.remote.removeInstance(name)
+		im.registry.remove(name)

 		// Delete the instance's persistence file
-		if err := im.persistence.Delete(name); err != nil {
+		if err := im.persistence.delete(name); err != nil {
			return fmt.Errorf("failed to delete config file for remote instance %s: %w", name, err)
 		}

@@ -339,15 +339,15 @@ func (im *instanceManager) DeleteInstance(name string) error {
 	}

 	// Release port (use ReleaseByInstance for proper cleanup)
-	im.ports.ReleaseByInstance(name)
+	im.ports.releaseByInstance(name)

 	// Remove from registry
-	if err := im.registry.Remove(name); err != nil {
+	if err := im.registry.remove(name); err != nil {
		return fmt.Errorf("failed to remove instance from registry: %w", err)
 	}

 	// Delete persistence file
-	if err := im.persistence.Delete(name); err != nil {
+	if err := im.persistence.delete(name); err != nil {
		return fmt.Errorf("failed to delete config file for instance %s: %w", name, err)
 	}

@@ -357,7 +357,7 @@ func (im *instanceManager) DeleteInstance(name string) error {
 // StartInstance starts a stopped instance and returns it.
 // If the instance is already running, it returns an error.
 func (im *instanceManager) StartInstance(name string) (*instance.Instance, error) {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return nil, fmt.Errorf("instance with name %s not found", name)
 	}

@@ -365,7 +365,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
 	// Check if instance is remote and delegate to remote operation
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		remoteInst, err := im.remote.StartInstance(ctx, node, name)
+		remoteInst, err := im.remote.startInstance(ctx, node, name)
 		if err != nil {
			return nil, err
 		}

@@ -408,7 +408,7 @@ func (im *instanceManager) IsMaxRunningInstancesReached() bool {

 	// Count only local running instances (each node has its own limits)
 	localRunningCount := 0
-	for _, inst := range im.registry.ListRunning() {
+	for _, inst := range im.registry.listRunning() {
 		if !inst.IsRemote() {
			localRunningCount++
 		}

@@ -419,7 +419,7 @@ func (im *instanceManager) IsMaxRunningInstancesReached() bool {

 // StopInstance stops a running instance and returns it.
 func (im *instanceManager) StopInstance(name string) (*instance.Instance, error) {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return nil, fmt.Errorf("instance with name %s not found", name)
 	}

@@ -427,7 +427,7 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error)
 	// Check if instance is remote and delegate to remote operation
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		remoteInst, err := im.remote.StopInstance(ctx, node, name)
+		remoteInst, err := im.remote.stopInstance(ctx, node, name)
 		if err != nil {
			return nil, err
 		}

@@ -460,7 +460,7 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error)

 // RestartInstance stops and then starts an instance, returning the updated instance.
 func (im *instanceManager) RestartInstance(name string) (*instance.Instance, error) {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return nil, fmt.Errorf("instance with name %s not found", name)
 	}

@@ -468,7 +468,7 @@ func (im *instanceManager) RestartInstance(name string) (*instance.Instance, err
 	// Check if instance is remote and delegate to remote operation
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		remoteInst, err := im.remote.RestartInstance(ctx, node, name)
+		remoteInst, err := im.remote.restartInstance(ctx, node, name)
 		if err != nil {
			return nil, err
 		}

@@ -505,7 +505,7 @@ func (im *instanceManager) RestartInstance(name string) (*instance.Instance, err

 // GetInstanceLogs retrieves the logs for a specific instance by its name.
 func (im *instanceManager) GetInstanceLogs(name string, numLines int) (string, error) {
-	inst, exists := im.registry.Get(name)
+	inst, exists := im.registry.get(name)
 	if !exists {
 		return "", fmt.Errorf("instance with name %s not found", name)
 	}

@@ -513,7 +513,7 @@ func (im *instanceManager) GetInstanceLogs(name string, numLines int) (string, e
 	// Check if instance is remote and delegate to remote operation
 	if node := im.getNodeForInstance(inst); node != nil {
 		ctx := context.Background()
-		return im.remote.GetInstanceLogs(ctx, node, name, numLines)
+		return im.remote.getInstanceLogs(ctx, node, name, numLines)
 	}

 	// Get logs from the local instance

@@ -532,5 +532,5 @@ func (im *instanceManager) setPortInOptions(options *instance.Options, port int)

 // EvictLRUInstance finds and stops the least recently used running instance.
 func (im *instanceManager) EvictLRUInstance() error {
-	return im.lifecycle.EvictLRU()
+	return im.lifecycle.evictLRU()
 }

@@ -39,7 +39,7 @@ func NewInstancePersister(instancesDir string) (*instancePersister, error) {
 }

 // Save persists an instance to disk with atomic write
-func (p *instancePersister) Save(inst *instance.Instance) error {
+func (p *instancePersister) save(inst *instance.Instance) error {
 	if !p.enabled {
 		return nil
 	}

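The "atomic write" in save's doc comment conventionally means write-to-temp-then-rename, so a crash mid-write never leaves a truncated file behind. A generic sketch of the technique, not the repository's exact implementation:

```go
package manager

import (
	"encoding/json"
	"os"
)

// atomicWriteJSON marshals v to a temporary file and renames it over path.
// Rename is atomic within a directory on POSIX filesystems, so readers see
// either the old file or the new one, never a partial write.
func atomicWriteJSON(path string, v any) error {
	data, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return err
	}
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, 0o644); err != nil {
		return err
	}
	if err := os.Rename(tmp, path); err != nil {
		os.Remove(tmp) // best-effort cleanup of the temp file
		return err
	}
	return nil
}
```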
@@ -101,7 +101,7 @@ func (p *instancePersister) Save(inst *instance.Instance) error {
 }

 // Load loads a single instance from disk by name.
-func (p *instancePersister) Load(name string) (*instance.Instance, error) {
+func (p *instancePersister) load(name string) (*instance.Instance, error) {
 	if !p.enabled {
 		return nil, fmt.Errorf("persistence is disabled")
 	}

@@ -127,7 +127,7 @@ func (p *instancePersister) Load(name string) (*instance.Instance, error) {
 }

 // Delete removes an instance's persistence file from disk.
-func (p *instancePersister) Delete(name string) error {
+func (p *instancePersister) delete(name string) error {
 	if !p.enabled {
 		return nil
 	}

@@ -154,7 +154,7 @@ func (p *instancePersister) Delete(name string) error {

 // LoadAll loads all persisted instances from disk.
 // Returns a slice of instances and any errors encountered during loading.
-func (p *instancePersister) LoadAll() ([]*instance.Instance, error) {
+func (p *instancePersister) loadAll() ([]*instance.Instance, error) {
 	if !p.enabled {
 		return nil, nil
 	}

@@ -45,9 +45,9 @@ func NewPortAllocator(minPort, maxPort int) (*portAllocator, error) {
 	}, nil
 }

-// Allocate finds and allocates the first available port for the given instance.
+// allocate finds and allocates the first available port for the given instance.
 // Returns the allocated port or an error if no ports are available.
-func (p *portAllocator) Allocate(instanceName string) (int, error) {
+func (p *portAllocator) allocate(instanceName string) (int, error) {
 	if instanceName == "" {
 		return 0, fmt.Errorf("instance name cannot be empty")
 	}

@@ -66,9 +66,9 @@ func (p *portAllocator) Allocate(instanceName string) (int, error) {
 	return port, nil
 }

-// AllocateSpecific allocates a specific port for the given instance.
+// allocateSpecific allocates a specific port for the given instance.
 // Returns an error if the port is already allocated or out of range.
-func (p *portAllocator) AllocateSpecific(port int, instanceName string) error {
+func (p *portAllocator) allocateSpecific(port int, instanceName string) error {
 	if instanceName == "" {
 		return fmt.Errorf("instance name cannot be empty")
 	}

@@ -89,9 +89,9 @@ func (p *portAllocator) AllocateSpecific(port int, instanceName string) error {
 	return nil
 }

-// Release releases a specific port, making it available for reuse.
+// release releases a specific port, making it available for reuse.
 // Returns an error if the port is not allocated.
-func (p *portAllocator) Release(port int) error {
+func (p *portAllocator) release(port int) error {
 	if port < p.minPort || port > p.maxPort {
 		return fmt.Errorf("port %d is out of range [%d-%d]", port, p.minPort, p.maxPort)
 	}

@@ -109,10 +109,10 @@ func (p *portAllocator) Release(port int) error {
 	return nil
 }

-// ReleaseByInstance releases all ports allocated to the given instance.
+// releaseByInstance releases all ports allocated to the given instance.
 // This is useful for cleanup when deleting or updating an instance.
 // Returns the number of ports released.
-func (p *portAllocator) ReleaseByInstance(instanceName string) int {
+func (p *portAllocator) releaseByInstance(instanceName string) int {
 	if instanceName == "" {
 		return 0
 	}

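Having both release (by port) and releaseByInstance suggests the allocator tracks ownership per port. One plausible internal layout, assumed here for illustration:

```go
package manager

import "sync"

type portAllocatorSketch struct {
	mu        sync.Mutex
	allocated map[int]string // port -> owning instance name
}

// releaseByInstance frees every port owned by instanceName and reports how
// many were released; an unknown or empty name releases nothing.
func (p *portAllocatorSketch) releaseByInstance(instanceName string) int {
	if instanceName == "" {
		return 0
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	released := 0
	for port, owner := range p.allocated {
		if owner == instanceName {
			delete(p.allocated, port)
			released++
		}
	}
	return released
}
```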
@@ -23,7 +23,7 @@ func NewInstanceRegistry() *instanceRegistry {

 // Get retrieves an instance by name.
 // Returns the instance and true if found, nil and false otherwise.
-func (r *instanceRegistry) Get(name string) (*instance.Instance, bool) {
+func (r *instanceRegistry) get(name string) (*instance.Instance, bool) {
 	r.mu.RLock()
 	defer r.mu.RUnlock()

@@ -32,7 +32,7 @@ func (r *instanceRegistry) Get(name string) (*instance.Instance, bool) {
 }

 // List returns a snapshot copy of all instances to prevent external mutation.
-func (r *instanceRegistry) List() []*instance.Instance {
+func (r *instanceRegistry) list() []*instance.Instance {
 	r.mu.RLock()
 	defer r.mu.RUnlock()

@@ -44,7 +44,7 @@ func (r *instanceRegistry) List() []*instance.Instance {
 }

 // ListRunning returns a snapshot of all currently running instances.
-func (r *instanceRegistry) ListRunning() []*instance.Instance {
+func (r *instanceRegistry) listRunning() []*instance.Instance {
 	r.mu.RLock()
 	defer r.mu.RUnlock()

@@ -59,7 +59,7 @@ func (r *instanceRegistry) ListRunning() []*instance.Instance {

 // Add adds a new instance to the registry.
 // Returns an error if an instance with the same name already exists.
-func (r *instanceRegistry) Add(inst *instance.Instance) error {
+func (r *instanceRegistry) add(inst *instance.Instance) error {
 	if inst == nil {
 		return fmt.Errorf("cannot add nil instance")
 	}

@@ -83,7 +83,7 @@ func (r *instanceRegistry) Add(inst *instance.Instance) error {

 // Remove removes an instance from the registry.
 // Returns an error if the instance doesn't exist.
-func (r *instanceRegistry) Remove(name string) error {
+func (r *instanceRegistry) remove(name string) error {
 	r.mu.Lock()
 	defer r.mu.Unlock()

@@ -98,30 +98,30 @@ func (r *instanceRegistry) Remove(name string) error {
 }

 // MarkRunning marks an instance as running using lock-free sync.Map.
-func (r *instanceRegistry) MarkRunning(name string) {
+func (r *instanceRegistry) markRunning(name string) {
 	r.running.Store(name, struct{}{})
 }

 // MarkStopped marks an instance as stopped using lock-free sync.Map.
-func (r *instanceRegistry) MarkStopped(name string) {
+func (r *instanceRegistry) markStopped(name string) {
 	r.running.Delete(name)
 }

 // IsRunning checks if an instance is running using lock-free sync.Map.
-func (r *instanceRegistry) IsRunning(name string) bool {
+func (r *instanceRegistry) isRunning(name string) bool {
 	_, isRunning := r.running.Load(name)
 	return isRunning
 }

 // Count returns the total number of instances in the registry.
-func (r *instanceRegistry) Count() int {
+func (r *instanceRegistry) count() int {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
 	return len(r.instances)
 }

 // CountRunning returns the number of currently running instances.
-func (r *instanceRegistry) CountRunning() int {
+func (r *instanceRegistry) countRunning() int {
 	count := 0
 	r.running.Range(func(key, value any) bool {
 		count++

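The mark/isRunning trio treats sync.Map as a set keyed by instance name, so status flips from instance goroutines never contend with the RWMutex guarding the instances map. The same technique in a self-contained form:

```go
package manager

import "sync"

type runningSet struct {
	running sync.Map // instance name -> struct{}; presence means running
}

func (s *runningSet) markRunning(name string) { s.running.Store(name, struct{}{}) }
func (s *runningSet) markStopped(name string) { s.running.Delete(name) }

func (s *runningSet) isRunning(name string) bool {
	_, ok := s.running.Load(name)
	return ok
}

// countRunning walks the set, since sync.Map exposes no O(1) length.
func (s *runningSet) countRunning() int {
	n := 0
	s.running.Range(func(_, _ any) bool {
		n++
		return true // keep iterating
	})
	return n
}
```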
@@ -47,7 +47,7 @@ func NewRemoteManager(nodes map[string]config.NodeConfig, timeout time.Duration)

 // GetNodeForInstance returns the node configuration for a given instance.
 // Returns nil if the instance is not mapped to any node.
-func (rm *remoteManager) GetNodeForInstance(instanceName string) (*config.NodeConfig, bool) {
+func (rm *remoteManager) getNodeForInstance(instanceName string) (*config.NodeConfig, bool) {
 	rm.mu.RLock()
 	defer rm.mu.RUnlock()

@@ -57,7 +57,7 @@ func (rm *remoteManager) GetNodeForInstance(instanceName string) (*config.NodeCo

 // SetInstanceNode maps an instance to a specific node.
 // Returns an error if the node doesn't exist.
-func (rm *remoteManager) SetInstanceNode(instanceName, nodeName string) error {
+func (rm *remoteManager) setInstanceNode(instanceName, nodeName string) error {
 	rm.mu.Lock()
 	defer rm.mu.Unlock()

@@ -71,7 +71,7 @@ func (rm *remoteManager) SetInstanceNode(instanceName, nodeName string) error {
 }

 // RemoveInstance removes the instance-to-node mapping.
-func (rm *remoteManager) RemoveInstance(instanceName string) {
+func (rm *remoteManager) removeInstance(instanceName string) {
 	rm.mu.Lock()
 	defer rm.mu.Unlock()

@@ -138,7 +138,7 @@ func parseRemoteResponse(resp *http.Response, result any) error {
 // --- Remote CRUD operations ---

 // ListInstances lists all instances on a remote node.
-func (rm *remoteManager) ListInstances(ctx context.Context, node *config.NodeConfig) ([]*instance.Instance, error) {
+func (rm *remoteManager) listInstances(ctx context.Context, node *config.NodeConfig) ([]*instance.Instance, error) {
 	resp, err := rm.makeRemoteRequest(ctx, node, "GET", apiBasePath, nil)
 	if err != nil {
 		return nil, err

@@ -153,7 +153,7 @@ func (rm *remoteManager) ListInstances(ctx context.Context, node *config.NodeCon
 }

 // CreateInstance creates a new instance on a remote node.
-func (rm *remoteManager) CreateInstance(ctx context.Context, node *config.NodeConfig, name string, opts *instance.Options) (*instance.Instance, error) {
+func (rm *remoteManager) createInstance(ctx context.Context, node *config.NodeConfig, name string, opts *instance.Options) (*instance.Instance, error) {
 	path := fmt.Sprintf("%s%s/", apiBasePath, name)

 	resp, err := rm.makeRemoteRequest(ctx, node, "POST", path, opts)

@@ -170,7 +170,7 @@ func (rm *remoteManager) CreateInstance(ctx context.Context, node *config.NodeCo
 }

 // GetInstance retrieves an instance by name from a remote node.
-func (rm *remoteManager) GetInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
+func (rm *remoteManager) getInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
 	path := fmt.Sprintf("%s%s/", apiBasePath, name)
 	resp, err := rm.makeRemoteRequest(ctx, node, "GET", path, nil)
 	if err != nil {

@@ -186,7 +186,7 @@ func (rm *remoteManager) GetInstance(ctx context.Context, node *config.NodeConfi
 }

 // UpdateInstance updates an existing instance on a remote node.
-func (rm *remoteManager) UpdateInstance(ctx context.Context, node *config.NodeConfig, name string, opts *instance.Options) (*instance.Instance, error) {
+func (rm *remoteManager) updateInstance(ctx context.Context, node *config.NodeConfig, name string, opts *instance.Options) (*instance.Instance, error) {
 	path := fmt.Sprintf("%s%s/", apiBasePath, name)

 	resp, err := rm.makeRemoteRequest(ctx, node, "PUT", path, opts)

@@ -203,7 +203,7 @@ func (rm *remoteManager) UpdateInstance(ctx context.Context, node *config.NodeCo
 }

 // DeleteInstance deletes an instance from a remote node.
-func (rm *remoteManager) DeleteInstance(ctx context.Context, node *config.NodeConfig, name string) error {
+func (rm *remoteManager) deleteInstance(ctx context.Context, node *config.NodeConfig, name string) error {
 	path := fmt.Sprintf("%s%s/", apiBasePath, name)
 	resp, err := rm.makeRemoteRequest(ctx, node, "DELETE", path, nil)
 	if err != nil {

@@ -214,7 +214,7 @@ func (rm *remoteManager) DeleteInstance(ctx context.Context, node *config.NodeCo
 }

 // StartInstance starts an instance on a remote node.
-func (rm *remoteManager) StartInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
+func (rm *remoteManager) startInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
 	path := fmt.Sprintf("%s%s/start", apiBasePath, name)
 	resp, err := rm.makeRemoteRequest(ctx, node, "POST", path, nil)
 	if err != nil {

@@ -230,7 +230,7 @@ func (rm *remoteManager) StartInstance(ctx context.Context, node *config.NodeCon
 }

 // StopInstance stops an instance on a remote node.
-func (rm *remoteManager) StopInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
+func (rm *remoteManager) stopInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
 	path := fmt.Sprintf("%s%s/stop", apiBasePath, name)
 	resp, err := rm.makeRemoteRequest(ctx, node, "POST", path, nil)
 	if err != nil {

@@ -246,7 +246,7 @@ func (rm *remoteManager) StopInstance(ctx context.Context, node *config.NodeConf
 }

 // RestartInstance restarts an instance on a remote node.
-func (rm *remoteManager) RestartInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
+func (rm *remoteManager) restartInstance(ctx context.Context, node *config.NodeConfig, name string) (*instance.Instance, error) {
 	path := fmt.Sprintf("%s%s/restart", apiBasePath, name)
 	resp, err := rm.makeRemoteRequest(ctx, node, "POST", path, nil)
 	if err != nil {

@@ -262,7 +262,7 @@ func (rm *remoteManager) RestartInstance(ctx context.Context, node *config.NodeC
 }

 // GetInstanceLogs retrieves logs for an instance from a remote node.
-func (rm *remoteManager) GetInstanceLogs(ctx context.Context, node *config.NodeConfig, name string, numLines int) (string, error) {
+func (rm *remoteManager) getInstanceLogs(ctx context.Context, node *config.NodeConfig, name string, numLines int) (string, error) {
 	path := fmt.Sprintf("%s%s/logs?lines=%d", apiBasePath, name, numLines)
 	resp, err := rm.makeRemoteRequest(ctx, node, "GET", path, nil)
 	if err != nil {

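Every remote CRUD method funnels through makeRemoteRequest and parseRemoteResponse, which the diff references but never shows. A hedged sketch of what such a pair typically looks like; the node fields and bearer-token auth here are assumptions, not the repository's confirmed API:

```go
package manager

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// nodeConfigSketch stands in for config.NodeConfig; field names are assumed.
type nodeConfigSketch struct {
	Address string
	APIKey  string
}

// makeRemoteRequest builds a JSON request against the node's API and sends it.
func makeRemoteRequest(ctx context.Context, client *http.Client, node *nodeConfigSketch, method, path string, body any) (*http.Response, error) {
	var reader io.Reader
	if body != nil {
		data, err := json.Marshal(body)
		if err != nil {
			return nil, err
		}
		reader = bytes.NewReader(data)
	}
	req, err := http.NewRequestWithContext(ctx, method, node.Address+path, reader)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	if node.APIKey != "" {
		req.Header.Set("Authorization", "Bearer "+node.APIKey)
	}
	return client.Do(req)
}

// parseRemoteResponse decodes a JSON body into result and turns non-2xx
// statuses into errors.
func parseRemoteResponse(resp *http.Response, result any) error {
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		data, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("remote returned %d: %s", resp.StatusCode, data)
	}
	if result == nil {
		return nil
	}
	return json.NewDecoder(resp.Body).Decode(result)
}
```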