Mirror of https://github.com/lordmathis/llamactl.git, synced 2025-11-06 09:04:27 +00:00
Refactor instance management to support backend types and options
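In practice the change splits generic instance settings from backend flags: each instance now declares a backend_type and nests backend flags under backend_options, instead of inlining llama.cpp fields at the top level of the options object. The shape below is taken from the updated TestUnmarshalJSON fixture further down; it is the on-the-wire format, not new code:

    {
      "name": "test-instance",
      "options": {
        "auto_restart": false,
        "max_restarts": 5,
        "backend_type": "llama_cpp",
        "backend_options": {
          "model": "/path/to/model.gguf",
          "port": 8080
        }
      }
    }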
@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"llamactl/pkg/backends/llamacpp"
+	"llamactl/pkg/backends"
 	"llamactl/pkg/config"
 	"log"
 	"net/http"
@@ -29,52 +29,6 @@ func (realTimeProvider) Now() time.Time {
 	return time.Now()
 }
 
-type CreateInstanceOptions struct {
-	// Auto restart
-	AutoRestart  *bool `json:"auto_restart,omitempty"`
-	MaxRestarts  *int  `json:"max_restarts,omitempty"`
-	RestartDelay *int  `json:"restart_delay,omitempty"`
-	// On demand start
-	OnDemandStart *bool `json:"on_demand_start,omitempty"`
-	// Idle timeout
-	IdleTimeout *int `json:"idle_timeout,omitempty"`
-	// LlamaServerOptions contains the options for the llama server
-	llamacpp.LlamaServerOptions `json:",inline"`
-}
-
-// UnmarshalJSON implements custom JSON unmarshaling for CreateInstanceOptions
-// This is needed because the embedded LlamaServerOptions has its own UnmarshalJSON
-// which can interfere with proper unmarshaling of the pointer fields
-func (c *CreateInstanceOptions) UnmarshalJSON(data []byte) error {
-	// First, unmarshal into a temporary struct without the embedded type
-	type tempCreateOptions struct {
-		AutoRestart   *bool `json:"auto_restart,omitempty"`
-		MaxRestarts   *int  `json:"max_restarts,omitempty"`
-		RestartDelay  *int  `json:"restart_delay,omitempty"`
-		OnDemandStart *bool `json:"on_demand_start,omitempty"`
-		IdleTimeout   *int  `json:"idle_timeout,omitempty"`
-	}
-
-	var temp tempCreateOptions
-	if err := json.Unmarshal(data, &temp); err != nil {
-		return err
-	}
-
-	// Copy the pointer fields
-	c.AutoRestart = temp.AutoRestart
-	c.MaxRestarts = temp.MaxRestarts
-	c.RestartDelay = temp.RestartDelay
-	c.OnDemandStart = temp.OnDemandStart
-	c.IdleTimeout = temp.IdleTimeout
-
-	// Now unmarshal the embedded LlamaServerOptions
-	if err := json.Unmarshal(data, &c.LlamaServerOptions); err != nil {
-		return err
-	}
-
-	return nil
-}
-
 // Process represents a running instance of the llama server
 type Process struct {
 	Name string `json:"name"`
@@ -110,101 +64,17 @@ type Process struct {
 	timeProvider TimeProvider `json:"-"` // Time provider for testing
 }
 
-// validateAndCopyOptions validates and creates a deep copy of the provided options
-// It applies validation rules and returns a safe copy
-func validateAndCopyOptions(name string, options *CreateInstanceOptions) *CreateInstanceOptions {
-	optionsCopy := &CreateInstanceOptions{}
-
-	if options != nil {
-		// Copy the embedded LlamaServerOptions
-		optionsCopy.LlamaServerOptions = options.LlamaServerOptions
-
-		// Copy and validate pointer fields
-		if options.AutoRestart != nil {
-			autoRestart := *options.AutoRestart
-			optionsCopy.AutoRestart = &autoRestart
-		}
-
-		if options.MaxRestarts != nil {
-			maxRestarts := *options.MaxRestarts
-			if maxRestarts < 0 {
-				log.Printf("Instance %s MaxRestarts value (%d) cannot be negative, setting to 0", name, maxRestarts)
-				maxRestarts = 0
-			}
-			optionsCopy.MaxRestarts = &maxRestarts
-		}
-
-		if options.RestartDelay != nil {
-			restartDelay := *options.RestartDelay
-			if restartDelay < 0 {
-				log.Printf("Instance %s RestartDelay value (%d) cannot be negative, setting to 0 seconds", name, restartDelay)
-				restartDelay = 0
-			}
-			optionsCopy.RestartDelay = &restartDelay
-		}
-
-		if options.OnDemandStart != nil {
-			onDemandStart := *options.OnDemandStart
-			optionsCopy.OnDemandStart = &onDemandStart
-		}
-
-		if options.IdleTimeout != nil {
-			idleTimeout := *options.IdleTimeout
-			if idleTimeout < 0 {
-				log.Printf("Instance %s IdleTimeout value (%d) cannot be negative, setting to 0 minutes", name, idleTimeout)
-				idleTimeout = 0
-			}
-			optionsCopy.IdleTimeout = &idleTimeout
-		}
-	}
-
-	return optionsCopy
-}
-
-// applyDefaultOptions applies default values from global settings to any nil options
-func applyDefaultOptions(options *CreateInstanceOptions, globalSettings *config.InstancesConfig) {
-	if globalSettings == nil {
-		return
-	}
-
-	if options.AutoRestart == nil {
-		defaultAutoRestart := globalSettings.DefaultAutoRestart
-		options.AutoRestart = &defaultAutoRestart
-	}
-
-	if options.MaxRestarts == nil {
-		defaultMaxRestarts := globalSettings.DefaultMaxRestarts
-		options.MaxRestarts = &defaultMaxRestarts
-	}
-
-	if options.RestartDelay == nil {
-		defaultRestartDelay := globalSettings.DefaultRestartDelay
-		options.RestartDelay = &defaultRestartDelay
-	}
-
-	if options.OnDemandStart == nil {
-		defaultOnDemandStart := globalSettings.DefaultOnDemandStart
-		options.OnDemandStart = &defaultOnDemandStart
-	}
-
-	if options.IdleTimeout == nil {
-		defaultIdleTimeout := 0
-		options.IdleTimeout = &defaultIdleTimeout
-	}
-}
-
 // NewInstance creates a new instance with the given name, log path, and options
 func NewInstance(name string, globalSettings *config.InstancesConfig, options *CreateInstanceOptions, onStatusChange func(oldStatus, newStatus InstanceStatus)) *Process {
-	// Validate and copy options
-	optionsCopy := validateAndCopyOptions(name, options)
-	// Apply defaults
-	applyDefaultOptions(optionsCopy, globalSettings)
+	options.ValidateAndApplyDefaults(name, globalSettings)
 
 	// Create the instance logger
 	logger := NewInstanceLogger(name, globalSettings.LogsDir)
 
 	return &Process{
 		Name:           name,
-		options:        optionsCopy,
+		options:        options,
 		globalSettings: globalSettings,
 		logger:         logger,
 		timeProvider:   realTimeProvider{},
@@ -220,6 +90,30 @@ func (i *Process) GetOptions() *CreateInstanceOptions {
 	return i.options
 }
 
+func (i *Process) GetPort() int {
+	i.mu.RLock()
+	defer i.mu.RUnlock()
+	if i.options != nil {
+		switch i.options.BackendType {
+		case backends.BackendTypeLlamaCpp:
+			return i.options.LlamaServerOptions.Port
+		}
+	}
+	return 0
+}
+
+func (i *Process) GetHost() string {
+	i.mu.RLock()
+	defer i.mu.RUnlock()
+	if i.options != nil {
+		switch i.options.BackendType {
+		case backends.BackendTypeLlamaCpp:
+			return i.options.LlamaServerOptions.Host
+		}
+	}
+	return ""
+}
+
 func (i *Process) SetOptions(options *CreateInstanceOptions) {
 	i.mu.Lock()
 	defer i.mu.Unlock()
@@ -229,11 +123,10 @@ func (i *Process) SetOptions(options *CreateInstanceOptions) {
 		return
 	}
 
-	// Validate and copy options and apply defaults
-	optionsCopy := validateAndCopyOptions(i.Name, options)
-	applyDefaultOptions(optionsCopy, i.globalSettings)
+	// Validate options and apply defaults
+	options.ValidateAndApplyDefaults(i.Name, i.globalSettings)
 
-	i.options = optionsCopy
+	i.options = options
 	// Clear the proxy so it gets recreated with new options
 	i.proxy = nil
 }
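Note the behavioral shift in SetOptions: the old validateAndCopyOptions built a defensive copy, while ValidateAndApplyDefaults mutates the caller's struct in place, so the caller and the instance now share one options value. A fragment illustrating the consequence (variable names invented for illustration):

    shared := &instance.CreateInstanceOptions{ /* ... */ }
    inst.SetOptions(shared)
    // shared now has defaults filled in, and later writes to shared are
    // visible through inst.GetOptions(); copy first if that is unwanted.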
@@ -256,7 +149,15 @@ func (i *Process) GetProxy() (*httputil.ReverseProxy, error) {
 		return nil, fmt.Errorf("instance %s has no options set", i.Name)
 	}
 
-	targetURL, err := url.Parse(fmt.Sprintf("http://%s:%d", i.options.Host, i.options.Port))
+	var host string
+	var port int
+	switch i.options.BackendType {
+	case backends.BackendTypeLlamaCpp:
+		host = i.options.LlamaServerOptions.Host
+		port = i.options.LlamaServerOptions.Port
+	}
+
+	targetURL, err := url.Parse(fmt.Sprintf("http://%s:%d", host, port))
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse target URL for instance %s: %w", i.Name, err)
 	}
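The switch above is the seam where additional backends plug in; a hypothetical second backend (the MLX names below are illustrative, not part of this commit) would add one case per lookup:

    switch i.options.BackendType {
    case backends.BackendTypeLlamaCpp:
        host = i.options.LlamaServerOptions.Host
        port = i.options.LlamaServerOptions.Port
    case backends.BackendTypeMLX: // hypothetical future backend type
        host = i.options.MLXServerOptions.Host
        port = i.options.MLXServerOptions.Port
    }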
@@ -286,44 +187,36 @@ func (i *Process) MarshalJSON() ([]byte, error) {
 	i.mu.RLock()
 	defer i.mu.RUnlock()
 
-	// Create a temporary struct with exported fields for JSON marshalling
-	temp := struct {
-		Name    string                 `json:"name"`
+	// Use anonymous struct to avoid recursion
+	type Alias Process
+	return json.Marshal(&struct {
+		*Alias
 		Options *CreateInstanceOptions `json:"options,omitempty"`
-		Status  InstanceStatus         `json:"status"`
-		Created int64                  `json:"created,omitempty"`
 	}{
-		Name:    i.Name,
+		Alias:   (*Alias)(i),
 		Options: i.options,
-		Status:  i.Status,
-		Created: i.Created,
-	}
-
-	return json.Marshal(temp)
+	})
 }
 
 // UnmarshalJSON implements json.Unmarshaler for Instance
 func (i *Process) UnmarshalJSON(data []byte) error {
-	// Create a temporary struct for unmarshalling
-	temp := struct {
-		Name    string                 `json:"name"`
+	// Use anonymous struct to avoid recursion
+	type Alias Process
+	aux := &struct {
+		*Alias
 		Options *CreateInstanceOptions `json:"options,omitempty"`
-		Status  InstanceStatus         `json:"status"`
-		Created int64                  `json:"created,omitempty"`
-	}{}
+	}{
+		Alias: (*Alias)(i),
+	}
 
-	if err := json.Unmarshal(data, &temp); err != nil {
+	if err := json.Unmarshal(data, aux); err != nil {
 		return err
 	}
 
-	// Set the fields
-	i.Name = temp.Name
-	i.Status = temp.Status
-	i.Created = temp.Created
-
-	// Handle options with validation but no defaults
-	if temp.Options != nil {
-		i.options = validateAndCopyOptions(i.Name, temp.Options)
+	// Handle options with validation and defaults
+	if aux.Options != nil {
+		aux.Options.ValidateAndApplyDefaults(i.Name, i.globalSettings)
+		i.options = aux.Options
 	}
 
 	return nil
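Both methods lean on the type Alias Process device: a locally defined type keeps Process's fields but sheds its methods, so marshaling through the alias cannot re-enter MarshalJSON or UnmarshalJSON. A self-contained illustration of the pattern (demo types, not code from this repo):

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    type Thing struct {
        Name string `json:"name"`
    }

    // MarshalJSON adds a derived field. Marshaling goes through Alias,
    // which has Thing's fields but none of its methods, so json.Marshal
    // does not call this method again (no infinite recursion).
    func (t *Thing) MarshalJSON() ([]byte, error) {
        type Alias Thing
        return json.Marshal(&struct {
            *Alias
            Upper string `json:"upper"`
        }{
            Alias: (*Alias)(t),
            Upper: strings.ToUpper(t.Name),
        })
    }

    func main() {
        out, _ := json.Marshal(&Thing{Name: "demo"})
        fmt.Println(string(out)) // {"name":"demo","upper":"DEMO"}
    }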
@@ -2,6 +2,7 @@ package instance_test
 
 import (
 	"encoding/json"
+	"llamactl/pkg/backends"
 	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
@@ -18,7 +19,8 @@ func TestNewInstance(t *testing.T) {
 	}
 
 	options := &instance.CreateInstanceOptions{
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -27,22 +29,22 @@
 	// Mock onStatusChange function
 	mockOnStatusChange := func(oldStatus, newStatus instance.InstanceStatus) {}
 
-	instance := instance.NewInstance("test-instance", globalSettings, options, mockOnStatusChange)
+	inst := instance.NewInstance("test-instance", globalSettings, options, mockOnStatusChange)
 
-	if instance.Name != "test-instance" {
-		t.Errorf("Expected name 'test-instance', got %q", instance.Name)
+	if inst.Name != "test-instance" {
+		t.Errorf("Expected name 'test-instance', got %q", inst.Name)
 	}
-	if instance.IsRunning() {
+	if inst.IsRunning() {
 		t.Error("New instance should not be running")
 	}
 
 	// Check that options were properly set with defaults applied
-	opts := instance.GetOptions()
-	if opts.Model != "/path/to/model.gguf" {
-		t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.Model)
+	opts := inst.GetOptions()
+	if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
+		t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
 	}
-	if opts.Port != 8080 {
-		t.Errorf("Expected port 8080, got %d", opts.Port)
+	if inst.GetPort() != 8080 {
+		t.Errorf("Expected port 8080, got %d", inst.GetPort())
 	}
 
 	// Check that defaults were applied
@@ -74,7 +76,8 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
 		AutoRestart:  &autoRestart,
 		MaxRestarts:  &maxRestarts,
 		RestartDelay: &restartDelay,
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -106,7 +109,8 @@ func TestSetOptions(t *testing.T) {
 	}
 
 	initialOptions := &instance.CreateInstanceOptions{
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -119,7 +123,8 @@ func TestSetOptions(t *testing.T) {
 
 	// Update options
 	newOptions := &instance.CreateInstanceOptions{
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/new-model.gguf",
 			Port:  8081,
 		},
@@ -128,11 +133,11 @@
 	inst.SetOptions(newOptions)
 	opts := inst.GetOptions()
 
-	if opts.Model != "/path/to/new-model.gguf" {
-		t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.Model)
+	if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+		t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
 	}
-	if opts.Port != 8081 {
-		t.Errorf("Expected updated port 8081, got %d", opts.Port)
+	if inst.GetPort() != 8081 {
+		t.Errorf("Expected updated port 8081, got %d", inst.GetPort())
 	}
 
 	// Check that defaults are still applied
@@ -147,7 +152,8 @@ func TestGetProxy(t *testing.T) {
 	}
 
 	options := &instance.CreateInstanceOptions{
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Host: "localhost",
 			Port: 8080,
 		},
@@ -186,7 +192,8 @@ func TestMarshalJSON(t *testing.T) {
 	}
 
 	options := &instance.CreateInstanceOptions{
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -225,8 +232,26 @@
 	if !ok {
 		t.Error("Expected options to be a map")
 	}
-	if options_map["model"] != "/path/to/model.gguf" {
-		t.Errorf("Expected model '/path/to/model.gguf', got %v", options_map["model"])
+
+	// Check backend type
+	if options_map["backend_type"] != string(backends.BackendTypeLlamaCpp) {
+		t.Errorf("Expected backend_type '%s', got %v", backends.BackendTypeLlamaCpp, options_map["backend_type"])
 	}
+
+	// Check backend options
+	backend_options_data, ok := options_map["backend_options"]
+	if !ok {
+		t.Error("Expected backend_options to be included in JSON")
+	}
+	backend_options_map, ok := backend_options_data.(map[string]any)
+	if !ok {
+		t.Error("Expected backend_options to be a map")
+	}
+	if backend_options_map["model"] != "/path/to/model.gguf" {
+		t.Errorf("Expected model '/path/to/model.gguf', got %v", backend_options_map["model"])
+	}
-	if options_map["port"] != float64(8080) {
-		t.Errorf("Expected port 8080, got %v", options_map["port"])
+	if backend_options_map["port"] != float64(8080) {
+		t.Errorf("Expected port 8080, got %v", backend_options_map["port"])
 	}
 }
@@ -235,10 +260,13 @@ func TestUnmarshalJSON(t *testing.T) {
 		"name": "test-instance",
 		"status": "running",
 		"options": {
-			"model": "/path/to/model.gguf",
-			"port": 8080,
 			"auto_restart": false,
-			"max_restarts": 5
+			"max_restarts": 5,
+			"backend_type": "llama_cpp",
+			"backend_options": {
+				"model": "/path/to/model.gguf",
+				"port": 8080
+			}
 		}
 	}`
@@ -259,11 +287,17 @@ func TestUnmarshalJSON(t *testing.T) {
 	if opts == nil {
 		t.Fatal("Expected options to be set")
 	}
-	if opts.Model != "/path/to/model.gguf" {
-		t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.Model)
+	if opts.BackendType != backends.BackendTypeLlamaCpp {
+		t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendType)
 	}
-	if opts.Port != 8080 {
-		t.Errorf("Expected port 8080, got %d", opts.Port)
+	if opts.LlamaServerOptions == nil {
+		t.Fatal("Expected LlamaServerOptions to be set")
+	}
+	if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
+		t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+	}
+	if inst.GetPort() != 8080 {
+		t.Errorf("Expected port 8080, got %d", inst.GetPort())
 	}
 	if opts.AutoRestart == nil || *opts.AutoRestart {
 		t.Error("Expected AutoRestart to be false")
@@ -313,7 +347,8 @@ func TestCreateInstanceOptionsValidation(t *testing.T) {
 			options := &instance.CreateInstanceOptions{
 				MaxRestarts:  tt.maxRestarts,
 				RestartDelay: tt.restartDelay,
-				LlamaServerOptions: llamacpp.LlamaServerOptions{
+				BackendType: backends.BackendTypeLlamaCpp,
+				LlamaServerOptions: &llamacpp.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}
@@ -40,7 +40,6 @@ func (i *Process) Start() error {
 	}
 
 	args := i.options.BuildCommandArgs()
-
 	i.ctx, i.cancel = context.WithCancel(context.Background())
 	i.cmd = exec.CommandContext(i.ctx, "llama-server", args...)
 
@@ -173,11 +172,17 @@ func (i *Process) WaitForHealthy(timeout int) error {
 	}
 
 	// Build the health check URL directly
-	host := opts.Host
+	var host string
+	var port int
+	switch opts.BackendType {
+	case backends.BackendTypeLlamaCpp:
+		host = opts.LlamaServerOptions.Host
+		port = opts.LlamaServerOptions.Port
+	}
 	if host == "" {
 		host = "localhost"
 	}
-	healthURL := fmt.Sprintf("http://%s:%d/health", host, opts.Port)
+	healthURL := fmt.Sprintf("http://%s:%d/health", host, port)
 
 	// Create a dedicated HTTP client for health checks
 	client := &http.Client{
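The rest of WaitForHealthy is cut off above; only healthURL matters for this diff. For orientation, a polling loop of roughly this shape is what such a method typically reduces to (a sketch under that assumption, not this repo's exact code):

    package healthcheck

    import (
        "fmt"
        "net/http"
        "time"
    )

    // waitForHealthy polls healthURL until it answers 200 OK or the
    // deadline passes. Illustrative only.
    func waitForHealthy(healthURL string, timeout time.Duration) error {
        client := &http.Client{Timeout: 2 * time.Second}
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            resp, err := client.Get(healthURL)
            if err == nil {
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return nil
                }
            }
            time.Sleep(500 * time.Millisecond)
        }
        return fmt.Errorf("not healthy after %s", timeout)
    }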
pkg/instance/options.go (new file, 141 lines)
@@ -0,0 +1,141 @@
+package instance
+
+import (
+	"encoding/json"
+	"fmt"
+	"llamactl/pkg/backends"
+	"llamactl/pkg/backends/llamacpp"
+	"llamactl/pkg/config"
+	"log"
+)
+
+type CreateInstanceOptions struct {
+	// Auto restart
+	AutoRestart  *bool `json:"auto_restart,omitempty"`
+	MaxRestarts  *int  `json:"max_restarts,omitempty"`
+	RestartDelay *int  `json:"restart_delay,omitempty"` // seconds
+	// On demand start
+	OnDemandStart *bool `json:"on_demand_start,omitempty"`
+	// Idle timeout
+	IdleTimeout *int `json:"idle_timeout,omitempty"` // minutes
+
+	BackendType    backends.BackendType `json:"backend_type"`
+	BackendOptions map[string]any       `json:"backend_options,omitempty"`
+
+	// LlamaServerOptions contains the options for the llama server
+	LlamaServerOptions *llamacpp.LlamaServerOptions `json:"-"`
+}
+
+// UnmarshalJSON implements custom JSON unmarshaling for CreateInstanceOptions
+func (c *CreateInstanceOptions) UnmarshalJSON(data []byte) error {
+	// Use anonymous struct to avoid recursion
+	type Alias CreateInstanceOptions
+	aux := &struct {
+		*Alias
+	}{
+		Alias: (*Alias)(c),
+	}
+
+	if err := json.Unmarshal(data, aux); err != nil {
+		return err
+	}
+
+	// Parse backend-specific options
+	switch c.BackendType {
+	case backends.BackendTypeLlamaCpp:
+		if c.BackendOptions != nil {
+			// Convert map to JSON and then unmarshal to LlamaServerOptions
+			optionsData, err := json.Marshal(c.BackendOptions)
+			if err != nil {
+				return fmt.Errorf("failed to marshal backend options: %w", err)
+			}
+
+			c.LlamaServerOptions = &llamacpp.LlamaServerOptions{}
+			if err := json.Unmarshal(optionsData, c.LlamaServerOptions); err != nil {
+				return fmt.Errorf("failed to unmarshal llama.cpp options: %w", err)
+			}
+		}
+	default:
+		return fmt.Errorf("unknown backend type: %s", c.BackendType)
+	}
+
+	return nil
+}
+
+// MarshalJSON implements custom JSON marshaling for CreateInstanceOptions
+func (c *CreateInstanceOptions) MarshalJSON() ([]byte, error) {
+	// Use anonymous struct to avoid recursion
+	type Alias CreateInstanceOptions
+	aux := struct {
+		*Alias
+	}{
+		Alias: (*Alias)(c),
+	}
+
+	// Convert LlamaServerOptions back to BackendOptions map for JSON
+	if c.BackendType == backends.BackendTypeLlamaCpp && c.LlamaServerOptions != nil {
+		data, err := json.Marshal(c.LlamaServerOptions)
+		if err != nil {
+			return nil, fmt.Errorf("failed to marshal llama server options: %w", err)
+		}
+
+		var backendOpts map[string]any
+		if err := json.Unmarshal(data, &backendOpts); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal to map: %w", err)
+		}
+
+		aux.BackendOptions = backendOpts
+	}
+
+	return json.Marshal(aux)
+}
+
+// ValidateAndApplyDefaults validates the instance options and applies constraints
+func (c *CreateInstanceOptions) ValidateAndApplyDefaults(name string, globalSettings *config.InstancesConfig) {
+	// Validate and apply constraints
+	if c.MaxRestarts != nil && *c.MaxRestarts < 0 {
+		log.Printf("Instance %s MaxRestarts value (%d) cannot be negative, setting to 0", name, *c.MaxRestarts)
+		*c.MaxRestarts = 0
+	}
+
+	if c.RestartDelay != nil && *c.RestartDelay < 0 {
+		log.Printf("Instance %s RestartDelay value (%d) cannot be negative, setting to 0 seconds", name, *c.RestartDelay)
+		*c.RestartDelay = 0
+	}
+
+	if c.IdleTimeout != nil && *c.IdleTimeout < 0 {
+		log.Printf("Instance %s IdleTimeout value (%d) cannot be negative, setting to 0 minutes", name, *c.IdleTimeout)
+		*c.IdleTimeout = 0
+	}
+
+	// Apply defaults from global settings for nil fields
+	if globalSettings != nil {
+		if c.AutoRestart == nil {
+			c.AutoRestart = &globalSettings.DefaultAutoRestart
+		}
+		if c.MaxRestarts == nil {
+			c.MaxRestarts = &globalSettings.DefaultMaxRestarts
+		}
+		if c.RestartDelay == nil {
+			c.RestartDelay = &globalSettings.DefaultRestartDelay
+		}
+		if c.OnDemandStart == nil {
+			c.OnDemandStart = &globalSettings.DefaultOnDemandStart
+		}
+		if c.IdleTimeout == nil {
+			defaultIdleTimeout := 0
+			c.IdleTimeout = &defaultIdleTimeout
+		}
+	}
+}
+
+// BuildCommandArgs builds command line arguments for the backend
+func (c *CreateInstanceOptions) BuildCommandArgs() []string {
+	switch c.BackendType {
+	case backends.BackendTypeLlamaCpp:
+		if c.LlamaServerOptions != nil {
+			return c.LlamaServerOptions.BuildCommandArgs()
+		}
+	}
+	return []string{}
+}
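End to end, backend_options travels as a plain map in JSON and is projected into the typed LlamaServerOptions on the way in; BuildCommandArgs then dispatches on the backend type. A usage sketch against this new file (the "llama_cpp" string matches the test fixture above; error handling trimmed):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "llamactl/pkg/instance"
    )

    func main() {
        raw := []byte(`{
            "auto_restart": false,
            "backend_type": "llama_cpp",
            "backend_options": {"model": "/path/to/model.gguf", "port": 8080}
        }`)

        var opts instance.CreateInstanceOptions
        if err := json.Unmarshal(raw, &opts); err != nil {
            log.Fatal(err)
        }

        // The custom UnmarshalJSON filled the typed view from the map.
        fmt.Println(opts.LlamaServerOptions.Model) // /path/to/model.gguf
        fmt.Println(opts.LlamaServerOptions.Port)  // 8080

        // Dispatches on BackendType; flag names come from the llamacpp package.
        fmt.Println(opts.BuildCommandArgs())
    }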
@@ -1,6 +1,7 @@
 package instance_test
 
 import (
+	"llamactl/pkg/backends"
 	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
@@ -37,7 +38,8 @@ func TestUpdateLastRequestTime(t *testing.T) {
 	}
 
 	options := &instance.CreateInstanceOptions{
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -59,7 +61,8 @@ func TestShouldTimeout_NotRunning(t *testing.T) {
 	idleTimeout := 1 // 1 minute
 	options := &instance.CreateInstanceOptions{
 		IdleTimeout: &idleTimeout,
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -96,7 +99,8 @@ func TestShouldTimeout_NoTimeoutConfigured(t *testing.T) {
 
 			options := &instance.CreateInstanceOptions{
 				IdleTimeout: tt.idleTimeout,
-				LlamaServerOptions: llamacpp.LlamaServerOptions{
+				BackendType: backends.BackendTypeLlamaCpp,
+				LlamaServerOptions: &llamacpp.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}
@@ -120,7 +124,8 @@ func TestShouldTimeout_WithinTimeLimit(t *testing.T) {
 	idleTimeout := 5 // 5 minutes
 	options := &instance.CreateInstanceOptions{
 		IdleTimeout: &idleTimeout,
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -148,7 +153,8 @@ func TestShouldTimeout_ExceedsTimeLimit(t *testing.T) {
 	idleTimeout := 1 // 1 minute
 	options := &instance.CreateInstanceOptions{
 		IdleTimeout: &idleTimeout,
-		LlamaServerOptions: llamacpp.LlamaServerOptions{
+		BackendType: backends.BackendTypeLlamaCpp,
+		LlamaServerOptions: &llamacpp.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -194,7 +200,8 @@ func TestTimeoutConfiguration_Validation(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			options := &instance.CreateInstanceOptions{
 				IdleTimeout: tt.inputTimeout,
-				LlamaServerOptions: llamacpp.LlamaServerOptions{
+				BackendType: backends.BackendTypeLlamaCpp,
+				LlamaServerOptions: &llamacpp.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}