Merge pull request #27 from lordmathis/feat/separate-backend-options

feat: Separate backend options from common instance options
Committed 2025-09-02 22:03:35 +02:00 (via GitHub)
24 changed files with 780 additions and 317 deletions

pkg/backends/backend.go (new file, 7 lines)

@@ -0,0 +1,7 @@
package backends
type BackendType string
const (
BackendTypeLlamaCpp BackendType = "llama_cpp"
)


@@ -5,7 +5,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"llamactl/pkg/backends/llamacpp" "llamactl/pkg/backends"
"llamactl/pkg/config" "llamactl/pkg/config"
"log" "log"
"net/http" "net/http"
@@ -29,52 +29,6 @@ func (realTimeProvider) Now() time.Time {
return time.Now()
}
type CreateInstanceOptions struct {
// Auto restart
AutoRestart *bool `json:"auto_restart,omitempty"`
MaxRestarts *int `json:"max_restarts,omitempty"`
RestartDelay *int `json:"restart_delay,omitempty"`
// On demand start
OnDemandStart *bool `json:"on_demand_start,omitempty"`
// Idle timeout
IdleTimeout *int `json:"idle_timeout,omitempty"`
// LlamaServerOptions contains the options for the llama server
llamacpp.LlamaServerOptions `json:",inline"`
}
// UnmarshalJSON implements custom JSON unmarshaling for CreateInstanceOptions
// This is needed because the embedded LlamaServerOptions has its own UnmarshalJSON
// which can interfere with proper unmarshaling of the pointer fields
func (c *CreateInstanceOptions) UnmarshalJSON(data []byte) error {
// First, unmarshal into a temporary struct without the embedded type
type tempCreateOptions struct {
AutoRestart *bool `json:"auto_restart,omitempty"`
MaxRestarts *int `json:"max_restarts,omitempty"`
RestartDelay *int `json:"restart_delay,omitempty"`
OnDemandStart *bool `json:"on_demand_start,omitempty"`
IdleTimeout *int `json:"idle_timeout,omitempty"`
}
var temp tempCreateOptions
if err := json.Unmarshal(data, &temp); err != nil {
return err
}
// Copy the pointer fields
c.AutoRestart = temp.AutoRestart
c.MaxRestarts = temp.MaxRestarts
c.RestartDelay = temp.RestartDelay
c.OnDemandStart = temp.OnDemandStart
c.IdleTimeout = temp.IdleTimeout
// Now unmarshal the embedded LlamaServerOptions
if err := json.Unmarshal(data, &c.LlamaServerOptions); err != nil {
return err
}
return nil
}
// Process represents a running instance of the llama server
type Process struct {
Name string `json:"name"`
@@ -110,101 +64,17 @@ type Process struct {
timeProvider TimeProvider `json:"-"` // Time provider for testing
}
// validateAndCopyOptions validates and creates a deep copy of the provided options
// It applies validation rules and returns a safe copy
func validateAndCopyOptions(name string, options *CreateInstanceOptions) *CreateInstanceOptions {
optionsCopy := &CreateInstanceOptions{}
if options != nil {
// Copy the embedded LlamaServerOptions
optionsCopy.LlamaServerOptions = options.LlamaServerOptions
// Copy and validate pointer fields
if options.AutoRestart != nil {
autoRestart := *options.AutoRestart
optionsCopy.AutoRestart = &autoRestart
}
if options.MaxRestarts != nil {
maxRestarts := *options.MaxRestarts
if maxRestarts < 0 {
log.Printf("Instance %s MaxRestarts value (%d) cannot be negative, setting to 0", name, maxRestarts)
maxRestarts = 0
}
optionsCopy.MaxRestarts = &maxRestarts
}
if options.RestartDelay != nil {
restartDelay := *options.RestartDelay
if restartDelay < 0 {
log.Printf("Instance %s RestartDelay value (%d) cannot be negative, setting to 0 seconds", name, restartDelay)
restartDelay = 0
}
optionsCopy.RestartDelay = &restartDelay
}
if options.OnDemandStart != nil {
onDemandStart := *options.OnDemandStart
optionsCopy.OnDemandStart = &onDemandStart
}
if options.IdleTimeout != nil {
idleTimeout := *options.IdleTimeout
if idleTimeout < 0 {
log.Printf("Instance %s IdleTimeout value (%d) cannot be negative, setting to 0 minutes", name, idleTimeout)
idleTimeout = 0
}
optionsCopy.IdleTimeout = &idleTimeout
}
}
return optionsCopy
}
// applyDefaultOptions applies default values from global settings to any nil options
func applyDefaultOptions(options *CreateInstanceOptions, globalSettings *config.InstancesConfig) {
if globalSettings == nil {
return
}
if options.AutoRestart == nil {
defaultAutoRestart := globalSettings.DefaultAutoRestart
options.AutoRestart = &defaultAutoRestart
}
if options.MaxRestarts == nil {
defaultMaxRestarts := globalSettings.DefaultMaxRestarts
options.MaxRestarts = &defaultMaxRestarts
}
if options.RestartDelay == nil {
defaultRestartDelay := globalSettings.DefaultRestartDelay
options.RestartDelay = &defaultRestartDelay
}
if options.OnDemandStart == nil {
defaultOnDemandStart := globalSettings.DefaultOnDemandStart
options.OnDemandStart = &defaultOnDemandStart
}
if options.IdleTimeout == nil {
defaultIdleTimeout := 0
options.IdleTimeout = &defaultIdleTimeout
}
}
// NewInstance creates a new instance with the given name, log path, and options
func NewInstance(name string, globalSettings *config.InstancesConfig, options *CreateInstanceOptions, onStatusChange func(oldStatus, newStatus InstanceStatus)) *Process {
// Validate and copy options
- optionsCopy := validateAndCopyOptions(name, options)
- // Apply defaults
- applyDefaultOptions(optionsCopy, globalSettings)
+ options.ValidateAndApplyDefaults(name, globalSettings)
// Create the instance logger
logger := NewInstanceLogger(name, globalSettings.LogsDir)
return &Process{
Name: name,
- options: optionsCopy,
+ options: options,
globalSettings: globalSettings,
logger: logger,
timeProvider: realTimeProvider{},
@@ -220,6 +90,30 @@ func (i *Process) GetOptions() *CreateInstanceOptions {
return i.options
}
func (i *Process) GetPort() int {
i.mu.RLock()
defer i.mu.RUnlock()
if i.options != nil {
switch i.options.BackendType {
case backends.BackendTypeLlamaCpp:
return i.options.LlamaServerOptions.Port
}
}
return 0
}
func (i *Process) GetHost() string {
i.mu.RLock()
defer i.mu.RUnlock()
if i.options != nil {
switch i.options.BackendType {
case backends.BackendTypeLlamaCpp:
return i.options.LlamaServerOptions.Host
}
}
return ""
}
func (i *Process) SetOptions(options *CreateInstanceOptions) {
i.mu.Lock()
defer i.mu.Unlock()
@@ -229,11 +123,10 @@ func (i *Process) SetOptions(options *CreateInstanceOptions) {
return
}
- // Validate and copy options and apply defaults
- optionsCopy := validateAndCopyOptions(i.Name, options)
- applyDefaultOptions(optionsCopy, i.globalSettings)
- i.options = optionsCopy
+ // Validate and copy options
+ options.ValidateAndApplyDefaults(i.Name, i.globalSettings)
+ i.options = options
// Clear the proxy so it gets recreated with new options
i.proxy = nil
}
@@ -256,7 +149,15 @@ func (i *Process) GetProxy() (*httputil.ReverseProxy, error) {
return nil, fmt.Errorf("instance %s has no options set", i.Name) return nil, fmt.Errorf("instance %s has no options set", i.Name)
} }
targetURL, err := url.Parse(fmt.Sprintf("http://%s:%d", i.options.Host, i.options.Port)) var host string
var port int
switch i.options.BackendType {
case backends.BackendTypeLlamaCpp:
host = i.options.LlamaServerOptions.Host
port = i.options.LlamaServerOptions.Port
}
targetURL, err := url.Parse(fmt.Sprintf("http://%s:%d", host, port))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse target URL for instance %s: %w", i.Name, err) return nil, fmt.Errorf("failed to parse target URL for instance %s: %w", i.Name, err)
} }
@@ -286,44 +187,36 @@ func (i *Process) MarshalJSON() ([]byte, error) {
i.mu.RLock()
defer i.mu.RUnlock()
- // Create a temporary struct with exported fields for JSON marshalling
- temp := struct {
- Name string `json:"name"`
- Options *CreateInstanceOptions `json:"options,omitempty"`
- Status InstanceStatus `json:"status"`
- Created int64 `json:"created,omitempty"`
- }{
- Name: i.Name,
- Options: i.options,
- Status: i.Status,
- Created: i.Created,
- }
- return json.Marshal(temp)
+ // Use anonymous struct to avoid recursion
+ type Alias Process
+ return json.Marshal(&struct {
+ *Alias
+ Options *CreateInstanceOptions `json:"options,omitempty"`
+ }{
+ Alias: (*Alias)(i),
+ Options: i.options,
+ })
}
// UnmarshalJSON implements json.Unmarshaler for Instance
func (i *Process) UnmarshalJSON(data []byte) error {
- // Create a temporary struct for unmarshalling
- temp := struct {
- Name string `json:"name"`
- Options *CreateInstanceOptions `json:"options,omitempty"`
- Status InstanceStatus `json:"status"`
- Created int64 `json:"created,omitempty"`
- }{}
- if err := json.Unmarshal(data, &temp); err != nil {
+ // Use anonymous struct to avoid recursion
+ type Alias Process
+ aux := &struct {
+ *Alias
+ Options *CreateInstanceOptions `json:"options,omitempty"`
+ }{
+ Alias: (*Alias)(i),
+ }
+ if err := json.Unmarshal(data, aux); err != nil {
return err
}
- // Set the fields
- i.Name = temp.Name
- i.Status = temp.Status
- i.Created = temp.Created
- // Handle options with validation but no defaults
- if temp.Options != nil {
- i.options = validateAndCopyOptions(i.Name, temp.Options)
+ // Handle options with validation and defaults
+ if aux.Options != nil {
+ aux.Options.ValidateAndApplyDefaults(i.Name, i.globalSettings)
+ i.options = aux.Options
}
return nil


@@ -2,6 +2,7 @@ package instance_test
import (
"encoding/json"
+ "llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/config"
"llamactl/pkg/instance"
@@ -18,7 +19,8 @@ func TestNewInstance(t *testing.T) {
}
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
@@ -27,22 +29,22 @@ func TestNewInstance(t *testing.T) {
// Mock onStatusChange function
mockOnStatusChange := func(oldStatus, newStatus instance.InstanceStatus) {}
- instance := instance.NewInstance("test-instance", globalSettings, options, mockOnStatusChange)
- if instance.Name != "test-instance" {
- t.Errorf("Expected name 'test-instance', got %q", instance.Name)
+ inst := instance.NewInstance("test-instance", globalSettings, options, mockOnStatusChange)
+ if inst.Name != "test-instance" {
+ t.Errorf("Expected name 'test-instance', got %q", inst.Name)
}
- if instance.IsRunning() {
+ if inst.IsRunning() {
t.Error("New instance should not be running")
}
// Check that options were properly set with defaults applied
- opts := instance.GetOptions()
- if opts.Model != "/path/to/model.gguf" {
- t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.Model)
+ opts := inst.GetOptions()
+ if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
+ t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
}
- if opts.Port != 8080 {
- t.Errorf("Expected port 8080, got %d", opts.Port)
+ if inst.GetPort() != 8080 {
+ t.Errorf("Expected port 8080, got %d", inst.GetPort())
}
// Check that defaults were applied // Check that defaults were applied
@@ -74,7 +76,8 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
AutoRestart: &autoRestart,
MaxRestarts: &maxRestarts,
RestartDelay: &restartDelay,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -106,7 +109,8 @@ func TestSetOptions(t *testing.T) {
}
initialOptions := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
@@ -119,7 +123,8 @@ func TestSetOptions(t *testing.T) {
// Update options
newOptions := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/new-model.gguf",
Port: 8081,
},
@@ -128,11 +133,11 @@ func TestSetOptions(t *testing.T) {
inst.SetOptions(newOptions)
opts := inst.GetOptions()
- if opts.Model != "/path/to/new-model.gguf" {
- t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.Model)
+ if opts.LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+ t.Errorf("Expected updated model '/path/to/new-model.gguf', got %q", opts.LlamaServerOptions.Model)
}
- if opts.Port != 8081 {
- t.Errorf("Expected updated port 8081, got %d", opts.Port)
+ if inst.GetPort() != 8081 {
+ t.Errorf("Expected updated port 8081, got %d", inst.GetPort())
}
// Check that defaults are still applied
@@ -147,7 +152,8 @@ func TestGetProxy(t *testing.T) {
}
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Host: "localhost",
Port: 8080,
},
@@ -186,7 +192,8 @@ func TestMarshalJSON(t *testing.T) {
}
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
@@ -225,8 +232,26 @@ func TestMarshalJSON(t *testing.T) {
if !ok {
t.Error("Expected options to be a map")
}
- if options_map["model"] != "/path/to/model.gguf" {
- t.Errorf("Expected model '/path/to/model.gguf', got %v", options_map["model"])
+ // Check backend type
+ if options_map["backend_type"] != string(backends.BackendTypeLlamaCpp) {
+ t.Errorf("Expected backend_type '%s', got %v", backends.BackendTypeLlamaCpp, options_map["backend_type"])
+ }
+ // Check backend options
+ backend_options_data, ok := options_map["backend_options"]
+ if !ok {
+ t.Error("Expected backend_options to be included in JSON")
+ }
+ backend_options_map, ok := backend_options_data.(map[string]any)
+ if !ok {
+ t.Error("Expected backend_options to be a map")
+ }
+ if backend_options_map["model"] != "/path/to/model.gguf" {
+ t.Errorf("Expected model '/path/to/model.gguf', got %v", backend_options_map["model"])
+ }
+ if backend_options_map["port"] != float64(8080) {
+ t.Errorf("Expected port 8080, got %v", backend_options_map["port"])
}
}
@@ -235,10 +260,13 @@ func TestUnmarshalJSON(t *testing.T) {
"name": "test-instance", "name": "test-instance",
"status": "running", "status": "running",
"options": { "options": {
"model": "/path/to/model.gguf",
"port": 8080,
"auto_restart": false, "auto_restart": false,
"max_restarts": 5 "max_restarts": 5,
"backend_type": "llama_cpp",
"backend_options": {
"model": "/path/to/model.gguf",
"port": 8080
}
} }
}` }`
@@ -259,11 +287,17 @@ func TestUnmarshalJSON(t *testing.T) {
if opts == nil {
t.Fatal("Expected options to be set")
}
- if opts.Model != "/path/to/model.gguf" {
- t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.Model)
- }
- if opts.Port != 8080 {
- t.Errorf("Expected port 8080, got %d", opts.Port)
+ if opts.BackendType != backends.BackendTypeLlamaCpp {
+ t.Errorf("Expected backend_type '%s', got %s", backends.BackendTypeLlamaCpp, opts.BackendType)
+ }
+ if opts.LlamaServerOptions == nil {
+ t.Fatal("Expected LlamaServerOptions to be set")
+ }
+ if opts.LlamaServerOptions.Model != "/path/to/model.gguf" {
+ t.Errorf("Expected model '/path/to/model.gguf', got %q", opts.LlamaServerOptions.Model)
+ }
+ if inst.GetPort() != 8080 {
+ t.Errorf("Expected port 8080, got %d", inst.GetPort())
}
if opts.AutoRestart == nil || *opts.AutoRestart {
t.Error("Expected AutoRestart to be false")
@@ -313,7 +347,8 @@ func TestCreateInstanceOptionsValidation(t *testing.T) {
options := &instance.CreateInstanceOptions{
MaxRestarts: tt.maxRestarts,
RestartDelay: tt.restartDelay,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}


@@ -40,7 +40,6 @@ func (i *Process) Start() error {
}
args := i.options.BuildCommandArgs()
i.ctx, i.cancel = context.WithCancel(context.Background())
i.cmd = exec.CommandContext(i.ctx, "llama-server", args...)
@@ -173,11 +172,17 @@ func (i *Process) WaitForHealthy(timeout int) error {
}
// Build the health check URL directly
- host := opts.Host
+ var host string
+ var port int
+ switch opts.BackendType {
+ case "llama-cpp":
+ host = opts.LlamaServerOptions.Host
+ port = opts.LlamaServerOptions.Port
+ }
if host == "" {
host = "localhost"
}
- healthURL := fmt.Sprintf("http://%s:%d/health", host, opts.Port)
+ healthURL := fmt.Sprintf("http://%s:%d/health", host, port)
// Create a dedicated HTTP client for health checks
client := &http.Client{

pkg/instance/options.go (new file, 141 lines)

@@ -0,0 +1,141 @@
package instance
import (
"encoding/json"
"fmt"
"llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/config"
"log"
)
type CreateInstanceOptions struct {
// Auto restart
AutoRestart *bool `json:"auto_restart,omitempty"`
MaxRestarts *int `json:"max_restarts,omitempty"`
RestartDelay *int `json:"restart_delay,omitempty"` // seconds
// On demand start
OnDemandStart *bool `json:"on_demand_start,omitempty"`
// Idle timeout
IdleTimeout *int `json:"idle_timeout,omitempty"` // minutes
BackendType backends.BackendType `json:"backend_type"`
BackendOptions map[string]any `json:"backend_options,omitempty"`
// LlamaServerOptions contains the options for the llama server
LlamaServerOptions *llamacpp.LlamaServerOptions `json:"-"`
}
// UnmarshalJSON implements custom JSON unmarshaling for CreateInstanceOptions
func (c *CreateInstanceOptions) UnmarshalJSON(data []byte) error {
// Use anonymous struct to avoid recursion
type Alias CreateInstanceOptions
aux := &struct {
*Alias
}{
Alias: (*Alias)(c),
}
if err := json.Unmarshal(data, aux); err != nil {
return err
}
// Parse backend-specific options
switch c.BackendType {
case backends.BackendTypeLlamaCpp:
if c.BackendOptions != nil {
// Convert map to JSON and then unmarshal to LlamaServerOptions
optionsData, err := json.Marshal(c.BackendOptions)
if err != nil {
return fmt.Errorf("failed to marshal backend options: %w", err)
}
c.LlamaServerOptions = &llamacpp.LlamaServerOptions{}
if err := json.Unmarshal(optionsData, c.LlamaServerOptions); err != nil {
return fmt.Errorf("failed to unmarshal llama.cpp options: %w", err)
}
}
default:
return fmt.Errorf("unknown backend type: %s", c.BackendType)
}
return nil
}
// MarshalJSON implements custom JSON marshaling for CreateInstanceOptions
func (c *CreateInstanceOptions) MarshalJSON() ([]byte, error) {
// Use anonymous struct to avoid recursion
type Alias CreateInstanceOptions
aux := struct {
*Alias
}{
Alias: (*Alias)(c),
}
// Convert LlamaServerOptions back to BackendOptions map for JSON
if c.BackendType == backends.BackendTypeLlamaCpp && c.LlamaServerOptions != nil {
data, err := json.Marshal(c.LlamaServerOptions)
if err != nil {
return nil, fmt.Errorf("failed to marshal llama server options: %w", err)
}
var backendOpts map[string]any
if err := json.Unmarshal(data, &backendOpts); err != nil {
return nil, fmt.Errorf("failed to unmarshal to map: %w", err)
}
aux.BackendOptions = backendOpts
}
return json.Marshal(aux)
}
// ValidateAndApplyDefaults validates the instance options and applies constraints
func (c *CreateInstanceOptions) ValidateAndApplyDefaults(name string, globalSettings *config.InstancesConfig) {
// Validate and apply constraints
if c.MaxRestarts != nil && *c.MaxRestarts < 0 {
log.Printf("Instance %s MaxRestarts value (%d) cannot be negative, setting to 0", name, *c.MaxRestarts)
*c.MaxRestarts = 0
}
if c.RestartDelay != nil && *c.RestartDelay < 0 {
log.Printf("Instance %s RestartDelay value (%d) cannot be negative, setting to 0 seconds", name, *c.RestartDelay)
*c.RestartDelay = 0
}
if c.IdleTimeout != nil && *c.IdleTimeout < 0 {
log.Printf("Instance %s IdleTimeout value (%d) cannot be negative, setting to 0 minutes", name, *c.IdleTimeout)
*c.IdleTimeout = 0
}
// Apply defaults from global settings for nil fields
if globalSettings != nil {
if c.AutoRestart == nil {
c.AutoRestart = &globalSettings.DefaultAutoRestart
}
if c.MaxRestarts == nil {
c.MaxRestarts = &globalSettings.DefaultMaxRestarts
}
if c.RestartDelay == nil {
c.RestartDelay = &globalSettings.DefaultRestartDelay
}
if c.OnDemandStart == nil {
c.OnDemandStart = &globalSettings.DefaultOnDemandStart
}
if c.IdleTimeout == nil {
defaultIdleTimeout := 0
c.IdleTimeout = &defaultIdleTimeout
}
}
}
// BuildCommandArgs builds command line arguments for the backend
func (c *CreateInstanceOptions) BuildCommandArgs() []string {
switch c.BackendType {
case backends.BackendTypeLlamaCpp:
if c.LlamaServerOptions != nil {
return c.LlamaServerOptions.BuildCommandArgs()
}
}
return []string{}
}
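For reference, a minimal sketch (not part of this commit) of how the separated options round-trip through the custom MarshalJSON above; it assumes the llamactl packages shown in this diff and a throwaway main package for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"llamactl/pkg/backends"
	"llamactl/pkg/backends/llamacpp"
	"llamactl/pkg/instance"
)

func main() {
	// Build options the same way the tests in this commit do.
	opts := &instance.CreateInstanceOptions{
		BackendType: backends.BackendTypeLlamaCpp,
		LlamaServerOptions: &llamacpp.LlamaServerOptions{
			Model: "/path/to/model.gguf",
			Port:  8080,
		},
	}
	// MarshalJSON converts the typed LlamaServerOptions back into the generic
	// backend_options map, so the output should have the shape (field order may differ):
	// {"backend_type":"llama_cpp","backend_options":{"model":"/path/to/model.gguf","port":8080}}
	data, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}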


@@ -1,6 +1,7 @@
package instance_test
import (
+ "llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/config"
"llamactl/pkg/instance"
@@ -37,7 +38,8 @@ func TestUpdateLastRequestTime(t *testing.T) {
}
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -59,7 +61,8 @@ func TestShouldTimeout_NotRunning(t *testing.T) {
idleTimeout := 1 // 1 minute
options := &instance.CreateInstanceOptions{
IdleTimeout: &idleTimeout,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -96,7 +99,8 @@ func TestShouldTimeout_NoTimeoutConfigured(t *testing.T) {
options := &instance.CreateInstanceOptions{
IdleTimeout: tt.idleTimeout,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -120,7 +124,8 @@ func TestShouldTimeout_WithinTimeLimit(t *testing.T) {
idleTimeout := 5 // 5 minutes
options := &instance.CreateInstanceOptions{
IdleTimeout: &idleTimeout,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -148,7 +153,8 @@ func TestShouldTimeout_ExceedsTimeLimit(t *testing.T) {
idleTimeout := 1 // 1 minute
options := &instance.CreateInstanceOptions{
IdleTimeout: &idleTimeout,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -194,7 +200,8 @@ func TestTimeoutConfiguration_Validation(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
options := &instance.CreateInstanceOptions{
IdleTimeout: tt.inputTimeout,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}


@@ -248,8 +248,8 @@ func (im *instanceManager) loadInstance(name, path string) error {
inst.SetStatus(persistedInstance.Status)
// Check for port conflicts and add to maps
- if inst.GetOptions() != nil && inst.GetOptions().Port > 0 {
- port := inst.GetOptions().Port
+ if inst.GetPort() > 0 {
+ port := inst.GetPort()
if im.ports[port] {
return fmt.Errorf("port conflict: instance %s wants port %d which is already in use", name, port)
}


@@ -2,6 +2,7 @@ package manager_test
import (
"fmt"
+ "llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/config"
"llamactl/pkg/instance"
@@ -53,7 +54,8 @@ func TestPersistence(t *testing.T) {
// Test instance persistence on creation
manager1 := manager.NewInstanceManager(cfg)
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
@@ -109,12 +111,13 @@ func TestConcurrentAccess(t *testing.T) {
errChan := make(chan error, 10)
// Concurrent instance creation
- for i := 0; i < 5; i++ {
+ for i := range 5 {
wg.Add(1)
go func(index int) {
defer wg.Done()
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -150,7 +153,8 @@ func TestShutdown(t *testing.T) {
// Create test instance
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}


@@ -2,6 +2,7 @@ package manager
import (
"fmt"
+ "llamactl/pkg/backends"
"llamactl/pkg/instance"
"llamactl/pkg/validation"
"os"
@@ -52,19 +53,9 @@ func (im *instanceManager) CreateInstance(name string, options *instance.CreateI
return nil, fmt.Errorf("instance with name %s already exists", name) return nil, fmt.Errorf("instance with name %s already exists", name)
} }
// Assign a port if not specified // Assign and validate port for backend-specific options
if options.Port == 0 { if err := im.assignAndValidatePort(options); err != nil {
port, err := im.getNextAvailablePort() return nil, err
if err != nil {
return nil, fmt.Errorf("failed to get next available port: %w", err)
}
options.Port = port
} else {
// Validate the specified port
if _, exists := im.ports[options.Port]; exists {
return nil, fmt.Errorf("port %d is already in use", options.Port)
}
im.ports[options.Port] = true
} }
statusCallback := func(oldStatus, newStatus instance.InstanceStatus) {
@@ -73,7 +64,6 @@ func (im *instanceManager) CreateInstance(name string, options *instance.CreateI
inst := instance.NewInstance(name, &im.instancesConfig, options, statusCallback)
im.instances[inst.Name] = inst
- im.ports[options.Port] = true
if err := im.persistInstance(inst); err != nil {
return nil, fmt.Errorf("failed to persist instance %s: %w", name, err)
@@ -157,7 +147,7 @@ func (im *instanceManager) DeleteInstance(name string) error {
return fmt.Errorf("instance with name %s is still running, stop it before deleting", name) return fmt.Errorf("instance with name %s is still running, stop it before deleting", name)
} }
delete(im.ports, instance.GetOptions().Port) delete(im.ports, instance.GetPort())
delete(im.instances, name) delete(im.instances, name)
// Delete the instance's config file if persistence is enabled
@@ -262,3 +252,49 @@ func (im *instanceManager) GetInstanceLogs(name string) (string, error) {
// TODO: Implement actual log retrieval logic
return fmt.Sprintf("Logs for instance %s", name), nil
}
// getPortFromOptions extracts the port from backend-specific options
func (im *instanceManager) getPortFromOptions(options *instance.CreateInstanceOptions) int {
switch options.BackendType {
case backends.BackendTypeLlamaCpp:
if options.LlamaServerOptions != nil {
return options.LlamaServerOptions.Port
}
}
return 0
}
// setPortInOptions sets the port in backend-specific options
func (im *instanceManager) setPortInOptions(options *instance.CreateInstanceOptions, port int) {
switch options.BackendType {
case backends.BackendTypeLlamaCpp:
if options.LlamaServerOptions != nil {
options.LlamaServerOptions.Port = port
}
}
}
// assignAndValidatePort assigns a port if not specified and validates it's not in use
func (im *instanceManager) assignAndValidatePort(options *instance.CreateInstanceOptions) error {
currentPort := im.getPortFromOptions(options)
if currentPort == 0 {
// Assign a port if not specified
port, err := im.getNextAvailablePort()
if err != nil {
return fmt.Errorf("failed to get next available port: %w", err)
}
im.setPortInOptions(options, port)
// Mark the port as used
im.ports[port] = true
} else {
// Validate the specified port
if _, exists := im.ports[currentPort]; exists {
return fmt.Errorf("port %d is already in use", currentPort)
}
// Mark the port as used
im.ports[currentPort] = true
}
return nil
}


@@ -1,6 +1,7 @@
package manager_test
import (
+ "llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/config"
"llamactl/pkg/instance"
@@ -13,7 +14,8 @@ func TestCreateInstance_Success(t *testing.T) {
manager := createTestManager()
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: 8080,
},
@@ -30,8 +32,8 @@ func TestCreateInstance_Success(t *testing.T) {
if inst.GetStatus() != instance.Stopped {
t.Error("New instance should not be running")
}
- if inst.GetOptions().Port != 8080 {
- t.Errorf("Expected port 8080, got %d", inst.GetOptions().Port)
+ if inst.GetPort() != 8080 {
+ t.Errorf("Expected port 8080, got %d", inst.GetPort())
}
}
@@ -39,7 +41,8 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
// Test duplicate names
mngr := createTestManager()
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -86,7 +89,8 @@ func TestPortManagement(t *testing.T) {
// Test auto port assignment
options1 := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -96,14 +100,15 @@ func TestPortManagement(t *testing.T) {
t.Fatalf("CreateInstance failed: %v", err) t.Fatalf("CreateInstance failed: %v", err)
} }
port1 := inst1.GetOptions().Port port1 := inst1.GetPort()
if port1 < 8000 || port1 > 9000 { if port1 < 8000 || port1 > 9000 {
t.Errorf("Expected port in range 8000-9000, got %d", port1) t.Errorf("Expected port in range 8000-9000, got %d", port1)
} }
// Test port conflict detection // Test port conflict detection
options2 := &instance.CreateInstanceOptions{ options2 := &instance.CreateInstanceOptions{
LlamaServerOptions: llamacpp.LlamaServerOptions{ BackendType: backends.BackendTypeLlamaCpp,
LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model2.gguf", Model: "/path/to/model2.gguf",
Port: port1, // Same port - should conflict Port: port1, // Same port - should conflict
}, },
@@ -120,7 +125,8 @@ func TestPortManagement(t *testing.T) {
// Test port release on deletion
specificPort := 8080
options3 := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
Port: specificPort,
},
@@ -147,7 +153,8 @@ func TestInstanceOperations(t *testing.T) {
manager := createTestManager()
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -169,7 +176,8 @@ func TestInstanceOperations(t *testing.T) {
// Update instance
newOptions := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/new-model.gguf",
Port: 8081,
},
@@ -179,8 +187,8 @@ func TestInstanceOperations(t *testing.T) {
if err != nil {
t.Fatalf("UpdateInstance failed: %v", err)
}
- if updated.GetOptions().Model != "/path/to/new-model.gguf" {
- t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().Model)
+ if updated.GetOptions().LlamaServerOptions.Model != "/path/to/new-model.gguf" {
+ t.Errorf("Expected model '/path/to/new-model.gguf', got %q", updated.GetOptions().LlamaServerOptions.Model)
}
// List instances


@@ -1,6 +1,7 @@
package manager_test
import (
+ "llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/config"
"llamactl/pkg/instance"
@@ -31,7 +32,8 @@ func TestTimeoutFunctionality(t *testing.T) {
idleTimeout := 1 // 1 minute
options := &instance.CreateInstanceOptions{
IdleTimeout: &idleTimeout,
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
}
@@ -79,7 +81,8 @@ func TestTimeoutFunctionality(t *testing.T) {
// Test that instance without timeout doesn't timeout
noTimeoutOptions := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
},
// No IdleTimeout set
@@ -109,19 +112,22 @@ func TestEvictLRUInstance_Success(t *testing.T) {
// Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
options1 := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model1.gguf",
},
IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
}
options2 := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model2.gguf",
},
IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
}
options3 := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model3.gguf",
},
IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
@@ -188,7 +194,8 @@ func TestEvictLRUInstance_NoEligibleInstances(t *testing.T) {
// Helper function to create instances with different timeout configurations
createInstanceWithTimeout := func(manager manager.InstanceManager, name, model string, timeout *int) *instance.Process {
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: model,
},
IdleTimeout: timeout,


@@ -2,6 +2,7 @@ package validation
import (
"fmt"
+ "llamactl/pkg/backends"
"llamactl/pkg/instance"
"reflect"
"regexp"
@@ -33,20 +34,35 @@ func validateStringForInjection(value string) error {
return nil
}
- // ValidateInstanceOptions performs minimal security validation
+ // ValidateInstanceOptions performs validation based on backend type
func ValidateInstanceOptions(options *instance.CreateInstanceOptions) error {
if options == nil {
return ValidationError(fmt.Errorf("options cannot be nil"))
}
+ // Validate based on backend type
+ switch options.BackendType {
+ case backends.BackendTypeLlamaCpp:
+ return validateLlamaCppOptions(options)
+ default:
+ return ValidationError(fmt.Errorf("unsupported backend type: %s", options.BackendType))
+ }
+ }
+ // validateLlamaCppOptions validates llama.cpp specific options
+ func validateLlamaCppOptions(options *instance.CreateInstanceOptions) error {
+ if options.LlamaServerOptions == nil {
+ return ValidationError(fmt.Errorf("llama server options cannot be nil for llama.cpp backend"))
+ }
// Use reflection to check all string fields for injection patterns
- if err := validateStructStrings(&options.LlamaServerOptions, ""); err != nil {
+ if err := validateStructStrings(options.LlamaServerOptions, ""); err != nil {
return err
}
- // Basic network validation - only check for reasonable ranges
- if options.Port < 0 || options.Port > 65535 {
- return ValidationError(fmt.Errorf("invalid port range"))
+ // Basic network validation for port
+ if options.LlamaServerOptions.Port < 0 || options.LlamaServerOptions.Port > 65535 {
+ return ValidationError(fmt.Errorf("invalid port range: %d", options.LlamaServerOptions.Port))
}
return nil


@@ -1,6 +1,7 @@
package validation_test
import (
+ "llamactl/pkg/backends"
"llamactl/pkg/backends/llamacpp"
"llamactl/pkg/instance"
"llamactl/pkg/testutil"
@@ -83,7 +84,8 @@ func TestValidateInstanceOptions_PortValidation(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Port: tt.port,
},
}
@@ -136,7 +138,8 @@ func TestValidateInstanceOptions_StringInjection(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Test with Model field (string field)
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: tt.value,
},
}
@@ -173,7 +176,8 @@ func TestValidateInstanceOptions_ArrayInjection(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Test with Lora field (array field)
options := &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Lora: tt.array,
},
}
@@ -196,7 +200,8 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
{
name: "injection in model field",
options: &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "safe.gguf",
HFRepo: "microsoft/model; curl evil.com",
},
@@ -206,7 +211,8 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
{
name: "injection in log file",
options: &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "safe.gguf",
LogFile: "/tmp/log.txt | tee /etc/passwd",
},
@@ -216,7 +222,8 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
{
name: "all safe fields",
options: &instance.CreateInstanceOptions{
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Model: "/path/to/model.gguf",
HFRepo: "microsoft/DialoGPT-medium",
LogFile: "/tmp/llama.log",
@@ -244,7 +251,8 @@ func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
AutoRestart: testutil.BoolPtr(true),
MaxRestarts: testutil.IntPtr(5),
RestartDelay: testutil.IntPtr(10),
- LlamaServerOptions: llamacpp.LlamaServerOptions{
+ BackendType: backends.BackendTypeLlamaCpp,
+ LlamaServerOptions: &llamacpp.LlamaServerOptions{
Port: 8080,
GPULayers: 32,
CtxSize: 4096,


@@ -5,6 +5,7 @@ import App from '@/App'
import { InstancesProvider } from '@/contexts/InstancesContext'
import { instancesApi } from '@/lib/api'
import type { Instance } from '@/types/instance'
+ import { BackendType } from '@/types/instance'
import { AuthProvider } from '@/contexts/AuthContext'
// Mock the API // Mock the API
@@ -46,8 +47,8 @@ function renderApp() {
describe('App Component - Critical Business Logic Only', () => {
const mockInstances: Instance[] = [
- { name: 'test-instance-1', status: 'stopped', options: { model: 'model1.gguf' } },
- { name: 'test-instance-2', status: 'running', options: { model: 'model2.gguf' } }
+ { name: 'test-instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model1.gguf' } } },
+ { name: 'test-instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model2.gguf' } } }
]
beforeEach(() => {
@@ -82,7 +83,7 @@ describe('App Component - Critical Business Logic Only', () => {
const newInstance: Instance = {
name: 'new-test-instance',
status: 'stopped',
- options: { model: 'new-model.gguf' }
+ options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'new-model.gguf' } }
}
vi.mocked(instancesApi.create).mockResolvedValue(newInstance)
@@ -105,6 +106,7 @@ describe('App Component - Critical Business Logic Only', () => {
await waitFor(() => {
expect(instancesApi.create).toHaveBeenCalledWith('new-test-instance', {
auto_restart: true, // Default value
+ backend_type: BackendType.LLAMA_CPP
})
})
@@ -119,7 +121,7 @@ describe('App Component - Critical Business Logic Only', () => {
const updatedInstance: Instance = {
name: 'test-instance-1',
status: 'stopped',
- options: { model: 'updated-model.gguf' }
+ options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'updated-model.gguf' } }
}
vi.mocked(instancesApi.update).mockResolvedValue(updatedInstance)
@@ -138,7 +140,8 @@ describe('App Component - Critical Business Logic Only', () => {
// Verify correct API call with existing instance data
await waitFor(() => {
expect(instancesApi.update).toHaveBeenCalledWith('test-instance-1', {
- model: "model1.gguf", // Pre-filled from existing instance
+ backend_type: BackendType.LLAMA_CPP,
+ backend_options: { model: "model1.gguf" } // Pre-filled from existing instance
})
})
})


@@ -0,0 +1,123 @@
import React from 'react'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Checkbox } from '@/components/ui/checkbox'
import type { BackendOptions } from '@/schemas/instanceOptions'
import { getBackendFieldType, basicBackendFieldsConfig } from '@/lib/zodFormUtils'
interface BackendFormFieldProps {
fieldKey: keyof BackendOptions
value: string | number | boolean | string[] | undefined
onChange: (key: string, value: string | number | boolean | string[] | undefined) => void
}
const BackendFormField: React.FC<BackendFormFieldProps> = ({ fieldKey, value, onChange }) => {
// Get configuration for basic fields, or use field name for advanced fields
const config = basicBackendFieldsConfig[fieldKey as string] || { label: fieldKey }
// Get type from Zod schema
const fieldType = getBackendFieldType(fieldKey)
const handleChange = (newValue: string | number | boolean | string[] | undefined) => {
onChange(fieldKey as string, newValue)
}
const renderField = () => {
switch (fieldType) {
case 'boolean':
return (
<div className="flex items-center space-x-2">
<Checkbox
id={fieldKey}
checked={typeof value === 'boolean' ? value : false}
onCheckedChange={(checked) => handleChange(checked)}
/>
<Label htmlFor={fieldKey} className="text-sm font-normal">
{config.label}
{config.description && (
<span className="text-muted-foreground ml-1">- {config.description}</span>
)}
</Label>
</div>
)
case 'number':
return (
<div className="grid gap-2">
<Label htmlFor={fieldKey}>
{config.label}
{config.required && <span className="text-red-500 ml-1">*</span>}
</Label>
<Input
id={fieldKey}
type="number"
step="any" // This allows decimal numbers
value={typeof value === 'string' || typeof value === 'number' ? value : ''}
onChange={(e) => {
const numValue = e.target.value ? parseFloat(e.target.value) : undefined
// Only update if the parsed value is valid or the input is empty
if (e.target.value === '' || (numValue !== undefined && !isNaN(numValue))) {
handleChange(numValue)
}
}}
placeholder={config.placeholder}
/>
{config.description && (
<p className="text-sm text-muted-foreground">{config.description}</p>
)}
</div>
)
case 'array':
return (
<div className="grid gap-2">
<Label htmlFor={fieldKey}>
{config.label}
{config.required && <span className="text-red-500 ml-1">*</span>}
</Label>
<Input
id={fieldKey}
type="text"
value={Array.isArray(value) ? value.join(', ') : ''}
onChange={(e) => {
const arrayValue = e.target.value
? e.target.value.split(',').map(s => s.trim()).filter(Boolean)
: undefined
handleChange(arrayValue)
}}
placeholder="item1, item2, item3"
/>
{config.description && (
<p className="text-sm text-muted-foreground">{config.description}</p>
)}
<p className="text-xs text-muted-foreground">Separate multiple values with commas</p>
</div>
)
case 'text':
default:
return (
<div className="grid gap-2">
<Label htmlFor={fieldKey}>
{config.label}
{config.required && <span className="text-red-500 ml-1">*</span>}
</Label>
<Input
id={fieldKey}
type="text"
value={typeof value === 'string' || typeof value === 'number' ? value : ''}
onChange={(e) => handleChange(e.target.value || undefined)}
placeholder={config.placeholder}
/>
{config.description && (
<p className="text-sm text-muted-foreground">{config.description}</p>
)}
</div>
)
}
}
return <div className="space-y-2">{renderField()}</div>
}
export default BackendFormField


@@ -10,10 +10,11 @@ import {
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
- import type { CreateInstanceOptions, Instance } from "@/types/instance";
- import { getBasicFields, getAdvancedFields } from "@/lib/zodFormUtils";
+ import { BackendType, type CreateInstanceOptions, type Instance } from "@/types/instance";
+ import { getBasicFields, getAdvancedFields, getBasicBackendFields, getAdvancedBackendFields } from "@/lib/zodFormUtils";
import { ChevronDown, ChevronRight } from "lucide-react";
import ZodFormField from "@/components/ZodFormField";
+ import BackendFormField from "@/components/BackendFormField";
interface InstanceDialogProps {
open: boolean;
@@ -38,6 +39,8 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
// Get field lists dynamically from the type
const basicFields = getBasicFields();
const advancedFields = getAdvancedFields();
const basicBackendFields = getBasicBackendFields();
const advancedBackendFields = getAdvancedBackendFields();
// Reset form when dialog opens/closes or when instance changes
useEffect(() => {
@@ -51,6 +54,8 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
setInstanceName("");
setFormData({
auto_restart: true, // Default value
+ backend_type: BackendType.LLAMA_CPP, // Default backend type
+ backend_options: {},
});
}
setShowAdvanced(false); // Always start with basic view
@@ -65,6 +70,16 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
}));
};
const handleBackendFieldChange = (key: string, value: any) => {
setFormData((prev) => ({
...prev,
backend_options: {
...prev.backend_options,
[key]: value,
},
}));
};
const handleNameChange = (name: string) => {
setInstanceName(name);
// Validate instance name
@@ -89,7 +104,24 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
// Clean up undefined values to avoid sending empty fields
const cleanOptions: CreateInstanceOptions = {};
Object.entries(formData).forEach(([key, value]) => {
- if (value !== undefined && value !== "" && value !== null) {
+ if (key === 'backend_options' && value && typeof value === 'object') {
// Handle backend_options specially - clean nested object
const cleanBackendOptions: any = {};
Object.entries(value).forEach(([backendKey, backendValue]) => {
if (backendValue !== undefined && backendValue !== null && (typeof backendValue !== 'string' || backendValue.trim() !== "")) {
// Handle arrays - don't include empty arrays
if (Array.isArray(backendValue) && backendValue.length === 0) {
return;
}
cleanBackendOptions[backendKey] = backendValue;
}
});
// Only include backend_options if it has content
if (Object.keys(cleanBackendOptions).length > 0) {
(cleanOptions as any)[key] = cleanBackendOptions;
}
} else if (value !== undefined && value !== null && (typeof value !== 'string' || value.trim() !== "")) {
// Handle arrays - don't include empty arrays // Handle arrays - don't include empty arrays
if (Array.isArray(value) && value.length === 0) { if (Array.isArray(value) && value.length === 0) {
return; return;
@@ -196,8 +228,9 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
(fieldKey) => (fieldKey) =>
fieldKey !== "auto_restart" && fieldKey !== "auto_restart" &&
fieldKey !== "max_restarts" && fieldKey !== "max_restarts" &&
fieldKey !== "restart_delay" fieldKey !== "restart_delay" &&
) // Exclude auto_restart, max_restarts, and restart_delay as they're handled above fieldKey !== "backend_options" // backend_options is handled separately
)
.map((fieldKey) => ( .map((fieldKey) => (
<ZodFormField <ZodFormField
key={fieldKey} key={fieldKey}
@@ -208,6 +241,21 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
))} ))}
</div> </div>
{/* Backend Configuration Section */}
<div className="space-y-4">
<h3 className="text-lg font-medium">Backend Configuration</h3>
{/* Basic backend fields */}
{basicBackendFields.map((fieldKey) => (
<BackendFormField
key={fieldKey}
fieldKey={fieldKey}
value={formData.backend_options?.[fieldKey]}
onChange={handleBackendFieldChange}
/>
))}
</div>
{/* Advanced Fields Toggle */} {/* Advanced Fields Toggle */}
<div className="border-t pt-4"> <div className="border-t pt-4">
<Button <Button
@@ -226,8 +274,8 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
{ {
advancedFields.filter( advancedFields.filter(
(f) => (f) =>
!["max_restarts", "restart_delay"].includes(f as string) !["max_restarts", "restart_delay", "backend_options"].includes(f as string)
).length ).length + advancedBackendFields.length
}{" "} }{" "}
options) options)
</span> </span>
@@ -237,24 +285,51 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
{/* Advanced Fields - Automatically generated from type (excluding restart options) */} {/* Advanced Fields - Automatically generated from type (excluding restart options) */}
{showAdvanced && ( {showAdvanced && (
<div className="space-y-4 pl-6 border-l-2 border-muted"> <div className="space-y-4 pl-6 border-l-2 border-muted">
<div className="space-y-4"> {/* Advanced instance fields */}
{advancedFields {advancedFields
.filter( .filter(
(fieldKey) => (fieldKey) =>
!["max_restarts", "restart_delay"].includes( !["max_restarts", "restart_delay", "backend_options"].includes(
fieldKey as string fieldKey as string
) )
) // Exclude restart options as they're handled above ).length > 0 && (
<div className="space-y-4">
<h4 className="text-md font-medium">Advanced Instance Configuration</h4>
{advancedFields
.filter(
(fieldKey) =>
!["max_restarts", "restart_delay", "backend_options"].includes(
fieldKey as string
)
)
.sort() .sort()
.map((fieldKey) => ( .map((fieldKey) => (
<ZodFormField <ZodFormField
key={fieldKey} key={fieldKey}
fieldKey={fieldKey} fieldKey={fieldKey}
value={formData[fieldKey]} value={fieldKey === 'backend_options' ? undefined : formData[fieldKey]}
onChange={handleFieldChange} onChange={handleFieldChange}
/> />
))} ))}
</div> </div>
)}
{/* Advanced backend fields */}
{advancedBackendFields.length > 0 && (
<div className="space-y-4">
<h4 className="text-md font-medium">Advanced Backend Configuration</h4>
{advancedBackendFields
.sort()
.map((fieldKey) => (
<BackendFormField
key={fieldKey}
fieldKey={fieldKey}
value={formData.backend_options?.[fieldKey]}
onChange={handleBackendFieldChange}
/>
))}
</div>
)}
</div> </div>
)} )}
</div> </div>
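The submit path above prunes backend_options before including it in the payload: undefined, null, empty strings, and empty arrays are dropped, and the key is omitted entirely when nothing remains. A standalone sketch of that pruning step, with the helper name chosen here purely for illustration:

// Illustrative helper mirroring the backend_options cleanup in the submit handler above
function pruneBackendOptions(options: Record<string, unknown>): Record<string, unknown> | undefined {
  const cleaned: Record<string, unknown> = {};
  Object.entries(options).forEach(([key, value]) => {
    if (value === undefined || value === null) return;            // skip unset values
    if (typeof value === "string" && value.trim() === "") return;  // skip empty strings
    if (Array.isArray(value) && value.length === 0) return;        // skip empty arrays
    cleaned[key] = value;
  });
  // Only send backend_options when at least one option survived the cleanup
  return Object.keys(cleaned).length > 0 ? cleaned : undefined;
}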

View File

@@ -3,6 +3,7 @@ import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label' import { Label } from '@/components/ui/label'
import { Checkbox } from '@/components/ui/checkbox' import { Checkbox } from '@/components/ui/checkbox'
import type { CreateInstanceOptions } from '@/types/instance' import type { CreateInstanceOptions } from '@/types/instance'
import { BackendType } from '@/types/instance'
import { getFieldType, basicFieldsConfig } from '@/lib/zodFormUtils' import { getFieldType, basicFieldsConfig } from '@/lib/zodFormUtils'
interface ZodFormFieldProps { interface ZodFormFieldProps {
@@ -23,6 +24,30 @@ const ZodFormField: React.FC<ZodFormFieldProps> = ({ fieldKey, value, onChange }
} }
const renderField = () => { const renderField = () => {
// Special handling for backend_type field - render as dropdown
if (fieldKey === 'backend_type') {
return (
<div className="grid gap-2">
<Label htmlFor={fieldKey}>
{config.label}
{config.required && <span className="text-red-500 ml-1">*</span>}
</Label>
<select
id={fieldKey}
value={typeof value === 'string' ? value : BackendType.LLAMA_CPP}
onChange={(e) => handleChange(e.target.value || undefined)}
className="flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50"
>
<option value={BackendType.LLAMA_CPP}>Llama Server</option>
{/* Add more backend types here as they become available */}
</select>
{config.description && (
<p className="text-sm text-muted-foreground">{config.description}</p>
)}
</div>
)
}
switch (fieldType) { switch (fieldType) {
case 'boolean': case 'boolean':
return ( return (
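The backend_type dropdown above hard-codes a single Llama Server entry, with a comment marking where future backends would be listed. One possible way to grow that list, sketched here as an assumption only: no second backend exists in this PR, and the label map below is invented for the example.

// Sketch only: deriving <option> entries from BackendType instead of hard-coding them
import { BackendType, type BackendTypeValue } from '@/types/instance'

const backendLabels: Record<BackendTypeValue, string> = {
  [BackendType.LLAMA_CPP]: 'Llama Server', // the only backend defined by this PR
}

const backendTypeOptions = Object.entries(backendLabels).map(([value, label]) => (
  <option key={value} value={value}>{label}</option>
))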

View File

@@ -3,6 +3,7 @@ import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event' import userEvent from '@testing-library/user-event'
import InstanceCard from '@/components/InstanceCard' import InstanceCard from '@/components/InstanceCard'
import type { Instance } from '@/types/instance' import type { Instance } from '@/types/instance'
import { BackendType } from '@/types/instance'
// Mock the health hook since we're not testing health logic here // Mock the health hook since we're not testing health logic here
vi.mock('@/hooks/useInstanceHealth', () => ({ vi.mock('@/hooks/useInstanceHealth', () => ({
@@ -18,13 +19,13 @@ describe('InstanceCard - Instance Actions and State', () => {
const stoppedInstance: Instance = { const stoppedInstance: Instance = {
name: 'test-instance', name: 'test-instance',
status: 'stopped', status: 'stopped',
options: { model: 'test-model.gguf' } options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'test-model.gguf' } }
} }
const runningInstance: Instance = { const runningInstance: Instance = {
name: 'running-instance', name: 'running-instance',
status: 'running', status: 'running',
options: { model: 'running-model.gguf' } options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'running-model.gguf' } }
} }
beforeEach(() => { beforeEach(() => {
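The fixtures above capture the central data-model change: llama-server flags such as model no longer sit directly on options but under options.backend_options, next to a backend_type discriminator. Spelled out as a plain literal (field values are examples only):

// Instance shape under the new nested options (values are illustrative)
import { BackendType } from '@/types/instance'
import type { Instance } from '@/types/instance'

const example: Instance = {
  name: 'test-instance',
  status: 'stopped',
  options: {
    auto_restart: true,                         // instance-level option, stays at the top
    backend_type: BackendType.LLAMA_CPP,        // selects which backend runs the instance
    backend_options: { model: 'test-model.gguf', gpu_layers: 10 }, // backend-specific flags
  },
}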

View File

@@ -5,6 +5,7 @@ import InstanceList from '@/components/InstanceList'
import { InstancesProvider } from '@/contexts/InstancesContext' import { InstancesProvider } from '@/contexts/InstancesContext'
import { instancesApi } from '@/lib/api' import { instancesApi } from '@/lib/api'
import type { Instance } from '@/types/instance' import type { Instance } from '@/types/instance'
import { BackendType } from '@/types/instance'
import { AuthProvider } from '@/contexts/AuthContext' import { AuthProvider } from '@/contexts/AuthContext'
// Mock the API // Mock the API
@@ -44,9 +45,9 @@ describe('InstanceList - State Management and UI Logic', () => {
const mockEditInstance = vi.fn() const mockEditInstance = vi.fn()
const mockInstances: Instance[] = [ const mockInstances: Instance[] = [
{ name: 'instance-1', status: 'stopped', options: { model: 'model1.gguf' } }, { name: 'instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model1.gguf' } } },
{ name: 'instance-2', status: 'running', options: { model: 'model2.gguf' } }, { name: 'instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model2.gguf' } } },
{ name: 'instance-3', status: 'stopped', options: { model: 'model3.gguf' } } { name: 'instance-3', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model3.gguf' } } }
] ]
const DUMMY_API_KEY = 'test-api-key-123' const DUMMY_API_KEY = 'test-api-key-123'

View File

@@ -3,6 +3,7 @@ import { render, screen, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event' import userEvent from '@testing-library/user-event'
import InstanceDialog from '@/components/InstanceDialog' import InstanceDialog from '@/components/InstanceDialog'
import type { Instance } from '@/types/instance' import type { Instance } from '@/types/instance'
import { BackendType } from '@/types/instance'
describe('InstanceModal - Form Logic and Validation', () => { describe('InstanceModal - Form Logic and Validation', () => {
const mockOnSave = vi.fn() const mockOnSave = vi.fn()
@@ -91,6 +92,7 @@ afterEach(() => {
expect(mockOnSave).toHaveBeenCalledWith('my-instance', { expect(mockOnSave).toHaveBeenCalledWith('my-instance', {
auto_restart: true, // Default value auto_restart: true, // Default value
backend_type: BackendType.LLAMA_CPP
}) })
}) })
@@ -136,8 +138,8 @@ afterEach(() => {
name: 'existing-instance', name: 'existing-instance',
status: 'stopped', status: 'stopped',
options: { options: {
model: 'test-model.gguf', backend_type: BackendType.LLAMA_CPP,
gpu_layers: 10, backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
auto_restart: false auto_restart: false
} }
} }
@@ -177,8 +179,8 @@ afterEach(() => {
await user.click(screen.getByTestId('dialog-save-button')) await user.click(screen.getByTestId('dialog-save-button'))
expect(mockOnSave).toHaveBeenCalledWith('existing-instance', { expect(mockOnSave).toHaveBeenCalledWith('existing-instance', {
model: 'test-model.gguf', backend_type: BackendType.LLAMA_CPP,
gpu_layers: 10, backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
auto_restart: false auto_restart: false
}) })
}) })
@@ -271,6 +273,7 @@ afterEach(() => {
expect(mockOnSave).toHaveBeenCalledWith('test-instance', { expect(mockOnSave).toHaveBeenCalledWith('test-instance', {
auto_restart: true, auto_restart: true,
backend_type: BackendType.LLAMA_CPP,
max_restarts: 5, max_restarts: 5,
restart_delay: 10 restart_delay: 10
}) })
@@ -321,6 +324,7 @@ afterEach(() => {
// Should only include non-empty values // Should only include non-empty values
expect(mockOnSave).toHaveBeenCalledWith('clean-instance', { expect(mockOnSave).toHaveBeenCalledWith('clean-instance', {
auto_restart: true, // Only this default value should be included auto_restart: true, // Only this default value should be included
backend_type: BackendType.LLAMA_CPP
}) })
}) })
@@ -345,7 +349,8 @@ afterEach(() => {
expect(mockOnSave).toHaveBeenCalledWith('numeric-test', { expect(mockOnSave).toHaveBeenCalledWith('numeric-test', {
auto_restart: true, auto_restart: true,
gpu_layers: 15, // Should be number, not string backend_type: BackendType.LLAMA_CPP,
backend_options: { gpu_layers: 15 }, // Should be number, not string
}) })
}) })
}) })

View File

@@ -4,6 +4,7 @@ import type { ReactNode } from "react";
import { InstancesProvider, useInstances } from "@/contexts/InstancesContext"; import { InstancesProvider, useInstances } from "@/contexts/InstancesContext";
import { instancesApi } from "@/lib/api"; import { instancesApi } from "@/lib/api";
import type { Instance } from "@/types/instance"; import type { Instance } from "@/types/instance";
import { BackendType } from "@/types/instance";
import { AuthProvider } from "../AuthContext"; import { AuthProvider } from "../AuthContext";
// Mock the API module // Mock the API module
@@ -47,13 +48,13 @@ function TestComponent() {
{/* Action buttons for testing with specific instances */} {/* Action buttons for testing with specific instances */}
<button <button
onClick={() => createInstance("new-instance", { model: "test.gguf" })} onClick={() => createInstance("new-instance", { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "test.gguf" } })}
data-testid="create-instance" data-testid="create-instance"
> >
Create Instance Create Instance
</button> </button>
<button <button
onClick={() => updateInstance("instance1", { model: "updated.gguf" })} onClick={() => updateInstance("instance1", { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "updated.gguf" } })}
data-testid="update-instance" data-testid="update-instance"
> >
Update Instance Update Instance
@@ -99,8 +100,8 @@ function renderWithProvider(children: ReactNode) {
describe("InstancesContext", () => { describe("InstancesContext", () => {
const mockInstances: Instance[] = [ const mockInstances: Instance[] = [
{ name: "instance1", status: "running", options: { model: "model1.gguf" } }, { name: "instance1", status: "running", options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "model1.gguf" } } },
{ name: "instance2", status: "stopped", options: { model: "model2.gguf" } }, { name: "instance2", status: "stopped", options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "model2.gguf" } } },
]; ];
beforeEach(() => { beforeEach(() => {
@@ -159,7 +160,7 @@ describe("InstancesContext", () => {
const newInstance: Instance = { const newInstance: Instance = {
name: "new-instance", name: "new-instance",
status: "stopped", status: "stopped",
options: { model: "test.gguf" }, options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "test.gguf" } },
}; };
vi.mocked(instancesApi.create).mockResolvedValue(newInstance); vi.mocked(instancesApi.create).mockResolvedValue(newInstance);
@@ -174,7 +175,8 @@ describe("InstancesContext", () => {
await waitFor(() => { await waitFor(() => {
expect(instancesApi.create).toHaveBeenCalledWith("new-instance", { expect(instancesApi.create).toHaveBeenCalledWith("new-instance", {
model: "test.gguf", backend_type: BackendType.LLAMA_CPP,
backend_options: { model: "test.gguf" }
}); });
}); });
@@ -215,7 +217,7 @@ describe("InstancesContext", () => {
const updatedInstance: Instance = { const updatedInstance: Instance = {
name: "instance1", name: "instance1",
status: "running", status: "running",
options: { model: "updated.gguf" }, options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "updated.gguf" } },
}; };
vi.mocked(instancesApi.update).mockResolvedValue(updatedInstance); vi.mocked(instancesApi.update).mockResolvedValue(updatedInstance);
@@ -230,7 +232,8 @@ describe("InstancesContext", () => {
await waitFor(() => { await waitFor(() => {
expect(instancesApi.update).toHaveBeenCalledWith("instance1", { expect(instancesApi.update).toHaveBeenCalledWith("instance1", {
model: "updated.gguf", backend_type: BackendType.LLAMA_CPP,
backend_options: { model: "updated.gguf" }
}); });
}); });

View File

@@ -1,6 +1,6 @@
import { type CreateInstanceOptions, getAllFieldKeys } from '@/schemas/instanceOptions' import { type CreateInstanceOptions, type BackendOptions, getAllFieldKeys, getAllBackendFieldKeys } from '@/schemas/instanceOptions'
// Only define the basic fields we want to show by default // Instance-level basic fields (not backend-specific)
export const basicFieldsConfig: Record<string, { export const basicFieldsConfig: Record<string, {
label: string label: string
description?: string description?: string
@@ -30,6 +30,19 @@ export const basicFieldsConfig: Record<string, {
label: 'On-Demand Start', label: 'On-Demand Start',
description: 'Start instance upon receiving OpenAI-compatible API request' description: 'Start instance upon receiving OpenAI-compatible API request'
}, },
backend_type: {
label: 'Backend Type',
description: 'Type of backend to use for this instance'
}
}
// Backend-specific basic fields (these go in backend_options)
export const basicBackendFieldsConfig: Record<string, {
label: string
description?: string
placeholder?: string
required?: boolean
}> = {
model: { model: {
label: 'Model Path', label: 'Model Path',
placeholder: '/path/to/model.gguf', placeholder: '/path/to/model.gguf',
@@ -56,6 +69,10 @@ export function isBasicField(key: keyof CreateInstanceOptions): boolean {
return key in basicFieldsConfig return key in basicFieldsConfig
} }
export function isBasicBackendField(key: keyof BackendOptions): boolean {
return key in basicBackendFieldsConfig
}
export function getBasicFields(): (keyof CreateInstanceOptions)[] { export function getBasicFields(): (keyof CreateInstanceOptions)[] {
return Object.keys(basicFieldsConfig) as (keyof CreateInstanceOptions)[] return Object.keys(basicFieldsConfig) as (keyof CreateInstanceOptions)[]
} }
@@ -64,5 +81,13 @@ export function getAdvancedFields(): (keyof CreateInstanceOptions)[] {
return getAllFieldKeys().filter(key => !isBasicField(key)) return getAllFieldKeys().filter(key => !isBasicField(key))
} }
export function getBasicBackendFields(): (keyof BackendOptions)[] {
return Object.keys(basicBackendFieldsConfig) as (keyof BackendOptions)[]
}
export function getAdvancedBackendFields(): (keyof BackendOptions)[] {
return getAllBackendFieldKeys().filter(key => !isBasicBackendField(key))
}
// Re-export the Zod-based functions // Re-export the Zod-based functions
export { getFieldType } from '@/schemas/instanceOptions' export { getFieldType, getBackendFieldType } from '@/schemas/instanceOptions'
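A brief sketch of how these helpers partition form fields. The example keys in the comments are taken from the configs above where visible; the rest are assumptions about the full schema.

// Sketch: consuming the new field-partitioning helpers
import {
  getBasicFields,
  getAdvancedFields,
  getBasicBackendFields,
  getAdvancedBackendFields,
} from '@/lib/zodFormUtils'

const basic = getBasicFields()                      // keys of basicFieldsConfig, e.g. on_demand_start, backend_type
const advanced = getAdvancedFields()                // all other CreateInstanceOptions keys, including backend_options
const basicBackend = getBasicBackendFields()        // keys of basicBackendFieldsConfig, e.g. model
const advancedBackend = getAdvancedBackendFields()  // remaining BackendOptions keys, e.g. threads, verbose_prompt

console.log({ basic, advanced, basicBackend, advancedBackend })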

View File

@@ -1,14 +1,8 @@
import { BackendType } from '@/types/instance'
import { z } from 'zod' import { z } from 'zod'
// Define the Zod schema // Define the backend options schema (previously embedded in CreateInstanceOptionsSchema)
export const CreateInstanceOptionsSchema = z.object({ export const BackendOptionsSchema = z.object({
// Restart options
auto_restart: z.boolean().optional(),
max_restarts: z.number().optional(),
restart_delay: z.number().optional(),
idle_timeout: z.number().optional(),
on_demand_start: z.boolean().optional(),
// Common params // Common params
verbose_prompt: z.boolean().optional(), verbose_prompt: z.boolean().optional(),
threads: z.number().optional(), threads: z.number().optional(),
@@ -176,22 +170,57 @@ export const CreateInstanceOptionsSchema = z.object({
fim_qwen_14b_spec: z.boolean().optional(), fim_qwen_14b_spec: z.boolean().optional(),
}) })
// Infer the TypeScript type from the schema // Define the main create instance options schema
export const CreateInstanceOptionsSchema = z.object({
// Restart options
auto_restart: z.boolean().optional(),
max_restarts: z.number().optional(),
restart_delay: z.number().optional(),
idle_timeout: z.number().optional(),
on_demand_start: z.boolean().optional(),
// Backend configuration
backend_type: z.enum([BackendType.LLAMA_CPP]).optional(),
backend_options: BackendOptionsSchema.optional(),
})
// Infer the TypeScript types from the schemas
export type BackendOptions = z.infer<typeof BackendOptionsSchema>
export type CreateInstanceOptions = z.infer<typeof CreateInstanceOptionsSchema> export type CreateInstanceOptions = z.infer<typeof CreateInstanceOptionsSchema>
// Helper to get all field keys // Helper to get all field keys for CreateInstanceOptions
export function getAllFieldKeys(): (keyof CreateInstanceOptions)[] { export function getAllFieldKeys(): (keyof CreateInstanceOptions)[] {
return Object.keys(CreateInstanceOptionsSchema.shape) as (keyof CreateInstanceOptions)[] return Object.keys(CreateInstanceOptionsSchema.shape) as (keyof CreateInstanceOptions)[]
} }
// Helper to get all backend option field keys
export function getAllBackendFieldKeys(): (keyof BackendOptions)[] {
return Object.keys(BackendOptionsSchema.shape) as (keyof BackendOptions)[]
}
// Get field type from Zod schema // Get field type from Zod schema
export function getFieldType(key: keyof CreateInstanceOptions): 'text' | 'number' | 'boolean' | 'array' { export function getFieldType(key: keyof CreateInstanceOptions): 'text' | 'number' | 'boolean' | 'array' | 'object' {
const fieldSchema = CreateInstanceOptionsSchema.shape[key] const fieldSchema = CreateInstanceOptionsSchema.shape[key]
if (!fieldSchema) return 'text' if (!fieldSchema) return 'text'
// Handle ZodOptional wrapper // Handle ZodOptional wrapper
const innerSchema = fieldSchema instanceof z.ZodOptional ? fieldSchema.unwrap() : fieldSchema const innerSchema = fieldSchema instanceof z.ZodOptional ? fieldSchema.unwrap() : fieldSchema
if (innerSchema instanceof z.ZodBoolean) return 'boolean'
if (innerSchema instanceof z.ZodNumber) return 'number'
if (innerSchema instanceof z.ZodArray) return 'array'
if (innerSchema instanceof z.ZodObject) return 'object'
return 'text' // ZodString and others default to text
}
// Get field type for backend options
export function getBackendFieldType(key: keyof BackendOptions): 'text' | 'number' | 'boolean' | 'array' {
const fieldSchema = BackendOptionsSchema.shape[key]
if (!fieldSchema) return 'text'
// Handle ZodOptional wrapper
const innerSchema = fieldSchema instanceof z.ZodOptional ? fieldSchema.unwrap() : fieldSchema
if (innerSchema instanceof z.ZodBoolean) return 'boolean' if (innerSchema instanceof z.ZodBoolean) return 'boolean'
if (innerSchema instanceof z.ZodNumber) return 'number' if (innerSchema instanceof z.ZodNumber) return 'number'
if (innerSchema instanceof z.ZodArray) return 'array' if (innerSchema instanceof z.ZodArray) return 'array'
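For illustration, how the split schemas and type helpers behave after this change; a minimal sketch in which the option values are examples rather than defaults from the PR:

// Sketch: field-type lookups and parsing with the split schemas
import { CreateInstanceOptionsSchema, getFieldType, getBackendFieldType } from '@/schemas/instanceOptions'
import { BackendType } from '@/types/instance'

getFieldType('backend_options')   // 'object'  (new branch added above)
getFieldType('auto_restart')      // 'boolean'
getBackendFieldType('threads')    // 'number'

// Backend-specific flags are validated through the nested BackendOptionsSchema
const parsed = CreateInstanceOptionsSchema.parse({
  auto_restart: true,
  backend_type: BackendType.LLAMA_CPP,
  backend_options: { threads: 8, verbose_prompt: false },
})
console.log(parsed.backend_type) // 'llama_cpp'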

View File

@@ -2,6 +2,12 @@ import type { CreateInstanceOptions } from '@/schemas/instanceOptions'
export { type CreateInstanceOptions } from '@/schemas/instanceOptions' export { type CreateInstanceOptions } from '@/schemas/instanceOptions'
export const BackendType = {
LLAMA_CPP: 'llama_cpp'
} as const
export type BackendTypeValue = typeof BackendType[keyof typeof BackendType]
export type InstanceStatus = 'running' | 'stopped' | 'failed' export type InstanceStatus = 'running' | 'stopped' | 'failed'
export interface HealthStatus { export interface HealthStatus {