diff --git a/pkg/backends/backend.go b/pkg/backends/backend.go
index 2bb0fa8..6e50565 100644
--- a/pkg/backends/backend.go
+++ b/pkg/backends/backend.go
@@ -56,13 +56,15 @@ func (o *Options) UnmarshalJSON(data []byte) error {
 	}
 
 	// Create backend from constructor map
-	if o.BackendOptions != nil {
-		constructor, exists := backendConstructors[o.BackendType]
-		if !exists {
-			return fmt.Errorf("unsupported backend type: %s", o.BackendType)
-		}
+	constructor, exists := backendConstructors[o.BackendType]
+	if !exists {
+		return fmt.Errorf("unsupported backend type: %s", o.BackendType)
+	}
 
-		backend := constructor()
+	backend := constructor()
+
+	// If backend_options is provided, unmarshal into the backend
+	if o.BackendOptions != nil {
 		optionsData, err := json.Marshal(o.BackendOptions)
 		if err != nil {
 			return fmt.Errorf("failed to marshal backend options: %w", err)
@@ -71,10 +73,11 @@ func (o *Options) UnmarshalJSON(data []byte) error {
 		if err := json.Unmarshal(optionsData, backend); err != nil {
 			return fmt.Errorf("failed to unmarshal backend options: %w", err)
 		}
-
-		// Store in the appropriate typed field for backward compatibility
-		o.setBackendOptions(backend)
 	}
+	// If backend_options is nil or empty, backend remains as empty struct (for router mode)
+
+	// Store in the appropriate typed field
+	o.setBackendOptions(backend)
 
 	return nil
 }
diff --git a/pkg/backends/llama.go b/pkg/backends/llama.go
index 246f0fe..fb94684 100644
--- a/pkg/backends/llama.go
+++ b/pkg/backends/llama.go
@@ -327,20 +327,30 @@ func (o *LlamaServerOptions) UnmarshalJSON(data []byte) error {
 }
 
 func (o *LlamaServerOptions) GetPort() int {
+	if o == nil {
+		return 0
+	}
 	return o.Port
 }
 
 func (o *LlamaServerOptions) SetPort(port int) {
+	if o == nil {
+		return
+	}
 	o.Port = port
 }
 
 func (o *LlamaServerOptions) GetHost() string {
+	if o == nil {
+		return "localhost"
+	}
 	return o.Host
 }
 
 func (o *LlamaServerOptions) Validate() error {
+	// Allow nil options for router mode where llama.cpp manages models dynamically
 	if o == nil {
-		return validation.ValidationError(fmt.Errorf("llama server options cannot be nil for llama.cpp backend"))
+		return nil
 	}
 
 	// Use reflection to check all string fields for injection patterns
@@ -370,6 +380,9 @@ func (o *LlamaServerOptions) Validate() error {
 
 // BuildCommandArgs converts InstanceOptions to command line arguments
 func (o *LlamaServerOptions) BuildCommandArgs() []string {
+	if o == nil {
+		return []string{}
+	}
 	// Llama uses multiple flags for arrays by default (not comma-separated)
 	// Use package-level llamaMultiValuedFlags variable
 	args := BuildCommandArgs(o, llamaMultiValuedFlags)
@@ -381,6 +394,9 @@
 }
 
 func (o *LlamaServerOptions) BuildDockerArgs() []string {
+	if o == nil {
+		return []string{}
+	}
 	// For llama, Docker args are the same as normal args
 	return o.BuildCommandArgs()
}