Implement Docker command handling for Llama, MLX, and vLLM backends

This commit is contained in:
2025-09-24 21:31:58 +02:00
parent 72d2a601c8
commit 76ac93bedc
4 changed files with 222 additions and 16 deletions

View File

@@ -1,8 +1,12 @@
package llamacpp
import (
"context"
"encoding/json"
"fmt"
"llamactl/pkg/backends"
"llamactl/pkg/config"
"os/exec"
"reflect"
"strconv"
)
@@ -329,6 +333,56 @@ func (o *LlamaServerOptions) BuildCommandArgs() []string {
return backends.BuildCommandArgs(o, multipleFlags)
}
// BuildCommandArgsWithDocker converts InstanceOptions to command line arguments,
// handling Docker transformations if needed.
//
// llama-cpp arguments need no Docker-specific rewriting, so this simply
// delegates to BuildCommandArgs; dockerImage is accepted only to keep the
// signature uniform across backends.
func (o *LlamaServerOptions) BuildCommandArgsWithDocker(dockerImage string) []string {
	return o.BuildCommandArgs()
}
// BuildCommand creates the complete command for execution, handling Docker vs
// native execution.
//
// The returned *exec.Cmd is bound to ctx, so cancelling ctx terminates the
// process once started.
func (o *LlamaServerOptions) BuildCommand(ctx context.Context, backendConfig *config.BackendSettings) (*exec.Cmd, error) {
	dockerEnabled := backendConfig.Docker != nil && backendConfig.Docker.Enabled

	// Build instance-specific arguments using backend functions.
	var instanceArgs []string
	if dockerEnabled {
		// Use Docker-aware argument building.
		instanceArgs = o.BuildCommandArgsWithDocker(backendConfig.Docker.Image)
	} else {
		// Use regular argument building for native execution.
		instanceArgs = o.BuildCommandArgs()
	}

	// Combine backend args with instance args into a fresh slice. Appending
	// directly to backendConfig.Args would alias its backing array: if that
	// slice has spare capacity, a second call could overwrite the tail of the
	// shared configuration.
	finalArgs := make([]string, 0, len(backendConfig.Args)+len(instanceArgs))
	finalArgs = append(finalArgs, backendConfig.Args...)
	finalArgs = append(finalArgs, instanceArgs...)

	// Choose Docker vs native execution.
	if dockerEnabled {
		return buildDockerCommand(ctx, backendConfig, finalArgs)
	}
	return exec.CommandContext(ctx, backendConfig.Command, finalArgs...), nil
}
// buildDockerCommand assembles a "docker" invocation from the backend's Docker
// settings plus the container-side server arguments.
func buildDockerCommand(ctx context.Context, backendConfig *config.BackendSettings, args []string) (*exec.Cmd, error) {
	docker := backendConfig.Docker

	// Copy the configured Docker arguments (expected to include "run", "--rm", ...).
	dockerArgs := append([]string(nil), docker.Args...)

	// Forward environment variables into the container.
	// NOTE(review): map iteration order is random, so -e flag ordering varies
	// between calls — harmless to docker, but commands are not byte-stable.
	for key, value := range docker.Environment {
		dockerArgs = append(dockerArgs, "-e", fmt.Sprintf("%s=%s", key, value))
	}

	// Image name, then everything the server inside the container receives.
	dockerArgs = append(dockerArgs, docker.Image)
	dockerArgs = append(dockerArgs, args...)

	return exec.CommandContext(ctx, "docker", dockerArgs...), nil
}
// ParseLlamaCommand parses a llama-server command string into LlamaServerOptions
// Supports multiple formats:
// 1. Full command: "llama-server --model file.gguf"

View File

@@ -1,7 +1,11 @@
package mlx
import (
"context"
"fmt"
"llamactl/pkg/backends"
"llamactl/pkg/config"
"os/exec"
)
type MlxServerOptions struct {
@@ -36,6 +40,56 @@ func (o *MlxServerOptions) BuildCommandArgs() []string {
return backends.BuildCommandArgs(o, multipleFlags)
}
// BuildCommandArgsWithDocker converts to command line arguments, handling
// Docker transformations if needed.
//
// MLX arguments require no Docker-specific rewriting; dockerImage exists only
// so the signature matches the other backends.
func (o *MlxServerOptions) BuildCommandArgsWithDocker(dockerImage string) []string {
	return o.BuildCommandArgs()
}
// BuildCommand creates the complete command for execution, handling Docker vs
// native execution.
//
// The returned *exec.Cmd is bound to ctx, so cancelling ctx terminates the
// process once started.
func (o *MlxServerOptions) BuildCommand(ctx context.Context, backendConfig *config.BackendSettings) (*exec.Cmd, error) {
	dockerEnabled := backendConfig.Docker != nil && backendConfig.Docker.Enabled

	// Build instance-specific arguments using backend functions.
	var instanceArgs []string
	if dockerEnabled {
		// Use Docker-aware argument building.
		instanceArgs = o.BuildCommandArgsWithDocker(backendConfig.Docker.Image)
	} else {
		// Use regular argument building for native execution.
		instanceArgs = o.BuildCommandArgs()
	}

	// Combine backend args with instance args into a fresh slice. Appending
	// directly to backendConfig.Args would alias its backing array: if that
	// slice has spare capacity, a second call could overwrite the tail of the
	// shared configuration.
	finalArgs := make([]string, 0, len(backendConfig.Args)+len(instanceArgs))
	finalArgs = append(finalArgs, backendConfig.Args...)
	finalArgs = append(finalArgs, instanceArgs...)

	// Choose Docker vs native execution.
	if dockerEnabled {
		return buildDockerCommand(ctx, backendConfig, finalArgs)
	}
	return exec.CommandContext(ctx, backendConfig.Command, finalArgs...), nil
}
// buildDockerCommand assembles a "docker" invocation from the backend's Docker
// settings plus the container-side server arguments.
func buildDockerCommand(ctx context.Context, backendConfig *config.BackendSettings, args []string) (*exec.Cmd, error) {
	docker := backendConfig.Docker

	// Copy the configured Docker arguments (expected to include "run", "--rm", ...).
	dockerArgs := append([]string(nil), docker.Args...)

	// Forward environment variables into the container.
	// NOTE(review): map iteration order is random, so -e flag ordering varies
	// between calls — harmless to docker, but commands are not byte-stable.
	for key, value := range docker.Environment {
		dockerArgs = append(dockerArgs, "-e", fmt.Sprintf("%s=%s", key, value))
	}

	// Image name, then everything the server inside the container receives.
	dockerArgs = append(dockerArgs, docker.Image)
	dockerArgs = append(dockerArgs, args...)

	return exec.CommandContext(ctx, "docker", dockerArgs...), nil
}
// ParseMlxCommand parses a mlx_lm.server command string into MlxServerOptions
// Supports multiple formats:
// 1. Full command: "mlx_lm.server --model model/path"

View File

@@ -1,6 +1,12 @@
package vllm
import (
"context"
"fmt"
"llamactl/pkg/config"
"os/exec"
"strings"
"llamactl/pkg/backends"
)
@@ -160,6 +166,75 @@ func (o *VllmServerOptions) BuildCommandArgs() []string {
return args
}
// BuildCommandArgsWithDocker converts VllmServerOptions to command line
// arguments, handling Docker transformations if needed.
//
// Official vLLM images set the entrypoint themselves, so the native
// "serve MODEL" form is rewritten to "--model MODEL" for those images.
func (o *VllmServerOptions) BuildCommandArgsWithDocker(dockerImage string) []string {
	args := o.BuildCommandArgs()
	if !isVLLMDocker(dockerImage) {
		return args
	}
	return transformVLLMArgs(args)
}
// isVLLMDocker reports whether the Docker image name refers to a vLLM image,
// using a case-insensitive substring match on "vllm".
func isVLLMDocker(image string) bool {
	lowered := strings.ToLower(image)
	return strings.Contains(lowered, "vllm")
}
// transformVLLMArgs rewrites native vLLM CLI arguments for Docker execution.
// The native form ["serve", MODEL, ...flags] becomes ["--model", MODEL, ...flags];
// anything else passes through unchanged.
func transformVLLMArgs(args []string) []string {
	if len(args) < 2 || args[0] != "serve" {
		return args
	}
	rewritten := []string{"--model", args[1]}
	return append(rewritten, args[2:]...)
}
// BuildCommand creates the complete command for execution, handling Docker vs
// native execution.
//
// The returned *exec.Cmd is bound to ctx, so cancelling ctx terminates the
// process once started.
func (o *VllmServerOptions) BuildCommand(ctx context.Context, backendConfig *config.BackendSettings) (*exec.Cmd, error) {
	dockerEnabled := backendConfig.Docker != nil && backendConfig.Docker.Enabled

	// Build instance-specific arguments using backend functions.
	var instanceArgs []string
	if dockerEnabled {
		// Use Docker-aware argument building (rewrites "serve" for vLLM images).
		instanceArgs = o.BuildCommandArgsWithDocker(backendConfig.Docker.Image)
	} else {
		// Use regular argument building for native execution.
		instanceArgs = o.BuildCommandArgs()
	}

	// Combine backend args with instance args into a fresh slice. Appending
	// directly to backendConfig.Args would alias its backing array: if that
	// slice has spare capacity, a second call could overwrite the tail of the
	// shared configuration.
	finalArgs := make([]string, 0, len(backendConfig.Args)+len(instanceArgs))
	finalArgs = append(finalArgs, backendConfig.Args...)
	finalArgs = append(finalArgs, instanceArgs...)

	// Choose Docker vs native execution.
	if dockerEnabled {
		return buildDockerCommand(ctx, backendConfig, finalArgs)
	}
	return exec.CommandContext(ctx, backendConfig.Command, finalArgs...), nil
}
// buildDockerCommand assembles a "docker" invocation from the backend's Docker
// settings plus the container-side server arguments.
func buildDockerCommand(ctx context.Context, backendConfig *config.BackendSettings, args []string) (*exec.Cmd, error) {
	docker := backendConfig.Docker

	// Copy the configured Docker arguments (expected to include "run", "--rm", ...).
	dockerArgs := append([]string(nil), docker.Args...)

	// Forward environment variables into the container.
	// NOTE(review): map iteration order is random, so -e flag ordering varies
	// between calls — harmless to docker, but commands are not byte-stable.
	for key, value := range docker.Environment {
		dockerArgs = append(dockerArgs, "-e", fmt.Sprintf("%s=%s", key, value))
	}

	// Image name, then everything the server inside the container receives.
	dockerArgs = append(dockerArgs, docker.Image)
	dockerArgs = append(dockerArgs, args...)

	return exec.CommandContext(ctx, "docker", dockerArgs...), nil
}
// ParseVllmCommand parses a vLLM serve command string into VllmServerOptions
// Supports multiple formats:
// 1. Full command: "vllm serve --model MODEL_NAME --other-args"