From 72d2a601c8dc6fc482e92fd3b293df0f0355134c Mon Sep 17 00:00:00 2001
From: LordMathis
Date: Wed, 24 Sep 2025 21:27:51 +0200
Subject: [PATCH] Update Docker args in LoadConfig and tests to include 'run
 --rm' prefix

---
 pkg/config/config.go      | 4 ++--
 pkg/config/config_test.go | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkg/config/config.go b/pkg/config/config.go
index 57d863f..863ded4 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -139,7 +139,7 @@ func LoadConfig(configPath string) (AppConfig, error) {
 			Docker: &DockerSettings{
 				Enabled:     false,
 				Image:       "ghcr.io/ggml-org/llama.cpp:server",
-				Args:        []string{"--network", "host", "--gpus", "all"},
+				Args:        []string{"run", "--rm", "--network", "host", "--gpus", "all"},
 				Environment: map[string]string{},
 			},
 		},
@@ -149,7 +149,7 @@ func LoadConfig(configPath string) (AppConfig, error) {
 			Docker: &DockerSettings{
 				Enabled:     false,
 				Image:       "vllm/vllm-openai:latest",
-				Args:        []string{"--network", "host", "--gpus", "all", "--shm-size", "1g"},
+				Args:        []string{"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g"},
 				Environment: map[string]string{},
 			},
 		},
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index c541295..ad800ed 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -434,7 +434,7 @@ func TestLoadConfig_BackendEnvironmentVariables(t *testing.T) {
 		"LLAMACTL_LLAMACPP_ARGS":           "--verbose --threads 4",
 		"LLAMACTL_LLAMACPP_DOCKER_ENABLED": "true",
 		"LLAMACTL_LLAMACPP_DOCKER_IMAGE":   "env-llama:latest",
-		"LLAMACTL_LLAMACPP_DOCKER_ARGS":    "--network host --gpus all",
+		"LLAMACTL_LLAMACPP_DOCKER_ARGS":    "run --rm --network host --gpus all",
 		"LLAMACTL_LLAMACPP_DOCKER_ENV":     "CUDA_VISIBLE_DEVICES=0,OMP_NUM_THREADS=4",
 		"LLAMACTL_VLLM_COMMAND":            "env-vllm",
 		"LLAMACTL_VLLM_DOCKER_ENABLED":     "false",
@@ -468,7 +468,7 @@ func TestLoadConfig_BackendEnvironmentVariables(t *testing.T) {
 	if cfg.Backends.LlamaCpp.Docker.Image != "env-llama:latest" {
 		t.Errorf("Expected llama Docker image 'env-llama:latest', got %q", cfg.Backends.LlamaCpp.Docker.Image)
 	}
-	expectedDockerArgs := []string{"--network", "host", "--gpus", "all"}
+	expectedDockerArgs := []string{"run", "--rm", "--network", "host", "--gpus", "all"}
 	if len(cfg.Backends.LlamaCpp.Docker.Args) != len(expectedDockerArgs) {
 		t.Errorf("Expected llama Docker args %v, got %v", expectedDockerArgs, cfg.Backends.LlamaCpp.Docker.Args)
 	}
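
Note: the patch implies that DockerSettings.Args is passed to the docker binary verbatim, which is why the subcommand ("run") and cleanup flag ("--rm", auto-remove the container on exit) must now live in the args slice rather than being prepended elsewhere. The following is a minimal sketch of that assumed composition; buildDockerCommand is a hypothetical helper, not llamactl's actual implementation.

package main

import (
	"fmt"
	"os/exec"
)

// buildDockerCommand illustrates the assumed arg layout: docker args first
// (including the "run --rm" prefix), then the image, then the flags that are
// forwarded to the backend server inside the container.
func buildDockerCommand(image string, dockerArgs, backendArgs []string) *exec.Cmd {
	args := append([]string{}, dockerArgs...) // e.g. ["run", "--rm", "--network", "host", "--gpus", "all"]
	args = append(args, image)                // e.g. "ghcr.io/ggml-org/llama.cpp:server"
	args = append(args, backendArgs...)       // backend flags, e.g. ["--port", "8080"]
	return exec.Command("docker", args...)
}

func main() {
	cmd := buildDockerCommand(
		"ghcr.io/ggml-org/llama.cpp:server",
		[]string{"run", "--rm", "--network", "host", "--gpus", "all"},
		[]string{"--port", "8080"},
	)
	fmt.Println(cmd.String())
	// prints: docker run --rm --network host --gpus all ghcr.io/ggml-org/llama.cpp:server --port 8080
}

Under this assumption, the old defaults ("--network host --gpus all" with no subcommand) would have produced an invalid docker invocation, which matches the test updates in config_test.go expecting the "run --rm" prefix in both the env-var string form and the parsed slice.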