Update Docker args in LoadConfig and tests to include 'run --rm' prefix
@@ -139,7 +139,7 @@ func LoadConfig(configPath string) (AppConfig, error) {
 			Docker: &DockerSettings{
 				Enabled:     false,
 				Image:       "ghcr.io/ggml-org/llama.cpp:server",
-				Args:        []string{"--network", "host", "--gpus", "all"},
+				Args:        []string{"run", "--rm", "--network", "host", "--gpus", "all"},
 				Environment: map[string]string{},
 			},
 		},
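
For context, the fields assigned in these defaults imply a settings struct along the following lines. This is a sketch inferred from the field names and value types visible in the hunk above, not a copy of llamactl's actual declaration; the yaml tags are illustrative assumptions.

// Sketch of the settings struct these defaults populate, inferred from the
// fields used above. The yaml tags are assumptions for illustration only.
type DockerSettings struct {
	Enabled     bool              `yaml:"enabled"`
	Image       string            `yaml:"image"`
	Args        []string          `yaml:"args"` // now expected to start with the docker subcommand, e.g. "run", "--rm"
	Environment map[string]string `yaml:"environment"`
}
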
@@ -149,7 +149,7 @@ func LoadConfig(configPath string) (AppConfig, error) {
 			Docker: &DockerSettings{
 				Enabled:     false,
 				Image:       "vllm/vllm-openai:latest",
-				Args:        []string{"--network", "host", "--gpus", "all", "--shm-size", "1g"},
+				Args:        []string{"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g"},
 				Environment: map[string]string{},
 			},
 		},
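
With the "run --rm" subcommand folded into the default Args for both backends, a full container invocation can be built by appending the image and the backend's own flags straight after the configured args. The sketch below shows that composition; buildDockerArgs is a hypothetical helper written for illustration, and appending the backend flags after the image is an assumption about how llamactl launches the container, not a copy of its code.

package main

import (
	"fmt"
	"os/exec"
)

// buildDockerArgs is a hypothetical helper (not llamactl code) showing how the
// new defaults compose: the configured Args already carry "run --rm ...", so
// the image and the backend's own flags are simply appended after them.
func buildDockerArgs(dockerArgs []string, image string, backendArgs []string) []string {
	args := append([]string{}, dockerArgs...) // copy so the config slice is not mutated
	args = append(args, image)
	args = append(args, backendArgs...)
	return args
}

func main() {
	dockerArgs := []string{"run", "--rm", "--network", "host", "--gpus", "all"}
	args := buildDockerArgs(dockerArgs, "ghcr.io/ggml-org/llama.cpp:server", []string{"--port", "8080"})
	cmd := exec.Command("docker", args...)
	// Prints roughly: docker run --rm --network host --gpus all ghcr.io/ggml-org/llama.cpp:server --port 8080
	fmt.Println(cmd.String())
}
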
@@ -434,7 +434,7 @@ func TestLoadConfig_BackendEnvironmentVariables(t *testing.T) {
 		"LLAMACTL_LLAMACPP_ARGS":           "--verbose --threads 4",
 		"LLAMACTL_LLAMACPP_DOCKER_ENABLED": "true",
 		"LLAMACTL_LLAMACPP_DOCKER_IMAGE":   "env-llama:latest",
-		"LLAMACTL_LLAMACPP_DOCKER_ARGS":    "--network host --gpus all",
+		"LLAMACTL_LLAMACPP_DOCKER_ARGS":    "run --rm --network host --gpus all",
 		"LLAMACTL_LLAMACPP_DOCKER_ENV":     "CUDA_VISIBLE_DEVICES=0,OMP_NUM_THREADS=4",
 		"LLAMACTL_VLLM_COMMAND":            "env-vllm",
 		"LLAMACTL_VLLM_DOCKER_ENABLED":     "false",
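
The environment override is a single space-separated string, so the loader has to split it into the same []string shape the defaults use. A minimal sketch of that mapping, assuming a plain whitespace split with strings.Fields; the exact parsing inside LoadConfig may differ (for example, this would not preserve quoted arguments).

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// The test sets the override as one space-separated string...
	os.Setenv("LLAMACTL_LLAMACPP_DOCKER_ARGS", "run --rm --network host --gpus all")

	// ...and expects it back as a []string. strings.Fields is the simplest
	// whitespace split; this is an assumption about LoadConfig's behavior.
	args := strings.Fields(os.Getenv("LLAMACTL_LLAMACPP_DOCKER_ARGS"))
	fmt.Printf("%q\n", args) // ["run" "--rm" "--network" "host" "--gpus" "all"]
}
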
@@ -468,7 +468,7 @@ func TestLoadConfig_BackendEnvironmentVariables(t *testing.T) {
 	if cfg.Backends.LlamaCpp.Docker.Image != "env-llama:latest" {
 		t.Errorf("Expected llama Docker image 'env-llama:latest', got %q", cfg.Backends.LlamaCpp.Docker.Image)
 	}
-	expectedDockerArgs := []string{"--network", "host", "--gpus", "all"}
+	expectedDockerArgs := []string{"run", "--rm", "--network", "host", "--gpus", "all"}
 	if len(cfg.Backends.LlamaCpp.Docker.Args) != len(expectedDockerArgs) {
 		t.Errorf("Expected llama Docker args %v, got %v", expectedDockerArgs, cfg.Backends.LlamaCpp.Docker.Args)
 	}
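
The visible hunk only covers the length check; the element-by-element comparison presumably follows below it in the actual test file. As a standalone aside (not a change to the test), the same assertion can be expressed in a single call with reflect.DeepEqual, which checks length and every element at once.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	expected := []string{"run", "--rm", "--network", "host", "--gpus", "all"}
	got := []string{"run", "--rm", "--network", "host", "--gpus", "all"}

	// reflect.DeepEqual compares length and every element in one call.
	if !reflect.DeepEqual(expected, got) {
		fmt.Printf("Expected llama Docker args %v, got %v\n", expected, got)
	}
}
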