Update Docker command arguments for llama-server and vllm with volume mounts

2025-09-25 22:51:51 +02:00
parent 282344af23
commit 031d6c7017


@@ -139,7 +139,9 @@ func LoadConfig(configPath string) (AppConfig, error) {
 		Docker: &DockerSettings{
 			Enabled: false,
 			Image: "ghcr.io/ggml-org/llama.cpp:server",
-			Args: []string{"run", "--rm", "--network", "host", "--gpus", "all"},
+			Args: []string{
+				"run", "--rm", "--network", "host", "--gpus", "all",
+				"-v", filepath.Join(getDefaultDataDirectory(), "llama.cpp") + ":/root/.cache/llama.cpp"},
 			Environment: map[string]string{},
 		},
 	},
@@ -149,7 +151,10 @@ func LoadConfig(configPath string) (AppConfig, error) {
 		Docker: &DockerSettings{
 			Enabled: false,
 			Image: "vllm/vllm-openai:latest",
-			Args: []string{"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g"},
+			Args: []string{
+				"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g",
+				"-v", filepath.Join(getDefaultDataDirectory(), "huggingface") + ":/root/.cache/huggingface",
+			},
 			Environment: map[string]string{},
 		},
 	},
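
For context: the new "-v" flags bind-mount host cache directories into the otherwise ephemeral (--rm) containers, so model files downloaded by llama-server or vLLM persist across container restarts instead of being re-fetched every launch. Below is a minimal sketch of how these defaults could come together at launch time. The real implementation of getDefaultDataDirectory and the actual command-assembly code are not part of this diff, so the directory layout and the buildDockerCommand helper here are assumptions for illustration only.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// Stand-in for the helper referenced in the diff; its real implementation
// lives elsewhere in the package, so a per-user data directory is assumed.
func getDefaultDataDirectory() string {
	home, err := os.UserHomeDir()
	if err != nil {
		return "."
	}
	return filepath.Join(home, ".local", "share", "llamactl") // assumed layout
}

// Mirrors the DockerSettings struct configured in LoadConfig.
type DockerSettings struct {
	Enabled     bool
	Image       string
	Args        []string
	Environment map[string]string
}

// buildDockerCommand is a hypothetical helper, not part of this commit:
// it appends "-e KEY=VALUE" pairs and the image after the configured args,
// the usual shape of a `docker run` invocation.
func buildDockerCommand(s *DockerSettings) *exec.Cmd {
	args := append([]string{}, s.Args...)
	for k, v := range s.Environment {
		args = append(args, "-e", k+"="+v)
	}
	args = append(args, s.Image)
	return exec.Command("docker", args...)
}

func main() {
	// Defaults for the vLLM backend as set in this commit: the host-side
	// Hugging Face cache is bind-mounted to the container's default cache path.
	s := &DockerSettings{
		Image: "vllm/vllm-openai:latest",
		Args: []string{
			"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g",
			"-v", filepath.Join(getDefaultDataDirectory(), "huggingface") + ":/root/.cache/huggingface",
		},
		Environment: map[string]string{},
	}
	fmt.Println(buildDockerCommand(s).String())
}

The mount targets match each backend's default cache location inside the container (/root/.cache/llama.cpp for llama-server, /root/.cache/huggingface for vLLM), which is why no extra configuration is needed in the container itself.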