From 031d6c70176cc2e0ac4a83c75960188b3eb3d129 Mon Sep 17 00:00:00 2001
From: LordMathis
Date: Thu, 25 Sep 2025 22:51:51 +0200
Subject: [PATCH] Update Docker command arguments for llama-server and vllm
 with volume mounts

---
 pkg/config/config.go | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/pkg/config/config.go b/pkg/config/config.go
index 863ded4..cf6f9cf 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -137,9 +137,11 @@ func LoadConfig(configPath string) (AppConfig, error) {
 			Command: "llama-server",
 			Args:    []string{},
 			Docker: &DockerSettings{
-				Enabled:     false,
-				Image:       "ghcr.io/ggml-org/llama.cpp:server",
-				Args:        []string{"run", "--rm", "--network", "host", "--gpus", "all"},
+				Enabled: false,
+				Image:   "ghcr.io/ggml-org/llama.cpp:server",
+				Args: []string{
+					"run", "--rm", "--network", "host", "--gpus", "all",
+					"-v", filepath.Join(getDefaultDataDirectory(), "llama.cpp") + ":/root/.cache/llama.cpp"},
 				Environment: map[string]string{},
 			},
 		},
@@ -147,9 +149,12 @@ func LoadConfig(configPath string) (AppConfig, error) {
 			Command: "vllm",
 			Args:    []string{"serve"},
 			Docker: &DockerSettings{
-				Enabled:     false,
-				Image:       "vllm/vllm-openai:latest",
-				Args:        []string{"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g"},
+				Enabled: false,
+				Image:   "vllm/vllm-openai:latest",
+				Args: []string{
+					"run", "--rm", "--network", "host", "--gpus", "all", "--shm-size", "1g",
+					"-v", filepath.Join(getDefaultDataDirectory(), "huggingface") + ":/root/.cache/huggingface",
+				},
 				Environment: map[string]string{},
 			},
 		},
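
Note: the new defaults bind-mount a per-backend host cache directory into each
container, so models downloaded by llama.cpp and Hugging Face persist across
container restarts. Below is a minimal sketch of what the llama.cpp mount
argument evaluates to at load time. The getDefaultDataDirectory here is a
hypothetical stand-in (the real helper lives elsewhere in pkg/config and may
resolve a different path), and the ~/.local/share/llamactl location is an
assumption for illustration only:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Hypothetical stand-in for the helper the patch calls; the real
// implementation in pkg/config may resolve a different directory.
func getDefaultDataDirectory() string {
	home, err := os.UserHomeDir()
	if err != nil {
		return "."
	}
	// Assumed per-user data directory; adjust to match the real default.
	return filepath.Join(home, ".local", "share", "llamactl")
}

func main() {
	// The host:container pair the new default Args pass to "docker run -v".
	mount := filepath.Join(getDefaultDataDirectory(), "llama.cpp") + ":/root/.cache/llama.cpp"
	fmt.Println(mount)
	// e.g. /home/user/.local/share/llamactl/llama.cpp:/root/.cache/llama.cpp
}

One caveat worth double-checking: filepath.Join produces backslash-separated
paths on Windows, which Docker's colon-delimited -v host:container syntax may
not parse as intended, so these defaults implicitly assume a Unix-like host.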