From b21cc41e42296f33ed9f0220e56aa55fb9b97c8e Mon Sep 17 00:00:00 2001 From: lordmathis Date: Sat, 15 Nov 2025 00:04:23 +0000 Subject: [PATCH] Deployed 514b1b0 to dev with MkDocs 1.6.1 and mike 2.1.3 --- .../fix_line_endings.cpython-311.pyc | Bin 2109 -> 2109 bytes dev/__pycache__/readme_sync.cpython-311.pyc | Bin 3201 -> 3201 bytes dev/api-reference/index.html | 179 +++++++----- dev/docs.go | 276 ++++++++++++++++++ dev/managing-instances/index.html | 185 +++++------- dev/search/search_index.json | 2 +- dev/sitemap.xml | 14 +- dev/sitemap.xml.gz | Bin 257 -> 257 bytes dev/swagger.json | 276 ++++++++++++++++++ dev/swagger.yaml | 190 ++++++++++++ 10 files changed, 941 insertions(+), 181 deletions(-) diff --git a/dev/__pycache__/fix_line_endings.cpython-311.pyc b/dev/__pycache__/fix_line_endings.cpython-311.pyc index 84cc4065de502e543eeecd768f630d034e7f4aa3..a601f7ba49fde920a8b1e1f30835773d57c137ed 100644 GIT binary patch delta 20 acmdlhuvdV4IWI340}xak7T?Hi!~p;{Pz1RE delta 20 acmdlhuvdV4IWI340}!}5iEZRI;s5|K=L7`+ diff --git a/dev/__pycache__/readme_sync.cpython-311.pyc b/dev/__pycache__/readme_sync.cpython-311.pyc index 4670f19f2adfa501a7c2ca0a848bea62b10ffe8b..448d7c3f86b0df0f226f6f4100119f33ca7517f2 100644 GIT binary patch delta 20 acmZpaY?S0)&dbZi00b3>#W!-7@c;lXI0Tyj delta 20 ZcmZpaY?S0)&dbZi00eGMVjH>3cmOKT1Lpt$ diff --git a/dev/api-reference/index.html b/dev/api-reference/index.html index 2c8fb91..f31e13a 100644 --- a/dev/api-reference/index.html +++ b/dev/api-reference/index.html @@ -645,6 +645,39 @@ + + +
  • + + + System + + + + +
  • @@ -792,30 +825,6 @@ -
  • - -
  • - - - System - - - - -
  • @@ -1306,6 +1315,87 @@ Most likely, it is not desirable to edit this file by hand! Response 400 Bad Request

    +

    System

    +
    + +

    GET /api/v1/config

    +

    Get server configuration

    +
    +Description +

    Returns the current server configuration (sanitized)

    +
    +

    Input parameters

    + + + + + + + + + + + + + + + + + + + + + +
    ParameterInTypeDefaultNullableDescription
    ApiKeyAuthheaderstringN/ANo
    + +

    + Response 200 OK +

    + +

    + Response 500 Internal Server Error +

    + +
    + +

    GET /api/v1/version

    +

    Get llamactl version

    +
    +Description +

    Returns the version of the llamactl command

    +
    +

    Input parameters

    + + + + + + + + + + + + + + + + + + + + + +
    ParameterInTypeDefaultNullableDescription
    ApiKeyAuthheaderstringN/ANo
    + +

    + Response 200 OK +

    + +

    + Response 500 Internal Server Error +

    +
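    For reference, a minimal sketch of calling the two System endpoints with curl, assuming the management API key is sent as a Bearer token as in the other examples in these docs:

    # Fetch the sanitized server configuration
    curl http://localhost:8080/api/v1/config \
      -H "Authorization: Bearer <token>"

    # Fetch the llamactl version
    curl http://localhost:8080/api/v1/version \
      -H "Authorization: Bearer <token>"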

    Instances


    @@ -1999,47 +2089,6 @@ config)

    Response 500 Internal Server Error

    -

    System

    -
    - -

    GET /api/v1/version

    -

    Get llamactl version

    -
    -Description -

    Returns the version of the llamactl command

    -
    -

    Input parameters

    - - - - - - - - - - - - - - - - - - - - - -
    ParameterInTypeDefaultNullableDescription
    ApiKeyAuthheaderstringN/ANo
    - -

    - Response 200 OK -

    - -

    - Response 500 Internal Server Error -

    -

    Llama.cpp


    diff --git a/dev/docs.go b/dev/docs.go index f46ac36..8d6a8f1 100644 --- a/dev/docs.go +++ b/dev/docs.go @@ -256,6 +256,34 @@ const docTemplate = `{ } } }, + "/api/v1/config": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Returns the current server configuration (sanitized)", + "tags": [ + "System" + ], + "summary": "Get server configuration", + "responses": { + "200": { + "description": "Sanitized configuration", + "schema": { + "$ref": "#/definitions/config.AppConfig" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, "/api/v1/instances": { "get": { "security": [ @@ -1475,6 +1503,247 @@ const docTemplate = `{ } }, "definitions": { + "config.AppConfig": { + "type": "object", + "properties": { + "auth": { + "$ref": "#/definitions/config.AuthConfig" + }, + "backends": { + "$ref": "#/definitions/config.BackendConfig" + }, + "build_time": { + "type": "string" + }, + "commit_hash": { + "type": "string" + }, + "instances": { + "$ref": "#/definitions/config.InstancesConfig" + }, + "local_node": { + "type": "string" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/config.NodeConfig" + } + }, + "server": { + "$ref": "#/definitions/config.ServerConfig" + }, + "version": { + "type": "string" + } + } + }, + "config.AuthConfig": { + "type": "object", + "properties": { + "inference_keys": { + "description": "List of keys for OpenAI compatible inference endpoints", + "type": "array", + "items": { + "type": "string" + } + }, + "management_keys": { + "description": "List of keys for management endpoints", + "type": "array", + "items": { + "type": "string" + } + }, + "require_inference_auth": { + "description": "Require authentication for OpenAI compatible inference endpoints", + "type": "boolean" + }, + "require_management_auth": { + "description": "Require authentication for management endpoints", + "type": "boolean" + } + } + }, + "config.BackendConfig": { + "type": "object", + "properties": { + "llama-cpp": { + "$ref": "#/definitions/config.BackendSettings" + }, + "mlx": { + "$ref": "#/definitions/config.BackendSettings" + }, + "vllm": { + "$ref": "#/definitions/config.BackendSettings" + } + } + }, + "config.BackendSettings": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "type": "string" + }, + "docker": { + "$ref": "#/definitions/config.DockerSettings" + }, + "environment": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "response_headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "config.DockerSettings": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "enabled": { + "type": "boolean" + }, + "environment": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "image": { + "type": "string" + } + } + }, + "config.InstancesConfig": { + "type": "object", + "properties": { + "auto_create_dirs": { + "description": "Automatically create the data directory if it doesn't exist", + "type": "boolean" + }, + "configs_dir": { + "description": "Instance config directory override", + "type": "string" + }, + "data_dir": { + "description": "Directory where all llamactl data will be stored (instances.json, logs, etc.)", + "type": "string" + }, + "default_auto_restart": { + "description": "Default auto-restart setting for new 
instances", + "type": "boolean" + }, + "default_max_restarts": { + "description": "Default max restarts for new instances", + "type": "integer" + }, + "default_on_demand_start": { + "description": "Default on-demand start setting for new instances", + "type": "boolean" + }, + "default_restart_delay": { + "description": "Default restart delay for new instances (in seconds)", + "type": "integer" + }, + "enable_lru_eviction": { + "description": "Enable LRU eviction for instance logs", + "type": "boolean" + }, + "logs_dir": { + "description": "Logs directory override", + "type": "string" + }, + "max_instances": { + "description": "Maximum number of instances that can be created", + "type": "integer" + }, + "max_running_instances": { + "description": "Maximum number of instances that can be running at the same time", + "type": "integer" + }, + "on_demand_start_timeout": { + "description": "How long to wait for an instance to start on demand (in seconds)", + "type": "integer" + }, + "port_range": { + "description": "Port range for instances (e.g., 8000,9000)", + "type": "array", + "items": { + "type": "integer" + } + }, + "timeout_check_interval": { + "description": "Interval for checking instance timeouts (in minutes)", + "type": "integer" + } + } + }, + "config.NodeConfig": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "api_key": { + "type": "string" + } + } + }, + "config.ServerConfig": { + "type": "object", + "properties": { + "allowed_headers": { + "description": "Allowed headers for CORS (e.g., \"Accept\", \"Authorization\", \"Content-Type\", \"X-CSRF-Token\")", + "type": "array", + "items": { + "type": "string" + } + }, + "allowed_origins": { + "description": "Allowed origins for CORS (e.g., \"http://localhost:3000\")", + "type": "array", + "items": { + "type": "string" + } + }, + "enable_swagger": { + "description": "Enable Swagger UI for API documentation", + "type": "boolean" + }, + "host": { + "description": "Server host to bind to", + "type": "string" + }, + "port": { + "description": "Server port to bind to", + "type": "integer" + }, + "response_headers": { + "description": "Response headers to send with responses", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, "instance.Instance": { "type": "object", "properties": { @@ -1494,6 +1763,13 @@ const docTemplate = `{ "description": "Auto restart", "type": "boolean" }, + "command_override": { + "type": "string" + }, + "docker_enabled": { + "description": "Execution context overrides", + "type": "boolean" + }, "environment": { "description": "Environment variables", "type": "object", diff --git a/dev/managing-instances/index.html b/dev/managing-instances/index.html index e03e442..e616f6c 100644 --- a/dev/managing-instances/index.html +++ b/dev/managing-instances/index.html @@ -789,34 +789,36 @@

    Create Instance Screenshot

    1. Click the "Create Instance" button on the dashboard
    2. -
    3. Optional: Click "Import" in the dialog header to load a previously exported configuration
    4. -
    5. Enter a unique Name for your instance (only required field)
    6. -
    7. Select Target Node: Choose which node to deploy the instance to from the dropdown
    8. -
    9. Choose Backend Type:
        -
      • llama.cpp: For GGUF models using llama-server
      • -
      • MLX: For MLX-optimized models (macOS only)
      • +
      • Optional: Click "Import" to load a previously exported configuration
      • +
    +

    Instance Settings:

    +
      +
    1. Enter a unique Instance Name (required)
    2. +
    3. Select Node: Choose which node to deploy the instance to
    4. +
    5. Configure Auto Restart settings:
        +
      • Enable automatic restart on failure
      • +
      • Set max restarts and delay between attempts
      • +
      +
    6. +
    7. Configure basic instance options:
        +
      • Idle Timeout: Minutes before stopping idle instance
      • +
      • On Demand Start: Start instance only when needed
      • +
      +
    8. +
    +

    Backend Configuration:

    +
      +
    1. Select Backend Type:
        +
      • Llama Server: For GGUF models using llama-server
      • +
      • MLX LM: For MLX-optimized models (macOS only)
      • vLLM: For distributed serving and high-throughput inference
    2. -
    3. Configure model source:
        -
      • For llama.cpp: GGUF model path or HuggingFace repo
      • -
      • For MLX: MLX model path or identifier (e.g., mlx-community/Mistral-7B-Instruct-v0.3-4bit)
      • -
      • For vLLM: HuggingFace model identifier (e.g., microsoft/DialoGPT-medium)
      • -
      -
    4. -
    5. Configure optional instance management settings:
        -
      • Auto Restart: Automatically restart instance on failure
      • -
      • Max Restarts: Maximum number of restart attempts
      • -
      • Restart Delay: Delay in seconds between restart attempts
      • -
      • On Demand Start: Start instance when receiving a request to the OpenAI compatible endpoint
      • -
      • Idle Timeout: Minutes before stopping idle instance (set to 0 to disable)
      • -
      • Environment Variables: Set custom environment variables for the instance process
      • -
      -
    6. -
    7. Configure backend-specific options:
        -
      • llama.cpp: Threads, context size, GPU layers, port, etc.
      • -
      • MLX: Temperature, top-p, adapter path, Python environment, etc.
      • -
      • vLLM: Tensor parallel size, GPU memory utilization, quantization, etc.
      • +
      • Optional: Click "Parse Command" to import settings from an existing backend command
      • +
      • Configure Execution Context:
          +
        • Enable Docker: Run backend in Docker container
        • +
        • Command Override: Custom path to backend executable
        • +
        • Environment Variables: Custom environment variables for the instance process
    @@ -825,6 +827,14 @@

    Llamactl automatically assigns ports from the configured port range (default: 8000-9000) and generates API keys if authentication is enabled. You typically don't need to manually specify these values.

      +
    1. Configure Basic Backend Options (varies by backend):
        +
      • llama.cpp: Model path, threads, context size, GPU layers, etc.
      • +
      • MLX: Model identifier, temperature, max tokens, etc.
      • +
      • vLLM: Model identifier, tensor parallel size, GPU memory utilization, etc.
      • +
      +
    2. +
    3. Optional: Expand Advanced Backend Options for additional settings
    4. +
    5. Optional: Add Extra Args as key-value pairs for custom command-line arguments
    6. Click "Create" to save the instance

    Via API

    @@ -838,88 +848,47 @@ "model": "/path/to/model.gguf", "threads": 8, "ctx_size": 4096, - "gpu_layers": 32 - }, - "nodes": ["main"] - }' - -# Create MLX instance (macOS only) -curl -X POST http://localhost:8080/api/v1/instances/my-mlx-instance \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer <token>" \ - -d '{ - "backend_type": "mlx_lm", - "backend_options": { - "model": "mlx-community/Mistral-7B-Instruct-v0.3-4bit", - "temp": 0.7, - "top_p": 0.9, - "max_tokens": 2048 - }, - "auto_restart": true, - "max_restarts": 3, - "nodes": ["main"] - }' - -# Create vLLM instance -curl -X POST http://localhost:8080/api/v1/instances/my-vllm-instance \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer <token>" \ - -d '{ - "backend_type": "vllm", - "backend_options": { - "model": "microsoft/DialoGPT-medium", - "tensor_parallel_size": 2, - "gpu_memory_utilization": 0.9 - }, - "auto_restart": true, - "on_demand_start": true, - "environment": { - "CUDA_VISIBLE_DEVICES": "0,1", - "NCCL_DEBUG": "INFO", - "PYTHONPATH": "/custom/path" - }, - "nodes": ["main"] - }' - -# Create llama.cpp instance with HuggingFace model -curl -X POST http://localhost:8080/api/v1/instances/gemma-3-27b \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer <token>" \ - -d '{ - "backend_type": "llama_cpp", - "backend_options": { - "hf_repo": "unsloth/gemma-3-27b-it-GGUF", - "hf_file": "gemma-3-27b-it-GGUF.gguf", - "gpu_layers": 32 - }, - "nodes": ["main"] - }' - -# Create instance on specific remote node -curl -X POST http://localhost:8080/api/v1/instances/remote-llama \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer <token>" \ - -d '{ - "backend_type": "llama_cpp", - "backend_options": { - "model": "/models/llama-7b.gguf", - "gpu_layers": 32 - }, - "nodes": ["worker1"] - }' - -# Create instance on multiple nodes for high availability -curl -X POST http://localhost:8080/api/v1/instances/multi-node-llama \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer <token>" \ - -d '{ - "backend_type": "llama_cpp", - "backend_options": { - "model": "/models/llama-7b.gguf", - "gpu_layers": 32 - }, - "nodes": ["worker1", "worker2", "worker3"] - }' + "gpu_layers": 32, + "flash_attn": "on" + }, + "auto_restart": true, + "max_restarts": 3, + "docker_enabled": false, + "command_override": "/opt/llama-server-dev", + "nodes": ["main"] + }' + +# Create vLLM instance with environment variables +curl -X POST http://localhost:8080/api/v1/instances/my-vllm-instance \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer <token>" \ + -d '{ + "backend_type": "vllm", + "backend_options": { + "model": "microsoft/DialoGPT-medium", + "tensor_parallel_size": 2, + "gpu_memory_utilization": 0.9 + }, + "on_demand_start": true, + "environment": { + "CUDA_VISIBLE_DEVICES": "0,1" + }, + "nodes": ["worker1", "worker2"] + }' + +# Create MLX instance (macOS only) +curl -X POST http://localhost:8080/api/v1/instances/my-mlx-instance \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer <token>" \ + -d '{ + "backend_type": "mlx_lm", + "backend_options": { + "model": "mlx-community/Mistral-7B-Instruct-v0.3-4bit", + "temp": 0.7, + "max_tokens": 2048 + }, + "nodes": ["main"] + }'
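    The "Parse Command" option in the Create Instance dialog has an API counterpart: POST /api/v1/backends/llama-cpp/parse-command (similar endpoints exist for mlx and vllm). A minimal sketch, assuming the request body is a JSON object with a single command field (the reference only describes the body as "Command to parse"):

    # Parse an existing llama-server command into instance options
    # (the "command" field name is an assumption; see the API reference)
    curl -X POST http://localhost:8080/api/v1/backends/llama-cpp/parse-command \
      -H "Content-Type: application/json" \
      -H "Authorization: Bearer <token>" \
      -d '{"command": "llama-server --model /path/to/model.gguf --ctx-size 4096 --gpu-layers 32"}'

    # Confirm created instances are registered
    curl http://localhost:8080/api/v1/instances \
      -H "Authorization: Bearer <token>"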

    Start Instance

    Via Web UI
    @@ -1026,7 +995,7 @@ Check instance status in real-time:

    - October 27, 2025 + November 14, 2025 diff --git a/dev/search/search_index.json b/dev/search/search_index.json index c6c5cea..8a079ef 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Llamactl Documentation","text":"

    Welcome to the Llamactl documentation!

    "},{"location":"#what-is-llamactl","title":"What is Llamactl?","text":"

    Unified management and routing for llama.cpp, MLX and vLLM models with web dashboard.

    "},{"location":"#features","title":"Features","text":"

    \ud83d\ude80 Easy Model Management - Multiple Models Simultaneously: Run different models at the same time (7B for speed, 70B for quality) - Smart Resource Management: Automatic idle timeout, LRU eviction, and configurable instance limits - Web Dashboard: Modern React UI for managing instances, monitoring health, and viewing logs

    \ud83d\udd17 Flexible Integration - OpenAI API Compatible: Drop-in replacement - route requests to different models by instance name - Multi-Backend Support: Native support for llama.cpp, MLX (Apple Silicon optimized), and vLLM - Docker Ready: Run backends in containers with full GPU support

    \ud83c\udf10 Distributed Deployment - Remote Instances: Deploy instances on remote hosts - Central Management: Manage everything from a single dashboard with automatic routing

    "},{"location":"#quick-links","title":"Quick Links","text":""},{"location":"#getting-help","title":"Getting Help","text":"

    If you need help or have questions:

    "},{"location":"#license","title":"License","text":"

    MIT License - see the LICENSE file.

    "},{"location":"api-reference/","title":"API Reference","text":""},{"location":"api-reference/#llamactl-api-10","title":"llamactl API 1.0","text":"

    llamactl is a control server for managing Llama Server instances.

    License: MIT License"},{"location":"api-reference/#backends","title":"Backends","text":""},{"location":"api-reference/#get-apiv1backendsllama-cppdevices","title":"GET /api/v1/backends/llama-cpp/devices","text":"

    List available devices for llama server

    Description

    Returns a list of available devices for the llama server

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1backendsllama-cpphelp","title":"GET /api/v1/backends/llama-cpp/help","text":"

    Get help for llama server

    Description

    Returns the help text for the llama server command

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1backendsllama-cppparse-command","title":"POST /api/v1/backends/llama-cpp/parse-command","text":"

    Parse llama-server command

    Description

    Parses a llama-server command string into instance options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No request body None No Command to parse

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1backendsllama-cppversion","title":"GET /api/v1/backends/llama-cpp/version","text":"

    Get version of llama server

    Description

    Returns the version of the llama server command

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1backendsmlxparse-command","title":"POST /api/v1/backends/mlx/parse-command","text":"

    Parse mlx_lm.server command

    Description

    Parses MLX-LM server command string into instance options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No request body None No Command to parse

    Response 200 OK

    Response 400 Bad Request

    "},{"location":"api-reference/#post-apiv1backendsvllmparse-command","title":"POST /api/v1/backends/vllm/parse-command","text":"

    Parse vllm serve command

    Description

    Parses a vLLM serve command string into instance options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No request body None No Command to parse

    Response 200 OK

    Response 400 Bad Request

    "},{"location":"api-reference/#instances","title":"Instances","text":""},{"location":"api-reference/#get-apiv1instances","title":"GET /api/v1/instances","text":"

    List all instances

    Description

    Returns a list of all instances managed by the server

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#delete-apiv1instancesname","title":"DELETE /api/v1/instances/{name}","text":"

    Delete an instance

    Description

    Stops and removes a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 204 No Content

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1instancesname","title":"GET /api/v1/instances/{name}","text":"

    Get details of a specific instance

    Description

    Returns the details of a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1instancesname","title":"POST /api/v1/instances/{name}","text":"

    Create and start a new instance

    Description

    Creates a new instance with the provided configuration options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name options body None No Instance configuration options

    Response 201 Created

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#put-apiv1instancesname","title":"PUT /api/v1/instances/{name}","text":"

    Update an instance's configuration

    Description

    Updates the configuration of a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name options body None No Instance configuration options

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1instancesnamelogs","title":"GET /api/v1/instances/{name}/logs","text":"

    Get logs from a specific instance

    Description

    Returns the logs from a specific instance by name with optional line limit

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No lines query None No Number of lines to retrieve (default: all lines) name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1instancesnameproxy","title":"GET /api/v1/instances/{name}/proxy","text":"

    Proxy requests to a specific instance, does not autostart instance if stopped

    Description

    Forwards HTTP requests to the llama-server instance running on a specific port

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    Response 503 Service Unavailable

    "},{"location":"api-reference/#post-apiv1instancesnameproxy","title":"POST /api/v1/instances/{name}/proxy","text":"

    Proxy requests to a specific instance, does not autostart instance if stopped

    Description

    Forwards HTTP requests to the llama-server instance running on a specific port

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    Response 503 Service Unavailable

    "},{"location":"api-reference/#post-apiv1instancesnamerestart","title":"POST /api/v1/instances/{name}/restart","text":"

    Restart a running instance

    Description

    Restarts a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1instancesnamestart","title":"POST /api/v1/instances/{name}/start","text":"

    Start a stopped instance

    Description

    Starts a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1instancesnamestop","title":"POST /api/v1/instances/{name}/stop","text":"

    Stop a running instance

    Description

    Stops a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#nodes","title":"Nodes","text":""},{"location":"api-reference/#get-apiv1nodes","title":"GET /api/v1/nodes","text":"

    List all configured nodes

    Description

    Returns a map of all nodes configured in the server (node name -> node config)

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1nodesname","title":"GET /api/v1/nodes/{name}","text":"

    Get details of a specific node

    Description

    Returns the details of a specific node by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Node Name

    Response 200 OK

    Response 400 Bad Request

    Response 404 Not Found

    Response 500 Internal Server Error

    "},{"location":"api-reference/#system","title":"System","text":""},{"location":"api-reference/#get-apiv1version","title":"GET /api/v1/version","text":"

    Get llamactl version

    Description

    Returns the version of the llamactl command

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#llamacpp","title":"Llama.cpp","text":""},{"location":"api-reference/#get-llama-cppname","title":"GET /llama-cpp/{name}/","text":"

    Proxy requests to llama.cpp UI for the instance

    Description

    Proxies requests to the llama.cpp UI for the specified instance

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name query None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameapply-template","title":"POST /llama-cpp/{name}/apply-template","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamecompletion","title":"POST /llama-cpp/{name}/completion","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamedetokenize","title":"POST /llama-cpp/{name}/detokenize","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameembeddings","title":"POST /llama-cpp/{name}/embeddings","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameinfill","title":"POST /llama-cpp/{name}/infill","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamemetrics","title":"POST /llama-cpp/{name}/metrics","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-llama-cppnameprops","title":"GET /llama-cpp/{name}/props","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameprops","title":"POST /llama-cpp/{name}/props","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamereranking","title":"POST /llama-cpp/{name}/reranking","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-llama-cppnameslots","title":"GET /llama-cpp/{name}/slots","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnametokenize","title":"POST /llama-cpp/{name}/tokenize","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#openai","title":"OpenAI","text":""},{"location":"api-reference/#post-v1","title":"POST /v1/","text":"

    OpenAI-compatible proxy endpoint

    Description

    Handles all POST requests to /v1/*, routing to the appropriate instance based on the request body. Requires API key authentication via the Authorization header.

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-v1models","title":"GET /v1/models","text":"

    List instances in OpenAI-compatible format

    Description

    Returns a list of instances in a format compatible with OpenAI API

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"configuration/","title":"Configuration","text":"

    llamactl can be configured via configuration files or environment variables. Configuration is loaded in the following order of precedence:

    Defaults < Configuration file < Environment variables\n

    llamactl works out of the box with sensible defaults, but you can customize the behavior to suit your needs.

    "},{"location":"configuration/#default-configuration","title":"Default Configuration","text":"

    Here's the default configuration with all available options:

    server:\n  host: \"0.0.0.0\"                # Server host to bind to\n  port: 8080                     # Server port to bind to\n  allowed_origins: [\"*\"]         # Allowed CORS origins (default: all)\n  allowed_headers: [\"*\"]         # Allowed CORS headers (default: all)\n  enable_swagger: false          # Enable Swagger UI for API docs\n\nbackends:\n  llama-cpp:\n    command: \"llama-server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false\n      image: \"ghcr.io/ggml-org/llama.cpp:server\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  vllm:\n    command: \"vllm\"\n    args: [\"serve\"]\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false\n      image: \"vllm/vllm-openai:latest\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\", \"--shm-size\", \"1g\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  mlx:\n    command: \"mlx_lm.server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    response_headers: {}         # Additional response headers to send with responses\n\ninstances:\n  port_range: [8000, 9000]       # Port range for instances\n  data_dir: ~/.local/share/llamactl         # Data directory (platform-specific, see below)\n  configs_dir: ~/.local/share/llamactl/instances  # Instance configs directory\n  logs_dir: ~/.local/share/llamactl/logs    # Logs directory\n  auto_create_dirs: true         # Auto-create data/config/logs dirs if missing\n  max_instances: -1              # Max instances (-1 = unlimited)\n  max_running_instances: -1      # Max running instances (-1 = unlimited)\n  enable_lru_eviction: true      # Enable LRU eviction for idle instances\n  default_auto_restart: true     # Auto-restart new instances by default\n  default_max_restarts: 3        # Max restarts for new instances\n  default_restart_delay: 5       # Restart delay (seconds) for new instances\n  default_on_demand_start: true  # Default on-demand start setting\n  on_demand_start_timeout: 120   # Default on-demand start timeout in seconds\n  timeout_check_interval: 5      # Idle instance timeout check in minutes\n\nauth:\n  require_inference_auth: true   # Require auth for inference endpoints\n  inference_keys: []             # Keys for inference endpoints\n  require_management_auth: true  # Require auth for management endpoints\n  management_keys: []            # Keys for management endpoints\n\nlocal_node: \"main\"               # Name of the local node (default: \"main\")\nnodes:                           # Node configuration for multi-node deployment\n  main:                          # Default local node (empty config)\n
    "},{"location":"configuration/#configuration-files","title":"Configuration Files","text":""},{"location":"configuration/#configuration-file-locations","title":"Configuration File Locations","text":"

    Configuration files are searched in the following locations (in order of precedence, first found is used):

    Linux: - ./llamactl.yaml or ./config.yaml (current directory) - $HOME/.config/llamactl/config.yaml - /etc/llamactl/config.yaml

    macOS: - ./llamactl.yaml or ./config.yaml (current directory) - $HOME/Library/Application Support/llamactl/config.yaml - /Library/Application Support/llamactl/config.yaml

    Windows: - ./llamactl.yaml or ./config.yaml (current directory) - %APPDATA%\\llamactl\\config.yaml - %USERPROFILE%\\llamactl\\config.yaml - %PROGRAMDATA%\\llamactl\\config.yaml

    You can specify the path to the config file with the LLAMACTL_CONFIG_PATH environment variable.

    "},{"location":"configuration/#configuration-options","title":"Configuration Options","text":""},{"location":"configuration/#server-configuration","title":"Server Configuration","text":"
    server:\n  host: \"0.0.0.0\"         # Server host to bind to (default: \"0.0.0.0\")\n  port: 8080              # Server port to bind to (default: 8080)\n  allowed_origins: [\"*\"]  # CORS allowed origins (default: [\"*\"])\n  allowed_headers: [\"*\"]  # CORS allowed headers (default: [\"*\"])\n  enable_swagger: false   # Enable Swagger UI (default: false)\n

    Environment Variables: - LLAMACTL_HOST - Server host - LLAMACTL_PORT - Server port - LLAMACTL_ALLOWED_ORIGINS - Comma-separated CORS origins - LLAMACTL_ENABLE_SWAGGER - Enable Swagger UI (true/false)

    "},{"location":"configuration/#backend-configuration","title":"Backend Configuration","text":"
    backends:\n  llama-cpp:\n    command: \"llama-server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false             # Enable Docker runtime (default: false)\n      image: \"ghcr.io/ggml-org/llama.cpp:server\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  vllm:\n    command: \"vllm\"\n    args: [\"serve\"]\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false             # Enable Docker runtime (default: false)\n      image: \"vllm/vllm-openai:latest\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\", \"--shm-size\", \"1g\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  mlx:\n    command: \"mlx_lm.server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    # MLX does not support Docker\n    response_headers: {}         # Additional response headers to send with responses\n

    Backend Configuration Fields: - command: Executable name/path for the backend - args: Default arguments prepended to all instances - environment: Environment variables for the backend process (optional) - response_headers: Additional response headers to send with responses (optional) - docker: Docker-specific configuration (optional) - enabled: Boolean flag to enable Docker runtime - image: Docker image to use - args: Additional arguments passed to docker run - environment: Environment variables for the container (optional)

    If llamactl is behind an NGINX proxy, the X-Accel-Buffering: no response header may be required for NGINX to stream responses without buffering.

    Environment Variables:

    LlamaCpp Backend: - LLAMACTL_LLAMACPP_COMMAND - LlamaCpp executable command - LLAMACTL_LLAMACPP_ARGS - Space-separated default arguments - LLAMACTL_LLAMACPP_ENV - Environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_LLAMACPP_DOCKER_ENABLED - Enable Docker runtime (true/false) - LLAMACTL_LLAMACPP_DOCKER_IMAGE - Docker image to use - LLAMACTL_LLAMACPP_DOCKER_ARGS - Space-separated Docker arguments - LLAMACTL_LLAMACPP_DOCKER_ENV - Docker environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_LLAMACPP_RESPONSE_HEADERS - Response headers in format \"KEY1=value1;KEY2=value2\"

    VLLM Backend: - LLAMACTL_VLLM_COMMAND - VLLM executable command - LLAMACTL_VLLM_ARGS - Space-separated default arguments - LLAMACTL_VLLM_ENV - Environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_VLLM_DOCKER_ENABLED - Enable Docker runtime (true/false) - LLAMACTL_VLLM_DOCKER_IMAGE - Docker image to use - LLAMACTL_VLLM_DOCKER_ARGS - Space-separated Docker arguments - LLAMACTL_VLLM_DOCKER_ENV - Docker environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_VLLM_RESPONSE_HEADERS - Response headers in format \"KEY1=value1;KEY2=value2\"

    MLX Backend: - LLAMACTL_MLX_COMMAND - MLX executable command - LLAMACTL_MLX_ARGS - Space-separated default arguments - LLAMACTL_MLX_ENV - Environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_MLX_RESPONSE_HEADERS - Response headers in format \"KEY1=value1;KEY2=value2\"

    "},{"location":"configuration/#instance-configuration","title":"Instance Configuration","text":"
    instances:\n  port_range: [8000, 9000]                          # Port range for instances (default: [8000, 9000])\n  data_dir: \"~/.local/share/llamactl\"               # Directory for all llamactl data (default varies by OS)\n  configs_dir: \"~/.local/share/llamactl/instances\"  # Directory for instance configs (default: data_dir/instances)\n  logs_dir: \"~/.local/share/llamactl/logs\"          # Directory for instance logs (default: data_dir/logs)\n  auto_create_dirs: true                            # Automatically create data/config/logs directories (default: true)\n  max_instances: -1                                 # Maximum instances (-1 = unlimited)\n  max_running_instances: -1                         # Maximum running instances (-1 = unlimited)\n  enable_lru_eviction: true                         # Enable LRU eviction for idle instances\n  default_auto_restart: true                        # Default auto-restart setting\n  default_max_restarts: 3                           # Default maximum restart attempts\n  default_restart_delay: 5                          # Default restart delay in seconds\n  default_on_demand_start: true                     # Default on-demand start setting\n  on_demand_start_timeout: 120                      # Default on-demand start timeout in seconds\n  timeout_check_interval: 5                         # Default instance timeout check interval in minutes\n

    Environment Variables: - LLAMACTL_INSTANCE_PORT_RANGE - Port range (format: \"8000-9000\" or \"8000,9000\") - LLAMACTL_DATA_DIRECTORY - Data directory path - LLAMACTL_INSTANCES_DIR - Instance configs directory path - LLAMACTL_LOGS_DIR - Log directory path - LLAMACTL_AUTO_CREATE_DATA_DIR - Auto-create data/config/logs directories (true/false) - LLAMACTL_MAX_INSTANCES - Maximum number of instances - LLAMACTL_MAX_RUNNING_INSTANCES - Maximum number of running instances - LLAMACTL_ENABLE_LRU_EVICTION - Enable LRU eviction for idle instances - LLAMACTL_DEFAULT_AUTO_RESTART - Default auto-restart setting (true/false) - LLAMACTL_DEFAULT_MAX_RESTARTS - Default maximum restarts - LLAMACTL_DEFAULT_RESTART_DELAY - Default restart delay in seconds - LLAMACTL_DEFAULT_ON_DEMAND_START - Default on-demand start setting (true/false) - LLAMACTL_ON_DEMAND_START_TIMEOUT - Default on-demand start timeout in seconds - LLAMACTL_TIMEOUT_CHECK_INTERVAL - Default instance timeout check interval in minutes

    "},{"location":"configuration/#authentication-configuration","title":"Authentication Configuration","text":"
    auth:\n  require_inference_auth: true           # Require API key for OpenAI endpoints (default: true)\n  inference_keys: []                     # List of valid inference API keys\n  require_management_auth: true          # Require API key for management endpoints (default: true)\n  management_keys: []                    # List of valid management API keys\n

    Environment Variables: - LLAMACTL_REQUIRE_INFERENCE_AUTH - Require auth for OpenAI endpoints (true/false) - LLAMACTL_INFERENCE_KEYS - Comma-separated inference API keys - LLAMACTL_REQUIRE_MANAGEMENT_AUTH - Require auth for management endpoints (true/false) - LLAMACTL_MANAGEMENT_KEYS - Comma-separated management API keys

    "},{"location":"configuration/#remote-node-configuration","title":"Remote Node Configuration","text":"

    llamactl supports remote node deployments. Configure remote nodes to deploy instances on remote hosts and manage them centrally.

    local_node: \"main\"               # Name of the local node (default: \"main\")\nnodes:                           # Node configuration map\n  main:                          # Local node (empty address means local)\n    address: \"\"                  # Not used for local node\n    api_key: \"\"                  # Not used for local node\n  worker1:                       # Remote worker node\n    address: \"http://192.168.1.10:8080\"\n    api_key: \"worker1-api-key\"   # Management API key for authentication\n

    Node Configuration Fields: - local_node: Specifies which node in the nodes map represents the local node. Must match exactly what other nodes call this node. - nodes: Map of node configurations - address: HTTP/HTTPS URL of the remote node (empty for local node) - api_key: Management API key for authenticating with the remote node

    Environment Variables: - LLAMACTL_LOCAL_NODE - Name of the local node

    "},{"location":"installation/","title":"Installation","text":"

    This guide will walk you through installing Llamactl on your system.

    "},{"location":"installation/#prerequisites","title":"Prerequisites","text":""},{"location":"installation/#backend-dependencies","title":"Backend Dependencies","text":"

    llamactl supports multiple backends. Install at least one:

    For llama.cpp backend (all platforms):

    You need llama-server from llama.cpp installed:

    # Homebrew (macOS/Linux)\nbrew install llama.cpp\n# Winget (Windows)\nwinget install llama.cpp\n

    Or build from source - see llama.cpp docs

    For MLX backend (macOS only):

    MLX provides optimized inference on Apple Silicon. Install MLX-LM:

    # Install via pip (requires Python 3.8+)\npip install mlx-lm\n\n# Or in a virtual environment (recommended)\npython -m venv mlx-env\nsource mlx-env/bin/activate\npip install mlx-lm\n

    Note: MLX backend is only available on macOS with Apple Silicon (M1, M2, M3, etc.)

    For vLLM backend:

    vLLM provides high-throughput distributed serving for LLMs. Install vLLM:

    # Install in a virtual environment\npython -m venv vllm-env\nsource vllm-env/bin/activate\npip install vllm\n
    "},{"location":"installation/#installation-methods","title":"Installation Methods","text":""},{"location":"installation/#option-1-download-binary-recommended","title":"Option 1: Download Binary (Recommended)","text":"

    Download the latest release from the GitHub releases page:

    # Linux/macOS - Get latest version and download\nLATEST_VERSION=$(curl -s https://api.github.com/repos/lordmathis/llamactl/releases/latest | grep '\"tag_name\":' | sed -E 's/.*\"([^\"]+)\".*/\\1/')\ncurl -L https://github.com/lordmathis/llamactl/releases/download/${LATEST_VERSION}/llamactl-${LATEST_VERSION}-$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m).tar.gz | tar -xz\nsudo mv llamactl /usr/local/bin/\n\n# Or download manually from:\n# https://github.com/lordmathis/llamactl/releases/latest\n\n# Windows - Download from releases page\n
    "},{"location":"installation/#option-2-docker","title":"Option 2: Docker","text":"

    llamactl provides Dockerfiles for creating Docker images with backends pre-installed. The resulting images include the latest llamactl release with the respective backend.

    Available Dockerfiles (CUDA): - llamactl with llama.cpp CUDA: docker/Dockerfile.llamacpp (based on ghcr.io/ggml-org/llama.cpp:server-cuda) - llamactl with vLLM CUDA: docker/Dockerfile.vllm (based on vllm/vllm-openai:latest) - llamactl built from source: docker/Dockerfile.source (multi-stage build with webui)

    Note: These Dockerfiles are configured for CUDA. For other platforms (CPU, ROCm, Vulkan, etc.), adapt the base image. For llama.cpp, see available tags at llama.cpp Docker docs. For vLLM, check vLLM docs.

    Using Docker Compose

    # Clone the repository\ngit clone https://github.com/lordmathis/llamactl.git\ncd llamactl\n\n# Create directories for data and models\nmkdir -p data/llamacpp data/vllm models\n\n# Start llamactl with llama.cpp backend\ndocker-compose -f docker/docker-compose.yml up llamactl-llamacpp -d\n\n# Or start llamactl with vLLM backend\ndocker-compose -f docker/docker-compose.yml up llamactl-vllm -d\n

    Access the dashboard at: - llamactl with llama.cpp: http://localhost:8080 - llamactl with vLLM: http://localhost:8081

    Using Docker Build and Run

    1. llamactl with llama.cpp CUDA:

      docker build -f docker/Dockerfile.llamacpp -t llamactl:llamacpp-cuda .\ndocker run -d \\\n  --name llamactl-llamacpp \\\n  --gpus all \\\n  -p 8080:8080 \\\n  -v ~/.cache/llama.cpp:/root/.cache/llama.cpp \\\n  llamactl:llamacpp-cuda\n

    2. llamactl with vLLM CUDA:

      docker build -f docker/Dockerfile.vllm -t llamactl:vllm-cuda .\ndocker run -d \\\n  --name llamactl-vllm \\\n  --gpus all \\\n  -p 8080:8080 \\\n  -v ~/.cache/huggingface:/root/.cache/huggingface \\\n  llamactl:vllm-cuda\n

    3. llamactl built from source:

      docker build -f docker/Dockerfile.source -t llamactl:source .\ndocker run -d \\\n  --name llamactl \\\n  -p 8080:8080 \\\n  llamactl:source\n

    "},{"location":"installation/#option-3-build-from-source","title":"Option 3: Build from Source","text":"

    Requirements: - Go 1.24 or later - Node.js 22 or later - Git

    If you prefer to build from source:

    # Clone the repository\ngit clone https://github.com/lordmathis/llamactl.git\ncd llamactl\n\n# Build the web UI\ncd webui && npm ci && npm run build && cd ..\n\n# Build the application\ngo build -o llamactl ./cmd/server\n
    "},{"location":"installation/#remote-node-installation","title":"Remote Node Installation","text":"

    For deployments with remote nodes: - Install llamactl on each node using any of the methods above - Configure API keys for authentication between nodes - Ensure node names are consistent across all configurations

    "},{"location":"installation/#verification","title":"Verification","text":"

    Verify your installation by checking the version:

    llamactl --version\n
    "},{"location":"installation/#next-steps","title":"Next Steps","text":"

    Now that Llamactl is installed, continue to the Quick Start guide to get your first instance running!

    For remote node deployments, see the Configuration Guide for node setup instructions.

    "},{"location":"managing-instances/","title":"Managing Instances","text":"

    Learn how to effectively manage your llama.cpp, MLX, and vLLM instances with Llamactl through both the Web UI and API.

    "},{"location":"managing-instances/#overview","title":"Overview","text":"

    Llamactl provides two ways to manage instances:

    "},{"location":"managing-instances/#authentication","title":"Authentication","text":"

    Llamactl uses a Management API Key to authenticate requests to the management API (creating, starting, stopping instances). All curl examples below use <token> as a placeholder - replace this with your actual Management API Key.

    By default, authentication is required. If you don't configure a management API key in your configuration file, llamactl will auto-generate one and print it to the terminal on startup. See the Configuration guide for details.

    For Web UI access: 1. Navigate to the web UI 2. Enter your Management API Key 3. The Bearer token is stored for the session

    "},{"location":"managing-instances/#theme-support","title":"Theme Support","text":""},{"location":"managing-instances/#instance-cards","title":"Instance Cards","text":"

    Each instance is displayed as a card showing:

    "},{"location":"managing-instances/#create-instance","title":"Create Instance","text":"

    Via Web UI

    1. Click the \"Create Instance\" button on the dashboard
    2. Optional: Click \"Import\" in the dialog header to load a previously exported configuration
    3. Enter a unique Name for your instance (only required field)
    4. Select Target Node: Choose which node to deploy the instance to from the dropdown
    5. Choose Backend Type:
      • llama.cpp: For GGUF models using llama-server
      • MLX: For MLX-optimized models (macOS only)
      • vLLM: For distributed serving and high-throughput inference
    6. Configure model source:
      • For llama.cpp: GGUF model path or HuggingFace repo
      • For MLX: MLX model path or identifier (e.g., mlx-community/Mistral-7B-Instruct-v0.3-4bit)
      • For vLLM: HuggingFace model identifier (e.g., microsoft/DialoGPT-medium)
    7. Configure optional instance management settings:
      • Auto Restart: Automatically restart instance on failure
      • Max Restarts: Maximum number of restart attempts
      • Restart Delay: Delay in seconds between restart attempts
      • On Demand Start: Start instance when receiving a request to the OpenAI compatible endpoint
      • Idle Timeout: Minutes before stopping idle instance (set to 0 to disable)
      • Environment Variables: Set custom environment variables for the instance process
    8. Configure backend-specific options:
      • llama.cpp: Threads, context size, GPU layers, port, etc.
      • MLX: Temperature, top-p, adapter path, Python environment, etc.
      • vLLM: Tensor parallel size, GPU memory utilization, quantization, etc.

    Auto-Assignment

    Llamactl automatically assigns ports from the configured port range (default: 8000-9000) and generates API keys if authentication is enabled. You typically don't need to manually specify these values.

    1. Click \"Create\" to save the instance

    Via API

    # Create llama.cpp instance with local model file\ncurl -X POST http://localhost:8080/api/v1/instances/my-llama-instance \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"model\": \"/path/to/model.gguf\",\n      \"threads\": 8,\n      \"ctx_size\": 4096,\n      \"gpu_layers\": 32\n    },\n    \"nodes\": [\"main\"]\n  }'\n\n# Create MLX instance (macOS only)\ncurl -X POST http://localhost:8080/api/v1/instances/my-mlx-instance \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"mlx_lm\",\n    \"backend_options\": {\n      \"model\": \"mlx-community/Mistral-7B-Instruct-v0.3-4bit\",\n      \"temp\": 0.7,\n      \"top_p\": 0.9,\n      \"max_tokens\": 2048\n    },\n    \"auto_restart\": true,\n    \"max_restarts\": 3,\n    \"nodes\": [\"main\"]\n  }'\n\n# Create vLLM instance\ncurl -X POST http://localhost:8080/api/v1/instances/my-vllm-instance \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"vllm\",\n    \"backend_options\": {\n      \"model\": \"microsoft/DialoGPT-medium\",\n      \"tensor_parallel_size\": 2,\n      \"gpu_memory_utilization\": 0.9\n    },\n    \"auto_restart\": true,\n    \"on_demand_start\": true,\n    \"environment\": {\n      \"CUDA_VISIBLE_DEVICES\": \"0,1\",\n      \"NCCL_DEBUG\": \"INFO\",\n      \"PYTHONPATH\": \"/custom/path\"\n    },\n    \"nodes\": [\"main\"]\n  }'\n\n# Create llama.cpp instance with HuggingFace model\ncurl -X POST http://localhost:8080/api/v1/instances/gemma-3-27b \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"hf_repo\": \"unsloth/gemma-3-27b-it-GGUF\",\n      \"hf_file\": \"gemma-3-27b-it-GGUF.gguf\",\n      \"gpu_layers\": 32\n    },\n    \"nodes\": [\"main\"]\n  }'\n\n# Create instance on specific remote node\ncurl -X POST http://localhost:8080/api/v1/instances/remote-llama \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"model\": \"/models/llama-7b.gguf\",\n      \"gpu_layers\": 32\n    },\n    \"nodes\": [\"worker1\"]\n  }'\n\n# Create instance on multiple nodes for high availability\ncurl -X POST http://localhost:8080/api/v1/instances/multi-node-llama \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"model\": \"/models/llama-7b.gguf\",\n      \"gpu_layers\": 32\n    },\n    \"nodes\": [\"worker1\", \"worker2\", \"worker3\"]\n  }'\n
    "},{"location":"managing-instances/#start-instance","title":"Start Instance","text":"

    Via Web UI 1. Click the \"Start\" button on an instance card 2. Watch the status change to \"Unknown\" 3. Monitor progress in the logs 4. Instance status changes to \"Ready\" when ready

    Via API

    curl -X POST http://localhost:8080/api/v1/instances/{name}/start \\\n  -H \"Authorization: Bearer <token>\"\n

    "},{"location":"managing-instances/#stop-instance","title":"Stop Instance","text":"

    Via Web UI 1. Click the \"Stop\" button on an instance card 2. Instance gracefully shuts down

    Via API

    curl -X POST http://localhost:8080/api/v1/instances/{name}/stop \\\n  -H \"Authorization: Bearer <token>\"\n

    "},{"location":"managing-instances/#edit-instance","title":"Edit Instance","text":"

    Via Web UI 1. Click the \"Edit\" button on an instance card 2. Modify settings in the configuration dialog 3. Changes require instance restart to take effect 4. Click \"Update & Restart\" to apply changes

    Via API Modify instance settings:

    curl -X PUT http://localhost:8080/api/v1/instances/{name} \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_options\": {\n      \"threads\": 8,\n      \"context_size\": 4096\n    }\n  }'\n

    Note

    Configuration changes require restarting the instance to take effect.
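
    For example, after updating an instance via the API you can apply the changes with the restart endpoint (documented in the API Reference):

    # Restart the instance to apply updated configuration\ncurl -X POST http://localhost:8080/api/v1/instances/{name}/restart \\\n  -H \"Authorization: Bearer <token>\"\n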

    "},{"location":"managing-instances/#export-instance","title":"Export Instance","text":"

    Via Web UI 1. Click the \"More actions\" button (three dots) on an instance card 2. Click \"Export\" to download the instance configuration as a JSON file

    "},{"location":"managing-instances/#view-logs","title":"View Logs","text":"

    Via Web UI

    1. Click the \"Logs\" button on any instance card
    2. Real-time log viewer opens

    Via API Check instance status in real-time:

    # Get instance logs\ncurl http://localhost:8080/api/v1/instances/{name}/logs \\\n  -H \"Authorization: Bearer <token>\"\n
    "},{"location":"managing-instances/#delete-instance","title":"Delete Instance","text":"

    Via Web UI 1. Click the \"Delete\" button on an instance card 2. Only stopped instances can be deleted 3. Confirm deletion in the dialog

    Via API

    curl -X DELETE http://localhost:8080/api/v1/instances/{name} \\\n  -H \"Authorization: Bearer <token>\"\n

    "},{"location":"managing-instances/#instance-proxy","title":"Instance Proxy","text":"

    Llamactl proxies all requests to the underlying backend instances (llama-server, MLX, or vLLM).

    # Proxy requests to the instance\ncurl http://localhost:8080/api/v1/instances/{name}/proxy/ \\\n  -H \"Authorization: Bearer <token>\"\n

    All backends provide OpenAI-compatible endpoints. Check the respective documentation: - llama-server docs - MLX-LM docs - vLLM docs
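
    As a sketch only: the proxy forwards sub-paths (the health check below uses /proxy/health), so an OpenAI-style chat completion can likely be sent through the same route, assuming the backend exposes /v1/chat/completions as llama-server, MLX-LM, and vLLM do:

    # Sketch: chat completion via the instance proxy (sub-path assumed from the proxy route)\ncurl -X POST http://localhost:8080/api/v1/instances/{name}/proxy/v1/chat/completions \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\"messages\": [{\"role\": \"user\", \"content\": \"Hello\"}]}'\n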

    "},{"location":"managing-instances/#instance-health","title":"Instance Health","text":"

    Via Web UI

    1. The health status badge is displayed on each instance card

    Via API

    Check the health status of your instances:

    curl http://localhost:8080/api/v1/instances/{name}/proxy/health \\\n  -H \"Authorization: Bearer <token>\"\n
    "},{"location":"quick-start/","title":"Quick Start","text":"

    This guide will help you get Llamactl up and running in just a few minutes.

    Before you begin: Ensure you have at least one backend installed (llama.cpp, MLX, or vLLM). See the Installation Guide for backend setup.

    "},{"location":"quick-start/#core-concepts","title":"Core Concepts","text":"

    Before you start, let's clarify a few key terms:

    "},{"location":"quick-start/#authentication","title":"Authentication","text":"

    Llamactl uses two types of API keys:

    By default, authentication is required. If you don't configure these keys in your configuration file, llamactl will auto-generate them and print them to the terminal on startup. You can also configure custom keys or disable authentication entirely in the Configuration guide.
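
    If you prefer stable keys instead of auto-generated ones, a minimal sketch of the relevant config section (key values are placeholders):

    auth:\n  management_keys:\n    - \"sk-management-your-key\"\n  inference_keys:\n    - \"sk-inference-your-key\"\n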

    "},{"location":"quick-start/#start-llamactl","title":"Start Llamactl","text":"

    Start the Llamactl server:

    llamactl\n
    \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u26a0\ufe0f  MANAGEMENT AUTHENTICATION REQUIRED\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\ud83d\udd11  Generated Management API Key:\n\n    sk-management-...\n\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u26a0\ufe0f  INFERENCE AUTHENTICATION REQUIRED\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\ud83d\udd11  Generated Inference API Key:\n\n    sk-inference-...\n\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u26a0\ufe0f  IMPORTANT\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u2022 These keys are auto-generated and will change on restart\n\u2022 For production, add explicit keys to your configuration\n\u2022 Copy these keys before they disappear from the terminal\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\nLlamactl server listening on 0.0.0.0:8080\n

    Copy the Management and Inference API Keys from the terminal - you'll need them to access the web UI and make inference requests.

    By default, Llamactl will start on http://localhost:8080.
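
    To bind to a different address or port, adjust the server section of your configuration file (see the Configuration guide); for example:

    server:\n  host: \"127.0.0.1\"   # Bind to localhost only\n  port: 9090          # Use a non-default port\n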

    "},{"location":"quick-start/#access-the-web-ui","title":"Access the Web UI","text":"

    Open your web browser and navigate to:

    http://localhost:8080\n

    Login with the management API key from the terminal output.

    You should see the Llamactl web interface.

    "},{"location":"quick-start/#create-your-first-instance","title":"Create Your First Instance","text":"
    1. Click the \"Add Instance\" button
    2. Fill in the instance configuration:

      • Name: Give your instance a descriptive name
      • Node: Select which node to deploy the instance to (defaults to \"main\" for single-node setups)
      • Backend Type: Choose from llama.cpp, MLX, or vLLM
      • Model: Model path or huggingface repo
      • Additional Options: Backend-specific parameters

      Auto-Assignment

      Llamactl automatically assigns ports from the configured port range (default: 8000-9000) and generates API keys if authentication is enabled. You typically don't need to manually specify these values.

      Remote Node Deployment

      If you have configured remote nodes in your configuration file, you can select which node to deploy the instance to. This allows you to distribute instances across multiple machines. See the Configuration guide for details on setting up remote nodes.

    3. Click \"Create Instance\"

    "},{"location":"quick-start/#start-your-instance","title":"Start Your Instance","text":"

    Once created, you can:

    "},{"location":"quick-start/#example-configurations","title":"Example Configurations","text":"

    Here are basic example configurations for each backend:

    llama.cpp backend:

    {\n  \"name\": \"llama2-7b\",\n  \"backend_type\": \"llama_cpp\",\n  \"backend_options\": {\n    \"model\": \"/path/to/llama-2-7b-chat.gguf\",\n    \"threads\": 4,\n    \"ctx_size\": 2048,\n    \"gpu_layers\": 32\n  },\n  \"nodes\": [\"main\"]\n}\n

    MLX backend (macOS only):

    {\n  \"name\": \"mistral-mlx\",\n  \"backend_type\": \"mlx_lm\",\n  \"backend_options\": {\n    \"model\": \"mlx-community/Mistral-7B-Instruct-v0.3-4bit\",\n    \"temp\": 0.7,\n    \"max_tokens\": 2048\n  },\n  \"nodes\": [\"main\"]\n}\n

    vLLM backend:

    {\n  \"name\": \"dialogpt-vllm\",\n  \"backend_type\": \"vllm\",\n  \"backend_options\": {\n    \"model\": \"microsoft/DialoGPT-medium\",\n    \"tensor_parallel_size\": 2,\n    \"gpu_memory_utilization\": 0.9\n  },\n  \"nodes\": [\"main\"]\n}\n

    Remote node deployment example:

    {\n  \"name\": \"distributed-model\",\n  \"backend_type\": \"llama_cpp\",\n  \"backend_options\": {\n    \"model\": \"/path/to/model.gguf\",\n    \"gpu_layers\": 32\n  },\n  \"nodes\": [\"worker1\"]\n}\n

    "},{"location":"quick-start/#docker-support","title":"Docker Support","text":"

    Llamactl can run backends in Docker containers. To enable Docker for a backend, add a docker section to that backend in your YAML configuration file (e.g. config.yaml) as shown below:

    backends:\n  vllm:\n    command: \"vllm\"\n    args: [\"serve\"]\n    docker:\n      enabled: true\n      image: \"vllm/vllm-openai:latest\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\", \"--shm-size\", \"1g\"]\n
    "},{"location":"quick-start/#using-the-api","title":"Using the API","text":"

    You can also manage instances via the REST API:

    # List all instances\ncurl http://localhost:8080/api/v1/instances\n\n# Create a new llama.cpp instance\ncurl -X POST http://localhost:8080/api/v1/instances/my-model \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"model\": \"/path/to/model.gguf\"\n    }\n  }'\n\n# Start an instance\ncurl -X POST http://localhost:8080/api/v1/instances/my-model/start\n
    "},{"location":"quick-start/#openai-compatible-api","title":"OpenAI Compatible API","text":"

    Llamactl provides OpenAI-compatible endpoints, making it easy to integrate with existing OpenAI client libraries and tools.

    "},{"location":"quick-start/#chat-completions","title":"Chat Completions","text":"

    Once you have an instance running, you can use it with the OpenAI-compatible chat completions endpoint:

    curl -X POST http://localhost:8080/v1/chat/completions \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"model\": \"my-model\",\n    \"messages\": [\n      {\n        \"role\": \"user\",\n        \"content\": \"Hello! Can you help me write a Python function?\"\n      }\n    ],\n    \"max_tokens\": 150,\n    \"temperature\": 0.7\n  }'\n
    "},{"location":"quick-start/#using-with-python-openai-client","title":"Using with Python OpenAI Client","text":"

    You can also use the official OpenAI Python client:

    from openai import OpenAI\n\n# Point the client to your Llamactl server\nclient = OpenAI(\n    base_url=\"http://localhost:8080/v1\",\n    api_key=\"your-inference-api-key\"  # Use the inference API key from terminal or config\n)\n\n# Create a chat completion\nresponse = client.chat.completions.create(\n    model=\"my-model\",  # Use the name of your instance\n    messages=[\n        {\"role\": \"user\", \"content\": \"Explain quantum computing in simple terms\"}\n    ],\n    max_tokens=200,\n    temperature=0.7\n)\n\nprint(response.choices[0].message.content)\n

    API Key

    If you disabled authentication in your config, you can use any value for api_key (e.g., \"not-needed\"). Otherwise, use the inference API key shown in the terminal output on startup.

    "},{"location":"quick-start/#list-available-models","title":"List Available Models","text":"

    Get a list of running instances (models) in OpenAI-compatible format:

    curl http://localhost:8080/v1/models\n
    "},{"location":"quick-start/#next-steps","title":"Next Steps","text":""},{"location":"troubleshooting/","title":"Troubleshooting","text":"

    Issues specific to Llamactl deployment and operation.

    "},{"location":"troubleshooting/#configuration-issues","title":"Configuration Issues","text":""},{"location":"troubleshooting/#invalid-configuration","title":"Invalid Configuration","text":"

    Problem: Invalid configuration preventing startup

    Solutions: 1. Use minimal configuration:

    server:\n  host: \"0.0.0.0\"\n  port: 8080\ninstances:\n  port_range: [8000, 9000]\n

    2. Check data directory permissions:
      # Ensure data directory is writable (default: ~/.local/share/llamactl)\nmkdir -p ~/.local/share/llamactl/{instances,logs}\n
    "},{"location":"troubleshooting/#instance-management-issues","title":"Instance Management Issues","text":""},{"location":"troubleshooting/#instance-fails-to-start","title":"Instance Fails to Start","text":"

    Problem: Instance fails to start or immediately stops

    Solutions:

    1. Check instance logs to see the actual error:

      curl http://localhost:8080/api/v1/instances/{name}/logs\n# Or check log files directly\ntail -f ~/.local/share/llamactl/logs/{instance-name}.log\n

    2. Verify backend is installed:

      • llama.cpp: Ensure llama-server is in PATH
      • MLX: Ensure mlx-lm Python package is installed
      • vLLM: Ensure vllm Python package is installed
    3. Check model path and format:

      • Use absolute paths to model files
      • Verify model format matches backend (GGUF for llama.cpp, etc.)
    4. Verify backend command configuration:

      • Check that the backend command is correctly configured in the global config
      • For virtual environments, specify the full path to the command (e.g., /path/to/venv/bin/mlx_lm.server) - see the sketch after this list
      • See the Configuration Guide for backend configuration details
      • Test the backend directly (see Backend-Specific Issues below)
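
    For example, a minimal sketch of pointing the MLX backend at a virtual environment in the global config (the path is illustrative):

    backends:\n  mlx:\n    command: \"/path/to/venv/bin/mlx_lm.server\"\n    args: []\n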
    "},{"location":"troubleshooting/#backend-specific-issues","title":"Backend-Specific Issues","text":"

    Problem: Model loading, memory, GPU, or performance issues

    Most model-specific issues (memory, GPU configuration, performance tuning) are backend-specific and should be resolved by consulting the respective backend documentation:

    llama.cpp: - llama.cpp GitHub - llama-server README

    MLX: - MLX-LM GitHub - MLX-LM Server Guide

    vLLM: - vLLM Documentation - OpenAI Compatible Server - vllm serve Command

    Testing backends directly:

    Testing your model and configuration directly with the backend helps determine if the issue is with llamactl or the backend itself:

    # llama.cpp\nllama-server --model /path/to/model.gguf --port 8081\n\n# MLX\nmlx_lm.server --model mlx-community/Mistral-7B-Instruct-v0.3-4bit --port 8081\n\n# vLLM\nvllm serve microsoft/DialoGPT-medium --port 8081\n
    "},{"location":"troubleshooting/#api-and-network-issues","title":"API and Network Issues","text":""},{"location":"troubleshooting/#cors-errors","title":"CORS Errors","text":"

    Problem: Web UI shows CORS errors in browser console

    Solutions: 1. Configure allowed origins:

    server:\n  allowed_origins:\n    - \"http://localhost:3000\"\n    - \"https://yourdomain.com\"\n

    "},{"location":"troubleshooting/#authentication-issues","title":"Authentication Issues","text":"

    Problem: API requests failing with authentication errors

    Solutions: 1. Disable authentication temporarily:

    auth:\n  require_management_auth: false\n  require_inference_auth: false\n

    2. Configure API keys:

      auth:\n  management_keys:\n    - \"your-management-key\"\n  inference_keys:\n    - \"your-inference-key\"\n

    3. Use correct Authorization header:

      curl -H \"Authorization: Bearer your-api-key\" \\\n  http://localhost:8080/api/v1/instances\n

    "},{"location":"troubleshooting/#remote-node-issues","title":"Remote Node Issues","text":""},{"location":"troubleshooting/#node-configuration","title":"Node Configuration","text":"

    Problem: Remote instances not appearing or cannot be managed

    Solutions: 1. Verify node configuration:

    local_node: \"main\"  # Must match a key in nodes map\nnodes:\n  main:\n    address: \"\"     # Empty for local node\n  worker1:\n    address: \"http://worker1.internal:8080\"\n    api_key: \"secure-key\"  # Must match worker1's management key\n

    2. Check node name consistency:
      • local_node on each node must match what other nodes call it
      • Node names are case-sensitive

    3. Test remote node connectivity:

      curl -H \"Authorization: Bearer remote-node-key\" \\\n  http://remote-node:8080/api/v1/instances\n

    "},{"location":"troubleshooting/#debugging-and-logs","title":"Debugging and Logs","text":""},{"location":"troubleshooting/#viewing-instance-logs","title":"Viewing Instance Logs","text":"
    # Get instance logs via API\ncurl http://localhost:8080/api/v1/instances/{name}/logs\n\n# Or check log files directly\ntail -f ~/.local/share/llamactl/logs/{instance-name}.log\n
    "},{"location":"troubleshooting/#enable-debug-logging","title":"Enable Debug Logging","text":"
    export LLAMACTL_LOG_LEVEL=debug\nllamactl\n
    "},{"location":"troubleshooting/#getting-help","title":"Getting Help","text":"

    When reporting issues, include:

    1. System information:

      llamactl --version\n

    2. Configuration file (remove sensitive keys)

    3. Relevant log output

    4. Steps to reproduce the issue

    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Llamactl Documentation","text":"

    Welcome to the Llamactl documentation!

    "},{"location":"#what-is-llamactl","title":"What is Llamactl?","text":"

    Unified management and routing for llama.cpp, MLX, and vLLM models, with a web dashboard.

    "},{"location":"#features","title":"Features","text":"

    \ud83d\ude80 Easy Model Management - Multiple Models Simultaneously: Run different models at the same time (7B for speed, 70B for quality) - Smart Resource Management: Automatic idle timeout, LRU eviction, and configurable instance limits - Web Dashboard: Modern React UI for managing instances, monitoring health, and viewing logs

    \ud83d\udd17 Flexible Integration - OpenAI API Compatible: Drop-in replacement - route requests to different models by instance name - Multi-Backend Support: Native support for llama.cpp, MLX (Apple Silicon optimized), and vLLM - Docker Ready: Run backends in containers with full GPU support

    \ud83c\udf10 Distributed Deployment - Remote Instances: Deploy instances on remote hosts - Central Management: Manage everything from a single dashboard with automatic routing

    "},{"location":"#quick-links","title":"Quick Links","text":""},{"location":"#getting-help","title":"Getting Help","text":"

    If you need help or have questions:

    "},{"location":"#license","title":"License","text":"

    MIT License - see the LICENSE file.

    "},{"location":"api-reference/","title":"API Reference","text":""},{"location":"api-reference/#llamactl-api-10","title":"llamactl API 1.0","text":"

    llamactl is a control server for managing Llama Server instances.

    License: MIT License"},{"location":"api-reference/#backends","title":"Backends","text":""},{"location":"api-reference/#get-apiv1backendsllama-cppdevices","title":"GET /api/v1/backends/llama-cpp/devices","text":"

    List available devices for llama server

    Description

    Returns a list of available devices for the llama server

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1backendsllama-cpphelp","title":"GET /api/v1/backends/llama-cpp/help","text":"

    Get help for llama server

    Description

    Returns the help text for the llama server command

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1backendsllama-cppparse-command","title":"POST /api/v1/backends/llama-cpp/parse-command","text":"

    Parse llama-server command

    Description

    Parses a llama-server command string into instance options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No request body None No Command to parse

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1backendsllama-cppversion","title":"GET /api/v1/backends/llama-cpp/version","text":"

    Get version of llama server

    Description

    Returns the version of the llama server command

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1backendsmlxparse-command","title":"POST /api/v1/backends/mlx/parse-command","text":"

    Parse mlx_lm.server command

    Description

    Parses MLX-LM server command string into instance options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No request body None No Command to parse

    Response 200 OK

    Response 400 Bad Request

    "},{"location":"api-reference/#post-apiv1backendsvllmparse-command","title":"POST /api/v1/backends/vllm/parse-command","text":"

    Parse vllm serve command

    Description

    Parses a vLLM serve command string into instance options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No request body None No Command to parse

    Response 200 OK

    Response 400 Bad Request

    "},{"location":"api-reference/#system","title":"System","text":""},{"location":"api-reference/#get-apiv1config","title":"GET /api/v1/config","text":"

    Get server configuration

    Description

    Returns the current server configuration (sanitized)

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1version","title":"GET /api/v1/version","text":"

    Get llamactl version

    Description

    Returns the version of the llamactl command

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#instances","title":"Instances","text":""},{"location":"api-reference/#get-apiv1instances","title":"GET /api/v1/instances","text":"

    List all instances

    Description

    Returns a list of all instances managed by the server

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#delete-apiv1instancesname","title":"DELETE /api/v1/instances/{name}","text":"

    Delete an instance

    Description

    Stops and removes a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 204 No Content

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1instancesname","title":"GET /api/v1/instances/{name}","text":"

    Get details of a specific instance

    Description

    Returns the details of a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1instancesname","title":"POST /api/v1/instances/{name}","text":"

    Create and start a new instance

    Description

    Creates a new instance with the provided configuration options

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name options body None No Instance configuration options

    Response 201 Created

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#put-apiv1instancesname","title":"PUT /api/v1/instances/{name}","text":"

    Update an instance's configuration

    Description

    Updates the configuration of a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name options body None No Instance configuration options

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1instancesnamelogs","title":"GET /api/v1/instances/{name}/logs","text":"

    Get logs from a specific instance

    Description

    Returns the logs from a specific instance by name with optional line limit

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No lines query None No Number of lines to retrieve (default: all lines) name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1instancesnameproxy","title":"GET /api/v1/instances/{name}/proxy","text":"

    Proxy requests to a specific instance; does not autostart the instance if it is stopped

    Description

    Forwards HTTP requests to the llama-server instance running on a specific port

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    Response 503 Service Unavailable

    "},{"location":"api-reference/#post-apiv1instancesnameproxy","title":"POST /api/v1/instances/{name}/proxy","text":"

    Proxy requests to a specific instance; does not autostart the instance if it is stopped

    Description

    Forwards HTTP requests to the llama-server instance running on a specific port

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    Response 503 Service Unavailable

    "},{"location":"api-reference/#post-apiv1instancesnamerestart","title":"POST /api/v1/instances/{name}/restart","text":"

    Restart a running instance

    Description

    Restarts a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1instancesnamestart","title":"POST /api/v1/instances/{name}/start","text":"

    Start a stopped instance

    Description

    Starts a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-apiv1instancesnamestop","title":"POST /api/v1/instances/{name}/stop","text":"

    Stop a running instance

    Description

    Stops a specific instance by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#nodes","title":"Nodes","text":""},{"location":"api-reference/#get-apiv1nodes","title":"GET /api/v1/nodes","text":"

    List all configured nodes

    Description

    Returns a map of all nodes configured in the server (node name -> node config)

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-apiv1nodesname","title":"GET /api/v1/nodes/{name}","text":"

    Get details of a specific node

    Description

    Returns the details of a specific node by name

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Node Name

    Response 200 OK

    Response 400 Bad Request

    Response 404 Not Found

    Response 500 Internal Server Error

    "},{"location":"api-reference/#llamacpp","title":"Llama.cpp","text":""},{"location":"api-reference/#get-llama-cppname","title":"GET /llama-cpp/{name}/","text":"

    Proxy requests to llama.cpp UI for the instance

    Description

    Proxies requests to the llama.cpp UI for the specified instance

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name query None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameapply-template","title":"POST /llama-cpp/{name}/apply-template","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamecompletion","title":"POST /llama-cpp/{name}/completion","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamedetokenize","title":"POST /llama-cpp/{name}/detokenize","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameembeddings","title":"POST /llama-cpp/{name}/embeddings","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameinfill","title":"POST /llama-cpp/{name}/infill","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamemetrics","title":"POST /llama-cpp/{name}/metrics","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-llama-cppnameprops","title":"GET /llama-cpp/{name}/props","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnameprops","title":"POST /llama-cpp/{name}/props","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnamereranking","title":"POST /llama-cpp/{name}/reranking","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-llama-cppnameslots","title":"GET /llama-cpp/{name}/slots","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#post-llama-cppnametokenize","title":"POST /llama-cpp/{name}/tokenize","text":"

    Proxy requests to llama.cpp server instance

    Description

    Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No name path None No Instance Name

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#openai","title":"OpenAI","text":""},{"location":"api-reference/#post-v1","title":"POST /v1/","text":"

    OpenAI-compatible proxy endpoint

    Description

    Handles all POST requests to /v1/*, routing to the appropriate instance based on the request body. Requires API key authentication via the Authorization header.

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 400 Bad Request

    Response 500 Internal Server Error

    "},{"location":"api-reference/#get-v1models","title":"GET /v1/models","text":"

    List instances in OpenAI-compatible format

    Description

    Returns a list of instances in a format compatible with OpenAI API

    Input parameters

    Parameter In Type Default Nullable Description ApiKeyAuth header string N/A No

    Response 200 OK

    Response 500 Internal Server Error

    "},{"location":"configuration/","title":"Configuration","text":"

    llamactl can be configured via configuration files or environment variables. Configuration is loaded in the following order of precedence:

    Defaults < Configuration file < Environment variables\n

    llamactl works out of the box with sensible defaults, but you can customize the behavior to suit your needs.
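
    For example, an environment variable overrides the same setting from the config file (LLAMACTL_PORT is documented under Server Configuration below):

    # Environment variable takes precedence over the config file\nexport LLAMACTL_PORT=9090\nllamactl\n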

    "},{"location":"configuration/#default-configuration","title":"Default Configuration","text":"

    Here's the default configuration with all available options:

    server:\n  host: \"0.0.0.0\"                # Server host to bind to\n  port: 8080                     # Server port to bind to\n  allowed_origins: [\"*\"]         # Allowed CORS origins (default: all)\n  allowed_headers: [\"*\"]         # Allowed CORS headers (default: all)\n  enable_swagger: false          # Enable Swagger UI for API docs\n\nbackends:\n  llama-cpp:\n    command: \"llama-server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false\n      image: \"ghcr.io/ggml-org/llama.cpp:server\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  vllm:\n    command: \"vllm\"\n    args: [\"serve\"]\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false\n      image: \"vllm/vllm-openai:latest\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\", \"--shm-size\", \"1g\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  mlx:\n    command: \"mlx_lm.server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    response_headers: {}         # Additional response headers to send with responses\n\ninstances:\n  port_range: [8000, 9000]       # Port range for instances\n  data_dir: ~/.local/share/llamactl         # Data directory (platform-specific, see below)\n  configs_dir: ~/.local/share/llamactl/instances  # Instance configs directory\n  logs_dir: ~/.local/share/llamactl/logs    # Logs directory\n  auto_create_dirs: true         # Auto-create data/config/logs dirs if missing\n  max_instances: -1              # Max instances (-1 = unlimited)\n  max_running_instances: -1      # Max running instances (-1 = unlimited)\n  enable_lru_eviction: true      # Enable LRU eviction for idle instances\n  default_auto_restart: true     # Auto-restart new instances by default\n  default_max_restarts: 3        # Max restarts for new instances\n  default_restart_delay: 5       # Restart delay (seconds) for new instances\n  default_on_demand_start: true  # Default on-demand start setting\n  on_demand_start_timeout: 120   # Default on-demand start timeout in seconds\n  timeout_check_interval: 5      # Idle instance timeout check in minutes\n\nauth:\n  require_inference_auth: true   # Require auth for inference endpoints\n  inference_keys: []             # Keys for inference endpoints\n  require_management_auth: true  # Require auth for management endpoints\n  management_keys: []            # Keys for management endpoints\n\nlocal_node: \"main\"               # Name of the local node (default: \"main\")\nnodes:                           # Node configuration for multi-node deployment\n  main:                          # Default local node (empty config)\n
    "},{"location":"configuration/#configuration-files","title":"Configuration Files","text":""},{"location":"configuration/#configuration-file-locations","title":"Configuration File Locations","text":"

    Configuration files are searched in the following locations (in order of precedence, first found is used):

    Linux: - ./llamactl.yaml or ./config.yaml (current directory) - $HOME/.config/llamactl/config.yaml - /etc/llamactl/config.yaml

    macOS: - ./llamactl.yaml or ./config.yaml (current directory) - $HOME/Library/Application Support/llamactl/config.yaml - /Library/Application Support/llamactl/config.yaml

    Windows: - ./llamactl.yaml or ./config.yaml (current directory) - %APPDATA%\\llamactl\\config.yaml - %USERPROFILE%\\llamactl\\config.yaml - %PROGRAMDATA%\\llamactl\\config.yaml

    You can specify the path to the config file with the LLAMACTL_CONFIG_PATH environment variable.
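
    For example (the path is illustrative):

    # Use an explicit config file location\nexport LLAMACTL_CONFIG_PATH=/opt/llamactl/config.yaml\nllamactl\n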

    "},{"location":"configuration/#configuration-options","title":"Configuration Options","text":""},{"location":"configuration/#server-configuration","title":"Server Configuration","text":"
    server:\n  host: \"0.0.0.0\"         # Server host to bind to (default: \"0.0.0.0\")\n  port: 8080              # Server port to bind to (default: 8080)\n  allowed_origins: [\"*\"]  # CORS allowed origins (default: [\"*\"])\n  allowed_headers: [\"*\"]  # CORS allowed headers (default: [\"*\"])\n  enable_swagger: false   # Enable Swagger UI (default: false)\n

    Environment Variables: - LLAMACTL_HOST - Server host - LLAMACTL_PORT - Server port - LLAMACTL_ALLOWED_ORIGINS - Comma-separated CORS origins - LLAMACTL_ENABLE_SWAGGER - Enable Swagger UI (true/false)

    "},{"location":"configuration/#backend-configuration","title":"Backend Configuration","text":"
    backends:\n  llama-cpp:\n    command: \"llama-server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false             # Enable Docker runtime (default: false)\n      image: \"ghcr.io/ggml-org/llama.cpp:server\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  vllm:\n    command: \"vllm\"\n    args: [\"serve\"]\n    environment: {}              # Environment variables for the backend process\n    docker:\n      enabled: false             # Enable Docker runtime (default: false)\n      image: \"vllm/vllm-openai:latest\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\", \"--shm-size\", \"1g\"]\n      environment: {}\n    response_headers: {}         # Additional response headers to send with responses\n\n  mlx:\n    command: \"mlx_lm.server\"\n    args: []\n    environment: {}              # Environment variables for the backend process\n    # MLX does not support Docker\n    response_headers: {}         # Additional response headers to send with responses\n

    Backend Configuration Fields: - command: Executable name/path for the backend - args: Default arguments prepended to all instances - environment: Environment variables for the backend process (optional) - response_headers: Additional response headers to send with responses (optional) - docker: Docker-specific configuration (optional) - enabled: Boolean flag to enable Docker runtime - image: Docker image to use - args: Additional arguments passed to docker run - environment: Environment variables for the container (optional)

    If llamactl is behind an NGINX proxy, the X-Accel-Buffering: no response header may be required for NGINX to stream responses without buffering.
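
    For example, a sketch of setting that header for the llama.cpp backend:

    backends:\n  llama-cpp:\n    response_headers:\n      X-Accel-Buffering: \"no\"\n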

    Environment Variables:

    LlamaCpp Backend: - LLAMACTL_LLAMACPP_COMMAND - LlamaCpp executable command - LLAMACTL_LLAMACPP_ARGS - Space-separated default arguments - LLAMACTL_LLAMACPP_ENV - Environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_LLAMACPP_DOCKER_ENABLED - Enable Docker runtime (true/false) - LLAMACTL_LLAMACPP_DOCKER_IMAGE - Docker image to use - LLAMACTL_LLAMACPP_DOCKER_ARGS - Space-separated Docker arguments - LLAMACTL_LLAMACPP_DOCKER_ENV - Docker environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_LLAMACPP_RESPONSE_HEADERS - Response headers in format \"KEY1=value1;KEY2=value2\"

    VLLM Backend: - LLAMACTL_VLLM_COMMAND - VLLM executable command - LLAMACTL_VLLM_ARGS - Space-separated default arguments - LLAMACTL_VLLM_ENV - Environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_VLLM_DOCKER_ENABLED - Enable Docker runtime (true/false) - LLAMACTL_VLLM_DOCKER_IMAGE - Docker image to use - LLAMACTL_VLLM_DOCKER_ARGS - Space-separated Docker arguments - LLAMACTL_VLLM_DOCKER_ENV - Docker environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_VLLM_RESPONSE_HEADERS - Response headers in format \"KEY1=value1;KEY2=value2\"

    MLX Backend: - LLAMACTL_MLX_COMMAND - MLX executable command - LLAMACTL_MLX_ARGS - Space-separated default arguments - LLAMACTL_MLX_ENV - Environment variables in format \"KEY1=value1,KEY2=value2\" - LLAMACTL_MLX_RESPONSE_HEADERS - Response headers in format \"KEY1=value1;KEY2=value2\"
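
    For example, a sketch using the documented formats (values are illustrative):

    # Comma-separated KEY=value pairs for backend environment variables\nexport LLAMACTL_LLAMACPP_ENV=\"CUDA_VISIBLE_DEVICES=0,OMP_NUM_THREADS=8\"\n# Semicolon-separated KEY=value pairs for response headers\nexport LLAMACTL_LLAMACPP_RESPONSE_HEADERS=\"X-Accel-Buffering=no\"\n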

    "},{"location":"configuration/#instance-configuration","title":"Instance Configuration","text":"
    instances:\n  port_range: [8000, 9000]                          # Port range for instances (default: [8000, 9000])\n  data_dir: \"~/.local/share/llamactl\"               # Directory for all llamactl data (default varies by OS)\n  configs_dir: \"~/.local/share/llamactl/instances\"  # Directory for instance configs (default: data_dir/instances)\n  logs_dir: \"~/.local/share/llamactl/logs\"          # Directory for instance logs (default: data_dir/logs)\n  auto_create_dirs: true                            # Automatically create data/config/logs directories (default: true)\n  max_instances: -1                                 # Maximum instances (-1 = unlimited)\n  max_running_instances: -1                         # Maximum running instances (-1 = unlimited)\n  enable_lru_eviction: true                         # Enable LRU eviction for idle instances\n  default_auto_restart: true                        # Default auto-restart setting\n  default_max_restarts: 3                           # Default maximum restart attempts\n  default_restart_delay: 5                          # Default restart delay in seconds\n  default_on_demand_start: true                     # Default on-demand start setting\n  on_demand_start_timeout: 120                      # Default on-demand start timeout in seconds\n  timeout_check_interval: 5                         # Default instance timeout check interval in minutes\n

    Environment Variables: - LLAMACTL_INSTANCE_PORT_RANGE - Port range (format: \"8000-9000\" or \"8000,9000\") - LLAMACTL_DATA_DIRECTORY - Data directory path - LLAMACTL_INSTANCES_DIR - Instance configs directory path - LLAMACTL_LOGS_DIR - Log directory path - LLAMACTL_AUTO_CREATE_DATA_DIR - Auto-create data/config/logs directories (true/false) - LLAMACTL_MAX_INSTANCES - Maximum number of instances - LLAMACTL_MAX_RUNNING_INSTANCES - Maximum number of running instances - LLAMACTL_ENABLE_LRU_EVICTION - Enable LRU eviction for idle instances - LLAMACTL_DEFAULT_AUTO_RESTART - Default auto-restart setting (true/false) - LLAMACTL_DEFAULT_MAX_RESTARTS - Default maximum restarts - LLAMACTL_DEFAULT_RESTART_DELAY - Default restart delay in seconds - LLAMACTL_DEFAULT_ON_DEMAND_START - Default on-demand start setting (true/false) - LLAMACTL_ON_DEMAND_START_TIMEOUT - Default on-demand start timeout in seconds - LLAMACTL_TIMEOUT_CHECK_INTERVAL - Default instance timeout check interval in minutes

    "},{"location":"configuration/#authentication-configuration","title":"Authentication Configuration","text":"
    auth:\n  require_inference_auth: true           # Require API key for OpenAI endpoints (default: true)\n  inference_keys: []                     # List of valid inference API keys\n  require_management_auth: true          # Require API key for management endpoints (default: true)\n  management_keys: []                    # List of valid management API keys\n

    Environment Variables: - LLAMACTL_REQUIRE_INFERENCE_AUTH - Require auth for OpenAI endpoints (true/false) - LLAMACTL_INFERENCE_KEYS - Comma-separated inference API keys - LLAMACTL_REQUIRE_MANAGEMENT_AUTH - Require auth for management endpoints (true/false) - LLAMACTL_MANAGEMENT_KEYS - Comma-separated management API keys

    "},{"location":"configuration/#remote-node-configuration","title":"Remote Node Configuration","text":"

    llamactl supports remote node deployments. Configure remote nodes to deploy instances on remote hosts and manage them centrally.

    local_node: \"main\"               # Name of the local node (default: \"main\")\nnodes:                           # Node configuration map\n  main:                          # Local node (empty address means local)\n    address: \"\"                  # Not used for local node\n    api_key: \"\"                  # Not used for local node\n  worker1:                       # Remote worker node\n    address: \"http://192.168.1.10:8080\"\n    api_key: \"worker1-api-key\"   # Management API key for authentication\n

    Node Configuration Fields: - local_node: Specifies which node in the nodes map represents the local node. Must match exactly what other nodes call this node. - nodes: Map of node configurations - address: HTTP/HTTPS URL of the remote node (empty for local node) - api_key: Management API key for authenticating with the remote node

    Environment Variables: - LLAMACTL_LOCAL_NODE - Name of the local node
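
    For example, a sketch of the matching config on worker1 itself - its local_node name must be what the other nodes call it:

    # Config on worker1 (illustrative)\nlocal_node: \"worker1\"\nnodes:\n  worker1:                       # Local node entry (empty config)\n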

    "},{"location":"installation/","title":"Installation","text":"

    This guide will walk you through installing Llamactl on your system.

    "},{"location":"installation/#prerequisites","title":"Prerequisites","text":""},{"location":"installation/#backend-dependencies","title":"Backend Dependencies","text":"

    llamactl supports multiple backends. Install at least one:

    For llama.cpp backend (all platforms):

    You need llama-server from llama.cpp installed:

    # Homebrew (macOS/Linux)\nbrew install llama.cpp\n# Winget (Windows)\nwinget install llama.cpp\n

    Or build from source - see llama.cpp docs

    For MLX backend (macOS only):

    MLX provides optimized inference on Apple Silicon. Install MLX-LM:

    # Install via pip (requires Python 3.8+)\npip install mlx-lm\n\n# Or in a virtual environment (recommended)\npython -m venv mlx-env\nsource mlx-env/bin/activate\npip install mlx-lm\n

    Note: MLX backend is only available on macOS with Apple Silicon (M1, M2, M3, etc.)

    For vLLM backend:

    vLLM provides high-throughput distributed serving for LLMs. Install vLLM:

    # Install in a virtual environment\npython -m venv vllm-env\nsource vllm-env/bin/activate\npip install vllm\n
    "},{"location":"installation/#installation-methods","title":"Installation Methods","text":""},{"location":"installation/#option-1-download-binary-recommended","title":"Option 1: Download Binary (Recommended)","text":"

    Download the latest release from the GitHub releases page:

    # Linux/macOS - Get latest version and download\nLATEST_VERSION=$(curl -s https://api.github.com/repos/lordmathis/llamactl/releases/latest | grep '\"tag_name\":' | sed -E 's/.*\"([^\"]+)\".*/\\1/')\ncurl -L https://github.com/lordmathis/llamactl/releases/download/${LATEST_VERSION}/llamactl-${LATEST_VERSION}-$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m).tar.gz | tar -xz\nsudo mv llamactl /usr/local/bin/\n\n# Or download manually from:\n# https://github.com/lordmathis/llamactl/releases/latest\n\n# Windows - Download from releases page\n
    "},{"location":"installation/#option-2-docker","title":"Option 2: Docker","text":"

    llamactl provides Dockerfiles for creating Docker images with backends pre-installed. The resulting images include the latest llamactl release with the respective backend.

    Available Dockerfiles (CUDA): - llamactl with llama.cpp CUDA: docker/Dockerfile.llamacpp (based on ghcr.io/ggml-org/llama.cpp:server-cuda) - llamactl with vLLM CUDA: docker/Dockerfile.vllm (based on vllm/vllm-openai:latest) - llamactl built from source: docker/Dockerfile.source (multi-stage build with webui)

    Note: These Dockerfiles are configured for CUDA. For other platforms (CPU, ROCm, Vulkan, etc.), adapt the base image. For llama.cpp, see available tags at llama.cpp Docker docs. For vLLM, check vLLM docs.

    Using Docker Compose

    # Clone the repository\ngit clone https://github.com/lordmathis/llamactl.git\ncd llamactl\n\n# Create directories for data and models\nmkdir -p data/llamacpp data/vllm models\n\n# Start llamactl with llama.cpp backend\ndocker-compose -f docker/docker-compose.yml up llamactl-llamacpp -d\n\n# Or start llamactl with vLLM backend\ndocker-compose -f docker/docker-compose.yml up llamactl-vllm -d\n

    Access the dashboard at: - llamactl with llama.cpp: http://localhost:8080 - llamactl with vLLM: http://localhost:8081

    Using Docker Build and Run

    1. llamactl with llama.cpp CUDA:

      docker build -f docker/Dockerfile.llamacpp -t llamactl:llamacpp-cuda .\ndocker run -d \\\n  --name llamactl-llamacpp \\\n  --gpus all \\\n  -p 8080:8080 \\\n  -v ~/.cache/llama.cpp:/root/.cache/llama.cpp \\\n  llamactl:llamacpp-cuda\n

    2. llamactl with vLLM CUDA:

      docker build -f docker/Dockerfile.vllm -t llamactl:vllm-cuda .\ndocker run -d \\\n  --name llamactl-vllm \\\n  --gpus all \\\n  -p 8080:8080 \\\n  -v ~/.cache/huggingface:/root/.cache/huggingface \\\n  llamactl:vllm-cuda\n

    3. llamactl built from source:

      docker build -f docker/Dockerfile.source -t llamactl:source .\ndocker run -d \\\n  --name llamactl \\\n  -p 8080:8080 \\\n  llamactl:source\n

    "},{"location":"installation/#option-3-build-from-source","title":"Option 3: Build from Source","text":"

    Requirements: - Go 1.24 or later - Node.js 22 or later - Git

    If you prefer to build from source:

    # Clone the repository\ngit clone https://github.com/lordmathis/llamactl.git\ncd llamactl\n\n# Build the web UI\ncd webui && npm ci && npm run build && cd ..\n\n# Build the application\ngo build -o llamactl ./cmd/server\n
    "},{"location":"installation/#remote-node-installation","title":"Remote Node Installation","text":"

    For deployments with remote nodes: - Install llamactl on each node using any of the methods above - Configure API keys for authentication between nodes - Ensure node names are consistent across all configurations
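    A quick way to confirm that a freshly installed remote node is reachable and that its management key works is to query its instances endpoint from the coordinating machine. The address and key below are placeholders for your own setup:

    # Replace the address and key with your remote node's address and management key\ncurl -H \"Authorization: Bearer <remote-node-management-key>\" \\\n  http://worker1.internal:8080/api/v1/instances\n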

    "},{"location":"installation/#verification","title":"Verification","text":"

    Verify your installation by checking the version:

    llamactl --version\n
    "},{"location":"installation/#next-steps","title":"Next Steps","text":"

    Now that Llamactl is installed, continue to the Quick Start guide to get your first instance running!

    For remote node deployments, see the Configuration Guide for node setup instructions.

    "},{"location":"managing-instances/","title":"Managing Instances","text":"

    Learn how to effectively manage your llama.cpp, MLX, and vLLM instances with Llamactl through both the Web UI and API.

    "},{"location":"managing-instances/#overview","title":"Overview","text":"

    Llamactl provides two ways to manage instances: the Web UI (dashboard) and the REST API.

    "},{"location":"managing-instances/#authentication","title":"Authentication","text":"

    Llamactl uses a Management API Key to authenticate requests to the management API (creating, starting, stopping instances). All curl examples below use <token> as a placeholder; replace it with your actual Management API Key.
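    For example, listing all instances with the management key looks like this:

    curl http://localhost:8080/api/v1/instances \\\n  -H \"Authorization: Bearer <token>\"\n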

    By default, authentication is required. If you don't configure a management API key in your configuration file, llamactl will auto-generate one and print it to the terminal on startup. See the Configuration guide for details.

    For Web UI access: 1. Navigate to the web UI 2. Enter your Management API Key 3. The key is stored as a Bearer token for the duration of the session

    "},{"location":"managing-instances/#theme-support","title":"Theme Support","text":""},{"location":"managing-instances/#instance-cards","title":"Instance Cards","text":"

    Each instance is displayed as a card showing:

    "},{"location":"managing-instances/#create-instance","title":"Create Instance","text":"

    Via Web UI

    1. Click the \"Create Instance\" button on the dashboard
    2. Optional: Click \"Import\" to load a previously exported configuration

    Instance Settings:

    1. Enter a unique Instance Name (required)
    2. Select Node: Choose which node to deploy the instance to
    3. Configure Auto Restart settings:
      • Enable automatic restart on failure
      • Set max restarts and delay between attempts
    4. Configure basic instance options:
      • Idle Timeout: Minutes before stopping idle instance
      • On Demand Start: Start instance only when needed

    Backend Configuration:

    1. Select Backend Type:
      • Llama Server: For GGUF models using llama-server
      • MLX LM: For MLX-optimized models (macOS only)
      • vLLM: For distributed serving and high-throughput inference
    2. Optional: Click \"Parse Command\" to import settings from an existing backend command
    3. Configure Execution Context:
      • Enable Docker: Run backend in Docker container
      • Command Override: Custom path to backend executable
      • Environment Variables: Custom environment variables

    Auto-Assignment

    Llamactl automatically assigns ports from the configured port range (default: 8000-9000) and generates API keys if authentication is enabled. You typically don't need to manually specify these values.

    1. Configure Basic Backend Options (varies by backend):
      • llama.cpp: Model path, threads, context size, GPU layers, etc.
      • MLX: Model identifier, temperature, max tokens, etc.
      • vLLM: Model identifier, tensor parallel size, GPU memory utilization, etc.
    2. Optional: Expand Advanced Backend Options for additional settings
    3. Optional: Add Extra Args as key-value pairs for custom command-line arguments
    4. Click \"Create\" to save the instance

    Via API

    # Create llama.cpp instance with local model file\ncurl -X POST http://localhost:8080/api/v1/instances/my-llama-instance \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"model\": \"/path/to/model.gguf\",\n      \"threads\": 8,\n      \"ctx_size\": 4096,\n      \"gpu_layers\": 32,\n      \"flash_attn\": \"on\"\n    },\n    \"auto_restart\": true,\n    \"max_restarts\": 3,\n    \"docker_enabled\": false,\n    \"command_override\": \"/opt/llama-server-dev\",\n    \"nodes\": [\"main\"]\n  }'\n\n# Create vLLM instance with environment variables\ncurl -X POST http://localhost:8080/api/v1/instances/my-vllm-instance \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"vllm\",\n    \"backend_options\": {\n      \"model\": \"microsoft/DialoGPT-medium\",\n      \"tensor_parallel_size\": 2,\n      \"gpu_memory_utilization\": 0.9\n    },\n    \"on_demand_start\": true,\n    \"environment\": {\n      \"CUDA_VISIBLE_DEVICES\": \"0,1\"\n    },\n    \"nodes\": [\"worker1\", \"worker2\"]\n  }'\n\n# Create MLX instance (macOS only)\ncurl -X POST http://localhost:8080/api/v1/instances/my-mlx-instance \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_type\": \"mlx_lm\",\n    \"backend_options\": {\n      \"model\": \"mlx-community/Mistral-7B-Instruct-v0.3-4bit\",\n      \"temp\": 0.7,\n      \"max_tokens\": 2048\n    },\n    \"nodes\": [\"main\"]\n  }'\n
    "},{"location":"managing-instances/#start-instance","title":"Start Instance","text":"

    Via Web UI 1. Click the \"Start\" button on an instance card 2. Watch the status change to \"Unknown\" 3. Monitor progress in the logs 4. Instance status changes to \"Ready\" once startup completes

    Via API

    curl -X POST http://localhost:8080/api/v1/instances/{name}/start \\\n  -H \"Authorization: Bearer <token>\"\n

    "},{"location":"managing-instances/#stop-instance","title":"Stop Instance","text":"

    Via Web UI 1. Click the \"Stop\" button on an instance card 2. The instance shuts down gracefully

    Via API

    curl -X POST http://localhost:8080/api/v1/instances/{name}/stop \\\n  -H \"Authorization: Bearer <token>\"\n

    "},{"location":"managing-instances/#edit-instance","title":"Edit Instance","text":"

    Via Web UI 1. Click the \"Edit\" button on an instance card 2. Modify settings in the configuration dialog 3. Changes require instance restart to take effect 4. Click \"Update & Restart\" to apply changes

    Via API Modify instance settings:

    curl -X PUT http://localhost:8080/api/v1/instances/{name} \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\n    \"backend_options\": {\n      \"threads\": 8,\n      \"context_size\": 4096\n    }\n  }'\n

    Note

    Configuration changes require restarting the instance to take effect.
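    If you are working purely through the API, one way to apply an edit is to update the instance and then cycle it with the stop and start endpoints shown above; this is just a sketch of that sequence:

    # Update settings, then stop and start the instance so the changes take effect\ncurl -X PUT http://localhost:8080/api/v1/instances/{name} \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <token>\" \\\n  -d '{\"backend_options\": {\"threads\": 8}}'\n\ncurl -X POST http://localhost:8080/api/v1/instances/{name}/stop \\\n  -H \"Authorization: Bearer <token>\"\n\ncurl -X POST http://localhost:8080/api/v1/instances/{name}/start \\\n  -H \"Authorization: Bearer <token>\"\n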

    "},{"location":"managing-instances/#export-instance","title":"Export Instance","text":"

    Via Web UI 1. Click the \"More actions\" button (three dots) on an instance card 2. Click \"Export\" to download the instance configuration as a JSON file

    "},{"location":"managing-instances/#view-logs","title":"View Logs","text":"

    Via Web UI

    1. Click the \"Logs\" button on any instance card
    2. Real-time log viewer opens

    Via API Retrieve instance logs:

    # Get instance logs\ncurl http://localhost:8080/api/v1/instances/{name}/logs \\\n  -H \"Authorization: Bearer <token>\"\n
    "},{"location":"managing-instances/#delete-instance","title":"Delete Instance","text":"

    Via Web UI 1. Stop the instance if it is running (only stopped instances can be deleted) 2. Click the \"Delete\" button on the instance card 3. Confirm deletion in the dialog

    Via API

    curl -X DELETE http://localhost:8080/api/v1/instances/{name} \\\n  -H \"Authorization: Bearer <token>\"\n

    "},{"location":"managing-instances/#instance-proxy","title":"Instance Proxy","text":"

    Llamactl proxies all requests to the underlying backend instances (llama-server, MLX, or vLLM).

    # Proxy requests to the instance\ncurl http://localhost:8080/api/v1/instances/{name}/proxy/ \\\n  -H \"Authorization: Bearer <token>\"\n

    All backends provide OpenAI-compatible endpoints. Check the respective documentation: - llama-server docs - MLX-LM docs - vLLM docs
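    As an illustration, assuming the backend serves the standard OpenAI-style routes at its root, you could reach its model listing through the instance proxy like this (the exact paths available depend on the backend):

    curl http://localhost:8080/api/v1/instances/{name}/proxy/v1/models \\\n  -H \"Authorization: Bearer <token>\"\n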

    "},{"location":"managing-instances/#instance-health","title":"Instance Health","text":"

    Via Web UI

    1. The health status badge is displayed on each instance card

    Via API

    Check the health status of your instances:

    curl http://localhost:8080/api/v1/instances/{name}/proxy/health \\\n  -H \"Authorization: Bearer <token>\"\n
    "},{"location":"quick-start/","title":"Quick Start","text":"

    This guide will help you get Llamactl up and running in just a few minutes.

    Before you begin: Ensure you have at least one backend installed (llama.cpp, MLX, or vLLM). See the Installation Guide for backend setup.

    "},{"location":"quick-start/#core-concepts","title":"Core Concepts","text":"

    Before you start, let's clarify a few key terms:

    "},{"location":"quick-start/#authentication","title":"Authentication","text":"

    Llamactl uses two types of API keys: a Management API Key for the management API and web UI, and an Inference API Key for the OpenAI-compatible inference endpoints.

    By default, authentication is required. If you don't configure these keys in your configuration file, llamactl will auto-generate them and print them to the terminal on startup. You can also configure custom keys or disable authentication entirely in the Configuration guide.
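    The two keys are sent the same way but against different routes; a rough sketch:

    # Management API key: management endpoints under /api/v1\ncurl http://localhost:8080/api/v1/instances \\\n  -H \"Authorization: Bearer <management-key>\"\n\n# Inference API key: OpenAI-compatible endpoints under /v1\ncurl http://localhost:8080/v1/models \\\n  -H \"Authorization: Bearer <inference-key>\"\n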

    "},{"location":"quick-start/#start-llamactl","title":"Start Llamactl","text":"

    Start the Llamactl server:

    llamactl\n
    \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u26a0\ufe0f  MANAGEMENT AUTHENTICATION REQUIRED\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\ud83d\udd11  Generated Management API Key:\n\n    sk-management-...\n\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u26a0\ufe0f  INFERENCE AUTHENTICATION REQUIRED\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\ud83d\udd11  Generated Inference API Key:\n\n    sk-inference-...\n\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u26a0\ufe0f  IMPORTANT\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\n\u2022 These keys are auto-generated and will change on restart\n\u2022 For production, add explicit keys to your configuration\n\u2022 Copy these keys before they disappear from the terminal\n\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\nLlamactl server listening on 0.0.0.0:8080\n

    Copy the Management and Inference API Keys from the terminal - you'll need them to access the web UI and make inference requests.

    By default, Llamactl will start on http://localhost:8080.
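    To confirm the server is listening before opening a browser, a plain request to the root path (which serves the web UI) is enough; this assumes the default host and port:

    curl -I http://localhost:8080/\n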

    "},{"location":"quick-start/#access-the-web-ui","title":"Access the Web UI","text":"

    Open your web browser and navigate to:

    http://localhost:8080\n

    Log in with the management API key from the terminal output.

    You should see the Llamactl web interface.

    "},{"location":"quick-start/#create-your-first-instance","title":"Create Your First Instance","text":"
    1. Click the \"Add Instance\" button
    2. Fill in the instance configuration:

      • Name: Give your instance a descriptive name
      • Node: Select which node to deploy the instance to (defaults to \"main\" for single-node setups)
      • Backend Type: Choose from llama.cpp, MLX, or vLLM
      • Model: Model path or huggingface repo
      • Additional Options: Backend-specific parameters

      Auto-Assignment

      Llamactl automatically assigns ports from the configured port range (default: 8000-9000) and generates API keys if authentication is enabled. You typically don't need to manually specify these values.

      Remote Node Deployment

      If you have configured remote nodes in your configuration file, you can select which node to deploy the instance to. This allows you to distribute instances across multiple machines. See the Configuration guide for details on setting up remote nodes.

    3. Click \"Create Instance\"

    "},{"location":"quick-start/#start-your-instance","title":"Start Your Instance","text":"

    Once created, you can:

    "},{"location":"quick-start/#example-configurations","title":"Example Configurations","text":"

    Here are basic example configurations for each backend:

    llama.cpp backend:

    {\n  \"name\": \"llama2-7b\",\n  \"backend_type\": \"llama_cpp\",\n  \"backend_options\": {\n    \"model\": \"/path/to/llama-2-7b-chat.gguf\",\n    \"threads\": 4,\n    \"ctx_size\": 2048,\n    \"gpu_layers\": 32\n  },\n  \"nodes\": [\"main\"]\n}\n

    MLX backend (macOS only):

    {\n  \"name\": \"mistral-mlx\",\n  \"backend_type\": \"mlx_lm\",\n  \"backend_options\": {\n    \"model\": \"mlx-community/Mistral-7B-Instruct-v0.3-4bit\",\n    \"temp\": 0.7,\n    \"max_tokens\": 2048\n  },\n  \"nodes\": [\"main\"]\n}\n

    vLLM backend:

    {\n  \"name\": \"dialogpt-vllm\",\n  \"backend_type\": \"vllm\",\n  \"backend_options\": {\n    \"model\": \"microsoft/DialoGPT-medium\",\n    \"tensor_parallel_size\": 2,\n    \"gpu_memory_utilization\": 0.9\n  },\n  \"nodes\": [\"main\"]\n}\n

    Remote node deployment example:

    {\n  \"name\": \"distributed-model\",\n  \"backend_type\": \"llama_cpp\",\n  \"backend_options\": {\n    \"model\": \"/path/to/model.gguf\",\n    \"gpu_layers\": 32\n  },\n  \"nodes\": [\"worker1\"]\n}\n

    "},{"location":"quick-start/#docker-support","title":"Docker Support","text":"

    Llamactl can run backends in Docker containers. To enable Docker for a backend, add a docker section to that backend in your YAML configuration file (e.g. config.yaml) as shown below:

    backends:\n  vllm:\n    command: \"vllm\"\n    args: [\"serve\"]\n    docker:\n      enabled: true\n      image: \"vllm/vllm-openai:latest\"\n      args: [\"run\", \"--rm\", \"--network\", \"host\", \"--gpus\", \"all\", \"--shm-size\", \"1g\"]\n
    "},{"location":"quick-start/#using-the-api","title":"Using the API","text":"

    You can also manage instances via the REST API:

    # List all instances\ncurl http://localhost:8080/api/v1/instances\n\n# Create a new llama.cpp instance\ncurl -X POST http://localhost:8080/api/v1/instances/my-model \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"backend_type\": \"llama_cpp\",\n    \"backend_options\": {\n      \"model\": \"/path/to/model.gguf\"\n    }\n  }'\n\n# Start an instance\ncurl -X POST http://localhost:8080/api/v1/instances/my-model/start\n
    "},{"location":"quick-start/#openai-compatible-api","title":"OpenAI Compatible API","text":"

    Llamactl provides OpenAI-compatible endpoints, making it easy to integrate with existing OpenAI client libraries and tools.

    "},{"location":"quick-start/#chat-completions","title":"Chat Completions","text":"

    Once you have an instance running, you can use it with the OpenAI-compatible chat completions endpoint:

    curl -X POST http://localhost:8080/v1/chat/completions \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"model\": \"my-model\",\n    \"messages\": [\n      {\n        \"role\": \"user\",\n        \"content\": \"Hello! Can you help me write a Python function?\"\n      }\n    ],\n    \"max_tokens\": 150,\n    \"temperature\": 0.7\n  }'\n
    "},{"location":"quick-start/#using-with-python-openai-client","title":"Using with Python OpenAI Client","text":"

    You can also use the official OpenAI Python client:

    from openai import OpenAI\n\n# Point the client to your Llamactl server\nclient = OpenAI(\n    base_url=\"http://localhost:8080/v1\",\n    api_key=\"your-inference-api-key\"  # Use the inference API key from terminal or config\n)\n\n# Create a chat completion\nresponse = client.chat.completions.create(\n    model=\"my-model\",  # Use the name of your instance\n    messages=[\n        {\"role\": \"user\", \"content\": \"Explain quantum computing in simple terms\"}\n    ],\n    max_tokens=200,\n    temperature=0.7\n)\n\nprint(response.choices[0].message.content)\n

    API Key

    If you disabled authentication in your config, you can use any value for api_key (e.g., \"not-needed\"). Otherwise, use the inference API key shown in the terminal output on startup.

    "},{"location":"quick-start/#list-available-models","title":"List Available Models","text":"

    Get a list of running instances (models) in OpenAI-compatible format:

    curl http://localhost:8080/v1/models\n
    "},{"location":"quick-start/#next-steps","title":"Next Steps","text":""},{"location":"troubleshooting/","title":"Troubleshooting","text":"

    Issues specific to Llamactl deployment and operation.

    "},{"location":"troubleshooting/#configuration-issues","title":"Configuration Issues","text":""},{"location":"troubleshooting/#invalid-configuration","title":"Invalid Configuration","text":"

    Problem: Invalid configuration preventing startup

    Solutions: 1. Use minimal configuration:

    server:\n  host: \"0.0.0.0\"\n  port: 8080\ninstances:\n  port_range: [8000, 9000]\n

    2. Check data directory permissions:
      # Ensure data directory is writable (default: ~/.local/share/llamactl)\nmkdir -p ~/.local/share/llamactl/{instances,logs}\n
    "},{"location":"troubleshooting/#instance-management-issues","title":"Instance Management Issues","text":""},{"location":"troubleshooting/#instance-fails-to-start","title":"Instance Fails to Start","text":"

    Problem: Instance fails to start or immediately stops

    Solutions:

    1. Check instance logs to see the actual error:

      curl http://localhost:8080/api/v1/instances/{name}/logs\n# Or check log files directly\ntail -f ~/.local/share/llamactl/logs/{instance-name}.log\n

    2. Verify backend is installed:

      • llama.cpp: Ensure llama-server is in PATH
      • MLX: Ensure mlx-lm Python package is installed
      • vLLM: Ensure vllm Python package is installed
    3. Check model path and format:

      • Use absolute paths to model files
      • Verify model format matches backend (GGUF for llama.cpp, etc.)
    4. Verify backend command configuration:

      • Check that the backend command is correctly configured in the global config
      • For virtual environments, specify the full path to the command (e.g., /path/to/venv/bin/mlx_lm.server)
      • See the Configuration Guide for backend configuration details
      • Test the backend directly (see Backend-Specific Issues below)
    "},{"location":"troubleshooting/#backend-specific-issues","title":"Backend-Specific Issues","text":"

    Problem: Model loading, memory, GPU, or performance issues

    Most model-specific issues (memory, GPU configuration, performance tuning) are backend-specific and should be resolved by consulting the respective backend documentation:

    llama.cpp: - llama.cpp GitHub - llama-server README

    MLX: - MLX-LM GitHub - MLX-LM Server Guide

    vLLM: - vLLM Documentation - OpenAI Compatible Server - vllm serve Command

    Testing backends directly:

    Testing your model and configuration directly with the backend helps determine if the issue is with llamactl or the backend itself:

    # llama.cpp\nllama-server --model /path/to/model.gguf --port 8081\n\n# MLX\nmlx_lm.server --model mlx-community/Mistral-7B-Instruct-v0.3-4bit --port 8081\n\n# vLLM\nvllm serve microsoft/DialoGPT-medium --port 8081\n
    "},{"location":"troubleshooting/#api-and-network-issues","title":"API and Network Issues","text":""},{"location":"troubleshooting/#cors-errors","title":"CORS Errors","text":"

    Problem: Web UI shows CORS errors in browser console

    Solutions: 1. Configure allowed origins:

    server:\n  allowed_origins:\n    - \"http://localhost:3000\"\n    - \"https://yourdomain.com\"\n

    "},{"location":"troubleshooting/#authentication-issues","title":"Authentication Issues","text":"

    Problem: API requests failing with authentication errors

    Solutions: 1. Disable authentication temporarily:

    auth:\n  require_management_auth: false\n  require_inference_auth: false\n

    2. Configure API keys:

      auth:\n  management_keys:\n    - \"your-management-key\"\n  inference_keys:\n    - \"your-inference-key\"\n

    3. Use correct Authorization header:

      curl -H \"Authorization: Bearer your-api-key\" \\\n  http://localhost:8080/api/v1/instances\n

    "},{"location":"troubleshooting/#remote-node-issues","title":"Remote Node Issues","text":""},{"location":"troubleshooting/#node-configuration","title":"Node Configuration","text":"

    Problem: Remote instances not appearing or cannot be managed

    Solutions: 1. Verify node configuration:

    local_node: \"main\"  # Must match a key in nodes map\nnodes:\n  main:\n    address: \"\"     # Empty for local node\n  worker1:\n    address: \"http://worker1.internal:8080\"\n    api_key: \"secure-key\"  # Must match worker1's management key\n

    2. Check node name consistency:
      • local_node on each node must match what other nodes call it
      • Node names are case-sensitive

    3. Test remote node connectivity:

      curl -H \"Authorization: Bearer remote-node-key\" \\\n  http://remote-node:8080/api/v1/instances\n

    "},{"location":"troubleshooting/#debugging-and-logs","title":"Debugging and Logs","text":""},{"location":"troubleshooting/#viewing-instance-logs","title":"Viewing Instance Logs","text":"
    # Get instance logs via API\ncurl http://localhost:8080/api/v1/instances/{name}/logs\n\n# Or check log files directly\ntail -f ~/.local/share/llamactl/logs/{instance-name}.log\n
    "},{"location":"troubleshooting/#enable-debug-logging","title":"Enable Debug Logging","text":"
    export LLAMACTL_LOG_LEVEL=debug\nllamactl\n
    "},{"location":"troubleshooting/#getting-help","title":"Getting Help","text":"

    When reporting issues, include:

    1. System information:

      llamactl --version\n

    2. Configuration file (remove sensitive keys); the sanitized config endpoint sketched after this list can also help

    3. Relevant log output

    4. Steps to reproduce the issue
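    The server also exposes a sanitized view of its configuration via GET /api/v1/config, which can be handy when putting a report together; review the output before sharing it:

    curl http://localhost:8080/api/v1/config \\\n  -H \"Authorization: Bearer <token>\"\n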

    "}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index b10731d..2a369ca 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -2,30 +2,30 @@ https://llamactl.org/dev/ - 2025-11-13 + 2025-11-15 https://llamactl.org/dev/api-reference/ - 2025-11-13 + 2025-11-15 https://llamactl.org/dev/configuration/ - 2025-11-13 + 2025-11-15 https://llamactl.org/dev/installation/ - 2025-11-13 + 2025-11-15 https://llamactl.org/dev/managing-instances/ - 2025-11-13 + 2025-11-15 https://llamactl.org/dev/quick-start/ - 2025-11-13 + 2025-11-15 https://llamactl.org/dev/troubleshooting/ - 2025-11-13 + 2025-11-15 \ No newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 4fedb38fd9f6a553f0f2f053b6601074412c7fe4..02d968a3ad224e9ff89629ea079ffb1d7ad01009 100644 GIT binary patch literal 257 zcmV+c0sj6UiwFpS!WU@*|8r?{Wo=<_E_iKh0KJnzZo?oDMfW*{#U5iviquv%+4ThN z0VpOmDliT+j(hx$ov7-zT`(Kq^Yx!*1l{>7hU(@rdPu!!>qb;ASr~j8d-4AEDxbtz zAG#$6bX2i9VJ{}4`9-O9U2F8@qM5OVJSwykA5c9+)zr^IA1bab;J(|d3t;t@f*ccq ziN;dcBL;Ubb*<461T$lu@~F^Gd_Z*u*)*-y#bOa-Zw)AHc~r<900|37(O`0wd&M4dDi Hs{;T4v!;XZ diff --git a/dev/swagger.json b/dev/swagger.json index 26f9662..f79a008 100644 --- a/dev/swagger.json +++ b/dev/swagger.json @@ -249,6 +249,34 @@ } } }, + "/api/v1/config": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Returns the current server configuration (sanitized)", + "tags": [ + "System" + ], + "summary": "Get server configuration", + "responses": { + "200": { + "description": "Sanitized configuration", + "schema": { + "$ref": "#/definitions/config.AppConfig" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, "/api/v1/instances": { "get": { "security": [ @@ -1468,6 +1496,247 @@ } }, "definitions": { + "config.AppConfig": { + "type": "object", + "properties": { + "auth": { + "$ref": "#/definitions/config.AuthConfig" + }, + "backends": { + "$ref": "#/definitions/config.BackendConfig" + }, + "build_time": { + "type": "string" + }, + "commit_hash": { + "type": "string" + }, + "instances": { + "$ref": "#/definitions/config.InstancesConfig" + }, + "local_node": { + "type": "string" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/config.NodeConfig" + } + }, + "server": { + "$ref": "#/definitions/config.ServerConfig" + }, + "version": { + "type": "string" + } + } + }, + "config.AuthConfig": { + "type": "object", + "properties": { + "inference_keys": { + "description": "List of keys for OpenAI compatible inference endpoints", + "type": "array", + "items": { + "type": "string" + } + }, + "management_keys": { + "description": "List of keys for management endpoints", + "type": "array", + "items": { + "type": "string" + } + }, + "require_inference_auth": { + "description": "Require authentication for OpenAI compatible inference endpoints", + "type": "boolean" + }, + "require_management_auth": { + "description": "Require authentication for management endpoints", + "type": "boolean" + } + } + }, + "config.BackendConfig": { + "type": "object", + "properties": { + "llama-cpp": { + "$ref": "#/definitions/config.BackendSettings" + }, + "mlx": { + "$ref": "#/definitions/config.BackendSettings" + }, + "vllm": { + "$ref": "#/definitions/config.BackendSettings" + } + } + }, + "config.BackendSettings": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "type": "string" + }, + "docker": { + 
"$ref": "#/definitions/config.DockerSettings" + }, + "environment": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "response_headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "config.DockerSettings": { + "type": "object", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "enabled": { + "type": "boolean" + }, + "environment": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "image": { + "type": "string" + } + } + }, + "config.InstancesConfig": { + "type": "object", + "properties": { + "auto_create_dirs": { + "description": "Automatically create the data directory if it doesn't exist", + "type": "boolean" + }, + "configs_dir": { + "description": "Instance config directory override", + "type": "string" + }, + "data_dir": { + "description": "Directory where all llamactl data will be stored (instances.json, logs, etc.)", + "type": "string" + }, + "default_auto_restart": { + "description": "Default auto-restart setting for new instances", + "type": "boolean" + }, + "default_max_restarts": { + "description": "Default max restarts for new instances", + "type": "integer" + }, + "default_on_demand_start": { + "description": "Default on-demand start setting for new instances", + "type": "boolean" + }, + "default_restart_delay": { + "description": "Default restart delay for new instances (in seconds)", + "type": "integer" + }, + "enable_lru_eviction": { + "description": "Enable LRU eviction for instance logs", + "type": "boolean" + }, + "logs_dir": { + "description": "Logs directory override", + "type": "string" + }, + "max_instances": { + "description": "Maximum number of instances that can be created", + "type": "integer" + }, + "max_running_instances": { + "description": "Maximum number of instances that can be running at the same time", + "type": "integer" + }, + "on_demand_start_timeout": { + "description": "How long to wait for an instance to start on demand (in seconds)", + "type": "integer" + }, + "port_range": { + "description": "Port range for instances (e.g., 8000,9000)", + "type": "array", + "items": { + "type": "integer" + } + }, + "timeout_check_interval": { + "description": "Interval for checking instance timeouts (in minutes)", + "type": "integer" + } + } + }, + "config.NodeConfig": { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "api_key": { + "type": "string" + } + } + }, + "config.ServerConfig": { + "type": "object", + "properties": { + "allowed_headers": { + "description": "Allowed headers for CORS (e.g., \"Accept\", \"Authorization\", \"Content-Type\", \"X-CSRF-Token\")", + "type": "array", + "items": { + "type": "string" + } + }, + "allowed_origins": { + "description": "Allowed origins for CORS (e.g., \"http://localhost:3000\")", + "type": "array", + "items": { + "type": "string" + } + }, + "enable_swagger": { + "description": "Enable Swagger UI for API documentation", + "type": "boolean" + }, + "host": { + "description": "Server host to bind to", + "type": "string" + }, + "port": { + "description": "Server port to bind to", + "type": "integer" + }, + "response_headers": { + "description": "Response headers to send with responses", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, "instance.Instance": { "type": "object", "properties": { @@ -1487,6 +1756,13 @@ "description": "Auto restart", "type": "boolean" }, + "command_override": { + "type": "string" + }, + 
"docker_enabled": { + "description": "Execution context overrides", + "type": "boolean" + }, "environment": { "description": "Environment variables", "type": "object", diff --git a/dev/swagger.yaml b/dev/swagger.yaml index 7506036..2888ce1 100644 --- a/dev/swagger.yaml +++ b/dev/swagger.yaml @@ -1,5 +1,173 @@ basePath: /api/v1 definitions: + config.AppConfig: + properties: + auth: + $ref: '#/definitions/config.AuthConfig' + backends: + $ref: '#/definitions/config.BackendConfig' + build_time: + type: string + commit_hash: + type: string + instances: + $ref: '#/definitions/config.InstancesConfig' + local_node: + type: string + nodes: + additionalProperties: + $ref: '#/definitions/config.NodeConfig' + type: object + server: + $ref: '#/definitions/config.ServerConfig' + version: + type: string + type: object + config.AuthConfig: + properties: + inference_keys: + description: List of keys for OpenAI compatible inference endpoints + items: + type: string + type: array + management_keys: + description: List of keys for management endpoints + items: + type: string + type: array + require_inference_auth: + description: Require authentication for OpenAI compatible inference endpoints + type: boolean + require_management_auth: + description: Require authentication for management endpoints + type: boolean + type: object + config.BackendConfig: + properties: + llama-cpp: + $ref: '#/definitions/config.BackendSettings' + mlx: + $ref: '#/definitions/config.BackendSettings' + vllm: + $ref: '#/definitions/config.BackendSettings' + type: object + config.BackendSettings: + properties: + args: + items: + type: string + type: array + command: + type: string + docker: + $ref: '#/definitions/config.DockerSettings' + environment: + additionalProperties: + type: string + type: object + response_headers: + additionalProperties: + type: string + type: object + type: object + config.DockerSettings: + properties: + args: + items: + type: string + type: array + enabled: + type: boolean + environment: + additionalProperties: + type: string + type: object + image: + type: string + type: object + config.InstancesConfig: + properties: + auto_create_dirs: + description: Automatically create the data directory if it doesn't exist + type: boolean + configs_dir: + description: Instance config directory override + type: string + data_dir: + description: Directory where all llamactl data will be stored (instances.json, + logs, etc.) 
+ type: string + default_auto_restart: + description: Default auto-restart setting for new instances + type: boolean + default_max_restarts: + description: Default max restarts for new instances + type: integer + default_on_demand_start: + description: Default on-demand start setting for new instances + type: boolean + default_restart_delay: + description: Default restart delay for new instances (in seconds) + type: integer + enable_lru_eviction: + description: Enable LRU eviction for instance logs + type: boolean + logs_dir: + description: Logs directory override + type: string + max_instances: + description: Maximum number of instances that can be created + type: integer + max_running_instances: + description: Maximum number of instances that can be running at the same time + type: integer + on_demand_start_timeout: + description: How long to wait for an instance to start on demand (in seconds) + type: integer + port_range: + description: Port range for instances (e.g., 8000,9000) + items: + type: integer + type: array + timeout_check_interval: + description: Interval for checking instance timeouts (in minutes) + type: integer + type: object + config.NodeConfig: + properties: + address: + type: string + api_key: + type: string + type: object + config.ServerConfig: + properties: + allowed_headers: + description: Allowed headers for CORS (e.g., "Accept", "Authorization", "Content-Type", + "X-CSRF-Token") + items: + type: string + type: array + allowed_origins: + description: Allowed origins for CORS (e.g., "http://localhost:3000") + items: + type: string + type: array + enable_swagger: + description: Enable Swagger UI for API documentation + type: boolean + host: + description: Server host to bind to + type: string + port: + description: Server port to bind to + type: integer + response_headers: + additionalProperties: + type: string + description: Response headers to send with responses + type: object + type: object instance.Instance: properties: created: @@ -13,6 +181,11 @@ definitions: auto_restart: description: Auto restart type: boolean + command_override: + type: string + docker_enabled: + description: Execution context overrides + type: boolean environment: additionalProperties: type: string @@ -216,6 +389,23 @@ paths: summary: Parse vllm serve command tags: - Backends + /api/v1/config: + get: + description: Returns the current server configuration (sanitized) + responses: + "200": + description: Sanitized configuration + schema: + $ref: '#/definitions/config.AppConfig' + "500": + description: Internal Server Error + schema: + type: string + security: + - ApiKeyAuth: [] + summary: Get server configuration + tags: + - System /api/v1/instances: get: description: Returns a list of all instances managed by the server