Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-12-23 01:24:24 +00:00)

Compare commits: c734329a62...main (20 commits)
Commits in this range:
70dc635e4c, e0d342f31f, 9cea295305, 1f78d3f780, e7baeb9ece, 3cec850e74, 67098d7801, 3c95e76137, 761cdfe7d8, 99eba3daa9, d9d7b6d814, 5062c882de, ee122d669c, 41d904475c, 7f5292412c, ec84a7d331, b45219a01e, 463bb561e1, ebdb9143c0, 4269d04381
36  .github/workflows/release.yaml  (vendored)
@@ -45,15 +45,23 @@ jobs:
  build:
    name: Build Binaries
    needs: build-webui
    runs-on: ubuntu-latest
    runs-on: ${{ matrix.runner }}
    strategy:
      matrix:
        goos: [linux, windows, darwin]
        goarch: [amd64, arm64]
        exclude:
          # Windows ARM64 support is limited
          - goos: windows
        include:
          - goos: linux
            goarch: amd64
            runner: ubuntu-latest
          - goos: linux
            goarch: arm64
            runner: ubuntu-latest
            cc: aarch64-linux-gnu-gcc
          - goos: darwin
            goarch: arm64
            runner: macos-latest
          - goos: windows
            goarch: amd64
            runner: windows-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
@@ -70,11 +78,19 @@ jobs:
          name: webui-dist
          path: webui/dist/

      - name: Install cross-compilation tools (Linux ARM64 only)
        if: matrix.cc != ''
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu

      - name: Build binary
        env:
          GOOS: ${{ matrix.goos }}
          GOARCH: ${{ matrix.goarch }}
          CGO_ENABLED: 0
          CGO_ENABLED: 1
          CC: ${{ matrix.cc }}
        shell: bash
        run: |
          # Set binary extension for Windows
          BINARY_NAME="llamactl"
@@ -91,8 +107,10 @@ jobs:
            ARCHIVE_OS="macos"
          fi
          ARCHIVE_NAME="llamactl-${{ github.ref_name }}-${ARCHIVE_OS}-${{ matrix.goarch }}"

          if [ "${{ matrix.goos }}" = "windows" ]; then
            zip "${ARCHIVE_NAME}.zip" "${BINARY_NAME}"
            # Use 7z on Windows (pre-installed)
            7z a "${ARCHIVE_NAME}.zip" "${BINARY_NAME}"
            echo "ASSET_PATH=${ARCHIVE_NAME}.zip" >> $GITHUB_ENV
          else
            tar -czf "${ARCHIVE_NAME}.tar.gz" "${BINARY_NAME}"
@@ -179,4 +197,4 @@ jobs:
        with:
          files: assets/checksums.txt
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -12,6 +12,7 @@

**🚀 Easy Model Management**
- **Multiple Models Simultaneously**: Run different models at the same time (7B for speed, 70B for quality)
- **Dynamic Multi-Model Instances**: llama.cpp router mode - serve multiple models from a single instance with on-demand loading
- **Smart Resource Management**: Automatic idle timeout, LRU eviction, and configurable instance limits
- **Web Dashboard**: Modern React UI for managing instances, monitoring health, and viewing logs

@@ -183,7 +184,6 @@ data_dir: ~/.local/share/llamactl # Main data directory (database, instances, l

instances:
  port_range: [8000, 9000] # Port range for instances
  configs_dir: ~/.local/share/llamactl/instances # Instance configs directory (platform dependent) [deprecated]
  logs_dir: ~/.local/share/llamactl/logs # Logs directory (platform dependent)
  auto_create_dirs: true # Auto-create data/config/logs dirs if missing
  max_instances: -1 # Max instances (-1 = unlimited)
@@ -57,11 +57,6 @@ func main() {
		log.Printf("Error creating data directory %s: %v\nData persistence may not be available.", cfg.DataDir, err)
	}

	// Create instances directory
	if err := os.MkdirAll(cfg.Instances.InstancesDir, 0755); err != nil {
		log.Printf("Error creating instances directory %s: %v\nPersistence will not be available.", cfg.Instances.InstancesDir, err)
	}

	// Create logs directory
	if err := os.MkdirAll(cfg.Instances.LogsDir, 0755); err != nil {
		log.Printf("Error creating log directory %s: %v\nInstance logs will not be available.", cfg.Instances.LogsDir, err)
@@ -84,11 +79,6 @@ func main() {
		log.Fatalf("Failed to run database migrations: %v", err)
	}

	// Migrate from JSON files if needed (one-time migration)
	if err := migrateFromJSON(&cfg, db); err != nil {
		log.Printf("Warning: Failed to migrate from JSON: %v", err)
	}

	// Initialize the instance manager with dependency injection
	instanceManager := manager.New(&cfg, db)
@@ -1,87 +0,0 @@
package main

import (
	"encoding/json"
	"fmt"
	"llamactl/pkg/config"
	"llamactl/pkg/database"
	"llamactl/pkg/instance"
	"log"
	"os"
	"path/filepath"
)

// migrateFromJSON migrates instances from JSON files to SQLite database
// This is a one-time migration that runs on first startup with existing JSON files.
// Migrated files are moved to a migrated subdirectory to avoid re-importing.
func migrateFromJSON(cfg *config.AppConfig, db database.InstanceStore) error {
	instancesDir := cfg.Instances.InstancesDir
	if instancesDir == "" {
		return nil // No instances directory configured
	}

	// Check if instances directory exists
	if _, err := os.Stat(instancesDir); os.IsNotExist(err) {
		return nil // No instances directory, nothing to migrate
	}

	// Find all JSON files
	files, err := filepath.Glob(filepath.Join(instancesDir, "*.json"))
	if err != nil {
		return fmt.Errorf("failed to list instance files: %w", err)
	}

	if len(files) == 0 {
		return nil // No JSON files to migrate
	}

	log.Printf("Migrating %d instances from JSON to SQLite...", len(files))

	// Create migrated directory
	migratedDir := filepath.Join(instancesDir, "migrated")
	if err := os.MkdirAll(migratedDir, 0755); err != nil {
		return fmt.Errorf("failed to create migrated directory: %w", err)
	}

	// Migrate each JSON file
	var migrated int
	for _, file := range files {
		if err := migrateJSONFile(file, db); err != nil {
			log.Printf("Failed to migrate %s: %v", file, err)
			continue
		}

		// Move the file to the migrated directory
		destPath := filepath.Join(migratedDir, filepath.Base(file))
		if err := os.Rename(file, destPath); err != nil {
			log.Printf("Warning: Failed to move %s to migrated directory: %v", file, err)
			// Don't fail the migration if we can't move the file
		}

		migrated++
	}

	log.Printf("Successfully migrated %d/%d instances to SQLite", migrated, len(files))

	return nil
}

// migrateJSONFile migrates a single JSON file to the database
func migrateJSONFile(filename string, db database.InstanceStore) error {
	data, err := os.ReadFile(filename)
	if err != nil {
		return fmt.Errorf("failed to read file: %w", err)
	}

	var inst instance.Instance
	if err := json.Unmarshal(data, &inst); err != nil {
		return fmt.Errorf("failed to unmarshal instance: %w", err)
	}

	if err := db.Save(&inst); err != nil {
		return fmt.Errorf("failed to save instance to database: %w", err)
	}

	log.Printf("Migrated instance %s from JSON to SQLite", inst.Name)
	return nil
}
176  docs/docs.go
@@ -999,6 +999,156 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/llama-cpp/{name}/models": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "Returns a list of models available in the specified llama.cpp instance",
|
||||
"tags": [
|
||||
"Llama.cpp"
|
||||
],
|
||||
"summary": "List models in a llama.cpp instance",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Instance Name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Models list response",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Invalid instance",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/llama-cpp/{name}/models/{model}/load": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "Loads the specified model in the given llama.cpp instance",
|
||||
"tags": [
|
||||
"Llama.cpp"
|
||||
],
|
||||
"summary": "Load a model in a llama.cpp instance",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Instance Name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Model Name",
|
||||
"name": "model",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Success message",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Invalid request",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/llama-cpp/{name}/models/{model}/unload": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "Unloads the specified model in the given llama.cpp instance",
|
||||
"tags": [
|
||||
"Llama.cpp"
|
||||
],
|
||||
"summary": "Unload a model in a llama.cpp instance",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Instance Name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Model Name",
|
||||
"name": "model",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Success message",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Invalid request",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/nodes": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -1788,13 +1938,6 @@ const docTemplate = `{
|
||||
"config.AuthConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"inference_keys": {
|
||||
"description": "List of keys for OpenAI compatible inference endpoints",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"management_keys": {
|
||||
"description": "List of keys for management endpoints",
|
||||
"type": "array",
|
||||
@@ -1905,10 +2048,6 @@ const docTemplate = `{
|
||||
"description": "Automatically create the data directory if it doesn't exist",
|
||||
"type": "boolean"
|
||||
},
|
||||
"configs_dir": {
|
||||
"description": "Instance config directory override (relative to data_dir if not absolute)",
|
||||
"type": "string"
|
||||
},
|
||||
"default_auto_restart": {
|
||||
"description": "Default auto-restart setting for new instances",
|
||||
"type": "boolean"
|
||||
@@ -1929,6 +2068,21 @@ const docTemplate = `{
|
||||
"description": "Enable LRU eviction for instance logs",
|
||||
"type": "boolean"
|
||||
},
|
||||
"logRotationCompress": {
|
||||
"description": "Whether to compress rotated log files",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"logRotationEnabled": {
|
||||
"description": "Log rotation enabled",
|
||||
"type": "boolean",
|
||||
"default": true
|
||||
},
|
||||
"logRotationMaxSize": {
|
||||
"description": "Maximum log file size in MB before rotation",
|
||||
"type": "integer",
|
||||
"default": 100
|
||||
},
|
||||
"logs_dir": {
|
||||
"description": "Logs directory override (relative to data_dir if not absolute)",
|
||||
"type": "string"
|
||||
|
||||
@@ -222,6 +222,100 @@ curl -X DELETE http://localhost:8080/api/v1/instances/{name} \
  -H "Authorization: Bearer <token>"
```

## Multi-Model llama.cpp Instances

!!! info "llama.cpp Router Mode"
    llama.cpp instances support [**router mode**](https://huggingface.co/blog/ggml-org/model-management-in-llamacpp), allowing a single instance to serve multiple models dynamically. Models are loaded on-demand from the llama.cpp cache without restarting the instance.

### Creating a Multi-Model Instance

**Via Web UI**

1. Click **"Create Instance"**
2. Select **Backend Type**: "Llama Server"
3. Leave **Backend Options** empty (`{}`) or omit the model field
4. Create the instance

**Via API**

```bash
# Create an instance without specifying a model (router mode)
curl -X POST http://localhost:8080/api/v1/instances/my-router \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer <token>" \
  -d '{
    "backend_type": "llama_cpp",
    "backend_options": {},
    "nodes": ["main"]
  }'
```

### Managing Models

**Via Web UI**

1. Start the router mode instance
2. The instance card displays a badge showing loaded/total models (e.g., "2/5 models")
3. Click the **"Models"** button on the instance card
4. The models dialog opens, showing:
    - All available models from the llama.cpp instance
    - A status indicator (loaded, loading, or unloaded)
    - Load/Unload buttons for each model
5. Click **"Load"** to load a model into memory
6. Click **"Unload"** to free up memory

**Via API**

```bash
# List available models
curl http://localhost:8080/api/v1/llama-cpp/my-router/models \
  -H "Authorization: Bearer <token>"

# Load a model
curl -X POST http://localhost:8080/api/v1/llama-cpp/my-router/models/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf/load \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer <token>" \
  -d '{"model": "Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"}'

# Unload a model
curl -X POST http://localhost:8080/api/v1/llama-cpp/my-router/models/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf/unload \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer <token>" \
  -d '{"model": "Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"}'
```

### Using Multi-Model Instances

When making inference requests to a multi-model instance, specify the model using the format `instance_name/model_name`:

```bash
# OpenAI-compatible chat completion with specific model
curl -X POST http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer <inference-key>" \
  -d '{
    "model": "my-router/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf",
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }'

# List all available models (includes multi-model instances)
curl http://localhost:8080/v1/models \
  -H "Authorization: Bearer <inference-key>"
```

The response from `/v1/models` includes each model from a multi-model instance as a separate entry in the format `instance_name/model_name`.
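For illustration, a `/v1/models` response for a router-mode instance named `my-router` with two cached models, plus a regular instance named `my-llama`, might look roughly like this; the per-entry fields match the handler's OpenAI-compatible structs, the wrapper follows the standard OpenAI list shape, and the model names and timestamps below are placeholders:

```json
{
  "object": "list",
  "data": [
    { "id": "my-router/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf", "object": "model", "created": 1734000000, "owned_by": "my-router" },
    { "id": "my-router/Qwen2.5-7B-Instruct-Q4_K_M.gguf", "object": "model", "created": 1734000000, "owned_by": "my-router" },
    { "id": "my-llama", "object": "model", "created": 1733990000, "owned_by": "llamactl" }
  ]
}
```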
### Model Discovery

Models are automatically discovered from the llama.cpp cache directory. The default cache locations are:

- **Linux/macOS**: `~/.cache/llama.cpp/`
- **Windows**: `%LOCALAPPDATA%\llama.cpp\`

Place your GGUF model files in the cache directory, and they will appear in the models list when you start a router mode instance.
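As a minimal sketch (the paths and model file name are placeholders), dropping a local GGUF file into the cache is enough for it to show up:

```bash
# Linux/macOS: create the cache directory and copy a GGUF file into it
mkdir -p ~/.cache/llama.cpp
cp /path/to/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf ~/.cache/llama.cpp/

# Start (or restart) the router mode instance, then check what it sees
curl http://localhost:8080/api/v1/llama-cpp/my-router/models \
  -H "Authorization: Bearer <token>"
```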
## Instance Proxy

Llamactl proxies all requests to the underlying backend instances (llama-server, MLX, or vLLM).
@@ -992,6 +992,156 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/llama-cpp/{name}/models": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "Returns a list of models available in the specified llama.cpp instance",
|
||||
"tags": [
|
||||
"Llama.cpp"
|
||||
],
|
||||
"summary": "List models in a llama.cpp instance",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Instance Name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Models list response",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Invalid instance",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/llama-cpp/{name}/models/{model}/load": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "Loads the specified model in the given llama.cpp instance",
|
||||
"tags": [
|
||||
"Llama.cpp"
|
||||
],
|
||||
"summary": "Load a model in a llama.cpp instance",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Instance Name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Model Name",
|
||||
"name": "model",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Success message",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Invalid request",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/llama-cpp/{name}/models/{model}/unload": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "Unloads the specified model in the given llama.cpp instance",
|
||||
"tags": [
|
||||
"Llama.cpp"
|
||||
],
|
||||
"summary": "Unload a model in a llama.cpp instance",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Instance Name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Model Name",
|
||||
"name": "model",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Success message",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Invalid request",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal Server Error",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/nodes": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -1781,13 +1931,6 @@
|
||||
"config.AuthConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"inference_keys": {
|
||||
"description": "List of keys for OpenAI compatible inference endpoints",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"management_keys": {
|
||||
"description": "List of keys for management endpoints",
|
||||
"type": "array",
|
||||
@@ -1898,10 +2041,6 @@
|
||||
"description": "Automatically create the data directory if it doesn't exist",
|
||||
"type": "boolean"
|
||||
},
|
||||
"configs_dir": {
|
||||
"description": "Instance config directory override (relative to data_dir if not absolute)",
|
||||
"type": "string"
|
||||
},
|
||||
"default_auto_restart": {
|
||||
"description": "Default auto-restart setting for new instances",
|
||||
"type": "boolean"
|
||||
@@ -1922,6 +2061,21 @@
|
||||
"description": "Enable LRU eviction for instance logs",
|
||||
"type": "boolean"
|
||||
},
|
||||
"logRotationCompress": {
|
||||
"description": "Whether to compress rotated log files",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"logRotationEnabled": {
|
||||
"description": "Log rotation enabled",
|
||||
"type": "boolean",
|
||||
"default": true
|
||||
},
|
||||
"logRotationMaxSize": {
|
||||
"description": "Maximum log file size in MB before rotation",
|
||||
"type": "integer",
|
||||
"default": 100
|
||||
},
|
||||
"logs_dir": {
|
||||
"description": "Logs directory override (relative to data_dir if not absolute)",
|
||||
"type": "string"
|
||||
|
||||
@@ -39,11 +39,6 @@ definitions:
|
||||
type: object
|
||||
config.AuthConfig:
|
||||
properties:
|
||||
inference_keys:
|
||||
description: List of keys for OpenAI compatible inference endpoints
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
management_keys:
|
||||
description: List of keys for management endpoints
|
||||
items:
|
||||
@@ -118,10 +113,6 @@ definitions:
|
||||
auto_create_dirs:
|
||||
description: Automatically create the data directory if it doesn't exist
|
||||
type: boolean
|
||||
configs_dir:
|
||||
description: Instance config directory override (relative to data_dir if not
|
||||
absolute)
|
||||
type: string
|
||||
default_auto_restart:
|
||||
description: Default auto-restart setting for new instances
|
||||
type: boolean
|
||||
@@ -137,6 +128,18 @@ definitions:
|
||||
enable_lru_eviction:
|
||||
description: Enable LRU eviction for instance logs
|
||||
type: boolean
|
||||
logRotationCompress:
|
||||
default: false
|
||||
description: Whether to compress rotated log files
|
||||
type: boolean
|
||||
logRotationEnabled:
|
||||
default: true
|
||||
description: Log rotation enabled
|
||||
type: boolean
|
||||
logRotationMaxSize:
|
||||
default: 100
|
||||
description: Maximum log file size in MB before rotation
|
||||
type: integer
|
||||
logs_dir:
|
||||
description: Logs directory override (relative to data_dir if not absolute)
|
||||
type: string
|
||||
@@ -955,6 +958,102 @@ paths:
|
||||
summary: Stop a running instance
|
||||
tags:
|
||||
- Instances
|
||||
/api/v1/llama-cpp/{name}/models:
|
||||
get:
|
||||
description: Returns a list of models available in the specified llama.cpp instance
|
||||
parameters:
|
||||
- description: Instance Name
|
||||
in: path
|
||||
name: name
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: Models list response
|
||||
schema:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
"400":
|
||||
description: Invalid instance
|
||||
schema:
|
||||
type: string
|
||||
"500":
|
||||
description: Internal Server Error
|
||||
schema:
|
||||
type: string
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
summary: List models in a llama.cpp instance
|
||||
tags:
|
||||
- Llama.cpp
|
||||
/api/v1/llama-cpp/{name}/models/{model}/load:
|
||||
post:
|
||||
description: Loads the specified model in the given llama.cpp instance
|
||||
parameters:
|
||||
- description: Instance Name
|
||||
in: path
|
||||
name: name
|
||||
required: true
|
||||
type: string
|
||||
- description: Model Name
|
||||
in: path
|
||||
name: model
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: Success message
|
||||
schema:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
"400":
|
||||
description: Invalid request
|
||||
schema:
|
||||
type: string
|
||||
"500":
|
||||
description: Internal Server Error
|
||||
schema:
|
||||
type: string
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
summary: Load a model in a llama.cpp instance
|
||||
tags:
|
||||
- Llama.cpp
|
||||
/api/v1/llama-cpp/{name}/models/{model}/unload:
|
||||
post:
|
||||
description: Unloads the specified model in the given llama.cpp instance
|
||||
parameters:
|
||||
- description: Instance Name
|
||||
in: path
|
||||
name: name
|
||||
required: true
|
||||
type: string
|
||||
- description: Model Name
|
||||
in: path
|
||||
name: model
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: Success message
|
||||
schema:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
"400":
|
||||
description: Invalid request
|
||||
schema:
|
||||
type: string
|
||||
"500":
|
||||
description: Internal Server Error
|
||||
schema:
|
||||
type: string
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
summary: Unload a model in a llama.cpp instance
|
||||
tags:
|
||||
- Llama.cpp
|
||||
/api/v1/nodes:
|
||||
get:
|
||||
description: Returns a map of all nodes configured in the server (node name
|
||||
|
||||
@@ -14,6 +14,7 @@ const (
|
||||
BackendTypeLlamaCpp BackendType = "llama_cpp"
|
||||
BackendTypeMlxLm BackendType = "mlx_lm"
|
||||
BackendTypeVllm BackendType = "vllm"
|
||||
BackendTypeUnknown BackendType = "unknown"
|
||||
)
|
||||
|
||||
type backend interface {
|
||||
@@ -55,13 +56,15 @@ func (o *Options) UnmarshalJSON(data []byte) error {
|
||||
}
|
||||
|
||||
// Create backend from constructor map
|
||||
if o.BackendOptions != nil {
|
||||
constructor, exists := backendConstructors[o.BackendType]
|
||||
if !exists {
|
||||
return fmt.Errorf("unsupported backend type: %s", o.BackendType)
|
||||
}
|
||||
constructor, exists := backendConstructors[o.BackendType]
|
||||
if !exists {
|
||||
return fmt.Errorf("unsupported backend type: %s", o.BackendType)
|
||||
}
|
||||
|
||||
backend := constructor()
|
||||
backend := constructor()
|
||||
|
||||
// If backend_options is provided, unmarshal into the backend
|
||||
if o.BackendOptions != nil {
|
||||
optionsData, err := json.Marshal(o.BackendOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal backend options: %w", err)
|
||||
@@ -70,10 +73,11 @@ func (o *Options) UnmarshalJSON(data []byte) error {
|
||||
if err := json.Unmarshal(optionsData, backend); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal backend options: %w", err)
|
||||
}
|
||||
|
||||
// Store in the appropriate typed field for backward compatibility
|
||||
o.setBackendOptions(backend)
|
||||
}
|
||||
// If backend_options is nil or empty, backend remains as empty struct (for router mode)
|
||||
|
||||
// Store in the appropriate typed field
|
||||
o.setBackendOptions(backend)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -327,20 +327,30 @@ func (o *LlamaServerOptions) UnmarshalJSON(data []byte) error {
|
||||
}
|
||||
|
||||
func (o *LlamaServerOptions) GetPort() int {
|
||||
if o == nil {
|
||||
return 0
|
||||
}
|
||||
return o.Port
|
||||
}
|
||||
|
||||
func (o *LlamaServerOptions) SetPort(port int) {
|
||||
if o == nil {
|
||||
return
|
||||
}
|
||||
o.Port = port
|
||||
}
|
||||
|
||||
func (o *LlamaServerOptions) GetHost() string {
|
||||
if o == nil {
|
||||
return "localhost"
|
||||
}
|
||||
return o.Host
|
||||
}
|
||||
|
||||
func (o *LlamaServerOptions) Validate() error {
|
||||
// Allow nil options for router mode where llama.cpp manages models dynamically
|
||||
if o == nil {
|
||||
return validation.ValidationError(fmt.Errorf("llama server options cannot be nil for llama.cpp backend"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use reflection to check all string fields for injection patterns
|
||||
@@ -370,6 +380,9 @@ func (o *LlamaServerOptions) Validate() error {
|
||||
|
||||
// BuildCommandArgs converts InstanceOptions to command line arguments
|
||||
func (o *LlamaServerOptions) BuildCommandArgs() []string {
|
||||
if o == nil {
|
||||
return []string{}
|
||||
}
|
||||
// Llama uses multiple flags for arrays by default (not comma-separated)
|
||||
// Use package-level llamaMultiValuedFlags variable
|
||||
args := BuildCommandArgs(o, llamaMultiValuedFlags)
|
||||
@@ -381,6 +394,9 @@ func (o *LlamaServerOptions) BuildCommandArgs() []string {
|
||||
}
|
||||
|
||||
func (o *LlamaServerOptions) BuildDockerArgs() []string {
|
||||
if o == nil {
|
||||
return []string{}
|
||||
}
|
||||
// For llama, Docker args are the same as normal args
|
||||
return o.BuildCommandArgs()
|
||||
}
|
||||
|
||||
@@ -32,19 +32,7 @@ func LoadConfig(configPath string) (AppConfig, error) {
|
||||
// 3. Override with environment variables
|
||||
loadEnvVars(&cfg)
|
||||
|
||||
// Log warning if deprecated inference keys are present
|
||||
if len(cfg.Auth.InferenceKeys) > 0 {
|
||||
log.Println("⚠️ Config-based inference keys are no longer supported and will be ignored.")
|
||||
log.Println(" Please create inference keys in web UI or via management API.")
|
||||
}
|
||||
|
||||
// Set default directories if not specified
|
||||
if cfg.Instances.InstancesDir == "" {
|
||||
cfg.Instances.InstancesDir = filepath.Join(cfg.DataDir, "instances")
|
||||
} else {
|
||||
// Log deprecation warning if using custom instances dir
|
||||
log.Println("⚠️ Instances directory is deprecated and will be removed in future versions. Instances are persisted in the database.")
|
||||
}
|
||||
if cfg.Instances.LogsDir == "" {
|
||||
cfg.Instances.LogsDir = filepath.Join(cfg.DataDir, "logs")
|
||||
}
|
||||
@@ -101,7 +89,6 @@ func (cfg *AppConfig) SanitizedCopy() (AppConfig, error) {
|
||||
}
|
||||
|
||||
// Clear sensitive information
|
||||
sanitized.Auth.InferenceKeys = []string{}
|
||||
sanitized.Auth.ManagementKeys = []string{}
|
||||
|
||||
// Clear API keys from nodes
|
||||
|
||||
@@ -41,9 +41,6 @@ func TestLoadConfig_Defaults(t *testing.T) {
|
||||
t.Fatalf("Failed to get user home directory: %v", err)
|
||||
}
|
||||
|
||||
if cfg.Instances.InstancesDir != filepath.Join(homedir, ".local", "share", "llamactl", "instances") {
|
||||
t.Errorf("Expected default instances directory '%s', got %q", filepath.Join(homedir, ".local", "share", "llamactl", "instances"), cfg.Instances.InstancesDir)
|
||||
}
|
||||
if cfg.Instances.LogsDir != filepath.Join(homedir, ".local", "share", "llamactl", "logs") {
|
||||
t.Errorf("Expected default logs directory '%s', got %q", filepath.Join(homedir, ".local", "share", "llamactl", "logs"), cfg.Instances.LogsDir)
|
||||
}
|
||||
|
||||
@@ -53,10 +53,7 @@ func getDefaultConfig(dataDir string) AppConfig {
|
||||
},
|
||||
},
|
||||
Instances: InstancesConfig{
|
||||
PortRange: [2]int{8000, 9000},
|
||||
// NOTE: empty string is set as placeholder value since InstancesDir
|
||||
// should be relative path to DataDir if not explicitly set.
|
||||
InstancesDir: "",
|
||||
PortRange: [2]int{8000, 9000},
|
||||
AutoCreateDirs: true,
|
||||
MaxInstances: -1, // -1 means unlimited
|
||||
MaxRunningInstances: -1, // -1 means unlimited
|
||||
@@ -80,7 +77,6 @@ func getDefaultConfig(dataDir string) AppConfig {
|
||||
},
|
||||
Auth: AuthConfig{
|
||||
RequireInferenceAuth: true,
|
||||
InferenceKeys: []string{},
|
||||
RequireManagementAuth: true,
|
||||
ManagementKeys: []string{},
|
||||
},
|
||||
|
||||
@@ -31,9 +31,6 @@ func loadEnvVars(cfg *AppConfig) {
|
||||
if dataDir := os.Getenv("LLAMACTL_DATA_DIRECTORY"); dataDir != "" {
|
||||
cfg.DataDir = dataDir
|
||||
}
|
||||
if instancesDir := os.Getenv("LLAMACTL_INSTANCES_DIR"); instancesDir != "" {
|
||||
cfg.Instances.InstancesDir = instancesDir
|
||||
}
|
||||
if logsDir := os.Getenv("LLAMACTL_LOGS_DIR"); logsDir != "" {
|
||||
cfg.Instances.LogsDir = logsDir
|
||||
}
|
||||
@@ -220,9 +217,6 @@ func loadEnvVars(cfg *AppConfig) {
|
||||
cfg.Auth.RequireInferenceAuth = b
|
||||
}
|
||||
}
|
||||
if inferenceKeys := os.Getenv("LLAMACTL_INFERENCE_KEYS"); inferenceKeys != "" {
|
||||
cfg.Auth.InferenceKeys = strings.Split(inferenceKeys, ",")
|
||||
}
|
||||
if requireManagementAuth := os.Getenv("LLAMACTL_REQUIRE_MANAGEMENT_AUTH"); requireManagementAuth != "" {
|
||||
if b, err := strconv.ParseBool(requireManagementAuth); err == nil {
|
||||
cfg.Auth.RequireManagementAuth = b
|
||||
|
||||
@@ -81,9 +81,6 @@ type InstancesConfig struct {
|
||||
// Port range for instances (e.g., 8000,9000)
|
||||
PortRange [2]int `yaml:"port_range" json:"port_range"`
|
||||
|
||||
// Instance config directory override (relative to data_dir if not absolute)
|
||||
InstancesDir string `yaml:"configs_dir" json:"configs_dir"`
|
||||
|
||||
// Automatically create the data directory if it doesn't exist
|
||||
AutoCreateDirs bool `yaml:"auto_create_dirs" json:"auto_create_dirs"`
|
||||
|
||||
@@ -133,9 +130,6 @@ type AuthConfig struct {
|
||||
// Require authentication for OpenAI compatible inference endpoints
|
||||
RequireInferenceAuth bool `yaml:"require_inference_auth" json:"require_inference_auth"`
|
||||
|
||||
// List of keys for OpenAI compatible inference endpoints
|
||||
InferenceKeys []string `yaml:"inference_keys" json:"inference_keys"`
|
||||
|
||||
// Require authentication for management endpoints
|
||||
RequireManagementAuth bool `yaml:"require_management_auth" json:"require_management_auth"`
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"llamactl/pkg/backends"
|
||||
"llamactl/pkg/config"
|
||||
)
|
||||
|
||||
@@ -117,6 +118,14 @@ func (i *Instance) WaitForHealthy(timeout int) error {
|
||||
return i.process.waitForHealthy(timeout)
|
||||
}
|
||||
|
||||
func (i *Instance) GetBackendType() backends.BackendType {
|
||||
opts := i.GetOptions()
|
||||
if opts == nil {
|
||||
return backends.BackendTypeUnknown
|
||||
}
|
||||
return opts.BackendOptions.BackendType
|
||||
}
|
||||
|
||||
// GetOptions returns the current options
|
||||
func (i *Instance) GetOptions() *Options {
|
||||
if i.options == nil {
|
||||
|
||||
@@ -19,7 +19,7 @@ type InstanceManager interface {
|
||||
UpdateInstance(name string, options *instance.Options) (*instance.Instance, error)
|
||||
DeleteInstance(name string) error
|
||||
StartInstance(name string) (*instance.Instance, error)
|
||||
IsMaxRunningInstancesReached() bool
|
||||
AtMaxRunning() bool
|
||||
StopInstance(name string) (*instance.Instance, error)
|
||||
EvictLRUInstance() error
|
||||
RestartInstance(name string) (*instance.Instance, error)
|
||||
|
||||
@@ -202,7 +202,6 @@ func createTestAppConfig(instancesDir string) *config.AppConfig {
|
||||
},
|
||||
Instances: config.InstancesConfig{
|
||||
PortRange: [2]int{8000, 9000},
|
||||
InstancesDir: instancesDir,
|
||||
MaxInstances: 10,
|
||||
MaxRunningInstances: 10,
|
||||
DefaultAutoRestart: true,
|
||||
|
||||
@@ -383,7 +383,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
|
||||
}
|
||||
|
||||
// Check max running instances limit for local instances only
|
||||
if im.IsMaxRunningInstancesReached() {
|
||||
if im.AtMaxRunning() {
|
||||
return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.globalConfig.Instances.MaxRunningInstances))
|
||||
}
|
||||
|
||||
@@ -399,7 +399,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
|
||||
return inst, nil
|
||||
}
|
||||
|
||||
func (im *instanceManager) IsMaxRunningInstancesReached() bool {
|
||||
func (im *instanceManager) AtMaxRunning() bool {
|
||||
if im.globalConfig.Instances.MaxRunningInstances == -1 {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -38,7 +38,6 @@ func TestCreateInstance_FailsWithDuplicateName(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCreateInstance_FailsWhenMaxInstancesReached(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
appConfig := &config.AppConfig{
|
||||
Backends: config.BackendConfig{
|
||||
LlamaCpp: config.BackendSettings{
|
||||
@@ -47,7 +46,6 @@ func TestCreateInstance_FailsWhenMaxInstancesReached(t *testing.T) {
|
||||
},
|
||||
Instances: config.InstancesConfig{
|
||||
PortRange: [2]int{8000, 9000},
|
||||
InstancesDir: tempDir,
|
||||
MaxInstances: 1, // Very low limit for testing
|
||||
TimeoutCheckInterval: 5,
|
||||
},
|
||||
|
||||
@@ -96,7 +96,7 @@ func (h *Handler) ensureInstanceRunning(inst *instance.Instance) error {
|
||||
return fmt.Errorf("instance is not running and on-demand start is not enabled")
|
||||
}
|
||||
|
||||
if h.InstanceManager.IsMaxRunningInstancesReached() {
|
||||
if h.InstanceManager.AtMaxRunning() {
|
||||
if h.cfg.Instances.EnableLRUEviction {
|
||||
err := h.InstanceManager.EvictLRUInstance()
|
||||
if err != nil {
|
||||
|
||||
@@ -306,3 +306,158 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc {
|
||||
func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc {
|
||||
return h.executeLlamaServerCommand("--list-devices", "Failed to list devices")
|
||||
}
|
||||
|
||||
// LlamaCppListModels godoc
|
||||
// @Summary List models in a llama.cpp instance
|
||||
// @Description Returns a list of models available in the specified llama.cpp instance
|
||||
// @Tags Llama.cpp
|
||||
// @Security ApiKeyAuth
|
||||
// @Produces json
|
||||
// @Param name path string true "Instance Name"
|
||||
// @Success 200 {object} map[string]any "Models list response"
|
||||
// @Failure 400 {string} string "Invalid instance"
|
||||
// @Failure 500 {string} string "Internal Server Error"
|
||||
// @Router /api/v1/llama-cpp/{name}/models [get]
|
||||
func (h *Handler) LlamaCppListModels() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
inst, err := h.validateLlamaCppInstance(r)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Check instance permissions
|
||||
if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
|
||||
writeError(w, http.StatusForbidden, "permission_denied", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Check if instance is shutting down before autostart logic
|
||||
if inst.GetStatus() == instance.ShuttingDown {
|
||||
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
|
||||
return
|
||||
}
|
||||
|
||||
if !inst.IsRemote() && !inst.IsRunning() {
|
||||
err := h.ensureInstanceRunning(inst)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Modify request path to /models for proxying
|
||||
r.URL.Path = "/models"
|
||||
|
||||
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
|
||||
err = inst.ServeHTTP(w, r)
|
||||
if err != nil {
|
||||
// Error is already handled in ServeHTTP (response written)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LlamaCppLoadModel godoc
|
||||
// @Summary Load a model in a llama.cpp instance
|
||||
// @Description Loads the specified model in the given llama.cpp instance
|
||||
// @Tags Llama.cpp
|
||||
// @Security ApiKeyAuth
|
||||
// @Produces json
|
||||
// @Param name path string true "Instance Name"
|
||||
// @Param model path string true "Model Name"
|
||||
// @Success 200 {object} map[string]string "Success message"
|
||||
// @Failure 400 {string} string "Invalid request"
|
||||
// @Failure 500 {string} string "Internal Server Error"
|
||||
// @Router /api/v1/llama-cpp/{name}/models/{model}/load [post]
|
||||
func (h *Handler) LlamaCppLoadModel() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
inst, err := h.validateLlamaCppInstance(r)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Check instance permissions
|
||||
if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
|
||||
writeError(w, http.StatusForbidden, "permission_denied", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Check if instance is shutting down before autostart logic
|
||||
if inst.GetStatus() == instance.ShuttingDown {
|
||||
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
|
||||
return
|
||||
}
|
||||
|
||||
if !inst.IsRemote() && !inst.IsRunning() {
|
||||
err := h.ensureInstanceRunning(inst)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Modify request path to /models/load for proxying
|
||||
r.URL.Path = "/models/load"
|
||||
|
||||
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
|
||||
err = inst.ServeHTTP(w, r)
|
||||
if err != nil {
|
||||
// Error is already handled in ServeHTTP (response written)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LlamaCppUnloadModel godoc
|
||||
// @Summary Unload a model in a llama.cpp instance
|
||||
// @Description Unloads the specified model in the given llama.cpp instance
|
||||
// @Tags Llama.cpp
|
||||
// @Security ApiKeyAuth
|
||||
// @Produces json
|
||||
// @Param name path string true "Instance Name"
|
||||
// @Param model path string true "Model Name"
|
||||
// @Success 200 {object} map[string]string "Success message"
|
||||
// @Failure 400 {string} string "Invalid request"
|
||||
// @Failure 500 {string} string "Internal Server Error"
|
||||
// @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post]
|
||||
func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
inst, err := h.validateLlamaCppInstance(r)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Check instance permissions
|
||||
if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
|
||||
writeError(w, http.StatusForbidden, "permission_denied", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Check if instance is shutting down before autostart logic
|
||||
if inst.GetStatus() == instance.ShuttingDown {
|
||||
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
|
||||
return
|
||||
}
|
||||
|
||||
if !inst.IsRemote() && !inst.IsRunning() {
|
||||
err := h.ensureInstanceRunning(inst)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Modify request path to /models/unload for proxying
|
||||
r.URL.Path = "/models/unload"
|
||||
|
||||
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
|
||||
err = inst.ServeHTTP(w, r)
|
||||
if err != nil {
|
||||
// Error is already handled in ServeHTTP (response written)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,10 +3,13 @@ package server
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"llamactl/pkg/backends"
|
||||
"llamactl/pkg/instance"
|
||||
"llamactl/pkg/validation"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// OpenAIListInstancesResponse represents the response structure for listing instances (models) in OpenAI-compatible format
|
||||
@@ -23,6 +26,53 @@ type OpenAIInstance struct {
|
||||
OwnedBy string `json:"owned_by"`
|
||||
}
|
||||
|
||||
// LlamaCppModel represents a model available in a llama.cpp instance
|
||||
type LlamaCppModel struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
OwnedBy string `json:"owned_by"`
|
||||
Created int64 `json:"created"`
|
||||
InCache bool `json:"in_cache"`
|
||||
Path string `json:"path"`
|
||||
Status LlamaCppModelStatus `json:"status"`
|
||||
}
|
||||
|
||||
// LlamaCppModelStatus represents the status of a model in a llama.cpp instance
|
||||
type LlamaCppModelStatus struct {
|
||||
Value string `json:"value"` // "loaded" | "loading" | "unloaded"
|
||||
Args []string `json:"args"`
|
||||
}
|
||||
|
||||
// fetchLlamaCppModels fetches models from a llama.cpp instance using the proxy
|
||||
func fetchLlamaCppModels(inst *instance.Instance) ([]LlamaCppModel, error) {
|
||||
// Create a request to the instance's /models endpoint
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s:%d/models", inst.GetHost(), inst.GetPort()), nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
// Use a custom response writer to capture the response
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Data []LlamaCppModel `json:"data"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
return result.Data, nil
|
||||
}
|
||||
|
||||
// OpenAIListInstances godoc
|
||||
// @Summary List instances in OpenAI-compatible format
|
||||
// @Description Returns a list of instances in a format compatible with OpenAI API
|
||||
@@ -40,14 +90,41 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
openaiInstances := make([]OpenAIInstance, len(instances))
|
||||
for i, inst := range instances {
|
||||
openaiInstances[i] = OpenAIInstance{
|
||||
var openaiInstances []OpenAIInstance
|
||||
|
||||
// For each llama.cpp instance, try to fetch models and add them as separate entries
|
||||
for _, inst := range instances {
|
||||
|
||||
if inst.GetBackendType() == backends.BackendTypeLlamaCpp && inst.IsRunning() {
|
||||
// Try to fetch models from the instance
|
||||
models, err := fetchLlamaCppModels(inst)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, model := range models {
|
||||
openaiInstances = append(openaiInstances, OpenAIInstance{
|
||||
ID: inst.Name + "/" + model.ID,
|
||||
Object: "model",
|
||||
Created: inst.Created,
|
||||
OwnedBy: inst.Name,
|
||||
})
|
||||
}
|
||||
|
||||
if len(models) > 1 {
|
||||
// Skip adding the instance name if multiple models are present
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Add instance name as single entry (for non-llama.cpp or if model fetch failed)
|
||||
openaiInstances = append(openaiInstances, OpenAIInstance{
|
||||
ID: inst.Name,
|
||||
Object: "model",
|
||||
Created: inst.Created,
|
||||
OwnedBy: "llamactl",
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
openaiResponse := OpenAIListInstancesResponse{
|
||||
@@ -87,14 +164,28 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
modelName, ok := requestBody["model"].(string)
|
||||
if !ok || modelName == "" {
|
||||
writeError(w, http.StatusBadRequest, "invalid_request", "Instance name is required")
|
||||
reqModelName, ok := requestBody["model"].(string)
|
||||
if !ok || reqModelName == "" {
|
||||
writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse instance name and model name from <instance_name>/<model_name> format
|
||||
var instanceName string
|
||||
var modelName string
|
||||
|
||||
// Check if model name contains "/"
|
||||
if idx := strings.Index(reqModelName, "/"); idx != -1 {
|
||||
// Split into instance and model parts
|
||||
instanceName = reqModelName[:idx]
|
||||
modelName = reqModelName[idx+1:]
|
||||
} else {
|
||||
instanceName = reqModelName
|
||||
modelName = reqModelName
|
||||
}
|
||||
|
||||
// Validate instance name at the entry point
|
||||
validatedName, err := validation.ValidateInstanceName(modelName)
|
||||
validatedName, err := validation.ValidateInstanceName(instanceName)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid_instance_name", err.Error())
|
||||
return
|
||||
@@ -119,6 +210,11 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
if inst.IsRemote() {
|
||||
// Don't replace model name for remote instances
|
||||
modelName = reqModelName
|
||||
}
|
||||
|
||||
if !inst.IsRemote() && !inst.IsRunning() {
|
||||
err := h.ensureInstanceRunning(inst)
|
||||
if err != nil {
|
||||
@@ -127,6 +223,16 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// Update the request body with just the model name
|
||||
requestBody["model"] = modelName
|
||||
|
||||
// Re-marshal the updated body
|
||||
bodyBytes, err = json.Marshal(requestBody)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, "marshal_error", "Failed to update request body")
|
||||
return
|
||||
}
|
||||
|
||||
// Recreate the request body from the bytes we read
|
||||
r.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
r.ContentLength = int64(len(bodyBytes))
|
||||
|
||||
@@ -275,16 +275,3 @@ func TestAutoGenerationScenarios(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigBasedInferenceKeysDeprecationWarning(t *testing.T) {
|
||||
// Test that config-based inference keys trigger a warning (captured in logs)
|
||||
cfg := config.AuthConfig{
|
||||
InferenceKeys: []string{"sk-inference-old"},
|
||||
}
|
||||
|
||||
// Creating middleware should log a warning, but shouldn't fail
|
||||
_ = server.NewAPIAuthMiddleware(cfg, nil)
|
||||
|
||||
// If we get here without panic, the test passes
|
||||
// The warning is logged but not returned as an error
|
||||
}
|
||||
|
||||
@@ -26,9 +26,6 @@ func SetupRouter(handler *Handler) *chi.Mux {
|
||||
MaxAge: 300,
|
||||
}))
|
||||
|
||||
// Add API authentication middleware
|
||||
authMiddleware := NewAPIAuthMiddleware(handler.cfg.Auth, handler.authStore)
|
||||
|
||||
if handler.cfg.Server.EnableSwagger {
|
||||
r.Get("/swagger/*", httpSwagger.Handler(
|
||||
httpSwagger.URL("/swagger/doc.json"),
|
||||
@@ -38,8 +35,8 @@ func SetupRouter(handler *Handler) *chi.Mux {
|
||||
// Define routes
|
||||
r.Route("/api/v1", func(r chi.Router) {
|
||||
|
||||
if authMiddleware != nil && handler.cfg.Auth.RequireManagementAuth {
|
||||
r.Use(authMiddleware.ManagementAuthMiddleware())
|
||||
if handler.authMiddleware != nil && handler.cfg.Auth.RequireManagementAuth {
|
||||
r.Use(handler.authMiddleware.ManagementAuthMiddleware())
|
||||
}
|
||||
|
||||
r.Get("/version", handler.VersionHandler())
|
||||
@@ -73,6 +70,13 @@ func SetupRouter(handler *Handler) *chi.Mux {
|
||||
})
|
||||
})
|
||||
|
||||
// Llama.cpp instance-specific endpoints
|
||||
r.Route("/llama-cpp/{name}", func(r chi.Router) {
|
||||
r.Get("/models", handler.LlamaCppListModels())
|
||||
r.Post("/models/{model}/load", handler.LlamaCppLoadModel())
|
||||
r.Post("/models/{model}/unload", handler.LlamaCppUnloadModel())
|
||||
})
|
||||
|
||||
// Node management endpoints
|
||||
r.Route("/nodes", func(r chi.Router) {
|
||||
r.Get("/", handler.ListNodes()) // List all nodes
|
||||
@@ -107,8 +111,8 @@ func SetupRouter(handler *Handler) *chi.Mux {
|
||||
|
||||
r.Route("/v1", func(r chi.Router) {
|
||||
|
||||
if authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
|
||||
r.Use(authMiddleware.InferenceAuthMiddleware())
|
||||
if handler.authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
|
||||
r.Use(handler.authMiddleware.InferenceAuthMiddleware())
|
||||
}
|
||||
|
||||
r.Get("/models", handler.OpenAIListInstances()) // List instances in OpenAI-compatible format
|
||||
@@ -135,8 +139,8 @@ func SetupRouter(handler *Handler) *chi.Mux {
|
||||
// Private Routes
|
||||
r.Group(func(r chi.Router) {
|
||||
|
||||
if authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
|
||||
r.Use(authMiddleware.InferenceAuthMiddleware())
|
||||
if handler.authMiddleware != nil && handler.cfg.Auth.RequireInferenceAuth {
|
||||
r.Use(handler.authMiddleware.InferenceAuthMiddleware())
|
||||
}
|
||||
|
||||
// This handler auto starts the server if it's not running
|
||||
|
||||
@@ -4,7 +4,6 @@ Simple Python script to interact with local LLM server's OpenAI-compatible API
|
||||
"""
|
||||
|
||||
import requests
|
||||
import json
|
||||
import sys
|
||||
|
||||
# Local LLM server configuration
|
||||
|
||||
8  webui/package-lock.json  (generated)
@@ -18,7 +18,7 @@
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"date-fns": "^4.1.0",
|
||||
"lucide-react": "^0.561.0",
|
||||
"lucide-react": "^0.562.0",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"sonner": "^2.0.7",
|
||||
@@ -5754,9 +5754,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/lucide-react": {
|
||||
"version": "0.561.0",
|
||||
"resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.561.0.tgz",
|
||||
"integrity": "sha512-Y59gMY38tl4/i0qewcqohPdEbieBy7SovpBL9IFebhc2mDd8x4PZSOsiFRkpPcOq6bj1r/mjH/Rk73gSlIJP2A==",
|
||||
"version": "0.562.0",
|
||||
"resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.562.0.tgz",
|
||||
"integrity": "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==",
|
||||
"license": "ISC",
|
||||
"peerDependencies": {
|
||||
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"date-fns": "^4.1.0",
|
||||
"lucide-react": "^0.561.0",
|
||||
"lucide-react": "^0.562.0",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"sonner": "^2.0.7",
|
||||
|
||||
@@ -1,14 +1,16 @@
// ui/src/components/InstanceCard.tsx
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Badge } from "@/components/ui/badge";
import type { Instance } from "@/types/instance";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download } from "lucide-react";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react";
import LogsDialog from "@/components/LogDialog";
import ModelsDialog from "@/components/ModelsDialog";
import HealthBadge from "@/components/HealthBadge";
import BackendBadge from "@/components/BackendBadge";
import { useState } from "react";
import { useState, useEffect } from "react";
import { useInstanceHealth } from "@/hooks/useInstanceHealth";
import { instancesApi } from "@/lib/api";
import { instancesApi, llamaCppApi, type Model } from "@/lib/api";

interface InstanceCardProps {
  instance: Instance;
@@ -26,9 +28,35 @@ function InstanceCard({
  editInstance,
}: InstanceCardProps) {
  const [isLogsOpen, setIsLogsOpen] = useState(false);
  const [isModelsOpen, setIsModelsOpen] = useState(false);
  const [showAllActions, setShowAllActions] = useState(false);
  const [models, setModels] = useState<Model[]>([]);
  const health = useInstanceHealth(instance.name, instance.status);

  const running = instance.status === "running";
  const isLlamaCpp = instance.options?.backend_type === "llama_cpp";

  // Fetch models for llama.cpp instances
  useEffect(() => {
    if (!isLlamaCpp || !running) {
      setModels([]);
      return;
    }

    void (async () => {
      try {
        const fetchedModels = await llamaCppApi.getModels(instance.name);
        setModels(fetchedModels);
      } catch {
        setModels([]);
      }
    })();
  }, [instance.name, isLlamaCpp, running]);

  // Calculate model counts
  const totalModels = models.length;
  const loadedModels = models.filter(m => m.status.value === "loaded").length;

  const handleStart = () => {
    startInstance(instance.name);
  };
@@ -53,6 +81,10 @@ function InstanceCard({
    setIsLogsOpen(true);
  };

  const handleModels = () => {
    setIsModelsOpen(true);
  };

  const handleExport = () => {
    void (async () => {
      try {
@@ -83,8 +115,6 @@ function InstanceCard({
    })();
  };

  const running = instance.status === "running";

  return (
    <>
      <Card className="hover:shadow-md transition-shadow">
@@ -99,6 +129,12 @@ function InstanceCard({
          <div className="flex items-center gap-2 flex-wrap">
            <BackendBadge backend={instance.options?.backend_type} docker={instance.options?.docker_enabled} />
            {running && <HealthBadge health={health} />}
            {isLlamaCpp && running && totalModels > 0 && (
              <Badge variant="secondary" className="text-xs">
                <Boxes className="h-3 w-3 mr-1" />
                {loadedModels}/{totalModels} models
              </Badge>
            )}
          </div>
        </div>
      </CardHeader>
@@ -149,26 +185,37 @@ function InstanceCard({

        {/* Secondary actions - collapsible */}
        {showAllActions && (
          <div className="flex items-center gap-2 pt-2 border-t border-border">
          <div className="flex items-center gap-2 pt-2 border-t border-border flex-wrap">
            <Button
              size="sm"
              variant="outline"
              onClick={handleLogs}
              title="View logs"
              data-testid="view-logs-button"
              className="flex-1"
            >
              <FileText className="h-4 w-4 mr-1" />
              Logs
            </Button>

            {isLlamaCpp && totalModels > 1 && (
              <Button
                size="sm"
                variant="outline"
                onClick={handleModels}
                title="Manage models"
                data-testid="manage-models-button"
              >
                <Boxes className="h-4 w-4 mr-1" />
                Models
              </Button>
            )}

            <Button
              size="sm"
              variant="outline"
              onClick={handleExport}
              title="Export instance"
              data-testid="export-instance-button"
              className="flex-1"
            >
              <Download className="h-4 w-4 mr-1" />
              Export
@@ -195,6 +242,13 @@ function InstanceCard({
        instanceName={instance.name}
        isRunning={running}
      />

      <ModelsDialog
        open={isModelsOpen}
        onOpenChange={setIsModelsOpen}
        instanceName={instance.name}
        isRunning={running}
      />
    </>
  );
}
303
webui/src/components/ModelsDialog.tsx
Normal file
@@ -0,0 +1,303 @@
import React, { useState, useEffect } from 'react'
import { Button } from '@/components/ui/button'
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogHeader,
  DialogTitle,
} from '@/components/ui/dialog'
import {
  Table,
  TableBody,
  TableCell,
  TableHead,
  TableHeader,
  TableRow,
} from '@/components/ui/table'
import { Badge } from '@/components/ui/badge'
import { llamaCppApi } from '@/lib/api'
import { RefreshCw, Loader2, AlertCircle } from 'lucide-react'

interface ModelsDialogProps {
  open: boolean
  onOpenChange: (open: boolean) => void
  instanceName: string
  isRunning: boolean
}

interface Model {
  id: string
  object: string
  owned_by: string
  created: number
  in_cache: boolean
  path: string
  status: {
    value: string // "loaded" | "loading" | "unloaded"
    args: string[]
  }
}

const StatusIcon: React.FC<{ status: string }> = ({ status }) => {
  switch (status) {
    case 'loaded':
      return (
        <div className="h-2 w-2 rounded-full bg-green-500" />
      )
    case 'loading':
      return (
        <Loader2
          className="h-3 w-3 animate-spin text-yellow-500"
        />
      )
    case 'unloaded':
      return (
        <div className="h-2 w-2 rounded-full bg-gray-400" />
      )
    default:
      return null
  }
}

const ModelsDialog: React.FC<ModelsDialogProps> = ({
  open,
  onOpenChange,
  instanceName,
  isRunning,
}) => {
  const [models, setModels] = useState<Model[]>([])
  const [loading, setLoading] = useState(false)
  const [error, setError] = useState<string | null>(null)
  const [loadingModels, setLoadingModels] = useState<Set<string>>(new Set())

  // Fetch models function
  const fetchModels = React.useCallback(async () => {
    if (!instanceName || !isRunning) return

    setLoading(true)
    setError(null)

    try {
      const response = await llamaCppApi.getModels(instanceName)
      setModels(response)
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to fetch models')
    } finally {
      setLoading(false)
    }
  }, [instanceName, isRunning])

  // Fetch models when dialog opens
  useEffect(() => {
    if (!open || !isRunning) return

    // Initial fetch
    void fetchModels()
  }, [open, isRunning, fetchModels])

  // Auto-refresh only when models are loading
  useEffect(() => {
    if (!open || !isRunning) return

    // Check if any model is in loading state
    const hasLoadingModel = models.some(m => m.status.value === 'loading')

    if (!hasLoadingModel) return

    // Poll every 2 seconds when there's a loading model
    const interval = setInterval(() => {
      void fetchModels()
    }, 2000)

    return () => clearInterval(interval)
  }, [open, isRunning, models, fetchModels])

  // Load model
  const loadModel = async (modelName: string) => {
    setLoadingModels((prev) => new Set(prev).add(modelName))
    setError(null)

    try {
      await llamaCppApi.loadModel(instanceName, modelName)
      // Wait a bit for the backend to process the load
      await new Promise(resolve => setTimeout(resolve, 500))
      // Refresh models list after loading
      await fetchModels()
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to load model')
    } finally {
      setLoadingModels((prev) => {
        const newSet = new Set(prev)
        newSet.delete(modelName)
        return newSet
      })
    }
  }

  // Unload model
  const unloadModel = async (modelName: string) => {
    setLoadingModels((prev) => new Set(prev).add(modelName))
    setError(null)

    try {
      await llamaCppApi.unloadModel(instanceName, modelName)
      // Wait a bit for the backend to process the unload
      await new Promise(resolve => setTimeout(resolve, 500))
      // Refresh models list after unloading
      await fetchModels()
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to unload model')
    } finally {
      setLoadingModels((prev) => {
        const newSet = new Set(prev)
        newSet.delete(modelName)
        return newSet
      })
    }
  }

  return (
    <Dialog open={open} onOpenChange={onOpenChange}>
      <DialogContent className="sm:max-w-4xl max-w-[calc(100%-2rem)] max-h-[80vh] flex flex-col">
        <DialogHeader>
          <div className="flex items-center justify-between">
            <div>
              <DialogTitle className="flex items-center gap-2">
                Models: {instanceName}
                <Badge variant={isRunning ? 'default' : 'secondary'}>
                  {isRunning ? 'Running' : 'Stopped'}
                </Badge>
              </DialogTitle>
              <DialogDescription>
                Manage models in this llama.cpp instance
              </DialogDescription>
            </div>

            <Button
              variant="outline"
              size="sm"
              onClick={() => void fetchModels()}
              disabled={loading || !isRunning}
            >
              {loading ? (
                <Loader2 className="h-4 w-4 animate-spin" />
              ) : (
                <RefreshCw className="h-4 w-4" />
              )}
            </Button>
          </div>
        </DialogHeader>

        {/* Error Display */}
        {error && (
          <div className="flex items-center gap-2 p-3 bg-destructive/10 border border-destructive/20 rounded-lg">
            <AlertCircle className="h-4 w-4 text-destructive" />
            <span className="text-sm text-destructive">{error}</span>
          </div>
        )}

        {/* Models Table */}
        <div className="flex-1 flex flex-col min-h-0 overflow-auto">
          {!isRunning ? (
            <div className="flex items-center justify-center h-full text-muted-foreground">
              Instance is not running
            </div>
          ) : loading && models.length === 0 ? (
            <div className="flex items-center justify-center h-full">
              <Loader2 className="h-6 w-6 animate-spin text-muted-foreground" />
              <span className="ml-2 text-muted-foreground">
                Loading models...
              </span>
            </div>
          ) : models.length === 0 ? (
            <div className="flex items-center justify-center h-full text-muted-foreground">
              No models found
            </div>
          ) : (
            <Table>
              <TableHeader>
                <TableRow>
                  <TableHead>Model</TableHead>
                  <TableHead>Status</TableHead>
                  <TableHead className="text-right">Actions</TableHead>
                </TableRow>
              </TableHeader>
              <TableBody>
                {models.map((model) => {
                  const isLoading = loadingModels.has(model.id)
                  const isModelLoading = model.status.value === 'loading'

                  return (
                    <TableRow key={model.id}>
                      <TableCell className="font-mono text-sm">
                        {model.id}
                      </TableCell>
                      <TableCell>
                        <div className="flex items-center gap-2">
                          <StatusIcon status={model.status.value} />
                          <span className="text-sm capitalize">
                            {model.status.value}
                          </span>
                        </div>
                      </TableCell>
                      <TableCell className="text-right">
                        {model.status.value === 'loaded' ? (
                          <Button
                            size="sm"
                            variant="outline"
                            onClick={() => { void unloadModel(model.id) }}
                            disabled={!isRunning || isLoading || isModelLoading}
                          >
                            {isLoading ? (
                              <>
                                <Loader2 className="h-3 w-3 animate-spin mr-1" />
                                Unloading...
                              </>
                            ) : (
                              'Unload'
                            )}
                          </Button>
                        ) : model.status.value === 'unloaded' ? (
                          <Button
                            size="sm"
                            variant="default"
                            onClick={() => { void loadModel(model.id) }}
                            disabled={!isRunning || isLoading || isModelLoading}
                          >
                            {isLoading ? (
                              <>
                                <Loader2 className="h-3 w-3 animate-spin mr-1" />
                                Loading...
                              </>
                            ) : (
                              'Load'
                            )}
                          </Button>
                        ) : (
                          <Button size="sm" variant="ghost" disabled>
                            Loading...
                          </Button>
                        )}
                      </TableCell>
                    </TableRow>
                  )
                })}
              </TableBody>
            </Table>
          )}
        </div>

        {/* Auto-refresh indicator - only shown when models are loading */}
        {isRunning && models.some(m => m.status.value === 'loading') && (
          <div className="flex items-center gap-2 text-sm text-muted-foreground">
            <div className="w-2 h-2 bg-yellow-500 rounded-full animate-pulse"></div>
            Auto-refreshing while models are loading
          </div>
        )}
      </DialogContent>
    </Dialog>
  )
}

export default ModelsDialog
117
webui/src/components/ui/table.tsx
Normal file
@@ -0,0 +1,117 @@
import * as React from "react"

import { cn } from "@/lib/utils"

const Table = React.forwardRef<
  HTMLTableElement,
  React.HTMLAttributes<HTMLTableElement>
>(({ className, ...props }, ref) => (
  <div className="relative w-full overflow-auto">
    <table
      ref={ref}
      className={cn("w-full caption-bottom text-sm", className)}
      {...props}
    />
  </div>
))
Table.displayName = "Table"

const TableHeader = React.forwardRef<
  HTMLTableSectionElement,
  React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
  <thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
))
TableHeader.displayName = "TableHeader"

const TableBody = React.forwardRef<
  HTMLTableSectionElement,
  React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
  <tbody
    ref={ref}
    className={cn("[&_tr:last-child]:border-0", className)}
    {...props}
  />
))
TableBody.displayName = "TableBody"

const TableFooter = React.forwardRef<
  HTMLTableSectionElement,
  React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
  <tfoot
    ref={ref}
    className={cn(
      "border-t bg-muted/50 font-medium [&>tr]:last:border-b-0",
      className
    )}
    {...props}
  />
))
TableFooter.displayName = "TableFooter"

const TableRow = React.forwardRef<
  HTMLTableRowElement,
  React.HTMLAttributes<HTMLTableRowElement>
>(({ className, ...props }, ref) => (
  <tr
    ref={ref}
    className={cn(
      "border-b transition-colors hover:bg-muted/50 data-[state=selected]:bg-muted",
      className
    )}
    {...props}
  />
))
TableRow.displayName = "TableRow"

const TableHead = React.forwardRef<
  HTMLTableCellElement,
  React.ThHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
  <th
    ref={ref}
    className={cn(
      "h-12 px-4 text-left align-middle font-medium text-muted-foreground [&:has([role=checkbox])]:pr-0",
      className
    )}
    {...props}
  />
))
TableHead.displayName = "TableHead"

const TableCell = React.forwardRef<
  HTMLTableCellElement,
  React.TdHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
  <td
    ref={ref}
    className={cn("p-4 align-middle [&:has([role=checkbox])]:pr-0", className)}
    {...props}
  />
))
TableCell.displayName = "TableCell"

const TableCaption = React.forwardRef<
  HTMLTableCaptionElement,
  React.HTMLAttributes<HTMLTableCaptionElement>
>(({ className, ...props }, ref) => (
  <caption
    ref={ref}
    className={cn("mt-4 text-sm text-muted-foreground", className)}
    {...props}
  />
))
TableCaption.displayName = "TableCaption"

export {
  Table,
  TableHeader,
  TableBody,
  TableFooter,
  TableHead,
  TableRow,
  TableCell,
  TableCaption,
}
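Note: a minimal sketch of how these table primitives compose, outside this commit. The component, column names, and row values below are illustrative only; the imports mirror the exports added above.

import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table";

// Illustrative two-column table built from the primitives exported above.
export function ExampleTable() {
  return (
    <Table>
      <TableHeader>
        <TableRow>
          <TableHead>Model</TableHead>
          <TableHead>Status</TableHead>
        </TableRow>
      </TableHeader>
      <TableBody>
        <TableRow>
          <TableCell>example-model</TableCell>
          <TableCell>loaded</TableCell>
        </TableRow>
      </TableBody>
    </Table>
  );
}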
@@ -205,3 +205,53 @@ export const apiKeysApi = {
  getPermissions: (id: number) =>
    apiCall<KeyPermissionResponse[]>(`/auth/keys/${id}/permissions`),
};

// Llama.cpp model management types
export interface Model {
  id: string;
  object: string;
  owned_by: string;
  created: number;
  in_cache: boolean;
  path: string;
  status: {
    value: string; // "loaded" | "loading" | "unloaded"
    args: string[];
  };
}

export interface ModelsListResponse {
  object: string;
  data: Model[];
}

// Llama.cpp model management API functions
export const llamaCppApi = {
  // GET /llama-cpp/{name}/models
  getModels: async (instanceName: string): Promise<Model[]> => {
    const response = await apiCall<ModelsListResponse>(
      `/llama-cpp/${encodeURIComponent(instanceName)}/models`
    );
    return response.data;
  },

  // POST /llama-cpp/{name}/models/{model}/load
  loadModel: (instanceName: string, modelName: string) =>
    apiCall<{ success: boolean }>(
      `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`,
      {
        method: "POST",
        body: JSON.stringify({ model: modelName }),
      }
    ),

  // POST /llama-cpp/{name}/models/{model}/unload
  unloadModel: (instanceName: string, modelName: string) =>
    apiCall<{ success: boolean }>(
      `/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`,
      {
        method: "POST",
        body: JSON.stringify({ model: modelName }),
      }
    ),
};
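Note: a minimal usage sketch of the new client functions, not part of this commit. The instance name "main" and the helper function name are illustrative; the calls and types match the llamaCppApi added above.

import { llamaCppApi } from "@/lib/api";

// List the models served by a llama.cpp instance and load the first unloaded one.
async function loadFirstIdleModel(instanceName: string) {
  const models = await llamaCppApi.getModels(instanceName);
  const idle = models.find((m) => m.status.value === "unloaded");
  if (idle) {
    await llamaCppApi.loadModel(instanceName, idle.id);
  }
}

void loadFirstIdleModel("main");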
@@ -30,7 +30,6 @@ export interface ServerConfig {

export interface InstancesConfig {
  port_range: [number, number]
  configs_dir: string
  logs_dir: string
  auto_create_dirs: boolean
  max_instances: number
@@ -53,7 +52,6 @@ export interface DatabaseConfig {

export interface AuthConfig {
  require_inference_auth: boolean
  inference_keys: string[] // Will be empty in sanitized response
  require_management_auth: boolean
  management_keys: string[] // Will be empty in sanitized response
}