3 Commits

13 changed files with 867 additions and 17 deletions

View File

@@ -14,6 +14,7 @@ const (
BackendTypeLlamaCpp BackendType = "llama_cpp"
BackendTypeMlxLm BackendType = "mlx_lm"
BackendTypeVllm BackendType = "vllm"
BackendTypeUnknown BackendType = "unknown"
)
type backend interface {

View File

@@ -7,6 +7,7 @@ import (
"net/http"
"time"
"llamactl/pkg/backends"
"llamactl/pkg/config"
)
@@ -117,6 +118,14 @@ func (i *Instance) WaitForHealthy(timeout int) error {
return i.process.waitForHealthy(timeout)
}
func (i *Instance) GetBackendType() backends.BackendType {
opts := i.GetOptions()
if opts == nil {
return backends.BackendTypeUnknown
}
return opts.BackendOptions.BackendType
}
// GetOptions returns the current options
func (i *Instance) GetOptions() *Options {
if i.options == nil {

View File

@@ -19,7 +19,7 @@ type InstanceManager interface {
UpdateInstance(name string, options *instance.Options) (*instance.Instance, error)
DeleteInstance(name string) error
StartInstance(name string) (*instance.Instance, error)
IsMaxRunningInstancesReached() bool
AtMaxRunning() bool
StopInstance(name string) (*instance.Instance, error)
EvictLRUInstance() error
RestartInstance(name string) (*instance.Instance, error)

View File

@@ -383,7 +383,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
}
// Check max running instances limit for local instances only
if im.IsMaxRunningInstancesReached() {
if im.AtMaxRunning() {
return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.globalConfig.Instances.MaxRunningInstances))
}
@@ -399,7 +399,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
return inst, nil
}
func (im *instanceManager) IsMaxRunningInstancesReached() bool {
func (im *instanceManager) AtMaxRunning() bool {
if im.globalConfig.Instances.MaxRunningInstances == -1 {
return false
}
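The hunk above only shows the rename and the first guard: a MaxRunningInstances of -1 disables the limit. A standalone sketch of that contract, with the running count passed in explicitly (the real method derives it from the manager's own state, which is not shown here):
// atMaxRunning is a sketch of the limit semantics, not the actual method body.
// A configured limit of -1 means "unlimited"; otherwise the current number of
// running local instances is compared against the limit.
func atMaxRunning(maxRunning, runningCount int) bool {
	if maxRunning == -1 {
		return false
	}
	return runningCount >= maxRunning
}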

View File

@@ -96,7 +96,7 @@ func (h *Handler) ensureInstanceRunning(inst *instance.Instance) error {
return fmt.Errorf("instance is not running and on-demand start is not enabled")
}
if h.InstanceManager.IsMaxRunningInstancesReached() {
if h.InstanceManager.AtMaxRunning() {
if h.cfg.Instances.EnableLRUEviction {
err := h.InstanceManager.EvictLRUInstance()
if err != nil {

View File

@@ -306,3 +306,158 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc {
func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc {
return h.executeLlamaServerCommand("--list-devices", "Failed to list devices")
}
// LlamaCppListModels godoc
// @Summary List models in a llama.cpp instance
// @Description Returns a list of models available in the specified llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produce json
// @Param name path string true "Instance Name"
// @Success 200 {object} map[string]any "Models list response"
// @Failure 400 {string} string "Invalid instance"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models [get]
func (h *Handler) LlamaCppListModels() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
inst, err := h.validateLlamaCppInstance(r)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
return
}
// Check instance permissions
if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
writeError(w, http.StatusForbidden, "permission_denied", err.Error())
return
}
// Check if instance is shutting down before autostart logic
if inst.GetStatus() == instance.ShuttingDown {
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
return
}
if !inst.IsRemote() && !inst.IsRunning() {
err := h.ensureInstanceRunning(inst)
if err != nil {
writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
return
}
}
// Modify request path to /models for proxying
r.URL.Path = "/models"
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}
// LlamaCppLoadModel godoc
// @Summary Load a model in a llama.cpp instance
// @Description Loads the specified model in the given llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produce json
// @Param name path string true "Instance Name"
// @Param model path string true "Model Name"
// @Success 200 {object} map[string]string "Success message"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models/{model}/load [post]
func (h *Handler) LlamaCppLoadModel() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
inst, err := h.validateLlamaCppInstance(r)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
return
}
// Check instance permissions
if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
writeError(w, http.StatusForbidden, "permission_denied", err.Error())
return
}
// Check if instance is shutting down before autostart logic
if inst.GetStatus() == instance.ShuttingDown {
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
return
}
if !inst.IsRemote() && !inst.IsRunning() {
err := h.ensureInstanceRunning(inst)
if err != nil {
writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
return
}
}
// Modify request path to /models/load for proxying
r.URL.Path = "/models/load"
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}
// LlamaCppUnloadModel godoc
// @Summary Unload a model in a llama.cpp instance
// @Description Unloads the specified model in the given llama.cpp instance
// @Tags Llama.cpp
// @Security ApiKeyAuth
// @Produce json
// @Param name path string true "Instance Name"
// @Param model path string true "Model Name"
// @Success 200 {object} map[string]string "Success message"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Internal Server Error"
// @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post]
func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
inst, err := h.validateLlamaCppInstance(r)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
return
}
// Check instance permissions
if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
writeError(w, http.StatusForbidden, "permission_denied", err.Error())
return
}
// Check if instance is shutting down before autostart logic
if inst.GetStatus() == instance.ShuttingDown {
writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
return
}
if !inst.IsRemote() && !inst.IsRunning() {
err := h.ensureInstanceRunning(inst)
if err != nil {
writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
return
}
}
// Modify request path to /models/unload for proxying
r.URL.Path = "/models/unload"
// Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
err = inst.ServeHTTP(w, r)
if err != nil {
// Error is already handled in ServeHTTP (response written)
return
}
}
}
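Together with the routes registered below, these handlers expose per-model management for llama.cpp instances over the management API. A minimal Go client sketch against the new list endpoint (base URL, instance name, and API key are placeholders; the bearer-token header mirrors the ApiKeyAuth scheme declared in the annotations):
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholders: adjust base URL, instance name, and key for your deployment.
	base := "http://localhost:8080/api/v1"
	name := "my-llama"

	req, err := http.NewRequest(http.MethodGet, base+"/llama-cpp/"+name+"/models", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <management-key>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // raw JSON proxied from the instance's /models endpoint
}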

View File

@@ -3,10 +3,13 @@ package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"llamactl/pkg/backends"
"llamactl/pkg/instance"
"llamactl/pkg/validation"
"net/http"
"strings"
)
// OpenAIListInstancesResponse represents the response structure for listing instances (models) in OpenAI-compatible format
@@ -23,6 +26,53 @@ type OpenAIInstance struct {
OwnedBy string `json:"owned_by"`
}
// LlamaCppModel represents a model available in a llama.cpp instance
type LlamaCppModel struct {
ID string `json:"id"`
Object string `json:"object"`
OwnedBy string `json:"owned_by"`
Created int64 `json:"created"`
InCache bool `json:"in_cache"`
Path string `json:"path"`
Status LlamaCppModelStatus `json:"status"`
}
// LlamaCppModelStatus represents the status of a model in a llama.cpp instance
type LlamaCppModelStatus struct {
Value string `json:"value"` // "loaded" | "loading" | "unloaded"
Args []string `json:"args"`
}
// fetchLlamaCppModels fetches models directly from a llama.cpp instance's /models endpoint
func fetchLlamaCppModels(inst *instance.Instance) ([]LlamaCppModel, error) {
// Create a request to the instance's /models endpoint
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s:%d/models", inst.GetHost(), inst.GetPort()), nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
// Execute the request directly against the instance
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result struct {
Data []LlamaCppModel `json:"data"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
return result.Data, nil
}
// OpenAIListInstances godoc
// @Summary List instances in OpenAI-compatible format
// @Description Returns a list of instances in a format compatible with OpenAI API
@@ -40,14 +90,41 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
return
}
openaiInstances := make([]OpenAIInstance, len(instances))
for i, inst := range instances {
openaiInstances[i] = OpenAIInstance{
var openaiInstances []OpenAIInstance
// For each llama.cpp instance, try to fetch models and add them as separate entries
for _, inst := range instances {
if inst.GetBackendType() == backends.BackendTypeLlamaCpp && inst.IsRunning() {
// Try to fetch models from the instance
models, err := fetchLlamaCppModels(inst)
if err != nil {
fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err)
continue
}
for _, model := range models {
openaiInstances = append(openaiInstances, OpenAIInstance{
ID: inst.Name + "/" + model.ID,
Object: "model",
Created: inst.Created,
OwnedBy: inst.Name,
})
}
if len(models) > 1 {
// Skip adding the instance name if multiple models are present
continue
}
}
// Add the instance name as a single entry (non-llama.cpp backends, failed model fetches, or llama.cpp instances with at most one model)
openaiInstances = append(openaiInstances, OpenAIInstance{
ID: inst.Name,
Object: "model",
Created: inst.Created,
OwnedBy: "llamactl",
}
})
}
openaiResponse := OpenAIListInstancesResponse{
@@ -87,14 +164,28 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
return
}
modelName, ok := requestBody["model"].(string)
if !ok || modelName == "" {
writeError(w, http.StatusBadRequest, "invalid_request", "Instance name is required")
reqModelName, ok := requestBody["model"].(string)
if !ok || reqModelName == "" {
writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required")
return
}
// Parse instance name and model name from <instance_name>/<model_name> format
var instanceName string
var modelName string
// Check if model name contains "/"
if idx := strings.Index(reqModelName, "/"); idx != -1 {
// Split into instance and model parts
instanceName = reqModelName[:idx]
modelName = reqModelName[idx+1:]
} else {
instanceName = reqModelName
modelName = reqModelName
}
// Validate instance name at the entry point
validatedName, err := validation.ValidateInstanceName(modelName)
validatedName, err := validation.ValidateInstanceName(instanceName)
if err != nil {
writeError(w, http.StatusBadRequest, "invalid_instance_name", err.Error())
return
@@ -119,6 +210,11 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
return
}
if inst.IsRemote() {
// Don't replace model name for remote instances
modelName = reqModelName
}
if !inst.IsRemote() && !inst.IsRunning() {
err := h.ensureInstanceRunning(inst)
if err != nil {
@@ -127,6 +223,16 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
}
}
// Update the request body with just the model name
requestBody["model"] = modelName
// Re-marshal the updated body
bodyBytes, err = json.Marshal(requestBody)
if err != nil {
writeError(w, http.StatusInternalServerError, "marshal_error", "Failed to update request body")
return
}
// Recreate the request body from the bytes we read
r.Body = io.NopCloser(bytes.NewReader(bodyBytes))
r.ContentLength = int64(len(bodyBytes))
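The proxy now accepts model identifiers in <instance_name>/<model_name> form and rewrites the body so the upstream instance only sees the bare model name (remote instances receive the identifier unchanged). A self-contained sketch of the split rule used above, with placeholder names in main — only the first "/" is significant, and a bare name doubles as both instance and model:
package main

import (
	"fmt"
	"strings"
)

// splitModelRef mirrors the parsing in OpenAIProxy: everything before the
// first "/" is the instance name, the remainder is the model name; a bare
// name is used for both.
func splitModelRef(ref string) (instanceName, modelName string) {
	if idx := strings.Index(ref, "/"); idx != -1 {
		return ref[:idx], ref[idx+1:]
	}
	return ref, ref
}

func main() {
	fmt.Println(splitModelRef("my-llama/qwen2.5-7b-instruct")) // my-llama qwen2.5-7b-instruct
	fmt.Println(splitModelRef("my-llama"))                     // my-llama my-llama
}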

View File

@@ -73,6 +73,13 @@ func SetupRouter(handler *Handler) *chi.Mux {
})
})
// Llama.cpp instance-specific endpoints
r.Route("/llama-cpp/{name}", func(r chi.Router) {
r.Get("/models", handler.LlamaCppListModels())
r.Post("/models/{model}/load", handler.LlamaCppLoadModel())
r.Post("/models/{model}/unload", handler.LlamaCppUnloadModel())
})
// Node management endpoints
r.Route("/nodes", func(r chi.Router) {
r.Get("/", handler.ListNodes()) // List all nodes

test_llm.py Normal file
View File

@@ -0,0 +1,74 @@
#!/usr/bin/env python3
"""
Simple Python script to interact with a local LLM server's OpenAI-compatible API
"""
import requests
# Local LLM server configuration
LLM_SERVER_URL = "http://localhost:8080/v1/chat/completions"
MODEL_NAME = "proxy-test" # Default model name, can be changed based on your setup
def send_message(message, model=MODEL_NAME, temperature=0.7, max_tokens=1000):
"""
Send a message to the local LLM server API
Args:
message (str): The message to send
model (str): Model name (depends on your LLM server setup)
temperature (float): Controls randomness (0.0 to 1.0)
max_tokens (int): Maximum tokens in response
Returns:
str: The AI response text
"""
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer test-inf"
}
data = {
"model": model,
"messages": [
{
"role": "user",
"content": message
}
],
"temperature": temperature,
"max_tokens": max_tokens,
"stream": False
}
response = requests.post(LLM_SERVER_URL, headers=headers, json=data, timeout=60)
response.raise_for_status()
result = response.json()
return result["choices"][0]["message"]["content"]
def main():
"""Run in interactive mode for continuous conversation"""
print("Local LLM Chat Client")
print("-" * 40)
while True:
try:
user_input = input("\nYou: ").strip()
if not user_input:
continue
print("AI: ", end="", flush=True)
response = send_message(user_input)
print(response)
except requests.exceptions.RequestException as e:
print(f"\nRequest failed: {e}")
except KeyboardInterrupt:
print("\nGoodbye!")
break
except EOFError:
print("\nGoodbye!")
break
if __name__ == "__main__":
main()

View File

@@ -2,13 +2,14 @@
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import type { Instance } from "@/types/instance";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download } from "lucide-react";
import { Edit, FileText, Play, Square, Trash2, MoreHorizontal, Download, Boxes } from "lucide-react";
import LogsDialog from "@/components/LogDialog";
import ModelsDialog from "@/components/ModelsDialog";
import HealthBadge from "@/components/HealthBadge";
import BackendBadge from "@/components/BackendBadge";
import { useState } from "react";
import { useState, useEffect } from "react";
import { useInstanceHealth } from "@/hooks/useInstanceHealth";
import { instancesApi } from "@/lib/api";
import { instancesApi, llamaCppApi } from "@/lib/api";
interface InstanceCardProps {
instance: Instance;
@@ -26,9 +27,31 @@ function InstanceCard({
editInstance,
}: InstanceCardProps) {
const [isLogsOpen, setIsLogsOpen] = useState(false);
const [isModelsOpen, setIsModelsOpen] = useState(false);
const [showAllActions, setShowAllActions] = useState(false);
const [modelCount, setModelCount] = useState(0);
const health = useInstanceHealth(instance.name, instance.status);
const running = instance.status === "running";
const isLlamaCpp = instance.options?.backend_type === "llama_cpp";
// Fetch model count for llama.cpp instances
useEffect(() => {
if (!isLlamaCpp || !running) {
setModelCount(0);
return;
}
void (async () => {
try {
const models = await llamaCppApi.getModels(instance.name);
setModelCount(models.length);
} catch {
setModelCount(0);
}
})();
}, [instance.name, isLlamaCpp, running]);
const handleStart = () => {
startInstance(instance.name);
};
@@ -53,6 +76,10 @@ function InstanceCard({
setIsLogsOpen(true);
};
const handleModels = () => {
setIsModelsOpen(true);
};
const handleExport = () => {
void (async () => {
try {
@@ -83,8 +110,6 @@ function InstanceCard({
})();
};
const running = instance.status === "running";
return (
<>
<Card className="hover:shadow-md transition-shadow">
@@ -162,6 +187,20 @@ function InstanceCard({
Logs
</Button>
{isLlamaCpp && modelCount > 1 && (
<Button
size="sm"
variant="outline"
onClick={handleModels}
title="Manage models"
data-testid="manage-models-button"
className="flex-1"
>
<Boxes className="h-4 w-4 mr-1" />
Models ({modelCount})
</Button>
)}
<Button
size="sm"
variant="outline"
@@ -195,6 +234,13 @@ function InstanceCard({
instanceName={instance.name}
isRunning={running}
/>
<ModelsDialog
open={isModelsOpen}
onOpenChange={setIsModelsOpen}
instanceName={instance.name}
isRunning={running}
/>
</>
);
}

View File

@@ -0,0 +1,287 @@
import React, { useState, useEffect } from 'react'
import { Button } from '@/components/ui/button'
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
} from '@/components/ui/dialog'
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from '@/components/ui/table'
import { Badge } from '@/components/ui/badge'
import { llamaCppApi } from '@/lib/api'
import { RefreshCw, Loader2, AlertCircle } from 'lucide-react'
interface ModelsDialogProps {
open: boolean
onOpenChange: (open: boolean) => void
instanceName: string
isRunning: boolean
}
interface Model {
id: string
object: string
owned_by: string
created: number
in_cache: boolean
path: string
status: {
value: string // "loaded" | "loading" | "unloaded"
args: string[]
}
}
const StatusIcon: React.FC<{ status: string }> = ({ status }) => {
switch (status) {
case 'loaded':
return (
<div className="h-2 w-2 rounded-full bg-green-500" />
)
case 'loading':
return (
<Loader2
className="h-3 w-3 animate-spin text-yellow-500"
/>
)
case 'unloaded':
return (
<div className="h-2 w-2 rounded-full bg-gray-400" />
)
default:
return null
}
}
const ModelsDialog: React.FC<ModelsDialogProps> = ({
open,
onOpenChange,
instanceName,
isRunning,
}) => {
const [models, setModels] = useState<Model[]>([])
const [loading, setLoading] = useState(false)
const [error, setError] = useState<string | null>(null)
const [loadingModels, setLoadingModels] = useState<Set<string>>(new Set())
// Fetch models function
const fetchModels = React.useCallback(async () => {
if (!instanceName || !isRunning) return
setLoading(true)
setError(null)
try {
const response = await llamaCppApi.getModels(instanceName)
setModels(response)
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to fetch models')
} finally {
setLoading(false)
}
}, [instanceName, isRunning])
// Poll for models while dialog is open
useEffect(() => {
if (!open || !isRunning) return
// Initial fetch
void fetchModels()
// Poll every 2 seconds
const interval = setInterval(() => {
void fetchModels()
}, 2000)
return () => clearInterval(interval)
}, [open, isRunning, fetchModels])
// Load model
const loadModel = async (modelName: string) => {
setLoadingModels((prev) => new Set(prev).add(modelName))
setError(null)
try {
await llamaCppApi.loadModel(instanceName, modelName)
// Polling will pick up the change
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to load model')
} finally {
setLoadingModels((prev) => {
const newSet = new Set(prev)
newSet.delete(modelName)
return newSet
})
}
}
// Unload model
const unloadModel = async (modelName: string) => {
setLoadingModels((prev) => new Set(prev).add(modelName))
setError(null)
try {
await llamaCppApi.unloadModel(instanceName, modelName)
// Polling will pick up the change
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to unload model')
} finally {
setLoadingModels((prev) => {
const newSet = new Set(prev)
newSet.delete(modelName)
return newSet
})
}
}
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-4xl max-w-[calc(100%-2rem)] max-h-[80vh] flex flex-col">
<DialogHeader>
<div className="flex items-center justify-between">
<div>
<DialogTitle className="flex items-center gap-2">
Models: {instanceName}
<Badge variant={isRunning ? 'default' : 'secondary'}>
{isRunning ? 'Running' : 'Stopped'}
</Badge>
</DialogTitle>
<DialogDescription>
Manage models in this llama.cpp instance
</DialogDescription>
</div>
<Button
variant="outline"
size="sm"
onClick={() => void fetchModels()}
disabled={loading || !isRunning}
>
{loading ? (
<Loader2 className="h-4 w-4 animate-spin" />
) : (
<RefreshCw className="h-4 w-4" />
)}
</Button>
</div>
</DialogHeader>
{/* Error Display */}
{error && (
<div className="flex items-center gap-2 p-3 bg-destructive/10 border border-destructive/20 rounded-lg">
<AlertCircle className="h-4 w-4 text-destructive" />
<span className="text-sm text-destructive">{error}</span>
</div>
)}
{/* Models Table */}
<div className="flex-1 flex flex-col min-h-0 overflow-auto">
{!isRunning ? (
<div className="flex items-center justify-center h-full text-muted-foreground">
Instance is not running
</div>
) : loading && models.length === 0 ? (
<div className="flex items-center justify-center h-full">
<Loader2 className="h-6 w-6 animate-spin text-muted-foreground" />
<span className="ml-2 text-muted-foreground">
Loading models...
</span>
</div>
) : models.length === 0 ? (
<div className="flex items-center justify-center h-full text-muted-foreground">
No models found
</div>
) : (
<Table>
<TableHeader>
<TableRow>
<TableHead>Model</TableHead>
<TableHead>Status</TableHead>
<TableHead className="text-right">Actions</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{models.map((model) => {
const isLoading = loadingModels.has(model.id)
const isModelLoading = model.status.value === 'loading'
return (
<TableRow key={model.id}>
<TableCell className="font-mono text-sm">
{model.id}
</TableCell>
<TableCell>
<div className="flex items-center gap-2">
<StatusIcon status={model.status.value} />
<span className="text-sm capitalize">
{model.status.value}
</span>
</div>
</TableCell>
<TableCell className="text-right">
{model.status.value === 'loaded' ? (
<Button
size="sm"
variant="outline"
onClick={() => unloadModel(model.id)}
disabled={!isRunning || isLoading || isModelLoading}
>
{isLoading ? (
<>
<Loader2 className="h-3 w-3 animate-spin mr-1" />
Unloading...
</>
) : (
'Unload'
)}
</Button>
) : model.status.value === 'unloaded' ? (
<Button
size="sm"
variant="default"
onClick={() => loadModel(model.id)}
disabled={!isRunning || isLoading || isModelLoading}
>
{isLoading ? (
<>
<Loader2 className="h-3 w-3 animate-spin mr-1" />
Loading...
</>
) : (
'Load'
)}
</Button>
) : (
<Button size="sm" variant="ghost" disabled>
Loading...
</Button>
)}
</TableCell>
</TableRow>
)
})}
</TableBody>
</Table>
)}
</div>
{/* Auto-refresh indicator */}
{isRunning && (
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<div className="w-2 h-2 bg-green-500 rounded-full animate-pulse"></div>
Auto-refreshing every 2 seconds
</div>
)}
</DialogContent>
</Dialog>
)
}
export default ModelsDialog

View File

@@ -0,0 +1,117 @@
import * as React from "react"
import { cn } from "@/lib/utils"
const Table = React.forwardRef<
HTMLTableElement,
React.HTMLAttributes<HTMLTableElement>
>(({ className, ...props }, ref) => (
<div className="relative w-full overflow-auto">
<table
ref={ref}
className={cn("w-full caption-bottom text-sm", className)}
{...props}
/>
</div>
))
Table.displayName = "Table"
const TableHeader = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<thead ref={ref} className={cn("[&_tr]:border-b", className)} {...props} />
))
TableHeader.displayName = "TableHeader"
const TableBody = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tbody
ref={ref}
className={cn("[&_tr:last-child]:border-0", className)}
{...props}
/>
))
TableBody.displayName = "TableBody"
const TableFooter = React.forwardRef<
HTMLTableSectionElement,
React.HTMLAttributes<HTMLTableSectionElement>
>(({ className, ...props }, ref) => (
<tfoot
ref={ref}
className={cn(
"border-t bg-muted/50 font-medium [&>tr]:last:border-b-0",
className
)}
{...props}
/>
))
TableFooter.displayName = "TableFooter"
const TableRow = React.forwardRef<
HTMLTableRowElement,
React.HTMLAttributes<HTMLTableRowElement>
>(({ className, ...props }, ref) => (
<tr
ref={ref}
className={cn(
"border-b transition-colors hover:bg-muted/50 data-[state=selected]:bg-muted",
className
)}
{...props}
/>
))
TableRow.displayName = "TableRow"
const TableHead = React.forwardRef<
HTMLTableCellElement,
React.ThHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<th
ref={ref}
className={cn(
"h-12 px-4 text-left align-middle font-medium text-muted-foreground [&:has([role=checkbox])]:pr-0",
className
)}
{...props}
/>
))
TableHead.displayName = "TableHead"
const TableCell = React.forwardRef<
HTMLTableCellElement,
React.TdHTMLAttributes<HTMLTableCellElement>
>(({ className, ...props }, ref) => (
<td
ref={ref}
className={cn("p-4 align-middle [&:has([role=checkbox])]:pr-0", className)}
{...props}
/>
))
TableCell.displayName = "TableCell"
const TableCaption = React.forwardRef<
HTMLTableCaptionElement,
React.HTMLAttributes<HTMLTableCaptionElement>
>(({ className, ...props }, ref) => (
<caption
ref={ref}
className={cn("mt-4 text-sm text-muted-foreground", className)}
{...props}
/>
))
TableCaption.displayName = "TableCaption"
export {
Table,
TableHeader,
TableBody,
TableFooter,
TableHead,
TableRow,
TableCell,
TableCaption,
}

View File

@@ -205,3 +205,51 @@ export const apiKeysApi = {
getPermissions: (id: number) =>
apiCall<KeyPermissionResponse[]>(`/auth/keys/${id}/permissions`),
};
// Llama.cpp model management types
export interface Model {
id: string;
object: string;
owned_by: string;
created: number;
in_cache: boolean;
path: string;
status: {
value: string; // "loaded" | "loading" | "unloaded"
args: string[];
};
}
export interface ModelsListResponse {
object: string;
data: Model[];
}
// Llama.cpp model management API functions
export const llamaCppApi = {
// GET /llama-cpp/{name}/models
getModels: async (instanceName: string): Promise<Model[]> => {
const response = await apiCall<ModelsListResponse>(
`/llama-cpp/${encodeURIComponent(instanceName)}/models`
);
return response.data;
},
// POST /llama-cpp/{name}/models/{model}/load
loadModel: (instanceName: string, modelName: string) =>
apiCall<{ status: string; message: string }>(
`/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/load`,
{
method: "POST",
}
),
// POST /llama-cpp/{name}/models/{model}/unload
unloadModel: (instanceName: string, modelName: string) =>
apiCall<{ status: string; message: string }>(
`/llama-cpp/${encodeURIComponent(instanceName)}/models/${encodeURIComponent(modelName)}/unload`,
{
method: "POST",
}
),
};