Implement llama-server command parsing and add UI components for command input
pkg/backends/llamacpp/parser.go (new file, 144 lines)
@@ -0,0 +1,144 @@
package llamacpp

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// ParseLlamaCommand parses a llama-server command string into LlamaServerOptions
func ParseLlamaCommand(command string) (*LlamaServerOptions, error) {
	// 1. Validate command starts with llama-server
	trimmed := strings.TrimSpace(command)
	if trimmed == "" {
		return nil, fmt.Errorf("command cannot be empty")
	}

	// Check if command starts with llama-server (case-insensitive)
	lowerCommand := strings.ToLower(trimmed)
	if !strings.HasPrefix(lowerCommand, "llama-server") {
		return nil, fmt.Errorf("command must start with 'llama-server'")
	}

	// 2. Extract arguments (everything after llama-server)
	parts := strings.Fields(trimmed)
	if len(parts) < 1 {
		return nil, fmt.Errorf("invalid command format")
	}

	args := parts[1:] // Skip binary name

	// 3. Parse arguments into map
	options := make(map[string]any)
	i := 0
	for i < len(args) {
		arg := args[i]

		// Skip non-flag arguments
		if !strings.HasPrefix(arg, "-") {
			i++
			continue
		}

		// Handle --flag=value format
		if strings.Contains(arg, "=") {
			parts := strings.SplitN(arg, "=", 2)
			flag := strings.TrimPrefix(parts[0], "-")
			flag = strings.TrimPrefix(flag, "-")

			// Convert flag from kebab-case to snake_case for consistency with JSON field names
			flagName := strings.ReplaceAll(flag, "-", "_")

			// Convert value to appropriate type
			value := parseValue(parts[1])

			// Handle array flags by checking if flag already exists
			if existingValue, exists := options[flagName]; exists {
				// Convert to array if not already
				switch existing := existingValue.(type) {
				case []string:
					options[flagName] = append(existing, parts[1])
				case string:
					options[flagName] = []string{existing, parts[1]}
				default:
					options[flagName] = []string{fmt.Sprintf("%v", existing), parts[1]}
				}
			} else {
				options[flagName] = value
			}
			i++
			continue
		}

		// Handle --flag value format
		flag := strings.TrimPrefix(arg, "-")
		flag = strings.TrimPrefix(flag, "-")

		// Convert flag from kebab-case to snake_case for consistency with JSON field names
		flagName := strings.ReplaceAll(flag, "-", "_")

		// Check if next arg is a value (not a flag)
		if i+1 < len(args) && !strings.HasPrefix(args[i+1], "-") {
			value := parseValue(args[i+1])

			// Handle array flags by checking if flag already exists
			if existingValue, exists := options[flagName]; exists {
				// Convert to array if not already
				switch existing := existingValue.(type) {
				case []string:
					options[flagName] = append(existing, args[i+1])
				case string:
					options[flagName] = []string{existing, args[i+1]}
				default:
					options[flagName] = []string{fmt.Sprintf("%v", existing), args[i+1]}
				}
			} else {
				options[flagName] = value
			}
			i += 2 // Skip flag and value
		} else {
			// Boolean flag
			options[flagName] = true
			i++
		}
	}

	// 4. Convert to LlamaServerOptions using existing UnmarshalJSON
	jsonData, err := json.Marshal(options)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal parsed options: %w", err)
	}

	var llamaOptions LlamaServerOptions
	if err := json.Unmarshal(jsonData, &llamaOptions); err != nil {
		return nil, fmt.Errorf("failed to parse command options: %w", err)
	}

	// 5. Return LlamaServerOptions
	return &llamaOptions, nil
}

// parseValue attempts to parse a string value into the most appropriate type
func parseValue(value string) any {
	// Try to parse as boolean
	if strings.ToLower(value) == "true" {
		return true
	}
	if strings.ToLower(value) == "false" {
		return false
	}

	// Try to parse as integer
	if intVal, err := strconv.Atoi(value); err == nil {
		return intVal
	}

	// Try to parse as float
	if floatVal, err := strconv.ParseFloat(value, 64); err == nil {
		return floatVal
	}

	// Default to string
	return value
}
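A minimal usage sketch of the new parser (a hypothetical call site, not part of this commit; the Model, GPULayers, and NoMmap fields are the ones exercised by the tests below):

```go
package main

import (
	"fmt"
	"log"

	"llamactl/pkg/backends/llamacpp"
)

func main() {
	// Flags are normalized from kebab-case to snake_case, then mapped onto
	// LlamaServerOptions via its JSON unmarshalling.
	opts, err := llamacpp.ParseLlamaCommand("llama-server --model /models/llama3.gguf --gpu-layers 32 --no-mmap")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts.Model, opts.GPULayers, opts.NoMmap) // /models/llama3.gguf 32 true
}
```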
pkg/backends/llamacpp/parser_test.go (new file, 169 lines)
@@ -0,0 +1,169 @@
package llamacpp

import (
	"testing"
)

func TestParseLlamaCommand(t *testing.T) {
	tests := []struct {
		name      string
		command   string
		expectErr bool
	}{
		{
			name:      "basic command with model",
			command:   "llama-server --model /path/to/model.gguf",
			expectErr: false,
		},
		{
			name:      "command with multiple flags",
			command:   "llama-server --model /path/to/model.gguf --gpu-layers 32 --ctx-size 4096",
			expectErr: false,
		},
		{
			name:      "command with short flags",
			command:   "llama-server -m /path/to/model.gguf -ngl 32 -c 4096",
			expectErr: false,
		},
		{
			name:      "command with equals format",
			command:   "llama-server --model=/path/to/model.gguf --gpu-layers=32",
			expectErr: false,
		},
		{
			name:      "command with boolean flags",
			command:   "llama-server --model /path/to/model.gguf --verbose --no-mmap",
			expectErr: false,
		},
		{
			name:      "empty command",
			command:   "",
			expectErr: true,
		},
		{
			name:      "invalid command without llama-server",
			command:   "other-command --model /path/to/model.gguf",
			expectErr: true,
		},
		{
			name:      "case insensitive command",
			command:   "LLAMA-SERVER --model /path/to/model.gguf",
			expectErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := ParseLlamaCommand(tt.command)

			if tt.expectErr {
				if err == nil {
					t.Errorf("expected error but got none")
				}
				return
			}

			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			if result == nil {
				t.Errorf("expected result but got nil")
				return
			}
		})
	}
}

func TestParseLlamaCommandSpecificValues(t *testing.T) {
	// Test specific value parsing
	command := "llama-server --model /test/model.gguf --gpu-layers 32 --ctx-size 4096 --verbose"
	result, err := ParseLlamaCommand(command)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if result.Model != "/test/model.gguf" {
		t.Errorf("expected model '/test/model.gguf', got '%s'", result.Model)
	}

	if result.GPULayers != 32 {
		t.Errorf("expected gpu_layers 32, got %d", result.GPULayers)
	}

	if result.CtxSize != 4096 {
		t.Errorf("expected ctx_size 4096, got %d", result.CtxSize)
	}

	if !result.Verbose {
		t.Errorf("expected verbose to be true, got %v", result.Verbose)
	}
}

func TestParseLlamaCommandArrayFlags(t *testing.T) {
	// Test array flag handling (critical for lora, override-tensor, etc.)
	command := "llama-server --model test.gguf --lora adapter1.bin --lora adapter2.bin"
	result, err := ParseLlamaCommand(command)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if len(result.Lora) != 2 {
		t.Errorf("expected 2 lora adapters, got %d", len(result.Lora))
	}

	if result.Lora[0] != "adapter1.bin" || result.Lora[1] != "adapter2.bin" {
		t.Errorf("expected lora adapters [adapter1.bin, adapter2.bin], got %v", result.Lora)
	}
}

func TestParseLlamaCommandMixedFormats(t *testing.T) {
	// Test mixing --flag=value and --flag value formats
	command := "llama-server --model=/path/model.gguf --gpu-layers 16 --batch-size=512 --verbose"
	result, err := ParseLlamaCommand(command)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if result.Model != "/path/model.gguf" {
		t.Errorf("expected model '/path/model.gguf', got '%s'", result.Model)
	}

	if result.GPULayers != 16 {
		t.Errorf("expected gpu_layers 16, got %d", result.GPULayers)
	}

	if result.BatchSize != 512 {
		t.Errorf("expected batch_size 512, got %d", result.BatchSize)
	}

	if !result.Verbose {
		t.Errorf("expected verbose to be true, got %v", result.Verbose)
	}
}

func TestParseLlamaCommandTypeConversion(t *testing.T) {
	// Test that values are converted to appropriate types
	command := "llama-server --model test.gguf --temp 0.7 --top-k 40 --no-mmap"
	result, err := ParseLlamaCommand(command)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if result.Temperature != 0.7 {
		t.Errorf("expected temperature 0.7, got %f", result.Temperature)
	}

	if result.TopK != 40 {
		t.Errorf("expected top_k 40, got %d", result.TopK)
	}

	if !result.NoMmap {
		t.Errorf("expected no_mmap to be true, got %v", result.NoMmap)
	}
}
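The tests above assert end-to-end behavior; a hypothetical companion test (not part of this commit) could pin down parseValue's precedence directly — booleans first, then integers, then floats, falling back to strings:

```go
// TestParseValueExamples is an illustrative sketch in the same package.
func TestParseValueExamples(t *testing.T) {
	cases := map[string]any{
		"true":  true,    // boolean wins before numeric parsing
		"42":    42,      // integer before float
		"0.75":  0.75,    // float64
		"model": "model", // anything else stays a string
	}
	for in, want := range cases {
		if got := parseValue(in); got != want {
			t.Errorf("parseValue(%q) = %v, want %v", in, got, want)
		}
	}
}
```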
@@ -5,6 +5,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"llamactl/pkg/backends"
+	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -629,3 +631,53 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
 		proxy.ServeHTTP(w, r)
 	}
 }
+
+// ParseCommandRequest represents the request body for command parsing
+type ParseCommandRequest struct {
+	Command string `json:"command"`
+}
+
+// ParseLlamaCommand godoc
+// @Summary Parse llama-server command
+// @Description Parses a llama-server command string into instance options
+// @Tags backends
+// @Security ApiKeyAuth
+// @Accept json
+// @Produce json
+// @Param request body ParseCommandRequest true "Command to parse"
+// @Success 200 {object} instance.CreateInstanceOptions "Parsed options"
+// @Failure 400 {string} string "Invalid request or command"
+// @Failure 500 {string} string "Internal Server Error"
+// @Router /backends/llama-cpp/parse-command [post]
+func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		var req ParseCommandRequest
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			http.Error(w, "Invalid request body", http.StatusBadRequest)
+			return
+		}
+
+		if req.Command == "" {
+			http.Error(w, "Command cannot be empty", http.StatusBadRequest)
+			return
+		}
+
+		// Parse the command using llamacpp parser
+		llamaOptions, err := llamacpp.ParseLlamaCommand(req.Command)
+		if err != nil {
+			http.Error(w, "Failed to parse command: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		// Create the full CreateInstanceOptions
+		options := &instance.CreateInstanceOptions{
+			BackendType:        backends.BackendTypeLlamaCpp,
+			LlamaServerOptions: llamaOptions,
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		if err := json.NewEncoder(w).Encode(options); err != nil {
+			http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+		}
+	}
+}
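For reference, a hedged client sketch against the new handler (the localhost:8080/api/v1 base URL and the API key are deployment assumptions, not part of this diff; the route itself is registered in the router hunk just below):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"command": "llama-server --model /models/llama3.gguf --ctx-size 4096"}`)
	req, err := http.NewRequest("POST", "http://localhost:8080/api/v1/backends/llama-cpp/parse-command", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer my-management-key") // endpoint sits behind ApiKeyAuth per the godoc

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out)) // CreateInstanceOptions JSON on success
}
```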
@@ -50,6 +50,13 @@ func SetupRouter(handler *Handler) *chi.Mux {
 		r.Get("/devices", handler.LlamaServerListDevicesHandler())
 	})
 
+	// Backend-specific endpoints
+	r.Route("/backends", func(r chi.Router) {
+		r.Route("/llama-cpp", func(r chi.Router) {
+			r.Post("/parse-command", handler.ParseLlamaCommand())
+		})
+	})
+
 	// Instance management endpoints
 	r.Route("/instances", func(r chi.Router) {
 		r.Get("/", handler.ListInstances()) // List all instances
webui/package-lock.json (generated, 11 lines)
@@ -19,6 +19,7 @@
         "lucide-react": "^0.525.0",
         "react": "^19.1.0",
         "react-dom": "^19.1.0",
+        "sonner": "^2.0.7",
         "tailwind-merge": "^3.3.1",
         "tailwindcss": "^4.1.11",
         "zod": "^4.0.5"
@@ -6750,6 +6751,16 @@
         "node": ">=18"
       }
     },
+    "node_modules/sonner": {
+      "version": "2.0.7",
+      "resolved": "https://registry.npmjs.org/sonner/-/sonner-2.0.7.tgz",
+      "integrity": "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w==",
+      "license": "MIT",
+      "peerDependencies": {
+        "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc",
+        "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc"
+      }
+    },
     "node_modules/source-map-js": {
       "version": "1.2.1",
       "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
@@ -28,6 +28,7 @@
     "lucide-react": "^0.525.0",
     "react": "^19.1.0",
     "react-dom": "^19.1.0",
+    "sonner": "^2.0.7",
     "tailwind-merge": "^3.3.1",
     "tailwindcss": "^4.1.11",
     "zod": "^4.0.5"
@@ -8,6 +8,7 @@ import { type CreateInstanceOptions, type Instance } from "@/types/instance";
 import { useInstances } from "@/contexts/InstancesContext";
 import { useAuth } from "@/contexts/AuthContext";
 import { ThemeProvider } from "@/contexts/ThemeContext";
+import { Toaster } from "sonner";
 
 function App() {
   const { isAuthenticated, isLoading: authLoading } = useAuth();
@@ -85,6 +86,8 @@ function App() {
         open={isSystemInfoModalOpen}
         onOpenChange={setIsSystemInfoModalOpen}
       />
+
+      <Toaster />
     </div>
   </ThemeProvider>
 );
@@ -12,9 +12,10 @@ import {
 } from "@/components/ui/dialog";
 import { BackendType, type CreateInstanceOptions, type Instance } from "@/types/instance";
 import { getBasicFields, getAdvancedFields, getBasicBackendFields, getAdvancedBackendFields } from "@/lib/zodFormUtils";
-import { ChevronDown, ChevronRight } from "lucide-react";
+import { ChevronDown, ChevronRight, Terminal } from "lucide-react";
 import ZodFormField from "@/components/ZodFormField";
 import BackendFormField from "@/components/BackendFormField";
+import ParseCommandDialog from "@/components/ParseCommandDialog";
 
 interface InstanceDialogProps {
   open: boolean;
@@ -35,6 +36,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
   const [formData, setFormData] = useState<CreateInstanceOptions>({});
   const [showAdvanced, setShowAdvanced] = useState(false);
   const [nameError, setNameError] = useState("");
+  const [showParseDialog, setShowParseDialog] = useState(false);
 
   // Get field lists dynamically from the type
   const basicFields = getBasicFields();
@@ -142,6 +144,14 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
     setShowAdvanced(!showAdvanced);
   };
 
+  const handleCommandParsed = (parsedOptions: CreateInstanceOptions) => {
+    setFormData(prev => ({
+      ...prev,
+      ...parsedOptions,
+    }));
+    setShowParseDialog(false);
+  };
+
   // Check if auto_restart is enabled
   const isAutoRestartEnabled = formData.auto_restart === true;
 
@@ -258,6 +268,16 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
 
           {/* Advanced Fields Toggle */}
           <div className="border-t pt-4">
+            <div className="flex items-center justify-between">
+              <Button
+                variant="outline"
+                onClick={() => setShowParseDialog(true)}
+                className="flex items-center gap-2"
+              >
+                <Terminal className="h-4 w-4" />
+                Parse Command
+              </Button>
+
             <Button
               variant="ghost"
               onClick={toggleAdvanced}
@@ -281,6 +301,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
               </span>
             </Button>
+            </div>
           </div>
 
           {/* Advanced Fields - Automatically generated from type (excluding restart options) */}
           {showAdvanced && (
@@ -352,6 +373,12 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
           </Button>
         </DialogFooter>
       </DialogContent>
+
+      <ParseCommandDialog
+        open={showParseDialog}
+        onOpenChange={setShowParseDialog}
+        onParsed={handleCommandParsed}
+      />
     </Dialog>
   );
 };
webui/src/components/ParseCommandDialog.tsx (new file, 117 lines)
@@ -0,0 +1,117 @@
import React, { useState } from "react";
import { Button } from "@/components/ui/button";
import { Label } from "@/components/ui/label";
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogFooter,
  DialogHeader,
  DialogTitle,
} from "@/components/ui/dialog";
import { type CreateInstanceOptions } from "@/types/instance";
import { backendsApi } from "@/lib/api";
import { toast } from "sonner";

interface ParseCommandDialogProps {
  open: boolean;
  onOpenChange: (open: boolean) => void;
  onParsed: (options: CreateInstanceOptions) => void;
}

const ParseCommandDialog: React.FC<ParseCommandDialogProps> = ({
  open,
  onOpenChange,
  onParsed,
}) => {
  const [command, setCommand] = useState('');
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);

  const handleParse = async () => {
    if (!command.trim()) {
      setError("Command cannot be empty");
      return;
    }

    setLoading(true);
    setError(null);

    try {
      const options = await backendsApi.llamaCpp.parseCommand(command);
      onParsed(options);
      onOpenChange(false);
      // Reset form
      setCommand('');
      setError(null);
      // Show success toast
      toast.success('Command parsed successfully');
    } catch (err) {
      const errorMessage = err instanceof Error ? err.message : 'Failed to parse command';
      setError(errorMessage);
      // Show error toast
      toast.error('Failed to parse command', {
        description: errorMessage
      });
    } finally {
      setLoading(false);
    }
  };

  const handleOpenChange = (open: boolean) => {
    if (!open) {
      // Reset form when closing
      setCommand('');
      setError(null);
    }
    onOpenChange(open);
  };

  return (
    <Dialog open={open} onOpenChange={handleOpenChange}>
      <DialogContent className="sm:max-w-[600px]">
        <DialogHeader>
          <DialogTitle>Parse Llama Server Command</DialogTitle>
          <DialogDescription>
            Paste your llama-server command to automatically populate the form fields
          </DialogDescription>
        </DialogHeader>

        <div className="space-y-4">
          <div>
            <Label htmlFor="command">Command</Label>
            <textarea
              id="command"
              value={command}
              onChange={(e) => setCommand(e.target.value)}
              placeholder="llama-server --model /path/to/model.gguf --gpu-layers 32 --ctx-size 4096"
              className="w-full h-32 p-3 border border-input rounded-md font-mono text-sm resize-vertical focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2"
            />
          </div>

          {error && (
            <div className="text-destructive text-sm bg-destructive/10 p-3 rounded-md">
              {error}
            </div>
          )}
        </div>

        <DialogFooter>
          <Button variant="outline" onClick={() => handleOpenChange(false)}>
            Cancel
          </Button>
          <Button
            onClick={() => {
              handleParse().catch(console.error);
            }}
            disabled={!command.trim() || loading}
          >
            {loading ? 'Parsing...' : 'Parse Command'}
          </Button>
        </DialogFooter>
      </DialogContent>
    </Dialog>
  );
};

export default ParseCommandDialog;
@@ -83,6 +83,18 @@ export const serverApi = {
   getDevices: () => apiCall<string>("/server/devices", {}, "text"),
 };
 
+// Backend API functions
+export const backendsApi = {
+  llamaCpp: {
+    // POST /backends/llama-cpp/parse-command
+    parseCommand: (command: string) =>
+      apiCall<CreateInstanceOptions>('/backends/llama-cpp/parse-command', {
+        method: 'POST',
+        body: JSON.stringify({ command }),
+      }),
+  },
+};
+
 // Instance API functions
 export const instancesApi = {
   // GET /instances