diff --git a/webui/src/components/ParseCommandDialog.tsx b/webui/src/components/ParseCommandDialog.tsx
index 6b14eaa..fcf79e6 100644
--- a/webui/src/components/ParseCommandDialog.tsx
+++ b/webui/src/components/ParseCommandDialog.tsx
@@ -9,7 +9,7 @@ import {
   DialogHeader,
   DialogTitle,
 } from "@/components/ui/dialog";
-import { type CreateInstanceOptions } from "@/types/instance";
+import { BackendType, type BackendTypeValue, type CreateInstanceOptions } from "@/types/instance";
 import { backendsApi } from "@/lib/api";
 import { toast } from "sonner";
 
@@ -25,6 +25,7 @@ const ParseCommandDialog: React.FC<ParseCommandDialogProps> = ({
   onParsed,
 }) => {
   const [command, setCommand] = useState('');
+  const [backendType, setBackendType] = useState<BackendTypeValue>(BackendType.LLAMA_CPP);
   const [loading, setLoading] = useState(false);
   const [error, setError] = useState<string | null>(null);
 
@@ -38,18 +39,31 @@ const ParseCommandDialog: React.FC<ParseCommandDialogProps> = ({
     setError(null);
 
     try {
-      const options = await backendsApi.llamaCpp.parseCommand(command);
+      let options: CreateInstanceOptions;
+
+      // Parse based on selected backend type
+      switch (backendType) {
+        case BackendType.LLAMA_CPP:
+          options = await backendsApi.llamaCpp.parseCommand(command);
+          break;
+        case BackendType.MLX_LM:
+          options = await backendsApi.mlx.parseCommand(command);
+          break;
+        case BackendType.VLLM:
+          options = await backendsApi.vllm.parseCommand(command);
+          break;
+        default:
+          throw new Error(`Unsupported backend type: ${backendType}`);
+      }
+
       onParsed(options);
       onOpenChange(false);
-      // Reset form
       setCommand('');
       setError(null);
-      // Show success toast
       toast.success('Command parsed successfully');
     } catch (err) {
       const errorMessage = err instanceof Error ? err.message : 'Failed to parse command';
       setError(errorMessage);
-      // Show error toast
       toast.error('Failed to parse command', {
         description: errorMessage
       });
@@ -60,35 +74,62 @@
   const handleOpenChange = (open: boolean) => {
     if (!open) {
-      // Reset form when closing
       setCommand('');
+      setBackendType(BackendType.LLAMA_CPP);
       setError(null);
     }
     onOpenChange(open);
   };
 
+  const getPlaceholderForBackend = (backendType: BackendTypeValue): string => {
+    switch (backendType) {
+      case BackendType.LLAMA_CPP:
+        return "llama-server --model /path/to/model.gguf --gpu-layers 32 --ctx-size 4096";
+      case BackendType.MLX_LM:
+        return "mlx_lm.server --model mlx-community/Mistral-7B-Instruct-v0.3-4bit --host 0.0.0.0 --port 8080";
+      case BackendType.VLLM:
+        return "vllm serve --model microsoft/DialoGPT-medium --tensor-parallel-size 2 --gpu-memory-utilization 0.9";
+      default:
+        return "Enter your command here...";
+    }
+  };
+
   return (
     <Dialog open={open} onOpenChange={handleOpenChange}>
       <DialogContent>
         <DialogHeader>
-          <DialogTitle>Parse Llama Server Command</DialogTitle>
+          <DialogTitle>Parse Backend Command</DialogTitle>
           <DialogDescription>
-            Paste your llama-server command to automatically populate the form fields
+            Select your backend type and paste the command to automatically populate the form fields
           </DialogDescription>
         </DialogHeader>
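The `switch` in the submit handler assumes every backend client on `backendsApi` exposes the same `parseCommand` contract. A minimal sketch of that shape, with hypothetical endpoint paths (the real client lives in `@/lib/api` and may be structured differently):

```typescript
import { type CreateInstanceOptions } from "@/types/instance";

// Hypothetical contract assumed by the switch above; the real
// `backendsApi` in @/lib/api may wrap its HTTP client differently.
interface CommandParser {
  parseCommand(command: string): Promise<CreateInstanceOptions>;
}

interface BackendsApi {
  llamaCpp: CommandParser;
  mlx: CommandParser;
  vllm: CommandParser;
}

// One possible implementation: POST the raw command string to a
// backend-specific endpoint and return the parsed options as JSON.
const makeParser = (endpoint: string): CommandParser => ({
  async parseCommand(command: string): Promise<CreateInstanceOptions> {
    const res = await fetch(endpoint, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ command }),
    });
    if (!res.ok) {
      throw new Error(`Parse request failed with status ${res.status}`);
    }
    return (await res.json()) as CreateInstanceOptions;
  },
});

// Endpoint paths are illustrative, not the project's actual routes.
export const backendsApi: BackendsApi = {
  llamaCpp: makeParser("/api/v1/backends/llama-cpp/parse-command"),
  mlx: makeParser("/api/v1/backends/mlx/parse-command"),
  vllm: makeParser("/api/v1/backends/vllm/parse-command"),
};
```

Keeping the three parsers behind one interface is what lets the dialog stay a thin dispatcher: adding a backend means one new `case` plus one new client entry.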
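The diff also uses `BackendType` as a runtime value in `switch` cases while importing `BackendTypeValue` as a type. One common way to derive both from a single definition, assuming string identifiers like `"llama_cpp"` (the actual literals are defined in `@/types/instance`):

```typescript
// Hypothetical definition; the real values live in @/types/instance.
export const BackendType = {
  LLAMA_CPP: "llama_cpp",
  MLX_LM: "mlx_lm",
  VLLM: "vllm",
} as const;

// Union of the literal values: "llama_cpp" | "mlx_lm" | "vllm"
export type BackendTypeValue = (typeof BackendType)[keyof typeof BackendType];
```

This pattern gives exhaustive `switch` checking over the union without a TypeScript `enum`, which is why the component can both iterate the values and type its state with them.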
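The dialog body itself does not survive in the hunk above, but the new state and the placeholder helper imply a backend selector feeding the textarea. A hypothetical rendering, assuming shadcn/ui's `Select` and `Textarea` components:

```tsx
{/* Hypothetical markup; the actual dialog body is not shown in the diff. */}
<div className="grid gap-4 py-4">
  <Select
    value={backendType}
    onValueChange={(value) => setBackendType(value as BackendTypeValue)}
  >
    <SelectTrigger>
      <SelectValue placeholder="Select backend type" />
    </SelectTrigger>
    <SelectContent>
      <SelectItem value={BackendType.LLAMA_CPP}>Llama Server</SelectItem>
      <SelectItem value={BackendType.MLX_LM}>MLX LM</SelectItem>
      <SelectItem value={BackendType.VLLM}>vLLM</SelectItem>
    </SelectContent>
  </Select>
  <Textarea
    value={command}
    onChange={(e) => setCommand(e.target.value)}
    placeholder={getPlaceholderForBackend(backendType)}
  />
</div>
```

Deriving the placeholder from the selected backend keeps the example command in sync with whichever parser the submit handler will dispatch to.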