Implement llama-server command parsing and add UI components for command input
@@ -5,6 +5,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"llamactl/pkg/backends"
+	"llamactl/pkg/backends/llamacpp"
 	"llamactl/pkg/config"
 	"llamactl/pkg/instance"
 	"llamactl/pkg/manager"
@@ -629,3 +631,53 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
 		proxy.ServeHTTP(w, r)
 	}
 }
+
+// ParseCommandRequest represents the request body for command parsing
+type ParseCommandRequest struct {
+	Command string `json:"command"`
+}
+
+// ParseLlamaCommand godoc
+// @Summary Parse llama-server command
+// @Description Parses a llama-server command string into instance options
+// @Tags backends
+// @Security ApiKeyAuth
+// @Accept json
+// @Produce json
+// @Param request body ParseCommandRequest true "Command to parse"
+// @Success 200 {object} instance.CreateInstanceOptions "Parsed options"
+// @Failure 400 {string} string "Invalid request or command"
+// @Failure 500 {string} string "Internal Server Error"
+// @Router /backends/llama-cpp/parse-command [post]
+func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		var req ParseCommandRequest
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			http.Error(w, "Invalid request body", http.StatusBadRequest)
+			return
+		}
+
+		if req.Command == "" {
+			http.Error(w, "Command cannot be empty", http.StatusBadRequest)
+			return
+		}
+
+		// Parse the command using the llamacpp parser
+		llamaOptions, err := llamacpp.ParseLlamaCommand(req.Command)
+		if err != nil {
+			http.Error(w, "Failed to parse command: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		// Create the full CreateInstanceOptions
+		options := &instance.CreateInstanceOptions{
+			BackendType:        backends.BackendTypeLlamaCpp,
+			LlamaServerOptions: llamaOptions,
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		if err := json.NewEncoder(w).Encode(options); err != nil {
+			http.Error(w, "Failed to encode response", http.StatusInternalServerError)
+		}
+	}
+}
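For illustration, here is a minimal client-side sketch of calling the new endpoint. Only the /backends/llama-cpp/parse-command path comes from the @Router annotation above; the base URL, the /api/v1 prefix, and the X-API-Key header name are assumptions, since the diff does not show the parent route group or the auth middleware.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Request body matching ParseCommandRequest: {"command": "..."}
	body, _ := json.Marshal(map[string]string{
		"command": "llama-server --model /models/model.gguf --gpu-layers 32",
	})

	// Assumed full URL: host and /api/v1 prefix are guesses; the
	// /backends/llama-cpp/parse-command suffix is from the @Router annotation.
	url := "http://localhost:8080/api/v1/backends/llama-cpp/parse-command"
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-API-Key", "my-key") // assumed header name for ApiKeyAuth

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success this prints the JSON-encoded instance.CreateInstanceOptions.
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}

On success the handler replies with the parsed instance.CreateInstanceOptions, which a UI command-input component can feed straight into the instance-creation form.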
@@ -50,6 +50,13 @@ func SetupRouter(handler *Handler) *chi.Mux {
 			r.Get("/devices", handler.LlamaServerListDevicesHandler())
 		})
 
+		// Backend-specific endpoints
+		r.Route("/backends", func(r chi.Router) {
+			r.Route("/llama-cpp", func(r chi.Router) {
+				r.Post("/parse-command", handler.ParseLlamaCommand())
+			})
+		})
+
 		// Instance management endpoints
 		r.Route("/instances", func(r chi.Router) {
 			r.Get("/", handler.ListInstances()) // List all instances
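Because the closure returned by ParseLlamaCommand never dereferences h, the handler can also be exercised in isolation with net/http/httptest. Below is a minimal validation-path test sketch; the package name and import path for Handler are hypothetical, as the diff does not show where the type is declared.

package server_test // hypothetical; the diff does not name the Handler's package

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"llamactl/pkg/server" // hypothetical import path for Handler
)

func TestParseLlamaCommandValidation(t *testing.T) {
	// A zero-value Handler suffices here: the closure never touches h's fields.
	h := &server.Handler{}
	handler := h.ParseLlamaCommand()

	// An empty command must be rejected with 400.
	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest(http.MethodPost, "/backends/llama-cpp/parse-command",
		strings.NewReader(`{"command": ""}`)))
	if rec.Code != http.StatusBadRequest {
		t.Errorf("empty command: want 400, got %d", rec.Code)
	}

	// Malformed JSON must also be rejected with 400.
	rec = httptest.NewRecorder()
	handler(rec, httptest.NewRequest(http.MethodPost, "/backends/llama-cpp/parse-command",
		strings.NewReader(`not json`)))
	if rec.Code != http.StatusBadRequest {
		t.Errorf("malformed JSON: want 400, got %d", rec.Code)
	}
}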