// Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-06 09:04:27 +00:00).
package llamactl
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
"net/http"
|
|
"os/exec"
|
|
)
|
|
|
|
// Handler bundles the HTTP handlers for the llamactl server API.
// It delegates instance lifecycle operations to the injected InstanceManager.
type Handler struct {
	InstanceManager InstanceManager
}
|
|
|
|
func NewHandler(im InstanceManager) *Handler {
|
|
return &Handler{
|
|
InstanceManager: im,
|
|
}
|
|
}
|
|
|
|
// HelpHandler godoc
|
|
// @Summary Get help for llama server
|
|
// @Description Returns the help text for the llama server command
|
|
// @Tags server
|
|
// #Produces text/plain
|
|
// @Success 200 {string} string "Help text"
|
|
// @Failure 500 {string} string "Internal Server Error"
|
|
// @Router /server/help [get]
|
|
func (h *Handler) HelpHandler() http.HandlerFunc {
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
helpCmd := exec.Command("llama-server", "--help")
|
|
output, err := helpCmd.CombinedOutput()
|
|
if err != nil {
|
|
http.Error(w, "Failed to get help: "+err.Error(), http.StatusInternalServerError)
|
|
return
|
|
}
|
|
w.Header().Set("Content-Type", "text/plain")
|
|
w.Write(output)
|
|
}
|
|
}
|
|
|
|
// VersionHandler godoc
|
|
// @Summary Get version of llama server
|
|
// @Description Returns the version of the llama server command
|
|
// @Tags server
|
|
// #Produces text/plain
|
|
// @Success 200 {string} string "Version information"
|
|
// @Failure 500 {string} string "Internal Server Error"
|
|
// @Router /server/version [get]
|
|
func (h *Handler) VersionHandler() http.HandlerFunc {
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
versionCmd := exec.Command("llama-server", "--version")
|
|
output, err := versionCmd.CombinedOutput()
|
|
if err != nil {
|
|
http.Error(w, "Failed to get version: "+err.Error(), http.StatusInternalServerError)
|
|
return
|
|
}
|
|
w.Header().Set("Content-Type", "text/plain")
|
|
w.Write(output)
|
|
}
|
|
}
|
|
|
|
// ListDevicesHandler godoc
|
|
// @Summary List available devices for llama server
|
|
// @Description Returns a list of available devices for the llama server
|
|
// @Tags server
|
|
// #Produces text/plain
|
|
// @Success 200 {string} string "List of devices"
|
|
// @Failure 500 {string} string "Internal Server Error"
|
|
// @Router /server/devices [get]
|
|
func (h *Handler) ListDevicesHandler() http.HandlerFunc {
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
listCmd := exec.Command("llama-server", "--list-devices")
|
|
output, err := listCmd.CombinedOutput()
|
|
if err != nil {
|
|
http.Error(w, "Failed to list devices: "+err.Error(), http.StatusInternalServerError)
|
|
return
|
|
}
|
|
w.Header().Set("Content-Type", "text/plain")
|
|
w.Write(output)
|
|
}
|
|
}
|
|
|
|
// StartHandler returns an HTTP handler that decodes an InstanceOptions
// payload from the request body.
//
// NOTE(review): the handler currently only decodes the body and then falls
// through, returning 200 with an empty response — it never calls
// h.InstanceManager to actually start an instance. This looks like a
// work-in-progress; confirm the intended behavior before relying on it.
func (h *Handler) StartHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var requestBody InstanceOptions
		if err := json.NewDecoder(r.Body).Decode(&requestBody); err != nil {
			// Log the detailed decode error server-side; the client only
			// receives a generic 400 message.
			fmt.Println("Error decoding request body:", err)
			http.Error(w, "Invalid request body", http.StatusBadRequest)
			return
		}

	}
}
|
|
|
|
// func launchHandler(w http.ResponseWriter, r *http.Request) {
|
|
// model := chi.URLParam(r, "model")
|
|
// if model == "" {
|
|
// http.Error(w, "Model parameter is required", http.StatusBadRequest)
|
|
// return
|
|
// }
|
|
|
|
// cmd := execLLama(model)
|
|
// if err := cmd.Start(); err != nil {
|
|
// http.Error(w, "Failed to start llama server: "+err.Error(), http.StatusInternalServerError)
|
|
// return
|
|
// }
|
|
|
|
// instances[model] = cmd
|
|
// w.Write([]byte("Llama server started for model: " + model))
|
|
// }
|
|
|
|
// func stopHandler(w http.ResponseWriter, r *http.Request) {
|
|
// model := chi.URLParam(r, "model")
|
|
// if model == "" {
|
|
// http.Error(w, "Model parameter is required", http.StatusBadRequest)
|
|
// return
|
|
// }
|
|
|
|
// cmd, exists := instances[model]
|
|
// if !exists {
|
|
// http.Error(w, "No running instance for model: "+model, http.StatusNotFound)
|
|
// return
|
|
// }
|
|
|
|
// if err := cmd.Process.Signal(os.Interrupt); err != nil {
|
|
// http.Error(w, "Failed to stop llama server: "+err.Error(), http.StatusInternalServerError)
|
|
// return
|
|
// }
|
|
|
|
// delete(instances, model)
|
|
// w.Write([]byte("Llama server stopped for model: " + model))
|
|
// }
|