Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-05 16:44:22 +00:00).
Commit: Setup routing.
This commit is contained in:
@@ -1,67 +1,18 @@
|
||||
package main
|
||||
|
||||
import (
	"log"
	"net/http"
	"os"
	"os/exec"

	llamactl "llamactl/pkg"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)
|
||||
|
||||
// instances tracks the running llama server process for each model name.
// NOTE(review): this map is read and written by concurrent HTTP handlers
// without any synchronization — that is a data race; guard it with a
// sync.Mutex (or sync.RWMutex) alongside the map.
var instances map[string]*exec.Cmd = make(map[string]*exec.Cmd)
|
||||
|
||||
// execLLama builds (but does not start) the command that runs a llama
// server for the given model, listening on port 8080.
func execLLama(model string) *exec.Cmd {
	args := []string{"server", "--model", model, "--port", "8080"}
	return exec.Command("llama", args...)
}
|
||||
|
||||
func launchHandler(w http.ResponseWriter, r *http.Request) {
|
||||
model := chi.URLParam(r, "model")
|
||||
if model == "" {
|
||||
http.Error(w, "Model parameter is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
cmd := execLLama(model)
|
||||
if err := cmd.Start(); err != nil {
|
||||
http.Error(w, "Failed to start llama server: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
instances[model] = cmd
|
||||
w.Write([]byte("Llama server started for model: " + model))
|
||||
}
|
||||
|
||||
func stopHandler(w http.ResponseWriter, r *http.Request) {
|
||||
model := chi.URLParam(r, "model")
|
||||
if model == "" {
|
||||
http.Error(w, "Model parameter is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
cmd, exists := instances[model]
|
||||
if !exists {
|
||||
http.Error(w, "No running instance for model: "+model, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if err := cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
http.Error(w, "Failed to stop llama server: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
delete(instances, model)
|
||||
w.Write([]byte("Llama server stopped for model: " + model))
|
||||
}
|
||||
|
||||
// @title Llama Server Control
|
||||
// @version 1.0
|
||||
// @description This is a control server for managing Llama Server instances.
|
||||
// @license.name MIT License
|
||||
// @license.url https://opensource.org/license/mit/
|
||||
// @basePath /api/v1
|
||||
func main() {
|
||||
r := chi.NewRouter()
|
||||
r.Use(middleware.Logger)
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("welcome"))
|
||||
})
|
||||
r.Post("/launch/{model}", launchHandler)
|
||||
r.Post("/stop/{model}", stopHandler)
|
||||
http.ListenAndServe(":3000", r)
|
||||
r := llamactl.SetupRouter()
|
||||
// Start the server with the router
|
||||
http.ListenAndServe(":8080", r)
|
||||
}
|
||||
|
||||
@@ -61,3 +61,42 @@ func ListDevicesHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write(output)
|
||||
}
|
||||
|
||||
// func launchHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// model := chi.URLParam(r, "model")
|
||||
// if model == "" {
|
||||
// http.Error(w, "Model parameter is required", http.StatusBadRequest)
|
||||
// return
|
||||
// }
|
||||
|
||||
// cmd := execLLama(model)
|
||||
// if err := cmd.Start(); err != nil {
|
||||
// http.Error(w, "Failed to start llama server: "+err.Error(), http.StatusInternalServerError)
|
||||
// return
|
||||
// }
|
||||
|
||||
// instances[model] = cmd
|
||||
// w.Write([]byte("Llama server started for model: " + model))
|
||||
// }
|
||||
|
||||
// func stopHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// model := chi.URLParam(r, "model")
|
||||
// if model == "" {
|
||||
// http.Error(w, "Model parameter is required", http.StatusBadRequest)
|
||||
// return
|
||||
// }
|
||||
|
||||
// cmd, exists := instances[model]
|
||||
// if !exists {
|
||||
// http.Error(w, "No running instance for model: "+model, http.StatusNotFound)
|
||||
// return
|
||||
// }
|
||||
|
||||
// if err := cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
// http.Error(w, "Failed to stop llama server: "+err.Error(), http.StatusInternalServerError)
|
||||
// return
|
||||
// }
|
||||
|
||||
// delete(instances, model)
|
||||
// w.Write([]byte("Llama server stopped for model: " + model))
|
||||
// }
|
||||
|
||||
server/pkg/routes.go — new file, 24 lines
@@ -0,0 +1,24 @@
|
||||
package llamactl
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
)
|
||||
|
||||
func SetupRouter() *chi.Mux {
|
||||
r := chi.NewRouter()
|
||||
r.Use(middleware.Logger)
|
||||
|
||||
// Define routes
|
||||
r.Route("/api/v1", func(r chi.Router) {
|
||||
r.Get("/server/help", HelpHandler)
|
||||
r.Get("/server/version", VersionHandler)
|
||||
r.Get("/server/devices", ListDevicesHandler)
|
||||
|
||||
// Launch and stop handlers
|
||||
// r.Post("/server/launch/{model}", launchHandler)
|
||||
// r.Post("/server/stop/{model}", stopHandler)
|
||||
})
|
||||
|
||||
return r
|
||||
}
|
||||
Reference in New Issue
Block a user