mirror of https://github.com/lordmathis/llamactl.git
synced 2025-11-06 00:54:23 +00:00
Setup routing
@@ -1,67 +1,18 @@
 package main
 
 import (
+	llamactl "llamactl/pkg"
 	"net/http"
-	"os"
-	"os/exec"
-
-	"github.com/go-chi/chi/v5"
-	"github.com/go-chi/chi/v5/middleware"
 )
 
-var instances map[string]*exec.Cmd = make(map[string]*exec.Cmd)
-
-func execLLama(model string) *exec.Cmd {
-	llamaCmd := exec.Command("llama", "server", "--model", model, "--port", "8080")
-	return llamaCmd
-}
-
-func launchHandler(w http.ResponseWriter, r *http.Request) {
-	model := chi.URLParam(r, "model")
-	if model == "" {
-		http.Error(w, "Model parameter is required", http.StatusBadRequest)
-		return
-	}
-
-	cmd := execLLama(model)
-	if err := cmd.Start(); err != nil {
-		http.Error(w, "Failed to start llama server: "+err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	instances[model] = cmd
-	w.Write([]byte("Llama server started for model: " + model))
-}
-
-func stopHandler(w http.ResponseWriter, r *http.Request) {
-	model := chi.URLParam(r, "model")
-	if model == "" {
-		http.Error(w, "Model parameter is required", http.StatusBadRequest)
-		return
-	}
-
-	cmd, exists := instances[model]
-	if !exists {
-		http.Error(w, "No running instance for model: "+model, http.StatusNotFound)
-		return
-	}
-
-	if err := cmd.Process.Signal(os.Interrupt); err != nil {
-		http.Error(w, "Failed to stop llama server: "+err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	delete(instances, model)
-	w.Write([]byte("Llama server stopped for model: " + model))
-}
+// @title Llama Server Control
+// @version 1.0
+// @description This is a control server for managing Llama Server instances.
+// @license.name MIT License
+// @license.url https://opensource.org/license/mit/
+// @basePath /api/v1
 
 func main() {
-	r := chi.NewRouter()
-	r.Use(middleware.Logger)
-	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
-		w.Write([]byte("welcome"))
-	})
-	r.Post("/launch/{model}", launchHandler)
-	r.Post("/stop/{model}", stopHandler)
-	http.ListenAndServe(":3000", r)
+	r := llamactl.SetupRouter()
+	// Start the server with the router
+	http.ListenAndServe(":8080", r)
 }
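Two notes on the rewritten main.go: the // @title through // @basePath comments look like swaggo/swag-style annotations for generating an OpenAPI spec, and the listen address moves from :3000 to :8080 while the error returned by http.ListenAndServe is still discarded, so a failed bind would exit silently. A minimal hardening sketch (the log.Fatal wrapper is my addition, not part of this commit):

	package main

	import (
		"log"
		"net/http"

		llamactl "llamactl/pkg"
	)

	func main() {
		r := llamactl.SetupRouter()
		// Surface listener errors (e.g. the port already being in use)
		// instead of silently dropping them.
		log.Fatal(http.ListenAndServe(":8080", r))
	}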
@@ -61,3 +61,42 @@ func ListDevicesHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "text/plain")
 	w.Write(output)
 }
+
+// func launchHandler(w http.ResponseWriter, r *http.Request) {
+// 	model := chi.URLParam(r, "model")
+// 	if model == "" {
+// 		http.Error(w, "Model parameter is required", http.StatusBadRequest)
+// 		return
+// 	}
+
+// 	cmd := execLLama(model)
+// 	if err := cmd.Start(); err != nil {
+// 		http.Error(w, "Failed to start llama server: "+err.Error(), http.StatusInternalServerError)
+// 		return
+// 	}
+
+// 	instances[model] = cmd
+// 	w.Write([]byte("Llama server started for model: " + model))
+// }
+
+// func stopHandler(w http.ResponseWriter, r *http.Request) {
+// 	model := chi.URLParam(r, "model")
+// 	if model == "" {
+// 		http.Error(w, "Model parameter is required", http.StatusBadRequest)
+// 		return
+// 	}
+
+// 	cmd, exists := instances[model]
+// 	if !exists {
+// 		http.Error(w, "No running instance for model: "+model, http.StatusNotFound)
+// 		return
+// 	}
+
+// 	if err := cmd.Process.Signal(os.Interrupt); err != nil {
+// 		http.Error(w, "Failed to stop llama server: "+err.Error(), http.StatusInternalServerError)
+// 		return
+// 	}
+
+// 	delete(instances, model)
+// 	w.Write([]byte("Llama server stopped for model: " + model))
+// }
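The launch/stop handlers removed from main.go are carried over here as comments, presumably to be revived once execLLama and the instances map also move into the package. One caveat for that revival: the map would then be written from concurrent HTTP handlers, so it needs synchronization. A hypothetical sketch (the mutex and the trackInstance/takeInstance helpers are mine, not part of this commit):

	package llamactl

	import (
		"os/exec"
		"sync"
	)

	// Guard the shared instance table so concurrent launch/stop
	// requests cannot race on the map.
	var (
		mu        sync.Mutex
		instances = make(map[string]*exec.Cmd)
	)

	func trackInstance(model string, cmd *exec.Cmd) {
		mu.Lock()
		defer mu.Unlock()
		instances[model] = cmd
	}

	func takeInstance(model string) (*exec.Cmd, bool) {
		mu.Lock()
		defer mu.Unlock()
		cmd, ok := instances[model]
		if ok {
			delete(instances, model)
		}
		return cmd, ok
	}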
server/pkg/routes.go (new file, 24 lines)
@@ -0,0 +1,24 @@
+package llamactl
+
+import (
+	"github.com/go-chi/chi/v5"
+	"github.com/go-chi/chi/v5/middleware"
+)
+
+func SetupRouter() *chi.Mux {
+	r := chi.NewRouter()
+	r.Use(middleware.Logger)
+
+	// Define routes
+	r.Route("/api/v1", func(r chi.Router) {
+		r.Get("/server/help", HelpHandler)
+		r.Get("/server/version", VersionHandler)
+		r.Get("/server/devices", ListDevicesHandler)
+
+		// Launch and stop handlers
+		// r.Post("/server/launch/{model}", launchHandler)
+		// r.Post("/server/stop/{model}", stopHandler)
+	})
+
+	return r
+}
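Since SetupRouter returns a *chi.Mux, which satisfies http.Handler, the new routing can be smoke-tested without opening a socket via net/http/httptest. A sketch, assuming each registered handler answers 200 (this commit itself adds no test):

	package llamactl

	import (
		"net/http"
		"net/http/httptest"
		"testing"
	)

	func TestRoutes(t *testing.T) {
		r := SetupRouter()
		for _, path := range []string{
			"/api/v1/server/help",
			"/api/v1/server/version",
			"/api/v1/server/devices",
		} {
			// Drive the mux directly; no listener is needed.
			req := httptest.NewRequest(http.MethodGet, path, nil)
			rec := httptest.NewRecorder()
			r.ServeHTTP(rec, req)
			if rec.Code != http.StatusOK {
				t.Errorf("GET %s: got status %d, want %d", path, rec.Code, http.StatusOK)
			}
		}
	}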