Add OpenAI-compatible endpoints and instance creation timestamp

2025-07-27 12:07:33 +02:00
parent 1261232baa
commit e6652e52e1
7 changed files with 408 additions and 0 deletions

View File

@@ -402,6 +402,7 @@ func (h *Handler) GetInstanceLogs() http.HandlerFunc {
// @Failure 500 {string} string "Internal Server Error"
// @Failure 503 {string} string "Instance is not running"
// @Router /instances/{name}/proxy [get]
// @Router /instances/{name}/proxy [post]
func (h *Handler) ProxyToInstance() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
name := chi.URLParam(r, "name")
@@ -455,7 +456,55 @@ func (h *Handler) ProxyToInstance() http.HandlerFunc {
}
}
// OpenAIListInstances godoc
// @Summary List instances in OpenAI-compatible format
// @Description Returns a list of instances in a format compatible with the OpenAI API
// @Tags openai
// @Produce json
// @Success 200 {object} OpenAIListInstancesResponse "List of OpenAI-compatible instances"
// @Failure 500 {string} string "Internal Server Error"
// @Router /v1/models [get]
func (h *Handler) OpenAIListInstances() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
instances, err := h.InstanceManager.ListInstances()
if err != nil {
http.Error(w, "Failed to list instances: "+err.Error(), http.StatusInternalServerError)
return
}
openaiInstances := make([]OpenAIInstance, len(instances))
for i, instance := range instances {
openaiInstances[i] = OpenAIInstance{
ID: instance.Name,
Object: "model",
Created: instance.Created,
OwnedBy: "llamactl",
}
}
openaiResponse := OpenAIListInstancesResponse{
Object: "list",
Data: openaiInstances,
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(openaiResponse); err != nil {
http.Error(w, "Failed to encode instances: "+err.Error(), http.StatusInternalServerError)
return
}
}
}
// OpenAIProxy godoc
// @Summary OpenAI-compatible proxy endpoint
// @Description Handles all POST requests to /v1/*, routing to the appropriate instance based on the request body
// @Tags openai
// @Accept json
// @Produce json
// @Success 200 "OpenAI response"
// @Failure 400 {string} string "Invalid request body or model name"
// @Failure 500 {string} string "Internal Server Error"
// @Router /v1/ [post]
func (h *Handler) OpenAIProxy() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// Read the entire body first
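As a rough sketch of the body-based routing the godoc above describes (read the body, extract the "model" field, forward the request to the matching instance), the handler could look roughly like the following; the findProxyForModel helper and the package name are illustrative assumptions, not code from this commit:

package example

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httputil"
)

// findProxyForModel is a hypothetical lookup from a model name to a reverse
// proxy for the running instance that serves it.
var findProxyForModel func(model string) (*httputil.ReverseProxy, bool)

func openAIProxySketch(w http.ResponseWriter, r *http.Request) {
	// Read the entire body first so the model name can be inspected
	// before the request is forwarded.
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "Failed to read request body", http.StatusBadRequest)
		return
	}

	// Only the "model" field matters for routing; the payload itself is passed through untouched.
	var req struct {
		Model string `json:"model"`
	}
	if err := json.Unmarshal(body, &req); err != nil || req.Model == "" {
		http.Error(w, "Invalid request body or model name", http.StatusBadRequest)
		return
	}

	proxy, ok := findProxyForModel(req.Model)
	if !ok {
		http.Error(w, "Invalid request body or model name", http.StatusBadRequest)
		return
	}

	// Restore the body so the proxied instance receives the full payload.
	r.Body = io.NopCloser(bytes.NewReader(body))
	r.ContentLength = int64(len(body))
	proxy.ServeHTTP(w, r)
}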

View File

@@ -10,6 +10,7 @@ import (
"net/url"
"os/exec"
"sync"
"time"
)
type CreateInstanceOptions struct {
@@ -60,6 +61,9 @@ type Instance struct {
// Status
Running bool `json:"running"`
// Creation time
Created int64 `json:"created,omitempty"` // Unix timestamp when the instance was created
// Logging file
logger *InstanceLogger `json:"-"`
@@ -153,6 +157,8 @@ func NewInstance(name string, globalSettings *InstancesConfig, options *CreateIn
logger: logger,
Running: false,
Created: time.Now().Unix(),
}
}
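A quick illustration of the created,omitempty tag added above: a zero timestamp (e.g. an instance that predates this field) serializes without the key, while a fresh instance carries the Unix-seconds value set in NewInstance. The type name here is illustrative:

package example

import (
	"encoding/json"
	"fmt"
	"time"
)

type meta struct {
	Created int64 `json:"created,omitempty"`
}

func main() {
	noTimestamp, _ := json.Marshal(meta{})                     // {}
	fresh, _ := json.Marshal(meta{Created: time.Now().Unix()}) // {"created":<unix seconds>}
	fmt.Println(string(noTimestamp), string(fresh))
}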

pkg/openai.go (new file, 13 additions)
View File

@@ -0,0 +1,13 @@
package llamactl
type OpenAIListInstancesResponse struct {
Object string `json:"object"`
Data []OpenAIInstance `json:"data"`
}
type OpenAIInstance struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
OwnedBy string `json:"owned_by"`
}
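For reference, a response assembled from these types (mirrored below so the snippet is self-contained) marshals into the same shape as OpenAI's GET /v1/models listing; the instance name and timestamp are made up:

package example

import (
	"encoding/json"
	"fmt"
)

type OpenAIInstance struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	OwnedBy string `json:"owned_by"`
}

type OpenAIListInstancesResponse struct {
	Object string           `json:"object"`
	Data   []OpenAIInstance `json:"data"`
}

func main() {
	resp := OpenAIListInstancesResponse{
		Object: "list",
		Data: []OpenAIInstance{
			{ID: "llama2-7b", Object: "model", Created: 1753610853, OwnedBy: "llamactl"},
		},
	}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out))
	// {"object":"list","data":[{"id":"llama2-7b","object":"model","created":1753610853,"owned_by":"llamactl"}]}
}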

View File

@@ -50,6 +50,8 @@ func SetupRouter(handler *Handler) *chi.Mux {
})
})
r.Get("/v1/models", handler.OpenAIListInstances()) // List instances in OpenAI-compatible format
// OpenAI-compatible proxy endpoint
// Handles all POST requests to /v1/*, including:
// - /v1/completions