Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-12-22 17:14:22 +00:00)

Compare commits: fd9e651e09...feat/llama (2 commits)

Commits:
- 38790aa507
- faf026aa54
@@ -14,6 +14,7 @@ const (
     BackendTypeLlamaCpp BackendType = "llama_cpp"
     BackendTypeMlxLm    BackendType = "mlx_lm"
     BackendTypeVllm     BackendType = "vllm"
+    BackendTypeUnknown  BackendType = "unknown"
 )
 
 type backend interface {
@@ -7,6 +7,7 @@ import (
     "net/http"
     "time"
 
+    "llamactl/pkg/backends"
     "llamactl/pkg/config"
 )
 
@@ -117,6 +118,14 @@ func (i *Instance) WaitForHealthy(timeout int) error {
     return i.process.waitForHealthy(timeout)
 }
 
+func (i *Instance) GetBackendType() backends.BackendType {
+    opts := i.GetOptions()
+    if opts == nil {
+        return backends.BackendTypeUnknown
+    }
+    return opts.BackendOptions.BackendType
+}
+
 // GetOptions returns the current options
 func (i *Instance) GetOptions() *Options {
     if i.options == nil {
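For orientation, a minimal caller sketch (not part of this diff; the helper name describeBackend is hypothetical, while the packages and constants are the ones shown above) of how the new accessor lets callers branch on the backend type instead of backend-specific predicates:

package example

import (
    "llamactl/pkg/backends"
    "llamactl/pkg/instance"
)

// describeBackend is a hypothetical helper showing how callers can switch on
// GetBackendType rather than using per-backend checks such as IsLlamaCpp.
func describeBackend(inst *instance.Instance) string {
    switch inst.GetBackendType() {
    case backends.BackendTypeLlamaCpp:
        return "llama.cpp instance: /models can be proxied"
    case backends.BackendTypeUnknown:
        return "instance has no options set yet"
    default:
        return "other backend (mlx_lm, vllm, ...)"
    }
}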
@@ -1,141 +0,0 @@
-package instance
-
-import (
-    "bytes"
-    "context"
-    "encoding/json"
-    "fmt"
-    "io"
-    "llamactl/pkg/backends"
-    "net/http"
-    "time"
-)
-
-// Model represents a model available in a llama.cpp instance
-type Model struct {
-    ID      string      `json:"id"`
-    Object  string      `json:"object"`
-    OwnedBy string      `json:"owned_by"`
-    Created int64       `json:"created"`
-    InCache bool        `json:"in_cache"`
-    Path    string      `json:"path"`
-    Status  ModelStatus `json:"status"`
-}
-
-// ModelStatus represents the status of a model in an instance
-type ModelStatus struct {
-    Value string   `json:"value"` // "loaded" | "loading" | "unloaded"
-    Args  []string `json:"args"`
-}
-
-// IsLlamaCpp checks if this instance is a llama.cpp instance
-func (i *Instance) IsLlamaCpp() bool {
-    opts := i.GetOptions()
-    if opts == nil {
-        return false
-    }
-    return opts.BackendOptions.BackendType == backends.BackendTypeLlamaCpp
-}
-
-// GetModels fetches the models available in this llama.cpp instance
-func (i *Instance) GetModels() ([]Model, error) {
-    if !i.IsLlamaCpp() {
-        return nil, fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
-    }
-
-    if !i.IsRunning() {
-        return nil, fmt.Errorf("instance %s is not running", i.Name)
-    }
-
-    var result struct {
-        Data []Model `json:"data"`
-    }
-    if err := i.doRequest("GET", "/models", nil, &result, 10*time.Second); err != nil {
-        return nil, fmt.Errorf("failed to fetch models: %w", err)
-    }
-
-    return result.Data, nil
-}
-
-// LoadModel loads a model in this llama.cpp instance
-func (i *Instance) LoadModel(modelName string) error {
-    if !i.IsLlamaCpp() {
-        return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
-    }
-
-    if !i.IsRunning() {
-        return fmt.Errorf("instance %s is not running", i.Name)
-    }
-
-    // Make the load request
-    reqBody := map[string]string{"model": modelName}
-    if err := i.doRequest("POST", "/models/load", reqBody, nil, 30*time.Second); err != nil {
-        return fmt.Errorf("failed to load model: %w", err)
-    }
-
-    return nil
-}
-
-// UnloadModel unloads a model from this llama.cpp instance
-func (i *Instance) UnloadModel(modelName string) error {
-    if !i.IsLlamaCpp() {
-        return fmt.Errorf("instance %s is not a llama.cpp instance", i.Name)
-    }
-
-    if !i.IsRunning() {
-        return fmt.Errorf("instance %s is not running", i.Name)
-    }
-
-    // Make the unload request
-    reqBody := map[string]string{"model": modelName}
-    if err := i.doRequest("POST", "/models/unload", reqBody, nil, 30*time.Second); err != nil {
-        return fmt.Errorf("failed to unload model: %w", err)
-    }
-
-    return nil
-}
-
-// doRequest makes an HTTP request to this instance's backend
-func (i *Instance) doRequest(method, path string, reqBody, respBody any, timeout time.Duration) error {
-    url := fmt.Sprintf("http://%s:%d%s", i.GetHost(), i.GetPort(), path)
-
-    var bodyReader io.Reader
-    if reqBody != nil {
-        bodyBytes, err := json.Marshal(reqBody)
-        if err != nil {
-            return fmt.Errorf("failed to marshal request body: %w", err)
-        }
-        bodyReader = bytes.NewReader(bodyBytes)
-    }
-
-    ctx, cancel := context.WithTimeout(context.Background(), timeout)
-    defer cancel()
-
-    req, err := http.NewRequestWithContext(ctx, method, url, bodyReader)
-    if err != nil {
-        return fmt.Errorf("failed to create request: %w", err)
-    }
-
-    if reqBody != nil {
-        req.Header.Set("Content-Type", "application/json")
-    }
-
-    resp, err := http.DefaultClient.Do(req)
-    if err != nil {
-        return err
-    }
-    defer resp.Body.Close()
-
-    if resp.StatusCode != http.StatusOK {
-        bodyBytes, _ := io.ReadAll(resp.Body)
-        return fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes))
-    }
-
-    if respBody != nil {
-        if err := json.NewDecoder(resp.Body).Decode(respBody); err != nil {
-            return fmt.Errorf("failed to decode response: %w", err)
-        }
-    }
-
-    return nil
-}
@@ -19,13 +19,11 @@ type InstanceManager interface {
     UpdateInstance(name string, options *instance.Options) (*instance.Instance, error)
     DeleteInstance(name string) error
     StartInstance(name string) (*instance.Instance, error)
-    IsMaxRunningInstancesReached() bool
+    AtMaxRunning() bool
     StopInstance(name string) (*instance.Instance, error)
    EvictLRUInstance() error
     RestartInstance(name string) (*instance.Instance, error)
     GetInstanceLogs(name string, numLines int) (string, error)
-    ResolveInstance(modelName string) (string, error)
-    RefreshModelRegistry(inst *instance.Instance) error
     Shutdown()
 }
 
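A usage sketch of the renamed check (hypothetical caller; the import path llamactl/pkg/manager is assumed, and the evict-before-start pattern mirrors ensureInstanceRunning further down in this diff):

package example

import "llamactl/pkg/manager"

// startWithEviction is a hypothetical caller: when the running-instance limit
// is reached and LRU eviction is enabled, evict one instance before starting.
func startWithEviction(im manager.InstanceManager, name string, lruEnabled bool) error {
    if im.AtMaxRunning() && lruEnabled {
        if err := im.EvictLRUInstance(); err != nil {
            return err
        }
    }
    _, err := im.StartInstance(name)
    return err
}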
@@ -36,7 +34,6 @@ type instanceManager struct {
     db        database.InstanceStore
     remote    *remoteManager
     lifecycle *lifecycleManager
-    models    *modelRegistry
 
     // Configuration
     globalConfig *config.AppConfig
@@ -63,16 +60,12 @@ func New(globalConfig *config.AppConfig, db database.InstanceStore) InstanceMana
     // Initialize remote manager
     remote := newRemoteManager(globalConfig.Nodes, 30*time.Second)
 
-    // Initialize model registry
-    models := newModelRegistry()
-
     // Create manager instance
     im := &instanceManager{
         registry:     registry,
         ports:        ports,
         db:           db,
         remote:       remote,
-        models:       models,
         globalConfig: globalConfig,
     }
 
@@ -149,27 +142,9 @@ func (im *instanceManager) loadInstances() error {
     // Auto-start instances that have auto-restart enabled
     go im.autoStartInstances()
 
-    // Discover models from all running llama.cpp instances
-    go im.discoverAllModels()
-
     return nil
 }
 
-// discoverAllModels discovers and registers models for all running llama.cpp instances
-func (im *instanceManager) discoverAllModels() {
-    instances := im.registry.listRunning()
-
-    for _, inst := range instances {
-        if !inst.IsLlamaCpp() {
-            continue
-        }
-
-        if err := im.RefreshModelRegistry(inst); err != nil {
-            log.Printf("Failed to discover models for instance %s: %v", inst.Name, err)
-        }
-    }
-}
-
 // loadInstance loads a single persisted instance and adds it to the registry
 func (im *instanceManager) loadInstance(persistedInst *instance.Instance) error {
     name := persistedInst.Name
@@ -1,79 +0,0 @@
-package manager
-
-import (
-    "fmt"
-    "llamactl/pkg/instance"
-    "sync"
-)
-
-// modelRegistry maintains a global mapping of model names to instance names
-// for llama.cpp instances. Model names must be globally unique across all instances.
-type modelRegistry struct {
-    mu              sync.RWMutex
-    modelToInstance map[string]string   // model name → instance name
-    instanceModels  map[string][]string // instance name → model names
-}
-
-// newModelRegistry creates a new model registry
-func newModelRegistry() *modelRegistry {
-    return &modelRegistry{
-        modelToInstance: make(map[string]string),
-        instanceModels:  make(map[string][]string),
-    }
-}
-
-// registerModels registers models from an instance to the registry.
-// Skips models that conflict with other instances and returns a list of conflicts.
-func (mr *modelRegistry) registerModels(instanceName string, models []instance.Model) []string {
-    mr.mu.Lock()
-    defer mr.mu.Unlock()
-
-    // Unregister any existing models for this instance first
-    mr.removeModels(instanceName)
-
-    // Register models, skipping conflicts
-    var modelNames []string
-    var conflicts []string
-
-    for _, model := range models {
-        // Check if this model conflicts with another instance
-        if existingInstance, exists := mr.modelToInstance[model.ID]; exists && existingInstance != instanceName {
-            conflicts = append(conflicts, fmt.Sprintf("%s (already in %s)", model.ID, existingInstance))
-            continue // Skip this model
-        }
-
-        // Register the model
-        mr.modelToInstance[model.ID] = instanceName
-        modelNames = append(modelNames, model.ID)
-    }
-
-    mr.instanceModels[instanceName] = modelNames
-
-    return conflicts
-}
-
-// unregisterModels removes all models for an instance
-func (mr *modelRegistry) unregisterModels(instanceName string) {
-    mr.mu.Lock()
-    defer mr.mu.Unlock()
-    mr.removeModels(instanceName)
-}
-
-// removeModels removes all models for an instance (caller must hold lock)
-func (mr *modelRegistry) removeModels(instanceName string) {
-    if models, exists := mr.instanceModels[instanceName]; exists {
-        for _, modelName := range models {
-            delete(mr.modelToInstance, modelName)
-        }
-        delete(mr.instanceModels, instanceName)
-    }
-}
-
-// getModelInstance returns the instance name that hosts the given model
-func (mr *modelRegistry) getModelInstance(modelName string) (string, bool) {
-    mr.mu.RLock()
-    defer mr.mu.RUnlock()
-
-    instanceName, exists := mr.modelToInstance[modelName]
-    return instanceName, exists
-}
@@ -337,9 +337,6 @@ func (im *instanceManager) DeleteInstance(name string) error {
     // Release port (use ReleaseByInstance for proper cleanup)
     im.ports.releaseByInstance(name)
 
-    // Unregister models when instance is deleted
-    im.onInstanceStopped(name)
-
     // Remove from registry
     if err := im.registry.remove(name); err != nil {
         return fmt.Errorf("failed to remove instance from registry: %w", err)
@@ -386,7 +383,7 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
     }
 
     // Check max running instances limit for local instances only
-    if im.IsMaxRunningInstancesReached() {
+    if im.AtMaxRunning() {
         return nil, MaxRunningInstancesError(fmt.Errorf("maximum number of running instances (%d) reached", im.globalConfig.Instances.MaxRunningInstances))
     }
 
@@ -399,13 +396,10 @@ func (im *instanceManager) StartInstance(name string) (*instance.Instance, error
         log.Printf("Warning: failed to persist instance %s: %v", name, err)
     }
 
-    // Discover and register models for llama.cpp instances
-    go im.onInstanceStarted(name)
-
     return inst, nil
 }
 
-func (im *instanceManager) IsMaxRunningInstancesReached() bool {
+func (im *instanceManager) AtMaxRunning() bool {
     if im.globalConfig.Instances.MaxRunningInstances == -1 {
         return false
     }
@@ -461,9 +455,6 @@ func (im *instanceManager) StopInstance(name string) (*instance.Instance, error)
         log.Printf("Warning: failed to persist instance %s: %v", name, err)
     }
 
-    // Unregister models when instance stops
-    im.onInstanceStopped(name)
-
     return inst, nil
 }
 
@@ -544,73 +535,3 @@ func (im *instanceManager) setPortInOptions(options *instance.Options, port int)
 func (im *instanceManager) EvictLRUInstance() error {
     return im.lifecycle.evictLRU()
 }
-
-// ResolveInstance resolves a model name to an instance name.
-// Precedence: instance name > model registry
-func (im *instanceManager) ResolveInstance(modelName string) (string, error) {
-    // Check if it's an instance name first
-    if _, err := im.GetInstance(modelName); err == nil {
-        return modelName, nil
-    }
-
-    // Check if it's a model name in the registry
-    if instanceName, exists := im.models.getModelInstance(modelName); exists {
-        return instanceName, nil
-    }
-
-    return "", fmt.Errorf("model or instance '%s' not found", modelName)
-}
-
-// RefreshModelRegistry refreshes the model registry for the given instance
-func (im *instanceManager) RefreshModelRegistry(inst *instance.Instance) error {
-    if !inst.IsRunning() {
-        return fmt.Errorf("instance %s is not running", inst.Name)
-    }
-
-    // Fetch models from instance and register them
-    models, err := inst.GetModels()
-    if err != nil {
-        return fmt.Errorf("failed to fetch models: %w", err)
-    }
-
-    // Register models, skipping conflicts
-    conflicts := im.models.registerModels(inst.Name, models)
-    if len(conflicts) > 0 {
-        log.Printf("Warning: Model name conflicts for instance %s (skipped): %v", inst.Name, conflicts)
-    }
-
-    // Check if instance name shadows any model names
-    if otherInstance, exists := im.models.getModelInstance(inst.Name); exists && otherInstance != inst.Name {
-        log.Printf("Warning: Instance name '%s' shadows model name from instance '%s'", inst.Name, otherInstance)
-    }
-
-    return nil
-}
-
-// onInstanceStarted is called when an instance successfully starts and becomes healthy
-func (im *instanceManager) onInstanceStarted(name string) {
-    inst, err := im.GetInstance(name)
-    if err != nil {
-        log.Printf("Failed to get instance %s for model discovery: %v", name, err)
-        return
-    }
-
-    // Only discover models for llama.cpp instances
-    if !inst.IsLlamaCpp() {
-        return
-    }
-
-    if err := inst.WaitForHealthy(30); err != nil {
-        log.Printf("Instance %s not healthy, skipping model discovery: %v", name, err)
-        return
-    }
-
-    if err := im.RefreshModelRegistry(inst); err != nil {
-        log.Printf("Failed to discover models for instance %s: %v", name, err)
-    }
-}
-
-// onInstanceStopped is called when an instance stops or is deleted
-func (im *instanceManager) onInstanceStopped(name string) {
-    im.models.unregisterModels(name)
-}
@@ -96,7 +96,7 @@ func (h *Handler) ensureInstanceRunning(inst *instance.Instance) error {
         return fmt.Errorf("instance is not running and on-demand start is not enabled")
     }
 
-    if h.InstanceManager.IsMaxRunningInstancesReached() {
+    if h.InstanceManager.AtMaxRunning() {
         if h.cfg.Instances.EnableLRUEviction {
             err := h.InstanceManager.EvictLRUInstance()
             if err != nil {
@@ -5,12 +5,9 @@ import (
     "fmt"
     "llamactl/pkg/backends"
     "llamactl/pkg/instance"
-    "log"
     "net/http"
     "os/exec"
     "strings"
-
-    "github.com/go-chi/chi/v5"
 )
 
 // ParseCommandRequest represents the request body for backend command parsing
@@ -323,24 +320,41 @@ func (h *Handler) LlamaServerListDevicesHandler() http.HandlerFunc {
 // @Router /api/v1/llama-cpp/{name}/models [get]
 func (h *Handler) LlamaCppListModels() http.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) {
-        inst, err := h.getInstance(r)
+        inst, err := h.validateLlamaCppInstance(r)
         if err != nil {
-            writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
+            writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
             return
         }
 
-        models, err := inst.GetModels()
-        if err != nil {
-            writeError(w, http.StatusBadRequest, "get_models_failed", err.Error())
+        // Check instance permissions
+        if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
+            writeError(w, http.StatusForbidden, "permission_denied", err.Error())
             return
         }
 
-        response := map[string]any{
-            "object": "list",
-            "data":   models,
+        // Check if instance is shutting down before autostart logic
+        if inst.GetStatus() == instance.ShuttingDown {
+            writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
+            return
         }
 
-        writeJSON(w, http.StatusOK, response)
+        if !inst.IsRemote() && !inst.IsRunning() {
+            err := h.ensureInstanceRunning(inst)
+            if err != nil {
+                writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
+                return
+            }
+        }
+
+        // Modify request path to /models for proxying
+        r.URL.Path = "/models"
+
+        // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
+        err = inst.ServeHTTP(w, r)
+        if err != nil {
+            // Error is already handled in ServeHTTP (response written)
+            return
+        }
     }
 }
 
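As a usage sketch against the reworked endpoint (host, port, instance name, and bearer token are illustrative assumptions; the route itself comes from the @Router annotation above), the handler now proxies the instance's /models response straight through:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Assumed llamactl address, instance name, and API key.
    req, err := http.NewRequest("GET",
        "http://localhost:8080/api/v1/llama-cpp/my-instance/models", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Authorization", "Bearer test-inf")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status)
    fmt.Println(string(body)) // the llama.cpp /models payload, returned as-is by the proxy
}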
@@ -358,28 +372,41 @@ func (h *Handler) LlamaCppListModels() http.HandlerFunc {
 // @Router /api/v1/llama-cpp/{name}/models/{model}/load [post]
 func (h *Handler) LlamaCppLoadModel() http.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) {
-        inst, err := h.getInstance(r)
+        inst, err := h.validateLlamaCppInstance(r)
         if err != nil {
-            writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
+            writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
             return
         }
 
-        modelName := chi.URLParam(r, "model")
-
-        if err := inst.LoadModel(modelName); err != nil {
-            writeError(w, http.StatusBadRequest, "load_model_failed", err.Error())
+        // Check instance permissions
+        if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
+            writeError(w, http.StatusForbidden, "permission_denied", err.Error())
             return
         }
 
-        // Refresh the model registry
-        if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil {
-            log.Printf("Warning: failed to refresh model registry after load: %v", err)
+        // Check if instance is shutting down before autostart logic
+        if inst.GetStatus() == instance.ShuttingDown {
+            writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
+            return
         }
 
-        writeJSON(w, http.StatusOK, map[string]string{
-            "status":  "success",
-            "message": fmt.Sprintf("Model %s loaded successfully", modelName),
-        })
+        if !inst.IsRemote() && !inst.IsRunning() {
+            err := h.ensureInstanceRunning(inst)
+            if err != nil {
+                writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
+                return
+            }
+        }
+
+        // Modify request path to /models/load for proxying
+        r.URL.Path = "/models/load"
+
+        // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
+        err = inst.ServeHTTP(w, r)
+        if err != nil {
+            // Error is already handled in ServeHTTP (response written)
+            return
+        }
     }
 }
 
@@ -397,27 +424,40 @@ func (h *Handler) LlamaCppLoadModel() http.HandlerFunc {
 // @Router /api/v1/llama-cpp/{name}/models/{model}/unload [post]
 func (h *Handler) LlamaCppUnloadModel() http.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) {
-        inst, err := h.getInstance(r)
+        inst, err := h.validateLlamaCppInstance(r)
         if err != nil {
-            writeError(w, http.StatusBadRequest, "invalid_instance", err.Error())
+            writeError(w, http.StatusBadRequest, "invalid instance", err.Error())
             return
         }
 
-        modelName := chi.URLParam(r, "model")
-
-        if err := inst.UnloadModel(modelName); err != nil {
-            writeError(w, http.StatusBadRequest, "unload_model_failed", err.Error())
+        // Check instance permissions
+        if err := h.authMiddleware.CheckInstancePermission(r.Context(), inst.ID); err != nil {
+            writeError(w, http.StatusForbidden, "permission_denied", err.Error())
             return
         }
 
-        // Refresh the model registry
-        if err := h.InstanceManager.RefreshModelRegistry(inst); err != nil {
-            log.Printf("Warning: failed to refresh model registry after unload: %v", err)
+        // Check if instance is shutting down before autostart logic
+        if inst.GetStatus() == instance.ShuttingDown {
+            writeError(w, http.StatusServiceUnavailable, "instance_shutting_down", "Instance is shutting down")
+            return
         }
 
-        writeJSON(w, http.StatusOK, map[string]string{
-            "status":  "success",
-            "message": fmt.Sprintf("Model %s unloaded successfully", modelName),
-        })
+        if !inst.IsRemote() && !inst.IsRunning() {
+            err := h.ensureInstanceRunning(inst)
+            if err != nil {
+                writeError(w, http.StatusInternalServerError, "instance start failed", err.Error())
+                return
+            }
+        }
+
+        // Modify request path to /models/unload for proxying
+        r.URL.Path = "/models/unload"
+
+        // Use instance's ServeHTTP which tracks inflight requests and handles shutting down state
+        err = inst.ServeHTTP(w, r)
+        if err != nil {
+            // Error is already handled in ServeHTTP (response written)
+            return
+        }
     }
 }
 
@@ -5,9 +5,11 @@ import (
     "encoding/json"
     "fmt"
     "io"
+    "llamactl/pkg/backends"
     "llamactl/pkg/instance"
     "llamactl/pkg/validation"
     "net/http"
+    "strings"
 )
 
 // OpenAIListInstancesResponse represents the response structure for listing instances (models) in OpenAI-compatible format
@@ -24,6 +26,53 @@ type OpenAIInstance struct {
     OwnedBy string `json:"owned_by"`
 }
 
+// LlamaCppModel represents a model available in a llama.cpp instance
+type LlamaCppModel struct {
+    ID      string              `json:"id"`
+    Object  string              `json:"object"`
+    OwnedBy string              `json:"owned_by"`
+    Created int64               `json:"created"`
+    InCache bool                `json:"in_cache"`
+    Path    string              `json:"path"`
+    Status  LlamaCppModelStatus `json:"status"`
+}
+
+// LlamaCppModelStatus represents the status of a model in a llama.cpp instance
+type LlamaCppModelStatus struct {
+    Value string   `json:"value"` // "loaded" | "loading" | "unloaded"
+    Args  []string `json:"args"`
+}
+
+// fetchLlamaCppModels fetches models from a llama.cpp instance using the proxy
+func fetchLlamaCppModels(inst *instance.Instance) ([]LlamaCppModel, error) {
+    // Create a request to the instance's /models endpoint
+    req, err := http.NewRequest("GET", fmt.Sprintf("http://%s:%d/models", inst.GetHost(), inst.GetPort()), nil)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create request: %w", err)
+    }
+
+    // Use a custom response writer to capture the response
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        bodyBytes, _ := io.ReadAll(resp.Body)
+        return nil, fmt.Errorf("status %d: %s", resp.StatusCode, string(bodyBytes))
+    }
+
+    var result struct {
+        Data []LlamaCppModel `json:"data"`
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+        return nil, fmt.Errorf("failed to decode response: %w", err)
+    }
+
+    return result.Data, nil
+}
+
 // OpenAIListInstances godoc
 // @Summary List instances in OpenAI-compatible format
 // @Description Returns a list of instances in a format compatible with OpenAI API
@@ -46,9 +95,9 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
     // For each llama.cpp instance, try to fetch models and add them as separate entries
     for _, inst := range instances {
 
-        if inst.IsLlamaCpp() && inst.IsRunning() {
+        if inst.GetBackendType() == backends.BackendTypeLlamaCpp && inst.IsRunning() {
             // Try to fetch models from the instance
-            models, err := inst.GetModels()
+            models, err := fetchLlamaCppModels(inst)
             if err != nil {
                 fmt.Printf("Failed to fetch models from instance %s: %v", inst.Name, err)
                 continue
@@ -56,9 +105,9 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
 
             for _, model := range models {
                 openaiInstances = append(openaiInstances, OpenAIInstance{
-                    ID:      model.ID,
+                    ID:      inst.Name + "/" + model.ID,
                     Object:  "model",
-                    Created: model.Created,
+                    Created: inst.Created,
                     OwnedBy: inst.Name,
                 })
             }
@@ -115,17 +164,24 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
             return
         }
 
-        modelName, ok := requestBody["model"].(string)
-        if !ok || modelName == "" {
+        reqModelName, ok := requestBody["model"].(string)
+        if !ok || reqModelName == "" {
             writeError(w, http.StatusBadRequest, "invalid_request", "Model name is required")
             return
         }
 
-        // Resolve model name to instance name (checks instance names first, then model registry)
-        instanceName, err := h.InstanceManager.ResolveInstance(modelName)
-        if err != nil {
-            writeError(w, http.StatusBadRequest, "model_not_found", err.Error())
-            return
+        // Parse instance name and model name from <instance_name>/<model_name> format
+        var instanceName string
+        var modelName string
+
+        // Check if model name contains "/"
+        if idx := strings.Index(reqModelName, "/"); idx != -1 {
+            // Split into instance and model parts
+            instanceName = reqModelName[:idx]
+            modelName = reqModelName[idx+1:]
+        } else {
+            instanceName = reqModelName
+            modelName = reqModelName
         }
 
         // Validate instance name at the entry point
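In isolation, the new resolution rule reads as follows (a standalone sketch; the example names in the trailing comments are hypothetical, while "proxy-test" is the default used by test_llm.py further down):

package example

import "strings"

// splitModelRef mirrors the parsing added to OpenAIProxy: a reference of the
// form "<instance_name>/<model_name>" selects both an instance and a model,
// while a bare name is used as both the instance and the model.
func splitModelRef(reqModelName string) (instanceName, modelName string) {
    if idx := strings.Index(reqModelName, "/"); idx != -1 {
        return reqModelName[:idx], reqModelName[idx+1:]
    }
    return reqModelName, reqModelName
}

// splitModelRef("my-instance/qwen3-8b") -> ("my-instance", "qwen3-8b")
// splitModelRef("proxy-test")           -> ("proxy-test", "proxy-test")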
|
|
||||||
// Validate instance name at the entry point
|
// Validate instance name at the entry point
|
||||||
@@ -154,6 +210,11 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if inst.IsRemote() {
|
||||||
|
// Don't replace model name for remote instances
|
||||||
|
modelName = reqModelName
|
||||||
|
}
|
||||||
|
|
||||||
if !inst.IsRemote() && !inst.IsRunning() {
|
if !inst.IsRemote() && !inst.IsRunning() {
|
||||||
err := h.ensureInstanceRunning(inst)
|
err := h.ensureInstanceRunning(inst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -162,6 +223,16 @@ func (h *Handler) OpenAIProxy() http.HandlerFunc {
             }
         }
 
+        // Update the request body with just the model name
+        requestBody["model"] = modelName
+
+        // Re-marshal the updated body
+        bodyBytes, err = json.Marshal(requestBody)
+        if err != nil {
+            writeError(w, http.StatusInternalServerError, "marshal_error", "Failed to update request body")
+            return
+        }
+
         // Recreate the request body from the bytes we read
         r.Body = io.NopCloser(bytes.NewReader(bodyBytes))
         r.ContentLength = int64(len(bodyBytes))
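A self-contained sketch (hypothetical values) of that body-rewrite step, showing that the upstream instance only ever sees the bare model name:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // What a client sends to the OpenAI-compatible proxy (hypothetical values).
    requestBody := map[string]any{
        "model":    "my-instance/qwen3-8b",
        "messages": []map[string]string{{"role": "user", "content": "hi"}},
    }

    // After parsing "<instance>/<model>", only the model part is forwarded.
    requestBody["model"] = "qwen3-8b"

    bodyBytes, err := json.Marshal(requestBody)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(bodyBytes)) // body proxied to the selected instance
}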
test_llm.py (new file, 74 lines)
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+"""
+Simple Python script to interact with local LLM server's OpenAI-compatible API
+"""
+
+import requests
+
+# Local LLM server configuration
+LLM_SERVER_URL = "http://localhost:8080/v1/chat/completions"
+MODEL_NAME = "proxy-test"  # Default model name, can be changed based on your setup
+
+def send_message(message, model=MODEL_NAME, temperature=0.7, max_tokens=1000):
+    """
+    Send a message to local LLM server API
+
+    Args:
+        message (str): The message to send
+        model (str): Model name (depends on your LLM server setup)
+        temperature (float): Controls randomness (0.0 to 1.0)
+        max_tokens (int): Maximum tokens in response
+
+    Returns:
+        str: The AI response or error message
+    """
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": "Bearer test-inf"
+    }
+
+    data = {
+        "model": model,
+        "messages": [
+            {
+                "role": "user",
+                "content": message
+            }
+        ],
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+        "stream": False
+    }
+
+    response = requests.post(LLM_SERVER_URL, headers=headers, json=data, timeout=60)
+    response.raise_for_status()
+
+    result = response.json()
+    return result["choices"][0]["message"]["content"]
+
+def main():
+    """Run in interactive mode for continuous conversation"""
+    print("Local LLM Chat Client")
+    print("-" * 40)
+
+    while True:
+        try:
+            user_input = input("\nYou: ").strip()
+
+            if not user_input:
+                continue
+
+            print("AI: ", end="", flush=True)
+            response = send_message(user_input)
+            print(response)
+
+        except KeyboardInterrupt:
+            print("\nGoodbye!")
+            break
+        except EOFError:
+            print("\nGoodbye!")
+            break
+
+if __name__ == "__main__":
+    main()