Refactor backend options handling and validation
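Note (not part of the diff): this refactor replaces the per-backend switch statements scattered through pkg/instance with calls on a single backends.Options value. The sketch below is inferred from the call sites in this diff, not copied from pkg/backends; any name not visible in the diff is an assumption.

	// Sketch only: the shape of backends.Options as implied by this diff.
	// The two exported fields appear verbatim in Options.UnmarshalJSON below;
	// the method set is collected from the new call sites.
	package backends

	type BackendType string

	const (
		BackendTypeLlamaCpp BackendType = "llama_cpp" // concrete string values assumed
		BackendTypeMlxLm    BackendType = "mlx_lm"
		BackendTypeVllm     BackendType = "vllm"
	)

	type Options struct {
		BackendType    BackendType    `json:"backend_type"`
		BackendOptions map[string]any `json:"backend_options,omitempty"`
		// plus the typed LlamaServerOptions / MlxServerOptions /
		// VllmServerOptions, populated by a custom UnmarshalJSON
	}

	// Methods the instance package now calls (signatures inferred from the
	// call sites; the global-settings parameter type is unnamed in this diff):
	//   GetHost() string
	//   GetPort() int
	//   GetCommand(globalBackendSettings) string
	//   BuildCommandArgs(globalBackendSettings) []string
	//   BuildEnvironment(globalBackendSettings, env map[string]string) map[string]string
	//   IsDockerEnabled(globalBackendSettings) bool
	//   GetResponseHeaders(globalBackendSettings) map[string]string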
@@ -3,7 +3,6 @@ package instance
 import (
 	"encoding/json"
 	"fmt"
-	"llamactl/pkg/backends"
 	"llamactl/pkg/config"
 	"log"
 	"net/http/httputil"
@@ -124,48 +123,6 @@ func (i *Instance) IsRunning() bool {
 	return i.status.isRunning()
 }
 
-func (i *Instance) GetPort() int {
-	opts := i.GetOptions()
-	if opts != nil {
-		switch opts.BackendType {
-		case backends.BackendTypeLlamaCpp:
-			if opts.LlamaServerOptions != nil {
-				return opts.LlamaServerOptions.Port
-			}
-		case backends.BackendTypeMlxLm:
-			if opts.MlxServerOptions != nil {
-				return opts.MlxServerOptions.Port
-			}
-		case backends.BackendTypeVllm:
-			if opts.VllmServerOptions != nil {
-				return opts.VllmServerOptions.Port
-			}
-		}
-	}
-	return 0
-}
-
-func (i *Instance) GetHost() string {
-	opts := i.GetOptions()
-	if opts != nil {
-		switch opts.BackendType {
-		case backends.BackendTypeLlamaCpp:
-			if opts.LlamaServerOptions != nil {
-				return opts.LlamaServerOptions.Host
-			}
-		case backends.BackendTypeMlxLm:
-			if opts.MlxServerOptions != nil {
-				return opts.MlxServerOptions.Host
-			}
-		case backends.BackendTypeVllm:
-			if opts.VllmServerOptions != nil {
-				return opts.VllmServerOptions.Host
-			}
-		}
-	}
-	return ""
-}
-
 // SetOptions sets the options
 func (i *Instance) SetOptions(opts *Options) {
 	if opts == nil {
@@ -198,6 +155,20 @@ func (i *Instance) SetTimeProvider(tp TimeProvider) {
 	}
 }
 
+func (i *Instance) GetHost() string {
+	if i.options == nil {
+		return "localhost"
+	}
+	return i.options.GetHost()
+}
+
+func (i *Instance) GetPort() int {
+	if i.options == nil {
+		return 0
+	}
+	return i.options.GetPort()
+}
+
 // GetProxy returns the reverse proxy for this instance
 func (i *Instance) GetProxy() (*httputil.ReverseProxy, error) {
 	if i.proxy == nil {
@@ -266,39 +237,31 @@ func (i *Instance) ShouldTimeout() bool {
 	return i.proxy.shouldTimeout()
 }
 
-// getBackendHostPort extracts the host and port from instance options
-// Returns the configured host and port for the backend
-func (i *Instance) getBackendHostPort() (string, int) {
+func (i *Instance) getCommand() string {
 	opts := i.GetOptions()
 	if opts == nil {
-		return "localhost", 0
+		return ""
 	}
-
-	var host string
-	var port int
-	switch opts.BackendType {
-	case backends.BackendTypeLlamaCpp:
-		if opts.LlamaServerOptions != nil {
-			host = opts.LlamaServerOptions.Host
-			port = opts.LlamaServerOptions.Port
-		}
-	case backends.BackendTypeMlxLm:
-		if opts.MlxServerOptions != nil {
-			host = opts.MlxServerOptions.Host
-			port = opts.MlxServerOptions.Port
-		}
-	case backends.BackendTypeVllm:
-		if opts.VllmServerOptions != nil {
-			host = opts.VllmServerOptions.Host
-			port = opts.VllmServerOptions.Port
-		}
+	return opts.BackendOptions.GetCommand(i.globalBackendSettings)
+}
+
+func (i *Instance) buildCommandArgs() []string {
+	opts := i.GetOptions()
+	if opts == nil {
+		return nil
 	}
-
-	if host == "" {
-		host = "localhost"
+	return opts.BackendOptions.BuildCommandArgs(i.globalBackendSettings)
+}
+
+func (i *Instance) buildEnvironment() map[string]string {
+	opts := i.GetOptions()
+	if opts == nil {
+		return nil
 	}
-
-	return host, port
+	return opts.BackendOptions.BuildEnvironment(i.globalBackendSettings, opts.Environment)
 }
 
 // MarshalJSON implements json.Marshaler for Instance
@@ -307,21 +270,7 @@ func (i *Instance) MarshalJSON() ([]byte, error) {
 	opts := i.GetOptions()
 
-	// Determine if docker is enabled for this instance's backend
-	var dockerEnabled bool
-	if opts != nil {
-		switch opts.BackendType {
-		case backends.BackendTypeLlamaCpp:
-			if i.globalBackendSettings != nil && i.globalBackendSettings.LlamaCpp.Docker != nil && i.globalBackendSettings.LlamaCpp.Docker.Enabled {
-				dockerEnabled = true
-			}
-		case backends.BackendTypeVllm:
-			if i.globalBackendSettings != nil && i.globalBackendSettings.VLLM.Docker != nil && i.globalBackendSettings.VLLM.Docker.Enabled {
-				dockerEnabled = true
-			}
-		case backends.BackendTypeMlxLm:
-			// MLX does not support docker currently
-		}
-	}
+	dockerEnabled := opts.BackendOptions.IsDockerEnabled(i.globalBackendSettings)
 
 	return json.Marshal(&struct {
 		Name string `json:"name"`
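Note (not part of the diff): the deleted switch above spells out exactly what the new IsDockerEnabled call must decide. A minimal sketch under the assumption that the method reproduces that logic, with stand-in types so the snippet compiles on its own; MLX stays docker-less, as the deleted comment notes.

	// Stand-in for the per-backend global settings referenced in the deleted
	// switch (globalBackendSettings.LlamaCpp.Docker.Enabled etc.).
	type DockerSettings struct{ Enabled bool }
	type BackendSettingsSet struct {
		LlamaCpp struct{ Docker *DockerSettings }
		VLLM     struct{ Docker *DockerSettings }
	}

	// IsDockerEnabled mirrors the deleted MarshalJSON switch: docker is on only
	// when the backend's settings exist and explicitly enable it. MLX has no
	// docker support, so it falls through to false.
	func (o Options) IsDockerEnabled(s *BackendSettingsSet) bool {
		if s == nil {
			return false
		}
		switch o.BackendType {
		case BackendTypeLlamaCpp:
			return s.LlamaCpp.Docker != nil && s.LlamaCpp.Docker.Enabled
		case BackendTypeVllm:
			return s.VLLM.Docker != nil && s.VLLM.Docker.Enabled
		}
		return false
	}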
@@ -6,7 +6,6 @@ import (
 	"llamactl/pkg/backends"
 	"llamactl/pkg/config"
 	"log"
-	"maps"
 	"slices"
 	"sync"
 )
@@ -21,18 +20,12 @@ type Options struct {
 	OnDemandStart *bool `json:"on_demand_start,omitempty"`
 	// Idle timeout
 	IdleTimeout *int `json:"idle_timeout,omitempty"` // minutes
-	//Environment variables
+	// Environment variables
 	Environment map[string]string `json:"environment,omitempty"`
 
-	BackendType    backends.BackendType `json:"backend_type"`
-	BackendOptions map[string]any       `json:"backend_options,omitempty"`
-
 	// Assigned nodes
 	Nodes map[string]struct{} `json:"-"`
 
-	// Backend-specific options
-	LlamaServerOptions *backends.LlamaServerOptions `json:"-"`
-	MlxServerOptions   *backends.MlxServerOptions   `json:"-"`
-	VllmServerOptions  *backends.VllmServerOptions  `json:"-"`
+	// Backend options
+	BackendOptions backends.Options `json:"-"`
 }
 
 // options wraps Options with thread-safe access (unexported).
@@ -62,6 +55,18 @@ func (o *options) set(opts *Options) {
 	o.opts = opts
 }
 
+func (o *options) GetHost() string {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	return o.opts.BackendOptions.GetHost()
+}
+
+func (o *options) GetPort() int {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	return o.opts.BackendOptions.GetPort()
+}
+
 // MarshalJSON implements json.Marshaler for options wrapper
 func (o *options) MarshalJSON() ([]byte, error) {
 	o.mu.RLock()
@@ -85,7 +90,9 @@ func (c *Options) UnmarshalJSON(data []byte) error {
 	// Use anonymous struct to avoid recursion
 	type Alias Options
 	aux := &struct {
-		Nodes []string `json:"nodes,omitempty"` // Accept JSON array
+		Nodes          []string             `json:"nodes,omitempty"`
+		BackendType    backends.BackendType `json:"backend_type"`
+		BackendOptions map[string]any       `json:"backend_options,omitempty"`
 		*Alias
 	}{
 		Alias: (*Alias)(c),
@@ -103,47 +110,27 @@ func (c *Options) UnmarshalJSON(data []byte) error {
 		}
 	}
 
-	// Parse backend-specific options
-	switch c.BackendType {
-	case backends.BackendTypeLlamaCpp:
-		if c.BackendOptions != nil {
-			// Convert map to JSON and then unmarshal to LlamaServerOptions
-			optionsData, err := json.Marshal(c.BackendOptions)
-			if err != nil {
-				return fmt.Errorf("failed to marshal backend options: %w", err)
-			}
-
-			c.LlamaServerOptions = &backends.LlamaServerOptions{}
-			if err := json.Unmarshal(optionsData, c.LlamaServerOptions); err != nil {
-				return fmt.Errorf("failed to unmarshal llama.cpp options: %w", err)
-			}
-		}
-	case backends.BackendTypeMlxLm:
-		if c.BackendOptions != nil {
-			optionsData, err := json.Marshal(c.BackendOptions)
-			if err != nil {
-				return fmt.Errorf("failed to marshal backend options: %w", err)
-			}
-
-			c.MlxServerOptions = &backends.MlxServerOptions{}
-			if err := json.Unmarshal(optionsData, c.MlxServerOptions); err != nil {
-				return fmt.Errorf("failed to unmarshal MLX options: %w", err)
-			}
-		}
-	case backends.BackendTypeVllm:
-		if c.BackendOptions != nil {
-			optionsData, err := json.Marshal(c.BackendOptions)
-			if err != nil {
-				return fmt.Errorf("failed to marshal backend options: %w", err)
-			}
-
-			c.VllmServerOptions = &backends.VllmServerOptions{}
-			if err := json.Unmarshal(optionsData, c.VllmServerOptions); err != nil {
-				return fmt.Errorf("failed to unmarshal vLLM options: %w", err)
-			}
-		}
-	default:
-		return fmt.Errorf("unknown backend type: %s", c.BackendType)
-	}
+	// Create backend options struct and unmarshal
+	c.BackendOptions = backends.Options{
+		BackendType:    aux.BackendType,
+		BackendOptions: aux.BackendOptions,
+	}
+
+	// Marshal the backend options to JSON for proper unmarshaling
+	backendJson, err := json.Marshal(struct {
+		BackendType    backends.BackendType `json:"backend_type"`
+		BackendOptions map[string]any       `json:"backend_options,omitempty"`
+	}{
+		BackendType:    aux.BackendType,
+		BackendOptions: aux.BackendOptions,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to marshal backend options: %w", err)
+	}
+
+	// Unmarshal into the backends.Options struct to trigger its custom unmarshaling
+	if err := json.Unmarshal(backendJson, &c.BackendOptions); err != nil {
+		return fmt.Errorf("failed to unmarshal backend options: %w", err)
+	}
 
 	return nil
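Note (not part of the diff): the wire format is unchanged by this refactor. backend_type and backend_options still sit at the top level of the instance options JSON; the aux struct above peels them off before handing them to backends.Options. A hypothetical payload for illustration (keys inside backend_options and the backend_type string are made up, not taken from this diff):

	// Hypothetical payload; the top-level keys match the json tags above,
	// the backend_options contents are illustrative only.
	const examplePayload = `{
	  "on_demand_start": true,
	  "idle_timeout": 30,
	  "environment": {"CUDA_VISIBLE_DEVICES": "0"},
	  "backend_type": "llama_cpp",
	  "backend_options": {"model": "/models/example.gguf", "port": 8081}
	}`

	// Unmarshaling runs Options.UnmarshalJSON above: the aux struct captures
	// backend_type/backend_options, and the marshal/unmarshal round trip hands
	// them to the custom unmarshaler on backends.Options.
	// var opts instance.Options
	// _ = json.Unmarshal([]byte(examplePayload), &opts)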
@@ -154,7 +141,9 @@ func (c *Options) MarshalJSON() ([]byte, error) {
 	// Use anonymous struct to avoid recursion
 	type Alias Options
 	aux := struct {
 		Nodes          []string             `json:"nodes,omitempty"` // Output as JSON array
+		BackendType    backends.BackendType `json:"backend_type"`
+		BackendOptions map[string]any       `json:"backend_options,omitempty"`
 		*Alias
 	}{
 		Alias: (*Alias)(c),
@@ -170,52 +159,25 @@ func (c *Options) MarshalJSON() ([]byte, error) {
 		slices.Sort(aux.Nodes)
 	}
 
-	// Convert backend-specific options back to BackendOptions map for JSON
-	switch c.BackendType {
-	case backends.BackendTypeLlamaCpp:
-		if c.LlamaServerOptions != nil {
-			data, err := json.Marshal(c.LlamaServerOptions)
-			if err != nil {
-				return nil, fmt.Errorf("failed to marshal llama server options: %w", err)
-			}
-
-			var backendOpts map[string]any
-			if err := json.Unmarshal(data, &backendOpts); err != nil {
-				return nil, fmt.Errorf("failed to unmarshal to map: %w", err)
-			}
-
-			aux.BackendOptions = backendOpts
-		}
-	case backends.BackendTypeMlxLm:
-		if c.MlxServerOptions != nil {
-			data, err := json.Marshal(c.MlxServerOptions)
-			if err != nil {
-				return nil, fmt.Errorf("failed to marshal MLX server options: %w", err)
-			}
-
-			var backendOpts map[string]any
-			if err := json.Unmarshal(data, &backendOpts); err != nil {
-				return nil, fmt.Errorf("failed to unmarshal to map: %w", err)
-			}
-
-			aux.BackendOptions = backendOpts
-		}
-	case backends.BackendTypeVllm:
-		if c.VllmServerOptions != nil {
-			data, err := json.Marshal(c.VllmServerOptions)
-			if err != nil {
-				return nil, fmt.Errorf("failed to marshal vLLM server options: %w", err)
-			}
-
-			var backendOpts map[string]any
-			if err := json.Unmarshal(data, &backendOpts); err != nil {
-				return nil, fmt.Errorf("failed to unmarshal to map: %w", err)
-			}
-
-			aux.BackendOptions = backendOpts
-		}
-	}
+	// Set backend type
+	aux.BackendType = c.BackendOptions.BackendType
+
+	// Marshal the backends.Options struct to get the properly formatted backend options
+	backendData, err := json.Marshal(c.BackendOptions)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal backend options: %w", err)
+	}
+
+	// Unmarshal into a temporary struct to extract the backend_options map
+	var tempBackend struct {
+		BackendOptions map[string]any `json:"backend_options,omitempty"`
+	}
+	if err := json.Unmarshal(backendData, &tempBackend); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal backend data: %w", err)
+	}
+
+	aux.BackendOptions = tempBackend.BackendOptions
 
 	return json.Marshal(aux)
 }
@@ -257,78 +219,3 @@ func (c *Options) validateAndApplyDefaults(name string, globalSettings *config.I
 		}
 	}
 }
-
-// getCommand builds the command to run the backend
-func (c *Options) getCommand(backendConfig *config.BackendSettings) string {
-
-	if backendConfig.Docker != nil && backendConfig.Docker.Enabled && c.BackendType != backends.BackendTypeMlxLm {
-		return "docker"
-	}
-
-	return backendConfig.Command
-}
-
-// buildCommandArgs builds command line arguments for the backend
-func (c *Options) buildCommandArgs(backendConfig *config.BackendSettings) []string {
-
-	var args []string
-
-	if backendConfig.Docker != nil && backendConfig.Docker.Enabled && c.BackendType != backends.BackendTypeMlxLm {
-		// For Docker, start with Docker args
-		args = append(args, backendConfig.Docker.Args...)
-		args = append(args, backendConfig.Docker.Image)
-
-		switch c.BackendType {
-		case backends.BackendTypeLlamaCpp:
-			if c.LlamaServerOptions != nil {
-				args = append(args, c.LlamaServerOptions.BuildDockerArgs()...)
-			}
-		case backends.BackendTypeVllm:
-			if c.VllmServerOptions != nil {
-				args = append(args, c.VllmServerOptions.BuildDockerArgs()...)
-			}
-		}
-
-	} else {
-		// For native execution, start with backend args
-		args = append(args, backendConfig.Args...)
-
-		switch c.BackendType {
-		case backends.BackendTypeLlamaCpp:
-			if c.LlamaServerOptions != nil {
-				args = append(args, c.LlamaServerOptions.BuildCommandArgs()...)
-			}
-		case backends.BackendTypeMlxLm:
-			if c.MlxServerOptions != nil {
-				args = append(args, c.MlxServerOptions.BuildCommandArgs()...)
-			}
-		case backends.BackendTypeVllm:
-			if c.VllmServerOptions != nil {
-				args = append(args, c.VllmServerOptions.BuildCommandArgs()...)
-			}
-		}
-	}
-
-	return args
-}
-
-// buildEnvironment builds the environment variables for the backend process
-func (c *Options) buildEnvironment(backendConfig *config.BackendSettings) map[string]string {
-	env := map[string]string{}
-
-	if backendConfig.Environment != nil {
-		maps.Copy(env, backendConfig.Environment)
-	}
-
-	if backendConfig.Docker != nil && backendConfig.Docker.Enabled && c.BackendType != backends.BackendTypeMlxLm {
-		if backendConfig.Docker.Environment != nil {
-			maps.Copy(env, backendConfig.Docker.Environment)
-		}
-	}
-
-	if c.Environment != nil {
-		maps.Copy(env, c.Environment)
-	}
-
-	return env
-}
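Note (not part of the diff): these three deleted helpers are what now hide behind BackendOptions.GetCommand / BuildCommandArgs / BuildEnvironment on the instance (see the new methods in the first file). A condensed sketch of the command selection they implemented, with the docker-vs-native branching taken from the deleted lines and stand-in types so it compiles; the method's placement on backends.Options and its signature are assumptions:

	// Stand-in for config.BackendSettings as used by the deleted helpers.
	type BackendSettings struct {
		Command string
		Args    []string
		Docker  *struct {
			Enabled bool
			Image   string
			Args    []string
		}
	}

	// GetCommand keeps the deleted rule: docker wraps every backend except
	// MLX when the global settings enable it; otherwise the configured
	// binary runs natively.
	func (o Options) GetCommand(cfg *BackendSettings) string {
		if cfg.Docker != nil && cfg.Docker.Enabled && o.BackendType != BackendTypeMlxLm {
			return "docker"
		}
		return cfg.Command
	}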
@@ -12,9 +12,6 @@ import (
 	"sync"
 	"syscall"
 	"time"
-
-	"llamactl/pkg/backends"
-	"llamactl/pkg/config"
 )
 
 // process manages the OS process lifecycle for a local instance.
@@ -216,7 +213,8 @@ func (p *process) waitForHealthy(timeout int) error {
 	defer cancel()
 
 	// Get host/port from instance
-	host, port := p.instance.getBackendHostPort()
+	host := p.instance.options.GetHost()
+	port := p.instance.options.GetPort()
 	healthURL := fmt.Sprintf("http://%s:%d/health", host, port)
 
 	// Create a dedicated HTTP client for health checks
@@ -386,26 +384,15 @@ func (p *process) handleAutoRestart(err error) {
 
 // buildCommand builds the command to execute using backend-specific logic
 func (p *process) buildCommand() (*exec.Cmd, error) {
-	// Get options
-	opts := p.instance.GetOptions()
-	if opts == nil {
-		return nil, fmt.Errorf("instance options are nil")
-	}
-
-	// Get backend configuration
-	backendConfig, err := p.getBackendConfig()
-	if err != nil {
-		return nil, err
-	}
-
 	// Build the environment variables
-	env := opts.buildEnvironment(backendConfig)
+	env := p.instance.buildEnvironment()
 
 	// Get the command to execute
-	command := opts.getCommand(backendConfig)
+	command := p.instance.getCommand()
 
 	// Build command arguments
-	args := opts.buildCommandArgs(backendConfig)
+	args := p.instance.buildCommandArgs()
 
 	// Create the exec.Cmd
 	cmd := exec.CommandContext(p.ctx, command, args...)
@@ -420,27 +407,3 @@ func (p *process) buildCommand() (*exec.Cmd, error) {
 
 	return cmd, nil
 }
-
-// getBackendConfig resolves the backend configuration for the current instance
-func (p *process) getBackendConfig() (*config.BackendSettings, error) {
-	opts := p.instance.GetOptions()
-	if opts == nil {
-		return nil, fmt.Errorf("instance options are nil")
-	}
-
-	var backendTypeStr string
-
-	switch opts.BackendType {
-	case backends.BackendTypeLlamaCpp:
-		backendTypeStr = "llama-cpp"
-	case backends.BackendTypeMlxLm:
-		backendTypeStr = "mlx"
-	case backends.BackendTypeVllm:
-		backendTypeStr = "vllm"
-	default:
-		return nil, fmt.Errorf("unsupported backend type: %s", opts.BackendType)
-	}
-
-	settings := p.instance.globalBackendSettings.GetBackendSettings(backendTypeStr)
-	return &settings, nil
-}
@@ -2,7 +2,6 @@ package instance
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"llamactl/pkg/backends"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
@@ -68,8 +67,11 @@ func (p *proxy) build() (*httputil.ReverseProxy, error) {
 	}
 
-	// Get host/port from process
-	host, port := p.instance.getBackendHostPort()
-
+	host := p.instance.options.GetHost()
+	port := p.instance.options.GetPort()
+	if port == 0 {
+		return nil, fmt.Errorf("instance %s has no port assigned", p.instance.Name)
+	}
+
 	targetURL, err := url.Parse(fmt.Sprintf("http://%s:%d", host, port))
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse target URL for instance %s: %w", p.instance.Name, err)
@@ -78,15 +80,7 @@ func (p *proxy) build() (*httputil.ReverseProxy, error) {
 	proxy := httputil.NewSingleHostReverseProxy(targetURL)
 
-	// Get response headers from backend config
-	var responseHeaders map[string]string
-	switch options.BackendType {
-	case backends.BackendTypeLlamaCpp:
-		responseHeaders = p.instance.globalBackendSettings.LlamaCpp.ResponseHeaders
-	case backends.BackendTypeVllm:
-		responseHeaders = p.instance.globalBackendSettings.VLLM.ResponseHeaders
-	case backends.BackendTypeMlxLm:
-		responseHeaders = p.instance.globalBackendSettings.MLX.ResponseHeaders
-	}
+	responseHeaders := options.BackendOptions.GetResponseHeaders(p.instance.globalBackendSettings)
 
 	proxy.ModifyResponse = func(resp *http.Response) error {
 		// Remove CORS headers from backend response to avoid conflicts
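Note (not part of the diff): this deleted switch is the last of the per-backend lookups, and GetResponseHeaders presumably centralizes it the same way as the other new methods. A sketch reusing the stand-in style of the earlier snippets; the ResponseHeaders field names come from the deleted lines, the container type and signature are assumptions:

	// Stand-in container for the per-backend header settings the deleted
	// switch read from globalBackendSettings.
	type HeaderSettingsSet struct {
		LlamaCpp, VLLM, MLX struct{ ResponseHeaders map[string]string }
	}

	// GetResponseHeaders selects the headers by backend type exactly as the
	// deleted proxy switch did; note MLX participates here, unlike docker.
	func (o Options) GetResponseHeaders(s *HeaderSettingsSet) map[string]string {
		switch o.BackendType {
		case BackendTypeLlamaCpp:
			return s.LlamaCpp.ResponseHeaders
		case BackendTypeVllm:
			return s.VLLM.ResponseHeaders
		case BackendTypeMlxLm:
			return s.MLX.ResponseHeaders
		}
		return nil
	}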