Mirror of https://github.com/lordmathis/llamactl.git, synced 2025-11-05 16:44:22 +00:00
Capitalize godoc tags
docs/docs.go: 68 changed lines
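
For context: the OpenAPI spec here is generated with swaggo/swag, which copies the value of each handler's godoc `@Tags` comment verbatim into the spec, so the lowercase tags (`backends`, `instances`, `nodes`, `openai`, `version`) rendered as lowercase group headings in Swagger UI. Changing the casing at the annotation and regenerating is why one logical change fans out across the generated Go doc template, the swagger JSON and YAML specs, and the handler sources below. A minimal sketch of the annotation convention, using the version endpoint from this diff (the package name, `@Router` line, and handler body are illustrative assumptions; only the annotations mirror the commit):

// Sketch, not code from this commit.
package server

import "net/http"

// Handler is stubbed here so the sketch stands alone.
type Handler struct{}

// VersionHandler godoc
// @Summary Get llamactl version
// @Description Returns the version of the llamactl command
// @Tags System
// @Security ApiKeyAuth
// @Produces text/plain
// @Success 200 {string} string "Version information"
// @Router /api/v1/version [get]
func (h *Handler) VersionHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		_, _ = w.Write([]byte("llamactl <version>")) // placeholder payload
	}
}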
@@ -28,7 +28,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a list of available devices for the llama server",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "List available devices for llama server",
                 "responses": {
@@ -56,7 +56,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the help text for the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get help for llama server",
                 "responses": {
@@ -90,7 +90,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse llama-server command",
                 "parameters": [
@@ -141,7 +141,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the version of the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get version of llama server",
                 "responses": {
@@ -175,7 +175,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse mlx_lm.server command",
                 "parameters": [
@@ -223,7 +223,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse vllm serve command",
                 "parameters": [
@@ -265,7 +265,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a list of all instances managed by the server",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "List all instances",
                 "responses": {
@@ -296,7 +296,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the details of a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get details of a specific instance",
                 "parameters": [
@@ -340,7 +340,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Update an instance's configuration",
                 "parameters": [
@@ -393,7 +393,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Create and start a new instance",
                 "parameters": [
@@ -443,7 +443,7 @@ const docTemplate = `{
                 ],
                 "description": "Stops and removes a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Delete an instance",
                 "parameters": [
@@ -483,7 +483,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the logs from a specific instance by name with optional line limit",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get logs from a specific instance",
                 "parameters": [
@@ -532,7 +532,7 @@ const docTemplate = `{
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -576,7 +576,7 @@ const docTemplate = `{
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -622,7 +622,7 @@ const docTemplate = `{
                 ],
                 "description": "Restarts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Restart a running instance",
                 "parameters": [
@@ -665,7 +665,7 @@ const docTemplate = `{
                 ],
                 "description": "Starts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Start a stopped instance",
                 "parameters": [
@@ -708,7 +708,7 @@ const docTemplate = `{
                 ],
                 "description": "Stops a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Stop a running instance",
                 "parameters": [
@@ -751,7 +751,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a map of all nodes configured in the server (node name -\u003e node config)",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "List all configured nodes",
                 "responses": {
@@ -782,7 +782,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the details of a specific node by name",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "Get details of a specific node",
                 "parameters": [
@@ -831,7 +831,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the version of the llamactl command",
                 "tags": [
-                    "version"
+                    "System"
                 ],
                 "summary": "Get llamactl version",
                 "responses": {
@@ -862,7 +862,7 @@ const docTemplate = `{
                     "text/html"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp UI for the instance",
                 "parameters": [
@@ -908,7 +908,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -955,7 +955,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1002,7 +1002,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1049,7 +1049,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1096,7 +1096,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1143,7 +1143,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1190,7 +1190,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1235,7 +1235,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1282,7 +1282,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1329,7 +1329,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1376,7 +1376,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1423,7 +1423,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "OpenAI-compatible proxy endpoint",
                 "responses": {
@@ -1454,7 +1454,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a list of instances in a format compatible with OpenAI API",
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "List instances in OpenAI-compatible format",
                 "responses": {

@@ -21,7 +21,7 @@
                 ],
                 "description": "Returns a list of available devices for the llama server",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "List available devices for llama server",
                 "responses": {
@@ -49,7 +49,7 @@
                 ],
                 "description": "Returns the help text for the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get help for llama server",
                 "responses": {
@@ -83,7 +83,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse llama-server command",
                 "parameters": [
@@ -134,7 +134,7 @@
                 ],
                 "description": "Returns the version of the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get version of llama server",
                 "responses": {
@@ -168,7 +168,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse mlx_lm.server command",
                 "parameters": [
@@ -216,7 +216,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse vllm serve command",
                 "parameters": [
@@ -258,7 +258,7 @@
                 ],
                 "description": "Returns a list of all instances managed by the server",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "List all instances",
                 "responses": {
@@ -289,7 +289,7 @@
                 ],
                 "description": "Returns the details of a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get details of a specific instance",
                 "parameters": [
@@ -333,7 +333,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Update an instance's configuration",
                 "parameters": [
@@ -386,7 +386,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Create and start a new instance",
                 "parameters": [
@@ -436,7 +436,7 @@
                 ],
                 "description": "Stops and removes a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Delete an instance",
                 "parameters": [
@@ -476,7 +476,7 @@
                 ],
                 "description": "Returns the logs from a specific instance by name with optional line limit",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get logs from a specific instance",
                 "parameters": [
@@ -525,7 +525,7 @@
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -569,7 +569,7 @@
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -615,7 +615,7 @@
                 ],
                 "description": "Restarts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Restart a running instance",
                 "parameters": [
@@ -658,7 +658,7 @@
                 ],
                 "description": "Starts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Start a stopped instance",
                 "parameters": [
@@ -701,7 +701,7 @@
                 ],
                 "description": "Stops a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Stop a running instance",
                 "parameters": [
@@ -744,7 +744,7 @@
                 ],
                 "description": "Returns a map of all nodes configured in the server (node name -\u003e node config)",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "List all configured nodes",
                 "responses": {
@@ -775,7 +775,7 @@
                 ],
                 "description": "Returns the details of a specific node by name",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "Get details of a specific node",
                 "parameters": [
@@ -824,7 +824,7 @@
                 ],
                 "description": "Returns the version of the llamactl command",
                 "tags": [
-                    "version"
+                    "System"
                 ],
                 "summary": "Get llamactl version",
                 "responses": {
@@ -855,7 +855,7 @@
                     "text/html"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp UI for the instance",
                 "parameters": [
@@ -901,7 +901,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -948,7 +948,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -995,7 +995,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1042,7 +1042,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1089,7 +1089,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1136,7 +1136,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1183,7 +1183,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1228,7 +1228,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1275,7 +1275,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1322,7 +1322,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1369,7 +1369,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1416,7 +1416,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "OpenAI-compatible proxy endpoint",
                 "responses": {
@@ -1447,7 +1447,7 @@
                 ],
                 "description": "Returns a list of instances in a format compatible with OpenAI API",
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "List instances in OpenAI-compatible format",
                 "responses": {

@@ -85,7 +85,7 @@ paths:
       - ApiKeyAuth: []
       summary: List available devices for llama server
       tags:
-      - backends
+      - Backends
   /api/v1/backends/llama-cpp/help:
     get:
       description: Returns the help text for the llama server command
@@ -102,7 +102,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get help for llama server
      tags:
-      - backends
+      - Backends
   /api/v1/backends/llama-cpp/parse-command:
     post:
       consumes:
@@ -138,7 +138,7 @@ paths:
       - ApiKeyAuth: []
       summary: Parse llama-server command
       tags:
-      - backends
+      - Backends
   /api/v1/backends/llama-cpp/version:
     get:
       description: Returns the version of the llama server command
@@ -155,7 +155,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get version of llama server
       tags:
-      - backends
+      - Backends
   /api/v1/backends/mlx/parse-command:
     post:
       consumes:
@@ -185,7 +185,7 @@ paths:
       - ApiKeyAuth: []
       summary: Parse mlx_lm.server command
       tags:
-      - backends
+      - Backends
   /api/v1/backends/vllm/parse-command:
     post:
      consumes:
@@ -215,7 +215,7 @@ paths:
       - ApiKeyAuth: []
       summary: Parse vllm serve command
       tags:
-      - backends
+      - Backends
   /api/v1/instances:
     get:
       description: Returns a list of all instances managed by the server
@@ -234,7 +234,7 @@ paths:
       - ApiKeyAuth: []
       summary: List all instances
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}:
     delete:
       description: Stops and removes a specific instance by name
@@ -259,7 +259,7 @@ paths:
       - ApiKeyAuth: []
       summary: Delete an instance
       tags:
-      - instances
+      - Instances
     get:
       description: Returns the details of a specific instance by name
       parameters:
@@ -285,7 +285,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get details of a specific instance
       tags:
-      - instances
+      - Instances
     post:
       consumes:
       - application/json
@@ -319,7 +319,7 @@ paths:
       - ApiKeyAuth: []
       summary: Create and start a new instance
       tags:
-      - instances
+      - Instances
     put:
       consumes:
       - application/json
@@ -353,7 +353,7 @@ paths:
       - ApiKeyAuth: []
       summary: Update an instance's configuration
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/logs:
     get:
       description: Returns the logs from a specific instance by name with optional
@@ -385,7 +385,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get logs from a specific instance
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/proxy:
     get:
       description: Forwards HTTP requests to the llama-server instance running on
@@ -416,7 +416,7 @@ paths:
       summary: Proxy requests to a specific instance, does not autostart instance
         if stopped
       tags:
-      - instances
+      - Instances
     post:
       description: Forwards HTTP requests to the llama-server instance running on
         a specific port
@@ -446,7 +446,7 @@ paths:
       summary: Proxy requests to a specific instance, does not autostart instance
         if stopped
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/restart:
     post:
       description: Restarts a specific instance by name
@@ -473,7 +473,7 @@ paths:
       - ApiKeyAuth: []
       summary: Restart a running instance
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/start:
     post:
       description: Starts a specific instance by name
@@ -500,7 +500,7 @@ paths:
       - ApiKeyAuth: []
       summary: Start a stopped instance
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/stop:
     post:
       description: Stops a specific instance by name
@@ -527,7 +527,7 @@ paths:
       - ApiKeyAuth: []
       summary: Stop a running instance
       tags:
-      - instances
+      - Instances
   /api/v1/nodes:
     get:
       description: Returns a map of all nodes configured in the server (node name
@@ -547,7 +547,7 @@ paths:
       - ApiKeyAuth: []
       summary: List all configured nodes
       tags:
-      - nodes
+      - Nodes
   /api/v1/nodes/{name}:
     get:
       description: Returns the details of a specific node by name
@@ -578,7 +578,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get details of a specific node
       tags:
-      - nodes
+      - Nodes
   /api/v1/version:
     get:
       description: Returns the version of the llamactl command
@@ -595,7 +595,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get llamactl version
       tags:
-      - version
+      - System
   /llama-cpp/{name}/:
     get:
       description: Proxies requests to the llama.cpp UI for the specified instance
@@ -624,7 +624,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp UI for the instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/apply-template:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -655,7 +655,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/completion:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -686,7 +686,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/detokenize:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -717,7 +717,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/embeddings:
     post:
      description: Proxies requests to the specified llama.cpp server instance, starting
@@ -748,7 +748,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/infill:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -779,7 +779,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/metrics:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -810,7 +810,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/props:
     get:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -841,7 +841,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
        it on-demand if configured
@@ -871,7 +871,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/reranking:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -902,7 +902,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/slots:
     get:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -933,7 +933,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/tokenize:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -964,7 +964,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /v1/:
     post:
       consumes:
@@ -987,7 +987,7 @@ paths:
       - ApiKeyAuth: []
       summary: OpenAI-compatible proxy endpoint
       tags:
-      - openai
+      - OpenAI
   /v1/models:
     get:
       description: Returns a list of instances in a format compatible with OpenAI
@@ -1005,7 +1005,7 @@ paths:
       - ApiKeyAuth: []
       summary: List instances in OpenAI-compatible format
       tags:
-      - openai
+      - OpenAI
 securityDefinitions:
   ApiKeyAuth:
     in: header

@@ -44,7 +44,7 @@ func (h *Handler) stripLlamaCppPrefix(r *http.Request, instName string) {
 // LlamaCppUIProxy godoc
 // @Summary Proxy requests to llama.cpp UI for the instance
 // @Description Proxies requests to the llama.cpp UI for the specified instance
-// @Tags backends
+// @Tags Llama.cpp
 // @Security ApiKeyAuth
 // @Produce html
 // @Param name query string true "Instance Name"
@@ -83,7 +83,7 @@ func (h *Handler) LlamaCppUIProxy() http.HandlerFunc {
 // LlamaCppProxy godoc
 // @Summary Proxy requests to llama.cpp server instance
 // @Description Proxies requests to the specified llama.cpp server instance, starting it on-demand if configured
-// @Tags backends
+// @Tags Llama.cpp
 // @Security ApiKeyAuth
 // @Produce json
 // @Param name path string true "Instance Name"
@@ -160,7 +160,7 @@ func parseHelper(w http.ResponseWriter, r *http.Request, backend interface {
 // ParseLlamaCommand godoc
 // @Summary Parse llama-server command
 // @Description Parses a llama-server command string into instance options
-// @Tags backends
+// @Tags Backends
 // @Security ApiKeyAuth
 // @Accept json
 // @Produce json
@@ -190,7 +190,7 @@ func (h *Handler) ParseLlamaCommand() http.HandlerFunc {
 // ParseMlxCommand godoc
 // @Summary Parse mlx_lm.server command
 // @Description Parses MLX-LM server command string into instance options
-// @Tags backends
+// @Tags Backends
 // @Security ApiKeyAuth
 // @Accept json
 // @Produce json
@@ -219,7 +219,7 @@ func (h *Handler) ParseMlxCommand() http.HandlerFunc {
 // ParseVllmCommand godoc
 // @Summary Parse vllm serve command
 // @Description Parses a vLLM serve command string into instance options
-// @Tags backends
+// @Tags Backends
 // @Security ApiKeyAuth
 // @Accept json
 // @Produce json
@@ -261,7 +261,7 @@ func (h *Handler) executeLlamaServerCommand(flag, errorMsg string) http.HandlerF
 // LlamaServerHelpHandler godoc
 // @Summary Get help for llama server
 // @Description Returns the help text for the llama server command
-// @Tags backends
+// @Tags Backends
 // @Security ApiKeyAuth
 // @Produces text/plain
 // @Success 200 {string} string "Help text"
@@ -274,7 +274,7 @@ func (h *Handler) LlamaServerHelpHandler() http.HandlerFunc {
 // LlamaServerVersionHandler godoc
 // @Summary Get version of llama server
 // @Description Returns the version of the llama server command
-// @Tags backends
+// @Tags Backends
 // @Security ApiKeyAuth
 // @Produces text/plain
 // @Success 200 {string} string "Version information"
@@ -287,7 +287,7 @@ func (h *Handler) LlamaServerVersionHandler() http.HandlerFunc {
 // LlamaServerListDevicesHandler godoc
 // @Summary List available devices for llama server
 // @Description Returns a list of available devices for the llama server
-// @Tags backends
+// @Tags Backends
 // @Security ApiKeyAuth
 // @Produces text/plain
 // @Success 200 {string} string "List of devices"

@@ -16,7 +16,7 @@ import (
 // ListInstances godoc
 // @Summary List all instances
 // @Description Returns a list of all instances managed by the server
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Produces json
 // @Success 200 {array} instance.Instance "List of instances"
@@ -37,7 +37,7 @@ func (h *Handler) ListInstances() http.HandlerFunc {
 // CreateInstance godoc
 // @Summary Create and start a new instance
 // @Description Creates a new instance with the provided configuration options
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Accept json
 // @Produces json
@@ -75,7 +75,7 @@ func (h *Handler) CreateInstance() http.HandlerFunc {
 // GetInstance godoc
 // @Summary Get details of a specific instance
 // @Description Returns the details of a specific instance by name
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Produces json
 // @Param name path string true "Instance Name"
@@ -105,7 +105,7 @@ func (h *Handler) GetInstance() http.HandlerFunc {
 // UpdateInstance godoc
 // @Summary Update an instance's configuration
 // @Description Updates the configuration of a specific instance by name
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Accept json
 // @Produces json
@@ -143,7 +143,7 @@ func (h *Handler) UpdateInstance() http.HandlerFunc {
 // StartInstance godoc
 // @Summary Start a stopped instance
 // @Description Starts a specific instance by name
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Produces json
 // @Param name path string true "Instance Name"
@@ -179,7 +179,7 @@ func (h *Handler) StartInstance() http.HandlerFunc {
 // StopInstance godoc
 // @Summary Stop a running instance
 // @Description Stops a specific instance by name
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Produces json
 // @Param name path string true "Instance Name"
@@ -209,7 +209,7 @@ func (h *Handler) StopInstance() http.HandlerFunc {
 // RestartInstance godoc
 // @Summary Restart a running instance
 // @Description Restarts a specific instance by name
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Produces json
 // @Param name path string true "Instance Name"
@@ -239,7 +239,7 @@ func (h *Handler) RestartInstance() http.HandlerFunc {
 // DeleteInstance godoc
 // @Summary Delete an instance
 // @Description Stops and removes a specific instance by name
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Param name path string true "Instance Name"
 // @Success 204 "No Content"
@@ -267,7 +267,7 @@ func (h *Handler) DeleteInstance() http.HandlerFunc {
 // GetInstanceLogs godoc
 // @Summary Get logs from a specific instance
 // @Description Returns the logs from a specific instance by name with optional line limit
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Param name path string true "Instance Name"
 // @Param lines query string false "Number of lines to retrieve (default: all lines)"
@@ -310,7 +310,7 @@ func (h *Handler) GetInstanceLogs() http.HandlerFunc {
 // InstanceProxy godoc
 // @Summary Proxy requests to a specific instance, does not autostart instance if stopped
 // @Description Forwards HTTP requests to the llama-server instance running on a specific port
-// @Tags instances
+// @Tags Instances
 // @Security ApiKeyAuth
 // @Param name path string true "Instance Name"
 // @Success 200 "Request successfully proxied to instance"

@@ -14,7 +14,7 @@ type NodeResponse struct {
 // ListNodes godoc
 // @Summary List all configured nodes
 // @Description Returns a map of all nodes configured in the server (node name -> node config)
-// @Tags nodes
+// @Tags Nodes
 // @Security ApiKeyAuth
 // @Produces json
 // @Success 200 {object} map[string]NodeResponse "Map of nodes"
@@ -37,7 +37,7 @@ func (h *Handler) ListNodes() http.HandlerFunc {
 // GetNode godoc
 // @Summary Get details of a specific node
 // @Description Returns the details of a specific node by name
-// @Tags nodes
+// @Tags Nodes
 // @Security ApiKeyAuth
 // @Produces json
 // @Param name path string true "Node Name"

@@ -25,7 +25,7 @@ type OpenAIInstance struct {
 // OpenAIListInstances godoc
 // @Summary List instances in OpenAI-compatible format
 // @Description Returns a list of instances in a format compatible with OpenAI API
-// @Tags openai
+// @Tags OpenAI
 // @Security ApiKeyAuth
 // @Produces json
 // @Success 200 {object} OpenAIListInstancesResponse "List of OpenAI-compatible instances"
@@ -61,7 +61,7 @@ func (h *Handler) OpenAIListInstances() http.HandlerFunc {
 // OpenAIProxy godoc
 // @Summary OpenAI-compatible proxy endpoint
 // @Description Handles all POST requests to /v1/*, routing to the appropriate instance based on the request body. Requires API key authentication via the `Authorization` header.
-// @Tags openai
+// @Tags OpenAI
 // @Security ApiKeyAuth
 // @Accept json
 // @Produces json

@@ -8,7 +8,7 @@ import (
 // VersionHandler godoc
 // @Summary Get llamactl version
 // @Description Returns the version of the llamactl command
-// @Tags version
+// @Tags System
 // @Security ApiKeyAuth
 // @Produces text/plain
 // @Success 200 {string} string "Version information"
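
Worth noting when reading a change like this: the doc template, swagger JSON, and swagger YAML above are swag output, so the workflow is to edit only the `@Tags` comments in the handlers and re-run the generator rather than touching the generated spec files by hand. A sketch of that wiring, assuming the stock swag CLI with its default paths (the go:generate directive is illustrative; this commit does not add one):

// Hypothetical wiring, not part of this commit: running `go generate`
// (or `swag init` directly from the module root) re-scans the annotated
// handlers and regenerates the spec files, including the new tag names.
//go:generate swag init
package main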