Mirror of https://github.com/lordmathis/llamactl.git, synced 2025-11-05 16:44:22 +00:00
Capitalize godoc tags
docs/docs.go (68 changed lines: 34 additions, 34 deletions)
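All of the files in this commit are generated by swag from godoc-style annotations on the HTTP handlers, so the tag rename originates in handler comments. The sketch below is an illustrative reconstruction, not code from the repository: the package name, handler name, and router path are assumptions; only the @Tags value mirrors the diff.

// Hypothetical handler annotation illustrating the change; swag copies the
// @Tags value verbatim into docs.go, swagger.json, and swagger.yaml.
package server

import "net/http"

// ListDevices lists the devices available to the llama server.
//
// Before this commit the tag line read "@Tags backends"; capitalizing it
// changes the group heading shown in Swagger UI without affecting routing.
//
// @Summary     List available devices for llama server
// @Description Returns a list of available devices for the llama server
// @Tags        Backends
// @Security    ApiKeyAuth
// @Router      /api/v1/backends/llama-cpp/devices [get]
func ListDevices(w http.ResponseWriter, r *http.Request) {
	// handler body elided
}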
@@ -28,7 +28,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a list of available devices for the llama server",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "List available devices for llama server",
                 "responses": {
@@ -56,7 +56,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the help text for the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get help for llama server",
                 "responses": {
@@ -90,7 +90,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse llama-server command",
                 "parameters": [
@@ -141,7 +141,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the version of the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get version of llama server",
                 "responses": {
@@ -175,7 +175,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse mlx_lm.server command",
                 "parameters": [
@@ -223,7 +223,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse vllm serve command",
                 "parameters": [
@@ -265,7 +265,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a list of all instances managed by the server",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "List all instances",
                 "responses": {
@@ -296,7 +296,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the details of a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get details of a specific instance",
                 "parameters": [
@@ -340,7 +340,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Update an instance's configuration",
                 "parameters": [
@@ -393,7 +393,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Create and start a new instance",
                 "parameters": [
@@ -443,7 +443,7 @@ const docTemplate = `{
                 ],
                 "description": "Stops and removes a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Delete an instance",
                 "parameters": [
@@ -483,7 +483,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the logs from a specific instance by name with optional line limit",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get logs from a specific instance",
                 "parameters": [
@@ -532,7 +532,7 @@ const docTemplate = `{
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -576,7 +576,7 @@ const docTemplate = `{
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -622,7 +622,7 @@ const docTemplate = `{
                 ],
                 "description": "Restarts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Restart a running instance",
                 "parameters": [
@@ -665,7 +665,7 @@ const docTemplate = `{
                 ],
                 "description": "Starts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Start a stopped instance",
                 "parameters": [
@@ -708,7 +708,7 @@ const docTemplate = `{
                 ],
                 "description": "Stops a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Stop a running instance",
                 "parameters": [
@@ -751,7 +751,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a map of all nodes configured in the server (node name -\u003e node config)",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "List all configured nodes",
                 "responses": {
@@ -782,7 +782,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the details of a specific node by name",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "Get details of a specific node",
                 "parameters": [
@@ -831,7 +831,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns the version of the llamactl command",
                 "tags": [
-                    "version"
+                    "System"
                 ],
                 "summary": "Get llamactl version",
                 "responses": {
@@ -862,7 +862,7 @@ const docTemplate = `{
                     "text/html"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp UI for the instance",
                 "parameters": [
@@ -908,7 +908,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -955,7 +955,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1002,7 +1002,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1049,7 +1049,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1096,7 +1096,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1143,7 +1143,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1190,7 +1190,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1235,7 +1235,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1282,7 +1282,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1329,7 +1329,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1376,7 +1376,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1423,7 +1423,7 @@ const docTemplate = `{
                     "application/json"
                 ],
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "OpenAI-compatible proxy endpoint",
                 "responses": {
@@ -1454,7 +1454,7 @@ const docTemplate = `{
                 ],
                 "description": "Returns a list of instances in a format compatible with OpenAI API",
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "List instances in OpenAI-compatible format",
                 "responses": {
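The hunks above live inside the docTemplate string that swag embeds in docs/docs.go; the swagger.json hunks that follow carry identical content at offsets seven lines earlier, since the standalone JSON file lacks the Go wrapper. A rough sketch of that wrapper, with field values elided (the exact values in this repository are not shown in the diff):

package docs

import "github.com/swaggo/swag"

// docTemplate holds the full Swagger 2.0 document as a Go raw string.
const docTemplate = `{ ... the JSON shown in the hunks above ... }`

// SwaggerInfo exposes the spec so the server can serve it at runtime.
var SwaggerInfo = &swag.Spec{
	InfoInstanceName: "swagger",
	SwaggerTemplate:  docTemplate,
}

func init() {
	swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)
}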
docs/swagger.json
@@ -21,7 +21,7 @@
                 ],
                 "description": "Returns a list of available devices for the llama server",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "List available devices for llama server",
                 "responses": {
@@ -49,7 +49,7 @@
                 ],
                 "description": "Returns the help text for the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get help for llama server",
                 "responses": {
@@ -83,7 +83,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse llama-server command",
                 "parameters": [
@@ -134,7 +134,7 @@
                 ],
                 "description": "Returns the version of the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get version of llama server",
                 "responses": {
@@ -168,7 +168,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse mlx_lm.server command",
                 "parameters": [
@@ -216,7 +216,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse vllm serve command",
                 "parameters": [
@@ -258,7 +258,7 @@
                 ],
                 "description": "Returns a list of all instances managed by the server",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "List all instances",
                 "responses": {
@@ -289,7 +289,7 @@
                 ],
                 "description": "Returns the details of a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get details of a specific instance",
                 "parameters": [
@@ -333,7 +333,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Update an instance's configuration",
                 "parameters": [
@@ -386,7 +386,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Create and start a new instance",
                 "parameters": [
@@ -436,7 +436,7 @@
                 ],
                 "description": "Stops and removes a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Delete an instance",
                 "parameters": [
@@ -476,7 +476,7 @@
                 ],
                 "description": "Returns the logs from a specific instance by name with optional line limit",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get logs from a specific instance",
                 "parameters": [
@@ -525,7 +525,7 @@
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -569,7 +569,7 @@
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -615,7 +615,7 @@
                 ],
                 "description": "Restarts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Restart a running instance",
                 "parameters": [
@@ -658,7 +658,7 @@
                 ],
                 "description": "Starts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Start a stopped instance",
                 "parameters": [
@@ -701,7 +701,7 @@
                 ],
                 "description": "Stops a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Stop a running instance",
                 "parameters": [
@@ -744,7 +744,7 @@
                 ],
                 "description": "Returns a map of all nodes configured in the server (node name -\u003e node config)",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "List all configured nodes",
                 "responses": {
@@ -775,7 +775,7 @@
                 ],
                 "description": "Returns the details of a specific node by name",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "Get details of a specific node",
                 "parameters": [
@@ -824,7 +824,7 @@
                 ],
                 "description": "Returns the version of the llamactl command",
                 "tags": [
-                    "version"
+                    "System"
                 ],
                 "summary": "Get llamactl version",
                 "responses": {
@@ -855,7 +855,7 @@
                     "text/html"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp UI for the instance",
                 "parameters": [
@@ -901,7 +901,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -948,7 +948,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -995,7 +995,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1042,7 +1042,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1089,7 +1089,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1136,7 +1136,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1183,7 +1183,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1228,7 +1228,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1275,7 +1275,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1322,7 +1322,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1369,7 +1369,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1416,7 +1416,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "OpenAI-compatible proxy endpoint",
                 "responses": {
@@ -1447,7 +1447,7 @@
                 ],
                 "description": "Returns a list of instances in a format compatible with OpenAI API",
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "List instances in OpenAI-compatible format",
                 "responses": {
docs/swagger.yaml
@@ -85,7 +85,7 @@ paths:
       - ApiKeyAuth: []
       summary: List available devices for llama server
       tags:
-      - backends
+      - Backends
   /api/v1/backends/llama-cpp/help:
     get:
       description: Returns the help text for the llama server command
@@ -102,7 +102,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get help for llama server
       tags:
-      - backends
+      - Backends
   /api/v1/backends/llama-cpp/parse-command:
     post:
       consumes:
@@ -138,7 +138,7 @@ paths:
       - ApiKeyAuth: []
       summary: Parse llama-server command
       tags:
-      - backends
+      - Backends
   /api/v1/backends/llama-cpp/version:
     get:
       description: Returns the version of the llama server command
@@ -155,7 +155,7 @@ paths:
       - ApiKeyAuth: []
      summary: Get version of llama server
       tags:
-      - backends
+      - Backends
   /api/v1/backends/mlx/parse-command:
     post:
       consumes:
@@ -185,7 +185,7 @@ paths:
       - ApiKeyAuth: []
       summary: Parse mlx_lm.server command
       tags:
-      - backends
+      - Backends
   /api/v1/backends/vllm/parse-command:
     post:
       consumes:
@@ -215,7 +215,7 @@ paths:
       - ApiKeyAuth: []
       summary: Parse vllm serve command
       tags:
-      - backends
+      - Backends
   /api/v1/instances:
     get:
       description: Returns a list of all instances managed by the server
@@ -234,7 +234,7 @@ paths:
       - ApiKeyAuth: []
       summary: List all instances
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}:
     delete:
      description: Stops and removes a specific instance by name
@@ -259,7 +259,7 @@ paths:
       - ApiKeyAuth: []
       summary: Delete an instance
       tags:
-      - instances
+      - Instances
     get:
       description: Returns the details of a specific instance by name
       parameters:
@@ -285,7 +285,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get details of a specific instance
       tags:
-      - instances
+      - Instances
     post:
       consumes:
      - application/json
@@ -319,7 +319,7 @@ paths:
       - ApiKeyAuth: []
       summary: Create and start a new instance
       tags:
-      - instances
+      - Instances
     put:
       consumes:
      - application/json
@@ -353,7 +353,7 @@ paths:
       - ApiKeyAuth: []
       summary: Update an instance's configuration
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/logs:
     get:
       description: Returns the logs from a specific instance by name with optional
@@ -385,7 +385,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get logs from a specific instance
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/proxy:
     get:
       description: Forwards HTTP requests to the llama-server instance running on
@@ -416,7 +416,7 @@ paths:
       summary: Proxy requests to a specific instance, does not autostart instance
         if stopped
       tags:
-      - instances
+      - Instances
     post:
       description: Forwards HTTP requests to the llama-server instance running on
         a specific port
@@ -446,7 +446,7 @@ paths:
       summary: Proxy requests to a specific instance, does not autostart instance
         if stopped
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/restart:
     post:
       description: Restarts a specific instance by name
@@ -473,7 +473,7 @@ paths:
       - ApiKeyAuth: []
       summary: Restart a running instance
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/start:
     post:
       description: Starts a specific instance by name
@@ -500,7 +500,7 @@ paths:
       - ApiKeyAuth: []
       summary: Start a stopped instance
       tags:
-      - instances
+      - Instances
   /api/v1/instances/{name}/stop:
     post:
       description: Stops a specific instance by name
@@ -527,7 +527,7 @@ paths:
       - ApiKeyAuth: []
       summary: Stop a running instance
       tags:
-      - instances
+      - Instances
   /api/v1/nodes:
     get:
       description: Returns a map of all nodes configured in the server (node name
@@ -547,7 +547,7 @@ paths:
       - ApiKeyAuth: []
       summary: List all configured nodes
       tags:
-      - nodes
+      - Nodes
   /api/v1/nodes/{name}:
     get:
       description: Returns the details of a specific node by name
@@ -578,7 +578,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get details of a specific node
       tags:
-      - nodes
+      - Nodes
   /api/v1/version:
     get:
       description: Returns the version of the llamactl command
@@ -595,7 +595,7 @@ paths:
       - ApiKeyAuth: []
       summary: Get llamactl version
       tags:
-      - version
+      - System
   /llama-cpp/{name}/:
     get:
       description: Proxies requests to the llama.cpp UI for the specified instance
@@ -624,7 +624,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp UI for the instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/apply-template:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -655,7 +655,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/completion:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -686,7 +686,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/detokenize:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -717,7 +717,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/embeddings:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -748,7 +748,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/infill:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -779,7 +779,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/metrics:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -810,7 +810,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/props:
     get:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -841,7 +841,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
         it on-demand if configured
@@ -871,7 +871,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/reranking:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -902,7 +902,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/slots:
     get:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -933,7 +933,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
      tags:
-      - backends
+      - Llama.cpp
   /llama-cpp/{name}/tokenize:
     post:
       description: Proxies requests to the specified llama.cpp server instance, starting
@@ -964,7 +964,7 @@ paths:
       - ApiKeyAuth: []
       summary: Proxy requests to llama.cpp server instance
       tags:
-      - backends
+      - Llama.cpp
   /v1/:
     post:
       consumes:
@@ -987,7 +987,7 @@ paths:
       - ApiKeyAuth: []
       summary: OpenAI-compatible proxy endpoint
       tags:
-      - openai
+      - OpenAI
   /v1/models:
     get:
       description: Returns a list of instances in a format compatible with OpenAI
@@ -1005,7 +1005,7 @@ paths:
       - ApiKeyAuth: []
       summary: List instances in OpenAI-compatible format
       tags:
-      - openai
+      - OpenAI
 securityDefinitions:
   ApiKeyAuth:
     in: header
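Because all three files are generated, the annotation change only sticks if the spec is regenerated rather than hand-edited. One hedged way to wire regeneration into the build, assuming the swag CLI is installed and the general-info annotations live in main.go (neither is confirmed by this diff):

// Package main: regenerate the Swagger artifacts with `go generate ./...`.
// The -g target and the docs output directory are assumptions for illustration.
package main

//go:generate swag init -g main.go --output docs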