Mirror of https://github.com/lordmathis/llamactl.git
Capitalize godoc tags
@@ -21,7 +21,7 @@
                 ],
                 "description": "Returns a list of available devices for the llama server",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "List available devices for llama server",
                 "responses": {
@@ -49,7 +49,7 @@
                 ],
                 "description": "Returns the help text for the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get help for llama server",
                 "responses": {
@@ -83,7 +83,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse llama-server command",
                 "parameters": [
@@ -134,7 +134,7 @@
                 ],
                 "description": "Returns the version of the llama server command",
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Get version of llama server",
                 "responses": {
@@ -168,7 +168,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse mlx_lm.server command",
                 "parameters": [
@@ -216,7 +216,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Backends"
                 ],
                 "summary": "Parse vllm serve command",
                 "parameters": [
@@ -258,7 +258,7 @@
                 ],
                 "description": "Returns a list of all instances managed by the server",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "List all instances",
                 "responses": {
@@ -289,7 +289,7 @@
                 ],
                 "description": "Returns the details of a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get details of a specific instance",
                 "parameters": [
@@ -333,7 +333,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Update an instance's configuration",
                 "parameters": [
@@ -386,7 +386,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Create and start a new instance",
                 "parameters": [
@@ -436,7 +436,7 @@
                 ],
                 "description": "Stops and removes a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Delete an instance",
                 "parameters": [
@@ -476,7 +476,7 @@
                 ],
                 "description": "Returns the logs from a specific instance by name with optional line limit",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Get logs from a specific instance",
                 "parameters": [
@@ -525,7 +525,7 @@
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -569,7 +569,7 @@
                 ],
                 "description": "Forwards HTTP requests to the llama-server instance running on a specific port",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Proxy requests to a specific instance, does not autostart instance if stopped",
                 "parameters": [
@@ -615,7 +615,7 @@
                 ],
                 "description": "Restarts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Restart a running instance",
                 "parameters": [
@@ -658,7 +658,7 @@
                 ],
                 "description": "Starts a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Start a stopped instance",
                 "parameters": [
@@ -701,7 +701,7 @@
                 ],
                 "description": "Stops a specific instance by name",
                 "tags": [
-                    "instances"
+                    "Instances"
                 ],
                 "summary": "Stop a running instance",
                 "parameters": [
@@ -744,7 +744,7 @@
                 ],
                 "description": "Returns a map of all nodes configured in the server (node name -\u003e node config)",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "List all configured nodes",
                 "responses": {
@@ -775,7 +775,7 @@
                 ],
                 "description": "Returns the details of a specific node by name",
                 "tags": [
-                    "nodes"
+                    "Nodes"
                 ],
                 "summary": "Get details of a specific node",
                 "parameters": [
@@ -824,7 +824,7 @@
                 ],
                 "description": "Returns the version of the llamactl command",
                 "tags": [
-                    "version"
+                    "System"
                 ],
                 "summary": "Get llamactl version",
                 "responses": {
@@ -855,7 +855,7 @@
                     "text/html"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp UI for the instance",
                 "parameters": [
@@ -901,7 +901,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -948,7 +948,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -995,7 +995,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1042,7 +1042,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1089,7 +1089,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1136,7 +1136,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1183,7 +1183,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1228,7 +1228,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1275,7 +1275,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1322,7 +1322,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1369,7 +1369,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "backends"
+                    "Llama.cpp"
                 ],
                 "summary": "Proxy requests to llama.cpp server instance",
                 "parameters": [
@@ -1416,7 +1416,7 @@
                     "application/json"
                 ],
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "OpenAI-compatible proxy endpoint",
                 "responses": {
@@ -1447,7 +1447,7 @@
                 ],
                 "description": "Returns a list of instances in a format compatible with OpenAI API",
                 "tags": [
-                    "openai"
+                    "OpenAI"
                 ],
                 "summary": "List instances in OpenAI-compatible format",
                 "responses": {
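The JSON above is generated output (an OpenAPI spec in the style produced by swaggo/swag), so the change the commit title describes was presumably made in the Go handler annotations and the spec regenerated. A minimal sketch of what one such annotation block could look like after this commit, assuming swag is in use: the package, handler name, response payload, and @Router path are hypothetical; the @Summary, @Description, and @Tags text come from the first hunk.

package server

import "net/http"

// listDevices godoc
// @Summary      List available devices for llama server
// @Description  Returns a list of available devices for the llama server
// @Tags         Backends
// @Produce      json
// @Success      200 {array} string "available devices"
// @Router       /backends/llama-cpp/devices [get]
func listDevices(w http.ResponseWriter, r *http.Request) {
	// The body is irrelevant to the generated spec: swag reads only the
	// comment block above when regenerating swagger.json, which is where
	// the lowercase "backends" tag would have been capitalized.
	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`["cpu"]`))
}

Note that not every hunk is a pure capitalization: "version" becomes "System", and the llama.cpp proxy endpoints move from "backends" to "Llama.cpp", so the commit also regroups some endpoints under renamed tags.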