Remove unused JSON unmarshal test and clean up command argument checks

2025-09-19 20:46:25 +02:00
parent 64842e74b0
commit 7eb59aa7e0


@@ -1,7 +1,6 @@
 package vllm_test
 
 import (
-	"encoding/json"
 	"llamactl/pkg/backends/vllm"
 	"slices"
 	"testing"
@@ -108,8 +107,6 @@ func TestBuildCommandArgs(t *testing.T) {
 	if !contains(args, "--enable-log-outputs") {
 		t.Errorf("Expected --enable-log-outputs not found in %v", args)
 	}
-
-	// Host and port should NOT be in the arguments (handled by llamactl)
 	if !contains(args, "--host") {
 		t.Errorf("Expected --host not found in %v", args)
 	}
@@ -129,35 +126,6 @@ func TestBuildCommandArgs(t *testing.T) {
 	}
 }
 
-func TestUnmarshalJSON(t *testing.T) {
-	// Test both underscore and dash formats
-	jsonData := `{
-		"model": "test-model",
-		"tensor_parallel_size": 4,
-		"gpu-memory-utilization": 0.9,
-		"enable-log-outputs": true
-	}`
-
-	var options vllm.VllmServerOptions
-	err := json.Unmarshal([]byte(jsonData), &options)
-	if err != nil {
-		t.Fatalf("Unmarshal failed: %v", err)
-	}
-
-	if options.Model != "test-model" {
-		t.Errorf("Expected model 'test-model', got %q", options.Model)
-	}
-	if options.TensorParallelSize != 4 {
-		t.Errorf("Expected tensor_parallel_size 4, got %d", options.TensorParallelSize)
-	}
-	if options.GPUMemoryUtilization != 0.9 {
-		t.Errorf("Expected gpu_memory_utilization 0.9, got %f", options.GPUMemoryUtilization)
-	}
-	if !options.EnableLogOutputs {
-		t.Errorf("Expected enable_log_outputs true, got %v", options.EnableLogOutputs)
-	}
-}
-
 // Helper functions
 func contains(slice []string, item string) bool {
 	return slices.Contains(slice, item)
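
For reference, the deleted TestUnmarshalJSON exercised mixed underscore/dash key handling ("tensor_parallel_size" alongside "gpu-memory-utilization") when decoding vllm.VllmServerOptions. A minimal standalone sketch of how such dual-format decoding can be implemented follows; the Options type and the key-normalization strategy are illustrative assumptions, not llamactl's actual implementation:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Options is a hypothetical stand-in for a few VllmServerOptions fields.
type Options struct {
	Model                string  `json:"model"`
	TensorParallelSize   int     `json:"tensor_parallel_size"`
	GPUMemoryUtilization float64 `json:"gpu_memory_utilization"`
	EnableLogOutputs     bool    `json:"enable_log_outputs"`
}

// UnmarshalJSON accepts both "tensor_parallel_size" and "tensor-parallel-size"
// by rewriting dashes to underscores before delegating to the default decoder.
func (o *Options) UnmarshalJSON(data []byte) error {
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	normalized := make(map[string]json.RawMessage, len(raw))
	for k, v := range raw {
		normalized[strings.ReplaceAll(k, "-", "_")] = v
	}
	buf, err := json.Marshal(normalized)
	if err != nil {
		return err
	}
	// The alias type has no UnmarshalJSON method, so this call uses the
	// default struct decoding instead of recursing into this method.
	type alias Options
	return json.Unmarshal(buf, (*alias)(o))
}

func main() {
	// Same mixed-format payload the removed test used.
	jsonData := `{
		"model": "test-model",
		"tensor_parallel_size": 4,
		"gpu-memory-utilization": 0.9,
		"enable-log-outputs": true
	}`
	var opts Options
	if err := json.Unmarshal([]byte(jsonData), &opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts)
}

Running this prints {Model:test-model TensorParallelSize:4 GPUMemoryUtilization:0.9 EnableLogOutputs:true}; the alias conversion is the standard trick for reusing the default decoder inside a custom UnmarshalJSON.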