Refactor backend type from LLAMA_SERVER to LLAMA_CPP across components and tests

commit 0fd3613798 (parent 4f6bb6292e)
Date: 2025-09-02 21:19:22 +02:00

9 changed files with 32 additions and 32 deletions
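For orientation: the member being renamed lives in `@/types/instance`, which is not itself part of this diff. A minimal sketch of what that definition plausibly looks like after the change (the const-object pattern and the "llama_cpp" string value are assumptions, not taken from this commit):

```typescript
// Hypothetical sketch of @/types/instance after the rename; the real file
// is not shown in this diff. A common string-backed enum pattern:
export const BackendType = {
  LLAMA_CPP: "llama_cpp", // previously LLAMA_SERVER; string value assumed
  // Add more backend types here as they become available
} as const;

// The matching union type: currently just "llama_cpp".
export type BackendType = (typeof BackendType)[keyof typeof BackendType];
```

Because every component and test below references the enum member instead of a raw string, this single rename in the type module is enough to retarget all of them.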

View File

@@ -10,8 +10,7 @@ import {
   DialogHeader,
   DialogTitle,
 } from "@/components/ui/dialog";
-import type { CreateInstanceOptions, Instance } from "@/types/instance";
-import { BackendType } from "@/types/instance";
+import { BackendType, type CreateInstanceOptions, type Instance } from "@/types/instance";
 import { getBasicFields, getAdvancedFields, getBasicBackendFields, getAdvancedBackendFields } from "@/lib/zodFormUtils";
 import { ChevronDown, ChevronRight } from "lucide-react";
 import ZodFormField from "@/components/ZodFormField";
@@ -55,7 +54,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
       setInstanceName("");
       setFormData({
         auto_restart: true, // Default value
-        backend_type: BackendType.LLAMA_SERVER, // Default backend type
+        backend_type: BackendType.LLAMA_CPP, // Default backend type
         backend_options: {},
       });
     }

View File

@@ -34,11 +34,11 @@ const ZodFormField: React.FC<ZodFormFieldProps> = ({ fieldKey, value, onChange }
       </Label>
       <select
         id={fieldKey}
-        value={typeof value === 'string' ? value : BackendType.LLAMA_SERVER}
+        value={typeof value === 'string' ? value : BackendType.LLAMA_CPP}
         onChange={(e) => handleChange(e.target.value || undefined)}
         className="flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50"
       >
-        <option value={BackendType.LLAMA_SERVER}>Llama Server</option>
+        <option value={BackendType.LLAMA_CPP}>Llama Server</option>
         {/* Add more backend types here as they become available */}
       </select>
       {config.description && (
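Note that only the option's value changes here; the visible label remains "Llama Server". If more backend types land later, one alternative (a sketch, not part of this commit) is to derive the `<option>` list from `BackendType` itself so the dropdown cannot drift out of sync with the type:

```tsx
import { BackendType } from "@/types/instance";

// Sketch only: render one <option> per enum member. Showing the raw value
// ("llama_cpp") would need a separate label map to stay human-readable.
const BackendTypeOptions = () => (
  <>
    {Object.values(BackendType).map((type) => (
      <option key={type} value={type}>
        {type}
      </option>
    ))}
  </>
);

export default BackendTypeOptions;
```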

View File

@@ -19,13 +19,13 @@ describe('InstanceCard - Instance Actions and State', () => {
   const stoppedInstance: Instance = {
     name: 'test-instance',
     status: 'stopped',
-    options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'test-model.gguf' } }
+    options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'test-model.gguf' } }
   }
 
   const runningInstance: Instance = {
     name: 'running-instance',
     status: 'running',
-    options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'running-model.gguf' } }
+    options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'running-model.gguf' } }
   }
 
   beforeEach(() => {

View File

@@ -45,9 +45,9 @@ describe('InstanceList - State Management and UI Logic', () => {
   const mockEditInstance = vi.fn()
 
   const mockInstances: Instance[] = [
-    { name: 'instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model1.gguf' } } },
-    { name: 'instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model2.gguf' } } },
-    { name: 'instance-3', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model3.gguf' } } }
+    { name: 'instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model1.gguf' } } },
+    { name: 'instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model2.gguf' } } },
+    { name: 'instance-3', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model3.gguf' } } }
   ]
 
   const DUMMY_API_KEY = 'test-api-key-123'
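Taken together, the fixtures in these two test files imply roughly the following shapes for the types involved. This is a reconstruction from usage only; the real definitions in `@/types/instance` likely carry more fields and states:

```typescript
import { BackendType } from "@/types/instance";

// Hypothetical shapes inferred from the test fixtures, not the actual
// definitions. Optionality of each field is an assumption.
interface CreateInstanceOptions {
  backend_type: BackendType;
  backend_options?: { model?: string; gpu_layers?: number };
  auto_restart?: boolean;
  max_restarts?: number;  // exercised in the InstanceDialog tests below
  restart_delay?: number; // exercised in the InstanceDialog tests below
}

interface Instance {
  name: string;
  status: "stopped" | "running"; // other states may exist
  options?: CreateInstanceOptions;
}
```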

View File

@@ -92,7 +92,7 @@ afterEach(() => {
 
     expect(mockOnSave).toHaveBeenCalledWith('my-instance', {
       auto_restart: true, // Default value
-      backend_type: BackendType.LLAMA_SERVER
+      backend_type: BackendType.LLAMA_CPP
     })
   })
 
@@ -138,7 +138,7 @@ afterEach(() => {
       name: 'existing-instance',
       status: 'stopped',
       options: {
-        backend_type: BackendType.LLAMA_SERVER,
+        backend_type: BackendType.LLAMA_CPP,
         backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
         auto_restart: false
       }
@@ -179,7 +179,7 @@ afterEach(() => {
     await user.click(screen.getByTestId('dialog-save-button'))
 
     expect(mockOnSave).toHaveBeenCalledWith('existing-instance', {
-      backend_type: BackendType.LLAMA_SERVER,
+      backend_type: BackendType.LLAMA_CPP,
       backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
       auto_restart: false
     })
@@ -273,7 +273,7 @@ afterEach(() => {
 
     expect(mockOnSave).toHaveBeenCalledWith('test-instance', {
       auto_restart: true,
-      backend_type: BackendType.LLAMA_SERVER,
+      backend_type: BackendType.LLAMA_CPP,
       max_restarts: 5,
       restart_delay: 10
     })
@@ -324,7 +324,7 @@ afterEach(() => {
 
     // Should only include non-empty values
     expect(mockOnSave).toHaveBeenCalledWith('clean-instance', {
       auto_restart: true, // Only this default value should be included
-      backend_type: BackendType.LLAMA_SERVER
+      backend_type: BackendType.LLAMA_CPP
     })
   })
@@ -349,7 +349,7 @@ afterEach(() => {
 
     expect(mockOnSave).toHaveBeenCalledWith('numeric-test', {
       auto_restart: true,
-      backend_type: BackendType.LLAMA_SERVER,
+      backend_type: BackendType.LLAMA_CPP,
       backend_options: { gpu_layers: 15 }, // Should be number, not string
     })
   })
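A practical upside of funneling every reference through the enum member rather than the raw string: once `LLAMA_SERVER` is deleted from `@/types/instance`, any call site this commit missed fails to type-check instead of failing at runtime. A sketch (the exact error text depends on how `BackendType` is declared):

```typescript
import { BackendType } from "@/types/instance";

// After the rename, a stale reference no longer compiles, e.g.:
//   const stale = BackendType.LLAMA_SERVER;
//   // error TS2339: Property 'LLAMA_SERVER' does not exist on type ...

// Only the renamed member type-checks:
const backendType = BackendType.LLAMA_CPP;
console.log(backendType);
```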