Mirror of https://github.com/lordmathis/llamactl.git (synced 2025-11-06 17:14:28 +00:00)
Refactor backend type from LLAMA_SERVER to LLAMA_CPP across components and tests
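The rename touches both the exported key and its string value:

// before
export const BackendType = { LLAMA_SERVER: 'llama_server' } as const
// after
export const BackendType = { LLAMA_CPP: 'llama_cpp' } as const

Because the zod schema below now validates backend_type against the new literal, any options still carrying the old 'llama_server' value would no longer pass validation, so every frontend component, test fixture, and default is updated in the same commit.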
@@ -47,8 +47,8 @@ function renderApp() {

 describe('App Component - Critical Business Logic Only', () => {
   const mockInstances: Instance[] = [
-    { name: 'test-instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model1.gguf' } } },
-    { name: 'test-instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model2.gguf' } } }
+    { name: 'test-instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model1.gguf' } } },
+    { name: 'test-instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model2.gguf' } } }
   ]

   beforeEach(() => {
@@ -83,7 +83,7 @@ describe('App Component - Critical Business Logic Only', () => {
     const newInstance: Instance = {
       name: 'new-test-instance',
       status: 'stopped',
-      options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'new-model.gguf' } }
+      options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'new-model.gguf' } }
     }
     vi.mocked(instancesApi.create).mockResolvedValue(newInstance)

@@ -106,7 +106,7 @@ describe('App Component - Critical Business Logic Only', () => {
     await waitFor(() => {
       expect(instancesApi.create).toHaveBeenCalledWith('new-test-instance', {
         auto_restart: true, // Default value
-        backend_type: BackendType.LLAMA_SERVER
+        backend_type: BackendType.LLAMA_CPP
       })
     })

@@ -121,7 +121,7 @@ describe('App Component - Critical Business Logic Only', () => {
     const updatedInstance: Instance = {
       name: 'test-instance-1',
       status: 'stopped',
-      options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'updated-model.gguf' } }
+      options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'updated-model.gguf' } }
     }
     vi.mocked(instancesApi.update).mockResolvedValue(updatedInstance)

@@ -140,7 +140,7 @@ describe('App Component - Critical Business Logic Only', () => {
     // Verify correct API call with existing instance data
     await waitFor(() => {
       expect(instancesApi.update).toHaveBeenCalledWith('test-instance-1', {
-        backend_type: BackendType.LLAMA_SERVER,
+        backend_type: BackendType.LLAMA_CPP,
         backend_options: { model: "model1.gguf" } // Pre-filled from existing instance
       })
     })
@@ -10,8 +10,7 @@ import {
   DialogHeader,
   DialogTitle,
 } from "@/components/ui/dialog";
-import type { CreateInstanceOptions, Instance } from "@/types/instance";
-import { BackendType } from "@/types/instance";
+import { BackendType, type CreateInstanceOptions, type Instance } from "@/types/instance";
 import { getBasicFields, getAdvancedFields, getBasicBackendFields, getAdvancedBackendFields } from "@/lib/zodFormUtils";
 import { ChevronDown, ChevronRight } from "lucide-react";
 import ZodFormField from "@/components/ZodFormField";
@@ -55,7 +54,7 @@ const InstanceDialog: React.FC<InstanceDialogProps> = ({
       setInstanceName("");
       setFormData({
         auto_restart: true, // Default value
-        backend_type: BackendType.LLAMA_SERVER, // Default backend type
+        backend_type: BackendType.LLAMA_CPP, // Default backend type
         backend_options: {},
       });
     }
@@ -34,11 +34,11 @@ const ZodFormField: React.FC<ZodFormFieldProps> = ({ fieldKey, value, onChange }
         </Label>
         <select
           id={fieldKey}
-          value={typeof value === 'string' ? value : BackendType.LLAMA_SERVER}
+          value={typeof value === 'string' ? value : BackendType.LLAMA_CPP}
           onChange={(e) => handleChange(e.target.value || undefined)}
           className="flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50"
         >
-          <option value={BackendType.LLAMA_SERVER}>Llama Server</option>
+          <option value={BackendType.LLAMA_CPP}>Llama Server</option>
           {/* Add more backend types here as they become available */}
         </select>
         {config.description && (
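The option keeps the label "Llama Server" (llama.cpp's HTTP server binary is named llama-server) while its value moves to the new identifier, and the inline comment anticipates further backend types. A data-driven variant of this select (a sketch only; the shipped component hard-codes the single option, and the label map is hypothetical) could derive the options from the BackendType const so new entries render automatically:

import { BackendType, type BackendTypeValue } from "@/types/instance";

// Hypothetical display names for each registered backend.
const BACKEND_LABELS: Record<BackendTypeValue, string> = {
  [BackendType.LLAMA_CPP]: "Llama Server",
};

// One <option> per entry in BackendType; a backend added to the const
// would appear here without touching this component.
const BackendTypeOptions = () => (
  <>
    {Object.values(BackendType).map((type) => (
      <option key={type} value={type}>
        {BACKEND_LABELS[type]}
      </option>
    ))}
  </>
);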
@@ -19,13 +19,13 @@ describe('InstanceCard - Instance Actions and State', () => {
   const stoppedInstance: Instance = {
     name: 'test-instance',
     status: 'stopped',
-    options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'test-model.gguf' } }
+    options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'test-model.gguf' } }
   }

   const runningInstance: Instance = {
     name: 'running-instance',
     status: 'running',
-    options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'running-model.gguf' } }
+    options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'running-model.gguf' } }
   }

   beforeEach(() => {
@@ -45,9 +45,9 @@ describe('InstanceList - State Management and UI Logic', () => {
   const mockEditInstance = vi.fn()

   const mockInstances: Instance[] = [
-    { name: 'instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model1.gguf' } } },
-    { name: 'instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model2.gguf' } } },
-    { name: 'instance-3', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model3.gguf' } } }
+    { name: 'instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model1.gguf' } } },
+    { name: 'instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model2.gguf' } } },
+    { name: 'instance-3', status: 'stopped', options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: 'model3.gguf' } } }
   ]

   const DUMMY_API_KEY = 'test-api-key-123'
@@ -92,7 +92,7 @@ afterEach(() => {

     expect(mockOnSave).toHaveBeenCalledWith('my-instance', {
       auto_restart: true, // Default value
-      backend_type: BackendType.LLAMA_SERVER
+      backend_type: BackendType.LLAMA_CPP
     })
   })

@@ -138,7 +138,7 @@ afterEach(() => {
       name: 'existing-instance',
       status: 'stopped',
       options: {
-        backend_type: BackendType.LLAMA_SERVER,
+        backend_type: BackendType.LLAMA_CPP,
         backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
         auto_restart: false
       }
@@ -179,7 +179,7 @@ afterEach(() => {
     await user.click(screen.getByTestId('dialog-save-button'))

     expect(mockOnSave).toHaveBeenCalledWith('existing-instance', {
-      backend_type: BackendType.LLAMA_SERVER,
+      backend_type: BackendType.LLAMA_CPP,
       backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
       auto_restart: false
     })
@@ -273,7 +273,7 @@ afterEach(() => {

     expect(mockOnSave).toHaveBeenCalledWith('test-instance', {
       auto_restart: true,
-      backend_type: BackendType.LLAMA_SERVER,
+      backend_type: BackendType.LLAMA_CPP,
       max_restarts: 5,
       restart_delay: 10
     })
@@ -324,7 +324,7 @@ afterEach(() => {
     // Should only include non-empty values
     expect(mockOnSave).toHaveBeenCalledWith('clean-instance', {
       auto_restart: true, // Only this default value should be included
-      backend_type: BackendType.LLAMA_SERVER
+      backend_type: BackendType.LLAMA_CPP
     })
   })

@@ -349,7 +349,7 @@ afterEach(() => {

     expect(mockOnSave).toHaveBeenCalledWith('numeric-test', {
       auto_restart: true,
-      backend_type: BackendType.LLAMA_SERVER,
+      backend_type: BackendType.LLAMA_CPP,
       backend_options: { gpu_layers: 15 }, // Should be number, not string
     })
   })
@@ -48,13 +48,13 @@ function TestComponent() {

       {/* Action buttons for testing with specific instances */}
       <button
-        onClick={() => createInstance("new-instance", { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: "test.gguf" } })}
+        onClick={() => createInstance("new-instance", { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "test.gguf" } })}
         data-testid="create-instance"
       >
         Create Instance
       </button>
       <button
-        onClick={() => updateInstance("instance1", { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: "updated.gguf" } })}
+        onClick={() => updateInstance("instance1", { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "updated.gguf" } })}
         data-testid="update-instance"
       >
         Update Instance
@@ -100,8 +100,8 @@ function renderWithProvider(children: ReactNode) {

 describe("InstancesContext", () => {
   const mockInstances: Instance[] = [
-    { name: "instance1", status: "running", options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: "model1.gguf" } } },
-    { name: "instance2", status: "stopped", options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: "model2.gguf" } } },
+    { name: "instance1", status: "running", options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "model1.gguf" } } },
+    { name: "instance2", status: "stopped", options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "model2.gguf" } } },
   ];

   beforeEach(() => {
@@ -160,7 +160,7 @@ describe("InstancesContext", () => {
     const newInstance: Instance = {
       name: "new-instance",
       status: "stopped",
-      options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: "test.gguf" } },
+      options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "test.gguf" } },
     };
     vi.mocked(instancesApi.create).mockResolvedValue(newInstance);

@@ -175,7 +175,7 @@ describe("InstancesContext", () => {

     await waitFor(() => {
       expect(instancesApi.create).toHaveBeenCalledWith("new-instance", {
-        backend_type: BackendType.LLAMA_SERVER,
+        backend_type: BackendType.LLAMA_CPP,
         backend_options: { model: "test.gguf" }
       });
     });
@@ -217,7 +217,7 @@ describe("InstancesContext", () => {
     const updatedInstance: Instance = {
       name: "instance1",
       status: "running",
-      options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: "updated.gguf" } },
+      options: { backend_type: BackendType.LLAMA_CPP, backend_options: { model: "updated.gguf" } },
     };
     vi.mocked(instancesApi.update).mockResolvedValue(updatedInstance);

@@ -232,7 +232,7 @@ describe("InstancesContext", () => {

     await waitFor(() => {
       expect(instancesApi.update).toHaveBeenCalledWith("instance1", {
-        backend_type: BackendType.LLAMA_SERVER,
+        backend_type: BackendType.LLAMA_CPP,
         backend_options: { model: "updated.gguf" }
       });
     });
@@ -1,3 +1,4 @@
+import { BackendType } from '@/types/instance'
 import { z } from 'zod'

 // Define the backend options schema (previously embedded in CreateInstanceOptionsSchema)
@@ -179,7 +180,7 @@ export const CreateInstanceOptionsSchema = z.object({
   on_demand_start: z.boolean().optional(),

   // Backend configuration
-  backend_type: z.enum(['llama_server']).optional(),
+  backend_type: z.enum([BackendType.LLAMA_CPP]).optional(),
   backend_options: BackendOptionsSchema.optional(),
 })

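Because BackendType is declared as const, BackendType.LLAMA_CPP carries the literal type 'llama_cpp', so it can feed z.enum directly and the schema can never drift from the runtime constant, which the old hard-coded 'llama_server' string could. A minimal self-contained sketch of the pattern (assuming zod v3; not the project's full schema):

import { z } from 'zod'

const BackendType = { LLAMA_CPP: 'llama_cpp' } as const

// z.enum accepts the const value because its type is the literal 'llama_cpp'
const OptionsSchema = z.object({
  backend_type: z.enum([BackendType.LLAMA_CPP]).optional(),
})

OptionsSchema.parse({ backend_type: 'llama_cpp' })    // passes
OptionsSchema.parse({})                               // passes (field is optional)
OptionsSchema.parse({ backend_type: 'llama_server' }) // throws ZodError: old value is now invalid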
@@ -3,7 +3,7 @@ import type { CreateInstanceOptions } from '@/schemas/instanceOptions'
 export { type CreateInstanceOptions } from '@/schemas/instanceOptions'

 export const BackendType = {
-  LLAMA_SERVER: 'llama_server'
+  LLAMA_CPP: 'llama_cpp'
 } as const

 export type BackendTypeValue = typeof BackendType[keyof typeof BackendType]
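BackendType uses the const-object pattern rather than a TypeScript enum: BackendTypeValue is derived as the union of the object's values, currently just 'llama_cpp', so the rename propagates through every backend_type annotation at compile time. A sketch of how the derivation works (describeBackend is a hypothetical helper, not part of the codebase):

// The pattern from @/types/instance, shown standalone:
const BackendType = {
  LLAMA_CPP: 'llama_cpp',
} as const

// keyof typeof BackendType = 'LLAMA_CPP'
// BackendTypeValue         = 'llama_cpp'
type BackendTypeValue = typeof BackendType[keyof typeof BackendType]

// The switch is exhaustive over the union, so a backend added to
// BackendType becomes a compile-time error here until it is handled.
function describeBackend(type: BackendTypeValue): string {
  switch (type) {
    case BackendType.LLAMA_CPP:
      return 'llama.cpp (llama-server)'
  }
}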