Implement backend configuration options and refactor related components

commit 4f6bb6292e (parent d9542ba117)
Date: 2025-09-02 21:12:14 +02:00
11 changed files with 358 additions and 62 deletions
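The diffs below migrate the webui test fixtures and expected save payloads from flat llama-server options (`options: { model: ... }`) to a nested shape: a `backend_type` discriminator plus a `backend_options` object holding the backend-specific flags. A minimal sketch of the types the updated tests appear to assume follows; the enum string value, the optionality markers, and the `Record` type for `backend_options` are assumptions, not the actual `@/types/instance` definitions.

```typescript
// Sketch of the instance types assumed by the updated tests. Only fields that
// appear in the diffs are listed; enum values and optional markers are guesses.
export enum BackendType {
  LLAMA_SERVER = 'llama_server', // assumed string value
}

export interface CreateInstanceOptions {
  auto_restart?: boolean
  max_restarts?: number
  restart_delay?: number
  backend_type?: BackendType
  // Backend-specific flags (model, gpu_layers, ...) now live in a nested object
  backend_options?: Record<string, unknown>
}

export interface Instance {
  name: string
  status: 'running' | 'stopped' // only the variants used in these tests
  options?: CreateInstanceOptions
}
```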

View File

@@ -3,6 +3,7 @@ import { render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import InstanceCard from '@/components/InstanceCard'
import type { Instance } from '@/types/instance'
+import { BackendType } from '@/types/instance'
// Mock the health hook since we're not testing health logic here
vi.mock('@/hooks/useInstanceHealth', () => ({
@@ -18,13 +19,13 @@ describe('InstanceCard - Instance Actions and State', () => {
const stoppedInstance: Instance = {
name: 'test-instance',
status: 'stopped',
-options: { model: 'test-model.gguf' }
+options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'test-model.gguf' } }
}
const runningInstance: Instance = {
name: 'running-instance',
status: 'running',
-options: { model: 'running-model.gguf' }
+options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'running-model.gguf' } }
}
beforeEach(() => {

View File

@@ -5,6 +5,7 @@ import InstanceList from '@/components/InstanceList'
import { InstancesProvider } from '@/contexts/InstancesContext'
import { instancesApi } from '@/lib/api'
import type { Instance } from '@/types/instance'
+import { BackendType } from '@/types/instance'
import { AuthProvider } from '@/contexts/AuthContext'
// Mock the API
@@ -44,9 +45,9 @@ describe('InstanceList - State Management and UI Logic', () => {
const mockEditInstance = vi.fn()
const mockInstances: Instance[] = [
-{ name: 'instance-1', status: 'stopped', options: { model: 'model1.gguf' } },
-{ name: 'instance-2', status: 'running', options: { model: 'model2.gguf' } },
-{ name: 'instance-3', status: 'stopped', options: { model: 'model3.gguf' } }
+{ name: 'instance-1', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model1.gguf' } } },
+{ name: 'instance-2', status: 'running', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model2.gguf' } } },
+{ name: 'instance-3', status: 'stopped', options: { backend_type: BackendType.LLAMA_SERVER, backend_options: { model: 'model3.gguf' } } }
]
const DUMMY_API_KEY = 'test-api-key-123'

View File

@@ -3,6 +3,7 @@ import { render, screen, waitFor } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import InstanceDialog from '@/components/InstanceDialog'
import type { Instance } from '@/types/instance'
+import { BackendType } from '@/types/instance'
describe('InstanceModal - Form Logic and Validation', () => {
const mockOnSave = vi.fn()
@@ -91,6 +92,7 @@ afterEach(() => {
expect(mockOnSave).toHaveBeenCalledWith('my-instance', {
auto_restart: true, // Default value
+backend_type: BackendType.LLAMA_SERVER
})
})
@@ -136,8 +138,8 @@ afterEach(() => {
name: 'existing-instance',
status: 'stopped',
options: {
-model: 'test-model.gguf',
-gpu_layers: 10,
+backend_type: BackendType.LLAMA_SERVER,
+backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
auto_restart: false
}
}
@@ -177,8 +179,8 @@ afterEach(() => {
await user.click(screen.getByTestId('dialog-save-button'))
expect(mockOnSave).toHaveBeenCalledWith('existing-instance', {
-model: 'test-model.gguf',
-gpu_layers: 10,
+backend_type: BackendType.LLAMA_SERVER,
+backend_options: { model: 'test-model.gguf', gpu_layers: 10 },
auto_restart: false
})
})
@@ -271,6 +273,7 @@ afterEach(() => {
expect(mockOnSave).toHaveBeenCalledWith('test-instance', {
auto_restart: true,
+backend_type: BackendType.LLAMA_SERVER,
max_restarts: 5,
restart_delay: 10
})
@@ -321,6 +324,7 @@ afterEach(() => {
// Should only include non-empty values
expect(mockOnSave).toHaveBeenCalledWith('clean-instance', {
auto_restart: true, // Only this default value should be included
+backend_type: BackendType.LLAMA_SERVER
})
})
@@ -345,7 +349,8 @@ afterEach(() => {
expect(mockOnSave).toHaveBeenCalledWith('numeric-test', {
auto_restart: true,
-gpu_layers: 15, // Should be number, not string
+backend_type: BackendType.LLAMA_SERVER,
+backend_options: { gpu_layers: 15 }, // Should be number, not string
})
})
})
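The InstanceDialog expectations above imply a particular save-payload shape: empty fields are omitted, numeric strings are coerced to numbers, `backend_type` is always included, and backend flags are nested under `backend_options` only when at least one is set. A hypothetical helper mirroring that behavior is sketched below; the function and parameter names are illustrative, not the component's actual implementation.

```typescript
import { BackendType } from '@/types/instance'

// Form values as collected from the dialog inputs before cleanup.
type FormValues = Record<string, string | number | boolean | undefined>

// Hypothetical payload builder matching the test expectations: drop empty
// values, coerce numeric strings ("15" -> 15), and nest backend flags under
// backend_options while backend_type is always present.
function buildSavePayload(
  backendType: BackendType,
  instanceFields: FormValues, // auto_restart, max_restarts, restart_delay, ...
  backendFields: FormValues, // model, gpu_layers, ...
) {
  const clean = (values: FormValues) =>
    Object.fromEntries(
      Object.entries(values)
        .filter(([, v]) => v !== undefined && v !== '')
        .map(([k, v]) => {
          // Coerce numeric strings to numbers; leave everything else alone.
          const n = typeof v === 'string' ? Number(v) : NaN
          return [k, Number.isNaN(n) ? v : n]
        }),
    )

  const backendOptions = clean(backendFields)
  return {
    ...clean(instanceFields),
    backend_type: backendType,
    // Omit backend_options entirely when no backend flag is set.
    ...(Object.keys(backendOptions).length > 0
      ? { backend_options: backendOptions }
      : {}),
  }
}

// Example matching the "numeric-test" expectation above:
// buildSavePayload(BackendType.LLAMA_SERVER, { auto_restart: true }, { gpu_layers: '15' })
// -> { auto_restart: true, backend_type: BackendType.LLAMA_SERVER, backend_options: { gpu_layers: 15 } }
```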