From b594ade8f996cc3fda779955bb37ecefaee79d20 Mon Sep 17 00:00:00 2001
From: LordMathis
Date: Sat, 15 Nov 2025 00:45:02 +0100
Subject: [PATCH] Add mocks for ConfigContext in tests to use default configuration values

---
 webui/src/__tests__/App.test.tsx      | 23 ++++++-
 .../__tests__/InstanceModal.test.tsx  | 63 ++++++++++++++-----
 2 files changed, 69 insertions(+), 17 deletions(-)

diff --git a/webui/src/__tests__/App.test.tsx b/webui/src/__tests__/App.test.tsx
index 7497c31..5746fe5 100644
--- a/webui/src/__tests__/App.test.tsx
+++ b/webui/src/__tests__/App.test.tsx
@@ -49,6 +49,21 @@ vi.mock('@/lib/healthService', () => ({
   })),
 }))
 
+// Mock the ConfigContext hooks
+vi.mock('@/contexts/ConfigContext', () => ({
+  useInstanceDefaults: () => ({
+    autoRestart: true,
+    maxRestarts: 3,
+    restartDelay: 5,
+    onDemandStart: false,
+  }),
+  useBackendSettings: () => ({
+    command: '/usr/bin/llama-server',
+    dockerEnabled: false,
+    dockerImage: '',
+  }),
+}))
+
 function renderApp() {
   return render(
@@ -119,8 +134,12 @@ describe('App Component - Critical Business Logic Only', () => {
     // Verify correct API call
     await waitFor(() => {
       expect(instancesApi.create).toHaveBeenCalledWith('new-test-instance', {
-        auto_restart: true, // Default value
-        backend_type: BackendType.LLAMA_CPP
+        auto_restart: true, // Default value from config
+        backend_type: BackendType.LLAMA_CPP,
+        docker_enabled: false,
+        max_restarts: 3,
+        on_demand_start: false,
+        restart_delay: 5
       })
     })
 
diff --git a/webui/src/components/__tests__/InstanceModal.test.tsx b/webui/src/components/__tests__/InstanceModal.test.tsx
index a931ae9..3010195 100644
--- a/webui/src/components/__tests__/InstanceModal.test.tsx
+++ b/webui/src/components/__tests__/InstanceModal.test.tsx
@@ -5,6 +5,21 @@ import InstanceDialog from '@/components/InstanceDialog'
 import type { Instance } from '@/types/instance'
 import { BackendType } from '@/types/instance'
 
+// Mock the ConfigContext hooks
+vi.mock('@/contexts/ConfigContext', () => ({
+  useInstanceDefaults: () => ({
+    autoRestart: true,
+    maxRestarts: 3,
+    restartDelay: 5,
+    onDemandStart: false,
+  }),
+  useBackendSettings: () => ({
+    command: '/usr/bin/llama-server',
+    dockerEnabled: false,
+    dockerImage: '',
+  }),
+}))
+
 describe('InstanceModal - Form Logic and Validation', () => {
   const mockOnSave = vi.fn()
   const mockOnOpenChange = vi.fn()
@@ -75,7 +90,7 @@ afterEach(() => {
   it('submits form with correct data structure', async () => {
     const user = userEvent.setup()
-    
+
     render(
 
     // Fill required name
     await user.type(screen.getByLabelText(/Instance Name/), 'my-instance')
-    
+
     // Submit form
     await user.click(screen.getByTestId('dialog-save-button'))
 
     expect(mockOnSave).toHaveBeenCalledWith('my-instance', {
-      auto_restart: true, // Default value
-      backend_type: BackendType.LLAMA_CPP
+      auto_restart: true, // Default value from config
+      backend_type: BackendType.LLAMA_CPP,
+      docker_enabled: false,
+      max_restarts: 3,
+      on_demand_start: false,
+      restart_delay: 5
     })
   })
@@ -253,7 +272,7 @@ afterEach(() => {
   it('includes restart options in form submission when enabled', async () => {
     const user = userEvent.setup()
-    
+
     render(
 
     // Fill form
     await user.type(screen.getByLabelText(/Instance Name/), 'test-instance')
-    
-    // Set restart options
-    await user.type(screen.getByLabelText(/Max Restarts/), '5')
-    await user.type(screen.getByLabelText(/Restart Delay/), '10')
+
+    // Clear default values and set new restart options
+    const maxRestartsInput = screen.getByLabelText(/Max Restarts/)
+    const restartDelayInput = screen.getByLabelText(/Restart Delay/)
+    await user.clear(maxRestartsInput)
+    await user.type(maxRestartsInput, '5')
+    await user.clear(restartDelayInput)
+    await user.type(restartDelayInput, '10')
 
     await user.click(screen.getByTestId('dialog-save-button'))
 
     expect(mockOnSave).toHaveBeenCalledWith('test-instance', {
       auto_restart: true,
       backend_type: BackendType.LLAMA_CPP,
+      docker_enabled: false,
       max_restarts: 5,
+      on_demand_start: false,
       restart_delay: 10
     })
   })
@@ -284,7 +309,7 @@ afterEach(() => {
 describe('Form Data Handling', () => {
   it('cleans up undefined values before submission', async () => {
     const user = userEvent.setup()
-    
+
     render(
 
     await user.click(screen.getByTestId('dialog-save-button'))
 
-    // Should only include non-empty values
+    // Should include default values from config
     expect(mockOnSave).toHaveBeenCalledWith('clean-instance', {
-      auto_restart: true, // Only this default value should be included
-      backend_type: BackendType.LLAMA_CPP
+      auto_restart: true,
+      backend_type: BackendType.LLAMA_CPP,
+      docker_enabled: false,
+      max_restarts: 3,
+      on_demand_start: false,
+      restart_delay: 5
     })
   })
 
   it('handles numeric fields correctly', async () => {
     const user = userEvent.setup()
-    
+
     render(
     )
 
     await user.type(screen.getByLabelText(/Instance Name/), 'numeric-test')
-    
+
     // Test GPU layers field (numeric)
     const gpuLayersInput = screen.getByLabelText(/GPU Layers/)
     await user.type(gpuLayersInput, '15')
@@ -328,6 +357,10 @@ afterEach(() => {
       auto_restart: true,
       backend_type: BackendType.LLAMA_CPP,
       backend_options: { gpu_layers: 15 }, // Should be number, not string
+      docker_enabled: false,
+      max_restarts: 3,
+      on_demand_start: false,
+      restart_delay: 5
     })
   })