Use 3D memory layout for convolution

This commit is contained in:
2024-03-20 19:15:27 +01:00
parent ef63cbd9f1
commit 5860faf85e
2 changed files with 13 additions and 10 deletions

View File

@@ -16,17 +16,14 @@ __global__ void Kernels::convolution(
const int nFilters, const int nFilters,
const int outputSize const int outputSize
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockDim.y * blockIdx.y + threadIdx.y;
int f = blockDim.z * blockIdx.z + threadIdx.z;
if (tid >= outputSize * outputSize * nFilters) { if (i >= outputSize || j >= outputSize || f >= nFilters) {
return; return;
} }
// Get output index
int f = tid / (outputSize * outputSize);
int i = tid % (outputSize * outputSize) / outputSize;
int j = tid % outputSize;
float sum = 0.0f; float sum = 0.0f;
// Iterate over kernel and input matrix // Iterate over kernel and input matrix
@@ -54,5 +51,5 @@ __global__ void Kernels::convolution(
} }
} }
d_output[tid] = sum; d_output[f * outputSize * outputSize + i * outputSize + j] = sum;
} }

View File

@@ -108,8 +108,14 @@ void Conv2d::toCuda() {
float* Conv2d::forward(const float* d_input) { float* Conv2d::forward(const float* d_input) {
// Convolve // Convolve
int THREADS_PER_BLOCK = outputSize * outputSize * numFilters; dim3 block(8,8,8);
Kernels::convolution<<<1, THREADS_PER_BLOCK>>>( dim3 grid(
(outputSize + block.x - 1) / block.x,
(outputSize + block.y - 1) / block.y,
(numFilters + block.z - 1) / block.z
);
Kernels::convolution<<<grid, block>>>(
d_input, d_weights, d_output, inputSize, inputChannels, paddingSize, d_input, d_weights, d_output, inputSize, inputChannels, paddingSize,
kernelSize, stride, numFilters, outputSize kernelSize, stride, numFilters, outputSize
); );