Make conv2d work again

2024-03-10 19:13:22 +01:00
parent 6bbc036f62
commit f3112311da
6 changed files with 146 additions and 98 deletions

.gitignore

@@ -33,4 +33,6 @@
 build/
 .vscode/
 .cache
+.cache
+venv

padding.cuh

@@ -10,4 +10,9 @@ __global__ void pad_matrix_kernel(
     int p
 );

+enum Padding {
+    SAME,
+    VALID
+};
+
 #endif // PADDING_H

conv2d.cuh

@@ -5,19 +5,20 @@
 #include <vector>

 #include "activations.cuh"
+#include "padding.cuh"

 namespace Layers {

 class Conv2d {
   public:
     Conv2d(
         int inputSize,
         int inputChannels,
         int kernelSize,
         int stride,
-        std::string padding,
+        Padding padding,
         int numFilters,
         Activation activation
     );
     ~Conv2d();
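For reference, a minimal call-site sketch against the new signature; the values mirror SimpleTest below. Replacing std::string with the Padding enum means a misspelled mode now fails at compile time instead of silently matching neither branch:

#include "conv2d.cuh"

// Hypothetical call site (not part of this commit): padding is now the
// Padding enum from padding.cuh rather than a free-form string.
Layers::Conv2d conv(
    /*inputSize=*/4, /*inputChannels=*/1, /*kernelSize=*/2, /*stride=*/1,
    /*padding=*/VALID, /*numFilters=*/1, /*activation=*/LINEAR
);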

convolution.cu

@@ -1,4 +1,5 @@
 #include "convolution.cuh"
+#include <iostream>

 __global__ void convolution_kernel(
     const float* d_input,
@@ -19,35 +20,26 @@ __global__ void convolution_kernel(
     // Get output index
     int f = tid / (outputSize * outputSize);
-    int i = (tid % (outputSize * outputSize)) / outputSize;
-    int j = (tid % (outputSize * outputSize)) % outputSize;
+    int i = tid % (outputSize * outputSize) / outputSize;
+    int j = tid % outputSize;

     float sum = 0.0f;

-    // std::cout << "f: " << f << ", i: " << i << ", j: " << j << std::endl;
-
     // Iterate over kernel and input matrix
     for (int k = 0; k < kernelSize; k++) {
         for (int l = 0; l < kernelSize; l++) {
             for (int c = 0; c < nChannels; c++) {
-                int kernelIndex =
-                    k * (kernelSize * nChannels * nFilters) +
-                    l * (nChannels * nFilters) + c * (nFilters) + f;
-                int inputIndex =
-                    (i * stride + k) * (inputSize * nChannels) +
-                    (j * stride + l) * (nChannels) + c;
-
-                // std::cout << "kernelIndex: " << kernelIndex << ", kernel
-                // value: " << kernels[kernelIndex] << ", inputIndex: " <<
-                // inputIndex << ", input value: " << input[inputIndex] <<
-                // std::endl;
+                int kernelIndex = f * kernelSize * kernelSize * nChannels +
+                                  c * kernelSize * kernelSize + k * kernelSize +
+                                  l;
+                int inputIndex = c * inputSize * inputSize +
+                                 (i * stride + k) * inputSize +
+                                 (j * stride + l);

                 sum += d_kernel[kernelIndex] * d_input[inputIndex];
             }
         }
     }

-    // std::cout << "sum: " << sum << std::endl;
-
-    d_output[i * (outputSize * nFilters) + j * (nFilters) + f] = sum;
+    d_output[tid] = sum;
 }
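The simplified index math above relies on outputSize dividing outputSize * outputSize, so tid % (outputSize * outputSize) % outputSize equals tid % outputSize. A minimal host-side sketch of that identity and of the d_output[tid] layout (the sizes are hypothetical, not taken from the test suite):

#include <cassert>

int main() {
    const int outputSize = 5, nFilters = 2;
    for (int tid = 0; tid < outputSize * outputSize * nFilters; tid++) {
        int f = tid / (outputSize * outputSize);
        int i = tid % (outputSize * outputSize) / outputSize;
        int j = tid % outputSize;
        // Recomposing (f, i, j) must give back tid, so d_output[tid] = sum
        // stores each result at its filter-major (CHW-style) slot.
        assert(f * outputSize * outputSize + i * outputSize + j == tid);
    }
    return 0;
}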

conv2d.cu

@@ -1,5 +1,5 @@
-#include <string>
 #include <iostream>
+#include <string>

 #include "activations.cuh"
 #include "conv2d.cuh"
@@ -13,7 +13,7 @@ Layers::Conv2d::Conv2d(
     int inputChannels,
     int kernelSize,
     int stride,
-    std::string padding,
+    Padding padding,
     int numFilters,
     Activation activation
 )
@@ -25,34 +25,43 @@ Layers::Conv2d::Conv2d(
       activation(activation) {
     // Allocate memory for kernels
-    if (padding == "SAME") {
+    switch (padding)
+    {
+    case SAME:
         outputSize = inputSize;
         paddingSize = ((stride - 1) * inputSize - stride + kernelSize) / 2;
-    } else if (padding == "VALID") {
+        break;
+    case VALID:
         paddingSize = 0;
         outputSize = (inputSize - kernelSize) / stride + 1;
+        break;
+    default:
+        break;
     }

     kernels.resize(kernelSize * kernelSize * inputChannels * numFilters);
     initializeKernels();

     d_kernels = nullptr;
-    CUDA_CHECK(
-        cudaMalloc((void**)&d_kernels, sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters)
-    );
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_kernels,
+        sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters
+    ));

     biases.resize(outputSize * outputSize * numFilters);
     initializeBiases();

     d_biases = nullptr;
-    CUDA_CHECK(
-        cudaMalloc((void**)&d_biases, sizeof(float) * outputSize * outputSize * numFilters)
-    );
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_biases, sizeof(float) * outputSize * outputSize * numFilters
+    ));

     d_padded = nullptr;
     CUDA_CHECK(cudaMalloc(
         (void**)&d_padded, sizeof(float) * (inputSize + 2 * paddingSize) *
                                (inputSize + 2 * paddingSize) * inputChannels
     ));

     toCuda();
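As a sanity check on the SAME branch, a small sketch using the ComplexTest dimensions further down (inputSize 5, kernelSize 3, stride 1); the derived values are plain arithmetic, not repo output:

#include <cstdio>

int main() {
    int inputSize = 5, kernelSize = 3, stride = 1;
    // For stride == 1 the formula reduces to (kernelSize - 1) / 2.
    int paddingSize = ((stride - 1) * inputSize - stride + kernelSize) / 2; // 1
    int padded      = inputSize + 2 * paddingSize;                          // 7
    // Convolving the padded input at stride 1 restores the input size.
    int outputSize  = (padded - kernelSize) / stride + 1;                   // 5
    printf("pad=%d out=%d\n", paddingSize, outputSize);
    return 0;
}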
@@ -79,19 +88,22 @@ void Layers::Conv2d::setKernels(const std::vector<float>& kernels_input) {
 void Layers::Conv2d::toCuda() {
     CUDA_CHECK(cudaMemcpy(
-        d_kernels, kernels.data(), sizeof(float) * kernelSize * kernelSize * numFilters,
+        d_kernels, kernels.data(),
+        sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters,
         cudaMemcpyHostToDevice
     ));

     CUDA_CHECK(cudaMemcpy(
-        d_biases, biases.data(), sizeof(float) * outputSize * outputSize * numFilters,
+        d_biases, biases.data(),
+        sizeof(float) * outputSize * outputSize * numFilters,
         cudaMemcpyHostToDevice
     ));
 }
 void Layers::Conv2d::forward(const float* d_input, float* d_output) {
     // Pad input
-    int THREADS_PER_BLOCK = (inputSize + 2 * paddingSize) * (inputSize + 2 * paddingSize) * inputChannels;
+    int THREADS_PER_BLOCK = (inputSize + 2 * paddingSize) *
+                            (inputSize + 2 * paddingSize) * inputChannels;

     pad_matrix_kernel<<<1, THREADS_PER_BLOCK>>>(
         d_input, d_padded, inputSize, inputSize, inputChannels, paddingSize
@@ -100,11 +112,14 @@ void Layers::Conv2d::forward(const float* d_input, float* d_output) {

     // Convolve
     THREADS_PER_BLOCK = outputSize * outputSize * numFilters;
     convolution_kernel<<<1, THREADS_PER_BLOCK>>>(
-        d_padded, d_kernels, d_output, inputSize + (2 * paddingSize), inputChannels, kernelSize, stride, numFilters, outputSize
+        d_padded, d_kernels, d_output, inputSize + (2 * paddingSize),
+        inputChannels, kernelSize, stride, numFilters, outputSize
     );

     // Add bias
-    vec_vec_add_kernel<<<1, biases.size()>>>(d_biases, d_output, d_output, biases.size());
+    vec_vec_add_kernel<<<1, biases.size()>>>(
+        d_biases, d_output, d_output, biases.size()
+    );

     CUDA_CHECK(cudaDeviceSynchronize());
 }
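One limitation these launches keep: <<<1, N>>> schedules a single block, and CUDA caps a block at 1024 threads, so any layer with more output elements than that would fail to launch. A hedged sketch of the standard ceil-divide grid (it assumes the kernels gain a tid < total bounds check, which this commit does not add):

// Hypothetical replacement launch, same arguments as above.
int total   = outputSize * outputSize * numFilters;
int threads = 256;
int blocks  = (total + threads - 1) / threads;  // ceil(total / threads)
convolution_kernel<<<blocks, threads>>>(
    d_padded, d_kernels, d_output, inputSize + (2 * paddingSize),
    inputChannels, kernelSize, stride, numFilters, outputSize
);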
@@ -119,27 +134,35 @@ outputSize x numFilters
 */
 void Layers::Conv2d::host_conv(const float* input, float* output) {
     // Iterate over output matrix
-    for (int f = 0; f < numFilters; f++) {
-        for (int i = 0; i < outputSize; i++) {
-            for (int j = 0; j < outputSize; j++) {
-                float sum = 0.0f;
+    for (int tid = 0; tid < outputSize * outputSize * numFilters; tid++)
+    {
+        // Get output index
+        int f = tid / (outputSize * outputSize);
+        int i = tid % (outputSize * outputSize) / outputSize;
+        int j = tid % outputSize;

-                // Iterate over kernel and input matrix
-                for (int k = 0; k < kernelSize; k++) {
-                    for (int l = 0; l < kernelSize; l++) {
-                        for (int c = 0; c < inputChannels; c++) {
-                            int kernelIndex = k * (kernelSize * inputChannels * numFilters) + l * (inputChannels * numFilters) + c * (numFilters) + f;
-                            int inputIndex = (i * stride + k) * (inputSize * inputChannels) + (j * stride + l) * (inputChannels) + c;
+        float sum = 0.0f;

-                            sum += kernels[kernelIndex] * input[inputIndex];
-                        }
-                    }
-                }
+        // Iterate over kernel and input matrix
+        for (int k = 0; k < kernelSize; k++) {
+            for (int l = 0; l < kernelSize; l++) {
+                for (int c = 0; c < inputChannels; c++) {
+                    int kernelIndex =
+                        f * kernelSize * kernelSize * inputChannels +
+                        c * kernelSize * kernelSize + k * kernelSize +
+                        l;
+                    int inputIndex = c * inputSize * inputSize +
+                                     (i * stride + k) * inputSize +
+                                     (j * stride + l);
+                    sum += kernels[kernelIndex] * input[inputIndex];
+                }
+            }
+        }

-                output[i * (outputSize * numFilters) + j * (numFilters) + f] = sum;
-            }
-        }
-    }
+        int outputIndex =
+            f * outputSize * outputSize + i * outputSize + j;
+        output[outputIndex] = sum;
+    }
 }

conv2d_test.cu

@@ -12,7 +12,7 @@ class Conv2dTest : public ::testing::Test {
         int inputChannels,
         int kernelSize,
         int stride,
-        std::string padding,
+        Padding padding,
         int numFilters,
         Activation activation,
         std::vector<float>& input,
@@ -30,12 +30,14 @@ class Conv2dTest : public ::testing::Test {
         // Allocate device memory
         cudaStatus = cudaMalloc(
-            (void**)&d_input, sizeof(float) * inputSize * inputSize * inputChannels
+            (void**)&d_input,
+            sizeof(float) * inputSize * inputSize * inputChannels
         );
         EXPECT_EQ(cudaStatus, cudaSuccess);

         cudaStatus = cudaMalloc(
-            (void**)&d_output, sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters
+            (void**)&d_output,
+            sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters
         );
         EXPECT_EQ(cudaStatus, cudaSuccess);
@@ -46,7 +48,6 @@ class Conv2dTest : public ::testing::Test {
         );
         EXPECT_EQ(cudaStatus, cudaSuccess);
-
         return conv2d;
     }
@@ -60,13 +61,13 @@ class Conv2dTest : public ::testing::Test {
 };

 TEST_F(Conv2dTest, SimpleTest) {
     int inputSize = 4;
     int inputChannels = 1;
     int kernelSize = 2;
     int stride = 1;
-    std::string padding = "VALID";
+    Padding padding = VALID;
     int numFilters = 1;
     Activation activation = LINEAR;

     std::vector<float> input = {1.0f,  2.0f,  3.0f,  4.0f,  5.0f,  6.0f,
                                 7.0f,  8.0f,  9.0f,  10.0f, 11.0f, 12.0f,
@@ -109,14 +110,15 @@ TEST_F(Conv2dTest, SimpleTest) {
 }

 TEST_F(Conv2dTest, ComplexTest) {
     int inputSize = 5;
     int inputChannels = 3;
     int kernelSize = 3;
     int stride = 1;
-    std::string padding = "SAME";
+    Padding padding = SAME;
     int numFilters = 2;
     Activation activation = LINEAR;

+    // clang-format off
     std::vector<float> input = {
         // Channel 1
         0.823f, 0.217f, 0.435f, 0.981f, 0.742f,
@@ -139,33 +141,32 @@ TEST_F(Conv2dTest, ComplexTest) {
     };

     std::vector<float> kernels = {
-        // Filter 1 Channel 1
+        // Filter 1, Channel 1
         0.128f, 0.754f, 0.987f,
         0.321f, 0.412f, 0.635f,
         0.298f, 0.017f, 0.845f,
-        // Filter 1 Channel 2
+        // Filter 1, Channel 2
         0.514f, 0.729f, 0.952f,
         0.684f, 0.378f, 0.159f,
         0.823f, 0.547f, 0.216f,
-        // Filter 1 Channel 3
-        0.456f, 0.123f, 0.789f,
-        0.123f, 0.345f, 0.123f,
-        0.789f, 0.123f, 0.345f,
-        // Filter 2 Channel 1
-        0.123f, 0.345f, 0.123f,
-        0.789f, 0.123f, 0.345f,
-        0.123f, 0.345f, 0.123f,
-        // Filter 2 Channel 2
-        0.146f, 0.789f, 0.123f,
-        0.345f, 0.123f, 0.789f,
-        0.123f, 0.345f, 0.123f,
-        // Filter 2 Channel 3
-        0.123f, 0.345f, 0.123f,
-        0.789f, 0.123f, 0.345f,
-        0.123f, 0.345f, 0.123f
+        // Filter 1, Channel 3
+        0.983f, 0.231f, 0.456f,
+        0.178f, 0.654f, 0.821f,
+        0.345f, 0.987f, 0.123f,
+        // Filter 2, Channel 1
+        0.789f, 0.543f, 0.210f,
+        0.012f, 0.371f, 0.638f,
+        0.456f, 0.198f, 0.907f,
+        // Filter 2, Channel 2
+        0.101f, 0.432f, 0.759f,
+        0.234f, 0.567f, 0.890f,
+        0.543f, 0.876f, 0.219f,
+        // Filter 2, Channel 3
+        0.345f, 0.678f, 0.011f,
+        0.678f, 0.011f, 0.345f,
+        0.011f, 0.345f, 0.678f
     };
+    // clang-format on

     float* d_input;
     float* d_output;
@@ -178,4 +179,28 @@ TEST_F(Conv2dTest, ComplexTest) {
     EXPECT_EQ(inputSize, conv2d.outputSize);

     conv2d.forward(d_input, d_output);
+
+    std::vector<float> output(
+        conv2d.outputSize * conv2d.outputSize * numFilters
+    );
+    cudaMemcpy(
+        output.data(), d_output,
+        sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters,
+        cudaMemcpyDeviceToHost
+    );
+
+    // Generated by tools/generate_conv2d_test.py
+    std::vector<float> expected = {
+        2.29426f, 3.89173f, 4.17634f, 3.25501f, 2.07618f, 5.41483f, 7.09971f,
+        6.39811f, 5.71432f, 3.10928f, 5.12973f, 6.29638f, 5.26962f, 5.21997f,
+        3.05852f, 6.17517f, 7.19311f, 6.69771f, 6.2142f,  4.03242f, 3.3792f,
+        4.36444f, 4.396f,   4.69905f, 3.62061f, 2.87914f, 3.71743f, 3.51854f,
+        2.98413f, 1.46579f, 4.94951f, 6.18983f, 4.98187f, 4.38372f, 3.35386f,
+        5.0364f,  5.3756f,  4.05993f, 4.89299f, 2.78625f, 5.33763f, 5.80899f,
+        5.89785f, 5.51095f, 3.74287f, 2.64053f, 4.05895f, 3.96482f, 4.30177f,
+        1.94269f
+    };
+
+    for (int i = 0; i < output.size(); i++) {
+        EXPECT_NEAR(output[i], expected[i], 0.0001f);
+    }
 }