Make conv2d work again

commit f3112311da (parent 6bbc036f62)
2024-03-10 19:13:22 +01:00
6 changed files with 146 additions and 98 deletions

.gitignore
@@ -33,4 +33,6 @@
 build/
 .vscode/
 .cache
+venv

padding.cuh

@@ -10,4 +10,9 @@ __global__ void pad_matrix_kernel(
     int p
 );
 
+enum Padding {
+    SAME,
+    VALID
+};
+
 #endif // PADDING_H

conv2d.cuh

@@ -5,19 +5,20 @@
 #include <vector>
 
 #include "activations.cuh"
+#include "padding.cuh"
 
 namespace Layers {
 
 class Conv2d {
   public:
     Conv2d(
         int inputSize,
         int inputChannels,
         int kernelSize,
         int stride,
-        std::string padding,
+        Padding padding,
         int numFilters,
         Activation activation
     );
     ~Conv2d();
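
Note: with the enum in the signature, callers now pass the padding mode directly instead of a string. A minimal usage sketch (values borrowed from SimpleTest in the test file below; not code from this commit):

    // 4x4 single-channel input, 2x2 kernel, stride 1, VALID padding,
    // one filter, linear activation.
    Layers::Conv2d conv2d(4, 1, 2, 1, VALID, 1, LINEAR);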

convolution.cu

@@ -1,4 +1,5 @@
 #include "convolution.cuh"
+#include <iostream>
 
 __global__ void convolution_kernel(
     const float* d_input,
@@ -19,35 +20,26 @@ __global__ void convolution_kernel(
     // Get output index
     int f = tid / (outputSize * outputSize);
-    int i = (tid % (outputSize * outputSize)) / outputSize;
-    int j = (tid % (outputSize * outputSize)) % outputSize;
+    int i = tid % (outputSize * outputSize) / outputSize;
+    int j = tid % outputSize;
 
     float sum = 0.0f;
-    // std::cout << "f: " << f << ", i: " << i << ", j: " << j << std::endl;
 
     // Iterate over kernel and input matrix
     for (int k = 0; k < kernelSize; k++) {
         for (int l = 0; l < kernelSize; l++) {
             for (int c = 0; c < nChannels; c++) {
-                int kernelIndex =
-                    k * (kernelSize * nChannels * nFilters) +
-                    l * (nChannels * nFilters) + c * (nFilters) + f;
-                int inputIndex =
-                    (i * stride + k) * (inputSize * nChannels) +
-                    (j * stride + l) * (nChannels) + c;
-                // std::cout << "kernelIndex: " << kernelIndex << ", kernel
-                // value: " << kernels[kernelIndex] << ", inputIndex: " <<
-                // inputIndex << ", input value: " << input[inputIndex] <<
-                // std::endl;
+                int kernelIndex = f * kernelSize * kernelSize * nChannels +
+                                  c * kernelSize * kernelSize + k * kernelSize +
+                                  l;
+                int inputIndex = c * inputSize * inputSize +
+                                 (i * stride + k) * inputSize +
+                                 (j * stride + l);
 
                 sum += d_kernel[kernelIndex] * d_input[inputIndex];
             }
         }
     }
 
-    // std::cout << "sum: " << sum << std::endl;
-    d_output[i * (outputSize * nFilters) + j * (nFilters) + f] = sum;
+    d_output[tid] = sum;
 }
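
Note: the rewrite moves every buffer to a dense row-major layout: kernels as [nFilters][nChannels][kernelSize][kernelSize], the padded input as [nChannels][inputSize][inputSize], and the output as [nFilters][outputSize][outputSize], so each thread's tid is also its output offset. A host-side sketch of the index decomposition (check_tid_roundtrip is illustrative, not repository code):

    #include <cassert>

    // Decompose a flat output offset into (f, i, j) exactly as the kernel
    // does, then recompose it; the round trip shows d_output[tid] is the
    // [f][i][j] element of a [nFilters][outputSize][outputSize] tensor.
    void check_tid_roundtrip(int outputSize, int nFilters) {
        for (int tid = 0; tid < outputSize * outputSize * nFilters; tid++) {
            int f = tid / (outputSize * outputSize);
            int i = tid % (outputSize * outputSize) / outputSize;
            int j = tid % outputSize;
            assert(f * (outputSize * outputSize) + i * outputSize + j == tid);
        }
    }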

conv2d.cu

@@ -1,5 +1,5 @@
-#include <string>
 #include <iostream>
+#include <string>
 
 #include "activations.cuh"
 #include "conv2d.cuh"
@@ -13,7 +13,7 @@ Layers::Conv2d::Conv2d(
     int inputChannels,
     int kernelSize,
     int stride,
-    std::string padding,
+    Padding padding,
     int numFilters,
     Activation activation
 )
@@ -25,34 +25,43 @@ Layers::Conv2d::Conv2d(
       activation(activation) {
 
     // Allocate memory for kernels
-    if (padding == "SAME") {
+    switch (padding) {
+    case SAME:
         outputSize = inputSize;
         paddingSize = ((stride - 1) * inputSize - stride + kernelSize) / 2;
-    } else if (padding == "VALID") {
+        break;
+    case VALID:
         paddingSize = 0;
         outputSize = (inputSize - kernelSize) / stride + 1;
+        break;
+    default:
+        break;
     }
 
     kernels.resize(kernelSize * kernelSize * inputChannels * numFilters);
     initializeKernels();
     d_kernels = nullptr;
-    CUDA_CHECK(
-        cudaMalloc((void**)&d_kernels, sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters)
-    );
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_kernels,
+        sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters
+    ));
 
     biases.resize(outputSize * outputSize * numFilters);
     initializeBiases();
     d_biases = nullptr;
-    CUDA_CHECK(
-        cudaMalloc((void**)&d_biases, sizeof(float) * outputSize * outputSize * numFilters)
-    );
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_biases, sizeof(float) * outputSize * outputSize * numFilters
+    ));
 
     d_padded = nullptr;
     CUDA_CHECK(cudaMalloc(
         (void**)&d_padded, sizeof(float) * (inputSize + 2 * paddingSize) *
                                (inputSize + 2 * paddingSize) * inputChannels
     ));
 
     toCuda();
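
Note: a worked example for the SAME branch. With stride 1 the padding formula reduces to paddingSize = (kernelSize - 1) / 2; a standalone sketch of the arithmetic for the ComplexTest configuration (hypothetical check, not repository code):

    #include <cstdio>

    int main() {
        // ComplexTest configuration: 5x5 input, 3x3 kernel, stride 1, SAME.
        int inputSize = 5, kernelSize = 3, stride = 1;
        int paddingSize =
            ((stride - 1) * inputSize - stride + kernelSize) / 2;  // = 1
        int paddedSize = inputSize + 2 * paddingSize;              // = 7
        // Convolving the padded input recovers the original size.
        int outputSize = (paddedSize - kernelSize) / stride + 1;   // = 5
        printf("pad=%d padded=%d out=%d\n", paddingSize, paddedSize, outputSize);
        return 0;
    }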
@@ -79,19 +88,22 @@ void Layers::Conv2d::setKernels(const std::vector<float>& kernels_input) {
 
 void Layers::Conv2d::toCuda() {
     CUDA_CHECK(cudaMemcpy(
-        d_kernels, kernels.data(), sizeof(float) * kernelSize * kernelSize * numFilters,
+        d_kernels, kernels.data(),
+        sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters,
         cudaMemcpyHostToDevice
     ));
     CUDA_CHECK(cudaMemcpy(
-        d_biases, biases.data(), sizeof(float) * outputSize * outputSize * numFilters,
+        d_biases, biases.data(),
+        sizeof(float) * outputSize * outputSize * numFilters,
         cudaMemcpyHostToDevice
     ));
 }
 
 void Layers::Conv2d::forward(const float* d_input, float* d_output) {
     // Pad input
-    int THREADS_PER_BLOCK = (inputSize + 2 * paddingSize) * (inputSize + 2 * paddingSize) * inputChannels;
+    int THREADS_PER_BLOCK = (inputSize + 2 * paddingSize) *
+                            (inputSize + 2 * paddingSize) * inputChannels;
     pad_matrix_kernel<<<1, THREADS_PER_BLOCK>>>(
         d_input, d_padded, inputSize, inputSize, inputChannels, paddingSize
@@ -100,11 +112,14 @@ void Layers::Conv2d::forward(const float* d_input, float* d_output) {
     // Convolve
     THREADS_PER_BLOCK = outputSize * outputSize * numFilters;
     convolution_kernel<<<1, THREADS_PER_BLOCK>>>(
-        d_padded, d_kernels, d_output, inputSize + (2 * paddingSize), inputChannels, kernelSize, stride, numFilters, outputSize
+        d_padded, d_kernels, d_output, inputSize + (2 * paddingSize),
+        inputChannels, kernelSize, stride, numFilters, outputSize
     );
 
     // Add bias
-    vec_vec_add_kernel<<<1, biases.size()>>>(d_biases, d_output, d_output, biases.size());
+    vec_vec_add_kernel<<<1, biases.size()>>>(
+        d_biases, d_output, d_output, biases.size()
+    );
 
     CUDA_CHECK(cudaDeviceSynchronize());
 }
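
Note: each launch still uses a single block (<<<1, THREADS_PER_BLOCK>>>), which caps a layer at the per-block thread limit (1024 on current NVIDIA GPUs); ComplexTest needs only 5 * 5 * 2 = 50 convolution threads. A hypothetical multi-block launch, not part of this commit, would look like:

    // Split the output elements across a grid of fixed-size blocks.
    int total   = outputSize * outputSize * numFilters;
    int threads = 256;
    int blocks  = (total + threads - 1) / threads;  // ceil(total / threads)
    convolution_kernel<<<blocks, threads>>>(
        d_padded, d_kernels, d_output, inputSize + (2 * paddingSize),
        inputChannels, kernelSize, stride, numFilters, outputSize
    );
    // The kernel would then derive its index from the grid and bounds-check:
    //     int tid = blockIdx.x * blockDim.x + threadIdx.x;
    //     if (tid >= outputSize * outputSize * nFilters) return;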
@@ -119,27 +134,35 @@ outputSize x numFilters
 */
 void Layers::Conv2d::host_conv(const float* input, float* output) {
     // Iterate over output matrix
-    for (int f = 0; f < numFilters; f++) {
-        for (int i = 0; i < outputSize; i++) {
-            for (int j = 0; j < outputSize; j++) {
-                float sum = 0.0f;
-
-                // Iterate over kernel and input matrix
-                for (int k = 0; k < kernelSize; k++) {
-                    for (int l = 0; l < kernelSize; l++) {
-                        for (int c = 0; c < inputChannels; c++) {
-                            int kernelIndex = k * (kernelSize * inputChannels * numFilters) + l * (inputChannels * numFilters) + c * (numFilters) + f;
-                            int inputIndex = (i * stride + k) * (inputSize * inputChannels) + (j * stride + l) * (inputChannels) + c;
-                            sum += kernels[kernelIndex] * input[inputIndex];
-                        }
-                    }
-                }
-
-                output[i * (outputSize * numFilters) + j * (numFilters) + f] = sum;
-            }
-        }
-    }
+    for (int tid = 0; tid < outputSize * outputSize * numFilters; tid++) {
+        // Get output index
+        int f = tid / (outputSize * outputSize);
+        int i = tid % (outputSize * outputSize) / outputSize;
+        int j = tid % outputSize;
+
+        float sum = 0.0f;
+
+        // Iterate over kernel and input matrix
+        for (int k = 0; k < kernelSize; k++) {
+            for (int l = 0; l < kernelSize; l++) {
+                for (int c = 0; c < inputChannels; c++) {
+                    int kernelIndex =
+                        f * kernelSize * kernelSize * inputChannels +
+                        c * kernelSize * kernelSize + k * kernelSize + l;
+                    int inputIndex = c * inputSize * inputSize +
+                                     (i * stride + k) * inputSize +
+                                     (j * stride + l);
+                    sum += kernels[kernelIndex] * input[inputIndex];
+                }
+            }
+        }
+
+        int outputIndex = f * outputSize * outputSize + i * outputSize + j;
+        output[outputIndex] = sum;
+    }
 }
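
Note: host_conv now walks the same flat tid loop as the device kernel, so the two paths can be compared element by element. An illustrative cross-check (assumes a constructed layer, device buffers, and a host copy of the input as in the tests below; host_conv skips the bias add, so it only matches forward when the biases are zero):

    std::vector<float> gpu(conv2d.outputSize * conv2d.outputSize * numFilters);
    std::vector<float> cpu(gpu.size());

    conv2d.forward(d_input, d_output);
    cudaMemcpy(gpu.data(), d_output, sizeof(float) * gpu.size(),
               cudaMemcpyDeviceToHost);
    conv2d.host_conv(input.data(), cpu.data());

    for (size_t n = 0; n < gpu.size(); n++)
        EXPECT_NEAR(gpu[n], cpu[n], 0.0001f);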

Conv2d tests

@@ -12,7 +12,7 @@ class Conv2dTest : public ::testing::Test {
         int inputChannels,
         int kernelSize,
         int stride,
-        std::string padding,
+        Padding padding,
         int numFilters,
         Activation activation,
         std::vector<float>& input,
@@ -30,12 +30,14 @@ class Conv2dTest : public ::testing::Test {
         // Allocate device memory
         cudaStatus = cudaMalloc(
-            (void**)&d_input, sizeof(float) * inputSize * inputSize * inputChannels
+            (void**)&d_input,
+            sizeof(float) * inputSize * inputSize * inputChannels
         );
         EXPECT_EQ(cudaStatus, cudaSuccess);
 
         cudaStatus = cudaMalloc(
-            (void**)&d_output, sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters
+            (void**)&d_output,
+            sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters
         );
         EXPECT_EQ(cudaStatus, cudaSuccess);
@@ -46,7 +48,6 @@ class Conv2dTest : public ::testing::Test {
         );
         EXPECT_EQ(cudaStatus, cudaSuccess);
-
         return conv2d;
     }
@@ -60,13 +61,13 @@ class Conv2dTest : public ::testing::Test {
 };
 
 TEST_F(Conv2dTest, SimpleTest) {
     int inputSize = 4;
     int inputChannels = 1;
     int kernelSize = 2;
     int stride = 1;
-    std::string padding = "VALID";
+    Padding padding = VALID;
     int numFilters = 1;
     Activation activation = LINEAR;
 
     std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
                                 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
@@ -109,14 +110,15 @@ TEST_F(Conv2dTest, SimpleTest) {
 }
 
 TEST_F(Conv2dTest, ComplexTest) {
     int inputSize = 5;
     int inputChannels = 3;
     int kernelSize = 3;
     int stride = 1;
-    std::string padding = "SAME";
+    Padding padding = SAME;
     int numFilters = 2;
     Activation activation = LINEAR;
 
+    // clang-format off
     std::vector<float> input = {
         // Channel 1
         0.823f, 0.217f, 0.435f, 0.981f, 0.742f,
@@ -139,33 +141,32 @@ TEST_F(Conv2dTest, ComplexTest) {
     };
 
     std::vector<float> kernels = {
-        // Filter 1 Channel 1
+        // Filter 1, Channel 1
         0.128f, 0.754f, 0.987f,
         0.321f, 0.412f, 0.635f,
         0.298f, 0.017f, 0.845f,
-        // Filter 1 Channel 2
+        // Filter 1, Channel 2
         0.514f, 0.729f, 0.952f,
         0.684f, 0.378f, 0.159f,
         0.823f, 0.547f, 0.216f,
-        // Filter 1 Channel 3
-        0.456f, 0.123f, 0.789f,
-        0.123f, 0.345f, 0.123f,
-        0.789f, 0.123f, 0.345f,
+        // Filter 1, Channel 3
+        0.983f, 0.231f, 0.456f,
+        0.178f, 0.654f, 0.821f,
+        0.345f, 0.987f, 0.123f,
-        // Filter 2 Channel 1
-        0.123f, 0.345f, 0.123f,
-        0.789f, 0.123f, 0.345f,
-        0.123f, 0.345f, 0.123f,
+        // Filter 2, Channel 1
+        0.789f, 0.543f, 0.210f,
+        0.012f, 0.371f, 0.638f,
+        0.456f, 0.198f, 0.907f,
-        // Filter 2 Channel 2
-        0.146f, 0.789f, 0.123f,
-        0.345f, 0.123f, 0.789f,
-        0.123f, 0.345f, 0.123f,
+        // Filter 2, Channel 2
+        0.101f, 0.432f, 0.759f,
+        0.234f, 0.567f, 0.890f,
+        0.543f, 0.876f, 0.219f,
-        // Filter 2 Channel 3
-        0.123f, 0.345f, 0.123f,
-        0.789f, 0.123f, 0.345f,
-        0.123f, 0.345f, 0.123f
+        // Filter 2, Channel 3
+        0.345f, 0.678f, 0.011f,
+        0.678f, 0.011f, 0.345f,
+        0.011f, 0.345f, 0.678f
     };
+    // clang-format on
 
     float* d_input;
     float* d_output;
@@ -178,4 +179,28 @@ TEST_F(Conv2dTest, ComplexTest) {
     EXPECT_EQ(inputSize, conv2d.outputSize);
 
     conv2d.forward(d_input, d_output);
+
+    std::vector<float> output(
+        conv2d.outputSize * conv2d.outputSize * numFilters
+    );
+    cudaMemcpy(
+        output.data(), d_output,
+        sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters,
+        cudaMemcpyDeviceToHost
+    );
+
+    // Generated by tools/generate_conv2d_test.py
+    std::vector<float> expected = {
+        2.29426f, 3.89173f, 4.17634f, 3.25501f, 2.07618f, 5.41483f, 7.09971f,
+        6.39811f, 5.71432f, 3.10928f, 5.12973f, 6.29638f, 5.26962f, 5.21997f,
+        3.05852f, 6.17517f, 7.19311f, 6.69771f, 6.2142f,  4.03242f, 3.3792f,
+        4.36444f, 4.396f,   4.69905f, 3.62061f, 2.87914f, 3.71743f, 3.51854f,
+        2.98413f, 1.46579f, 4.94951f, 6.18983f, 4.98187f, 4.38372f, 3.35386f,
+        5.0364f,  5.3756f,  4.05993f, 4.89299f, 2.78625f, 5.33763f, 5.80899f,
+        5.89785f, 5.51095f, 3.74287f, 2.64053f, 4.05895f, 3.96482f, 4.30177f,
+        1.94269f
+    };
+
+    for (int i = 0; i < output.size(); i++) {
+        EXPECT_NEAR(output[i], expected[i], 0.0001f);
+    }
 }