Add support for non-square matrices

2024-05-20 15:20:43 +02:00
parent 6f8b5f4081
commit 74098b24e3
21 changed files with 314 additions and 299 deletions
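
Throughout this commit the scalar `int` sizes (input, kernel, stride, padding, pooling) are replaced by a two-component `dim2d`, whose height is accessed as `.first` and width as `.second`. A minimal sketch of the idea, assuming `dim2d` is a pair alias (the real definition lives in the CUDANet headers):

    #include <utility>

    // Assumed alias; check the library headers for the actual definition.
    using dim2d = std::pair<int, int>;

    // With separate height/width, every output dimension is computed
    // independently, so non-square inputs and kernels just work:
    inline dim2d outputDims(dim2d in, dim2d kernel, dim2d stride, dim2d pad) {
        int h = (in.first + 2 * pad.first - kernel.first) / stride.first + 1;
        int w = (in.second + 2 * pad.second - kernel.second) / stride.second + 1;
        return {h, w};
    }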

View File

@@ -6,10 +6,10 @@
#include "avg_pooling.cuh"
TEST(AvgPoolingLayerTest, AvgPoolForwardTest) {
- int inputSize = 4;
- int nChannels = 2;
- int poolingSize = 2;
- int stride = 2;
+ dim2d inputSize   = {4, 4};
+ int   nChannels   = 2;
+ dim2d poolingSize = {2, 2};
+ dim2d stride      = {2, 2};
cudaError_t cudaStatus;
@@ -36,13 +36,14 @@ TEST(AvgPoolingLayerTest, AvgPoolForwardTest) {
float *d_input;
cudaStatus = cudaMalloc(
- (void **)&d_input, sizeof(float) * inputSize * inputSize * nChannels
+ (void **)&d_input,
+ sizeof(float) * inputSize.first * inputSize.second * nChannels
);
EXPECT_EQ(cudaStatus, cudaSuccess);
cudaStatus = cudaMemcpy(
d_input, input.data(),
- sizeof(float) * inputSize * inputSize * nChannels,
+ sizeof(float) * inputSize.first * inputSize.second * nChannels,
cudaMemcpyHostToDevice
);
EXPECT_EQ(cudaStatus, cudaSuccess);
@@ -53,13 +54,13 @@ TEST(AvgPoolingLayerTest, AvgPoolForwardTest) {
std::vector<float> output(outputSize);
cudaStatus = cudaMemcpy(
- output.data(), d_output,
- sizeof(float) * outputSize,
+ output.data(), d_output, sizeof(float) * outputSize,
cudaMemcpyDeviceToHost
);
EXPECT_EQ(cudaStatus, cudaSuccess);
- std::vector<float> expected = {0.43775f, 0.49475f, 0.48975f, 0.339f, 0.45675f, 0.303f, 0.56975f, 0.57025f};
+ std::vector<float> expected = {0.43775f, 0.49475f, 0.48975f, 0.339f,
+                                0.45675f, 0.303f, 0.56975f, 0.57025f};
for (int i = 0; i < output.size(); ++i) {
EXPECT_NEAR(expected[i], output[i], 1e-4);
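
With inputSize {4, 4}, poolingSize {2, 2} and stride {2, 2}, each spatial dimension shrinks to (4 - 2) / 2 + 1 = 2, so the eight expected values above are the 2 x 2 outputs of the two channels. The bookkeeping spelled out (a sketch; in the actual test outputSize comes from the layer):

    int outputH = (inputSize.first - poolingSize.first) / stride.first + 1;     // (4 - 2) / 2 + 1 = 2
    int outputW = (inputSize.second - poolingSize.second) / stride.second + 1;  // 2
    int outputSize = outputH * outputW * nChannels;                             // 2 * 2 * 2 = 8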

View File

@@ -7,8 +7,8 @@
#include "batch_norm.cuh"
TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
- int inputSize = 4;
- int nChannels = 2;
+ dim2d inputSize = {4, 4};
+ int   nChannels = 2;
cudaError_t cudaStatus;
@@ -17,7 +17,7 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
);
std::vector<float> weights = {0.63508f, 0.64903f};
- std::vector<float> biases = {0.25079f, 0.66841f};
+ std::vector<float> biases  = {0.25079f, 0.66841f};
batchNorm.setWeights(weights.data());
batchNorm.setBiases(biases.data());
@@ -47,27 +47,27 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
EXPECT_EQ(cudaStatus, cudaSuccess);
cudaStatus = cudaMemcpy(
- d_input, input.data(), sizeof(float) * input.size(), cudaMemcpyHostToDevice
+ d_input, input.data(), sizeof(float) * input.size(),
+ cudaMemcpyHostToDevice
);
EXPECT_EQ(cudaStatus, cudaSuccess);
float* d_output = batchNorm.forward(d_input);
cudaStatus = cudaMemcpy(
- output.data(), d_output, sizeof(float) * output.size(), cudaMemcpyDeviceToHost
+ output.data(), d_output, sizeof(float) * output.size(),
+ cudaMemcpyDeviceToHost
);
EXPECT_EQ(cudaStatus, cudaSuccess);
- std::vector<float> expected = {
-     -0.06007f, 0.951f, 0.18157f, 1.36202f,
-     0.39244f, 0.47335f, 0.58598f, -1.00188f,
-     0.59576f, 0.79919f, -0.57001f, 0.70469f,
-     -0.62847f, -0.06578f, -0.43668f, 0.72952f,
-     0.37726f, 0.02088f, 0.35446f, 0.98092f,
-     1.39264f, 1.80686f, 1.67786f, 1.58318f,
-     -0.0269f, 0.26878f, 0.81411f, 0.09022f,
-     0.9126f, 0.71485f, -0.08184f, -0.19131f
- };
+ std::vector<float> expected = {-0.06007f, 0.951f, 0.18157f, 1.36202f,
+                                0.39244f, 0.47335f, 0.58598f, -1.00188f,
+                                0.59576f, 0.79919f, -0.57001f, 0.70469f,
+                                -0.62847f, -0.06578f, -0.43668f, 0.72952f,
+                                0.37726f, 0.02088f, 0.35446f, 0.98092f,
+                                1.39264f, 1.80686f, 1.67786f, 1.58318f,
+                                -0.0269f, 0.26878f, 0.81411f, 0.09022f,
+                                0.9126f, 0.71485f, -0.08184f, -0.19131f};
// std::cout << "BatchNorm2D: " << std::endl;
for (int i = 0; i < output.size(); i++) {
@@ -76,5 +76,4 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
}
// std::cout << std::endl;
cudaFree(d_input);
}
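
For reference, the expected values correspond to per-channel batch normalization; a minimal CPU sketch of the forward pass being tested, assuming the standard formula (helper name and epsilon are illustrative, not from the library):

    #include <cmath>

    // y = weight * (x - mean) / sqrt(var + eps) + bias, over one channel of n values
    void batchNormChannelRef(const float* x, float* y, int n,
                             float weight, float bias, float eps = 1e-5f) {
        float mean = 0.0f, var = 0.0f;
        for (int i = 0; i < n; ++i) mean += x[i];
        mean /= n;
        for (int i = 0; i < n; ++i) var += (x[i] - mean) * (x[i] - mean);
        var /= n;
        for (int i = 0; i < n; ++i)
            y[i] = weight * (x[i] - mean) / std::sqrt(var + eps) + bias;
    }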

View File

@@ -8,12 +8,12 @@
class Conv2dTest : public ::testing::Test {
protected:
CUDANet::Layers::Conv2d commonTestSetup(
- int inputSize,
+ dim2d inputSize,
int inputChannels,
- int kernelSize,
- int stride,
+ dim2d kernelSize,
+ dim2d stride,
int numFilters,
- int paddingSize,
+ dim2d paddingSize,
CUDANet::Layers::ActivationType activationType,
std::vector<float>& input,
float* kernels,
@@ -30,7 +30,7 @@ class Conv2dTest : public ::testing::Test {
// Allocate device memory
cudaStatus = cudaMalloc(
(void**)&d_input,
- sizeof(float) * inputSize * inputSize * inputChannels
+ sizeof(float) * inputSize.first * inputSize.second * inputChannels
);
EXPECT_EQ(cudaStatus, cudaSuccess);
@@ -47,19 +47,18 @@ class Conv2dTest : public ::testing::Test {
void commonTestTeardown(float* d_input) {
// Free device memory
cudaFree(d_input);
}
cudaError_t cudaStatus;
};
TEST_F(Conv2dTest, SimpleTest) {
- int inputSize = 4;
- int inputChannels = 1;
- int kernelSize = 2;
- int stride = 1;
- int numFilters = 1;
- int paddingSize = 0;
+ dim2d inputSize     = {4, 4};
+ int   inputChannels = 1;
+ dim2d kernelSize    = {2, 2};
+ dim2d stride        = {1, 1};
+ int   numFilters    = 1;
+ dim2d paddingSize   = {0, 0};
CUDANet::Layers::ActivationType activationType =
CUDANet::Layers::ActivationType::NONE;
@@ -82,8 +81,9 @@ TEST_F(Conv2dTest, SimpleTest) {
activationType, input, kernels.data(), d_input
);
- int outputWidth = (inputSize - kernelSize) / stride + 1;
- int outputSize = outputWidth * outputWidth * numFilters;
+ int outputHeight = (inputSize.first - kernelSize.first) / stride.first + 1;
+ int outputWidth = (inputSize.second - kernelSize.second) / stride.second + 1;
+ int outputSize = outputHeight * outputWidth * numFilters;
EXPECT_EQ(outputSize, conv2d.getOutputSize());
d_output = conv2d.forward(d_input);
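
Splitting the single outputWidth into outputHeight and outputWidth is the core of the non-square support: each dimension now uses its own kernel and stride component. A hypothetical non-square example (values not from this test):

    dim2d inputSize  = {6, 4};           // 6 rows, 4 columns
    dim2d kernelSize = {2, 2};
    dim2d stride     = {1, 1};
    int outputHeight = (6 - 2) / 1 + 1;  // 5
    int outputWidth  = (4 - 2) / 1 + 1;  // 3
    // outputSize = 5 * 3 * numFilters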
@@ -106,12 +106,16 @@ TEST_F(Conv2dTest, SimpleTest) {
}
TEST_F(Conv2dTest, PaddedTest) {
- int inputSize = 5;
- int inputChannels = 3;
- int kernelSize = 3;
- int stride = 1;
- int numFilters = 2;
- int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
+ dim2d inputSize     = {5, 5};
+ int   inputChannels = 3;
+ dim2d kernelSize    = {3, 3};
+ dim2d stride        = {1, 1};
+ int   numFilters    = 2;
+ int paddingFirst = CUDANET_SAME_PADDING(inputSize.first, kernelSize.first, stride.first);
+ int paddingSecond = CUDANET_SAME_PADDING(inputSize.second, kernelSize.second, stride.second);
+ dim2d paddingSize = {paddingFirst, paddingSecond};
CUDANet::Layers::ActivationType activationType =
CUDANet::Layers::ActivationType::NONE;
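
CUDANET_SAME_PADDING is now applied per dimension. Its exact definition is in the library headers; a plausible reconstruction consistent with the sizes this test expects (an assumption, not verified) is:

    // Padding that keeps the output the same size as the input:
    #define CUDANET_SAME_PADDING(size, kernel, stride) \
        (((stride) * ((size) - 1) - (size) + (kernel)) / 2)
    // Here: (1 * (5 - 1) - 5 + 3) / 2 = 1 for both dimensions.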
@@ -173,16 +177,14 @@ TEST_F(Conv2dTest, PaddedTest) {
activationType, input, kernels.data(), d_input
);
- EXPECT_EQ(inputSize * inputSize * numFilters, conv2d.getOutputSize());
+ EXPECT_EQ(inputSize.first * inputSize.second * numFilters, conv2d.getOutputSize());
d_output = conv2d.forward(d_input);
- std::vector<float> output(
-     conv2d.getOutputSize()
- );
+ std::vector<float> output(conv2d.getOutputSize());
cudaMemcpy(
- output.data(), d_output,
- sizeof(float) * conv2d.getOutputSize(), cudaMemcpyDeviceToHost
+ output.data(), d_output, sizeof(float) * conv2d.getOutputSize(),
+ cudaMemcpyDeviceToHost
);
// Generated by tools/generate_conv2d_test.py
@@ -206,12 +208,17 @@ TEST_F(Conv2dTest, PaddedTest) {
}
TEST_F(Conv2dTest, StridedPaddedConvolution) {
- int inputSize = 5;
+ dim2d inputSize = {5, 5};
int inputChannels = 2;
- int kernelSize = 3;
- int stride = 2;
+ dim2d kernelSize = {3, 3};
+ dim2d stride = {2, 2};
int numFilters = 2;
- int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
+ int paddingFirst = CUDANET_SAME_PADDING(inputSize.first, kernelSize.first, stride.first);
+ int paddingSecond = CUDANET_SAME_PADDING(inputSize.second, kernelSize.second, stride.second);
+ dim2d paddingSize = {paddingFirst, paddingSecond};
CUDANet::Layers::ActivationType activationType =
CUDANet::Layers::ActivationType::RELU;
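
With stride {2, 2} the same-padding values grow. Assuming the formula sketched above, the arithmetic behind the 5 x 5 output asserted below:

    int pad = (2 * (5 - 1) - 5 + 3) / 2;  // = 3 per dimension
    int out = (5 + 2 * pad - 3) / 2 + 1;  // = 5, so the output stays 5 x 5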
@@ -258,16 +265,13 @@ TEST_F(Conv2dTest, StridedPaddedConvolution) {
activationType, input, kernels.data(), d_input
);
- EXPECT_EQ(inputSize * inputSize * numFilters, conv2d.getOutputSize());
+ EXPECT_EQ(inputSize.first * inputSize.second * numFilters, conv2d.getOutputSize());
d_output = conv2d.forward(d_input);
- std::vector<float> output(
-     conv2d.getOutputSize()
- );
+ std::vector<float> output(conv2d.getOutputSize());
cudaMemcpy(
- output.data(), d_output,
- sizeof(float) * conv2d.getOutputSize(),
+ output.data(), d_output, sizeof(float) * conv2d.getOutputSize(),
cudaMemcpyDeviceToHost
);

View File

@@ -6,10 +6,10 @@
#include "max_pooling.cuh"
TEST(MaxPoolingLayerTest, MaxPoolForwardTest) {
- int inputSize = 4;
- int nChannels = 2;
- int poolingSize = 2;
- int stride = 2;
+ dim2d inputSize   = {4, 4};
+ int   nChannels   = 2;
+ dim2d poolingSize = {2, 2};
+ dim2d stride      = {2, 2};
cudaError_t cudaStatus;
@@ -36,13 +36,13 @@ TEST(MaxPoolingLayerTest, MaxPoolForwardTest) {
float *d_input;
cudaStatus = cudaMalloc(
- (void **)&d_input, sizeof(float) * inputSize * inputSize * nChannels
+ (void **)&d_input, sizeof(float) * inputSize.first * inputSize.second * nChannels
);
EXPECT_EQ(cudaStatus, cudaSuccess);
cudaStatus = cudaMemcpy(
d_input, input.data(),
- sizeof(float) * inputSize * inputSize * nChannels,
+ sizeof(float) * inputSize.first * inputSize.second * nChannels,
cudaMemcpyHostToDevice
);
EXPECT_EQ(cudaStatus, cudaSuccess);
@@ -53,13 +53,13 @@ TEST(MaxPoolingLayerTest, MaxPoolForwardTest) {
std::vector<float> output(outputSize);
cudaStatus = cudaMemcpy(
- output.data(), d_output,
- sizeof(float) * outputSize,
+ output.data(), d_output, sizeof(float) * outputSize,
cudaMemcpyDeviceToHost
);
EXPECT_EQ(cudaStatus, cudaSuccess);
- std::vector<float> expected = {0.619f, 0.732f, 0.712f, 0.742f, 0.919f, 0.973f, 0.819f, 0.85f};
+ std::vector<float> expected = {0.619f, 0.732f, 0.712f, 0.742f,
+                                0.919f, 0.973f, 0.819f, 0.85f};
for (int i = 0; i < output.size(); ++i) {
EXPECT_FLOAT_EQ(expected[i], output[i]);

View File

@@ -10,27 +10,26 @@ class ModelTest : public ::testing::Test {
CUDANet::Model *commonTestSetup(
bool setWeights = true,
- int inputSize = 6,
- int inputChannels = 2,
- int outputSize = 3,
+ dim2d inputSize     = {6, 6},
+ int   inputChannels = 2,
+ int   outputSize    = 3,
- int kernelSize = 3,
- int stride = 1,
- int numFilters = 2,
+ dim2d kernelSize = {3, 3},
+ dim2d stride     = {1, 1},
+ int   numFilters = 2,
- int poolingSize = 2,
- int poolingStride = 2
+ dim2d poolingSize   = {2, 2},
+ dim2d poolingStride = {2, 2}
) {
CUDANet::Model *model =
new CUDANet::Model(inputSize, inputChannels, outputSize);
- int paddingSize = 0;
+ dim2d paddingSize = {0, 0};
// Conv2d
CUDANet::Layers::Conv2d *conv2d = new CUDANet::Layers::Conv2d(
inputSize, inputChannels, kernelSize, stride, numFilters,
- paddingSize,
- CUDANet::Layers::ActivationType::NONE
+ paddingSize, CUDANet::Layers::ActivationType::NONE
);
if (setWeights) {
@@ -39,9 +38,13 @@ class ModelTest : public ::testing::Test {
model->addLayer("conv1", conv2d);
// maxpool2d
+ dim2d poolingInput = {
+     inputSize.first - kernelSize.first + 1,
+     inputSize.second - kernelSize.second + 1
+ };
CUDANet::Layers::MaxPooling2D *maxpool2d =
new CUDANet::Layers::MaxPooling2D(
- inputSize - kernelSize + 1, numFilters, poolingSize,
+ poolingInput, numFilters, poolingSize,
poolingStride, CUDANet::Layers::ActivationType::RELU
);
model->addLayer("maxpool1", maxpool2d);