From ecf7416f8e95bfea2ca1590ad2362e1b3326fb17 Mon Sep 17 00:00:00 2001
From: LordMathis
Date: Sat, 20 Apr 2024 16:31:28 +0200
Subject: [PATCH] Rework padding size setting

---
 include/layers/conv2d.cuh  |  4 ++--
 include/layers/layer.cuh   | 26 ++++++++------------
 src/layers/conv2d.cu       | 20 ++++------------
 test/layers/test_conv2d.cu | 49 +++++++++++++++++++-------------------
 test/model/test_model.cu   |  4 +++-
 5 files changed, 44 insertions(+), 59 deletions(-)

diff --git a/include/layers/conv2d.cuh b/include/layers/conv2d.cuh
index 6223904..c8133cd 100644
--- a/include/layers/conv2d.cuh
+++ b/include/layers/conv2d.cuh
@@ -23,7 +23,7 @@ class Conv2d : public WeightedLayer {
      * @param kernelSize Width and height of the convolution kernel
      * @param stride Convolution stride
      * @param numFilters Number of output filters
-     * @param padding Padding type ('SAME' or 'VALID')
+     * @param paddingSize Padding size
      * @param activationType Activation function type ('RELU', 'SIGMOID',
      * 'SOFTMAX' or 'NONE')
      */
@@ -33,7 +33,7 @@ class Conv2d : public WeightedLayer {
         int kernelSize,
         int stride,
         int numFilters,
-        Padding padding,
+        int paddingSize,
         ActivationType activationType
     );
 
diff --git a/include/layers/layer.cuh b/include/layers/layer.cuh
index 2927944..c89fe4f 100644
--- a/include/layers/layer.cuh
+++ b/include/layers/layer.cuh
@@ -4,32 +4,26 @@
 
 #include <vector>
 
+#define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) ((stride - 1) * inputSize - stride + kernelSize) / 2;
+
+
 namespace CUDANet::Layers {
 
-/**
- * @brief Padding types
- *
- * SAME: Zero padding such that the output size is the same as the input
- * VALID: No padding
- *
- */
-enum Padding { SAME, VALID };
-
 /**
  * @brief Basic Sequential Layer
-
+ * 
  */
 class SequentialLayer {
   public:
     /**
      * @brief Destroy the Sequential Layer
-
+     * 
      */
-    virtual ~SequentialLayer() {};
+    virtual ~SequentialLayer(){};
 
     /**
      * @brief Forward propagation virtual function
-
+     * 
      * @param input Device pointer to the input
      * @return float* Device pointer to the output
      */
@@ -45,7 +39,7 @@ class WeightedLayer : public SequentialLayer {
     /**
      * @brief Destroy the ILayer object
      *
      */
-    virtual ~WeightedLayer() {};
+    virtual ~WeightedLayer(){};
     /**
      * @brief Virtual function for forward pass
@@ -64,7 +58,7 @@
 
     /**
      * @brief Virtual function for getting weights
-
+     * 
      */
     virtual std::vector<float> getWeights() = 0;
 
@@ -77,7 +71,7 @@
 
     /**
      * @brief Virtual function for getting biases
-
+     * 
      */
     virtual std::vector<float> getBiases() = 0;
 
diff --git a/src/layers/conv2d.cu b/src/layers/conv2d.cu
index 448e4e4..3713d07 100644
--- a/src/layers/conv2d.cu
+++ b/src/layers/conv2d.cu
@@ -12,29 +12,17 @@ Conv2d::Conv2d(
     int kernelSize,
     int stride,
     int numFilters,
-    Padding padding,
+    int paddingSize,
     ActivationType activationType
 )
     : inputSize(inputSize),
       inputChannels(inputChannels),
       kernelSize(kernelSize),
       stride(stride),
-      numFilters(numFilters) {
-
-    switch (padding) {
-        case SAME:
-            outputSize = inputSize;
-            paddingSize = ((stride - 1) * inputSize - stride + kernelSize) / 2;
-            break;
+      numFilters(numFilters),
+      paddingSize(paddingSize) {
 
-        case VALID:
-            paddingSize = 0;
-            outputSize = (inputSize - kernelSize) / stride + 1;
-            break;
-
-        default:
-            break;
-    }
+    outputSize = (inputSize - kernelSize + 2 * paddingSize) / stride + 1;
 
     activation = Activation(
         activationType, outputSize * outputSize * numFilters
diff --git a/test/layers/test_conv2d.cu b/test/layers/test_conv2d.cu
index 9e398f5..4057c7f 100644
--- a/test/layers/test_conv2d.cu
+++ b/test/layers/test_conv2d.cu
@@ -13,7 +13,7 @@ class Conv2dTest : public ::testing::Test {
         int kernelSize,
         int stride,
         int numFilters,
-        CUDANet::Layers::Padding padding,
+        int paddingSize,
         CUDANet::Layers::ActivationType activationType,
         std::vector<float>& input,
         float* kernels,
@@ -21,8 +21,8 @@ class Conv2dTest : public ::testing::Test {
     ) {
         // Create Conv2d layer
         CUDANet::Layers::Conv2d conv2d(
-            inputSize, inputChannels, kernelSize, stride, numFilters, padding,
-            activationType
+            inputSize, inputChannels, kernelSize, stride, numFilters,
+            paddingSize, activationType
         );
 
         conv2d.setWeights(kernels);
@@ -54,12 +54,13 @@ class Conv2dTest : public ::testing::Test {
 };
 
 TEST_F(Conv2dTest, SimpleTest) {
-    int inputSize = 4;
-    int inputChannels = 1;
-    int kernelSize = 2;
-    int stride = 1;
-    int numFilters = 1;
-    CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::VALID;
+    int inputSize     = 4;
+    int inputChannels = 1;
+    int kernelSize    = 2;
+    int stride        = 1;
+    int numFilters    = 1;
+    int paddingSize   = 0;
+
     CUDANet::Layers::ActivationType activationType =
         CUDANet::Layers::ActivationType::NONE;
 
@@ -77,7 +78,7 @@ TEST_F(Conv2dTest, SimpleTest) {
     float* d_output;
 
     CUDANet::Layers::Conv2d conv2d = commonTestSetup(
-        inputSize, inputChannels, kernelSize, stride, numFilters, padding,
+        inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
         activationType, input, kernels.data(), d_input
     );
 
@@ -104,12 +105,12 @@ TEST_F(Conv2dTest, SimpleTest) {
 }
 
 TEST_F(Conv2dTest, PaddedTest) {
-    int inputSize = 5;
-    int inputChannels = 3;
-    int kernelSize = 3;
-    int stride = 1;
-    int numFilters = 2;
-    CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::SAME;
+    int inputSize     = 5;
+    int inputChannels = 3;
+    int kernelSize    = 3;
+    int stride        = 1;
+    int numFilters    = 2;
+    int paddingSize   = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
 
     CUDANet::Layers::ActivationType activationType =
         CUDANet::Layers::ActivationType::NONE;
@@ -167,7 +168,7 @@ TEST_F(Conv2dTest, PaddedTest) {
     float* d_output;
 
     CUDANet::Layers::Conv2d conv2d = commonTestSetup(
-        inputSize, inputChannels, kernelSize, stride, numFilters, padding,
+        inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
         activationType, input, kernels.data(), d_input
     );
 
@@ -206,12 +207,12 @@ TEST_F(Conv2dTest, PaddedTest) {
 }
 
 TEST_F(Conv2dTest, StridedPaddedConvolution) {
-    int inputSize = 5;
-    int inputChannels = 2;
-    int kernelSize = 3;
-    int stride = 2;
-    int numFilters = 2;
-    CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::SAME;
+    int inputSize     = 5;
+    int inputChannels = 2;
+    int kernelSize    = 3;
+    int stride        = 2;
+    int numFilters    = 2;
+    int paddingSize   = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
 
     CUDANet::Layers::ActivationType activationType =
         CUDANet::Layers::ActivationType::RELU;
@@ -254,7 +255,7 @@ TEST_F(Conv2dTest, StridedPaddedConvolution) {
     float* d_output;
 
     CUDANet::Layers::Conv2d conv2d = commonTestSetup(
-        inputSize, inputChannels, kernelSize, stride, numFilters, padding,
+        inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
         activationType, input, kernels.data(), d_input
     );
 
diff --git a/test/model/test_model.cu b/test/model/test_model.cu
index b6e373c..ac0f5b8 100644
--- a/test/model/test_model.cu
+++ b/test/model/test_model.cu
@@ -24,10 +24,12 @@ class ModelTest : public ::testing::Test {
         CUDANet::Model *model =
             new CUDANet::Model(inputSize, inputChannels, outputSize);
 
+        int paddingSize = 0;
+
         // Conv2d
         CUDANet::Layers::Conv2d *conv2d = new CUDANet::Layers::Conv2d(
             inputSize, inputChannels, kernelSize, stride, numFilters,
-            CUDANet::Layers::Padding::VALID,
+            paddingSize,
             CUDANet::Layers::ActivationType::NONE
         );
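
With this patch the Padding enum is gone: a caller now passes an explicit paddingSize, where 0 reproduces the old 'VALID' behaviour and the CUDANET_SAME_PADDING macro added in layer.cuh reproduces 'SAME', exactly as the updated tests above do. Below is a minimal usage sketch under those assumptions; the include path and the makeConv() wrapper are illustrative only and are not part of this patch or the repository.

// Sketch only: assumes conv2d.cuh transitively includes layer.cuh (where
// CUDANET_SAME_PADDING is defined) and that "layers/conv2d.cuh" is the
// include path; makeConv() is a hypothetical helper, not repository code.
#include "layers/conv2d.cuh"

CUDANet::Layers::Conv2d makeConv(int inputSize, int inputChannels) {
    int kernelSize = 3;
    int stride     = 1;
    int numFilters = 16;

    // Old Padding::VALID -> paddingSize = 0
    // Old Padding::SAME  -> paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride)
    int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);

    // Per conv2d.cu above: outputSize = (inputSize - kernelSize + 2 * paddingSize) / stride + 1,
    // so with stride 1 this padding keeps the spatial size unchanged.
    return CUDANet::Layers::Conv2d(
        inputSize, inputChannels, kernelSize, stride, numFilters,
        paddingSize, CUDANet::Layers::ActivationType::RELU
    );
}

Passing the size directly also allows padding values that neither 'SAME' nor 'VALID' could express, which appears to be the motivation for dropping the enum.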