Rework padding size setting

2024-04-20 16:31:28 +02:00
parent dbaab5652e
commit ecf7416f8e
5 changed files with 44 additions and 59 deletions

View File

@@ -23,7 +23,7 @@ class Conv2d : public WeightedLayer {
* @param kernelSize Width and height of the convolution kernel
* @param stride Convolution stride
* @param numFilters Number of output filters
* @param padding Padding type ('SAME' or 'VALID')
* @param paddingSize Padding size (zero-padding on each side of the input)
* @param activationType Activation function type ('RELU', 'SIGMOID',
* 'SOFTMAX' or 'NONE')
*/
@@ -33,7 +33,7 @@ class Conv2d : public WeightedLayer {
int kernelSize,
int stride,
int numFilters,
Padding padding,
int paddingSize,
ActivationType activationType
);
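
Note: the constructor now takes an explicit padding size instead of a Padding enum value. A minimal usage sketch, assuming hypothetical include paths (everything else matches the signatures in this diff): the old VALID behaviour corresponds to a padding size of 0, the old SAME behaviour to the CUDANET_SAME_PADDING macro introduced in the next file.

#include "conv2d.cuh"   // CUDANet::Layers::Conv2d (path is assumed)
#include "layer.cuh"    // CUDANET_SAME_PADDING, ActivationType (path is assumed)

void buildExamples() {
    int inputSize = 5, inputChannels = 3, kernelSize = 3, stride = 1, numFilters = 2;

    // Old VALID behaviour: no zero padding at all.
    CUDANet::Layers::Conv2d valid(
        inputSize, inputChannels, kernelSize, stride, numFilters,
        /*paddingSize=*/0, CUDANet::Layers::ActivationType::NONE
    );

    // Old SAME behaviour: pad so the output keeps the input size.
    int samePadding = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
    CUDANet::Layers::Conv2d same(
        inputSize, inputChannels, kernelSize, stride, numFilters,
        samePadding, CUDANet::Layers::ActivationType::RELU
    );
}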

View File

@@ -4,16 +4,10 @@
#include <vector>
namespace CUDANet::Layers {
#define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) ((((stride) - 1) * (inputSize) - (stride) + (kernelSize)) / 2)
/**
* @brief Padding types
*
* SAME: Zero padding such that the output size is the same as the input
* VALID: No padding
*
*/
enum Padding { SAME, VALID };
namespace CUDANet::Layers {
/**
* @brief Basic Sequential Layer
@@ -25,7 +19,7 @@ class SequentialLayer {
* @brief Destroy the Sequential Layer
*
*/
virtual ~SequentialLayer() {};
virtual ~SequentialLayer(){};
/**
* @brief Forward propagation virtual function
@@ -45,7 +39,7 @@ class WeightedLayer : public SequentialLayer {
* @brief Destroy the ILayer object
*
*/
virtual ~WeightedLayer() {};
virtual ~WeightedLayer(){};
/**
* @brief Virtual function for forward pass
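
The CUDANET_SAME_PADDING macro added at the top of this header replaces the removed SAME case: requiring (inputSize - kernelSize + 2 * paddingSize) / stride + 1 == inputSize and solving for paddingSize gives ((stride - 1) * inputSize - stride + kernelSize) / 2. A standalone sanity check for the two padded configurations used in the tests below (the macro is restated locally so the snippet compiles on its own):

#include <cassert>

#define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) \
    ((((stride) - 1) * (inputSize) - (stride) + (kernelSize)) / 2)

int main() {
    int inputSize = 5, kernelSize = 3, stride = 1;
    int pad = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);      // (0 * 5 - 1 + 3) / 2 = 1
    assert((inputSize - kernelSize + 2 * pad) / stride + 1 == inputSize);

    stride = 2;
    pad = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);          // (1 * 5 - 2 + 3) / 2 = 3
    assert((inputSize - kernelSize + 2 * pad) / stride + 1 == inputSize);
    return 0;
}

Because of the integer division, the padded output only matches the input size exactly when the arithmetic works out, as it does for these test configurations.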

View File

@@ -12,29 +12,17 @@ Conv2d::Conv2d(
int kernelSize,
int stride,
int numFilters,
Padding padding,
int paddingSize,
ActivationType activationType
)
: inputSize(inputSize),
inputChannels(inputChannels),
kernelSize(kernelSize),
stride(stride),
numFilters(numFilters) {
numFilters(numFilters),
paddingSize(paddingSize) {
switch (padding) {
case SAME:
outputSize = inputSize;
paddingSize = ((stride - 1) * inputSize - stride + kernelSize) / 2;
break;
case VALID:
paddingSize = 0;
outputSize = (inputSize - kernelSize) / stride + 1;
break;
default:
break;
}
outputSize = (inputSize - kernelSize + 2 * paddingSize) / stride + 1;
activation = Activation(
activationType, outputSize * outputSize * numFilters
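
The per-padding-type switch is gone; the output size is now always computed with the single formula outputSize = (inputSize - kernelSize + 2 * paddingSize) / stride + 1. A quick ad-hoc check against the three configurations used in the tests below (the helper name is made up for this sketch):

#include <cstdio>

static int convOutputSize(int inputSize, int kernelSize, int stride, int paddingSize) {
    return (inputSize - kernelSize + 2 * paddingSize) / stride + 1;
}

int main() {
    std::printf("%d\n", convOutputSize(4, 2, 1, 0));  // SimpleTest:    (4 - 2 + 0) / 1 + 1 = 3
    std::printf("%d\n", convOutputSize(5, 3, 1, 1));  // PaddedTest:    (5 - 3 + 2) / 1 + 1 = 5
    std::printf("%d\n", convOutputSize(5, 3, 2, 3));  // StridedPadded: (5 - 3 + 6) / 2 + 1 = 5
    return 0;
}

With paddingSize = 0 this collapses to the old VALID expression (inputSize - kernelSize) / stride + 1, so former VALID call sites, like the model test at the bottom, simply pass 0.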

View File

@@ -13,7 +13,7 @@ class Conv2dTest : public ::testing::Test {
int kernelSize,
int stride,
int numFilters,
CUDANet::Layers::Padding padding,
int paddingSize,
CUDANet::Layers::ActivationType activationType,
std::vector<float>& input,
float* kernels,
@@ -21,8 +21,8 @@ class Conv2dTest : public ::testing::Test {
) {
// Create Conv2d layer
CUDANet::Layers::Conv2d conv2d(
inputSize, inputChannels, kernelSize, stride, numFilters, padding,
activationType
inputSize, inputChannels, kernelSize, stride, numFilters,
paddingSize, activationType
);
conv2d.setWeights(kernels);
@@ -54,12 +54,13 @@ class Conv2dTest : public ::testing::Test {
};
TEST_F(Conv2dTest, SimpleTest) {
int inputSize = 4;
int inputChannels = 1;
int kernelSize = 2;
int stride = 1;
int numFilters = 1;
CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::VALID;
int inputSize = 4;
int inputChannels = 1;
int kernelSize = 2;
int stride = 1;
int numFilters = 1;
int paddingSize = 0;
CUDANet::Layers::ActivationType activationType =
CUDANet::Layers::ActivationType::NONE;
@@ -77,7 +78,7 @@ TEST_F(Conv2dTest, SimpleTest) {
float* d_output;
CUDANet::Layers::Conv2d conv2d = commonTestSetup(
inputSize, inputChannels, kernelSize, stride, numFilters, padding,
inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
activationType, input, kernels.data(), d_input
);
@@ -104,12 +105,12 @@ TEST_F(Conv2dTest, SimpleTest) {
}
TEST_F(Conv2dTest, PaddedTest) {
int inputSize = 5;
int inputChannels = 3;
int kernelSize = 3;
int stride = 1;
int numFilters = 2;
CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::SAME;
int inputSize = 5;
int inputChannels = 3;
int kernelSize = 3;
int stride = 1;
int numFilters = 2;
int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
CUDANet::Layers::ActivationType activationType =
CUDANet::Layers::ActivationType::NONE;
@@ -167,7 +168,7 @@ TEST_F(Conv2dTest, PaddedTest) {
float* d_output;
CUDANet::Layers::Conv2d conv2d = commonTestSetup(
inputSize, inputChannels, kernelSize, stride, numFilters, padding,
inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
activationType, input, kernels.data(), d_input
);
@@ -206,12 +207,12 @@ TEST_F(Conv2dTest, PaddedTest) {
}
TEST_F(Conv2dTest, StridedPaddedConvolution) {
int inputSize = 5;
int inputChannels = 2;
int kernelSize = 3;
int stride = 2;
int numFilters = 2;
CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::SAME;
int inputSize = 5;
int inputChannels = 2;
int kernelSize = 3;
int stride = 2;
int numFilters = 2;
int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
CUDANet::Layers::ActivationType activationType =
CUDANet::Layers::ActivationType::RELU;
@@ -254,7 +255,7 @@ TEST_F(Conv2dTest, StridedPaddedConvolution) {
float* d_output;
CUDANet::Layers::Conv2d conv2d = commonTestSetup(
inputSize, inputChannels, kernelSize, stride, numFilters, padding,
inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
activationType, input, kernels.data(), d_input
);

View File

@@ -24,10 +24,12 @@ class ModelTest : public ::testing::Test {
CUDANet::Model *model =
new CUDANet::Model(inputSize, inputChannels, outputSize);
int paddingSize = 0;
// Conv2d
CUDANet::Layers::Conv2d *conv2d = new CUDANet::Layers::Conv2d(
inputSize, inputChannels, kernelSize, stride, numFilters,
CUDANet::Layers::Padding::VALID,
paddingSize,
CUDANet::Layers::ActivationType::NONE
);