Mirror of https://github.com/lordmathis/CUDANet.git, synced 2025-12-22 22:34:22 +00:00
Rework padding size setting
@@ -23,7 +23,7 @@ class Conv2d : public WeightedLayer {
 * @param kernelSize Width and height of the convolution kernel
 * @param stride Convolution stride
 * @param numFilters Number of output filters
-* @param padding Padding type ('SAME' or 'VALID')
+* @param paddingSize Padding size
 * @param activationType Activation function type ('RELU', 'SIGMOID',
 * 'SOFTMAX' or 'NONE')
 */

@@ -33,7 +33,7 @@ class Conv2d : public WeightedLayer {
 int kernelSize,
 int stride,
 int numFilters,
-Padding padding,
+int paddingSize,
 ActivationType activationType
 );

@@ -4,16 +4,10 @@

 #include <vector>

+namespace CUDANet::Layers {
+
+#define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) ((stride - 1) * inputSize - stride + kernelSize) / 2;
+
-/**
-* @brief Padding types
-*
-* SAME: Zero padding such that the output size is the same as the input
-* VALID: No padding
-*
-*/
-enum Padding { SAME, VALID };
-
-namespace CUDANet::Layers {

 /**
 * @brief Basic Sequential Layer

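For reference, the new CUDANET_SAME_PADDING macro computes ((stride - 1) * inputSize - stride + kernelSize) / 2, which at stride 1 reduces to (kernelSize - 1) / 2. Below is a small standalone check of that arithmetic; since the macro body as defined above ends in a semicolon, the sketch restates the expression as a hypothetical constexpr helper (samePadding is not part of the library).

#include <cstdio>

// Same expression as CUDANET_SAME_PADDING, restated as a constexpr helper
// purely for this standalone check.
constexpr int samePadding(int inputSize, int kernelSize, int stride) {
    return ((stride - 1) * inputSize - stride + kernelSize) / 2;
}

int main() {
    // At stride 1 the inputSize term vanishes: pad = (kernelSize - 1) / 2.
    static_assert(samePadding(4, 3, 1) == 1, "3x3 kernel, stride 1 -> pad 1");
    static_assert(samePadding(8, 5, 1) == 2, "5x5 kernel, stride 1 -> pad 2");
    std::printf("pad(4,3,1)=%d, pad(8,5,1)=%d\n", samePadding(4, 3, 1), samePadding(8, 5, 1));
    return 0;
}
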
@@ -12,29 +12,17 @@ Conv2d::Conv2d(
 int kernelSize,
 int stride,
 int numFilters,
-Padding padding,
+int paddingSize,
 ActivationType activationType
 )
 : inputSize(inputSize),
 inputChannels(inputChannels),
 kernelSize(kernelSize),
 stride(stride),
-numFilters(numFilters) {
+numFilters(numFilters),
+paddingSize(paddingSize) {

-switch (padding) {
-case SAME:
-outputSize = inputSize;
-paddingSize = ((stride - 1) * inputSize - stride + kernelSize) / 2;
-break;
-
-case VALID:
-paddingSize = 0;
-outputSize = (inputSize - kernelSize) / stride + 1;
-break;
-
-default:
-break;
-}
+outputSize = (inputSize - kernelSize + 2 * paddingSize) / stride + 1;

 activation = Activation(
 activationType, outputSize * outputSize * numFilters

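With the padding size supplied by the caller, the constructor no longer switches on the Padding enum; the output size comes from the single expression (inputSize - kernelSize + 2 * paddingSize) / stride + 1. The toy struct below is a stand-in written only to sanity-check that this one expression covers both of the old branches; it is not the real CUDANet::Layers::Conv2d, and the sizes in main are made up.

#include <cassert>

// Toy model of the reworked constructor's size bookkeeping.
struct Conv2dShape {
    int inputSize, kernelSize, stride, paddingSize, outputSize;

    Conv2dShape(int inputSize, int kernelSize, int stride, int paddingSize)
        : inputSize(inputSize),
          kernelSize(kernelSize),
          stride(stride),
          paddingSize(paddingSize) {
        // Same expression as the reworked constructor above.
        outputSize = (inputSize - kernelSize + 2 * paddingSize) / stride + 1;
    }
};

int main() {
    // paddingSize = 0 reproduces the old VALID branch: (6 - 3) / 1 + 1 = 4.
    assert(Conv2dShape(6, 3, 1, 0).outputSize == 4);

    // paddingSize = 1, the SAME value for a 3x3 kernel at stride 1,
    // keeps the output at the input size: (6 - 3 + 2) / 1 + 1 = 6.
    assert(Conv2dShape(6, 3, 1, 1).outputSize == 6);
    return 0;
}
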
@@ -13,7 +13,7 @@ class Conv2dTest : public ::testing::Test {
 int kernelSize,
 int stride,
 int numFilters,
-CUDANet::Layers::Padding padding,
+int paddingSize,
 CUDANet::Layers::ActivationType activationType,
 std::vector<float>& input,
 float* kernels,

@@ -21,8 +21,8 @@ class Conv2dTest : public ::testing::Test {
 ) {
 // Create Conv2d layer
 CUDANet::Layers::Conv2d conv2d(
-inputSize, inputChannels, kernelSize, stride, numFilters, padding,
-activationType
+inputSize, inputChannels, kernelSize, stride, numFilters,
+paddingSize, activationType
 );

 conv2d.setWeights(kernels);

@@ -59,7 +59,8 @@ TEST_F(Conv2dTest, SimpleTest) {
 int kernelSize = 2;
 int stride = 1;
 int numFilters = 1;
-CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::VALID;
+int paddingSize = 0;
+
 CUDANet::Layers::ActivationType activationType =
 CUDANet::Layers::ActivationType::NONE;

@@ -77,7 +78,7 @@ TEST_F(Conv2dTest, SimpleTest) {
 float* d_output;

 CUDANet::Layers::Conv2d conv2d = commonTestSetup(
-inputSize, inputChannels, kernelSize, stride, numFilters, padding,
+inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
 activationType, input, kernels.data(), d_input
 );

@@ -109,7 +110,7 @@ TEST_F(Conv2dTest, PaddedTest) {
 int kernelSize = 3;
 int stride = 1;
 int numFilters = 2;
-CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::SAME;
+int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
 CUDANet::Layers::ActivationType activationType =
 CUDANet::Layers::ActivationType::NONE;

@@ -167,7 +168,7 @@ TEST_F(Conv2dTest, PaddedTest) {
 float* d_output;

 CUDANet::Layers::Conv2d conv2d = commonTestSetup(
-inputSize, inputChannels, kernelSize, stride, numFilters, padding,
+inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
 activationType, input, kernels.data(), d_input
 );

@@ -211,7 +212,7 @@ TEST_F(Conv2dTest, StridedPaddedConvolution) {
 int kernelSize = 3;
 int stride = 2;
 int numFilters = 2;
-CUDANet::Layers::Padding padding = CUDANet::Layers::Padding::SAME;
+int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
 CUDANet::Layers::ActivationType activationType =
 CUDANet::Layers::ActivationType::RELU;

@@ -254,7 +255,7 @@ TEST_F(Conv2dTest, StridedPaddedConvolution) {
 float* d_output;

 CUDANet::Layers::Conv2d conv2d = commonTestSetup(
-inputSize, inputChannels, kernelSize, stride, numFilters, padding,
+inputSize, inputChannels, kernelSize, stride, numFilters, paddingSize,
 activationType, input, kernels.data(), d_input
 );

@@ -24,10 +24,12 @@ class ModelTest : public ::testing::Test {
 CUDANet::Model *model =
 new CUDANet::Model(inputSize, inputChannels, outputSize);

+int paddingSize = 0;
+
 // Conv2d
 CUDANet::Layers::Conv2d *conv2d = new CUDANet::Layers::Conv2d(
 inputSize, inputChannels, kernelSize, stride, numFilters,
-CUDANet::Layers::Padding::VALID,
+paddingSize,
 CUDANet::Layers::ActivationType::NONE
 );

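Taken together, call sites now pick the padding size themselves: 0 where Padding::VALID used to be passed, or the CUDANET_SAME_PADDING value where Padding::SAME used to be passed, in the same argument position. A minimal sketch of that pattern, assuming the CUDANet declarations shown in this diff are in scope; the sizes are placeholders, not values taken from the tests.

// Sketch only: assumes the header declaring Conv2d and CUDANET_SAME_PADDING
// is included; the sizes below are illustrative.
void buildConvLayers() {
    const int inputSize = 8, inputChannels = 3, kernelSize = 3,
              stride = 1, numFilters = 16;

    // Former Padding::VALID call site: pass 0 explicitly.
    CUDANet::Layers::Conv2d valid(
        inputSize, inputChannels, kernelSize, stride, numFilters,
        0, CUDANet::Layers::ActivationType::NONE
    );

    // Former Padding::SAME call site: compute the size with the new macro.
    int paddingSize = CUDANET_SAME_PADDING(inputSize, kernelSize, stride);
    CUDANet::Layers::Conv2d same(
        inputSize, inputChannels, kernelSize, stride, numFilters,
        paddingSize, CUDANet::Layers::ActivationType::RELU
    );
}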