diff --git a/include/kernels/pooling.cuh b/include/kernels/pooling.cuh index a4722ea..6a74010 100644 --- a/include/kernels/pooling.cuh +++ b/include/kernels/pooling.cuh @@ -11,8 +11,7 @@ __global__ void max_pooling( const int inputSize, const int nChannels, const int poolingSize, - const int stride, - const int paddingSize + const int stride ); __global__ void avg_pooling( @@ -21,8 +20,7 @@ __global__ void avg_pooling( const int inputSize, const int nChannels, const int poolingSize, - const int stride, - const int paddingSize + const int stride ); } // namespace CUDANet::Kernels diff --git a/include/layers/avg_pooling.cuh b/include/layers/avg_pooling.cuh new file mode 100644 index 0000000..2a7416c --- /dev/null +++ b/include/layers/avg_pooling.cuh @@ -0,0 +1,38 @@ +#ifndef CUDANET_AVG_POOLING_H +#define CUDANET_AVG_POOLING_H + +#include "activation.cuh" +#include "layer.cuh" + +namespace CUDANet::Layers { + +class AvgPooling2D : public SequentialLayer { + public: + AvgPooling2D( + int inputSize, + int nChannels, + int poolingSize, + int stride, + ActivationType activationType + ); + ~AvgPooling2D(); + + float* forward(const float* d_input); + + private: + int inputSize; + int nChannels; + int poolingSize; + int stride; + + int outputSize; + int gridSize; + + float* d_output; + + Activation activation; +}; + +} // namespace CUDANet::Layers + +#endif // CUDANET_AVG_POOLING_H diff --git a/include/layers/max_pooling.cuh b/include/layers/max_pooling.cuh index 542777e..90a2b21 100644 --- a/include/layers/max_pooling.cuh +++ b/include/layers/max_pooling.cuh @@ -15,7 +15,6 @@ class MaxPooling2D : public SequentialLayer { int nChannels, int poolingSize, int stride, - Padding padding, ActivationType activationType ); ~MaxPooling2D(); @@ -27,7 +26,6 @@ class MaxPooling2D : public SequentialLayer { int nChannels; int poolingSize; int stride; - int paddingSize; int outputSize; int gridSize; diff --git a/src/kernels/pooling.cu b/src/kernels/pooling.cu index 92df3a9..580b552 
100644 --- a/src/kernels/pooling.cu +++ b/src/kernels/pooling.cu @@ -10,8 +10,7 @@ __global__ void Kernels::max_pooling( const int inputSize, const int nChannels, const int poolingSize, - const int stride, - const int paddingSize + const int stride ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= inputSize * inputSize * nChannels) { @@ -28,17 +27,10 @@ for (int k = 0; k < poolingSize; k++) { for (int l = 0; l < poolingSize; l++) { - if (i * stride + k < paddingSize || - i * stride + k >= (inputSize + paddingSize) || - j * stride + l < paddingSize || - j * stride + l >= (inputSize + paddingSize)) { - continue; - } - - int inputIndex = c * inputSize * inputSize + - (i * stride + k - paddingSize) * inputSize + - (j * stride + l - paddingSize); + int inputIndex = c * inputSize * inputSize + + (i * stride + k) * inputSize + + (j * stride + l); if (d_input[inputIndex] > max) { max = d_input[inputIndex]; @@ -55,8 +46,7 @@ __global__ void Kernels::avg_pooling( const int inputSize, const int nChannels, const int poolingSize, - const int stride, - const int paddingSize + const int stride ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= inputSize * inputSize * nChannels) { @@ -73,16 +63,10 @@ for (int k = 0; k < poolingSize; k++) { for (int l = 0; l < poolingSize; l++) { - if (i * stride + k < paddingSize || - i * stride + k >= (inputSize + paddingSize) || - j * stride + l < paddingSize || - j * stride + l >= (inputSize + paddingSize)) { - continue; - } - int inputIndex = c * inputSize * inputSize + - (i * stride + k - paddingSize) * inputSize + - (j * stride + l - paddingSize); + int inputIndex = c * inputSize * inputSize + + (i * stride + k) * inputSize + + (j * stride + l); sum += d_input[inputIndex]; } diff --git a/src/layers/avg_pooling.cu b/src/layers/avg_pooling.cu new file mode 100644 index 0000000..b8180df --- /dev/null +++ b/src/layers/avg_pooling.cu @@ -0,0 +1,44 @@ +#include "avg_pooling.cuh" +#include "cuda_helper.cuh" +#include "pooling.cuh" +using 
namespace CUDANet::Layers; +AvgPooling2D::AvgPooling2D( + int inputSize, + int nChannels, + int poolingSize, + int stride, + ActivationType activationType + ) + : inputSize(inputSize), nChannels(nChannels), poolingSize(poolingSize), stride(stride) { + + + outputSize = (inputSize - poolingSize) / stride + 1; + + activation = Activation( + activationType, outputSize * outputSize * nChannels + ); + + d_output = nullptr; + CUDA_CHECK(cudaMalloc( + (void**)&d_output, sizeof(float) * outputSize * outputSize * nChannels + )); + + gridSize = (outputSize * outputSize * nChannels + BLOCK_SIZE - 1) / BLOCK_SIZE; + +} + + +AvgPooling2D::~AvgPooling2D() { + cudaFree(d_output); +} + + +float* AvgPooling2D::forward(const float* d_input) { + Kernels::avg_pooling<<<gridSize, BLOCK_SIZE>>>( + d_input, d_output, inputSize, nChannels, poolingSize, stride + ); + + return d_output; +} \ No newline at end of file diff --git a/src/layers/max_pooling.cu b/src/layers/max_pooling.cu index e485b7c..c660782 100644 --- a/src/layers/max_pooling.cu +++ b/src/layers/max_pooling.cu @@ -10,26 +10,12 @@ MaxPooling2D::MaxPooling2D( int nChannels, int poolingSize, int stride, - Padding padding, ActivationType activationType ) : inputSize(inputSize), nChannels(nChannels), poolingSize(poolingSize), stride(stride) { - switch (padding) { - case SAME: - outputSize = inputSize; - paddingSize = ((stride - 1) * inputSize - stride + poolingSize) / 2; - break; - - case VALID: - paddingSize = 0; - outputSize = (inputSize - poolingSize) / stride + 1; - break; - - default: - break; - } + outputSize = (inputSize - poolingSize) / stride + 1; activation = Activation( activationType, outputSize * outputSize * nChannels @@ -52,7 +38,7 @@ MaxPooling2D::~MaxPooling2D() { float* MaxPooling2D::forward(const float* d_input) { Kernels::max_pooling<<<gridSize, BLOCK_SIZE>>>( - d_input, d_output, inputSize, nChannels, poolingSize, stride, paddingSize + d_input, d_output, inputSize, nChannels, poolingSize, stride ); return d_output;