Migrate avg pooling

This commit is contained in:
2024-09-09 21:36:13 +02:00
parent 75475790ac
commit 76e5225001
5 changed files with 129 additions and 53 deletions

View File

@@ -1,16 +1,17 @@
#ifndef CUDANET_H #ifndef CUDANET_H
#define CUDANET_H #define CUDANET_H
// Kernels #ifdef USE_CUDA
#include "activation_functions.cuh" #include "activation_functions.cuh"
#include "convolution.cuh" #include "convolution.cuh"
#include "matmul.cuh" #include "matmul.cuh"
#include "pooling.cuh" #include "pooling.cuh"
#endif
// Layers // Layers
#include "activation.hpp" #include "activation.hpp"
#include "add.cuh" #include "add.hpp"
#include "avg_pooling.cuh" #include "avg_pooling.hpp"
#include "batch_norm.cuh" #include "batch_norm.cuh"
#include "concat.cuh" #include "concat.cuh"
#include "conv2d.cuh" #include "conv2d.cuh"
@@ -25,8 +26,10 @@
#include "module.hpp" #include "module.hpp"
// Utils // Utils
#include "cuda_helper.cuh"
#include "imagenet.hpp" #include "imagenet.hpp"
#ifdef USE_CUDA
#include "cuda_helper.cuh"
#include "vector.cuh" #include "vector.cuh"
#endif
#endif // CUDANET_H #endif // CUDANET_H

View File

@@ -18,7 +18,7 @@ class AvgPooling2d : public SequentialLayer, public TwoDLayer {
); );
~AvgPooling2d(); ~AvgPooling2d();
float* forward(const float* d_input); float* forward(const float* input);
/** /**
* @brief Get output size * @brief Get output size
@@ -45,14 +45,32 @@ class AvgPooling2d : public SequentialLayer, public TwoDLayer {
shape2d outputSize; shape2d outputSize;
float* d_output;
Activation* activation; Activation* activation;
float* forwardCPU(const float* input);
#ifdef USE_CUDA
float* d_output;
float* forwardCUDA(const float* d_input);
void initCUDA();
void delCUDA();
#endif
}; };
class AdaptiveAvgPooling2d : public AvgPooling2d { class AdaptiveAvgPooling2d : public AvgPooling2d {
public: public:
AdaptiveAvgPooling2d(shape2d inputShape, int nChannels, shape2d outputShape, ActivationType activationType); AdaptiveAvgPooling2d(
shape2d inputShape,
int nChannels,
shape2d outputShape,
ActivationType activationType
);
private:
#ifdef USE_CUDA
void initCUDA();
#endif
}; };
} // namespace CUDANet::Layers } // namespace CUDANet::Layers

View File

@@ -0,0 +1,45 @@
#include "avg_pooling.hpp"
#include "cuda_helper.cuh"
#include "pooling.cuh"
using namespace CUDANet::Layers;
void AvgPooling2d::initCUDA() {
    // Device-side output buffer: one float per output element per channel.
    const size_t outputBytes =
        sizeof(float) * outputSize.first * outputSize.second * nChannels;
    d_output = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&d_output, outputBytes));
}
void AvgPooling2d::delCUDA() {
    // Release the device output buffer. cudaFree(nullptr) is a documented
    // no-op, so this is safe even if allocation never ran; resetting the
    // pointer makes a repeated delCUDA() call harmless (no double-free).
    cudaFree(d_output);
    d_output = nullptr;
}
float* AvgPooling2d::forwardCUDA(const float* d_input) {
    // Launch geometry: one thread per (row, col, channel) output element,
    // in 8x8x8 tiles; grids are rounded up so partial tiles are covered.
    auto ceilDiv = [](unsigned int a, unsigned int b) {
        return (a + b - 1) / b;
    };
    const dim3 threads(8, 8, 8);
    const dim3 blocks(
        ceilDiv(outputSize.first, threads.x),
        ceilDiv(outputSize.second, threads.y),
        ceilDiv(nChannels, threads.z)
    );

    Kernels::avg_pooling<<<blocks, threads>>>(
        d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
        stride, padding
    );
    CUDA_CHECK(cudaGetLastError());

    // Apply the configured activation in place on the pooled output,
    // then block until the device work is finished before handing the
    // buffer back to the caller.
    activation->activate(d_output);
    CUDA_CHECK(cudaDeviceSynchronize());

    return d_output;
}
void AdaptiveAvgPooling2d::initCUDA() {
    // The base-class constructor allocated d_output for its provisional
    // pooling geometry; the adaptive constructor recomputes outputSize
    // afterwards, so the buffer must be released and reallocated here.
    // Wrap both calls in CUDA_CHECK for consistency with
    // AvgPooling2d::initCUDA above — the original raw calls silently
    // dropped allocation failures.
    CUDA_CHECK(cudaFree(d_output));
    d_output = nullptr;
    CUDA_CHECK(cudaMalloc(
        (void**)&d_output,
        sizeof(float) * outputSize.first * outputSize.second * nChannels
    ));
}

View File

@@ -1,15 +1,15 @@
#include "avg_pooling.cuh" #include <stdexcept>
#include "cuda_helper.cuh"
#include "pooling.cuh" #include "avg_pooling.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
AvgPooling2d::AvgPooling2d( AvgPooling2d::AvgPooling2d(
shape2d inputSize, shape2d inputSize,
int nChannels, int nChannels,
shape2d poolingSize, shape2d poolingSize,
shape2d stride, shape2d stride,
shape2d padding, shape2d padding,
ActivationType activationType ActivationType activationType
) )
: inputSize(inputSize), : inputSize(inputSize),
@@ -18,44 +18,40 @@ AvgPooling2d::AvgPooling2d(
stride(stride), stride(stride),
padding(padding) { padding(padding) {
outputSize = { outputSize = {
(inputSize.first + 2 * padding.first - poolingSize.first) / stride.first + 1, (inputSize.first + 2 * padding.first - poolingSize.first) /
(inputSize.second + 2 * padding.second - poolingSize.second) / stride.second + 1 stride.first +
1,
(inputSize.second + 2 * padding.second - poolingSize.second) /
stride.second +
1
}; };
activation = new Activation( activation = new Activation(
activationType, outputSize.first * outputSize.second * nChannels activationType, outputSize.first * outputSize.second * nChannels
); );
d_output = nullptr; #ifdef USE_CUDA
CUDA_CHECK(cudaMalloc( initCUDA();
(void**)&d_output, #endif
sizeof(float) * outputSize.first * outputSize.second * nChannels
));
} }
AvgPooling2d::~AvgPooling2d() { AvgPooling2d::~AvgPooling2d() {
cudaFree(d_output); #ifdef USE_CUDA
delCUDA();
#endif
delete activation; delete activation;
} }
float* AvgPooling2d::forward(const float* d_input) { float* AvgPooling2d::forwardCPU(const float* input) {
dim3 block(8, 8, 8); throw std::logic_error("Not implemented");
dim3 grid( }
(outputSize.first + block.x - 1) / block.x,
(outputSize.second + block.y - 1) / block.y,
(nChannels + block.z - 1) / block.z
);
Kernels::avg_pooling<<<grid, block>>>( float* AvgPooling2d::forward(const float* input) {
d_input, d_output, inputSize, outputSize, nChannels, poolingSize, #ifdef USE_CUDA
stride, padding return forwardCUDA(input);
); #else
CUDA_CHECK(cudaGetLastError()); return forwardCPU(input);
#endif
activation->activate(d_output);
CUDA_CHECK(cudaDeviceSynchronize());
return d_output;
} }
int AvgPooling2d::getOutputSize() { int AvgPooling2d::getOutputSize() {
@@ -70,22 +66,36 @@ shape2d AvgPooling2d::getOutputDims() {
return outputSize; return outputSize;
} }
AdaptiveAvgPooling2d::AdaptiveAvgPooling2d(shape2d inputShape, int nChannels, shape2d outputShape, ActivationType activationType) AdaptiveAvgPooling2d::AdaptiveAvgPooling2d(
: AvgPooling2d(inputShape, nChannels, {1, 1}, {1, 1}, {0, 0}, activationType) { shape2d inputShape,
int nChannels,
stride = {inputShape.first / outputShape.first, inputShape.second / outputShape.second}; shape2d outputShape,
ActivationType activationType
)
: AvgPooling2d(
inputShape,
nChannels,
{1, 1},
{1, 1},
{0, 0},
activationType
) {
stride = {
inputShape.first / outputShape.first,
inputShape.second / outputShape.second
};
poolingSize = { poolingSize = {
inputShape.first - (outputShape.first - 1) * stride.first, inputShape.first - (outputShape.first - 1) * stride.first,
inputShape.second - (outputShape.second - 1) * stride.second inputShape.second - (outputShape.second - 1) * stride.second
}; };
padding = { padding = {(poolingSize.first - 1) / 2, (poolingSize.second - 1) / 2};
(poolingSize.first - 1) / 2,
(poolingSize.second - 1) / 2
};
outputSize = outputShape; outputSize = outputShape;
activation = new Activation(activationType, outputSize.first * outputSize.second * nChannels); activation = new Activation(
activationType, outputSize.first * outputSize.second * nChannels
);
cudaFree(d_output); #ifdef USE_CUDA
cudaMalloc((void**)&d_output, sizeof(float) * outputSize.first * outputSize.second * nChannels); initCUDA();
#endif
} }

View File

@@ -3,7 +3,7 @@
#include <vector> #include <vector>
#include "avg_pooling.cuh" #include "avg_pooling.hpp"
class AvgPoolingLayerTest : public ::testing::Test { class AvgPoolingLayerTest : public ::testing::Test {
protected: protected: