Migrate max pooling

Date:   2024-09-09 21:48:36 +02:00
Parent: 8a5d556b7e
Commit: a0665fb05c

6 changed files with 71 additions and 33 deletions
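In short (all visible in the diffs below): the MaxPooling2d header moves from max_pooling.cuh to max_pooling.hpp; forward() becomes a dispatcher between a new forwardCUDA() and a not-yet-implemented forwardCPU(); the device output buffer and kernel launch move into a new CUDA-only source file behind USE_CUDA; and the remaining files get their include paths updated.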

File 1 of 6

@@ -18,7 +18,7 @@
 #include "dense.hpp"
 #include "input.cuh"
 #include "layer.hpp"
-#include "max_pooling.cuh"
+#include "max_pooling.hpp"
 #include "output.cuh"

 // Models

File 2 of 6

@@ -18,7 +18,7 @@ class MaxPooling2d : public SequentialLayer, public TwoDLayer {
     );
     ~MaxPooling2d();

-    float* forward(const float* d_input);
+    float* forward(const float* input);

    /**
     * @brief Get output size
@@ -45,9 +45,17 @@ class MaxPooling2d : public SequentialLayer, public TwoDLayer {
     shape2d outputSize;

-    float* d_output;
     Activation* activation;

+    float* forwardCPU(const float* input);
+
+#ifdef USE_CUDA
+    float* d_output;
+    float* forwardCUDA(const float* d_input);
+    void initCUDA();
+    void delCUDA();
+#endif
 };

 } // namespace CUDANet::Layers
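Two details worth noting in this hunk: the parameter rename from d_input to input reflects the usual convention that a d_ prefix marks a device pointer, and after this change the argument is only a device pointer in CUDA builds; and the CUDA-only members (d_output, forwardCUDA(), initCUDA(), delCUDA()) now exist solely when USE_CUDA is defined. Callers keep one backend-agnostic entry point. A minimal caller-side sketch; the helper function and its name are hypothetical:

    #include "max_pooling.hpp"

    // Hypothetical helper, assuming a constructed layer and a suitable
    // input buffer. Under USE_CUDA `input` must be a device pointer and
    // the returned pointer is the layer-owned d_output; in a CPU-only
    // build this currently throws (see the implementation diff below).
    float* runForward(CUDANet::Layers::MaxPooling2d& layer, const float* input) {
        return layer.forward(input);
    }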

File 3 of 6

@@ -0,0 +1,38 @@
+#include "cuda_helper.cuh"
+#include "max_pooling.hpp"
+#include "pooling.cuh"
+
+using namespace CUDANet::Layers;
+
+void MaxPooling2d::initCUDA() {
+    d_output = nullptr;
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_output,
+        sizeof(float) * outputSize.first * outputSize.second * nChannels
+    ));
+}
+
+void MaxPooling2d::delCUDA() {
+    cudaFree(d_output);
+}
+
+float* MaxPooling2d::forwardCUDA(const float* d_input) {
+    dim3 block(8, 8, 8);
+    dim3 grid(
+        (outputSize.first + block.x - 1) / block.x,
+        (outputSize.second + block.y - 1) / block.y,
+        (nChannels + block.z - 1) / block.z
+    );
+
+    Kernels::max_pooling<<<grid, block>>>(
+        d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
+        stride, padding
+    );
+    CUDA_CHECK(cudaGetLastError());
+
+    activation->activate(d_output);
+    CUDA_CHECK(cudaDeviceSynchronize());
+
+    return d_output;
+}
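A note on the launch configuration above: the grid is sized with the ceil-division idiom (n + block - 1) / block, so the 8x8x8 blocks fully cover output extents that are not multiples of 8; threads that land past the edge are presumably bounds-checked inside Kernels::max_pooling. Worked through with made-up sizes:

    // Ceil division: the smallest b such that b * blockDim >= n.
    constexpr unsigned ceilDiv(unsigned n, unsigned blockDim) {
        return (n + blockDim - 1) / blockDim;
    }

    // Example (hypothetical sizes): a 30x30 output over 16 channels with an
    // 8x8x8 block gives grid = (ceilDiv(30,8), ceilDiv(30,8), ceilDiv(16,8))
    // = (4, 4, 2): 32 blocks of 512 threads for 30*30*16 = 14400 outputs
    // (16384 threads in total; the excess must be masked in-kernel).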

File 4 of 6

@@ -1,6 +1,5 @@
-#include "cuda_helper.cuh"
-#include "max_pooling.cuh"
-#include "pooling.cuh"
+#include "max_pooling.hpp"
+#include <stdexcept>

 using namespace CUDANet::Layers;
@@ -30,38 +29,31 @@ MaxPooling2d(
         activationType, outputSize.first * outputSize.second * nChannels
     );

-    d_output = nullptr;
-    CUDA_CHECK(cudaMalloc(
-        (void**)&d_output,
-        sizeof(float) * outputSize.first * outputSize.second * nChannels
-    ));
+#ifdef USE_CUDA
+    initCUDA();
+#endif
 }

 MaxPooling2d::~MaxPooling2d() {
-    cudaFree(d_output);
+#ifdef USE_CUDA
+    delCUDA();
+#endif
     delete activation;
 }

-float* MaxPooling2d::forward(const float* d_input) {
-    dim3 block(8, 8, 8);
-    dim3 grid(
-        (outputSize.first + block.x - 1) / block.x,
-        (outputSize.second + block.y - 1) / block.y,
-        (nChannels + block.z - 1) / block.z
-    );
-
-    Kernels::max_pooling<<<grid, block>>>(
-        d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
-        stride, padding
-    );
-    CUDA_CHECK(cudaGetLastError());
-
-    activation->activate(d_output);
-    CUDA_CHECK(cudaDeviceSynchronize());
-
-    return d_output;
-}
+float* MaxPooling2d::forwardCPU(const float* input) {
+    throw std::logic_error("Not implemented");
+}
+
+float* MaxPooling2d::forward(const float* input) {
+#ifdef USE_CUDA
+    return forwardCUDA(input);
+#else
+    return forwardCPU(input);
+#endif
+}

 int MaxPooling2d::getOutputSize() {
     return outputSize.first * outputSize.second * nChannels;
 }
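forwardCPU() lands here as a throwing stub, so CPU-only builds compile and construct the layer but cannot run it yet. For orientation only, a host-side max pool might look roughly like the sketch below: a minimal sketch assuming a row-major CHW layout, a square pooling window, and skip-the-pad semantics; the free function and its signature are made up for illustration, not part of this repository.

    #include <algorithm>
    #include <limits>
    #include <vector>

    // Hypothetical CPU reference for max pooling over a row-major CHW
    // buffer. Parameter names echo the members used in the diff; the
    // layout and signature are assumptions.
    std::vector<float> maxPoolCPU(
        const float* input, int inH, int inW, int outH, int outW,
        int channels, int pool, int stride, int padding
    ) {
        std::vector<float> output(static_cast<size_t>(channels) * outH * outW);
        for (int c = 0; c < channels; ++c) {
            for (int oy = 0; oy < outH; ++oy) {
                for (int ox = 0; ox < outW; ++ox) {
                    float best = -std::numeric_limits<float>::infinity();
                    for (int ky = 0; ky < pool; ++ky) {
                        for (int kx = 0; kx < pool; ++kx) {
                            int iy = oy * stride + ky - padding;
                            int ix = ox * stride + kx - padding;
                            if (iy < 0 || iy >= inH || ix < 0 || ix >= inW)
                                continue;  // padded taps contribute nothing
                            best = std::max(best, input[(c * inH + iy) * inW + ix]);
                        }
                    }
                    output[(static_cast<size_t>(c) * outH + oy) * outW + ox] = best;
                }
            }
        }
        return output;
    }

An activation pass over the output would then follow, mirroring activation->activate(d_output) on the CUDA path.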

File 5 of 6

@@ -3,7 +3,7 @@
 #include <vector>

-#include "max_pooling.cuh"
+#include "max_pooling.hpp"

 class MaxPoolingLayerTest : public ::testing::Test {
   protected:

File 6 of 6

@@ -2,7 +2,7 @@
 #include "conv2d.cuh"
 #include "dense.hpp"
-#include "max_pooling.cuh"
+#include "max_pooling.hpp"
 #include "model.hpp"

 class ModelTest : public ::testing::Test {