Migrate avg pooling

2024-09-09 21:36:13 +02:00
parent 75475790ac
commit 76e5225001
5 changed files with 129 additions and 53 deletions

View File

@@ -0,0 +1,45 @@
+#include "avg_pooling.hpp"
+
+#include "cuda_helper.cuh"
+#include "pooling.cuh"
+
+using namespace CUDANet::Layers;
+
+void AvgPooling2d::initCUDA() {
+    d_output = nullptr;
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_output,
+        sizeof(float) * outputSize.first * outputSize.second * nChannels
+    ));
+}
+
+void AvgPooling2d::delCUDA() {
+    cudaFree(d_output);
+}
+
+float* AvgPooling2d::forwardCUDA(const float* d_input) {
+    dim3 block(8, 8, 8);
+    dim3 grid(
+        (outputSize.first + block.x - 1) / block.x,
+        (outputSize.second + block.y - 1) / block.y,
+        (nChannels + block.z - 1) / block.z
+    );
+
+    Kernels::avg_pooling<<<grid, block>>>(
+        d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
+        stride, padding
+    );
+    CUDA_CHECK(cudaGetLastError());
+
+    activation->activate(d_output);
+
+    CUDA_CHECK(cudaDeviceSynchronize());
+
+    return d_output;
+}
+void AdaptiveAvgPooling2d::initCUDA() {
+    // d_output was already allocated by the AvgPooling2d constructor;
+    // free it and re-allocate for the adaptive output size.
+    cudaFree(d_output);
+    CUDA_CHECK(cudaMalloc(
+        (void**)&d_output,
+        sizeof(float) * outputSize.first * outputSize.second * nChannels
+    ));
+}
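
Note: the avg_pooling.hpp header that this new CUDA translation unit includes is not part of the commit view above. As a rough sketch only — the member layout, the shape2d alias, and the activation declarations are assumptions, not the repository's actual header — the definitions above imply declarations shaped roughly like this:

#pragma once

#include <utility>

namespace CUDANet::Layers {

// Assumed: shape2d behaves like a (height, width) pair, per the
// .first/.second accesses in the code above.
using shape2d = std::pair<int, int>;

enum class ActivationType;  // assumed: defined elsewhere in the library
class Activation;

class AvgPooling2d {
  public:
    AvgPooling2d(
        shape2d inputSize, int nChannels, shape2d poolingSize,
        shape2d stride, shape2d padding, ActivationType activationType
    );
    ~AvgPooling2d();

    float* forward(const float* input);  // dispatches to CUDA or CPU path

  protected:
    float* forwardCPU(const float* input);

    shape2d inputSize, outputSize, poolingSize, stride, padding;
    int nChannels;
    Activation* activation;

#ifdef USE_CUDA
    void   initCUDA();
    void   delCUDA();
    float* forwardCUDA(const float* d_input);
    float* d_output;
#endif
};

class AdaptiveAvgPooling2d : public AvgPooling2d {
  public:
    AdaptiveAvgPooling2d(
        shape2d inputShape, int nChannels, shape2d outputShape,
        ActivationType activationType
    );
#ifdef USE_CUDA
    void initCUDA();  // shadows the base method; re-allocates d_output
#endif
};

}  // namespace CUDANet::Layers

Keeping the device-side members behind USE_CUDA mirrors the dispatch in forward(), so a CPU-only build never references device pointers.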

View File

@@ -1,15 +1,15 @@
-#include "avg_pooling.cuh"
-#include "cuda_helper.cuh"
-#include "pooling.cuh"
+#include <stdexcept>
+
+#include "avg_pooling.hpp"
 
 using namespace CUDANet::Layers;
 
 AvgPooling2d::AvgPooling2d(
-    shape2d inputSize,
+    shape2d inputSize,
     int nChannels,
-    shape2d poolingSize,
-    shape2d stride,
-    shape2d padding,
+    shape2d poolingSize,
+    shape2d stride,
+    shape2d padding,
     ActivationType activationType
 )
     : inputSize(inputSize),
@@ -18,44 +18,40 @@ AvgPooling2d::AvgPooling2d(
       stride(stride),
       padding(padding) {
     outputSize = {
-        (inputSize.first + 2 * padding.first - poolingSize.first) / stride.first + 1,
-        (inputSize.second + 2 * padding.second - poolingSize.second) / stride.second + 1
+        (inputSize.first + 2 * padding.first - poolingSize.first) /
+                stride.first +
+            1,
+        (inputSize.second + 2 * padding.second - poolingSize.second) /
+                stride.second +
+            1
     };
 
     activation = new Activation(
         activationType, outputSize.first * outputSize.second * nChannels
     );
 
-    d_output = nullptr;
-    CUDA_CHECK(cudaMalloc(
-        (void**)&d_output,
-        sizeof(float) * outputSize.first * outputSize.second * nChannels
-    ));
+#ifdef USE_CUDA
+    initCUDA();
+#endif
 }
 
 AvgPooling2d::~AvgPooling2d() {
-    cudaFree(d_output);
+#ifdef USE_CUDA
+    delCUDA();
+#endif
     delete activation;
 }
 
-float* AvgPooling2d::forward(const float* d_input) {
-    dim3 block(8, 8, 8);
-    dim3 grid(
-        (outputSize.first + block.x - 1) / block.x,
-        (outputSize.second + block.y - 1) / block.y,
-        (nChannels + block.z - 1) / block.z
-    );
+float* AvgPooling2d::forwardCPU(const float* input) {
+    throw std::logic_error("Not implemented");
+}
 
-    Kernels::avg_pooling<<<grid, block>>>(
-        d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
-        stride, padding
-    );
-    CUDA_CHECK(cudaGetLastError());
-
-    activation->activate(d_output);
-
-    CUDA_CHECK(cudaDeviceSynchronize());
-
-    return d_output;
+float* AvgPooling2d::forward(const float* input) {
+#ifdef USE_CUDA
+    return forwardCUDA(input);
+#else
+    return forwardCPU(input);
+#endif
 }
 
 int AvgPooling2d::getOutputSize() {
@@ -70,22 +66,36 @@ shape2d AvgPooling2d::getOutputDims() {
     return outputSize;
 }
 
-AdaptiveAvgPooling2d::AdaptiveAvgPooling2d(shape2d inputShape, int nChannels, shape2d outputShape, ActivationType activationType)
-    : AvgPooling2d(inputShape, nChannels, {1, 1}, {1, 1}, {0, 0}, activationType) {
-    stride = {inputShape.first / outputShape.first, inputShape.second / outputShape.second};
+AdaptiveAvgPooling2d::AdaptiveAvgPooling2d(
+    shape2d inputShape,
+    int nChannels,
+    shape2d outputShape,
+    ActivationType activationType
+)
+    : AvgPooling2d(
+          inputShape,
+          nChannels,
+          {1, 1},
+          {1, 1},
+          {0, 0},
+          activationType
+      ) {
+    stride = {
+        inputShape.first / outputShape.first,
+        inputShape.second / outputShape.second
+    };
 
     poolingSize = {
         inputShape.first - (outputShape.first - 1) * stride.first,
         inputShape.second - (outputShape.second - 1) * stride.second
     };
 
-    padding = {
-        (poolingSize.first - 1) / 2,
-        (poolingSize.second - 1) / 2
-    };
-    outputSize = outputShape;
+    padding = {(poolingSize.first - 1) / 2, (poolingSize.second - 1) / 2};
+    outputSize = outputShape;
 
-    activation = new Activation(activationType, outputSize.first * outputSize.second * nChannels);
+    activation = new Activation(
+        activationType, outputSize.first * outputSize.second * nChannels
+    );
 
-    cudaFree(d_output);
-    cudaMalloc((void**)&d_output, sizeof(float) * outputSize.first * outputSize.second * nChannels);
+#ifdef USE_CUDA
+    initCUDA();
+#endif
 }
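
For a concrete check of the adaptive arithmetic above, here is a standalone sketch (not part of the commit) that plugs a 6x6 input and a 2x2 target into the same formulas. It derives stride {3, 3}, pooling window {3, 3}, and padding {1, 1}, and the base class's output-size formula then reproduces the 2x2 target; the constructor nevertheless assigns outputSize = outputShape directly rather than relying on that formula.

#include <cstdio>
#include <utility>

int main() {
    std::pair<int, int> in{6, 6}, out{2, 2};

    // Same derivations as AdaptiveAvgPooling2d's constructor:
    std::pair<int, int> stride{in.first / out.first, in.second / out.second};  // {3, 3}
    std::pair<int, int> pool{
        in.first - (out.first - 1) * stride.first,    // 6 - 1*3 = 3
        in.second - (out.second - 1) * stride.second  // 6 - 1*3 = 3
    };
    std::pair<int, int> pad{(pool.first - 1) / 2, (pool.second - 1) / 2};  // {1, 1}

    // Output-size formula from the AvgPooling2d constructor:
    int h = (in.first + 2 * pad.first - pool.first) / stride.first + 1;    // (6+2-3)/3+1 = 2
    int w = (in.second + 2 * pad.second - pool.second) / stride.second + 1;

    std::printf(
        "stride=(%d,%d) pool=(%d,%d) pad=(%d,%d) -> %dx%d\n",
        stride.first, stride.second, pool.first, pool.second,
        pad.first, pad.second, h, w
    );  // prints: stride=(3,3) pool=(3,3) pad=(1,1) -> 2x2
}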