Implement max pooling layer

2024-03-19 22:04:58 +01:00
parent 364715ff70
commit a0fc1b00ae
9 changed files with 245 additions and 21 deletions


@@ -25,16 +25,17 @@ class Conv2d : public WeightedLayer {
      * @param stride Convolution stride
      * @param numFilters Number of output filters
      * @param padding Padding type ('SAME' or 'VALID')
-     * @param activationType Activation function type ('RELU', 'SIGMOID', 'SOFTMAX' or 'NONE')
+     * @param activationType Activation function type ('RELU', 'SIGMOID',
+     * 'SOFTMAX' or 'NONE')
      */
     Conv2d(
-        int inputSize,
-        int inputChannels,
-        int kernelSize,
-        int stride,
-        int numFilters,
-        Layers::Padding padding,
-        Layers::ActivationType activationType
+        int inputSize,
+        int inputChannels,
+        int kernelSize,
+        int stride,
+        int numFilters,
+        Padding padding,
+        ActivationType activationType
     );

     /**
@@ -107,7 +108,7 @@ class Conv2d : public WeightedLayer {
     float* d_biases;

     // Kernels
-    Layers::Activation activation;
+    Activation activation;

     /**
      * @brief Initialize weights of the convolutional layer with zeros

@@ -0,0 +1,42 @@
+#ifndef CUDANET_MAX_POOLING_H
+#define CUDANET_MAX_POOLING_H
+
+#include <cuda_runtime.h>
+
+#include "layer.cuh"
+#include "activation.cuh"
+
+namespace CUDANet::Layers {
+
+class MaxPooling2D : public SequentialLayer {
+  public:
+    MaxPooling2D(
+        int inputSize,
+        int nChannels,
+        int poolingSize,
+        int stride,
+        Padding padding,
+        ActivationType activationType
+    );
+    ~MaxPooling2D();
+
+    float* forward(const float* d_input);
+
+  private:
+    int inputSize;
+    int nChannels;
+    int poolingSize;
+    int stride;
+    int paddingSize;
+    int outputSize;
+
+    int gridSize;
+
+    float* d_output;
+
+    Activation activation;
+};
+
+}  // namespace CUDANet::Layers
+
+#endif  // CUDANET_MAX_POOLING_H
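
For a square input, the usual pooling arithmetic gives outputSize = (inputSize + 2 * paddingSize - poolingSize) / stride + 1, with paddingSize = 0 for 'VALID' padding. A minimal sketch of what the forward kernel behind this header might look like; the kernel name, signature, and channel-major (CHW) data layout are assumptions, not the actual CUDANet implementation:

#include <cuda_runtime.h>
#include <math.h>

// Hypothetical kernel: one thread per output element, CHW layout assumed.
// Taps that fall outside the input (i.e. in the padded border) are skipped,
// which for max pooling is equivalent to padding with -infinity.
__global__ void max_pooling(
    const float* __restrict__ d_input,
    float* __restrict__ d_output,
    const int inputSize,
    const int outputSize,
    const int nChannels,
    const int poolingSize,
    const int stride,
    const int paddingSize
) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= nChannels * outputSize * outputSize) {
        return;
    }

    // Decompose the flat index into (channel, row, col)
    const int c   = tid / (outputSize * outputSize);
    const int row = (tid / outputSize) % outputSize;
    const int col = tid % outputSize;

    float maxVal = -INFINITY;
    for (int i = 0; i < poolingSize; i++) {
        for (int j = 0; j < poolingSize; j++) {
            const int inRow = row * stride + i - paddingSize;
            const int inCol = col * stride + j - paddingSize;
            if (inRow >= 0 && inRow < inputSize &&
                inCol >= 0 && inCol < inputSize) {
                const float v =
                    d_input[(c * inputSize + inRow) * inputSize + inCol];
                maxVal = fmaxf(maxVal, v);
            }
        }
    }
    d_output[tid] = maxVal;
}

On the host side, forward would presumably launch gridSize blocks covering the nChannels * outputSize * outputSize output elements, apply the Activation member to d_output, and return the device pointer.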