Mirror of https://github.com/lordmathis/CUDANet.git, last synced 2025-11-06 01:34:22 +00:00.
Implement max pooling layer
This commit is contained in:
30
include/kernels/pooling.cuh
Normal file
30
include/kernels/pooling.cuh
Normal file
@@ -0,0 +1,30 @@
|
||||
#ifndef CUDANET_POOLING_H
#define CUDANET_POOLING_H

#include <cuda_runtime.h>

namespace CUDANet::Kernels {

/**
 * @brief Max pooling kernel: fills d_output with the maximum value of each
 * pooling window taken over d_input.
 *
 * NOTE(review): the expected grid/block layout, the output-size formula, and
 * whether inputSize refers to one side of a square input are not visible from
 * this header — confirm against the kernel definition (pooling.cu).
 *
 * @param d_input     Device pointer to the input tensor (read-only).
 * @param d_output    Device pointer to the output tensor.
 * @param inputSize   Spatial size of the input (presumably one side of a
 *                    square input — TODO confirm).
 * @param nChannels   Number of input/output channels.
 * @param poolingSize Side length of the pooling window.
 * @param stride      Step between successive pooling windows.
 * @param paddingSize Border padding applied to the input — padding semantics
 *                    (zero vs. -inf for max pooling) not visible here; verify
 *                    in the implementation.
 */
__global__ void max_pooling(
    const float* __restrict__ d_input,
    float* __restrict__ d_output,
    const int inputSize,
    const int nChannels,
    const int poolingSize,
    const int stride,
    const int paddingSize
);

/**
 * @brief Average pooling kernel: fills d_output with the mean value of each
 * pooling window taken over d_input. Parameters mirror max_pooling exactly.
 *
 * NOTE(review): whether padded elements are included in the averaging
 * denominator is not visible from this header — confirm in the implementation.
 *
 * @param d_input     Device pointer to the input tensor (read-only).
 * @param d_output    Device pointer to the output tensor.
 * @param inputSize   Spatial size of the input.
 * @param nChannels   Number of input/output channels.
 * @param poolingSize Side length of the pooling window.
 * @param stride      Step between successive pooling windows.
 * @param paddingSize Border padding applied to the input.
 */
__global__ void avg_pooling(
    const float* __restrict__ d_input,
    float* __restrict__ d_output,
    const int inputSize,
    const int nChannels,
    const int poolingSize,
    const int stride,
    const int paddingSize
);

} // namespace CUDANet::Kernels

#endif // CUDANET_POOLING_H
|
||||
@@ -25,16 +25,17 @@ class Conv2d : public WeightedLayer {
|
||||
* @param stride Convolution stride
|
||||
* @param numFilters Number of output filters
|
||||
* @param padding Padding type ('SAME' or 'VALID')
|
||||
* @param activationType Activation function type ('RELU', 'SIGMOID', 'SOFTMAX' or 'NONE')
|
||||
* @param activationType Activation function type ('RELU', 'SIGMOID',
|
||||
* 'SOFTMAX' or 'NONE')
|
||||
*/
|
||||
Conv2d(
|
||||
int inputSize,
|
||||
int inputChannels,
|
||||
int kernelSize,
|
||||
int stride,
|
||||
int numFilters,
|
||||
Layers::Padding padding,
|
||||
Layers::ActivationType activationType
|
||||
int inputSize,
|
||||
int inputChannels,
|
||||
int kernelSize,
|
||||
int stride,
|
||||
int numFilters,
|
||||
Padding padding,
|
||||
ActivationType activationType
|
||||
);
|
||||
|
||||
/**
|
||||
@@ -107,7 +108,7 @@ class Conv2d : public WeightedLayer {
|
||||
float* d_biases;
|
||||
|
||||
// Kernels
|
||||
Layers::Activation activation;
|
||||
Activation activation;
|
||||
|
||||
/**
|
||||
* @brief Initialize weights of the convolutional layer with zeros
|
||||
|
||||
42
include/layers/max_pooling.cuh
Normal file
42
include/layers/max_pooling.cuh
Normal file
@@ -0,0 +1,42 @@
|
||||
#ifndef CUDANET_MAX_POOLING_H
#define CUDANET_MAX_POOLING_H

#include <cuda_runtime.h>

#include "layer.cuh"
#include "activation.cuh"

namespace CUDANet::Layers {

/**
 * @brief 2D max pooling layer with an optional activation applied to the
 * pooled output.
 *
 * Owns a device output buffer (d_output) that forward() writes into.
 *
 * NOTE(review): a user-declared destructor alongside an owned raw device
 * pointer suggests the rule of three applies — copy construction/assignment
 * of this class would double-free d_output. Consider deleting the copy
 * operations; left unchanged here to preserve the declared interface.
 */
class MaxPooling2D : public SequentialLayer {
  public:
    /**
     * @brief Construct a max pooling layer.
     *
     * @param inputSize      Spatial size of the input (presumably one side of
     *                       a square input — TODO confirm against the
     *                       implementation).
     * @param nChannels      Number of input/output channels.
     * @param poolingSize    Side length of the pooling window.
     * @param stride         Step between successive pooling windows.
     * @param padding        Padding mode; presumably converted to the
     *                       paddingSize member by the constructor — verify in
     *                       max_pooling.cu.
     * @param activationType Activation applied after pooling ('RELU',
     *                       'SIGMOID', 'SOFTMAX' or 'NONE').
     */
    MaxPooling2D(
        int inputSize,
        int nChannels,
        int poolingSize,
        int stride,
        Padding padding,
        ActivationType activationType
    );

    /**
     * @brief Destructor; presumably releases the device output buffer —
     * confirm against the implementation.
     */
    ~MaxPooling2D();

    /**
     * @brief Run max pooling (and the configured activation) on a device
     * input buffer.
     *
     * @param d_input Device pointer to the input tensor.
     * @return Pointer to the layer's output — presumably the internally owned
     *         device buffer d_output; the caller must not free it.
     */
    float* forward(const float* d_input);

  private:
    int inputSize;    // Spatial size of the input
    int nChannels;    // Number of channels
    int poolingSize;  // Side length of the pooling window
    int stride;       // Step between pooling windows
    int paddingSize;  // Border padding amount

    int outputSize;   // Spatial size of the pooled output
    int gridSize;     // Kernel launch grid size

    float* d_output;  // Device buffer for the layer's output (owned)

    Activation activation;  // Activation applied after pooling
};

} // namespace CUDANet::Layers

#endif // CUDANET_MAX_POOLING_H
|
||||
Reference in New Issue
Block a user