Mirror of https://github.com/lordmathis/CUDANet.git
(synced 2025-12-22 14:24:22 +00:00)
Commit: Migrate MaxPool2d layer to Tensors
The following files are changed in this commit:
@@ -1,66 +0,0 @@
|
||||
#ifndef CUDANET_INPUT_LAYER_H
|
||||
#define CUDANET_INPUT_LAYER_H
|
||||
|
||||
#include "layer.hpp"
|
||||
|
||||
namespace CUDANet::Layers {
|
||||
|
||||
/**
|
||||
* @brief Input layer, just copies the input to the device
|
||||
*
|
||||
*/
|
||||
class Input : public Layer {
|
||||
public:
|
||||
/**
|
||||
* @brief Create a new Input layer
|
||||
*
|
||||
* @param inputSize Size of the input vector
|
||||
*/
|
||||
explicit Input(int inputSize);
|
||||
|
||||
/**
|
||||
* @brief Destroy the Input layer
|
||||
*
|
||||
*/
|
||||
~Input();
|
||||
|
||||
/**
|
||||
* @brief Forward pass of the input layer. Just copies the input to the
|
||||
* device
|
||||
*
|
||||
* @param input Host pointer to the input vector
|
||||
* @return Device pointer to the output vector
|
||||
*/
|
||||
float* forward(const float* input);
|
||||
|
||||
/**
|
||||
* @brief Get output size
|
||||
*
|
||||
* @return int output size
|
||||
*/
|
||||
int get_output_size();
|
||||
|
||||
/**
|
||||
* @brief Get input size
|
||||
*
|
||||
* @return int input size
|
||||
*/
|
||||
int getInputSize();
|
||||
|
||||
private:
|
||||
int inputSize;
|
||||
|
||||
float* forwardCPU(const float* input);
|
||||
|
||||
#ifdef USE_CUDA
|
||||
float* d_output;
|
||||
|
||||
float* forwardCUDA(const float* input);
|
||||
void initCUDA();
|
||||
void delCUDA();
|
||||
#endif
|
||||
};
|
||||
|
||||
} // namespace CUDANet::Layers
|
||||
|
||||
#endif // CUDANET_INPUT_LAYER_H
|
||||
51 lines — include/layers/max_pool.hpp (new file)
@@ -0,0 +1,51 @@
#pragma once

#include "layer.hpp"

namespace CUDANet::Layers {

/**
 * @brief 2D max pooling layer, Tensor-based API.
 *
 * Replaces the older float*-based MaxPooling2d. Pooling is configured
 * entirely through Shape values; the compute device is selected by the
 * Backend passed to the constructor.
 */
class MaxPool2d : public Layer {
  public:
    /**
     * @brief Create a new MaxPool2d layer
     *
     * @param input_shape   Shape of the input tensor
     * @param pooling_shape Shape of the pooling window
     * @param stride_shape  Stride of the pooling window
     * @param padding_shape Padding applied to the input
     * @param backend       Backend used for computation (not owned;
     *                      must outlive the layer — TODO confirm)
     */
    MaxPool2d(
        CUDANet::Shape input_shape,
        CUDANet::Shape pooling_shape,
        CUDANet::Shape stride_shape,
        CUDANet::Shape padding_shape,
        CUDANet::Backend* backend
    );
    ~MaxPool2d();

    /**
     * @brief Forward pass: applies max pooling to the input tensor.
     *
     * @param input Input tensor
     * @return Reference to the output tensor (owned by this layer)
     */
    CUDANet::Tensor& forward(CUDANet::Tensor &input) override;

    /// @brief Shape of the expected input tensor
    CUDANet::Shape input_shape() override;

    /// @brief Shape of the produced output tensor
    CUDANet::Shape output_shape() override;

    /// @brief Total number of elements in the input tensor
    size_t input_size() override;

    /// @brief Total number of elements in the output tensor
    size_t output_size() override;

    /// @brief No-op interface requirement: max pooling has no weights.
    /// (NOTE(review): presumably ignores input — confirm in the .cpp)
    void set_weights(void *input) override;

    CUDANet::Tensor& get_weights() override;

    /// @brief No-op interface requirement: max pooling has no biases.
    /// (NOTE(review): presumably ignores input — confirm in the .cpp)
    void set_biases(void *input) override;

    CUDANet::Tensor& get_biases() override;

  private:
    // Input tensor shape, captured at construction.
    CUDANet::Shape in_shape;

    // Pooling window / stride / padding configuration.
    CUDANet::Shape pooling_shape;
    CUDANet::Shape stride_shape;
    CUDANet::Shape padding_shape;

    // Output shape derived from the configuration above.
    CUDANet::Shape out_shape;
    // Output tensor reused across forward() calls.
    CUDANet::Tensor output;

    // Compute backend; not owned by this layer.
    CUDANet::Backend *backend;
};

}  // namespace CUDANet::Layers
@@ -1,63 +0,0 @@
#ifndef CUDANET_MAX_POOLING_H
#define CUDANET_MAX_POOLING_H

#include "activation.hpp"
#include "layer.hpp"

namespace CUDANet::Layers {

/**
 * @brief Legacy 2D max pooling layer (float*-based API).
 */
class MaxPooling2d : public Layer, public TwoDLayer {
  public:
    /**
     * @brief Create a new MaxPooling2d layer
     *
     * @param inputSize      Spatial dimensions of the input
     * @param nChannels      Number of input channels
     * @param poolingSize    Spatial dimensions of the pooling window
     * @param stride         Stride of the pooling window
     * @param padding        Padding applied to the input
     * @param activationType Activation applied after pooling
     */
    MaxPooling2d(
        shape2d inputSize,
        int nChannels,
        shape2d poolingSize,
        shape2d stride,
        shape2d padding,
        ActivationType activationType
    );
    ~MaxPooling2d();

    /**
     * @brief Forward pass: applies max pooling (and the configured
     * activation) to the input.
     *
     * @param input Pointer to the input vector
     * @return Pointer to the output vector
     */
    float* forward(const float* input);

    /**
     * @brief Get output size
     *
     * @return int output size
     */
    int get_output_size();

    /**
     * @brief Get input size
     *
     * @return int input size
     */
    int getInputSize();

    /// @brief Spatial dimensions of the output
    shape2d getOutputDims();

  private:
    // Configuration captured at construction.
    shape2d inputSize;
    int nChannels;
    shape2d poolingSize;
    shape2d stride;
    shape2d padding;

    // Output spatial dimensions derived from the configuration above.
    shape2d outputSize;

    // Activation applied after pooling.
    // NOTE(review): raw owning pointer — presumably created in the ctor
    // and released in the dtor; confirm in the .cpp.
    Activation* activation;

    // CPU fallback for forward(); same contract as forward().
    float* forwardCPU(const float* input);

#ifdef USE_CUDA
    // Device-side output buffer; allocated in initCUDA, released in delCUDA.
    float* d_output;
    // CUDA path for forward(); same contract as forward().
    float* forwardCUDA(const float* d_input);

    void initCUDA();
    void delCUDA();
#endif
};

}  // namespace CUDANet::Layers

#endif  // CUDANET_MAX_POOLING_H
@@ -1,59 +0,0 @@
#ifndef CUDANET_OUTPUT_LAYER_H
#define CUDANET_OUTPUT_LAYER_H

#include "layer.hpp"

namespace CUDANet::Layers {

/**
 * @brief Output layer, just copies the input from device to host
 */
class Output : public Layer {
  public:
    /**
     * @brief Create a new Output layer
     *
     * @param inputSize Size of the input vector
     */
    explicit Output(int inputSize);

    /**
     * @brief Destroy the Output layer
     */
    ~Output();

    /**
     * @brief Forward pass of the output layer. Just copies the input from
     * device to host
     *
     * @param input Device pointer to the input vector
     * @return Host pointer to the output vector
     */
    float* forward(const float* input);

    /**
     * @brief Get output size
     *
     * @return int output size
     */
    int get_output_size();

    /**
     * @brief Get input size
     *
     * @return int input size
     */
    int getInputSize();

  private:
    // Length of the input vector, as passed to the constructor.
    int inputSize;
    // Host-side output buffer returned by forward().
    float* h_output;

    // CPU fallback for forward(); same contract as forward().
    float* forwardCPU(const float* input);

#ifdef USE_CUDA
    // CUDA path for forward(); same contract as forward().
    float* forwardCUDA(const float* input);
#endif
};

}  // namespace CUDANet::Layers

#endif  // CUDANET_OUTPUT_LAYER_H
Reference in New Issue
Block a user