mirror of
https://github.com/lordmathis/CUDANet.git
WIP Migrate AvgPool2d
@@ -12,7 +12,7 @@
 namespace CUDANet {
 
 /**
- * @brief Basic Sequential Layer
+ * @brief Basic Layer
  *
  */
 class Layer {
include/layers/avg_pool.hpp (new file, 55 lines)
@@ -0,0 +1,55 @@
+#pragma once
+
+#include "layer.hpp"
+
+namespace CUDANet::Layers {
+
+class AvgPool2d : public Layer {
+  public:
+    AvgPool2d(
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Backend *backend
+    );
+
+    ~AvgPool2d();
+
+    CUDANet::Tensor& forward(CUDANet::Tensor& input) override;
+
+    CUDANet::Shape input_shape() override;
+
+    CUDANet::Shape output_shape() override;
+
+    size_t input_size() override;
+
+    size_t output_size() override;
+
+    void set_weights(void* input) override;
+
+    CUDANet::Tensor& get_weights() override;
+
+    void set_biases(void* input) override;
+
+    CUDANet::Tensor& get_biases() override;
+
+  protected:
+    CUDANet::Shape in_shape;
+
+    CUDANet::Shape pool_shape;
+    CUDANet::Shape stride_shape;
+    CUDANet::Shape padding_shape;
+
+    CUDANet::Shape out_shape;
+    CUDANet::Tensor output;
+
+    CUDANet::Backend *backend;
+};
+
+class AdaptiveAvgPool2d : public AvgPool2d {
+  public:
+    AdaptiveAvgPool2d(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::Backend *backend);
+};
+
+} // namespace CUDANet::Layers
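A minimal usage sketch of the new interface, for orientation only: the Shape literals, the {height, width, channels} dimension order, and where a Backend* comes from are assumptions, not something this diff pins down.

// Sketch only: assumes Shape is brace-constructible and a Backend* is
// obtained elsewhere in CUDANet; forward() is still a stub in this commit.
#include "avg_pool.hpp"

using namespace CUDANet;

void avg_pool_example(Backend *backend) {
    Layers::AvgPool2d pool(
        Shape{32, 32, 16},  // input: 32x32 spatial, 16 channels (assumed order)
        Shape{2, 2},        // pooling window
        Shape{2, 2},        // stride
        Shape{0, 0},        // padding
        backend
    );

    Tensor input(Shape{32 * 32 * 16}, DType::FLOAT32, backend);
    Tensor &output = pool.forward(input);  // expected output shape: 16x16x16
    (void)output;
}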
@@ -1,78 +0,0 @@
-#ifndef CUDANET_AVG_POOLING_H
-#define CUDANET_AVG_POOLING_H
-
-#include "activation.hpp"
-#include "layer.hpp"
-
-namespace CUDANet::Layers {
-
-class AvgPooling2d : public Layer, public TwoDLayer {
-  public:
-    AvgPooling2d(
-        shape2d inputSize,
-        int nChannels,
-        shape2d poolingSize,
-        shape2d stride,
-        shape2d padding,
-        ActivationType activationType
-    );
-    ~AvgPooling2d();
-
-    float* forward(const float* input);
-
-    /**
-     * @brief Get output size
-     *
-     * @return int output size
-     */
-    int get_output_size();
-
-    /**
-     * @brief Get input size
-     *
-     * @return int input size
-     */
-    int getInputSize();
-
-    shape2d getOutputDims();
-
-  protected:
-    shape2d inputSize;
-    int nChannels;
-    shape2d poolingSize;
-    shape2d stride;
-    shape2d padding;
-
-    shape2d outputSize;
-
-    Activation* activation;
-
-    float* forwardCPU(const float* input);
-
-#ifdef USE_CUDA
-    float* d_output;
-    float* forwardCUDA(const float* d_input);
-
-    void initCUDA();
-    void delCUDA();
-#endif
-};
-
-class AdaptiveAvgPooling2d : public AvgPooling2d {
-  public:
-    AdaptiveAvgPooling2d(
-        shape2d inputShape,
-        int nChannels,
-        shape2d outputShape,
-        ActivationType activationType
-    );
-
-  private:
-#ifdef USE_CUDA
-    void initCUDA();
-#endif
-};
-
-} // namespace CUDANet::Layers
-
-#endif // CUDANET_AVG_POOLING_H
@@ -1,101 +1,103 @@
 #include <stdexcept>
 
-#include "avg_pooling.hpp"
+#include "avg_pool.hpp"
+#include <format>
 
 using namespace CUDANet::Layers;
 
-AvgPooling2d::AvgPooling2d(
-    shape2d inputSize,
-    int nChannels,
-    shape2d poolingSize,
-    shape2d stride,
-    shape2d padding,
-    ActivationType activationType
+AvgPool2d::AvgPool2d(
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::Backend* backend
 )
-    : inputSize(inputSize),
-      nChannels(nChannels),
-      poolingSize(poolingSize),
-      stride(stride),
-      padding(padding) {
-    outputSize = {
-        (inputSize.first + 2 * padding.first - poolingSize.first) /
-                stride.first +
+    : in_shape(input_shape),
+      pool_shape(pool_shape),
+      stride_shape(stride_shape),
+      padding_shape(padding_shape),
+      backend(backend) {
+    if (in_shape.size() != 3) {
+        throw std::runtime_error(
+            std::format(
+                "Invalid input shape. Expected 3 dims, got {}", input_shape.size()
+            )
+        );
+    }
+
+    if (pool_shape.size() != 2) {
+        throw std::runtime_error(
+            std::format(
+                "Invalid pool shape. Expected 2 dims, got {}", pool_shape.size()
+            )
+        );
+    }
+
+    if (stride_shape.size() != 2) {
+        throw std::runtime_error(
+            std::format(
+                "Invalid stride shape. Expected 2 dims, got {}", stride_shape.size()
+            )
+        );
+    }
+
+    if (padding_shape.size() != 2) {
+        throw std::runtime_error(
+            std::format(
+                "Invalid padding shape. Expected 2 dims, got {}", padding_shape.size()
+            )
+        );
+    }
+
+    out_shape = {
+        (in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
             1,
-        (inputSize.second + 2 * padding.second - poolingSize.second) /
-                stride.second +
-            1
+        (in_shape[1] + 2 * padding_shape[1] - pool_shape[1]) / stride_shape[1] +
+            1,
+        in_shape[2]
     };
 
-    activation = new Activation(
-        activationType, outputSize.first * outputSize.second * nChannels
+    output = CUDANet::Tensor(
+        Shape{out_shape[0] * out_shape[1] * out_shape[2]},
+        CUDANet::DType::FLOAT32, backend
     );
-
-#ifdef USE_CUDA
-    initCUDA();
-#endif
 }
 
-AvgPooling2d::~AvgPooling2d() {
-#ifdef USE_CUDA
-    delCUDA();
-#endif
-    delete activation;
-}
+AvgPool2d::~AvgPool2d() {}
 
-float* AvgPooling2d::forwardCPU(const float* input) {
-    throw std::logic_error("Not implemented");
-}
+CUDANet::Tensor& AvgPool2d::forward(CUDANet::Tensor& input);
 
-float* AvgPooling2d::forward(const float* input) {
-#ifdef USE_CUDA
-    return forwardCUDA(input);
-#else
-    return forwardCPU(input);
-#endif
-}
+CUDANet::Shape AvgPool2d::input_shape();
 
-int AvgPooling2d::get_output_size() {
-    return outputSize.first * outputSize.second * nChannels;
-}
+CUDANet::Shape AvgPool2d::output_shape();
 
-int AvgPooling2d::getInputSize() {
-    return inputSize.first * inputSize.second * nChannels;
-}
+size_t AvgPool2d::input_size();
 
-shape2d AvgPooling2d::getOutputDims() {
-    return outputSize;
-}
+size_t AvgPool2d::output_size();
 
-AdaptiveAvgPooling2d::AdaptiveAvgPooling2d(
-    shape2d inputShape,
-    int nChannels,
-    shape2d outputShape,
-    ActivationType activationType
+void AvgPool2d::set_weights(void* input);
+
+CUDANet::Tensor& AvgPool2d::get_weights();
+
+void AvgPool2d::set_biases(void* input);
+
+CUDANet::Tensor& AvgPool2d::get_biases();
+
+
+AdaptiveAvgPool2d::AdaptiveAvgPool2d(
+    CUDANet::Shape input_shape,
+    CUDANet::Shape output_shape,
+    CUDANet::Backend *backend
 )
-    : AvgPooling2d(
-          inputShape,
-          nChannels,
-          {1, 1},
-          {1, 1},
-          {0, 0},
-          activationType
-      ) {
-    stride = {
-        inputShape.first / outputShape.first,
-        inputShape.second / outputShape.second
+    : AvgPool2d(input_shape, {1, 1}, {1, 1}, {0, 0}, backend) {
+    stride_shape = {
+        input_shape[0] / output_shape[0],
+        input_shape[1] / output_shape[1]
     };
-    poolingSize = {
-        inputShape.first - (outputShape.first - 1) * stride.first,
-        inputShape.second - (outputShape.second - 1) * stride.second
+    pool_shape = {
+        input_shape[0] - (output_shape[0] - 1) * stride_shape[0],
+        input_shape[1] - (output_shape[1] - 1) * stride_shape[1]
    };
-    padding = {(poolingSize.first - 1) / 2, (poolingSize.second - 1) / 2};
-    outputSize = outputShape;
-
-    activation = new Activation(
-        activationType, outputSize.first * outputSize.second * nChannels
-    );
-
-#ifdef USE_CUDA
-    initCUDA();
-#endif
+    padding_shape = {(pool_shape[0] - 1) / 2, (pool_shape[1] - 1) / 2};
+    out_shape = output_shape;
 }
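As a sanity check on the migrated shape arithmetic, here is a small standalone sketch (plain C++, no CUDANet types; the {height, width, channels} dimension order is an assumption based on how out_shape is built above) that mirrors the two computations: the pooled output size and the parameters AdaptiveAvgPool2d derives from a target output shape.

#include <cstdio>

// Mirrors AvgPool2d: out = (in + 2*pad - pool) / stride + 1 per spatial dim.
int pooled_dim(int in, int pool, int stride, int pad) {
    return (in + 2 * pad - pool) / stride + 1;
}

int main() {
    // AvgPool2d example: 32x32 input, 2x2 pool, stride 2, no padding -> 16x16.
    std::printf("avg pool: %d x %d\n",
                pooled_dim(32, 2, 2, 0), pooled_dim(32, 2, 2, 0));

    // AdaptiveAvgPool2d example: 10x10 input, target 3x3 output.
    int in = 10, out = 3;
    int stride = in / out;                 // 3
    int pool   = in - (out - 1) * stride;  // 10 - 2*3 = 4
    int pad    = (pool - 1) / 2;           // (4 - 1) / 2 = 1
    // Check: (10 + 2*1 - 4) / 3 + 1 = 2 + 1 = 3, the requested output dim.
    std::printf("adaptive: stride=%d pool=%d pad=%d -> %d\n",
                stride, pool, pad, pooled_dim(in, pool, stride, pad));
    return 0;
}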
@@ -23,7 +23,7 @@ Conv2d::Conv2d(
     if (in_shape.size() != 3) {
         throw std::runtime_error(
             std::format(
-                "Invalid input shape. Expected 3 dims, got {}", in_shape
+                "Invalid input shape. Expected 3 dims, got {}", in_shape.size()
             )
         );
     }
@@ -31,7 +31,7 @@ Conv2d::Conv2d(
     if (kernel_shape.size() != 3) {
         throw std::runtime_error(
             std::format(
-                "Invalid kernel shape. Expected 3 dims, got {}", kernel_shape
+                "Invalid kernel shape. Expected 3 dims, got {}", kernel_shape.size()
             )
         );
     }
@@ -39,7 +39,7 @@ Conv2d::Conv2d(
     if (stride_shape.size() != 2) {
         throw std::runtime_error(
             std::format(
-                "Invalid stride shape. Expected 2 dims, got {}", stride_shape
+                "Invalid stride shape. Expected 2 dims, got {}", stride_shape.size()
            )
        );
    }
@@ -47,7 +47,7 @@ Conv2d::Conv2d(
     if (padding_shape.size() != 2) {
         throw std::runtime_error(
             std::format(
-                "Invalid padding shape. Expected 2 dims, got {}", padding_shape
+                "Invalid padding shape. Expected 2 dims, got {}", padding_shape.size()
             )
         );
     }
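The Conv2d hunks pass .size() to std::format instead of the shape object itself, presumably because std::format only accepts arguments with a std::formatter specialization; assuming CUDANet::Shape is a vector-like container without one, formatting it directly would not compile, while the count from .size() formats fine. A minimal illustration with std::vector standing in for Shape (an assumption, not the actual CUDANet type):

#include <format>
#include <iostream>
#include <vector>

int main() {
    std::vector<size_t> shape{32, 32, 16};  // stand-in for CUDANet::Shape

    // std::format("{}", shape) would fail to compile without a
    // std::formatter<std::vector<size_t>> specialization (pre-C++23 range
    // formatting), so the diff formats the element count instead:
    std::cout << std::format("Invalid input shape. Expected 3 dims, got {}",
                             shape.size())
              << '\n';
    return 0;
}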