WIP Migrate AvgPool2d

This commit is contained in:
2025-11-19 23:21:18 +01:00
parent e4d05931d4
commit 6685aa6629
5 changed files with 141 additions and 162 deletions

View File

@@ -12,7 +12,7 @@
namespace CUDANet { namespace CUDANet {
/** /**
* @brief Basic Sequential Layer * @brief Basic Layer
* *
*/ */
class Layer { class Layer {

View File

@@ -0,0 +1,55 @@
#pragma once
#include "layer.hpp"
namespace CUDANet::Layers {
/**
 * @brief 2D average pooling layer.
 *
 * NOTE(review): shape conventions are not visible in this header — the
 * matching .cpp validates in_shape as 3 dims and pool/stride/padding as
 * 2 dims each; confirm the dim ordering against CUDANet::Shape docs.
 */
class AvgPool2d : public Layer {
public:
/**
 * @brief Construct the pooling layer.
 *
 * @param input_shape   Input shape (3 dims; validated in the .cpp).
 * @param pool_shape    Pooling window shape (2 dims).
 * @param stride_shape  Stride shape (2 dims).
 * @param padding_shape Padding shape (2 dims).
 * @param backend       Backend used for tensor allocation (not owned).
 */
AvgPool2d(
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Backend *backend
);
~AvgPool2d();
// Runs average pooling over the input and returns the layer's output tensor.
CUDANet::Tensor& forward(CUDANet::Tensor& input) override;
CUDANet::Shape input_shape() override;
CUDANet::Shape output_shape() override;
size_t input_size() override;
size_t output_size() override;
// NOTE(review): pooling presumably has no learnable weights/biases — these
// appear only to satisfy the Layer interface; confirm once implemented.
void set_weights(void* input) override;
CUDANet::Tensor& get_weights() override;
void set_biases(void* input) override;
CUDANet::Tensor& get_biases() override;
protected:
// Shapes captured from the constructor arguments.
CUDANet::Shape in_shape;
CUDANet::Shape pool_shape;
CUDANet::Shape stride_shape;
CUDANet::Shape padding_shape;
// Output shape derived from the pooling arithmetic in the constructor.
CUDANet::Shape out_shape;
// Flattened output buffer allocated on `backend`.
CUDANet::Tensor output;
// Non-owning backend pointer.
CUDANet::Backend *backend;
};
/**
 * @brief Adaptive average pooling: derives pool/stride/padding so that an
 * input of input_shape is reduced to output_shape (see the .cpp constructor).
 */
class AdaptiveAvgPool2d : public AvgPool2d {
public:
AdaptiveAvgPool2d(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::Backend *backend);
};
} // namespace CUDANet::Layers

View File

@@ -1,78 +0,0 @@
#ifndef CUDANET_AVG_POOLING_H
#define CUDANET_AVG_POOLING_H
#include "activation.hpp"
#include "layer.hpp"
namespace CUDANet::Layers {
/**
 * @brief Legacy 2D average pooling layer (pre-migration API).
 *
 * NOTE(review): removed in this commit in favor of AvgPool2d.
 */
class AvgPooling2d : public Layer, public TwoDLayer {
public:
/**
 * @brief Construct the pooling layer.
 *
 * @param inputSize      2D input size (pair).
 * @param nChannels      Number of channels.
 * @param poolingSize    Pooling window size.
 * @param stride         Stride.
 * @param padding        Padding.
 * @param activationType Activation applied to the pooled output.
 */
AvgPooling2d(
shape2d inputSize,
int nChannels,
shape2d poolingSize,
shape2d stride,
shape2d padding,
ActivationType activationType
);
~AvgPooling2d();
// Dispatches to forwardCUDA when built with USE_CUDA, else forwardCPU.
float* forward(const float* input);
/**
 * @brief Get output size
 *
 * @return int output size
 */
int get_output_size();
/**
 * @brief Get input size
 *
 * @return int input size
 */
int getInputSize();
// 2D spatial dimensions of the output.
shape2d getOutputDims();
protected:
shape2d inputSize;
int nChannels;
shape2d poolingSize;
shape2d stride;
shape2d padding;
shape2d outputSize;
// Owned; deleted in the destructor.
Activation* activation;
// CPU path (the .cpp throws "Not implemented").
float* forwardCPU(const float* input);
#ifdef USE_CUDA
// Device-side output buffer.
float* d_output;
float* forwardCUDA(const float* d_input);
void initCUDA();
void delCUDA();
#endif
};
/**
 * @brief Legacy adaptive average pooling: derives pooling size, stride and
 * padding so the input is reduced to outputShape.
 *
 * NOTE(review): removed in this commit in favor of AdaptiveAvgPool2d.
 */
class AdaptiveAvgPooling2d : public AvgPooling2d {
public:
AdaptiveAvgPooling2d(
shape2d inputShape,
int nChannels,
shape2d outputShape,
ActivationType activationType
);
private:
#ifdef USE_CUDA
// Re-initializes device buffers after the shape fix-up in the constructor.
void initCUDA();
#endif
};
} // namespace CUDANet::Layers
#endif // CUDANET_AVG_POOLING_H

View File

@@ -1,101 +1,103 @@
#include <stdexcept> #include <stdexcept>
#include "avg_pooling.hpp" #include "avg_pool.hpp"
#include <format>
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
/**
 * @brief Construct a 2D average pooling layer.
 *
 * Validates the argument shapes, derives the output shape with the standard
 * pooling arithmetic, and allocates the flattened output tensor on the
 * given backend.
 *
 * @param input_shape   Input shape, must have 3 dims.
 * @param pool_shape    Pooling window, must have 2 dims.
 * @param stride_shape  Stride, must have 2 dims.
 * @param padding_shape Padding, must have 2 dims.
 * @param backend       Backend used for tensor allocation (not owned).
 * @throws std::runtime_error if any shape has the wrong number of dims.
 */
AvgPool2d::AvgPool2d(
    CUDANet::Shape input_shape,
    CUDANet::Shape pool_shape,
    CUDANet::Shape stride_shape,
    CUDANet::Shape padding_shape,
    CUDANet::Backend* backend
)
    : in_shape(input_shape),
      pool_shape(pool_shape),
      stride_shape(stride_shape),
      padding_shape(padding_shape),
      backend(backend) {
    if (in_shape.size() != 3) {
        throw std::runtime_error(std::format(
            "Invalid input shape. Expected 3 dims, got {}", input_shape.size()
        ));
    }
    if (pool_shape.size() != 2) {
        throw std::runtime_error(std::format(
            "Invalid pool shape. Expected 2 dims, got {}", pool_shape.size()
        ));
    }
    if (stride_shape.size() != 2) {
        throw std::runtime_error(std::format(
            "Invalid stride shape. Expected 2 dims, got {}", stride_shape.size()
        ));
    }
    if (padding_shape.size() != 2) {
        throw std::runtime_error(std::format(
            "Invalid padding shape. Expected 2 dims, got {}", padding_shape.size()
        ));
    }
    // Standard pooling output arithmetic: (in + 2*pad - pool) / stride + 1,
    // with the third input dim carried through unchanged.
    out_shape = {
        (in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
            1,
        (in_shape[1] + 2 * padding_shape[1] - pool_shape[1]) / stride_shape[1] +
            1,
        in_shape[2]
    };
    output = CUDANet::Tensor(
        Shape{out_shape[0] * out_shape[1] * out_shape[2]},
        CUDANet::DType::FLOAT32, backend
    );
}

// `output` is a value member and `backend` is non-owning, so there is
// nothing to release here.
AvgPool2d::~AvgPool2d() {}
float* AvgPooling2d::forwardCPU(const float* input) { CUDANet::Tensor& AvgPool2d::forward(CUDANet::Tensor& input);
throw std::logic_error("Not implemented");
}
float* AvgPooling2d::forward(const float* input) { CUDANet::Shape AvgPool2d::input_shape();
#ifdef USE_CUDA
return forwardCUDA(input);
#else
return forwardCPU(input);
#endif
}
int AvgPooling2d::get_output_size() { CUDANet::Shape AvgPool2d::output_shape();
return outputSize.first * outputSize.second * nChannels;
}
int AvgPooling2d::getInputSize() { size_t AvgPool2d::input_size();
return inputSize.first * inputSize.second * nChannels;
}
shape2d AvgPooling2d::getOutputDims() { size_t AvgPool2d::output_size();
return outputSize;
}
AdaptiveAvgPooling2d::AdaptiveAvgPooling2d( void AvgPool2d::set_weights(void* input);
shape2d inputShape,
int nChannels, CUDANet::Tensor& AvgPool2d::get_weights();
shape2d outputShape,
ActivationType activationType void AvgPool2d::set_biases(void* input);
CUDANet::Tensor& AvgPool2d::get_biases();
/**
 * @brief Adaptive average pooling: derives pool/stride/padding so that an
 * input of input_shape is reduced to output_shape.
 *
 * @param input_shape  Input shape (3 dims; validated by the base ctor).
 * @param output_shape Target spatial shape, must have 2 dims.
 * @param backend      Backend used for tensor allocation (not owned).
 * @throws std::runtime_error if output_shape has the wrong number of dims.
 */
AdaptiveAvgPool2d::AdaptiveAvgPool2d(
    CUDANet::Shape input_shape,
    CUDANet::Shape output_shape,
    CUDANet::Backend *backend
)
    : AvgPool2d(input_shape, {1, 1}, {1, 1}, {0, 0}, backend) {
    if (output_shape.size() != 2) {
        throw std::runtime_error(std::format(
            "Invalid output shape. Expected 2 dims, got {}", output_shape.size()
        ));
    }
    // Derive pooling parameters from the requested output size.
    stride_shape = {
        input_shape[0] / output_shape[0],
        input_shape[1] / output_shape[1]
    };
    pool_shape = {
        input_shape[0] - (output_shape[0] - 1) * stride_shape[0],
        input_shape[1] - (output_shape[1] - 1) * stride_shape[1]
    };
    padding_shape = {(pool_shape[0] - 1) / 2, (pool_shape[1] - 1) / 2};
    // BUG FIX 1: keep out_shape 3-dimensional (two spatial dims plus the
    // carried-through third dim), consistent with the base class, rather
    // than assigning the 2-dim output_shape directly.
    out_shape = {output_shape[0], output_shape[1], input_shape[2]};
    // BUG FIX 2: the base constructor sized `output` for a 1x1 pooling of
    // the input; reallocate it for the real adaptive output shape (the old
    // code re-created its buffers here for the same reason).
    output = CUDANet::Tensor(
        Shape{out_shape[0] * out_shape[1] * out_shape[2]},
        CUDANet::DType::FLOAT32, backend
    );
}

View File

@@ -23,7 +23,7 @@ Conv2d::Conv2d(
if (in_shape.size() != 3) { if (in_shape.size() != 3) {
throw std::runtime_error( throw std::runtime_error(
std::format( std::format(
"Invalid input shape. Expected 3 dims, got {}", in_shape "Invalid input shape. Expected 3 dims, got {}", in_shape.size()
) )
); );
} }
@@ -31,7 +31,7 @@ Conv2d::Conv2d(
if (kernel_shape.size() != 3) { if (kernel_shape.size() != 3) {
throw std::runtime_error( throw std::runtime_error(
std::format( std::format(
"Invalid kernel shape. Expected 3 dims, got {}", kernel_shape "Invalid kernel shape. Expected 3 dims, got {}", kernel_shape.size()
) )
); );
} }
@@ -39,7 +39,7 @@ Conv2d::Conv2d(
if (stride_shape.size() != 2) { if (stride_shape.size() != 2) {
throw std::runtime_error( throw std::runtime_error(
std::format( std::format(
"Invalid stride shape. Expected 2 dims, got {}", stride_shape "Invalid stride shape. Expected 2 dims, got {}", stride_shape.size()
) )
); );
} }
@@ -47,7 +47,7 @@ Conv2d::Conv2d(
if (padding_shape.size() != 2) { if (padding_shape.size() != 2) {
throw std::runtime_error( throw std::runtime_error(
std::format( std::format(
"Invalid padding shape. Expected 2 dims, got {}", padding_shape "Invalid padding shape. Expected 2 dims, got {}", padding_shape.size()
) )
); );
} }