Mirror of https://github.com/lordmathis/CUDANet.git, synced 2025-11-05 17:34:21 +00:00
Add getOutputDims to 2d layers
@@ -6,7 +6,7 @@
 
 namespace CUDANet::Layers {
 
-class AvgPooling2d : public SequentialLayer {
+class AvgPooling2d : public SequentialLayer, public TwoDLayer {
   public:
     AvgPooling2d(
         dim2d inputSize,
@@ -33,6 +33,8 @@ class AvgPooling2d : public SequentialLayer {
     */
     int getInputSize();
 
+    dim2d getOutputDims();
+
   private:
     dim2d inputSize;
     int nChannels;
@@ -8,7 +8,7 @@
 
 namespace CUDANet::Layers {
 
-class BatchNorm2d : public WeightedLayer {
+class BatchNorm2d : public WeightedLayer, public TwoDLayer {
   public:
     BatchNorm2d(dim2d inputSize, int inputChannels, float epsilon, ActivationType activationType);
 
@@ -64,6 +64,8 @@ class BatchNorm2d : public WeightedLayer {
     */
     int getInputSize();
 
+    dim2d getOutputDims();
+
   private:
 
     dim2d inputSize;
@@ -13,7 +13,7 @@ namespace CUDANet::Layers {
  * @brief 2D convolutional layer
  *
  */
-class Conv2d : public WeightedLayer {
+class Conv2d : public WeightedLayer, public TwoDLayer {
   public:
     /**
      * @brief Construct a new Conv 2d layer
@@ -102,6 +102,8 @@ class Conv2d : public WeightedLayer {
         return paddingSize;
     }
 
+    dim2d getOutputDims();
+
   private:
     // Inputs
     dim2d inputSize;
@@ -11,6 +11,15 @@ typedef std::pair<int, int> dim2d;
 
 namespace CUDANet::Layers {
 
+
+class TwoDLayer {
+
+  public:
+    virtual dim2d getOutputDims() = 0;
+
+};
+
+
 /**
  * @brief Basic Sequential Layer
  *
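Note: the TwoDLayer interface added above exposes a layer's spatial output shape as a dim2d pair, separate from the flattened element count returned by getOutputSize(). Below is a minimal, self-contained sketch of how the interface could be implemented and queried; the HalvingLayer class and the 32x32 input are hypothetical illustrations, not part of this commit.

#include <cstdio>
#include <utility>

typedef std::pair<int, int> dim2d;

// Abstract interface, as introduced in this commit.
class TwoDLayer {
  public:
    virtual dim2d getOutputDims() = 0;
};

// Hypothetical layer that halves each spatial dimension,
// roughly what a 2x2 pooling with stride 2 would do.
class HalvingLayer : public TwoDLayer {
  public:
    explicit HalvingLayer(dim2d inputSize) : inputSize(inputSize) {}
    dim2d getOutputDims() {
        return {inputSize.first / 2, inputSize.second / 2};
    }

  private:
    dim2d inputSize;
};

int main() {
    HalvingLayer pool({32, 32});
    dim2d out = pool.getOutputDims();
    std::printf("output dims: %d x %d\n", out.first, out.second);  // prints 16 x 16
    return 0;
}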
@@ -6,7 +6,7 @@
 
 namespace CUDANet::Layers {
 
-class MaxPooling2d : public SequentialLayer {
+class MaxPooling2d : public SequentialLayer, public TwoDLayer {
   public:
     MaxPooling2d(
         dim2d inputSize,
@@ -33,6 +33,8 @@ class MaxPooling2d : public SequentialLayer {
     */
     int getInputSize();
 
+    dim2d getOutputDims();
+
   private:
     dim2d inputSize;
     int nChannels;
@@ -61,4 +61,8 @@ int AvgPooling2d::getOutputSize() {
 
 int AvgPooling2d::getInputSize() {
     return inputSize.first * inputSize.second * nChannels;
 }
+
+dim2d AvgPooling2d::getOutputDims() {
+    return outputSize;
+}
@@ -128,6 +128,10 @@ int BatchNorm2d::getOutputSize() {
     return inputSize.first * inputSize.second * inputChannels;
 }
 
+dim2d BatchNorm2d::getOutputDims() {
+    return inputSize;
+}
+
 float *BatchNorm2d::forward(const float *d_input) {
     // Compute per-channel batch normalization
     for (int i = 0; i < inputChannels; i++) {
@@ -137,4 +137,8 @@ int Conv2d::getOutputSize() {
 
 int Conv2d::getInputSize() {
     return inputSize.first * inputSize.second * inputChannels;
 }
+
+dim2d Conv2d::getOutputDims() {
+    return outputSize;
+}
@@ -59,4 +59,8 @@ int MaxPooling2d::getOutputSize() {
 
 int MaxPooling2d::getInputSize() {
     return inputSize.first * inputSize.second * nChannels;
 }
+
+dim2d MaxPooling2d::getOutputDims() {
+    return outputSize;
+}
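Note: in the implementations above, Conv2d, AvgPooling2d, and MaxPooling2d return their precomputed outputSize member, while BatchNorm2d returns inputSize, since batch normalization leaves the spatial shape unchanged. The flattened element count relates to these dims via the height * width * channels convention visible in the getInputSize bodies; a small sketch of that arithmetic, using example values (16x16 dims, 3 channels) that are assumptions rather than anything from the commit:

#include <cassert>
#include <utility>

typedef std::pair<int, int> dim2d;

int main() {
    dim2d dims = {16, 16};   // e.g. what a layer's getOutputDims() might return
    int nChannels = 3;       // assumed channel count

    // Flattened size, mirroring the inputSize.first * inputSize.second * nChannels
    // pattern used in the getInputSize() implementations above.
    int flatSize = dims.first * dims.second * nChannels;
    assert(flatSize == 16 * 16 * 3);  // 768 elements
    return 0;
}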