Add support for non-square matrices

This commit is contained in:
2024-05-20 15:20:43 +02:00
parent 6f8b5f4081
commit 74098b24e3
21 changed files with 314 additions and 299 deletions

View File

@@ -9,10 +9,10 @@ namespace CUDANet::Layers {
class AvgPooling2D : public SequentialLayer {
public:
AvgPooling2D(
int inputSize,
dim2d inputSize,
int nChannels,
int poolingSize,
int stride,
dim2d poolingSize,
dim2d stride,
ActivationType activationType
);
~AvgPooling2D();
@@ -28,18 +28,18 @@ class AvgPooling2D : public SequentialLayer {
/**
* @brief Get input size
*
*
* @return int input size
*/
int getInputSize();
private:
int inputSize;
int nChannels;
int poolingSize;
int stride;
dim2d inputSize;
int nChannels;
dim2d poolingSize;
dim2d stride;
int outputSize;
dim2d outputSize;
float* d_output;

View File

@@ -10,7 +10,7 @@ namespace CUDANet::Layers {
class BatchNorm2D : public WeightedLayer {
public:
BatchNorm2D(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
BatchNorm2D(dim2d inputSize, int inputChannels, float epsilon, ActivationType activationType);
~BatchNorm2D();
@@ -66,7 +66,7 @@ class BatchNorm2D : public WeightedLayer {
private:
int inputSize;
dim2d inputSize;
int inputChannels;
int gridSize;

View File

@@ -28,12 +28,12 @@ class Conv2d : public WeightedLayer {
* 'SOFTMAX' or 'NONE')
*/
Conv2d(
int inputSize,
dim2d inputSize,
int inputChannels,
int kernelSize,
int stride,
dim2d kernelSize,
dim2d stride,
int numFilters,
int paddingSize,
dim2d paddingSize,
ActivationType activationType
);
@@ -98,23 +98,23 @@ class Conv2d : public WeightedLayer {
*
* @return int
*/
int getPaddingSize() {
dim2d getPaddingSize() {
return paddingSize;
}
private:
// Inputs
int inputSize;
int inputChannels;
dim2d inputSize;
int inputChannels;
// Outputs
int outputSize;
dim2d outputSize;
// Kernel
int kernelSize;
int stride;
int paddingSize;
int numFilters;
dim2d kernelSize;
dim2d stride;
dim2d paddingSize;
int numFilters;
// Kernels
std::vector<float> weights;

View File

@@ -81,8 +81,8 @@ class Dense : public WeightedLayer {
int getInputSize();
private:
unsigned int inputSize;
unsigned int outputSize;
int inputSize;
int outputSize;
float* d_output;
@@ -95,8 +95,8 @@ class Dense : public WeightedLayer {
Layers::Activation* activation;
// Precompute kernel launch parameters
unsigned int forwardGridSize;
unsigned int biasGridSize;
int forwardGridSize;
int biasGridSize;
/**
* @brief Initialize the weights to zeros

View File

@@ -7,6 +7,8 @@
#define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) \
((stride - 1) * inputSize - stride + kernelSize) / 2;
typedef std::pair<int, int> dim2d;
namespace CUDANet::Layers {
/**

View File

@@ -9,10 +9,10 @@ namespace CUDANet::Layers {
class MaxPooling2D : public SequentialLayer {
public:
MaxPooling2D(
int inputSize,
dim2d inputSize,
int nChannels,
int poolingSize,
int stride,
dim2d poolingSize,
dim2d stride,
ActivationType activationType
);
~MaxPooling2D();
@@ -28,18 +28,18 @@ class MaxPooling2D : public SequentialLayer {
/**
* @brief Get input size
*
*
* @return int input size
*/
int getInputSize();
private:
int inputSize;
int nChannels;
int poolingSize;
int stride;
dim2d inputSize;
int nChannels;
dim2d poolingSize;
dim2d stride;
int outputSize;
dim2d outputSize;
float* d_output;