diff --git a/examples/alexnet/alexnet.cpp b/examples/alexnet/alexnet.cpp index 4a90abd..e28787e 100644 --- a/examples/alexnet/alexnet.cpp +++ b/examples/alexnet/alexnet.cpp @@ -32,7 +32,7 @@ readAndNormalizeImage(const std::string &imagePath, int width, int height) { } CUDANet::Model *createModel( - const dim2d inputSize, + const shape2d inputSize, const int inputChannels, const int outputSize ) { @@ -112,7 +112,7 @@ int main(int argc, const char *const argv[]) { std::string modelWeightsPath = argv[1]; std::string imagePath = argv[2]; - const dim2d inputSize = {227, 227}; + const shape2d inputSize = {227, 227}; const int inputChannels = 3; const int outputSize = 1000; diff --git a/examples/inception_v3/inception_v3.cpp b/examples/inception_v3/inception_v3.cpp index 8028709..243f18a 100644 --- a/examples/inception_v3/inception_v3.cpp +++ b/examples/inception_v3/inception_v3.cpp @@ -14,12 +14,12 @@ int main(int argc, const char *const argv[]) { class BasicConv2d : public CUDANet::Module { public: BasicConv2d( - const dim2d inputSize, + const shape2d inputSize, const int inputChannels, const int outputChannels, - const dim2d kernelSize, - const dim2d stride, - const dim2d padding, + const shape2d kernelSize, + const shape2d stride, + const shape2d padding, const std::string &prefix ) { // Create the convolution layer @@ -28,7 +28,7 @@ class BasicConv2d : public CUDANet::Module { padding, CUDANet::Layers::ActivationType::NONE ); - dim2d batchNormSize = conv->getOutputDims(); + shape2d batchNormSize = conv->getOutputDims(); batchNorm = new CUDANet::Layers::BatchNorm2d( batchNormSize, outputChannels, 1e-3f, @@ -44,7 +44,7 @@ class BasicConv2d : public CUDANet::Module { return batchNorm->forward(d_output); } - dim2d getOutputDims() { + shape2d getOutputDims() { return batchNorm->getOutputDims(); } @@ -56,7 +56,7 @@ class BasicConv2d : public CUDANet::Module { class InceptionA : public CUDANet::Module { public: InceptionA( - const dim2d inputSize, + const shape2d 
inputSize, const int inputChannels, const int poolFeatures, const std::string &prefix @@ -144,7 +144,7 @@ class InceptionA : public CUDANet::Module { } private: - dim2d inputSize; + shape2d inputSize; int inputChannels; int poolFeatures; @@ -168,7 +168,7 @@ class InceptionA : public CUDANet::Module { class InceptionB : public CUDANet::Module { public: InceptionB( - const dim2d inputSize, + const shape2d inputSize, const int inputChannels, const std::string &prefix ) @@ -227,7 +227,7 @@ class InceptionB : public CUDANet::Module { } private: - dim2d inputSize; + shape2d inputSize; int inputChannels; BasicConv2d *branch3x3; @@ -245,7 +245,7 @@ class InceptionB : public CUDANet::Module { class InceptionC : public CUDANet::Module { public: InceptionC( - const dim2d inputSize, + const shape2d inputSize, const int inputChannels, const int nChannels_7x7, const std::string &prefix @@ -338,7 +338,7 @@ class InceptionC : public CUDANet::Module { } private: - dim2d inputSize; + shape2d inputSize; int inputChannels; BasicConv2d *branch1x1; diff --git a/include/kernels/convolution.cuh b/include/kernels/convolution.cuh index 044b6dc..87ec79e 100644 --- a/include/kernels/convolution.cuh +++ b/include/kernels/convolution.cuh @@ -25,13 +25,13 @@ __global__ void convolution( const float* __restrict__ d_kernel, const float* __restrict__ d_bias, float* __restrict__ d_output, - const dim2d inputSize, + const shape2d inputSize, const int nChannels, - const dim2d paddingSize, - const dim2d kernelSize, - const dim2d stride, + const shape2d paddingSize, + const shape2d kernelSize, + const shape2d stride, const int nFilters, - const dim2d outputSize + const shape2d outputSize ); } // namespace CUDANet::Kernels diff --git a/include/kernels/pooling.cuh b/include/kernels/pooling.cuh index 7e5a0aa..79ab7f6 100644 --- a/include/kernels/pooling.cuh +++ b/include/kernels/pooling.cuh @@ -9,23 +9,23 @@ namespace CUDANet::Kernels { __global__ void max_pooling( const float* __restrict__ d_input, float* 
__restrict__ d_output, - const dim2d inputSize, - const dim2d outputSize, + const shape2d inputSize, + const shape2d outputSize, const int nChannels, - const dim2d poolingSize, - const dim2d stride, - const dim2d padding + const shape2d poolingSize, + const shape2d stride, + const shape2d padding ); __global__ void avg_pooling( const float* __restrict__ d_input, float* __restrict__ d_output, - const dim2d inputSize, - const dim2d outputSize, + const shape2d inputSize, + const shape2d outputSize, const int nChannels, - const dim2d poolingSize, - const dim2d stride, - const dim2d padding + const shape2d poolingSize, + const shape2d stride, + const shape2d padding ); } // namespace CUDANet::Kernels diff --git a/include/layers/avg_pooling.cuh b/include/layers/avg_pooling.cuh index 4fe68c4..7dce0fa 100644 --- a/include/layers/avg_pooling.cuh +++ b/include/layers/avg_pooling.cuh @@ -9,11 +9,11 @@ namespace CUDANet::Layers { class AvgPooling2d : public SequentialLayer, public TwoDLayer { public: AvgPooling2d( - dim2d inputSize, + shape2d inputSize, int nChannels, - dim2d poolingSize, - dim2d stride, - dim2d padding, + shape2d poolingSize, + shape2d stride, + shape2d padding, ActivationType activationType ); ~AvgPooling2d(); @@ -34,16 +34,16 @@ class AvgPooling2d : public SequentialLayer, public TwoDLayer { */ int getInputSize(); - dim2d getOutputDims(); + shape2d getOutputDims(); private: - dim2d inputSize; + shape2d inputSize; int nChannels; - dim2d poolingSize; - dim2d stride; - dim2d padding; + shape2d poolingSize; + shape2d stride; + shape2d padding; - dim2d outputSize; + shape2d outputSize; float* d_output; diff --git a/include/layers/batch_norm.cuh b/include/layers/batch_norm.cuh index 7da46f0..dad5111 100644 --- a/include/layers/batch_norm.cuh +++ b/include/layers/batch_norm.cuh @@ -10,7 +10,7 @@ namespace CUDANet::Layers { class BatchNorm2d : public WeightedLayer, public TwoDLayer { public: - BatchNorm2d(dim2d inputSize, int inputChannels, float epsilon, 
ActivationType activationType); + BatchNorm2d(shape2d inputSize, int inputChannels, float epsilon, ActivationType activationType); ~BatchNorm2d(); @@ -64,11 +64,11 @@ class BatchNorm2d : public WeightedLayer, public TwoDLayer { */ int getInputSize(); - dim2d getOutputDims(); + shape2d getOutputDims(); private: - dim2d inputSize; + shape2d inputSize; int inputChannels; int gridSize; diff --git a/include/layers/conv2d.cuh b/include/layers/conv2d.cuh index 9cb9978..f1420b8 100644 --- a/include/layers/conv2d.cuh +++ b/include/layers/conv2d.cuh @@ -28,12 +28,12 @@ class Conv2d : public WeightedLayer, public TwoDLayer { * 'SOFTMAX' or 'NONE') */ Conv2d( - dim2d inputSize, + shape2d inputSize, int inputChannels, - dim2d kernelSize, - dim2d stride, + shape2d kernelSize, + shape2d stride, int numFilters, - dim2d paddingSize, + shape2d paddingSize, ActivationType activationType ); @@ -98,24 +98,24 @@ * * @return int */ - dim2d getPaddingSize() { + shape2d getPaddingSize() { return paddingSize; } - dim2d getOutputDims(); + shape2d getOutputDims(); private: // Inputs - dim2d inputSize; + shape2d inputSize; int inputChannels; // Outputs - dim2d outputSize; + shape2d outputSize; // Kernel - dim2d kernelSize; - dim2d stride; - dim2d paddingSize; + shape2d kernelSize; + shape2d stride; + shape2d paddingSize; int numFilters; // Kernels diff --git a/include/layers/layer.cuh b/include/layers/layer.cuh index 52b27af..08b6aa2 100644 --- a/include/layers/layer.cuh +++ b/include/layers/layer.cuh @@ -7,7 +7,7 @@ #define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) \ ((stride - 1) * inputSize - stride + kernelSize) / 2; -typedef std::pair<int, int> dim2d; +typedef std::pair<int, int> shape2d; namespace CUDANet::Layers { @@ -15,7 +15,7 @@ namespace CUDANet::Layers { class TwoDLayer { public: - virtual dim2d getOutputDims() = 0; + virtual shape2d getOutputDims() = 0; }; diff --git a/include/layers/max_pooling.cuh b/include/layers/max_pooling.cuh
index 7cef5f5..7f86515 100644 --- a/include/layers/max_pooling.cuh +++ b/include/layers/max_pooling.cuh @@ -9,11 +9,11 @@ namespace CUDANet::Layers { class MaxPooling2d : public SequentialLayer, public TwoDLayer { public: MaxPooling2d( - dim2d inputSize, + shape2d inputSize, int nChannels, - dim2d poolingSize, - dim2d stride, - dim2d padding, + shape2d poolingSize, + shape2d stride, + shape2d padding, ActivationType activationType ); ~MaxPooling2d(); @@ -34,16 +34,16 @@ class MaxPooling2d : public SequentialLayer, public TwoDLayer { */ int getInputSize(); - dim2d getOutputDims(); + shape2d getOutputDims(); private: - dim2d inputSize; + shape2d inputSize; int nChannels; - dim2d poolingSize; - dim2d stride; - dim2d padding; + shape2d poolingSize; + shape2d stride; + shape2d padding; - dim2d outputSize; + shape2d outputSize; float* d_output; diff --git a/include/model/model.hpp b/include/model/model.hpp index 9e1d393..815298d 100644 --- a/include/model/model.hpp +++ b/include/model/model.hpp @@ -26,7 +26,7 @@ struct TensorInfo { class Model { public: - Model(const dim2d inputSize, const int inputChannels, const int outputSize); + Model(const shape2d inputSize, const int inputChannels, const int outputSize); Model(const Model& other); ~Model(); @@ -43,7 +43,7 @@ class Model { Layers::Input* inputLayer; Layers::Output* outputLayer; - dim2d inputSize; + shape2d inputSize; int inputChannels; int outputSize; diff --git a/src/kernels/convolution.cu b/src/kernels/convolution.cu index 1fa4fb7..bae9729 100644 --- a/src/kernels/convolution.cu +++ b/src/kernels/convolution.cu @@ -9,13 +9,13 @@ __global__ void Kernels::convolution( const float* __restrict__ d_kernel, const float* __restrict__ d_bias, float* __restrict__ d_output, - const dim2d inputSize, + const shape2d inputSize, const int nChannels, - const dim2d paddingSize, - const dim2d kernelSize, - const dim2d stride, + const shape2d paddingSize, + const shape2d kernelSize, + const shape2d stride, const int nFilters, - 
const dim2d outputSize + const shape2d outputSize ) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; diff --git a/src/kernels/pooling.cu b/src/kernels/pooling.cu index 5a27555..993cc0c 100644 --- a/src/kernels/pooling.cu +++ b/src/kernels/pooling.cu @@ -7,12 +7,12 @@ using namespace CUDANet; __global__ void Kernels::max_pooling( const float* __restrict__ d_input, float* __restrict__ d_output, - const dim2d inputSize, - const dim2d outputSize, + const shape2d inputSize, + const shape2d outputSize, const int nChannels, - const dim2d poolingSize, - const dim2d stride, - const dim2d padding + const shape2d poolingSize, + const shape2d stride, + const shape2d padding ) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; @@ -48,12 +48,12 @@ __global__ void Kernels::max_pooling( __global__ void Kernels::avg_pooling( const float* __restrict__ d_input, float* __restrict__ d_output, - const dim2d inputSize, - const dim2d outputSize, + const shape2d inputSize, + const shape2d outputSize, const int nChannels, - const dim2d poolingSize, - const dim2d stride, - const dim2d padding + const shape2d poolingSize, + const shape2d stride, + const shape2d padding ) { int j = blockDim.x * blockIdx.x + threadIdx.x; int i = blockDim.y * blockIdx.y + threadIdx.y; diff --git a/src/layers/avg_pooling.cu b/src/layers/avg_pooling.cu index b6c19bd..f27b7c7 100644 --- a/src/layers/avg_pooling.cu +++ b/src/layers/avg_pooling.cu @@ -5,11 +5,11 @@ using namespace CUDANet::Layers; AvgPooling2d::AvgPooling2d( - dim2d inputSize, + shape2d inputSize, int nChannels, - dim2d poolingSize, - dim2d stride, - dim2d padding, + shape2d poolingSize, + shape2d stride, + shape2d padding, ActivationType activationType ) : inputSize(inputSize), @@ -66,6 +66,6 @@ int AvgPooling2d::getInputSize() { return inputSize.first * inputSize.second * nChannels; } -dim2d AvgPooling2d::getOutputDims() { +shape2d 
AvgPooling2d::getOutputDims() { return outputSize; } \ No newline at end of file diff --git a/src/layers/batch_norm.cu b/src/layers/batch_norm.cu index c311d6f..5e6c9b6 100644 --- a/src/layers/batch_norm.cu +++ b/src/layers/batch_norm.cu @@ -10,7 +10,7 @@ using namespace CUDANet::Layers; BatchNorm2d::BatchNorm2d( - dim2d inputSize, + shape2d inputSize, int inputChannels, float epsilon, ActivationType activationType @@ -128,7 +128,7 @@ int BatchNorm2d::getOutputSize() { return inputSize.first * inputSize.second * inputChannels; } -dim2d BatchNorm2d::getOutputDims() { +shape2d BatchNorm2d::getOutputDims() { return inputSize; } diff --git a/src/layers/conv2d.cu b/src/layers/conv2d.cu index e8069be..bd61555 100644 --- a/src/layers/conv2d.cu +++ b/src/layers/conv2d.cu @@ -12,12 +12,12 @@ using namespace CUDANet::Layers; Conv2d::Conv2d( - dim2d inputSize, + shape2d inputSize, int inputChannels, - dim2d kernelSize, - dim2d stride, + shape2d kernelSize, + shape2d stride, int numFilters, - dim2d paddingSize, + shape2d paddingSize, ActivationType activationType ) : inputSize(inputSize), @@ -139,6 +139,6 @@ int Conv2d::getInputSize() { return inputSize.first * inputSize.second * inputChannels; } -dim2d Conv2d::getOutputDims() { +shape2d Conv2d::getOutputDims() { return outputSize; } \ No newline at end of file diff --git a/src/layers/max_pooling.cu b/src/layers/max_pooling.cu index cd50b5f..9d11b71 100644 --- a/src/layers/max_pooling.cu +++ b/src/layers/max_pooling.cu @@ -5,11 +5,11 @@ using namespace CUDANet::Layers; MaxPooling2d::MaxPooling2d( - dim2d inputSize, + shape2d inputSize, int nChannels, - dim2d poolingSize, - dim2d stride, - dim2d padding, + shape2d poolingSize, + shape2d stride, + shape2d padding, ActivationType activationType ) : inputSize(inputSize), @@ -70,6 +70,6 @@ int MaxPooling2d::getInputSize() { return inputSize.first * inputSize.second * nChannels; } -dim2d MaxPooling2d::getOutputDims() { +shape2d MaxPooling2d::getOutputDims() { return outputSize; } \ 
No newline at end of file diff --git a/src/model/model.cpp b/src/model/model.cpp index f822646..9319e1d 100644 --- a/src/model/model.cpp +++ b/src/model/model.cpp @@ -11,7 +11,7 @@ using namespace CUDANet; -Model::Model(const dim2d inputSize, const int inputChannels, const int outputSize) +Model::Model(const shape2d inputSize, const int inputChannels, const int outputSize) : inputSize(inputSize), inputChannels(inputChannels), outputSize(outputSize), diff --git a/test/layers/test_avg_pooling.cu b/test/layers/test_avg_pooling.cu index 0d70d40..1c08077 100644 --- a/test/layers/test_avg_pooling.cu +++ b/test/layers/test_avg_pooling.cu @@ -7,11 +7,11 @@ class AvgPoolingLayerTest : public ::testing::Test { protected: - dim2d inputSize; + shape2d inputSize; int nChannels; - dim2d poolingSize; - dim2d stride; - dim2d padding; + shape2d poolingSize; + shape2d stride; + shape2d padding; std::vector input; std::vector expected; diff --git a/test/layers/test_batch_norm.cu b/test/layers/test_batch_norm.cu index ee7bb60..10acb0d 100644 --- a/test/layers/test_batch_norm.cu +++ b/test/layers/test_batch_norm.cu @@ -8,7 +8,7 @@ class BatchNormLayerTest : public ::testing::Test { protected: - dim2d inputSize; + shape2d inputSize; int nChannels; std::vector weights; std::vector biases; diff --git a/test/layers/test_conv2d.cu b/test/layers/test_conv2d.cu index 9c922fd..452c03e 100644 --- a/test/layers/test_conv2d.cu +++ b/test/layers/test_conv2d.cu @@ -7,12 +7,12 @@ class Conv2dTest : public ::testing::Test { protected: - dim2d inputSize; + shape2d inputSize; int inputChannels; - dim2d kernelSize; - dim2d stride; + shape2d kernelSize; + shape2d stride; int numFilters; - dim2d paddingSize; + shape2d paddingSize; CUDANet::Layers::ActivationType activationType; std::vector input; std::vector kernels; diff --git a/test/layers/test_max_pooling.cu b/test/layers/test_max_pooling.cu index a25e88c..6b37008 100644 --- a/test/layers/test_max_pooling.cu +++ b/test/layers/test_max_pooling.cu @@ 
-7,11 +7,11 @@ class MaxPoolingLayerTest : public ::testing::Test { protected: - dim2d inputSize; + shape2d inputSize; int nChannels; - dim2d poolingSize; - dim2d stride; - dim2d padding; + shape2d poolingSize; + shape2d stride; + shape2d padding; std::vector input; std::vector expected; diff --git a/test/model/test_model.cu b/test/model/test_model.cu index c9b397c..d5dca0d 100644 --- a/test/model/test_model.cu +++ b/test/model/test_model.cu @@ -10,21 +10,21 @@ class ModelTest : public ::testing::Test { CUDANet::Model *commonTestSetup( bool setWeights = true, - dim2d inputSize = {6, 6}, + shape2d inputSize = {6, 6}, int inputChannels = 2, int outputSize = 3, - dim2d kernelSize = {3, 3}, - dim2d stride = {1, 1}, + shape2d kernelSize = {3, 3}, + shape2d stride = {1, 1}, int numFilters = 2, - dim2d poolingSize = {2, 2}, - dim2d poolingStride = {2, 2} + shape2d poolingSize = {2, 2}, + shape2d poolingStride = {2, 2} ) { CUDANet::Model *model = new CUDANet::Model(inputSize, inputChannels, outputSize); - dim2d paddingSize = {0, 0}; + shape2d paddingSize = {0, 0}; // Conv2d CUDANet::Layers::Conv2d *conv2d = new CUDANet::Layers::Conv2d( @@ -38,7 +38,7 @@ class ModelTest : public ::testing::Test { model->addLayer("conv1", conv2d); // maxpool2d - dim2d poolingInput = { + shape2d poolingInput = { inputSize.first - kernelSize.first + 1, inputSize.second - kernelSize.second + 1 };