Rename dim2d to shape2d

Mirror of https://github.com/lordmathis/CUDANet.git
@@ -32,7 +32,7 @@ readAndNormalizeImage(const std::string &imagePath, int width, int height) {
 }
 
 CUDANet::Model *createModel(
-    const dim2d inputSize,
+    const shape2d inputSize,
     const int inputChannels,
     const int outputSize
 ) {
@@ -112,7 +112,7 @@ int main(int argc, const char *const argv[]) {
     std::string modelWeightsPath = argv[1];
     std::string imagePath = argv[2];
 
-    const dim2d inputSize = {227, 227};
+    const shape2d inputSize = {227, 227};
     const int inputChannels = 3;
     const int outputSize = 1000;
 
@@ -14,12 +14,12 @@ int main(int argc, const char *const argv[]) {
 class BasicConv2d : public CUDANet::Module {
   public:
     BasicConv2d(
-        const dim2d inputSize,
+        const shape2d inputSize,
         const int inputChannels,
         const int outputChannels,
-        const dim2d kernelSize,
-        const dim2d stride,
-        const dim2d padding,
+        const shape2d kernelSize,
+        const shape2d stride,
+        const shape2d padding,
         const std::string &prefix
     ) {
         // Create the convolution layer
@@ -28,7 +28,7 @@ class BasicConv2d : public CUDANet::Module {
             padding, CUDANet::Layers::ActivationType::NONE
         );
 
-        dim2d batchNormSize = conv->getOutputDims();
+        shape2d batchNormSize = conv->getOutputDims();
 
         batchNorm = new CUDANet::Layers::BatchNorm2d(
             batchNormSize, outputChannels, 1e-3f,
@@ -44,7 +44,7 @@ class BasicConv2d : public CUDANet::Module {
         return batchNorm->forward(d_output);
     }
 
-    dim2d getOutputDims() {
+    shape2d getOutputDims() {
         return batchNorm->getOutputDims();
     }
 
@@ -56,7 +56,7 @@ class BasicConv2d : public CUDANet::Module {
 class InceptionA : public CUDANet::Module {
   public:
     InceptionA(
-        const dim2d inputSize,
+        const shape2d inputSize,
         const int inputChannels,
         const int poolFeatures,
         const std::string &prefix
@@ -144,7 +144,7 @@ class InceptionA : public CUDANet::Module {
     }
 
   private:
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
    int poolFeatures;
 
@@ -168,7 +168,7 @@ class InceptionA : public CUDANet::Module {
 class InceptionB : public CUDANet::Module {
   public:
     InceptionB(
-        const dim2d inputSize,
+        const shape2d inputSize,
         const int inputChannels,
         const std::string &prefix
     )
@@ -227,7 +227,7 @@ class InceptionB : public CUDANet::Module {
     }
 
   private:
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
 
     BasicConv2d *branch3x3;
@@ -245,7 +245,7 @@ class InceptionB : public CUDANet::Module {
 class InceptionC : public CUDANet::Module {
   public:
     InceptionC(
-        const dim2d inputSize,
+        const shape2d inputSize,
         const int inputChannels,
         const int nChannels_7x7,
         const std::string &prefix
@@ -338,7 +338,7 @@ class InceptionC : public CUDANet::Module {
     }
 
   private:
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
 
     BasicConv2d *branch1x1;
 
@@ -25,13 +25,13 @@ __global__ void convolution(
     const float* __restrict__ d_kernel,
     const float* __restrict__ d_bias,
     float* __restrict__ d_output,
-    const dim2d inputSize,
+    const shape2d inputSize,
     const int nChannels,
-    const dim2d paddingSize,
-    const dim2d kernelSize,
-    const dim2d stride,
+    const shape2d paddingSize,
+    const shape2d kernelSize,
+    const shape2d stride,
     const int nFilters,
-    const dim2d outputSize
+    const shape2d outputSize
 );
 
 } // namespace CUDANet::Kernels
@@ -9,23 +9,23 @@ namespace CUDANet::Kernels {
 __global__ void max_pooling(
     const float* __restrict__ d_input,
     float* __restrict__ d_output,
-    const dim2d inputSize,
-    const dim2d outputSize,
+    const shape2d inputSize,
+    const shape2d outputSize,
     const int nChannels,
-    const dim2d poolingSize,
-    const dim2d stride,
-    const dim2d padding
+    const shape2d poolingSize,
+    const shape2d stride,
+    const shape2d padding
 );
 
 __global__ void avg_pooling(
     const float* __restrict__ d_input,
     float* __restrict__ d_output,
-    const dim2d inputSize,
-    const dim2d outputSize,
+    const shape2d inputSize,
+    const shape2d outputSize,
     const int nChannels,
-    const dim2d poolingSize,
-    const dim2d stride,
-    const dim2d padding
+    const shape2d poolingSize,
+    const shape2d stride,
+    const shape2d padding
 );
 
 } // namespace CUDANet::Kernels
 
@@ -9,11 +9,11 @@ namespace CUDANet::Layers {
 class AvgPooling2d : public SequentialLayer, public TwoDLayer {
   public:
     AvgPooling2d(
-        dim2d inputSize,
+        shape2d inputSize,
         int nChannels,
-        dim2d poolingSize,
-        dim2d stride,
-        dim2d padding,
+        shape2d poolingSize,
+        shape2d stride,
+        shape2d padding,
         ActivationType activationType
     );
     ~AvgPooling2d();
@@ -34,16 +34,16 @@ class AvgPooling2d : public SequentialLayer, public TwoDLayer {
      */
     int getInputSize();
 
-    dim2d getOutputDims();
+    shape2d getOutputDims();
 
   private:
-    dim2d inputSize;
+    shape2d inputSize;
     int nChannels;
-    dim2d poolingSize;
-    dim2d stride;
-    dim2d padding;
+    shape2d poolingSize;
+    shape2d stride;
+    shape2d padding;
 
-    dim2d outputSize;
+    shape2d outputSize;
 
     float* d_output;
 
@@ -10,7 +10,7 @@ namespace CUDANet::Layers {
 
 class BatchNorm2d : public WeightedLayer, public TwoDLayer {
   public:
-    BatchNorm2d(dim2d inputSize, int inputChannels, float epsilon, ActivationType activationType);
+    BatchNorm2d(shape2d inputSize, int inputChannels, float epsilon, ActivationType activationType);
 
     ~BatchNorm2d();
 
@@ -64,11 +64,11 @@ class BatchNorm2d : public WeightedLayer, public TwoDLayer {
      */
     int getInputSize();
 
-    dim2d getOutputDims();
+    shape2d getOutputDims();
 
   private:
 
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
 
     int gridSize;
 
@@ -28,12 +28,12 @@ class Conv2d : public WeightedLayer, public TwoDLayer {
      * 'SOFTMAX' or 'NONE')
      */
     Conv2d(
-        dim2d inputSize,
+        shape2d inputSize,
         int inputChannels,
-        dim2d kernelSize,
-        dim2d stride,
+        shape2d kernelSize,
+        shape2d stride,
         int numFilters,
-        dim2d paddingSize,
+        shape2d paddingSize,
         ActivationType activationType
     );
 
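For readers following the rename, here is a minimal usage sketch of the updated Conv2d constructor. The sizes, the makeConv helper, and the "conv2d.cuh" include name are illustrative assumptions, not taken from this commit; only the parameter order and the shape2d type follow the declaration above.

// Illustrative sketch only: constructing a Conv2d with shape2d arguments.
// "conv2d.cuh" is an assumed header name; the sizes below are made up.
#include "conv2d.cuh"

using namespace CUDANet::Layers;

Conv2d *makeConv() {
    shape2d inputSize   = {64, 64};  // hypothetical spatial size
    shape2d kernelSize  = {3, 3};
    shape2d stride      = {1, 1};
    shape2d paddingSize = {1, 1};

    // Parameter order follows the declaration in this hunk:
    // inputSize, inputChannels, kernelSize, stride, numFilters,
    // paddingSize, activationType.
    return new Conv2d(
        inputSize, /*inputChannels=*/3, kernelSize, stride,
        /*numFilters=*/16, paddingSize, ActivationType::NONE
    );
}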
@@ -98,24 +98,24 @@ class Conv2d : public WeightedLayer, public TwoDLayer {
      *
      * @return int
      */
-    dim2d getPaddingSize() {
+    shape2d getPaddingSize() {
         return paddingSize;
     }
 
-    dim2d getOutputDims();
+    shape2d getOutputDims();
 
   private:
     // Inputs
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
 
     // Outputs
-    dim2d outputSize;
+    shape2d outputSize;
 
     // Kernel
-    dim2d kernelSize;
-    dim2d stride;
-    dim2d paddingSize;
+    shape2d kernelSize;
+    shape2d stride;
+    shape2d paddingSize;
     int numFilters;
 
     // Kernels
@@ -7,7 +7,7 @@
 #define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) \
     ((stride - 1) * inputSize - stride + kernelSize) / 2;
 
-typedef std::pair<int, int> dim2d;
+typedef std::pair<int, int> shape2d;
 
 namespace CUDANet::Layers {
 
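Since shape2d is just a renamed std::pair<int, int>, the CUDANET_SAME_PADDING macro is still applied one dimension at a time via .first and .second. A minimal, self-contained sketch of that arithmetic follows; the 32x32 input is an illustrative value, not from this commit.

#include <utility>

typedef std::pair<int, int> shape2d;

// Copied from the header above; note the macro expansion already ends with ';'.
#define CUDANET_SAME_PADDING(inputSize, kernelSize, stride) \
    ((stride - 1) * inputSize - stride + kernelSize) / 2;

int main() {
    shape2d inputSize  = {32, 32};  // hypothetical spatial size
    shape2d kernelSize = {3, 3};
    shape2d stride     = {1, 1};

    // "Same" padding per axis: for a 3x3 kernel with stride 1 this is 1.
    int padH = CUDANET_SAME_PADDING(inputSize.first, kernelSize.first, stride.first)
    int padW = CUDANET_SAME_PADDING(inputSize.second, kernelSize.second, stride.second)

    return (padH == 1 && padW == 1) ? 0 : 1;
}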
@@ -15,7 +15,7 @@ namespace CUDANet::Layers {
 class TwoDLayer {
 
   public:
-    virtual dim2d getOutputDims() = 0;
+    virtual shape2d getOutputDims() = 0;
 
 };
 
@@ -9,11 +9,11 @@ namespace CUDANet::Layers {
 class MaxPooling2d : public SequentialLayer, public TwoDLayer {
   public:
     MaxPooling2d(
-        dim2d inputSize,
+        shape2d inputSize,
         int nChannels,
-        dim2d poolingSize,
-        dim2d stride,
-        dim2d padding,
+        shape2d poolingSize,
+        shape2d stride,
+        shape2d padding,
         ActivationType activationType
     );
     ~MaxPooling2d();
@@ -34,16 +34,16 @@ class MaxPooling2d : public SequentialLayer, public TwoDLayer {
      */
     int getInputSize();
 
-    dim2d getOutputDims();
+    shape2d getOutputDims();
 
   private:
-    dim2d inputSize;
+    shape2d inputSize;
     int nChannels;
-    dim2d poolingSize;
-    dim2d stride;
-    dim2d padding;
+    shape2d poolingSize;
+    shape2d stride;
+    shape2d padding;
 
-    dim2d outputSize;
+    shape2d outputSize;
 
     float* d_output;
 
@@ -26,7 +26,7 @@ struct TensorInfo {
 
 class Model {
   public:
-    Model(const dim2d inputSize, const int inputChannels, const int outputSize);
+    Model(const shape2d inputSize, const int inputChannels, const int outputSize);
     Model(const Model& other);
     ~Model();
 
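The renamed constructor is called the same way as before, now taking a shape2d for the spatial input size. A brief sketch, reusing the 227x227 input, 3-channel, 1000-class values from the example program earlier in this commit; the "model.hpp" header name and the buildModel helper are assumptions.

// Sketch only: the header name and buildModel helper are assumed.
#include "model.hpp"

CUDANet::Model *buildModel() {
    const shape2d inputSize = {227, 227};
    return new CUDANet::Model(inputSize, /*inputChannels=*/3, /*outputSize=*/1000);
}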
@@ -43,7 +43,7 @@ class Model {
     Layers::Input* inputLayer;
     Layers::Output* outputLayer;
 
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
 
     int outputSize;
 
@@ -9,13 +9,13 @@ __global__ void Kernels::convolution(
     const float* __restrict__ d_kernel,
     const float* __restrict__ d_bias,
     float* __restrict__ d_output,
-    const dim2d inputSize,
+    const shape2d inputSize,
     const int nChannels,
-    const dim2d paddingSize,
-    const dim2d kernelSize,
-    const dim2d stride,
+    const shape2d paddingSize,
+    const shape2d kernelSize,
+    const shape2d stride,
     const int nFilters,
-    const dim2d outputSize
+    const shape2d outputSize
 ) {
     int j = blockDim.x * blockIdx.x + threadIdx.x;
     int i = blockDim.y * blockIdx.y + threadIdx.y;
 
@@ -7,12 +7,12 @@ using namespace CUDANet;
 __global__ void Kernels::max_pooling(
     const float* __restrict__ d_input,
     float* __restrict__ d_output,
-    const dim2d inputSize,
-    const dim2d outputSize,
+    const shape2d inputSize,
+    const shape2d outputSize,
     const int nChannels,
-    const dim2d poolingSize,
-    const dim2d stride,
-    const dim2d padding
+    const shape2d poolingSize,
+    const shape2d stride,
+    const shape2d padding
 ) {
     int j = blockDim.x * blockIdx.x + threadIdx.x;
     int i = blockDim.y * blockIdx.y + threadIdx.y;
@@ -48,12 +48,12 @@ __global__ void Kernels::max_pooling(
 __global__ void Kernels::avg_pooling(
     const float* __restrict__ d_input,
     float* __restrict__ d_output,
-    const dim2d inputSize,
-    const dim2d outputSize,
+    const shape2d inputSize,
+    const shape2d outputSize,
     const int nChannels,
-    const dim2d poolingSize,
-    const dim2d stride,
-    const dim2d padding
+    const shape2d poolingSize,
+    const shape2d stride,
+    const shape2d padding
 ) {
     int j = blockDim.x * blockIdx.x + threadIdx.x;
     int i = blockDim.y * blockIdx.y + threadIdx.y;
 
@@ -5,11 +5,11 @@
 using namespace CUDANet::Layers;
 
 AvgPooling2d::AvgPooling2d(
-    dim2d inputSize,
+    shape2d inputSize,
     int nChannels,
-    dim2d poolingSize,
-    dim2d stride,
-    dim2d padding,
+    shape2d poolingSize,
+    shape2d stride,
+    shape2d padding,
     ActivationType activationType
 )
     : inputSize(inputSize),
@@ -66,6 +66,6 @@ int AvgPooling2d::getInputSize() {
     return inputSize.first * inputSize.second * nChannels;
 }
 
-dim2d AvgPooling2d::getOutputDims() {
+shape2d AvgPooling2d::getOutputDims() {
     return outputSize;
 }
@@ -10,7 +10,7 @@
 using namespace CUDANet::Layers;
 
 BatchNorm2d::BatchNorm2d(
-    dim2d inputSize,
+    shape2d inputSize,
     int inputChannels,
     float epsilon,
     ActivationType activationType
@@ -128,7 +128,7 @@ int BatchNorm2d::getOutputSize() {
     return inputSize.first * inputSize.second * inputChannels;
 }
 
-dim2d BatchNorm2d::getOutputDims() {
+shape2d BatchNorm2d::getOutputDims() {
     return inputSize;
 }
 
@@ -12,12 +12,12 @@
 using namespace CUDANet::Layers;
 
 Conv2d::Conv2d(
-    dim2d inputSize,
+    shape2d inputSize,
     int inputChannels,
-    dim2d kernelSize,
-    dim2d stride,
+    shape2d kernelSize,
+    shape2d stride,
     int numFilters,
-    dim2d paddingSize,
+    shape2d paddingSize,
     ActivationType activationType
 )
     : inputSize(inputSize),
@@ -139,6 +139,6 @@ int Conv2d::getInputSize() {
     return inputSize.first * inputSize.second * inputChannels;
 }
 
-dim2d Conv2d::getOutputDims() {
+shape2d Conv2d::getOutputDims() {
     return outputSize;
 }
@@ -5,11 +5,11 @@
 using namespace CUDANet::Layers;
 
 MaxPooling2d::MaxPooling2d(
-    dim2d inputSize,
+    shape2d inputSize,
     int nChannels,
-    dim2d poolingSize,
-    dim2d stride,
-    dim2d padding,
+    shape2d poolingSize,
+    shape2d stride,
+    shape2d padding,
     ActivationType activationType
 )
     : inputSize(inputSize),
@@ -70,6 +70,6 @@ int MaxPooling2d::getInputSize() {
     return inputSize.first * inputSize.second * nChannels;
 }
 
-dim2d MaxPooling2d::getOutputDims() {
+shape2d MaxPooling2d::getOutputDims() {
     return outputSize;
 }
@@ -11,7 +11,7 @@
 
 using namespace CUDANet;
 
-Model::Model(const dim2d inputSize, const int inputChannels, const int outputSize)
+Model::Model(const shape2d inputSize, const int inputChannels, const int outputSize)
     : inputSize(inputSize),
       inputChannels(inputChannels),
       outputSize(outputSize),
 
@@ -7,11 +7,11 @@
 
 class AvgPoolingLayerTest : public ::testing::Test {
   protected:
-    dim2d inputSize;
+    shape2d inputSize;
     int nChannels;
-    dim2d poolingSize;
-    dim2d stride;
-    dim2d padding;
+    shape2d poolingSize;
+    shape2d stride;
+    shape2d padding;
     std::vector<float> input;
     std::vector<float> expected;
 
@@ -8,7 +8,7 @@
 
 class BatchNormLayerTest : public ::testing::Test {
   protected:
-    dim2d inputSize;
+    shape2d inputSize;
     int nChannels;
     std::vector<float> weights;
     std::vector<float> biases;
 
@@ -7,12 +7,12 @@
 
 class Conv2dTest : public ::testing::Test {
   protected:
-    dim2d inputSize;
+    shape2d inputSize;
     int inputChannels;
-    dim2d kernelSize;
-    dim2d stride;
+    shape2d kernelSize;
+    shape2d stride;
     int numFilters;
-    dim2d paddingSize;
+    shape2d paddingSize;
     CUDANet::Layers::ActivationType activationType;
     std::vector<float> input;
     std::vector<float> kernels;
 
@@ -7,11 +7,11 @@
 
 class MaxPoolingLayerTest : public ::testing::Test {
   protected:
-    dim2d inputSize;
+    shape2d inputSize;
     int nChannels;
-    dim2d poolingSize;
-    dim2d stride;
-    dim2d padding;
+    shape2d poolingSize;
+    shape2d stride;
+    shape2d padding;
     std::vector<float> input;
     std::vector<float> expected;
 
@@ -10,21 +10,21 @@ class ModelTest : public ::testing::Test {
     CUDANet::Model *commonTestSetup(
         bool setWeights = true,
 
-        dim2d inputSize = {6, 6},
+        shape2d inputSize = {6, 6},
         int inputChannels = 2,
         int outputSize = 3,
 
-        dim2d kernelSize = {3, 3},
-        dim2d stride = {1, 1},
+        shape2d kernelSize = {3, 3},
+        shape2d stride = {1, 1},
         int numFilters = 2,
 
-        dim2d poolingSize = {2, 2},
-        dim2d poolingStride = {2, 2}
+        shape2d poolingSize = {2, 2},
+        shape2d poolingStride = {2, 2}
     ) {
         CUDANet::Model *model =
             new CUDANet::Model(inputSize, inputChannels, outputSize);
 
-        dim2d paddingSize = {0, 0};
+        shape2d paddingSize = {0, 0};
 
         // Conv2d
         CUDANet::Layers::Conv2d *conv2d = new CUDANet::Layers::Conv2d(
@@ -38,7 +38,7 @@ class ModelTest : public ::testing::Test {
         model->addLayer("conv1", conv2d);
 
         // maxpool2d
-        dim2d poolingInput = {
+        shape2d poolingInput = {
             inputSize.first - kernelSize.first + 1,
             inputSize.second - kernelSize.second + 1
         };
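The dimension arithmetic above is unchanged by the rename; only the pair's type name differs. A self-contained check of that arithmetic, using the defaults from commonTestSetup (inputSize {6, 6}, kernelSize {3, 3}, padding {0, 0}), where a valid convolution leaves inputSize - kernelSize + 1 per axis:

#include <cassert>
#include <utility>

typedef std::pair<int, int> shape2d;

int main() {
    shape2d inputSize  = {6, 6};
    shape2d kernelSize = {3, 3};

    shape2d poolingInput = {
        inputSize.first - kernelSize.first + 1,    // 6 - 3 + 1 = 4
        inputSize.second - kernelSize.second + 1   // 6 - 3 + 1 = 4
    };

    assert(poolingInput.first == 4 && poolingInput.second == 4);
    return 0;
}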