From 6f8b5f40817506f11d3d54dbeac64b67eaec49c5 Mon Sep 17 00:00:00 2001
From: LordMathis
Date: Mon, 20 May 2024 13:05:48 +0200
Subject: [PATCH] Rename batchnorm

---
 examples/inception_v3/inception_v3.cpp |  2 +-
 include/layers/batch_norm.cuh          | 10 +++++-----
 src/layers/batch_norm.cu               | 24 ++++++++++++------------
 test/layers/test_batch_norm.cu         |  4 ++--
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/examples/inception_v3/inception_v3.cpp b/examples/inception_v3/inception_v3.cpp
index ba6cc4d..5c1715b 100644
--- a/examples/inception_v3/inception_v3.cpp
+++ b/examples/inception_v3/inception_v3.cpp
@@ -30,7 +30,7 @@ class BasicConv2d : public CUDANet::Module {
 
         int batchNormSize = conv->getOutputSize();
 
-        CUDANet::Layers::BatchNorm *batchNorm = new CUDANet::Layers::BatchNorm(
+        CUDANet::Layers::BatchNorm2D *batchNorm = new CUDANet::Layers::BatchNorm2D(
             batchNormSize, outputChannels, 1e-3f,
             CUDANet::Layers::ActivationType::RELU
         );
diff --git a/include/layers/batch_norm.cuh b/include/layers/batch_norm.cuh
index c24a946..91abb2e 100644
--- a/include/layers/batch_norm.cuh
+++ b/include/layers/batch_norm.cuh
@@ -8,11 +8,11 @@
 
 namespace CUDANet::Layers {
 
-class BatchNorm : public WeightedLayer {
+class BatchNorm2D : public WeightedLayer {
   public:
-    BatchNorm(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
+    BatchNorm2D(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
 
-    ~BatchNorm();
+    ~BatchNorm2D();
 
     /**
      * @brief Compute the forward pass of the batchnorm layer
@@ -105,13 +105,13 @@ class BatchNorm : public WeightedLayer {
 
     /**
      * @brief Initialize mean of the batchnorm layer with zeros
-     * 
+     *
      */
    void initializeMean();
 
     /**
      * @brief Initialize sqrt of variance of the batchnorm layer with ones
-     * 
+     *
      */
    void initializeSqrtVar();
 
diff --git a/src/layers/batch_norm.cu b/src/layers/batch_norm.cu
index 486a728..c96e6cf 100644
--- a/src/layers/batch_norm.cu
+++ b/src/layers/batch_norm.cu
@@ -9,7 +9,7 @@
 
 using namespace CUDANet::Layers;
 
-BatchNorm::BatchNorm(
+BatchNorm2D::BatchNorm2D(
     int inputSize,
     int inputChannels,
     float epsilon,
@@ -63,7 +63,7 @@ BatchNorm::BatchNorm(
         (inputSize * inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
 }
 
-BatchNorm::~BatchNorm() {
+BatchNorm2D::~BatchNorm2D() {
     cudaFree(d_output);
     cudaFree(d_mean);
     cudaFree(d_mean_sub);
@@ -74,33 +74,33 @@
     cudaFree(d_epsilon);
 }
 
-void BatchNorm::initializeWeights() {
+void BatchNorm2D::initializeWeights() {
     std::fill(weights.begin(), weights.end(), 1.0f);
 }
 
-void BatchNorm::initializeBiases() {
+void BatchNorm2D::initializeBiases() {
     std::fill(biases.begin(), biases.end(), 0.0f);
 }
 
-void BatchNorm::setWeights(const float *weights_input) {
+void BatchNorm2D::setWeights(const float *weights_input) {
     std::copy(weights_input, weights_input + weights.size(), weights.begin());
     toCuda();
 }
 
-std::vector<float> BatchNorm::getWeights() {
+std::vector<float> BatchNorm2D::getWeights() {
     return weights;
 }
 
-void BatchNorm::setBiases(const float *biases_input) {
+void BatchNorm2D::setBiases(const float *biases_input) {
     std::copy(biases_input, biases_input + biases.size(), biases.begin());
     toCuda();
 }
 
-std::vector<float> BatchNorm::getBiases() {
+std::vector<float> BatchNorm2D::getBiases() {
     return biases;
 }
 
-void BatchNorm::toCuda() {
+void BatchNorm2D::toCuda() {
     CUDA_CHECK(cudaMemcpy(
         d_weights, weights.data(), sizeof(float) * inputChannels,
         cudaMemcpyHostToDevice
@@ -111,15 +111,15 @@
     ));
 }
 
-int BatchNorm::getInputSize() {
+int BatchNorm2D::getInputSize() {
     return inputSize * inputSize * inputChannels;
 }
 
-int BatchNorm::getOutputSize() {
+int BatchNorm2D::getOutputSize() {
     return inputSize * inputSize * inputChannels;
 }
 
-float *BatchNorm::forward(const float *d_input) {
+float *BatchNorm2D::forward(const float *d_input) {
 
     // Compute per-channel batch normalization
     for (int i = 0; i < inputChannels; i++) {
diff --git a/test/layers/test_batch_norm.cu b/test/layers/test_batch_norm.cu
index 46da0fa..c3fbb4c 100644
--- a/test/layers/test_batch_norm.cu
+++ b/test/layers/test_batch_norm.cu
@@ -12,7 +12,7 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
 
     cudaError_t cudaStatus;
 
-    CUDANet::Layers::BatchNorm batchNorm(
+    CUDANet::Layers::BatchNorm2D batchNorm(
         inputSize, nChannels, 1e-5f,
         CUDANet::Layers::ActivationType::NONE
     );
@@ -69,7 +69,7 @@
         0.9126f, 0.71485f, -0.08184f, -0.19131f
     };
 
-    // std::cout << "BatchNorm: " << std::endl;
+    // std::cout << "BatchNorm2D: " << std::endl;
     for (int i = 0; i < output.size(); i++) {
         EXPECT_NEAR(output[i], expected[i], 1e-5);
         // std::cout << output[i] << " ";
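
After this rename, constructing the layer looks as follows. This is a minimal sketch based only on the signatures visible in this patch (the constructor in include/layers/batch_norm.cuh and the test above); the 32x32 input size and 16 channels are illustrative values, not taken from the patch:

    #include <cuda_runtime.h>
    #include "layers/batch_norm.cuh"

    int main() {
        // BatchNorm2D(inputSize, inputChannels, epsilon, activationType),
        // matching the signature renamed in this patch
        CUDANet::Layers::BatchNorm2D batchNorm(
            32, 16, 1e-5f, CUDANet::Layers::ActivationType::NONE
        );

        // forward() takes and returns device pointers; getInputSize()
        // reports inputSize * inputSize * inputChannels floats
        float *d_input;
        cudaMalloc(&d_input, sizeof(float) * batchNorm.getInputSize());
        float *d_output = batchNorm.forward(d_input);

        cudaFree(d_input);
        return 0;
    }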