Rename batchnorm

commit 6f8b5f4081 (parent 94df063dcd)
2024-05-20 13:05:48 +02:00
4 changed files with 20 additions and 20 deletions


@@ -30,7 +30,7 @@ class BasicConv2d : public CUDANet::Module {
         int batchNormSize = conv->getOutputSize();
 
-        CUDANet::Layers::BatchNorm *batchNorm = new CUDANet::Layers::BatchNorm(
+        CUDANet::Layers::BatchNorm2D *batchNorm = new CUDANet::Layers::BatchNorm2D(
            batchNormSize, outputChannels, 1e-3f,
            CUDANet::Layers::ActivationType::RELU
        );
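For orientation, a minimal sketch of the conv -> batch-norm chaining this call site implies. It assumes Conv2d exposes getOutputSize() and a device-pointer forward() like the other layers in this diff; the buffer names are hypothetical:

    // Hypothetical forward pass through BasicConv2d's members (sketch only).
    float *d_x = conv->forward(d_input);  // convolution output stays on the device
    d_x = batchNorm->forward(d_x);        // fused batch norm + ReLU, per the RELU argument above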


@@ -8,11 +8,11 @@
 namespace CUDANet::Layers {
 
-class BatchNorm : public WeightedLayer {
+class BatchNorm2D : public WeightedLayer {
   public:
-    BatchNorm(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
-    ~BatchNorm();
+    BatchNorm2D(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
+    ~BatchNorm2D();
 
     /**
      * @brief Compute the forward pass of the batchnorm layer
@@ -105,13 +105,13 @@ class BatchNorm : public WeightedLayer {
     /**
      * @brief Initialize mean of the batchnorm layer with zeros
      *
      */
     void initializeMean();
 
     /**
      * @brief Initialize sqrt of variance of the batchnorm layer with ones
      *
      */
     void initializeSqrtVar();
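For reference, a minimal usage sketch of the renamed class, assuming only the constructor, setWeights()/setBiases(), and forward() signatures visible in this diff; the sizes, the gamma/beta vectors, and the d_input device buffer are made up for illustration:

    // Hypothetical standalone use of BatchNorm2D (sketch only).
    CUDANet::Layers::BatchNorm2D bn(
        /*inputSize=*/8, /*inputChannels=*/3, /*epsilon=*/1e-5f,
        CUDANet::Layers::ActivationType::NONE
    );
    bn.setWeights(gamma.data());         // gamma: std::vector<float> of size 3 (one scale per channel)
    bn.setBiases(beta.data());           // beta:  std::vector<float> of size 3 (one shift per channel)
    float *d_out = bn.forward(d_input);  // d_input: device buffer of 8*8*3 floats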


@@ -9,7 +9,7 @@
 using namespace CUDANet::Layers;
 
-BatchNorm::BatchNorm(
+BatchNorm2D::BatchNorm2D(
     int inputSize,
     int inputChannels,
     float epsilon,
@@ -63,7 +63,7 @@ BatchNorm::BatchNorm(
         (inputSize * inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
 }
 
-BatchNorm::~BatchNorm() {
+BatchNorm2D::~BatchNorm2D() {
     cudaFree(d_output);
     cudaFree(d_mean);
     cudaFree(d_mean_sub);
@@ -74,33 +74,33 @@ BatchNorm::~BatchNorm() {
     cudaFree(d_epsilon);
 }
 
-void BatchNorm::initializeWeights() {
+void BatchNorm2D::initializeWeights() {
     std::fill(weights.begin(), weights.end(), 1.0f);
 }
 
-void BatchNorm::initializeBiases() {
+void BatchNorm2D::initializeBiases() {
     std::fill(biases.begin(), biases.end(), 0.0f);
 }
 
-void BatchNorm::setWeights(const float *weights_input) {
+void BatchNorm2D::setWeights(const float *weights_input) {
     std::copy(weights_input, weights_input + weights.size(), weights.begin());
     toCuda();
 }
 
-std::vector<float> BatchNorm::getWeights() {
+std::vector<float> BatchNorm2D::getWeights() {
     return weights;
 }
 
-void BatchNorm::setBiases(const float *biases_input) {
+void BatchNorm2D::setBiases(const float *biases_input) {
     std::copy(biases_input, biases_input + biases.size(), biases.begin());
     toCuda();
 }
 
-std::vector<float> BatchNorm::getBiases() {
+std::vector<float> BatchNorm2D::getBiases() {
     return biases;
 }
 
-void BatchNorm::toCuda() {
+void BatchNorm2D::toCuda() {
     CUDA_CHECK(cudaMemcpy(
         d_weights, weights.data(), sizeof(float) * inputChannels,
         cudaMemcpyHostToDevice
@@ -111,15 +111,15 @@ void BatchNorm::toCuda() {
     ));
 }
 
-int BatchNorm::getInputSize() {
+int BatchNorm2D::getInputSize() {
     return inputSize * inputSize * inputChannels;
 }
 
-int BatchNorm::getOutputSize() {
+int BatchNorm2D::getOutputSize() {
     return inputSize * inputSize * inputChannels;
 }
 
-float *BatchNorm::forward(const float *d_input) {
+float *BatchNorm2D::forward(const float *d_input) {
     // Compute per-channel batch normalization
     for (int i = 0; i < inputChannels; i++) {
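As context for the loop above, a host-side reference of the per-channel transform this forward pass presumably dispatches to CUDA kernels; the standard batch-norm formula and all names below are illustrative, since the kernels themselves are not part of this diff:

    // Reference (CPU) per-channel batch norm over an inputSize x inputSize x inputChannels tensor.
    for (int c = 0; c < inputChannels; ++c) {
        float m = mean[c];     // per-channel mean (cf. d_mean / initializeMean)
        float s = sqrtVar[c];  // per-channel sqrt(variance + epsilon) (cf. initializeSqrtVar)
        for (int j = 0; j < inputSize * inputSize; ++j) {
            int idx = c * inputSize * inputSize + j;
            out[idx] = weights[c] * (in[idx] - m) / s + biases[c];  // gamma * x_hat + beta
        }
    }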


@@ -12,7 +12,7 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
     cudaError_t cudaStatus;
 
-    CUDANet::Layers::BatchNorm batchNorm(
+    CUDANet::Layers::BatchNorm2D batchNorm(
         inputSize, nChannels, 1e-5f, CUDANet::Layers::ActivationType::NONE
     );
@@ -69,7 +69,7 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
        0.9126f, 0.71485f, -0.08184f, -0.19131f
     };
 
-    // std::cout << "BatchNorm: " << std::endl;
+    // std::cout << "BatchNorm2D: " << std::endl;
     for (int i = 0; i < output.size(); i++) {
         EXPECT_NEAR(output[i], expected[i], 1e-5);
         // std::cout << output[i] << " ";