Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-11-05 17:34:21 +00:00)
Rename batchnorm
@@ -30,7 +30,7 @@ class BasicConv2d : public CUDANet::Module {
 
     int batchNormSize = conv->getOutputSize();
 
-    CUDANet::Layers::BatchNorm *batchNorm = new CUDANet::Layers::BatchNorm(
+    CUDANet::Layers::BatchNorm2D *batchNorm = new CUDANet::Layers::BatchNorm2D(
         batchNormSize, outputChannels, 1e-3f,
         CUDANet::Layers::ActivationType::RELU
     );
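
For orientation, a minimal standalone sketch of constructing the renamed layer, assuming the four-argument signature shown in the header hunk below; spatialSize and channels are illustrative values, not taken from the repository:

    #include <memory>
    // assumes the CUDANet headers declaring BatchNorm2D and ActivationType

    int main() {
        int spatialSize = 56;  // width == height of the square feature map (hypothetical)
        int channels    = 64;  // number of per-channel parameters (hypothetical)
        auto bn = std::make_unique<CUDANet::Layers::BatchNorm2D>(
            spatialSize, channels, 1e-3f,          // same epsilon BasicConv2d passes
            CUDANet::Layers::ActivationType::RELU  // activation fused after normalization
        );
        // bn->forward(d_input) would then normalize each channel on the GPU.
    }
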
@@ -8,11 +8,11 @@
 
 namespace CUDANet::Layers {
 
-class BatchNorm : public WeightedLayer {
+class BatchNorm2D : public WeightedLayer {
   public:
-    BatchNorm(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
+    BatchNorm2D(int inputSize, int inputChannels, float epsilon, ActivationType activationType);
 
-    ~BatchNorm();
+    ~BatchNorm2D();
 
     /**
      * @brief Compute the forward pass of the batchnorm layer
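
A note on the renamed signature, inferred from the implementation hunks below: inputSize is the width/height of a square feature map (both size getters return inputSize * inputSize * inputChannels), inputChannels is the number of per-channel scale/shift parameters (toCuda copies sizeof(float) * inputChannels of each), epsilon stabilizes the division by the standard deviation, and activationType selects an activation applied after normalization. Per channel c the layer computes the usual y = weights[c] * (x - mean) / sqrt(var + epsilon) + biases[c].
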
@@ -9,7 +9,7 @@
 
 using namespace CUDANet::Layers;
 
-BatchNorm::BatchNorm(
+BatchNorm2D::BatchNorm2D(
     int inputSize,
     int inputChannels,
     float epsilon,
@@ -63,7 +63,7 @@ BatchNorm::BatchNorm(
         (inputSize * inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
 }
 
-BatchNorm::~BatchNorm() {
+BatchNorm2D::~BatchNorm2D() {
    cudaFree(d_output);
    cudaFree(d_mean);
    cudaFree(d_mean_sub);
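
The first context line of that hunk is the usual ceiling-division grid-size computation; a worked example (BLOCK_SIZE = 256 is a hypothetical value here, the real constant lives elsewhere in the source):

    // gridSize = (inputSize * inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE
    // launches enough BLOCK_SIZE-thread blocks to cover one H x W channel:
    //   inputSize = 32: (1024 + 255) / 256 = 4 blocks, exactly 1024 threads
    //   inputSize = 30: ( 900 + 255) / 256 = 4 blocks, last block partly idle
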
@@ -74,33 +74,33 @@ BatchNorm::~BatchNorm() {
     cudaFree(d_epsilon);
 }
 
-void BatchNorm::initializeWeights() {
+void BatchNorm2D::initializeWeights() {
     std::fill(weights.begin(), weights.end(), 1.0f);
 }
 
-void BatchNorm::initializeBiases() {
+void BatchNorm2D::initializeBiases() {
     std::fill(biases.begin(), biases.end(), 0.0f);
 }
 
-void BatchNorm::setWeights(const float *weights_input) {
+void BatchNorm2D::setWeights(const float *weights_input) {
     std::copy(weights_input, weights_input + weights.size(), weights.begin());
     toCuda();
 }
 
-std::vector<float> BatchNorm::getWeights() {
+std::vector<float> BatchNorm2D::getWeights() {
     return weights;
 }
 
-void BatchNorm::setBiases(const float *biases_input) {
+void BatchNorm2D::setBiases(const float *biases_input) {
     std::copy(biases_input, biases_input + biases.size(), biases.begin());
     toCuda();
 }
 
-std::vector<float> BatchNorm::getBiases() {
+std::vector<float> BatchNorm2D::getBiases() {
     return biases;
 }
 
-void BatchNorm::toCuda() {
+void BatchNorm2D::toCuda() {
     CUDA_CHECK(cudaMemcpy(
         d_weights, weights.data(), sizeof(float) * inputChannels,
         cudaMemcpyHostToDevice
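
Since setWeights and setBiases each end by calling toCuda, loading pretrained per-channel parameters is a two-call affair; a sketch, assuming a bn instance like the one above and hypothetical host-side gamma/beta vectors sized to the channel count:

    #include <vector>

    std::vector<float> gamma(channels, 1.0f);  // hypothetical pretrained scales
    std::vector<float> beta(channels, 0.0f);   // hypothetical pretrained shifts
    bn->setWeights(gamma.data());  // copies into `weights`, then toCuda() syncs d_weights
    bn->setBiases(beta.data());    // copies into `biases`, then toCuda() syncs d_biases
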
@@ -111,15 +111,15 @@ void BatchNorm::toCuda() {
     ));
 }
 
-int BatchNorm::getInputSize() {
+int BatchNorm2D::getInputSize() {
     return inputSize * inputSize * inputChannels;
 }
 
-int BatchNorm::getOutputSize() {
+int BatchNorm2D::getOutputSize() {
     return inputSize * inputSize * inputChannels;
 }
 
-float *BatchNorm::forward(const float *d_input) {
+float *BatchNorm2D::forward(const float *d_input) {
 
     // Compute per-channel batch normalization
     for (int i = 0; i < inputChannels; i++) {
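
The forward pass iterates over channels; here is a CPU reference sketch of what one channel's normalization computes, under the assumption (suggested by the d_mean and d_mean_sub buffers freed in the destructor) that mean and variance are taken over the incoming feature map rather than read from stored running statistics:

    #include <cmath>
    #include <vector>

    // Normalize one H x W channel in place; gamma/beta mirror the layer's
    // per-channel weights/biases, eps mirrors `epsilon`.
    void batchNormChannel(std::vector<float>& x, float gamma, float beta, float eps) {
        float mean = 0.0f;
        for (float v : x) mean += v;
        mean /= x.size();

        float var = 0.0f;
        for (float v : x) var += (v - mean) * (v - mean);
        var /= x.size();

        for (float& v : x) {
            v = gamma * (v - mean) / std::sqrt(var + eps) + beta;
        }
    }
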
@@ -12,7 +12,7 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
 
     cudaError_t cudaStatus;
 
-    CUDANet::Layers::BatchNorm batchNorm(
+    CUDANet::Layers::BatchNorm2D batchNorm(
         inputSize, nChannels, 1e-5f, CUDANet::Layers::ActivationType::NONE
     );
 
@@ -69,7 +69,7 @@ TEST(BatchNormLayerTest, BatchNormSmallForwardTest) {
         0.9126f, 0.71485f, -0.08184f, -0.19131f
     };
 
-    // std::cout << "BatchNorm: " << std::endl;
+    // std::cout << "BatchNorm2D: " << std::endl;
     for (int i = 0; i < output.size(); i++) {
         EXPECT_NEAR(output[i], expected[i], 1e-5);
         // std::cout << output[i] << " ";