Mirror of https://github.com/lordmathis/CUDANet.git

Commit: Migrate conv2d layer
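This commit (the hunks shown here cover the BatchNorm2d layer) continues the CPU/CUDA split of CUDANet's layers: the public header is renamed from batch_norm.cuh to batch_norm.hpp, the platform-agnostic construction, parameter storage, and accessors move to a new src/layers/batch_norm.cpp, and the CUDA allocations, frees, and kernel launches stay in the .cu file as initCUDA(), delCUDA(), and forwardCUDA(), all compiled only under USE_CUDA. The resulting dispatch pattern, condensed from the hunks below (a sketch for orientation, not the verbatim source):

    // forward() stays platform-agnostic and picks a backend at compile time
    float* BatchNorm2d::forward(const float* input) {
    #ifdef USE_CUDA
        return forwardCUDA(input);  // kernel launches live in the .cu file
    #else
        return forwardCPU(input);   // host path; currently throws std::logic_error
    #endif
    }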
@@ -12,7 +12,7 @@
 #include "activation.hpp"
 #include "add.hpp"
 #include "avg_pooling.hpp"
-#include "batch_norm.cuh"
+#include "batch_norm.hpp"
 #include "concat.hpp"
 #include "conv2d.hpp"
 #include "dense.hpp"
@@ -10,7 +10,12 @@ namespace CUDANet::Layers {

 class BatchNorm2d : public WeightedLayer, public TwoDLayer {
   public:
-    BatchNorm2d(shape2d inputSize, int inputChannels, float epsilon, ActivationType activationType);
+    BatchNorm2d(
+        shape2d inputSize,
+        int inputChannels,
+        float epsilon,
+        ActivationType activationType
+    );

     ~BatchNorm2d();
@@ -93,12 +98,14 @@ class BatchNorm2d : public WeightedLayer, public TwoDLayer {
     shape2d getOutputDims();

   private:

     shape2d inputSize;
     int inputChannels;
+    float epsilon;

     int gridSize;

+#ifdef USE_CUDA

     float* d_output;

     float* d_running_mean;
@@ -110,6 +117,19 @@ class BatchNorm2d : public WeightedLayer, public TwoDLayer {
     float* d_weights;
     float* d_biases;

+    void initCUDA();
+    void delCUDA();
+
+    /**
+     * @brief Copy weights and biases to the device
+     *
+     */
+    void toCuda();
+
+    float* forwardCUDA(const float* d_input);
+
+#endif
+
     std::vector<float> weights;
     std::vector<float> biases;

@@ -118,6 +138,8 @@ class BatchNorm2d : public WeightedLayer, public TwoDLayer {

     Activation* activation;

+    float* forwardCPU(const float* input);
+
     /**
      * @brief Initialize weights of the batchnorm layer with zeros
      *
@@ -141,12 +163,6 @@ class BatchNorm2d : public WeightedLayer, public TwoDLayer {
      *
      */
     void initializeRunningVar();
-
-    /**
-     * @brief Copy weights and biases to the device
-     *
-     */
-    void toCuda();
 };

 } // namespace CUDANet::Layers
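Note: after these header hunks, every device pointer and device-only method sits inside a single #ifdef USE_CUDA block, so batch_norm.hpp (formerly batch_norm.cuh) can be included in CPU-only builds without the CUDA toolkit. A condensed view of the private section as it reads after the change (a sketch for orientation, not the verbatim header):

    private:
        shape2d inputSize;
        int inputChannels;
        float epsilon;
        int gridSize;
    #ifdef USE_CUDA
        float* d_output;
        float* d_running_mean;
        // ... remaining device buffers ...
        void initCUDA();
        void delCUDA();
        void toCuda();  // copy weights and biases to the device
        float* forwardCUDA(const float* d_input);
    #endif
        std::vector<float> weights;
        std::vector<float> biases;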
@@ -1,7 +1,7 @@
 #include <vector>

 #include "activation.hpp"
-#include "batch_norm.cuh"
+#include "batch_norm.hpp"
 #include "cuda_helper.cuh"
 #include "layer.hpp"
 #include "matmul.cuh"
@@ -9,17 +9,7 @@

 using namespace CUDANet::Layers;

-BatchNorm2d::BatchNorm2d(
-    shape2d inputSize,
-    int inputChannels,
-    float epsilon,
-    ActivationType activationType
-)
-    : inputSize(inputSize), inputChannels(inputChannels) {
-    activation = new Activation(
-        activationType, inputSize.first * inputSize.second * inputChannels
-    );
-
+void BatchNorm2d::initCUDA() {
     d_output = nullptr;
     CUDA_CHECK(cudaMalloc(
         (void **)&d_output,
@@ -27,14 +17,14 @@ BatchNorm2d::BatchNorm2d(
     ));

     d_running_mean = nullptr;
-    CUDA_CHECK(cudaMalloc(
-        (void **)&d_running_mean, sizeof(float) * inputChannels
-    ));
+    CUDA_CHECK(
+        cudaMalloc((void **)&d_running_mean, sizeof(float) * inputChannels)
+    );

     d_running_var = nullptr;
-    CUDA_CHECK(cudaMalloc(
-        (void **)&d_running_var, sizeof(float) * inputChannels
-    ));
+    CUDA_CHECK(
+        cudaMalloc((void **)&d_running_var, sizeof(float) * inputChannels)
+    );

     d_weights = nullptr;
     CUDA_CHECK(cudaMalloc((void **)&d_weights, sizeof(float) * inputChannels));
@@ -55,24 +45,11 @@ BatchNorm2d::BatchNorm2d(
         cudaMemcpy(d_epsilon, &epsilon, sizeof(float), cudaMemcpyHostToDevice)
     );

-    weights.resize(inputChannels);
-    biases.resize(inputChannels);
-
-    running_mean.resize(inputChannels);
-    running_var.resize(inputChannels);
-
-    initializeWeights();
-    initializeBiases();
-    initializeRunningMean();
-    initializeRunningVar();
-
-    toCuda();
-
     gridSize =
         (inputSize.first * inputSize.second + BLOCK_SIZE - 1) / BLOCK_SIZE;
 }

-BatchNorm2d::~BatchNorm2d() {
+void BatchNorm2d::delCUDA() {
     cudaFree(d_output);
     cudaFree(d_running_mean);
     cudaFree(d_running_var);
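Note: the gridSize computation kept above is the standard ceiling-division launch configuration: for n = inputSize.first * inputSize.second elements and BLOCK_SIZE threads per block, (n + BLOCK_SIZE - 1) / BLOCK_SIZE rounds up so every element gets a thread. For example, n = 1000 with BLOCK_SIZE = 256 gives gridSize = 4, i.e. 1024 threads, with the excess threads presumably bounds-checked inside the kernels.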
@@ -82,58 +59,6 @@ BatchNorm2d::~BatchNorm2d() {
     cudaFree(d_epsilon);
 }

-void BatchNorm2d::initializeWeights() {
-    std::fill(weights.begin(), weights.end(), 1.0f);
-}
-
-void BatchNorm2d::initializeBiases() {
-    std::fill(biases.begin(), biases.end(), 0.0f);
-}
-
-void BatchNorm2d::initializeRunningMean() {
-    std::fill(running_mean.begin(), running_mean.end(), 0.0f);
-}
-
-void BatchNorm2d::initializeRunningVar() {
-    std::fill(running_var.begin(), running_var.end(), 1.0f);
-}
-
-void BatchNorm2d::setWeights(const float *weights_input) {
-    std::copy(weights_input, weights_input + weights.size(), weights.begin());
-    toCuda();
-}
-
-std::vector<float> BatchNorm2d::getWeights() {
-    return weights;
-}
-
-void BatchNorm2d::setBiases(const float *biases_input) {
-    std::copy(biases_input, biases_input + biases.size(), biases.begin());
-    toCuda();
-}
-
-std::vector<float> BatchNorm2d::getBiases() {
-    return biases;
-}
-
-void BatchNorm2d::setRunningMean(const float* running_mean_input) {
-    std::copy(running_mean_input, running_mean_input + inputChannels, running_mean.begin());
-    toCuda();
-}
-
-std::vector<float> BatchNorm2d::getRunningMean() {
-    return running_mean;
-}
-
-void BatchNorm2d::setRunningVar(const float* running_var_input) {
-    std::copy(running_var_input, running_var_input + inputChannels, running_var.begin());
-    toCuda();
-}
-
-std::vector<float> BatchNorm2d::getRunningVar() {
-    return running_var;
-}
-
 void BatchNorm2d::toCuda() {
     CUDA_CHECK(cudaMemcpy(
         d_weights, weights.data(), sizeof(float) * inputChannels,
@@ -153,22 +78,9 @@ void BatchNorm2d::toCuda() {
     ));
 }

-int BatchNorm2d::getInputSize() {
-    return inputSize.first * inputSize.second * inputChannels;
-}
-
-int BatchNorm2d::getOutputSize() {
-    return inputSize.first * inputSize.second * inputChannels;
-}
-
-shape2d BatchNorm2d::getOutputDims() {
-    return inputSize;
-}
-
-float *BatchNorm2d::forward(const float *d_input) {
+float *BatchNorm2d::forwardCUDA(const float *d_input) {
     // Compute per-channel batch normalization
     for (int i = 0; i < inputChannels; i++) {

         // Subtract mean from input
         Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
             d_input + i * inputSize.first * inputSize.second,
@@ -181,17 +93,14 @@
         Kernels::vec_scale<<<gridSize, BLOCK_SIZE>>>(
             d_output + i * inputSize.first * inputSize.second,
             d_output + i * inputSize.first * inputSize.second,
-            &d_running_var[i],
-            d_epsilon,
-            inputSize.first * inputSize.second
+            &d_running_var[i], d_epsilon, inputSize.first * inputSize.second
         );
         CUDA_CHECK(cudaGetLastError());

         // Multiply by weights
         Kernels::vec_scalar_mul<<<gridSize, BLOCK_SIZE>>>(
             d_output + i * inputSize.first * inputSize.second,
-            d_output + i * inputSize.first * inputSize.second,
-            &d_weights[i],
+            d_output + i * inputSize.first * inputSize.second, &d_weights[i],
             inputSize.first * inputSize.second
         );
         CUDA_CHECK(cudaGetLastError());
@@ -199,8 +108,7 @@
         // Add biases
         Kernels::vec_scalar_add<<<gridSize, BLOCK_SIZE>>>(
             d_output + i * inputSize.first * inputSize.second,
-            d_output + i * inputSize.first * inputSize.second,
-            &d_biases[i],
+            d_output + i * inputSize.first * inputSize.second, &d_biases[i],
             inputSize.first * inputSize.second
         );
         CUDA_CHECK(cudaGetLastError());
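Note: the four kernel launches in forwardCUDA (vec_scalar_sub, vec_scale, vec_scalar_mul, vec_scalar_add) apply standard inference-time batch normalization per channel. Assuming Kernels::vec_scale divides by sqrt(running_var + epsilon), which is what batch normalization requires, each element is transformed as y = weight[c] * (x - running_mean[c]) / sqrt(running_var[c] + epsilon) + bias[c]. A host-side reference for that arithmetic (a hypothetical helper for clarity, not code from this commit):

    #include <cmath>
    #include <vector>

    // Per channel c over n = H * W elements:
    //   y = w[c] * (x - mean[c]) / sqrt(var[c] + eps) + b[c]
    void batchNormReference(
        const std::vector<float>& in, std::vector<float>& out,
        const std::vector<float>& mean, const std::vector<float>& var,
        const std::vector<float>& w, const std::vector<float>& b,
        int channels, int n, float eps
    ) {
        for (int c = 0; c < channels; ++c) {
            for (int j = 0; j < n; ++j) {
                float x = in[c * n + j];
                out[c * n + j] =
                    w[c] * (x - mean[c]) / std::sqrt(var[c] + eps) + b[c];
            }
        }
    }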
src/layers/batch_norm.cpp (new file, 133 lines)
@@ -0,0 +1,133 @@
+#include "batch_norm.hpp"
+
+#include <stdexcept>
+#include <vector>
+
+#include "activation.hpp"
+#include "layer.hpp"
+
+using namespace CUDANet::Layers;
+
+BatchNorm2d::BatchNorm2d(
+    shape2d inputSize,
+    int inputChannels,
+    float epsilon,
+    ActivationType activationType
+)
+    : inputSize(inputSize), inputChannels(inputChannels), epsilon(epsilon) {
+    activation = new Activation(
+        activationType, inputSize.first * inputSize.second * inputChannels
+    );
+
+    weights.resize(inputChannels);
+    biases.resize(inputChannels);
+
+    running_mean.resize(inputChannels);
+    running_var.resize(inputChannels);
+
+    initializeWeights();
+    initializeBiases();
+    initializeRunningMean();
+    initializeRunningVar();
+
+#ifdef USE_CUDA
+    initCUDA();
+    toCuda();
+#endif
+}
+
+BatchNorm2d::~BatchNorm2d() {
+#ifdef USE_CUDA
+    delCUDA();
+#endif
+}
+
+void BatchNorm2d::initializeWeights() {
+    std::fill(weights.begin(), weights.end(), 1.0f);
+}
+
+void BatchNorm2d::initializeBiases() {
+    std::fill(biases.begin(), biases.end(), 0.0f);
+}
+
+void BatchNorm2d::initializeRunningMean() {
+    std::fill(running_mean.begin(), running_mean.end(), 0.0f);
+}
+
+void BatchNorm2d::initializeRunningVar() {
+    std::fill(running_var.begin(), running_var.end(), 1.0f);
+}
+
+void BatchNorm2d::setWeights(const float* weights_input) {
+    std::copy(weights_input, weights_input + weights.size(), weights.begin());
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> BatchNorm2d::getWeights() {
+    return weights;
+}
+
+void BatchNorm2d::setBiases(const float* biases_input) {
+    std::copy(biases_input, biases_input + biases.size(), biases.begin());
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> BatchNorm2d::getBiases() {
+    return biases;
+}
+
+void BatchNorm2d::setRunningMean(const float* running_mean_input) {
+    std::copy(
+        running_mean_input, running_mean_input + inputChannels,
+        running_mean.begin()
+    );
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> BatchNorm2d::getRunningMean() {
+    return running_mean;
+}
+
+void BatchNorm2d::setRunningVar(const float* running_var_input) {
+    std::copy(
+        running_var_input, running_var_input + inputChannels,
+        running_var.begin()
+    );
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> BatchNorm2d::getRunningVar() {
+    return running_var;
+}
+
+int BatchNorm2d::getInputSize() {
+    return inputSize.first * inputSize.second * inputChannels;
+}
+
+int BatchNorm2d::getOutputSize() {
+    return inputSize.first * inputSize.second * inputChannels;
+}
+
+shape2d BatchNorm2d::getOutputDims() {
+    return inputSize;
+}
+
+float* BatchNorm2d::forwardCPU(const float* input) {
+    throw std::logic_error("Not implemented");
+}
+
+float* BatchNorm2d::forward(const float* input) {
+#ifdef USE_CUDA
+    return forwardCUDA(input);
+#else
+    return forwardCPU(input);
+#endif
+}
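Note: with the new batch_norm.cpp in place, callers construct and use the layer identically on both backends; the setters re-copy parameters to the device only when USE_CUDA is defined. A hypothetical usage sketch (the shape2d initializer syntax and the ActivationType::NONE value are assumptions, not taken from this diff; meanData and varData are placeholder per-channel buffers):

    // 3-channel 32x32 input, eps = 1e-5, no fused activation (assumed enum value)
    CUDANet::Layers::BatchNorm2d bn({32, 32}, 3, 1e-5f, ActivationType::NONE);
    bn.setRunningMean(meanData.data());  // one float per channel; re-syncs device copy
    bn.setRunningVar(varData.data());
    float* out = bn.forward(input);      // forwardCUDA under USE_CUDA, else forwardCPU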
@@ -9,7 +9,7 @@

 #include "input.hpp"
 #include "layer.hpp"
-#include "batch_norm.cuh"
+#include "batch_norm.hpp"

 using namespace CUDANet;

@@ -4,7 +4,7 @@
 #include <vector>

 #include "activation.hpp"
-#include "batch_norm.cuh"
+#include "batch_norm.hpp"

 class BatchNormLayerTest : public ::testing::Test {
   protected: