Migrate Dense layer

2024-09-08 13:36:53 +02:00
parent 0dca8348bd
commit 75475790ac
24 changed files with 147 additions and 106 deletions

View File

@@ -8,15 +8,15 @@
 #include "pooling.cuh"
 // Layers
-#include "activation.cuh"
+#include "activation.hpp"
 #include "add.cuh"
 #include "avg_pooling.cuh"
 #include "batch_norm.cuh"
 #include "concat.cuh"
 #include "conv2d.cuh"
-#include "dense.cuh"
+#include "dense.hpp"
 #include "input.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "max_pooling.cuh"
 #include "output.cuh"

View File

@@ -2,7 +2,7 @@
 #define CUDANET_CONVOLUTION_H
 #include <cuda_runtime.h>
-#include "layer.cuh"
+#include "layer.hpp"
 namespace CUDANet::Kernels {

View File

@@ -2,7 +2,7 @@
 #define CUDANET_POOLING_H
 #include <cuda_runtime.h>
-#include "layer.cuh"
+#include "layer.hpp"
 namespace CUDANet::Kernels {

View File

@@ -48,6 +48,8 @@ class Activation {
     ActivationType activationType;
     int length;

+    void activateCPU(float* input);
+
 #ifdef USE_CUDA
     int gridSize;
@@ -58,10 +60,7 @@ class Activation {
     void initCUDA();
     void delCUDA();
-
-#else
-    void activateCPU(float* input);
 #endif
 };
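
Net effect of the two hunks above: activateCPU moves out of the #else branch and is declared in every build, so a CUDA build can also carry the host implementation. A minimal sketch of the resulting private section, assembled from this diff (member order and the elided members are assumptions):

class Activation {
    // ... public interface elided ...

  private:
    ActivationType activationType;
    int length;

    // Host path, now declared unconditionally
    void activateCPU(float* input);

#ifdef USE_CUDA
    // Device-only state and helpers remain guarded
    int gridSize;

    void initCUDA();
    void delCUDA();
#endif
};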

View File

@@ -1,8 +1,8 @@
 #ifndef CUDANET_AVG_POOLING_H
 #define CUDANET_AVG_POOLING_H
-#include "activation.cuh"
-#include "layer.cuh"
+#include "activation.hpp"
+#include "layer.hpp"
 namespace CUDANet::Layers {

View File

@@ -3,8 +3,8 @@
 #include <vector>
-#include "activation.cuh"
-#include "layer.cuh"
+#include "activation.hpp"
+#include "layer.hpp"
 namespace CUDANet::Layers {

View File

@@ -3,9 +3,9 @@
 #include <vector>
-#include "activation.cuh"
+#include "activation.hpp"
 #include "convolution.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 namespace CUDANet::Layers {

View File

@@ -3,8 +3,8 @@
 #include <vector>
-#include "activation.cuh"
-#include "layer.cuh"
+#include "activation.hpp"
+#include "layer.hpp"
 namespace CUDANet::Layers {
@@ -84,20 +84,11 @@ class Dense : public WeightedLayer {
     int inputSize;
     int outputSize;

-    float* d_output;
-    float* d_weights;
-    float* d_biases;
-
     std::vector<float> weights;
     std::vector<float> biases;

     Layers::Activation* activation;

-    // Precompute kernel launch parameters
-    int forwardGridSize;
-    int biasGridSize;
-
     /**
      * @brief Initialize the weights to zeros
      *
@@ -110,11 +101,30 @@
      */
     void initializeBiases();

+    float* forwardCPU(const float* input);
+
+#ifdef USE_CUDA
+    float* d_output;
+    float* d_weights;
+    float* d_biases;
+
+    // Precompute kernel launch parameters
+    int forwardGridSize;
+    int biasGridSize;
+
     /**
      * @brief Copy the weights and biases to the device
      *
      */
     void toCuda();
+
+    void initCUDA();
+    void delCUDA();
+
+    float* forwardCUDA(const float* d_input);
+#endif
 };
 } // namespace CUDANet::Layers
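
Read together, the hunks above leave Dense with host-side state that exists in every build and device-side state compiled only under USE_CUDA. A sketch of the resulting private layout, assembled from this diff (the public interface and doc comments are elided, indentation assumed):

class Dense : public WeightedLayer {
    // ... constructor, forward(), accessors elided ...

  private:
    int inputSize;
    int outputSize;

    // Host-side parameters, present in every build
    std::vector<float> weights;
    std::vector<float> biases;

    Layers::Activation* activation;

    void initializeWeights();
    void initializeBiases();

    float* forwardCPU(const float* input);

#ifdef USE_CUDA
    // Device mirrors of the parameters, plus precomputed launch parameters
    float* d_output;
    float* d_weights;
    float* d_biases;

    int forwardGridSize;
    int biasGridSize;

    void toCuda();
    void initCUDA();
    void delCUDA();

    float* forwardCUDA(const float* d_input);
#endif
};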

View File

@@ -1,7 +1,7 @@
 #ifndef CUDANET_INPUT_LAYER_H
 #define CUDANET_INPUT_LAYER_H
-#include "layer.cuh"
+#include "layer.hpp"
 namespace CUDANet::Layers {

View File

@@ -111,10 +111,12 @@ class WeightedLayer : public SequentialLayer {
      */
     virtual void initializeBiases() = 0;

+#ifdef USE_CUDA
     /**
      * @brief Copy the weights and biases to the device
      */
     virtual void toCuda() = 0;
+#endif
 };
 } // namespace CUDANet::Layers

View File

@@ -1,8 +1,8 @@
 #ifndef CUDANET_MAX_POOLING_H
 #define CUDANET_MAX_POOLING_H
-#include "activation.cuh"
-#include "layer.cuh"
+#include "activation.hpp"
+#include "layer.hpp"
 namespace CUDANet::Layers {

View File

@@ -1,7 +1,7 @@
 #ifndef CUDANET_OUTPUT_LAYER_H
 #define CUDANET_OUTPUT_LAYER_H
-#include "layer.cuh"
+#include "layer.hpp"
 namespace CUDANet::Layers {

View File

@@ -6,7 +6,7 @@
 #include <vector>
 #include "input.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "module.hpp"
 #include "output.cuh"

View File

@@ -5,7 +5,7 @@
 #include <unordered_map>
 #include <vector>
-#include "layer.cuh"
+#include "layer.hpp"
 namespace CUDANet {

View File

@@ -1,5 +1,5 @@
 #include "cuda_helper.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "pooling.cuh"
 using namespace CUDANet;

View File

@@ -6,26 +6,14 @@
 #include <iostream>
 #include "vector.cuh"
-#include "activation.cuh"
+#include "activation.hpp"
 #include "cuda_helper.cuh"
-#include "dense.cuh"
+#include "dense.hpp"
 #include "matmul.cuh"

 using namespace CUDANet::Layers;

-Dense::Dense(
-    int inputSize,
-    int outputSize,
-    ActivationType activationType
-)
-    : inputSize(inputSize), outputSize(outputSize) {
-    // Allocate memory for weights and biases
-    weights.resize(outputSize * inputSize);
-    biases.resize(outputSize);
-
-    initializeWeights();
-    initializeBiases();
+void Dense::initCUDA() {
     d_output = nullptr;
     CUDA_CHECK(cudaMalloc((void**)&d_output, sizeof(float) * outputSize));
@@ -44,27 +32,26 @@ Dense::Dense(
     forwardGridSize =
         (std::max(inputSize, outputSize) + BLOCK_SIZE - 1) / BLOCK_SIZE;
     biasGridSize = (outputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
-
-    activation = new Activation(activationType, outputSize);
 }

-Dense::~Dense() {
+void Dense::delCUDA() {
     cudaFree(d_output);
     cudaFree(d_weights);
     cudaFree(d_biases);
-
-    delete activation;
 }

-void Dense::initializeWeights() {
-    std::fill(weights.begin(), weights.end(), 0.0f);
+void Dense::toCuda() {
+    CUDA_CHECK(cudaMemcpy(
+        d_weights, weights.data(), sizeof(float) * inputSize * outputSize,
+        cudaMemcpyHostToDevice
+    ));
+    CUDA_CHECK(cudaMemcpy(
+        d_biases, biases.data(), sizeof(float) * outputSize,
+        cudaMemcpyHostToDevice
+    ));
 }

-void Dense::initializeBiases() {
-    std::fill(biases.begin(), biases.end(), 0.0f);
-}
-
-float* Dense::forward(const float* d_input) {
+float* Dense::forwardCUDA(const float* d_input) {
     Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>(
         d_weights, d_input, d_output, inputSize, outputSize
     );
@@ -80,40 +67,3 @@ float* Dense::forward(const float* d_input) {
     return d_output;
 }
-
-void Dense::toCuda() {
-    CUDA_CHECK(cudaMemcpy(
-        d_weights, weights.data(), sizeof(float) * inputSize * outputSize,
-        cudaMemcpyHostToDevice
-    ));
-    CUDA_CHECK(cudaMemcpy(
-        d_biases, biases.data(), sizeof(float) * outputSize,
-        cudaMemcpyHostToDevice
-    ));
-}
-
-void Dense::setWeights(const float* weights_input) {
-    std::copy(weights_input, weights_input + weights.size(), weights.begin());
-    toCuda();
-}
-
-std::vector<float> Dense::getWeights() {
-    return weights;
-}
-
-void Dense::setBiases(const float* biases_input) {
-    std::copy(biases_input, biases_input + biases.size(), biases.begin());
-    toCuda();
-}
-
-std::vector<float> Dense::getBiases() {
-    return biases;
-}
-
-int Dense::getOutputSize() {
-    return outputSize;
-}
-
-int Dense::getInputSize() {
-    return inputSize;
-}
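
Only the entry and exit of forwardCUDA are visible above; the @@ -80,40 +67,3 @@ hunk elides its middle. A hedged sketch of the full body, assuming it carries over the bias add and activation from the pre-migration forward; the vec_vec_add kernel name and the activation->activate call are assumptions, while mat_vec_mul, BLOCK_SIZE, and the two grid sizes do appear in this diff:

float* Dense::forwardCUDA(const float* d_input) {
    // Visible in the diff: matrix-vector product into d_output
    Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>(
        d_weights, d_input, d_output, inputSize, outputSize
    );

    // Assumed: element-wise bias add using the precomputed biasGridSize
    Kernels::vec_vec_add<<<biasGridSize, BLOCK_SIZE>>>(
        d_biases, d_output, d_output, outputSize
    );

    // Assumed: activation applied to the device buffer in place
    activation->activate(d_output);

    return d_output;
}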

View File

@@ -1,9 +1,9 @@
 #include <vector>
-#include "activation.cuh"
+#include "activation.hpp"
 #include "batch_norm.cuh"
 #include "cuda_helper.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "matmul.cuh"
 #include "vector.cuh"

View File

@@ -1,11 +1,11 @@
 #include <iostream>
 #include <vector>
-#include "activation.cuh"
+#include "activation.hpp"
 #include "conv2d.cuh"
 #include "convolution.cuh"
 #include "cuda_helper.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "matmul.cuh"
 #include "vector.cuh"

src/layers/dense.cpp (new file, 80 lines)
View File

@@ -0,0 +1,80 @@
+#include "dense.hpp"
+
+#include <stdexcept>
+
+#include "activation.hpp"
+
+using namespace CUDANet::Layers;
+
+Dense::Dense(int inputSize, int outputSize, ActivationType activationType)
+    : inputSize(inputSize), outputSize(outputSize) {
+    // Allocate memory for weights and biases
+    weights.resize(outputSize * inputSize);
+    biases.resize(outputSize);
+
+    initializeWeights();
+    initializeBiases();
+
+    activation = new Activation(activationType, outputSize);
+
+#ifdef USE_CUDA
+    initCUDA();
+#endif
+}
+
+Dense::~Dense() {
+    delete activation;
+#ifdef USE_CUDA
+    delCUDA();
+#endif
+}
+
+void Dense::initializeWeights() {
+    std::fill(weights.begin(), weights.end(), 0.0f);
+}
+
+void Dense::initializeBiases() {
+    std::fill(biases.begin(), biases.end(), 0.0f);
+}
+
+float* Dense::forwardCPU(const float* input) {
+    throw std::logic_error("Not implemented");
+}
+
+float* Dense::forward(const float* input) {
+#ifdef USE_CUDA
+    return forwardCUDA(input);
+#else
+    return forwardCPU(input);
+#endif
+}
+
+void Dense::setWeights(const float* weights_input) {
+    std::copy(weights_input, weights_input + weights.size(), weights.begin());
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> Dense::getWeights() {
+    return weights;
+}
+
+void Dense::setBiases(const float* biases_input) {
+    std::copy(biases_input, biases_input + biases.size(), biases.begin());
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> Dense::getBiases() {
+    return biases;
+}
+
+int Dense::getOutputSize() {
+    return outputSize;
+}
+
+int Dense::getInputSize() {
+    return inputSize;
+}
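
Since the constructor, accessors, and zero-initialization are now plain C++, the layer can be built and inspected without a GPU. A small usage sketch against the API above; ActivationType::SIGMOID is an assumed enum value, as only the type name appears in this diff:

#include <cassert>
#include <vector>

#include "dense.hpp"

using namespace CUDANet::Layers;

int main() {
    // 3 inputs, 2 outputs; weights and biases start zero-initialized
    Dense dense(3, 2, ActivationType::SIGMOID);

    assert(dense.getInputSize() == 3);
    assert(dense.getOutputSize() == 2);

    // setWeights copies the host buffer in and, under USE_CUDA,
    // mirrors it to the device via toCuda()
    std::vector<float> w(3 * 2, 0.1f);
    dense.setWeights(w.data());
    assert(dense.getWeights().front() == 0.1f);

    // forward() dispatches to forwardCUDA (expects a device pointer) or
    // to forwardCPU, which currently throws std::logic_error
    return 0;
}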

View File

@@ -8,7 +8,7 @@
 #include <vector>
 #include "input.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "batch_norm.cuh"
 using namespace CUDANet;

View File

@@ -1,4 +1,4 @@
-#include "activation.cuh"
+#include "activation.hpp"
 #include <gtest/gtest.h>
 #include <cuda_runtime.h>
 #include <vector>

View File

@@ -3,7 +3,7 @@
 #include <vector>
-#include "activation.cuh"
+#include "activation.hpp"
 #include "batch_norm.cuh"
 class BatchNormLayerTest : public ::testing::Test {

View File

@@ -3,8 +3,8 @@
 #include <iostream>
-#include "activation.cuh"
-#include "dense.cuh"
+#include "activation.hpp"
+#include "dense.hpp"
 class DenseLayerTest : public ::testing::Test {
   protected:

View File

@@ -1,7 +1,7 @@
 #include <gtest/gtest.h>
 #include "conv2d.cuh"
-#include "dense.cuh"
+#include "dense.hpp"
 #include "max_pooling.cuh"
 #include "model.hpp"