Migrate Dense layer

commit 75475790ac
parent 0dca8348bd
2024-09-08 13:36:53 +02:00
24 changed files with 147 additions and 106 deletions


@@ -1,5 +1,5 @@
 #include "cuda_helper.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "pooling.cuh"

 using namespace CUDANet;


@@ -6,26 +6,14 @@
 #include <iostream>

 #include "vector.cuh"
-#include "activation.cuh"
+#include "activation.hpp"
 #include "cuda_helper.cuh"
-#include "dense.cuh"
+#include "dense.hpp"
 #include "matmul.cuh"

 using namespace CUDANet::Layers;

-Dense::Dense(
-    int inputSize,
-    int outputSize,
-    ActivationType activationType
-)
-    : inputSize(inputSize), outputSize(outputSize) {
-    // Allocate memory for weights and biases
-    weights.resize(outputSize * inputSize);
-    biases.resize(outputSize);
-
-    initializeWeights();
-    initializeBiases();
-
+void Dense::initCUDA() {
     d_output = nullptr;
     CUDA_CHECK(cudaMalloc((void**)&d_output, sizeof(float) * outputSize));
@@ -44,27 +32,26 @@ Dense::Dense(
     forwardGridSize =
         (std::max(inputSize, outputSize) + BLOCK_SIZE - 1) / BLOCK_SIZE;
     biasGridSize = (outputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;

-    activation = new Activation(activationType, outputSize);
 }

-Dense::~Dense() {
+void Dense::delCUDA() {
     cudaFree(d_output);
     cudaFree(d_weights);
     cudaFree(d_biases);
-
-    delete activation;
 }

-void Dense::initializeWeights() {
-    std::fill(weights.begin(), weights.end(), 0.0f);
+void Dense::toCuda() {
+    CUDA_CHECK(cudaMemcpy(
+        d_weights, weights.data(), sizeof(float) * inputSize * outputSize,
+        cudaMemcpyHostToDevice
+    ));
+    CUDA_CHECK(cudaMemcpy(
+        d_biases, biases.data(), sizeof(float) * outputSize,
+        cudaMemcpyHostToDevice
+    ));
 }

-void Dense::initializeBiases() {
-    std::fill(biases.begin(), biases.end(), 0.0f);
-}
-
-float* Dense::forward(const float* d_input) {
+float* Dense::forwardCUDA(const float* d_input) {
     Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>(
         d_weights, d_input, d_output, inputSize, outputSize
     );
@@ -80,40 +67,3 @@ float* Dense::forward(const float* d_input) {
     return d_output;
 }
-
-void Dense::toCuda() {
-    CUDA_CHECK(cudaMemcpy(
-        d_weights, weights.data(), sizeof(float) * inputSize * outputSize,
-        cudaMemcpyHostToDevice
-    ));
-    CUDA_CHECK(cudaMemcpy(
-        d_biases, biases.data(), sizeof(float) * outputSize,
-        cudaMemcpyHostToDevice
-    ));
-}
-
-void Dense::setWeights(const float* weights_input) {
-    std::copy(weights_input, weights_input + weights.size(), weights.begin());
-    toCuda();
-}
-
-std::vector<float> Dense::getWeights() {
-    return weights;
-}
-
-void Dense::setBiases(const float* biases_input) {
-    std::copy(biases_input, biases_input + biases.size(), biases.begin());
-    toCuda();
-}
-
-std::vector<float> Dense::getBiases() {
-    return biases;
-}
-
-int Dense::getOutputSize() {
-    return outputSize;
-}
-
-int Dense::getInputSize() {
-    return inputSize;
-}
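
For orientation, here is a rough sketch of what the split class declaration in dense.hpp could look like after this migration. The header itself is not shown in the hunks above: member and method names are taken from the diff, while the include-guard name, access levels, field types, and any base class are assumptions.

#ifndef DENSE_HPP
#define DENSE_HPP

#include <vector>

#include "activation.hpp"
#include "layer.hpp"

namespace CUDANet::Layers {

class Dense {
  public:
    Dense(int inputSize, int outputSize, ActivationType activationType);
    ~Dense();

    // Dispatches to forwardCUDA() or forwardCPU() depending on USE_CUDA
    float* forward(const float* input);

    void setWeights(const float* weights_input);
    std::vector<float> getWeights();
    void setBiases(const float* biases_input);
    std::vector<float> getBiases();
    int getOutputSize();
    int getInputSize();

  private:
    void initializeWeights();
    void initializeBiases();
    float* forwardCPU(const float* input);

    int inputSize;
    int outputSize;
    std::vector<float> weights;
    std::vector<float> biases;
    Activation* activation;

#ifdef USE_CUDA
    void initCUDA();
    void delCUDA();
    void toCuda();
    float* forwardCUDA(const float* d_input);

    float* d_output;
    float* d_weights;
    float* d_biases;
    int forwardGridSize;
    int biasGridSize;
#endif
};

}  // namespace CUDANet::Layers

#endif  // DENSE_HPP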


@@ -1,9 +1,9 @@
 #include <vector>

-#include "activation.cuh"
+#include "activation.hpp"
 #include "batch_norm.cuh"
 #include "cuda_helper.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "matmul.cuh"
 #include "vector.cuh"


@@ -1,11 +1,11 @@
 #include <iostream>
 #include <vector>

-#include "activation.cuh"
+#include "activation.hpp"
 #include "conv2d.cuh"
 #include "convolution.cuh"
 #include "cuda_helper.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "matmul.cuh"
 #include "vector.cuh"

src/layers/dense.cpp Normal file (+80)

@@ -0,0 +1,80 @@
+#include "dense.hpp"
+
+#include <stdexcept>
+
+#include "activation.hpp"
+
+using namespace CUDANet::Layers;
+
+Dense::Dense(int inputSize, int outputSize, ActivationType activationType)
+    : inputSize(inputSize), outputSize(outputSize) {
+    // Allocate memory for weights and biases
+    weights.resize(outputSize * inputSize);
+    biases.resize(outputSize);
+
+    initializeWeights();
+    initializeBiases();
+
+    activation = new Activation(activationType, outputSize);
+
+#ifdef USE_CUDA
+    initCUDA();
+#endif
+}
+
+Dense::~Dense() {
+    delete activation;
+#ifdef USE_CUDA
+    delCUDA();
+#endif
+}
+
+void Dense::initializeWeights() {
+    std::fill(weights.begin(), weights.end(), 0.0f);
+}
+
+void Dense::initializeBiases() {
+    std::fill(biases.begin(), biases.end(), 0.0f);
+}
+
+float* Dense::forwardCPU(const float* input) {
+    throw std::logic_error("Not implemented");
+}
+
+float* Dense::forward(const float* input) {
+#ifdef USE_CUDA
+    return forwardCUDA(input);
+#else
+    return forwardCPU(input);
+#endif
+}
+
+void Dense::setWeights(const float* weights_input) {
+    std::copy(weights_input, weights_input + weights.size(), weights.begin());
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> Dense::getWeights() {
+    return weights;
+}
+
+void Dense::setBiases(const float* biases_input) {
+    std::copy(biases_input, biases_input + biases.size(), biases.begin());
+#ifdef USE_CUDA
+    toCuda();
+#endif
+}
+
+std::vector<float> Dense::getBiases() {
+    return biases;
+}
+
+int Dense::getOutputSize() {
+    return outputSize;
+}
+
+int Dense::getInputSize() {
+    return inputSize;
+}
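
Note that forwardCPU() is left as a stub in this commit. A plain host-side fallback mirroring the CUDA path (matrix-vector multiply, bias add, activation) might look like the sketch below. The output member buffer and the activateCPU() hook are hypothetical and do not exist in this diff, and the row-major weight layout is assumed from the mat_vec_mul call above.

// Hypothetical CPU fallback; `output` (a std::vector<float> member) and
// Activation::activateCPU() are assumptions, not part of this commit.
float* Dense::forwardCPU(const float* input) {
    for (int i = 0; i < outputSize; ++i) {
        float sum = 0.0f;
        // Row-major weights assumed: row i feeds output unit i
        for (int j = 0; j < inputSize; ++j) {
            sum += weights[i * inputSize + j] * input[j];
        }
        output[i] = sum + biases[i];
    }
    activation->activateCPU(output.data());  // hypothetical host-side hook
    return output.data();
}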


@@ -8,7 +8,7 @@
 #include <vector>

 #include "input.cuh"
-#include "layer.cuh"
+#include "layer.hpp"
 #include "batch_norm.cuh"

 using namespace CUDANet;
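
After this migration the public API is backend-agnostic: forward() dispatches to forwardCUDA() or forwardCPU() depending on USE_CUDA. A minimal usage sketch follows; the ActivationType::RELU value and the weight/bias fill values are illustrative assumptions.

#include <vector>

#include "dense.hpp"

using namespace CUDANet::Layers;

int main() {
    // 128 inputs -> 64 outputs; the activation value is an assumed enum member
    Dense dense(128, 64, ActivationType::RELU);

    std::vector<float> w(64 * 128, 0.01f);  // illustrative weights
    std::vector<float> b(64, 0.0f);
    dense.setWeights(w.data());  // also copied to the device when USE_CUDA is set
    dense.setBiases(b.data());

    // With USE_CUDA, forward() expects a device pointer; without it, a host
    // pointer. (forwardCPU() still throws in this commit.)
    // float* out = dense.forward(input_ptr);
    return 0;
}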