Start dense layer implementation

This commit is contained in:
2024-02-08 19:19:51 +01:00
parent b16ec69469
commit 950021389c
6 changed files with 121 additions and 12 deletions

2
.gitignore vendored
View File

@@ -32,3 +32,5 @@
*.app *.app
build/ build/
.vscode/
.cache

View File

@@ -27,6 +27,7 @@ target_link_libraries(${PROJECT_NAME} ${CUDA_cublas_LIBRARY})
target_include_directories(${PROJECT_NAME} PRIVATE target_include_directories(${PROJECT_NAME} PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/include
${CMAKE_CURRENT_SOURCE_DIR}/include/utils ${CMAKE_CURRENT_SOURCE_DIR}/include/utils
${CMAKE_CURRENT_SOURCE_DIR}/include/layers
${CMAKE_CURRENT_SOURCE_DIR}/src ${CMAKE_CURRENT_SOURCE_DIR}/src
) )

28
include/layers/conv.h Normal file
View File

@@ -0,0 +1,28 @@
// conv.h
#ifndef CONV_LAYER_H
#define CONV_LAYER_H
#include <cublas_v2.h>
namespace Layers {
// Interface of a convolutional layer backed by cuBLAS.
// NOTE(review): only the declaration is visible here — the constructor and
// forward() are defined elsewhere, so stride/padding/data-layout semantics
// cannot be confirmed from this header.
class Conv {
public:
// inputSize/outputSize: layer dimensions; kernelSize: convolution kernel
// extent (presumably square — TODO confirm against the implementation).
// cublasHandle is caller-owned and is not released by this class.
Conv(int inputSize, int outputSize, int kernelSize, cublasHandle_t cublasHandle);
~Conv();
// input/output: presumably device pointers, mirroring Layers::Dense — verify.
void forward(const float* input, float* output);
private:
int inputSize;
int outputSize;
int kernelSize;
cublasHandle_t cublasHandle;
// Device-side parameter buffers.
float* d_weights;
float* d_biases;
};
} // namespace Layers
#endif // CONV_LAYER_H

36
include/layers/dense.h Normal file
View File

@@ -0,0 +1,36 @@
// dense.h
#ifndef DENSE_LAYER_H
#define DENSE_LAYER_H
#include <vector>
#include <cublas_v2.h>
namespace Layers {

// Fully-connected (dense) layer. The constructor initializes parameters on
// the host and mirrors them to the GPU; forward() is intended to compute
// output = W * input + b through cuBLAS on device buffers.
class Dense {
public:
    // inputSize/outputSize: layer dimensions. cublasHandle is caller-owned
    // and used for all device math; it is not released by this class.
    Dense(int inputSize, int outputSize, cublasHandle_t cublasHandle);

    // Frees the device-side weight/bias buffers.
    ~Dense();

    // input/output are device pointers of length inputSize / outputSize.
    void forward(const float* input, float* output);

private:
    int inputSize;
    int outputSize;
    cublasHandle_t cublasHandle;

    // Device copies of the parameters, uploaded once by the constructor.
    float* d_weights;
    float* d_biases;

    // Host-side staging buffers. Weights are stored FLAT with
    // inputSize * outputSize entries: dense.cpp resizes with a single
    // extent, iterates element-wise (for (float& w : weights)), and copies
    // sizeof(float) * weights.size() bytes — the nested
    // std::vector<std::vector<float>> previously declared here could not
    // compile against that usage.
    std::vector<float> weights;
    std::vector<float> biases;

    void initializeWeights();
    void initializeBiases();
};

} // namespace Layers
#endif // DENSE_LAYER_H

54
src/layers/dense.cpp Normal file
View File

@@ -0,0 +1,54 @@
#include "dense.h"

#include <cmath>
#include <cstdlib>
#include <stdexcept>

#include <cublas_v2.h>
#include <cuda_runtime.h>
// Constructs a dense layer of shape (inputSize x outputSize): host-side
// parameters are randomly initialized, then mirrored into device memory.
// Throws std::runtime_error if any CUDA allocation or copy fails — an
// unchecked failure here would otherwise surface much later as an
// illegal-address error inside forward().
Layers::Dense::Dense(int inputSize, int outputSize, cublasHandle_t cublasHandle)
    : inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle),
      d_weights(nullptr), d_biases(nullptr) {
    // Host staging buffers; weights are stored flat (inputSize * outputSize).
    weights.resize(static_cast<size_t>(inputSize) * outputSize);
    biases.resize(outputSize);

    initializeWeights();
    initializeBiases();

    const size_t weightBytes = sizeof(float) * weights.size();
    const size_t biasBytes = sizeof(float) * biases.size();

    cudaError_t err = cudaMalloc(reinterpret_cast<void**>(&d_weights), weightBytes);
    if (err != cudaSuccess) {
        throw std::runtime_error(cudaGetErrorString(err));
    }
    err = cudaMalloc(reinterpret_cast<void**>(&d_biases), biasBytes);
    if (err != cudaSuccess) {
        cudaFree(d_weights);  // avoid leaking the first allocation
        throw std::runtime_error(cudaGetErrorString(err));
    }

    // Upload the initialized parameters to the device.
    err = cudaMemcpy(d_weights, weights.data(), weightBytes, cudaMemcpyHostToDevice);
    if (err == cudaSuccess) {
        err = cudaMemcpy(d_biases, biases.data(), biasBytes, cudaMemcpyHostToDevice);
    }
    if (err != cudaSuccess) {
        cudaFree(d_biases);
        cudaFree(d_weights);
        throw std::runtime_error(cudaGetErrorString(err));
    }
}
// Returns the device-side parameter buffers to the CUDA allocator.
Layers::Dense::~Dense() {
    cudaFree(d_biases);
    cudaFree(d_weights);
}
// Glorot/Xavier-uniform initialization: each weight is drawn uniformly from
// [-r, r] with r = sqrt(6 / (fan_in + fan_out)).
// NOTE(review): rand() is never seeded in this file — reproducibility depends
// on whether a srand() call exists elsewhere; confirm that is intentional.
void Layers::Dense::initializeWeights() {
    // Single-precision math throughout; the original mixed in double
    // (sqrt, 2.0 literal), forcing needless float<->double conversions.
    const float range = std::sqrt(6.0f / static_cast<float>(inputSize + outputSize));
    for (float& weight : weights) {
        weight = static_cast<float>(rand()) / RAND_MAX * 2.0f * range - range;
    }
}
void Layers::Dense::initializeBiases() {
for (float& bias : biases) {
bias = static_cast<float>(rand()) / RAND_MAX * 2.0f - 1.0f;
}
}
void Layers::Dense::forward(const float* input, float* output) {
// Perform matrix multiplication: output = weights * input + biases
const float alpha = 1.0f;
const float beta = 1.0f;
cublasSgemv(cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights, inputSize, input, 1, &beta, output, 1);
// Add biases
cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, output, 1);
}

View File

@@ -2,18 +2,6 @@
#include <cstdlib> #include <cstdlib>
#include "cuda_helper.h" #include "cuda_helper.h"
// CUDA error checking macro
#define CUDA_CHECK(call) \
do { \
cudaError_t result = call; \
if (result != cudaSuccess) { \
std::fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", \
__FILE__, __LINE__, static_cast<unsigned int>(result), \
cudaGetErrorString(result), #call); \
std::exit(EXIT_FAILURE); \
} \
} while (0)
// Initialize CUDA and return the device properties // Initialize CUDA and return the device properties
cudaDeviceProp initializeCUDA() { cudaDeviceProp initializeCUDA() {
int deviceCount; int deviceCount;