Initial activations implementation

2024-02-27 00:24:57 +01:00
parent 6e99525ad0
commit 5e1e0ed1d1
9 changed files with 104 additions and 24 deletions

View File

@@ -10,6 +10,7 @@ include_directories(${CUDAToolkit_INCLUDE_DIRS})
# Add project source files for the library
set(LIBRARY_SOURCES
    src/utils/cuda_helper.cu
    src/functions/activations.cu
    src/layers/dense.cu
)
@@ -27,11 +28,12 @@ target_link_libraries(${PROJECT_NAME} CUDA::cublas CUDA::cudart)
target_include_directories(${PROJECT_NAME} PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}/include
    ${CMAKE_CURRENT_SOURCE_DIR}/include/utils
    ${CMAKE_CURRENT_SOURCE_DIR}/include/functions
    ${CMAKE_CURRENT_SOURCE_DIR}/include/layers
    ${CMAKE_CURRENT_SOURCE_DIR}/src
)
set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 14)
set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20)
# Add testing subdirectory
add_subdirectory(test)
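For context, add_subdirectory(test) expects a CMakeLists.txt inside test/. A minimal sketch of what that file might look like follows; the executable name, test source paths, and the GTest linkage are assumptions, not contents of this commit (${PROJECT_NAME} is inherited from the parent scope).

# Hypothetical test/CMakeLists.txt; names and paths are illustrative
find_package(GTest REQUIRED)
add_executable(${PROJECT_NAME}_test
    layers/test_dense.cu
    functions/test_functions.cu
)
target_link_libraries(${PROJECT_NAME}_test PRIVATE ${PROJECT_NAME} GTest::gtest_main)
add_test(NAME ${PROJECT_NAME}_test COMMAND ${PROJECT_NAME}_test)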

View File

@@ -0,0 +1,14 @@
#ifndef ACTIVATIONS_H
#define ACTIVATIONS_H
#include <functional>
__device__ float sigmoid(float a);
__device__ float relu(float a);
__device__ float linear(float a);
__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
#endif // ACTIVATIONS_H
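These declarations are enough to launch the kernels from host code. A minimal usage sketch, assuming a device buffer that already holds the input (the helper name is illustrative and error checking is elided):

// Hypothetical host-side helper; apply sigmoid elementwise on the device.
#include "activations.cuh"
#include <cuda_runtime.h>
void apply_sigmoid(const float* d_in, float* d_out, int len) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;
    sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_in, d_out, len);
    cudaDeviceSynchronize();  // block until the kernel finishes writing d_out
}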

View File

@@ -1,5 +1,3 @@
// fully_connected_layer.h
#ifndef CONV_LAYER_H
#define CONV_LAYER_H

View File

@@ -1,15 +1,17 @@
#ifndef DENSE_LAYER_H
#define DENSE_LAYER_H
#include <functional>
#include <vector>
#include <cublas_v2.h>
#include <ilayer.cuh>
#include <string>
#include "ilayer.cuh"
namespace Layers {
    class Dense : public ILayer {
    public:
        Dense(int inputSize, int outputSize, cublasHandle_t cublasHandle);
        Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle);
        ~Dense();
        void forward(const float* input, float* output);
@@ -28,6 +30,8 @@ namespace Layers {
        std::vector<float> weights;
        std::vector<float> biases;
        std::string activation;
        void initializeWeights();
        void initializeBiases();
        void toCuda();

View File

@@ -1,7 +0,0 @@
set(LAYER_SOURCES layers/dense.cu)
add_library(CUDANet
    utils/cuda_helper.cu
    utils/functions.cu
    ${LAYER_SOURCES}
)

View File

@@ -0,0 +1,44 @@
#include "activations.cuh"
#include <functional>
__device__ float sigmoid(float a) {
    return 1.0f / (1.0f + expf(-a));  // expf keeps the math in single precision
}
__device__ float relu(float a) {
    return a < 0.0f ? 0.0f : a;
}
__device__ float linear(float a) {
    return a;
}
__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for (int i = tid; i < len; i += stride) {
        dst[i] = sigmoid(src[i]);
    }
}
__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for (int i = tid; i < len; i += stride) {
        dst[i] = relu(src[i]);
    }
}
__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for (int i = tid; i < len; i += stride) {
        dst[i] = linear(src[i]);
    }
}
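All three kernels use the grid-stride loop pattern: each thread handles indices tid, tid + stride, tid + 2*stride, and so on, so correctness does not depend on launching one thread per element. Because the access is purely elementwise, dst may alias src, which is what the in-place calls in Dense::forward below rely on. A sketch of a capped launch that the loop still covers in full (the 1024-block cap is an arbitrary illustrative choice):

#include <algorithm>
// Hypothetical launcher: the grid-stride loop inside relu_kernel picks up
// any elements beyond blocksPerGrid * threadsPerBlock.
void launch_relu(const float* d_src, float* d_dst, int len) {
    int threadsPerBlock = 256;
    int blocksPerGrid = std::min((len + threadsPerBlock - 1) / threadsPerBlock, 1024);
    relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_src, d_dst, len);
}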

View File

@@ -1,13 +1,15 @@
#include "dense.cuh"
#include "cuda_helper.cuh"
#include "activations.cuh"
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>
#include <iostream>
#include <functional>
#include <algorithm>  // std::fill
Layers::Dense::Dense(int inputSize, int outputSize, cublasHandle_t cublasHandle)
    : inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle) {
Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle)
    : inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle), activation(activation) {
    // Allocate memory for weights and biases
    weights.resize(outputSize * inputSize);
@@ -33,13 +35,7 @@ Layers::Dense::~Dense() {
}
void Layers::Dense::initializeWeights() {
    for (int j = 0; j < inputSize; ++j) {
        for (int i = 0; i < outputSize; ++i) {
            int idx = IDX2C(i, j, outputSize);
            weights[idx] = 0.0f;
        }
    }
    std::fill(weights.begin(), weights.end(), 0.0f);
}
void Layers::Dense::initializeBiases() {
@@ -52,6 +48,18 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
    // y = W * x; W is outputSize x inputSize, column-major with leading dimension
    // outputSize (matching IDX2C in initializeWeights), so m = outputSize, n = inputSize
    CUBLAS_CHECK(cublasSgemv(cublasHandle, CUBLAS_OP_N, outputSize, inputSize, &alpha, d_weights, outputSize, d_input, 1, &beta, d_output, 1));
    CUBLAS_CHECK(cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1));
    int threadsPerBlock = 256;
    int blocksPerGrid = (outputSize + threadsPerBlock - 1) / threadsPerBlock;
    if (activation == "sigmoid") {
        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
    } else if (activation == "relu") {
        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
    } else {
        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
    }
}
void Layers::Dense::toCuda() {
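Taken together, forward computes output = activation(W * input + b): Sgemv performs the matrix-vector product, Saxpy adds the bias, and the selected kernel applies the nonlinearity in place on d_output. A plain CPU reference of the same computation can be useful for checking the device path; this sketch assumes the sigmoid activation and the column-major layout used by IDX2C above (the function name is illustrative):

#include <cmath>
#include <vector>
// Hypothetical host-side reference for Dense::forward with sigmoid.
// W is outputSize x inputSize, column-major, leading dimension outputSize.
std::vector<float> dense_forward_ref(const std::vector<float>& W,
                                     const std::vector<float>& b,
                                     const std::vector<float>& x,
                                     int inputSize, int outputSize) {
    std::vector<float> y(outputSize);
    for (int i = 0; i < outputSize; ++i) {
        float acc = b[i];
        for (int j = 0; j < inputSize; ++j) {
            acc += W[i + j * outputSize] * x[j];  // IDX2C(i, j, outputSize)
        }
        y[i] = 1.0f / (1.0f + std::exp(-acc));  // sigmoid
    }
    return y;
}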

View File

@@ -2,6 +2,7 @@
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <iostream>
#include "activations.cuh"
#include "dense.cuh"
#include "test_cublas_fixture.cuh"
@@ -9,7 +10,7 @@ class DenseLayerTest : public CublasTestFixture {
protected:
    Layers::Dense commonTestSetup(int inputSize, int outputSize, std::vector<float>& input, std::vector<std::vector<float>>& weights, std::vector<float>& biases, float*& d_input, float*& d_output) {
        // Create Dense layer
        Layers::Dense denseLayer(inputSize, outputSize, cublasHandle);
        Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
        // Set weights and biases
        denseLayer.setWeights(weights);
@@ -48,7 +49,7 @@ TEST_F(DenseLayerTest, Init) {
            int outputSize = j;
            // std::cout << "Dense layer: input size = " << inputSize << ", output size = " << outputSize << std::endl;
            Layers::Dense denseLayer(inputSize, outputSize, cublasHandle);
            Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
        }
    }
}
@@ -67,7 +68,7 @@ TEST_F(DenseLayerTest, setWeights) {
        {1.3f, 0.5f, 0.0f, 1.7f}
    };
    Layers::Dense denseLayer(inputSize, outputSize, cublasHandle);
    Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
    denseLayer.setWeights(weights);

View File

@@ -0,0 +1,16 @@
#include "gtest/gtest.h"
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <iostream>
#include "functions.cuh"
#include "test_cublas_fixture.cuh"
class FunctionsTest : public CublasTestFixture {
protected:
    cudaError_t cudaStatus;
    cublasStatus_t cublasStatus;
};
TEST_F(FunctionsTest, sigmoid) {
}
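The sigmoid test body is empty in this commit. A sketch of one way it could be filled in, assuming the grid-stride sigmoid_kernel from activations.cu and that <vector> and <cmath> are included; the sample values, single-block launch, and tolerance are arbitrary choices, and CUDA error checks are elided:

// Hypothetical body for TEST_F(FunctionsTest, sigmoid): run the kernel on
// known inputs and compare against a host-side reference.
std::vector<float> input = {-2.0f, -1.0f, 0.0f, 1.0f, 2.0f};
int len = static_cast<int>(input.size());
float* d_in;
float* d_out;
cudaMalloc(&d_in, len * sizeof(float));
cudaMalloc(&d_out, len * sizeof(float));
cudaMemcpy(d_in, input.data(), len * sizeof(float), cudaMemcpyHostToDevice);
sigmoid_kernel<<<1, 256>>>(d_in, d_out, len);  // one block suffices for 5 elements
std::vector<float> output(len);
cudaMemcpy(output.data(), d_out, len * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < len; ++i) {
    EXPECT_NEAR(output[i], 1.0f / (1.0f + std::exp(-input[i])), 1e-5f);
}
cudaFree(d_in);
cudaFree(d_out);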