Initial activations implementation

2024-02-27 00:24:57 +01:00
parent 6e99525ad0
commit 5e1e0ed1d1
9 changed files with 104 additions and 24 deletions

View File

@@ -1,7 +0,0 @@
-set(LAYER_SOURCES layers/dense.cu)
-
-add_library(CUDANet
-    utils/cuda_helper.cu
-    utils/functions.cu
-    ${LAYER_SOURCES}
-)

View File

@@ -0,0 +1,44 @@
#include "activations.cuh"
#include <functional>
__device__ float sigmoid(float a)
{
return 1.0 / (1.0 + exp (-a));
}
__device__ float relu(float a)
{
return a < 0.0 ? 0.0 : a;
}
__device__ float linear(float a)
{
return a;
}
+// Grid-stride loops: each thread handles elements tid, tid + stride, ...,
+// so the kernels stay correct for any grid/block configuration.
+__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+    int stride = gridDim.x * blockDim.x;
+    int tid = blockDim.x * blockIdx.x + threadIdx.x;
+    for (int i = tid; i < len; i += stride) {
+        dst[i] = sigmoid(src[i]);
+    }
+}
+
+__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+    int stride = gridDim.x * blockDim.x;
+    int tid = blockDim.x * blockIdx.x + threadIdx.x;
+    for (int i = tid; i < len; i += stride) {
+        dst[i] = relu(src[i]);
+    }
+}
+
+__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+    int stride = gridDim.x * blockDim.x;
+    int tid = blockDim.x * blockIdx.x + threadIdx.x;
+    for (int i = tid; i < len; i += stride) {
+        dst[i] = linear(src[i]);
+    }
+}
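Because the kernels use grid-stride loops, correctness does not depend on the launch size. As a quick illustration, a standalone host program could exercise sigmoid_kernel as sketched below. This sketch is not part of the commit; it assumes activations.cuh declares the kernels.

#include "activations.cuh"  // assumed to declare sigmoid_kernel
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int len = 1024;
    float h[1024];
    for (int i = 0; i < len; ++i) h[i] = (i - len / 2) * 0.01f;  // spans roughly [-5.12, 5.11]

    float *d_src, *d_dst;
    cudaMalloc(&d_src, len * sizeof(float));
    cudaMalloc(&d_dst, len * sizeof(float));
    cudaMemcpy(d_src, h, len * sizeof(float), cudaMemcpyHostToDevice);

    // Same launch-size computation that Dense::forward uses below.
    int threadsPerBlock = 256;
    int blocksPerGrid = (len + threadsPerBlock - 1) / threadsPerBlock;
    sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_src, d_dst, len);

    cudaMemcpy(h, d_dst, len * sizeof(float), cudaMemcpyDeviceToHost);
    printf("sigmoid(0.0f) = %f (expected 0.5)\n", h[len / 2]);

    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}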

View File

@@ -1,13 +1,15 @@
#include "dense.cuh"
#include "cuda_helper.cuh"
#include "activations.cuh"
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>
#include <iostream>
#include <functional>
Layers::Dense::Dense(int inputSize, int outputSize, cublasHandle_t cublasHandle)
: inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle) {
Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle)
: inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle), activation(activation) {
// Allocate memory for weights and biases
weights.resize(outputSize * inputSize);
@@ -33,13 +35,7 @@ Layers::Dense::~Dense() {
 }
 
 void Layers::Dense::initializeWeights() {
-    for (int j = 0; j < inputSize; ++j) {
-        for (int i = 0; i < outputSize; ++i) {
-            int idx = IDX2C(i, j, outputSize);
-            weights[idx] = 0.0f;
-        }
-    }
+    std::fill(weights.begin(), weights.end(), 0.0f);
 }
 
 void Layers::Dense::initializeBiases() {
@@ -52,6 +48,18 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
     // d_weights is outputSize x inputSize in column-major order (see
     // initializeWeights), so m = outputSize, n = inputSize, lda = outputSize.
     CUBLAS_CHECK(cublasSgemv(cublasHandle, CUBLAS_OP_N, outputSize, inputSize, &alpha, d_weights, outputSize, d_input, 1, &beta, d_output, 1));
     CUBLAS_CHECK(cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1));
 
+    int threadsPerBlock = 256;
+    int blocksPerGrid = (outputSize + threadsPerBlock - 1) / threadsPerBlock;
+
+    // Apply the configured activation in place on the layer output.
+    if (activation == "sigmoid") {
+        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+    } else if (activation == "relu") {
+        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+    } else {
+        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+    }
 }
 
 void Layers::Dense::toCuda() {
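For orientation, here is a minimal sketch of how the layer could be driven after this commit. It is not part of the diff: the header name dense.cuh is taken from the includes above, and since the commit initializes all weights and biases to zero, this sketch would produce all-zero output until real parameters are loaded.

#include "dense.cuh"  // assumed to declare Layers::Dense
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <vector>

int main() {
    cublasHandle_t handle;
    cublasCreate(&handle);

    // 4 inputs -> 3 outputs; forward() applies relu_kernel after GEMV + bias.
    Layers::Dense dense(4, 3, "relu", handle);

    std::vector<float> input = {1.0f, -2.0f, 0.5f, 3.0f};
    float *d_input, *d_output;
    cudaMalloc(&d_input, input.size() * sizeof(float));
    cudaMalloc(&d_output, 3 * sizeof(float));
    cudaMemcpy(d_input, input.data(), input.size() * sizeof(float), cudaMemcpyHostToDevice);

    dense.forward(d_input, d_output);

    std::vector<float> output(3);
    cudaMemcpy(output.data(), d_output, output.size() * sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(d_input);
    cudaFree(d_output);
    cublasDestroy(handle);
    return 0;
}

Dispatching on a std::string inside forward() costs a string comparison per call; resolving the activation to an enum or function pointer once, in the constructor, would be a natural follow-up.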