Migrate Activation layer

commit 0dca8348bd (parent 591507ed21)
2024-09-08 12:49:13 +02:00
4 changed files with 46 additions and 8 deletions

CMakeLists.txt

@@ -11,7 +11,6 @@ if(CUDAToolkit_FOUND)
     option(USE_CUDA "Use CUDA implementation" ON)
 else()
     option(USE_CUDA "Use CUDA implementation" OFF)
-    message(STATUS "CUDA not found. Defaulting to CPU implementation.")
 endif()
 
 if(USE_CUDA)
@@ -19,7 +18,7 @@ if(USE_CUDA)
    add_definitions(-DUSE_CUDA)
    message(STATUS "Building library with CUDA support")
 else()
-    message(STATUS "Building library without CUDA support")
+    message(STATUS "CUDA not found or disabled. Defaulting to CPU implementation.")
 endif()
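The `USE_CUDA` option defaults to whether the toolkit was found, but since `option()` values are cache variables, a user can still force the CPU path with `cmake -DUSE_CUDA=OFF ..` even when CUDA is installed. Because `add_definitions(-DUSE_CUDA)` applies the macro to every translation unit, sources can branch on it at compile time; a minimal sketch of that mechanism (illustrative program, not part of the library):

```cpp
#include <cstdio>

// add_definitions(-DUSE_CUDA) behaves like compiling with -DUSE_CUDA,
// so any translation unit can select a backend at compile time.
int main() {
#ifdef USE_CUDA
    std::printf("Built with CUDA support\n");
#else
    std::printf("Built without CUDA support (CPU fallback)\n");
#endif
    return 0;
}
```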

activation.hpp

@@ -47,11 +47,21 @@ class Activation {
   private:
     ActivationType activationType;
     int length;
+
+#ifdef USE_CUDA
     int gridSize;
     float* d_softmax_sum;
     float* d_max;
+
+    void activateCUDA(float* d_input);
+    void initCUDA();
+    void delCUDA();
+#else
+    void activateCPU(float* input);
+#endif
 };
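The class now keeps all backend-specific state behind the preprocessor: CUDA builds carry the device buffers and launch bookkeeping, CPU builds only a stub. A plausible shape of the full header after this change; the public section is inferred from the new activation.cpp, and the `ActivationType` values other than `SOFTMAX` are assumed:

```cpp
// Plausible reconstruction of the full class; only SOFTMAX is confirmed
// by the diff, the remaining enum values are placeholders.
namespace CUDANet::Layers {

enum ActivationType { SIGMOID, RELU, SOFTMAX, NONE };  // values besides SOFTMAX assumed

class Activation {
  public:
    Activation(ActivationType activation, const int length);
    ~Activation();
    // Takes a device pointer in CUDA builds, a host pointer otherwise.
    void activate(float* input);

  private:
    ActivationType activationType;
    int length;
#ifdef USE_CUDA
    int gridSize;          // launch size: (length + BLOCK_SIZE - 1) / BLOCK_SIZE
    float* d_softmax_sum;  // device scratch for the softmax denominator
    float* d_max;          // device scratch for the max (numerical stability)
    void activateCUDA(float* d_input);
    void initCUDA();
    void delCUDA();
#else
    void activateCPU(float* input);
#endif
};

}  // namespace CUDANet::Layers
```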

src/layers/activation.cu

@@ -1,7 +1,6 @@
 #include <iostream>
 #include <vector>
-#include "activation.cuh"
+#include "activation.hpp"
 #include "activation_functions.cuh"
 #include "cuda_helper.cuh"
 #include "matmul.cuh"
@@ -9,8 +8,7 @@
 using namespace CUDANet::Layers;
 
-Activation::Activation(ActivationType activation, const int length)
-    : activationType(activation), length(length) {
+void Activation::initCUDA() {
     if (activationType == SOFTMAX) {
         d_softmax_sum = nullptr;
         CUDA_CHECK(cudaMalloc((void**)&d_softmax_sum, sizeof(float) * length));
@@ -22,14 +20,14 @@ Activation::Activation(ActivationType activation, const int length)
     gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
 }
 
-Activation::~Activation() {
+void Activation::delCUDA() {
     if (activationType == SOFTMAX) {
         CUDA_CHECK(cudaFree(d_softmax_sum));
         CUDA_CHECK(cudaFree(d_max));
     }
 }
 
-void Activation::activate(float* d_input) {
+void Activation::activateCUDA(float* d_input) {
     // float sum = 0.0f;
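The hunk is truncated just inside `activateCUDA`, but the two scratch buffers point to the usual numerically stable softmax pipeline: reduce to a max, shift and exponentiate, reduce to a sum, then normalize. A standalone sketch of that pipeline, with deliberately naive single-thread reductions; the kernel names are illustrative, not CUDANet's actual kernels:

```cpp
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK_SIZE 256

// Naive single-thread max reduction; real code would use a parallel reduction.
__global__ void reduceMaxNaive(const float* in, float* out, int n) {
    float m = in[0];
    for (int i = 1; i < n; ++i) m = fmaxf(m, in[i]);
    *out = m;
}

// Subtract the max before exponentiating so large inputs do not overflow.
__global__ void expShift(float* data, const float* d_max, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = expf(data[i] - *d_max);
}

// Naive single-thread sum reduction into the softmax denominator.
__global__ void reduceSumNaive(const float* in, float* out, int n) {
    float s = 0.0f;
    for (int i = 0; i < n; ++i) s += in[i];
    *out = s;
}

// Divide every element by the shared denominator.
__global__ void normalize(float* data, const float* d_sum, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] /= *d_sum;
}

int main() {
    const int n = 4;
    float h[n] = {1.0f, 2.0f, 3.0f, 4.0f};
    float *d_in, *d_max, *d_sum;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_max, sizeof(float));  // plays the role of Activation::d_max
    cudaMalloc(&d_sum, sizeof(float));  // plays the role of d_softmax_sum
    cudaMemcpy(d_in, h, n * sizeof(float), cudaMemcpyHostToDevice);

    int gridSize = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;  // as computed in the diff
    reduceMaxNaive<<<1, 1>>>(d_in, d_max, n);
    expShift<<<gridSize, BLOCK_SIZE>>>(d_in, d_max, n);
    reduceSumNaive<<<1, 1>>>(d_in, d_sum, n);
    normalize<<<gridSize, BLOCK_SIZE>>>(d_in, d_sum, n);

    cudaMemcpy(h, d_in, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) std::printf("%f\n", h[i]);
    cudaFree(d_in); cudaFree(d_max); cudaFree(d_sum);
    return 0;
}
```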

src/layers/activation.cpp (new file, 31 lines)

@@ -0,0 +1,31 @@
+#include <stdexcept>
+#include <vector>
+
+#include "activation.hpp"
+
+using namespace CUDANet::Layers;
+
+Activation::Activation(ActivationType activation, const int length)
+    : activationType(activation), length(length) {
+#ifdef USE_CUDA
+    initCUDA();
+#endif
+}
+
+Activation::~Activation() {
+#ifdef USE_CUDA
+    delCUDA();
+#endif
+}
+
+void Activation::activateCPU(float* input) {
+    throw std::logic_error("Not implemented");
+}
+
+void Activation::activate(float* input) {
+#ifdef USE_CUDA
+    activateCUDA(input);
+#else
+    activateCPU(input);
+#endif
+}
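A minimal usage sketch of the migrated layer: the same call site compiles against either backend, though the CPU path currently throws. This assumes `SOFTMAX` is a plain enum value (the only one confirmed by the diff) and that `activate()` takes a device pointer in CUDA builds:

```cpp
#include <cstdio>
#include <vector>
#include "activation.hpp"
#ifdef USE_CUDA
#include <cuda_runtime.h>
#endif

using namespace CUDANet::Layers;

int main() {
    const int n = 4;
    std::vector<float> h(n, 1.0f);
    Activation softmax(SOFTMAX, n);  // constructor runs initCUDA() in CUDA builds

#ifdef USE_CUDA
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    softmax.activate(d);             // dispatches to activateCUDA()
    cudaMemcpy(h.data(), d, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d);
#else
    softmax.activate(h.data());      // dispatches to activateCPU(),
                                     // which currently throws std::logic_error
#endif
    for (float v : h) std::printf("%f\n", v);
    return 0;
}
```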