Mirror of https://github.com/lordmathis/CUDANet.git
Migrate Activation layer
@@ -11,7 +11,6 @@ if(CUDAToolkit_FOUND)
     option(USE_CUDA "Use CUDA implementation" ON)
 else()
     option(USE_CUDA "Use CUDA implementation" OFF)
-    message(STATUS "CUDA not found. Defaulting to CPU implementation.")
 endif()
 
 if(USE_CUDA)
@@ -19,7 +18,7 @@ if(USE_CUDA)
     add_definitions(-DUSE_CUDA)
     message(STATUS "Building library with CUDA support")
 else()
-    message(STATUS "Building library without CUDA support")
+    message(STATUS "CUDA not found or disabled. Defaulting to CPU implementation.")
 endif()
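Both hunks are from the build script (presumably the project's CMakeLists.txt): USE_CUDA defaults to ON only when the CUDA toolkit is found, and add_definitions(-DUSE_CUDA) turns that choice into a preprocessor symbol, so the backend is selected at compile time rather than at runtime. A minimal sketch of what that gate looks like from the C++ side; the function below is illustrative, not code from the repo:

// illustrative: compile-time backend selection driven by CMake's -DUSE_CUDA
#include <cstdio>

void describe_backend() {
#ifdef USE_CUDA
    std::printf("built with CUDA support\n");   // USE_CUDA=ON path
#else
    std::printf("built for CPU only\n");        // USE_CUDA=OFF path
#endif
}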
@@ -47,11 +47,21 @@ class Activation {
   private:
     ActivationType activationType;
     int length;
 
+#ifdef USE_CUDA
     int gridSize;
 
     float* d_softmax_sum;
     float* d_max;
 
+    void activateCUDA(float* d_input);
+
+    void initCUDA();
+    void delCUDA();
+
+#else
+    void activateCPU(float* input);
+#endif
+
 };
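This hunk moves everything device-specific in the class, the launch grid size, the softmax scratch buffers, and the CUDA entry points, behind #ifdef USE_CUDA, so a CPU-only build carries no device pointers at all while the public interface stays the same. A toy class using the same pattern, sketched under the assumption that this is the intent (all names here are illustrative):

// illustrative: the conditional-members pattern on a toy class
class Scaler {
  public:
    void apply(float* data);        // public API stays backend-agnostic
  private:
    int length;
#ifdef USE_CUDA
    float* d_buf;                   // device state exists only in CUDA builds
    void applyCUDA(float* d_data);
#else
    void applyCPU(float* data);
#endif
};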
@@ -1,7 +1,6 @@
-#include <iostream>
 #include <vector>
 
-#include "activation.cuh"
+#include "activation.hpp"
 #include "activation_functions.cuh"
 #include "cuda_helper.cuh"
 #include "matmul.cuh"
@@ -9,8 +8,7 @@
 
 using namespace CUDANet::Layers;
 
-Activation::Activation(ActivationType activation, const int length)
-    : activationType(activation), length(length) {
+void Activation::initCUDA() {
     if (activationType == SOFTMAX) {
         d_softmax_sum = nullptr;
         CUDA_CHECK(cudaMalloc((void**)&d_softmax_sum, sizeof(float) * length));
@@ -22,14 +20,14 @@ Activation::Activation(ActivationType activation, const int length)
     gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
 }
 
-Activation::~Activation() {
+void Activation::delCUDA() {
     if (activationType == SOFTMAX) {
         CUDA_CHECK(cudaFree(d_softmax_sum));
         CUDA_CHECK(cudaFree(d_max));
     }
 }
 
-void Activation::activate(float* d_input) {
+void Activation::activateCUDA(float* d_input) {
 
     // float sum = 0.0f;
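These hunks, in the CUDA implementation file, rename the constructor, destructor, and activate() bodies to initCUDA(), delCUDA(), and activateCUDA() without changing their logic. The line gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE is the standard ceiling division: it yields just enough blocks of BLOCK_SIZE threads to cover length elements. A sketch of how such a grid size typically feeds a launch; the kernel and the BLOCK_SIZE value below are assumptions for illustration, not code from CUDANet:

#include <cuda_runtime.h>

#define BLOCK_SIZE 256  // assumed value; CUDANet defines its own

// illustrative kernel: clamp negatives to zero (ReLU)
__global__ void relu_kernel(float* d_data, int length) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < length) {                 // guard: the last block may overhang
        d_data[i] = fmaxf(d_data[i], 0.0f);
    }
}

void launch_relu(float* d_data, int length) {
    int gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil(length / BLOCK_SIZE)
    relu_kernel<<<gridSize, BLOCK_SIZE>>>(d_data, length);
}

For length = 1000 and BLOCK_SIZE = 256 this gives gridSize = 4, i.e. 1024 threads, of which the last 24 are masked off by the bounds check.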
src/layers/activation.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
+#include <stdexcept>
+#include <vector>
+
+#include "activation.hpp"
+
+using namespace CUDANet::Layers;
+
+Activation::Activation(ActivationType activation, const int length)
+    : activationType(activation), length(length) {
+#ifdef USE_CUDA
+    initCUDA();
+#endif
+}
+
+Activation::~Activation() {
+#ifdef USE_CUDA
+    delCUDA();
+#endif
+}
+
+void Activation::activateCPU(float* input) {
+    throw std::logic_error("Not implemented");
+}
+
+void Activation::activate(float* input) {
+#ifdef USE_CUDA
+    activateCUDA(input);
+#else
+    activateCPU(input);
+#endif
+}
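This new translation unit is the backend-neutral facade: construction and destruction route to initCUDA()/delCUDA() when compiled with CUDA, and activate() dispatches to the matching backend (the CPU path is still a stub that throws). A hedged usage sketch, assuming SOFTMAX is visible at namespace scope as in the sources above:

#include <vector>
#include "activation.hpp"

using namespace CUDANet::Layers;

int main() {
    std::vector<float> input = {1.0f, -2.0f, 3.0f};

    // softmax over 3 values; on CUDA builds the constructor allocates
    // the device scratch buffers via initCUDA()
    Activation act(SOFTMAX, static_cast<int>(input.size()));

    // NOTE: on a CUDA build this pointer must reference device memory;
    // on a CPU-only build activateCPU() currently throws std::logic_error
    act.activate(input.data());

    return 0;
}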