#include "activation.cuh" #include "cuda_helper.cuh" #include "activation_functions.cuh" using namespace CUDANet; Layers::Activation::Activation(ActivationType activation, const unsigned int length) : activationType(activation), length(length) { if (activationType == SOFTMAX) { d_softmax_sum = nullptr; CUDA_CHECK(cudaMalloc((void**)&d_softmax_sum, sizeof(float) * length)); } gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE; } Layers::Activation::~Activation() { if (activationType == SOFTMAX) { cudaFree(d_softmax_sum); } } void Layers::Activation::activate(float* __restrict__ d_input) { switch (activationType) { case SIGMOID: Kernels::sigmoid<<>>( d_input, d_input, length ); break; case RELU: Kernels::relu<<>>( d_input, d_input, length ); break; case SOFTMAX: Kernels::softmax_exp<<>>( d_input, d_input, length ); Kernels::softmax_sum<<>>( d_input, d_softmax_sum, length ); Kernels::softmax_sum<<<1, BLOCK_SIZE>>>( d_softmax_sum, d_softmax_sum, length ); Kernels::softmax_div<<>>( d_input, d_input, d_softmax_sum, length ); break; default: break; } }