#include "activation.cuh"

#include "cuda_helper.cuh"
#include "activation_functions.cuh"

using namespace CUDANet;
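
// Builds an activation layer that operates in place on a device vector of
// `length` floats. SOFTMAX additionally allocates a device scratch buffer
// for its reduction partial sums.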
Layers::Activation::Activation(ActivationType activation, const unsigned int length)
    : activationType(activation), length(length) {

    if (activationType == SOFTMAX) {
        d_softmax_sum = nullptr;
        CUDA_CHECK(cudaMalloc((void**)&d_softmax_sum, sizeof(float) * length));
    }

    // One thread per element, rounded up to a whole number of blocks.
    gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
}
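
// Releases the softmax scratch buffer; the other activation types allocate
// no device memory.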
Layers::Activation::~Activation() {
    if (activationType == SOFTMAX) {
        cudaFree(d_softmax_sum);
    }
}
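
// Applies the configured activation to d_input in place. SIGMOID and RELU
// are single element-wise kernels; SOFTMAX chains four launches:
// exponentiate, two reduction passes for the sum, then a divide.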
void Layers::Activation::activate(float* __restrict__ d_input) {

    switch (activationType) {
        case SIGMOID:
            Kernels::sigmoid<<<gridSize, BLOCK_SIZE>>>(
                d_input, d_input, length
            );
            break;

        case RELU:
            Kernels::relu<<<gridSize, BLOCK_SIZE>>>(
                d_input, d_input, length
            );
            break;

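        // Softmax: y_i = exp(x_i) / sum_j exp(x_j); the sum of
        // exponentials is reduced across blocks in two passes through
        // d_softmax_sum.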
        case SOFTMAX: {
            // Exponentiate every element in place.
            Kernels::softmax_exp<<<gridSize, BLOCK_SIZE>>>(
                d_input, d_input, length
            );

            // First pass: per-block partial sums of the exponentials.
            // Clamp to at least one block; gridSize / 2 is zero whenever
            // the input fits in a single block, and a zero-block launch
            // is an invalid configuration.
            const unsigned int sumGridSize = gridSize / 2 > 0 ? gridSize / 2 : 1;
            Kernels::softmax_sum<<<sumGridSize, BLOCK_SIZE>>>(
                d_input, d_softmax_sum, length
            );

            // Second pass: collapse the partial sums into a single total
            // in d_softmax_sum[0].
            Kernels::softmax_sum<<<1, BLOCK_SIZE>>>(
                d_softmax_sum, d_softmax_sum, length
            );

            // Normalize: divide each exponentiated element by the total.
            Kernels::softmax_div<<<gridSize, BLOCK_SIZE>>>(
                d_input, d_input, d_softmax_sum, length
            );
            break;
        }

        default:
            break;
    }
}
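
// Usage sketch (an assumption-laden example, not part of the library: it
// presumes SOFTMAX is an ActivationType enumerator and that CUDA_CHECK and
// BLOCK_SIZE come from cuda_helper.cuh, as used above):
//
//     const unsigned int n = 10;
//     float* d_logits;
//     CUDA_CHECK(cudaMalloc((void**)&d_logits, sizeof(float) * n));
//     // ... fill d_logits with logits ...
//     Layers::Activation softmax(SOFTMAX, n);
//     softmax.activate(d_logits);  // d_logits now holds probabilities
//     CUDA_CHECK(cudaFree(d_logits));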