diff --git a/src/kernels/activation_functions.cu b/src/kernels/activation_functions.cu
index 5e8e960..1642e80 100644
--- a/src/kernels/activation_functions.cu
+++ b/src/kernels/activation_functions.cu
@@ -1,5 +1,3 @@
-#include <cmath>
-
 #include "activation_functions.cuh"
 #include "cuda_helper.cuh"
 
@@ -40,7 +38,7 @@ __global__ void Kernels::softmax_exp(
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
 
     for (int i = tid; i < len; i += stride) {
-        dst[i] = std::exp(src[i]);
+        dst[i] = expf(src[i]);
     }
 }
 
@@ -50,7 +48,7 @@ __global__ void Kernels::softmax_sum(
     const unsigned int w
 ) {
     __shared__ float partial_sum[BLOCK_SIZE];
-    int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
+    int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
     partial_sum[threadIdx.x] = d_vector[i] + d_vector[i + blockDim.x];
     __syncthreads();
 
@@ -69,7 +67,7 @@
 __global__ void Kernels::softmax_div(
     const float* __restrict__ src,
     float* __restrict__ dst,
-    const float* __restrict__ sum,
+    const float* __restrict__ sum,
     const unsigned int len
 ) {
     int stride = gridDim.x * blockDim.x;
diff --git a/src/layers/activation.cu b/src/layers/activation.cu
index 3f70f38..d874ec5 100644
--- a/src/layers/activation.cu
+++ b/src/layers/activation.cu
@@ -41,7 +41,7 @@ void Activation::activate(float* __restrict__ d_input) {
                 d_input, d_input, length
             );
-            Kernels::softmax_sum<<>>(
+            Kernels::softmax_sum<<>>(
                 d_input, d_softmax_sum, length
            );
diff --git a/test/kernels/test_activation_functions.cu b/test/kernels/test_activation_functions.cu
index e62d188..41dfe90 100644
--- a/test/kernels/test_activation_functions.cu
+++ b/test/kernels/test_activation_functions.cu
@@ -5,8 +5,7 @@
 
 #include "activation_functions.cuh"
 
-TEST(ActivationsTest, SigmoidSanityCheck) {
-
+TEST(ActivationFunctionsTest, SigmoidSanityCheck) {
     cudaError_t cudaStatus;
 
     float input[3] = {-100.0f, 0.0f, 100.0f};
@@ -22,7 +21,8 @@ TEST(ActivationsTest, SigmoidSanityCheck) {
     cudaStatus = cudaMalloc((void**)&d_output, sizeof(float) * 3);
     EXPECT_EQ(cudaStatus, cudaSuccess);
 
-    cudaStatus = cudaMemcpy(d_input, input, sizeof(float) * 3, cudaMemcpyHostToDevice);
+    cudaStatus =
+        cudaMemcpy(d_input, input, sizeof(float) * 3, cudaMemcpyHostToDevice);
     EXPECT_EQ(cudaStatus, cudaSuccess);
 
     CUDANet::Kernels::sigmoid<<<1, 3>>>(d_input, d_output, 3);
@@ -31,7 +31,9 @@ TEST(ActivationsTest, SigmoidSanityCheck) {
 
     std::vector<float> output(3);
 
-    cudaStatus = cudaMemcpy(output.data(), d_output, sizeof(float) * 3, cudaMemcpyDeviceToHost);
+    cudaStatus = cudaMemcpy(
+        output.data(), d_output, sizeof(float) * 3, cudaMemcpyDeviceToHost
+    );
     EXPECT_EQ(cudaStatus, cudaSuccess);
 
     for (int i = 0; i < 3; i++) {
@@ -40,4 +42,59 @@
 
     cudaFree(d_input);
     cudaFree(d_output);
+}
+
+TEST(ActivationFunctionsTest, SoftmaxExpTest) {
+    cudaError_t cudaStatus;
+
+    float input[6] = {22.496f, 36.9006f, 30.9904f,
+                      28.4213f, 26.4541f, 31.7887f};
+
+    std::vector<float> expected = {5886928896.0f,     1.06102872080384e+16f,
+                                   28771323215872.0f, 2204012904448.0f,
+                                   308226162688.0f,   63922983927808.0f};
+
+    float* d_input;
+    float* d_output;
+
+    cudaStatus = cudaMalloc((void**)&d_input, sizeof(float) * 6);
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    cudaStatus = cudaMalloc((void**)&d_output, sizeof(float) * 6);
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    cudaStatus =
+        cudaMemcpy(d_input, input, sizeof(float) * 6, cudaMemcpyHostToDevice);
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    CUDANet::Kernels::softmax_exp<<<1, 6>>>(d_input, d_output, 6);
+    cudaStatus = cudaDeviceSynchronize();
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    std::vector<float> output(6);
+
+    cudaStatus = cudaMemcpy(
+        output.data(), d_output, sizeof(float) * 6, cudaMemcpyDeviceToHost
+    );
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    for (int i = 0; i < 6; i++) {
+        EXPECT_NEAR(expected[i], output[i], 1e7);
+    }
+
+    cudaFree(d_input);
+    cudaFree(d_output);
+}
+
+TEST(ActivationFunctionsTest, SoftmaxSumTest) {
+    cudaError_t cudaStatus;
+
+    std::vector<float> input = {5886928896.0f,     1.06102872080384e+16f,
+                                28771323215872.0f, 2204012904448.0f,
+                                308226162688.0f,   63922983927808.0f};
+
+    float* d_input;
+
+    cudaStatus = cudaMalloc((void**)&d_input, sizeof(float) * 6);
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    cudaFree(d_input);
 }
\ No newline at end of file
diff --git a/test/layers/test_activation.cu b/test/layers/test_activation.cu
index e501799..cf2b0d8 100644
--- a/test/layers/test_activation.cu
+++ b/test/layers/test_activation.cu
@@ -3,7 +3,7 @@
 #include <cuda_runtime.h>
 #include <gtest/gtest.h>
 
-TEST(ActivationTest, SoftmaxTest) {
+TEST(ActivationTest, SoftmaxTest1) {
     CUDANet::Layers::Activation activation(
         CUDANet::Layers::ActivationType::SOFTMAX, 5
     );
@@ -30,5 +30,35 @@
 
     EXPECT_NEAR(sum, 1.0f, 1e-5f);
 
+    cudaFree(d_input);
+}
+
+TEST(ActivationTest, SoftmaxTest2) {
+    CUDANet::Layers::Activation activation(
+        CUDANet::Layers::ActivationType::SOFTMAX, 6
+    );
+
+    std::vector<float> input = {22.496f, 36.9006f, 30.9904f,
+                                28.4213f, 26.4541f, 31.7887f};
+
+    float* d_input;
+    cudaMalloc((void**)&d_input, sizeof(float) * 6);
+    cudaMemcpy(
+        d_input, input.data(), sizeof(float) * 6, cudaMemcpyHostToDevice
+    );
+
+    activation.activate(d_input);
+    std::vector<float> output(6);
+    cudaMemcpy(
+        output.data(), d_input, sizeof(float) * 6, cudaMemcpyDeviceToHost
+    );
+
+    float sum = 0.0f;
+
+    std::vector<float> expected = {0.0f,     0.99111f, 0.00269f,
+                                   0.00021f, 3e-05f,   0.00597f};
+    for (int i = 0; i < 6; ++i) {
+        sum += output[i];
+        EXPECT_NEAR(output[i], expected[i], 1e-5f);
+    }
+
+    EXPECT_NEAR(sum, 1.0f, 1e-5f);
+
+    cudaFree(d_input);
 }
\ No newline at end of file
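
For anyone extending these tests: a small host-side reference softmax is a convenient oracle to diff the kernel output against. The sketch below is not part of the patch and softmax_reference is a hypothetical helper; it assumes only the C++ standard library. Unlike the kernels above, it subtracts the per-vector max before exponentiating: with logits near 36.9, as in SoftmaxExpTest, expf already reaches roughly 1e16, and a float overflows once a logit exceeds about 88.

// Hypothetical host-side oracle for the softmax kernels; a sketch, not part
// of the patch or the library's API. Subtracting the max keeps expf in range;
// the result is unchanged because the common factor cancels in the division.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> softmax_reference(const std::vector<float>& input) {
    // Shift by the max so the largest exponent is exp(0) = 1.
    float max_val = *std::max_element(input.begin(), input.end());

    std::vector<float> output(input.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < input.size(); ++i) {
        output[i] = std::exp(input[i] - max_val);
        sum += output[i];
    }
    // Normalize so the outputs sum to 1, as SoftmaxTest1/SoftmaxTest2 assert.
    for (std::size_t i = 0; i < input.size(); ++i) {
        output[i] /= sum;
    }
    return output;
}

An oracle like this would also help when completing the SoftmaxSumTest stub: softmax_sum reads d_vector[i + blockDim.x], so a 6-element input driven by BLOCK_SIZE threads would presumably need zero-padding to 2 * BLOCK_SIZE elements before the reduction is valid.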