mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-11-06 01:34:22 +00:00)
Fix softmax sum kernel
@@ -51,8 +51,7 @@ __global__ void softmax_exp(
  */
 __global__ void softmax_sum(
     const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
-    const unsigned int w
+    float* __restrict__ d_output
 );

 /**
@@ -44,15 +44,14 @@ __global__ void Kernels::softmax_exp(

 __global__ void Kernels::softmax_sum(
     const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
-    const unsigned int w
+    float* __restrict__ d_output
 ) {
     __shared__ float partial_sum[BLOCK_SIZE];
-    int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
-    partial_sum[threadIdx.x] = d_vector[i] + d_vector[i + blockDim.x];
+    int i = blockIdx.x * blockDim.x + threadIdx.x;
+    partial_sum[threadIdx.x] = d_vector[i];
     __syncthreads();

-    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
+    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
         if (threadIdx.x < s) {
             partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s];
         }
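
For readers following the change: the kernel body above is a standard block-level shared-memory tree reduction. Each thread now loads exactly one element (the old code loaded two, at i and i + blockDim.x, which presumably no longer matched how the caller sized the grid), the loop halves the number of active threads each step, and partial_sum[0] ends up holding the block's total. The hunk cuts off before the final write, so the sketch below is only an approximation of the pattern, not CUDANet's exact kernel: the names block_sum and THREADS are made up, and the bounds check and the trailing write of the block total to d_out[blockIdx.x] are assumptions about how the full kernel behaves.

// Sketch only: a block-wise sum reduction in the same style as softmax_sum
// above. block_sum, THREADS, and the bounds check are illustrative, not CUDANet's.
#include <cstdio>
#include <vector>

#define THREADS 128

__global__ void block_sum(const float* __restrict__ d_in, float* __restrict__ d_out, int n) {
    __shared__ float partial[THREADS];
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Each thread contributes one element; out-of-range threads contribute 0.
    partial[threadIdx.x] = (i < n) ? d_in[i] : 0.0f;
    __syncthreads();

    // Tree reduction: halve the number of active threads each step.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            partial[threadIdx.x] += partial[threadIdx.x + s];
        }
        __syncthreads();
    }

    // Thread 0 holds the block's total and writes one partial sum per block.
    if (threadIdx.x == 0) {
        d_out[blockIdx.x] = partial[0];
    }
}

int main() {
    const int n = 1000;
    const int grid = (n + THREADS - 1) / THREADS;  // 8 blocks for n = 1000
    std::vector<float> h(n, 1.0f);

    float *d_in, *d_out;
    cudaMalloc((void**)&d_in, sizeof(float) * n);
    cudaMalloc((void**)&d_out, sizeof(float) * grid);
    cudaMemcpy(d_in, h.data(), sizeof(float) * n, cudaMemcpyHostToDevice);

    block_sum<<<grid, THREADS>>>(d_in, d_out, n);   // pass 1: one partial sum per block
    block_sum<<<1, THREADS>>>(d_out, d_out, grid);  // pass 2: fold the partials (grid <= THREADS)

    float total = 0.0f;
    cudaMemcpy(&total, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.1f (expected %d)\n", total, n);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
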
@@ -42,11 +42,11 @@ void Activation::activate(float* __restrict__ d_input) {
     );

     Kernels::softmax_sum<<<gridSize, BLOCK_SIZE>>>(
-        d_input, d_softmax_sum, length
+        d_input, d_softmax_sum
     );

     Kernels::softmax_sum<<<1, BLOCK_SIZE>>>(
-        d_softmax_sum, d_softmax_sum, length
+        d_softmax_sum, d_softmax_sum
     );

     Kernels::softmax_div<<<gridSize, BLOCK_SIZE>>>(
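
The two back-to-back softmax_sum launches form a two-pass reduction: the first launch leaves one partial sum per block in d_softmax_sum, and the second launch, with a single block, folds those gridSize partials into d_softmax_sum[0], which softmax_div then presumably uses to normalize the exponentiated values. Two passes are enough as long as gridSize fits in one block. A tiny sketch of that grid-size arithmetic follows; the BLOCK_SIZE and length values are assumed for illustration, CUDANet defines its own.

// Sketch: why two reduction passes suffice for moderate input lengths.
// BLOCK_SIZE = 256 and length = 10000 are assumed example values.
#include <cstdio>

int main() {
    const int BLOCK_SIZE = 256;
    const int length     = 10000;

    // Same formula the test below uses: one block per BLOCK_SIZE elements.
    const int gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;  // 40

    // Pass 1 produces gridSize partial sums; pass 2 runs a single block,
    // so it can fold them whenever gridSize <= BLOCK_SIZE, i.e. whenever
    // length <= BLOCK_SIZE * BLOCK_SIZE (65536 with these numbers).
    printf("gridSize = %d, single-block capacity = %d, max length = %d\n",
           gridSize, BLOCK_SIZE, BLOCK_SIZE * BLOCK_SIZE);
    return 0;
}
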
@@ -4,6 +4,7 @@
 #include <iostream>

 #include "activation_functions.cuh"
+#include "cuda_helper.cuh"

 TEST(ActivationFunctionsTest, SigmoidSanityCheck) {
     cudaError_t cudaStatus;
@@ -89,12 +90,46 @@ TEST(ActivationFunctionsTest, SoftmaxExpTest) {
 TEST(ActivationFunctionsTest, SoftmaxSumTest) {
     cudaError_t cudaStatus;

-    std::vector<float> input = {5886928896.0f, 1.06102872080384e+16f,
-                                28771323215872.0f, 2204012904448.0f,
-                                308226162688.0f, 63922983927808.0f};
+    const int n = 10;
+    std::vector<float> input(n);
+    for (int i = 0; i < n; i++) {
+        input[i] = i;
+    }

+    const float expected = n * (n - 1) / 2;
+
     float* d_input;
+    float* d_sum;

-    cudaStatus = cudaMalloc((void**)&d_input, sizeof(float) * 6);
+    const int gridSize = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
+
+    cudaStatus = cudaMalloc((void**)&d_input, sizeof(float) * n);
     EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    cudaStatus = cudaMalloc((void**)&d_sum, sizeof(float) * n);
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    cudaStatus =
+        cudaMemcpy(d_input, input.data(), sizeof(float) * n, cudaMemcpyHostToDevice);
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    CUDANet::Kernels::softmax_sum<<<gridSize, BLOCK_SIZE>>>(
+        d_input, d_sum
+    );
+
+    CUDANet::Kernels::softmax_sum<<<1, BLOCK_SIZE>>>(
+        d_sum, d_sum
+    );
+
+    CUDANet::Kernels::softmax_sum<<<1, BLOCK_SIZE>>>(
+        d_sum, d_sum
+    );
+
+    std::vector<float> sum(n);
+    cudaStatus = cudaMemcpy(
+        sum.data(), d_sum, sizeof(float) * n, cudaMemcpyDeviceToHost
+    );
+    EXPECT_EQ(cudaStatus, cudaSuccess);
+
+    EXPECT_FLOAT_EQ(expected, sum[0]);
 }
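
The expected value in this test is just the arithmetic series 0 + 1 + ... + (n - 1) = n(n - 1)/2, i.e. 45 for n = 10, and EXPECT_FLOAT_EQ checks that the repeated reductions leave exactly that total in d_sum[0]. A host-side reference for the same arithmetic, as a standalone sketch rather than part of the test file:

// Sketch: host-side reference for the test's expected value (not CUDANet code).
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    const int n = 10;
    std::vector<float> input(n);
    for (int i = 0; i < n; i++) {
        input[i] = static_cast<float>(i);
    }

    // Closed form used by the test: 0 + 1 + ... + (n - 1) = n * (n - 1) / 2.
    const float expected  = n * (n - 1) / 2;                               // 45
    const float reference = std::accumulate(input.begin(), input.end(), 0.0f);

    printf("expected = %.1f, accumulate = %.1f\n", expected, reference);
    return 0;
}
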
@@ -58,6 +58,8 @@ TEST(ActivationTest, SoftmaxTest2) {
         EXPECT_NEAR(output[i], expected[i], 1e-5f);
     }

+    std::cout << sum << std::endl;
+
     EXPECT_NEAR(sum, 1.0f, 1e-5f);

     cudaFree(d_input);