Test softmax
@@ -1,4 +1,4 @@
-#include <functional>
+#include <cmath>
 
 #include "activation_functions.cuh"
 #include "cuda_helper.cuh"
@@ -38,7 +38,7 @@ __global__ void CUDANet::Kernels::softmax_exp(
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
 
     for (int i = tid; i < len; i += stride) {
-        dst[i] = exp(src[i]);
+        dst[i] = std::exp(src[i]);
     }
 }
 
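Since src and dst are float buffers, the single-precision expf is the usual CUDA choice here; std::exp also compiles in device code on recent toolkits, where nvcc maps the <cmath> functions to device overloads. A minimal sketch under an assumed signature and grid-stride step, since the diff shows only the loop body:

// Sketch only: softmax_exp's real signature and stride are not fully shown.
__global__ void softmax_exp_sketch(const float* src, float* dst, int len) {
    int tid    = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;  // assumed grid-stride step
    for (int i = tid; i < len; i += stride) {
        dst[i] = expf(src[i]);  // expf: dedicated single-precision CUDA math call
    }
}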
@@ -41,7 +41,7 @@ void Layers::Activation::activate(float* __restrict__ d_input) {
         d_input, d_input, length
     );
 
-    Kernels::softmax_sum<<<gridSize, BLOCK_SIZE>>>(
+    Kernels::softmax_sum<<<gridSize / 2, BLOCK_SIZE>>>(
        d_input, d_softmax_sum, length
    );
 
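Launching softmax_sum with gridSize / 2 blocks matches the standard reduction optimization in which each thread sums two elements while loading into shared memory, so one block covers 2 * BLOCK_SIZE inputs. The kernel body is not part of this diff; a minimal sketch under that assumption, taking BLOCK_SIZE as 256:

// Sketch of an "add during load" shared-memory sum reduction;
// everything beyond the launch parameters is an assumption.
__global__ void softmax_sum_sketch(const float* d_input, float* d_sum, int len) {
    __shared__ float cache[256];  // assumes BLOCK_SIZE == 256
    int t = threadIdx.x;
    int i = blockIdx.x * (blockDim.x * 2) + t;
    float v = 0.0f;
    if (i < len)              v += d_input[i];               // first element
    if (i + blockDim.x < len) v += d_input[i + blockDim.x];  // second element
    cache[t] = v;
    __syncthreads();
    // Shared-memory tree reduction over the block.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (t < s) cache[t] += cache[t + s];
        __syncthreads();
    }
    if (t == 0) d_sum[blockIdx.x] = cache[0];  // one partial sum per block
}

Each block writes one partial sum, so a follow-up pass or single-block relaunch would still be needed to finish the total; whether CUDANet does that here is not shown in the diff.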
@@ -6,6 +6,7 @@ add_executable(test_main
     layers/test_dense.cu
     layers/test_conv2d.cu
     layers/test_input.cu
+    layers/test_activation.cu
     kernels/test_activation_functions.cu
     kernels/test_padding.cu
     kernels/test_matmul.cu
test/layers/test_activation.cu (new file, 34 lines)
@@ -0,0 +1,34 @@
+#include "activation.cuh"
+#include <gtest/gtest.h>
+#include <cuda_runtime.h>
+#include <vector>
+
+TEST(ActivationTest, SoftmaxTest) {
+    CUDANet::Layers::Activation activation(
+        CUDANet::Layers::ActivationType::SOFTMAX, 5
+    );
+
+    std::vector<float> input = {0.573f, 0.619f, 0.732f, 0.055f, 0.243f};
+
+    float* d_input;
+    cudaMalloc((void**)&d_input, sizeof(float) * 5);
+    cudaMemcpy(d_input, input.data(), sizeof(float) * 5, cudaMemcpyHostToDevice);
+
+    activation.activate(d_input);
+    std::vector<float> output(5);
+    cudaMemcpy(
+        output.data(), d_input, sizeof(float) * 5, cudaMemcpyDeviceToHost
+    );
+
+    float sum = 0.0f;
+
+    std::vector<float> expected = {0.22055f, 0.23094f, 0.25856f, 0.13139f, 0.15856f};
+    for (int i = 0; i < 5; ++i) {
+        sum += output[i];
+        EXPECT_NEAR(output[i], expected[i], 1e-5f);
+    }
+
+    EXPECT_NEAR(sum, 1.0f, 1e-5f);
+
+    cudaFree(d_input);
+}
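The test ignores the return codes of cudaMalloc and cudaMemcpy, so a failed allocation or copy would only surface as wrong output values. A small guard macro, not part of the commit and with an illustrative name, would fail fast instead:

#include <cuda_runtime.h>
#include <gtest/gtest.h>

// Fails the current test with a readable message on any CUDA error.
#define CUDA_ASSERT(call)                                         \
    do {                                                          \
        cudaError_t err_ = (call);                                \
        ASSERT_EQ(err_, cudaSuccess) << cudaGetErrorString(err_); \
    } while (0)

// Usage inside the TEST body, for example:
//   CUDA_ASSERT(cudaMalloc((void**)&d_input, sizeof(float) * 5));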
@@ -110,6 +110,14 @@ def gen_strided_test_result():
     output = conv2d(in_channels, out_channels, kernel_size, stride, padding, input, weights)
     print_cpp_vector(output)
 
 
+def gen_softmax_test_result():
+    input = torch.tensor([
+        0.573, 0.619, 0.732, 0.055, 0.243
+    ])
+
+    output = torch.nn.Softmax(dim=0)(input)
+    print_cpp_vector(output)
+
 if __name__ == "__main__":
     print("Generating test results...")
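The expected vector in the C++ test is generated with torch.nn.Softmax, i.e. softmax(x)_i = exp(x_i) / sum_j exp(x_j). For reference, the same values can be computed without PyTorch; a minimal host-side sketch, with an illustrative helper name:

#include <cmath>
#include <vector>

// softmax(x)_i = exp(x_i) / sum_j exp(x_j)
std::vector<float> softmax_ref(const std::vector<float>& x) {
    float sum = 0.0f;
    for (float v : x) sum += std::exp(v);  // denominator
    std::vector<float> out(x.size());
    for (size_t i = 0; i < x.size(); ++i) out[i] = std::exp(x[i]) / sum;
    return out;
}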
@@ -117,3 +125,5 @@ if __name__ == "__main__":
|
|||||||
gen_padded_test_result()
|
gen_padded_test_result()
|
||||||
print("Strided convolution test:")
|
print("Strided convolution test:")
|
||||||
gen_strided_test_result()
|
gen_strided_test_result()
|
||||||
|
print("Softmax test:")
|
||||||
|
gen_softmax_test_result()
|
||||||