Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-11-06 09:44:28 +00:00).
Add more softmax tests
This commit is contained in:
@@ -3,7 +3,7 @@
|
||||
#include <cuda_runtime.h>
|
||||
#include <vector>
|
||||
|
||||
TEST(ActivationTest, SoftmaxTest) {
|
||||
TEST(ActivationTest, SoftmaxTest1) {
|
||||
CUDANet::Layers::Activation activation(
|
||||
CUDANet::Layers::ActivationType::SOFTMAX, 5
|
||||
);
|
||||
@@ -30,5 +30,35 @@ TEST(ActivationTest, SoftmaxTest) {
|
||||
|
||||
EXPECT_NEAR(sum, 1.0f, 1e-5f);
|
||||
|
||||
cudaFree(d_input);
|
||||
}
|
||||
|
||||
// Verifies softmax on a 6-element input with widely spread logits:
// every output element must match the precomputed reference values,
// and the full output must sum to 1 (softmax invariant).
TEST(ActivationTest, SoftmaxTest2) {
    CUDANet::Layers::Activation activation(
        CUDANet::Layers::ActivationType::SOFTMAX, 6
    );

    std::vector<float> input = {22.496f, 36.9006f, 30.9904f, 28.4213f, 26.4541f, 31.7887f};

    float* d_input;
    cudaMalloc((void**)&d_input, sizeof(float) * 6);
    cudaMemcpy(d_input, input.data(), sizeof(float) * 6, cudaMemcpyHostToDevice);

    // Softmax is applied in place on the device buffer.
    activation.activate(d_input);
    std::vector<float> output(6);
    cudaMemcpy(
        output.data(), d_input, sizeof(float) * 6, cudaMemcpyDeviceToHost
    );

    float sum = 0.0f;

    // expected[0] is ~5.5e-7 (exp(22.496 - 36.9006) dominated term),
    // which rounds to 0 at the 1e-5 tolerance used below.
    std::vector<float> expected = {0.0f, 0.99111f, 0.00269f, 0.00021f, 3e-05f, 0.00597f};
    // Fix: iterate over all 6 elements. The original loop used `i < 5`,
    // which left output[5] (~0.00597) unchecked and excluded from `sum`,
    // so the sum-to-one assertion below was off by ~0.006 and would fail.
    for (int i = 0; i < 6; ++i) {
        sum += output[i];
        EXPECT_NEAR(output[i], expected[i], 1e-5f);
    }

    EXPECT_NEAR(sum, 1.0f, 1e-5f);

    cudaFree(d_input);
}
|
||||
Reference in New Issue
Block a user