From 3fee9fde6f397b12057589f1ef167352f148986c Mon Sep 17 00:00:00 2001
From: LordMathis
Date: Sun, 2 Jun 2024 19:31:35 +0200
Subject: [PATCH] Create scaffolding for Inception A test

A test

---
 .../inception_v3/tests/test_inception_a.cpp | 214 ++++++++++++++++++
 tools/utils.py                              |   6 +-
 2 files changed, 217 insertions(+), 3 deletions(-)
 create mode 100644 examples/inception_v3/tests/test_inception_a.cpp

diff --git a/examples/inception_v3/tests/test_inception_a.cpp b/examples/inception_v3/tests/test_inception_a.cpp
new file mode 100644
index 0000000..26aa624
--- /dev/null
+++ b/examples/inception_v3/tests/test_inception_a.cpp
@@ -0,0 +1,214 @@
+#include <gtest/gtest.h>
+
+#include "inception_v3.hpp"  // NOTE: assumed header for InceptionA/BasicConv2d; the include target was lost
+
+class InceptionATest : public ::testing::Test {
+  protected:
+    InceptionA *inception_a;
+
+    cudaError_t cudaStatus;
+
+    shape2d     inputShape;
+    int         inputChannels;
+    int         poolFeatures;
+    std::string prefix = "test";
+
+    float *d_input;
+    float *d_output;
+
+    std::vector<float> input;
+    std::vector<float> expected;
+
+    std::vector<float> branch1x1_conv_weights;
+    std::vector<float> branch1x1_conv_biases;
+    std::vector<float> branch1x1_bn_weights;
+    std::vector<float> branch1x1_bn_biases;
+
+    std::vector<float> branch5x5_1_conv_weights;
+    std::vector<float> branch5x5_1_conv_biases;
+    std::vector<float> branch5x5_1_bn_weights;
+    std::vector<float> branch5x5_1_bn_biases;
+    std::vector<float> branch5x5_2_conv_weights;
+    std::vector<float> branch5x5_2_conv_biases;
+    std::vector<float> branch5x5_2_bn_weights;
+    std::vector<float> branch5x5_2_bn_biases;
+
+    std::vector<float> branch3x3dbl_1_conv_weights;
+    std::vector<float> branch3x3dbl_1_conv_biases;
+    std::vector<float> branch3x3dbl_1_bn_weights;
+    std::vector<float> branch3x3dbl_1_bn_biases;
+    std::vector<float> branch3x3dbl_2_conv_weights;
+    std::vector<float> branch3x3dbl_2_conv_biases;
+    std::vector<float> branch3x3dbl_2_bn_weights;
+    std::vector<float> branch3x3dbl_2_bn_biases;
+    std::vector<float> branch3x3dbl_3_conv_weights;
+    std::vector<float> branch3x3dbl_3_conv_biases;
+    std::vector<float> branch3x3dbl_3_bn_weights;
+    std::vector<float> branch3x3dbl_3_bn_biases;
+
+    std::vector<float> branchPool_2_conv_weights;
+    std::vector<float> branchPool_2_conv_biases;
+    std::vector<float> branchPool_2_bn_weights;
+    std::vector<float> branchPool_2_bn_biases;
+
+    virtual void SetUp() override {
+        inception_a = nullptr;
+    }
+
+    virtual void TearDown() override {
+        // Clean up
+        delete inception_a;
+    }
+
+    void setBasicConv2dWeights(
+        BasicConv2d              *basic_conv2d,
+        const std::vector<float> &conv_weights,
+        const std::vector<float> &conv_biases,
+        const std::vector<float> &bn_weights,
+        const std::vector<float> &bn_biases
+    ) {
+        auto layerPair = basic_conv2d->getLayers()[0];
+
+        ASSERT_EQ(layerPair.first, prefix + ".conv");
+
+        CUDANet::Layers::Conv2d *conv =
+            dynamic_cast<CUDANet::Layers::Conv2d *>(layerPair.second);
+        conv->setWeights(conv_weights.data());
+        conv->setBiases(conv_biases.data());
+
+        ASSERT_EQ(conv->getWeights().size(), conv_weights.size());
+        ASSERT_EQ(conv->getBiases().size(), conv_biases.size());
+
+        cudaStatus = cudaGetLastError();
+        EXPECT_EQ(cudaStatus, cudaSuccess);
+
+        layerPair = basic_conv2d->getLayers()[1];
+        ASSERT_EQ(layerPair.first, prefix + ".bn");
+
+        CUDANet::Layers::BatchNorm2d *bn =
+            dynamic_cast<CUDANet::Layers::BatchNorm2d *>(layerPair.second);
+        bn->setWeights(bn_weights.data());
+        bn->setBiases(bn_biases.data());
+
+        ASSERT_EQ(bn->getWeights().size(), bn_weights.size());
+        ASSERT_EQ(bn->getBiases().size(), bn_biases.size());
+
+        cudaStatus = cudaGetLastError();
+        EXPECT_EQ(cudaStatus, cudaSuccess);
+    }
+
+    void runTest() {
+        inception_a = new InceptionA(
+            inputShape, inputChannels, poolFeatures, prefix
+        );
+
+        // Set up layer weights and biases
+        // Branch 1x1
+        auto layerPair = inception_a->getLayers()[0];
+        ASSERT_EQ(layerPair.first, prefix + ".branch1x1");
+        BasicConv2d *basic_conv2d =
+            dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branch1x1_conv_weights,
+            branch1x1_conv_biases,
+            branch1x1_bn_weights,
+            branch1x1_bn_biases
+        );
+
+        // Branch 5x5
+        layerPair = inception_a->getLayers()[1];
+        ASSERT_EQ(layerPair.first, prefix + ".branch5x5_1");
+        basic_conv2d = dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branch5x5_1_conv_weights,
+            branch5x5_1_conv_biases,
+            branch5x5_1_bn_weights,
+            branch5x5_1_bn_biases
+        );
+        layerPair = inception_a->getLayers()[2];
+        ASSERT_EQ(layerPair.first, prefix + ".branch5x5_2");
+        basic_conv2d = dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branch5x5_2_conv_weights,
+            branch5x5_2_conv_biases,
+            branch5x5_2_bn_weights,
+            branch5x5_2_bn_biases
+        );
+
+        // Branch 3x3dbl
+        layerPair = inception_a->getLayers()[3];
+        ASSERT_EQ(layerPair.first, prefix + ".branch3x3dbl_1");
+        basic_conv2d = dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branch3x3dbl_1_conv_weights,
+            branch3x3dbl_1_conv_biases,
+            branch3x3dbl_1_bn_weights,
+            branch3x3dbl_1_bn_biases
+        );
+        layerPair = inception_a->getLayers()[4];
+        ASSERT_EQ(layerPair.first, prefix + ".branch3x3dbl_2");
+        basic_conv2d = dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branch3x3dbl_2_conv_weights,
+            branch3x3dbl_2_conv_biases,
+            branch3x3dbl_2_bn_weights,
+            branch3x3dbl_2_bn_biases
+        );
+        layerPair = inception_a->getLayers()[5];
+        ASSERT_EQ(layerPair.first, prefix + ".branch3x3dbl_3");
+        basic_conv2d = dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branch3x3dbl_3_conv_weights,
+            branch3x3dbl_3_conv_biases,
+            branch3x3dbl_3_bn_weights,
+            branch3x3dbl_3_bn_biases
+        );
+
+        // Pool
+        layerPair = inception_a->getLayers()[7];  // 6 is a pool layer without weights
+        ASSERT_EQ(layerPair.first, prefix + ".branch_pool");
+        basic_conv2d = dynamic_cast<BasicConv2d *>(layerPair.second);
+        setBasicConv2dWeights(
+            basic_conv2d,
+            branchPool_2_conv_weights,
+            branchPool_2_conv_biases,
+            branchPool_2_bn_weights,
+            branchPool_2_bn_biases
+        );
+
+        cudaStatus =
+            cudaMalloc((void **)&d_input, sizeof(float) * input.size());
+        EXPECT_EQ(cudaStatus, cudaSuccess);
+
+        cudaStatus = cudaMemcpy(
+            d_input, input.data(), sizeof(float) * input.size(),
+            cudaMemcpyHostToDevice
+        );
+        EXPECT_EQ(cudaStatus, cudaSuccess);
+
+        d_output = inception_a->forward(d_input);
+
+        cudaStatus = cudaGetLastError();
+        EXPECT_EQ(cudaStatus, cudaSuccess);
+
+        int outputSize = inception_a->getOutputSize();
+        std::vector<float> output(outputSize);
+        cudaStatus = cudaMemcpy(
+            output.data(), d_output, sizeof(float) * output.size(),
+            cudaMemcpyDeviceToHost
+        );
+        EXPECT_EQ(cudaStatus, cudaSuccess);
+
+        for (size_t i = 0; i < output.size(); ++i) {
+            EXPECT_NEAR(expected[i], output[i], 1e-5f);
+        }
+    }
+};
\ No newline at end of file
diff --git a/tools/utils.py b/tools/utils.py
index 25a93fd..e965d0b 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -47,14 +47,14 @@ def print_model_parameters(model: torch.nn.Module):
         print(name, param.numel())


-def predict(model, image_path, preprocess=None):
+def predict(model, image_path, resize=299, crop=299, preprocess=None):
     input_image = Image.open(image_path)

     if preprocess is None:
         preprocess = transforms.Compose(
             [
-                transforms.Resize(299),
-                transforms.CenterCrop(299),
+                transforms.Resize(resize),
+                transforms.CenterCrop(crop),
                 transforms.ToTensor(),
                 transforms.Normalize(
                     mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
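
Usage note (not part of the patch): the new resize and crop parameters let the same predict helper serve both Inception v3, which expects 299x299 crops, and the usual 224-crop torchvision models, while keeping 299 as the default. Below is a minimal sketch of how it might be called; it assumes torchvision >= 0.13, that the script runs from the tools/ directory, and that "example.jpg" is a placeholder image path.

from torchvision import models

from utils import predict  # tools/utils.py

# Inception v3 keeps the previous behaviour via the resize=299, crop=299 defaults.
inception = models.inception_v3(weights=models.Inception_V3_Weights.DEFAULT)
inception.eval()
predict(inception, "example.jpg")

# A 224-crop model reuses the same helper by overriding the new arguments.
resnet = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
resnet.eval()
predict(resnet, "example.jpg", resize=256, crop=224)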