Rework inception block tests

2024-06-05 21:58:33 +02:00
parent a54ffa8b20
commit 1136ca452f
12 changed files with 416 additions and 104738 deletions

View File

@@ -1,62 +0,0 @@
import sys

import torch
from torchvision.models.inception import InceptionA

sys.path.append("../../../tools")
from utils import print_cpp_vector

torch.manual_seed(0)


@torch.no_grad()
def init_weights(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.uniform_(m.weight)
    elif isinstance(m, torch.nn.BatchNorm2d):
        torch.nn.init.uniform_(m.weight)
        torch.nn.init.uniform_(m.bias)


with torch.no_grad():
    inception_a = InceptionA(3, 6)
    inception_a.apply(init_weights)

    # branch1x1
    print_cpp_vector(torch.flatten(inception_a.branch1x1.conv.weight), "branch1x1_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch1x1.bn.weight), "branch1x1_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch1x1.bn.bias), "branch1x1_bn_bias")

    # branch5x5
    print_cpp_vector(torch.flatten(inception_a.branch5x5_1.conv.weight), "branch5x5_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch5x5_1.bn.weight), "branch5x5_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch5x5_1.bn.bias), "branch5x5_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_a.branch5x5_2.conv.weight), "branch5x5_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch5x5_2.bn.weight), "branch5x5_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch5x5_2.bn.bias), "branch5x5_2_bn_bias")

    # branch3x3dbl
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_1.conv.weight), "branch3x3dbl_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_1.bn.weight), "branch3x3dbl_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_1.bn.bias), "branch3x3dbl_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_2.conv.weight), "branch3x3dbl_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_2.bn.weight), "branch3x3dbl_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_2.bn.bias), "branch3x3dbl_2_bn_bias")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_3.conv.weight), "branch3x3dbl_3_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_3.bn.weight), "branch3x3dbl_3_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch3x3dbl_3.bn.bias), "branch3x3dbl_3_bn_bias")

    # branchPool
    print_cpp_vector(torch.flatten(inception_a.branch_pool.conv.weight), "branchPool_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_a.branch_pool.bn.weight), "branchPool_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_a.branch_pool.bn.bias), "branchPool_2_bn_bias")

    input_shape = (1, 3, 8, 8)
    input = torch.randn(input_shape)
    print_cpp_vector(torch.flatten(input), "input")

    output = inception_a(input)
    output = torch.flatten(output)
    print_cpp_vector(output)

View File

@@ -1,47 +0,0 @@
import sys

import torch
from torchvision.models.inception import InceptionB

sys.path.append("../../../tools")
from utils import print_cpp_vector

torch.manual_seed(0)


@torch.no_grad()
def init_weights(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.uniform_(m.weight)
    elif isinstance(m, torch.nn.BatchNorm2d):
        torch.nn.init.uniform_(m.weight)
        torch.nn.init.uniform_(m.bias)


with torch.no_grad():
    inception_b = InceptionB(3)
    inception_b.apply(init_weights)

    # branch3x3
    print_cpp_vector(torch.flatten(inception_b.branch3x3.conv.weight), "branch3x3_conv_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3.bn.weight), "branch3x3_bn_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3.bn.bias), "branch3x3_bn_bias")

    # branch3x3dbl
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_1.conv.weight), "branch3x3dbl_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_1.bn.weight), "branch3x3dbl_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_1.bn.bias), "branch3x3dbl_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_2.conv.weight), "branch3x3dbl_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_2.bn.weight), "branch3x3dbl_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_2.bn.bias), "branch3x3dbl_2_bn_bias")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_3.conv.weight), "branch3x3dbl_3_conv_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_3.bn.weight), "branch3x3dbl_3_bn_weights")
    print_cpp_vector(torch.flatten(inception_b.branch3x3dbl_3.bn.bias), "branch3x3dbl_3_bn_bias")

    input_shape = (1, 3, 8, 8)
    input = torch.randn(input_shape)
    print_cpp_vector(torch.flatten(input), "input")

    output = inception_b(input)
    output = torch.flatten(output)
    print_cpp_vector(output, "expected")

View File

@@ -0,0 +1,83 @@
import sys

import torch
from torchvision.models.inception import (
    InceptionA,
    InceptionB,
    InceptionC,
    InceptionD,
    InceptionE,
)

sys.path.append("../../../tools")
from utils import print_cpp_vector, export_model_weights

torch.manual_seed(0)

output_size = 50


class InceptionBlockModel(torch.nn.Module):
    """Wraps an inception block with a final fully connected layer so every
    block produces a small, fixed-size output vector for the C++ tests."""

    def __init__(self, inception_block: torch.nn.Module, linear_in: int, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.inception_block = inception_block
        self.fc = torch.nn.Linear(linear_in, output_size)

    def forward(self, x):
        x = self.inception_block(x)
        x = torch.flatten(x)
        x = self.fc(x)
        return x


@torch.no_grad()
def init_weights(m: torch.nn.Module):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.uniform_(m.weight, -1, 1)
    elif isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.Linear):
        torch.nn.init.uniform_(m.weight, -1)
        torch.nn.init.uniform_(m.bias, 1)


@torch.no_grad()
def generate_module_test_data(m: torch.nn.Module, name: str):
    print(name)

    input_shape = (1, 3, 4, 4)
    input = torch.randn(input_shape)
    print_cpp_vector(torch.flatten(input), "input")

    # Run the bare block once to determine its flattened output size, which
    # becomes the input size of the fully connected layer.
    m.eval()
    inception_out = m(input)
    linear_in = torch.flatten(inception_out).size(0)

    inception_block = InceptionBlockModel(m, linear_in)
    inception_block.apply(init_weights)
    export_model_weights(inception_block, f"resources/{name}.bin")

    inception_block.eval()
    output = inception_block(input)
    print_cpp_vector(torch.flatten(output), "expected")
    print()


if __name__ == "__main__":
    m = InceptionA(3, 6)
    generate_module_test_data(m, "inception_a")

    m = InceptionB(3)
    generate_module_test_data(m, "inception_b")

    m = InceptionC(3, 64)
    generate_module_test_data(m, "inception_c")

    m = InceptionD(3)
    generate_module_test_data(m, "inception_d")

    m = InceptionE(3)
    generate_module_test_data(m, "inception_e")
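Note: print_cpp_vector and export_model_weights come from tools/utils.py, which is not part of this diff. A minimal sketch of what they plausibly do, assuming the std::vector<float> literal format visible in the test data and a raw little-endian float32 weight dump (the real helpers may differ):

import struct

import torch


def print_cpp_vector(vec: torch.Tensor, name: str = "expected") -> None:
    # Assumed behaviour: emit a flattened tensor as a C++ std::vector<float> literal.
    values = ", ".join(f"{v:.5f}f" for v in vec.tolist())
    print(f"std::vector<float> {name} = {{{values}}};")


def export_model_weights(model: torch.nn.Module, path: str) -> None:
    # Assumed behaviour: dump every float tensor in state_dict(), in iteration
    # order, as raw little-endian float32 so the C++ loadWeights() can read it.
    with open(path, "wb") as f:
        for tensor in model.state_dict().values():
            if not tensor.is_floating_point():
                continue  # skip e.g. BatchNorm's num_batches_tracked counter
            flat = tensor.flatten().tolist()
            f.write(struct.pack(f"<{len(flat)}f", *flat))

Run from the script's own directory, the generator then writes one resources/<name>.bin per block and prints the input/expected vectors that are pasted into the C++ tests.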

View File

@@ -1,73 +0,0 @@
import sys

import torch
from torchvision.models.inception import InceptionC

sys.path.append("../../../tools")
from utils import print_cpp_vector

torch.manual_seed(0)


@torch.no_grad()
def init_weights(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.uniform_(m.weight)
    elif isinstance(m, torch.nn.BatchNorm2d):
        torch.nn.init.uniform_(m.weight)
        torch.nn.init.uniform_(m.bias)


with torch.no_grad():
    inception_c = InceptionC(3, 64)
    inception_c.apply(init_weights)

    # branch1x1
    print_cpp_vector(torch.flatten(inception_c.branch1x1.conv.weight), "branch1x1_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch1x1.bn.weight), "branch1x1_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch1x1.bn.bias), "branch1x1_bn_bias")

    # branch7x7
    print_cpp_vector(torch.flatten(inception_c.branch7x7_1.conv.weight), "branch7x7_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_1.bn.weight), "branch7x7_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_1.bn.bias), "branch7x7_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_2.conv.weight), "branch7x7_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_2.bn.weight), "branch7x7_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_2.bn.bias), "branch7x7_2_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_3.conv.weight), "branch7x7_3_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_3.bn.weight), "branch7x7_3_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7_3.bn.bias), "branch7x7_3_bn_bias")

    # branch7x7dbl
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_1.conv.weight), "branch7x7dbl_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_1.bn.weight), "branch7x7dbl_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_1.bn.bias), "branch7x7dbl_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_2.conv.weight), "branch7x7dbl_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_2.bn.weight), "branch7x7dbl_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_2.bn.bias), "branch7x7dbl_2_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_3.conv.weight), "branch7x7dbl_3_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_3.bn.weight), "branch7x7dbl_3_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_3.bn.bias), "branch7x7dbl_3_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_4.conv.weight), "branch7x7dbl_4_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_4.bn.weight), "branch7x7dbl_4_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_4.bn.bias), "branch7x7dbl_4_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_5.conv.weight), "branch7x7dbl_5_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_5.bn.weight), "branch7x7dbl_5_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7dbl_5.bn.bias), "branch7x7dbl_5_bn_bias")

    # branch_pool
    print_cpp_vector(torch.flatten(inception_c.branch_pool.conv.weight), "branchPool_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch_pool.bn.weight), "branchPool_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch_pool.bn.bias), "branchPool_2_bn_bias")

    input_shape = (1, 3, 8, 8)
    input = torch.randn(input_shape)
    print_cpp_vector(torch.flatten(input), "input")

    output = inception_c(input)
    output = torch.flatten(output)
    print_cpp_vector(output, "expected")

View File

@@ -1,55 +0,0 @@
import sys

import torch
from torchvision.models.inception import InceptionD

sys.path.append("../../../tools")
from utils import print_cpp_vector

torch.manual_seed(0)


@torch.no_grad()
def init_weights(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.uniform_(m.weight)
    elif isinstance(m, torch.nn.BatchNorm2d):
        torch.nn.init.uniform_(m.weight)
        torch.nn.init.uniform_(m.bias)


with torch.no_grad():
    inception_c = InceptionD(3)
    inception_c.apply(init_weights)

    # branch3x3
    print_cpp_vector(torch.flatten(inception_c.branch3x3_1.conv.weight), "branch3x3_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch3x3_1.bn.weight), "branch3x3_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch3x3_1.bn.bias), "branch3x3_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch3x3_2.conv.weight), "branch3x3_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch3x3_2.bn.weight), "branch3x3_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch3x3_2.bn.bias), "branch3x3_2_bn_bias")

    # branch7x7x3
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_1.conv.weight), "branch7x7x3_1_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_1.bn.weight), "branch7x7x3_1_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_1.bn.bias), "branch7x7x3_1_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_2.conv.weight), "branch7x7x3_2_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_2.bn.weight), "branch7x7x3_2_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_2.bn.bias), "branch7x7x3_2_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_3.conv.weight), "branch7x7x3_3_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_3.bn.weight), "branch7x7x3_3_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_3.bn.bias), "branch7x7x3_3_bn_bias")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_4.conv.weight), "branch7x7x3_4_conv_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_4.bn.weight), "branch7x7x3_4_bn_weights")
    print_cpp_vector(torch.flatten(inception_c.branch7x7x3_4.bn.bias), "branch7x7x3_4_bn_bias")

    input_shape = (1, 3, 8, 8)
    input = torch.randn(input_shape)
    print_cpp_vector(torch.flatten(input), "input")

    output = inception_c(input)
    output = torch.flatten(output)
    print_cpp_vector(output, "expected")

View File

@@ -12,7 +12,7 @@ class BasicConv2dTest : public ::testing::Test {
     shape2d kernelSize;
     shape2d stride;
     shape2d padding;
-    std::string prefix = "test";
+    std::string prefix = "test.";
     float *d_input;
     float *d_output;
@@ -46,7 +46,7 @@ class BasicConv2dTest : public ::testing::Test {
     std::pair<std::string, CUDANet::Layers::SequentialLayer *> layerPair =
         basic_conv2d->getLayers()[0];
-    ASSERT_EQ(layerPair.first, prefix + ".conv");
+    ASSERT_EQ(layerPair.first, prefix + "conv");
     CUDANet::Layers::Conv2d *conv =
         dynamic_cast<CUDANet::Layers::Conv2d *>(layerPair.second);
@@ -60,7 +60,7 @@ class BasicConv2dTest : public ::testing::Test {
     EXPECT_EQ(cudaStatus, cudaSuccess);
     layerPair = basic_conv2d->getLayers()[1];
-    ASSERT_EQ(layerPair.first, prefix + ".bn");
+    ASSERT_EQ(layerPair.first, prefix + "bn");
     CUDANet::Layers::BatchNorm2d *bn =
         dynamic_cast<CUDANet::Layers::BatchNorm2d *>(layerPair.second);

View File

@@ -0,0 +1,50 @@
#ifndef TEST_FIXTURE_H
#define TEST_FIXTURE_H

#include <cmath>
#include <vector>

#include <gtest/gtest.h>

#include "inception_v3.hpp"

class InceptionBlockTest : public ::testing::Test {
  protected:
    CUDANet::Model *model;
    cudaError_t cudaStatus;

    shape2d inputShape;
    int inputChannels;
    int outputSize;

    std::vector<float> input;
    std::vector<float> expected;

    virtual void SetUp() override {
        model = nullptr;
    }

    virtual void TearDown() override {
        // Clean up
        delete model;
    }

    void runTest() {
        EXPECT_EQ(
            input.size(), inputShape.first * inputShape.second * inputChannels
        );

        float *output = model->predict(input.data());

        cudaStatus = cudaGetLastError();
        EXPECT_EQ(cudaStatus, cudaSuccess);

        EXPECT_EQ(outputSize, expected.size());
        for (int i = 0; i < outputSize; ++i) {
            EXPECT_NEAR(expected[i], output[i], 1e-3f);
        }
    }
};

#endif  // TEST_FIXTURE_H

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,69 @@
#include <gtest/gtest.h>

#include <inception_v3.hpp>

#include "test_fixture.hpp"

class InceptionEModel : public CUDANet::Model {
  public:
    InceptionEModel(
        const shape2d inputShape,
        const int inputChannels,
        const int outputSize
    )
        : CUDANet::Model(inputShape, inputChannels, outputSize) {
        inception_e =
            new InceptionE(inputShape, inputChannels, "inception_block.");
        addLayer("", inception_e);

        fc = new CUDANet::Layers::Dense(
            inception_e->getOutputSize(), 50,
            CUDANet::Layers::ActivationType::NONE
        );
        addLayer("fc", fc);
    }

    float *predict(const float *input) override {
        float *d_input = inputLayer->forward(input);
        d_input = inception_e->forward(d_input);
        d_input = fc->forward(d_input);
        return outputLayer->forward(d_input);
    }

  private:
    InceptionE *inception_e;
    CUDANet::Layers::Dense *fc;
};

TEST_F(InceptionBlockTest, InceptionETest) {
    inputShape    = {4, 4};
    inputChannels = 3;
    outputSize    = 50;

    model = new InceptionEModel(inputShape, inputChannels, outputSize);
    model->loadWeights("../tests/resources/inception_e.bin");

    input = {1.85083f,  0.11234f,  0.05994f,  -1.02453f, 0.21205f,  -0.67387f,
             0.66981f,  -0.40378f, 0.34194f,  0.92048f,  0.87556f,  0.81094f,
             -1.55728f, -0.70326f, -0.25078f, -0.10276f, 1.10463f,  -2.40992f,
             -1.7226f,  -0.18546f, 0.14397f,  -1.24784f, -0.35248f, -1.28729f,
             0.44803f,  1.68539f,  -1.05037f, 0.32115f,  -0.12896f, 1.02391f,
             0.95329f,  -0.81876f, -1.05828f, 0.09282f,  -0.38344f, 2.05074f,
             2.1034f,   1.65832f,  1.63788f,  -1.32596f, -1.43412f, -1.28353f,
             0.70226f,  0.9459f,   0.8579f,   0.15361f,  0.34449f,  -1.70587f};

    expected = {1614.15283f,   -11319.01855f, 614.40479f,    5280.0293f,
                1914.45007f,   -2937.50317f,  -11177.16113f, 3215.01245f,
                6249.16992f,   5654.91357f,   -11702.27148f, 13057.32422f,
                8665.35742f,   3911.11743f,   5239.45947f,   -11552.88477f,
                -8056.7666f,   -16426.19922f, -1383.04346f,  6573.53125f,
                -12226.16992f, -6641.0957f,   -9614.80078f,  -9313.30273f,
                7023.68848f,   2089.5752f,    1095.53369f,   -1387.65698f,
                -7928.21729f,  -9489.18848f,  4159.78613f,   -690.03442f,
                -8356.81738f,  12364.08203f,  8226.95703f,   8822.66602f,
                -5462.90381f,  -1037.42773f,  12958.68555f,  -666.58423f,
                2032.38574f,   -9534.14062f,  -947.41333f,   689.37158f,
                4585.76465f,   -23245.36719f, 975.83398f,    -1253.45703f,
                -14745.35059f, -2588.05493f};

    runTest();
}