Add strided conv2d test

This commit is contained in:
2024-03-11 21:51:27 +01:00
parent a3973f0b21
commit 2518138ef8
2 changed files with 226 additions and 76 deletions

View File

@@ -109,7 +109,7 @@ TEST_F(Conv2dTest, SimpleTest) {
commonTestTeardown(d_input, d_output);
}
TEST_F(Conv2dTest, ComplexTest) {
TEST_F(Conv2dTest, PaddedTest) {
int inputSize = 5;
int inputChannels = 3;
int kernelSize = 3;
@@ -191,16 +191,110 @@ TEST_F(Conv2dTest, ComplexTest) {
// Generated by tools/generate_conv2d_test.py
std::vector<float> expected = {
2.29426f, 3.89173f, 4.17634f, 3.25501f, 2.07618f, 5.41483f, 7.09971f,
6.39811f, 5.71432f, 3.10928f, 5.12973f, 6.29638f, 5.26962f, 5.21997f,
3.05852f, 6.17517f, 7.19311f, 6.69771f, 6.2142f, 4.03242f, 3.3792f,
4.36444f, 4.396f, 4.69905f, 3.62061f, 2.87914f, 3.71743f, 3.51854f,
2.98413f, 1.46579f, 4.94951f, 6.18983f, 4.98187f, 4.38372f, 3.35386f,
5.0364f, 5.3756f, 4.05993f, 4.89299f, 2.78625f, 5.33763f, 5.80899f,
5.89785f, 5.51095f, 3.74287f, 2.64053f, 4.05895f, 3.96482f, 4.30177f,
1.94269f
// Channel 1
2.29426f, 3.89173f, 4.17634f, 3.25501f, 2.07618f,
5.41483f, 7.09971f, 6.39811f, 5.71432f, 3.10928f,
5.12973f, 6.29638f, 5.26962f, 5.21997f, 3.05852f,
6.17517f, 7.19311f, 6.69771f, 6.2142f, 4.03242f,
3.3792f, 4.36444f, 4.396f, 4.69905f, 3.62061f,
// Channel 2
2.87914f, 3.71743f, 3.51854f, 2.98413f, 1.46579f,
4.94951f, 6.18983f, 4.98187f, 4.38372f, 3.35386f,
5.0364f, 5.3756f, 4.05993f, 4.89299f, 2.78625f,
5.33763f, 5.80899f, 5.89785f, 5.51095f, 3.74287f,
2.64053f, 4.05895f, 3.96482f, 4.30177f, 1.94269f
};
for (int i = 0; i < output.size(); i++) {
EXPECT_NEAR(output[i], expected[i], 0.0001f);
}
commonTestTeardown(d_input, d_output);
}
TEST_F(Conv2dTest, StridedPaddedConvolution) {
    // Convolve a 5x5, 2-channel input with two 3x3 filters, stride 2,
    // SAME padding and ReLU activation, then compare against values
    // produced by the reference PyTorch implementation.
    int inputSize = 5;
    int inputChannels = 2;
    int kernelSize = 3;
    int stride = 2;
    int numFilters = 2;
    Layers::Padding padding = Layers::Padding::SAME;
    Layers::Activation activation = Layers::Activation::RELU;
    // clang-format off
    std::vector<float> input = {
        // Channel 1
        0.946f, 0.879f, 0.382f, 0.542f, 0.453f,
        0.128f, 0.860f, 0.778f, 0.049f, 0.974f,
        0.400f, 0.874f, 0.161f, 0.271f, 0.580f,
        0.373f, 0.078f, 0.366f, 0.396f, 0.181f,
        0.246f, 0.112f, 0.179f, 0.979f, 0.026f,
        // Channel 2
        0.598f, 0.458f, 0.776f, 0.213f, 0.199f,
        0.853f, 0.170f, 0.609f, 0.269f, 0.777f,
        0.776f, 0.694f, 0.430f, 0.238f, 0.968f,
        0.473f, 0.303f, 0.084f, 0.785f, 0.444f,
        0.464f, 0.413f, 0.779f, 0.298f, 0.783f
    };
    std::vector<float> kernels = {
        // Filter 1, Channel 1
        0.744f, 0.745f, 0.641f,
        0.164f, 0.157f, 0.127f,
        0.732f, 0.761f, 0.601f,
        // Filter 1, Channel 2
        0.475f, 0.335f, 0.499f,
        0.833f, 0.793f, 0.176f,
        0.822f, 0.163f, 0.175f,
        // Filter 2, Channel 1
        0.918f, 0.340f, 0.497f,
        0.233f, 0.218f, 0.847f,
        0.931f, 0.926f, 0.199f,
        // Filter 2, Channel 2
        0.510f, 0.432f, 0.567f,
        0.236f, 0.397f, 0.739f,
        0.939f, 0.891f, 0.006f
    };
    // clang-format on
    float* d_input;
    float* d_output;
    Layers::Conv2d conv2d = commonTestSetup(
        inputSize, inputChannels, kernelSize, stride, padding, numFilters,
        activation, input, kernels.data(), d_input, d_output
    );
    // ASSERT (not EXPECT): if outputSize is wrong, every buffer size below
    // is wrong too, so continuing would read/copy out of bounds.
    ASSERT_EQ(inputSize, conv2d.outputSize);
    conv2d.forward(d_input, d_output);
    std::vector<float> output(
        conv2d.outputSize * conv2d.outputSize * numFilters
    );
    cudaMemcpy(
        output.data(), d_output,
        sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters,
        cudaMemcpyDeviceToHost
    );
    // Generated by tools/generate_conv2d_test.py
    // The zero border comes from SAME padding combined with ReLU clamping
    // the (negative-free, zero-padded) edge products.
    std::vector<float> expected = {
        // Channel 1
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.59803f, 2.84444f, 1.6201f, 0.0f,
        0.0f, 2.38937f, 3.80762f, 3.39679f, 0.0f,
        0.0f, 1.13102f, 2.33335f, 1.98488f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        // Channel 2
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 2.57732f, 3.55543f, 2.24675f, 0.0f,
        0.0f, 3.36842f, 3.41373f, 3.14804f, 0.0f,
        0.0f, 1.17963f, 2.55005f, 1.63218f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f
    };
    // size_t index avoids the signed/unsigned comparison with output.size().
    for (size_t i = 0; i < output.size(); ++i) {
        EXPECT_NEAR(output[i], expected[i], 0.0001f);
    }
    commonTestTeardown(d_input, d_output);
}

View File

@@ -1,8 +1,36 @@
import torch
import numpy as np
def conv2d(in_channels, out_channels, kernel_size, stride, padding, inputs, weights):
    """Run a bias-free 2D convolution and return the flattened output.

    Args:
        in_channels: number of input channels.
        out_channels: number of filters.
        kernel_size: spatial size of each (square) kernel.
        stride: convolution stride.
        padding: zero-padding applied to each spatial border.
        inputs: input tensor of shape (batch, in_channels, H, W).
        weights: kernel tensor of shape
            (out_channels, in_channels, kernel_size, kernel_size).

    Returns:
        A 1-D tensor with the convolution output flattened in
        (channel, row, col) order, matching the C++ test layout.
    """
    # Use a distinct local name so we don't shadow this function.
    layer = torch.nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        bias=False,
    )
    layer.weight = torch.nn.Parameter(weights)
    # Inference only: no need to track gradients.
    with torch.no_grad():
        output = layer(inputs)
    return torch.flatten(output)
def print_cpp_vector(vector):
    """Print a tensor as a C++ std::vector<float> initializer.

    Each element is rounded to 5 decimal places and suffixed with 'f',
    e.g. ``std::vector<float> expected = {1.0f, 2.5f};``.
    """
    literals = [str(round(value.item(), 5)) + "f" for value in vector]
    print("std::vector<float> expected = {" + ", ".join(literals) + "};")
def gen_padded_test_result():
in_channels = 3
out_channels = 2
kernel_size = 3
stride = 1
padding = 1
# Define input and kernel data as tensors
input_data = torch.tensor([
inputs = torch.tensor([
0.823, 0.217, 0.435, 0.981, 0.742,
0.109, 0.518, 0.374, 0.681, 0.147,
0.956, 0.729, 0.654, 0.087, 0.392,
@@ -18,9 +46,9 @@ input_data = torch.tensor([
0.789, 0.123, 0.345, 0.123, 0.456,
0.123, 0.345, 0.123, 0.789, 0.123,
0.345, 0.123, 0.789, 0.123, 0.456
], dtype=torch.float)
], dtype=torch.float).reshape(1, 3, 5, 5)
kernel_data = torch.tensor([
weights = torch.tensor([
0.128, 0.754, 0.987,
0.321, 0.412, 0.635,
0.298, 0.017, 0.845,
@@ -39,25 +67,53 @@ kernel_data = torch.tensor([
0.345, 0.678, 0.011,
0.678, 0.011, 0.345,
0.011, 0.345, 0.678
], dtype=torch.float)
], dtype=torch.float).reshape(2, 3, 3, 3)
# Reshape input data to a 4D tensor (batch_size, channels, height, width)
input_data = input_data.reshape(1, 3, 5, 5)
output = conv2d(in_channels, out_channels, kernel_size, stride, padding, inputs, weights)
print_cpp_vector(output)
# Define the convolution layer
conv2d = torch.nn.Conv2d(in_channels=3, out_channels=2, kernel_size=3, padding=1, bias=False)
def gen_strided_test_result():
# Set the weights of the convolution layer
conv2d.weight = torch.nn.Parameter(kernel_data.reshape(2, 3, 3, 3))
in_channels = 2
out_channels = 2
kernel_size = 3
stride = 2
padding = 3
# Perform the convolution
output = conv2d(input_data)
input = torch.tensor([
0.946, 0.879, 0.382, 0.542, 0.453,
0.128, 0.860, 0.778, 0.049, 0.974,
0.400, 0.874, 0.161, 0.271, 0.580,
0.373, 0.078, 0.366, 0.396, 0.181,
0.246, 0.112, 0.179, 0.979, 0.026,
0.598, 0.458, 0.776, 0.213, 0.199,
0.853, 0.170, 0.609, 0.269, 0.777,
0.776, 0.694, 0.430, 0.238, 0.968,
0.473, 0.303, 0.084, 0.785, 0.444,
0.464, 0.413, 0.779, 0.298, 0.783
], dtype=torch.float).reshape(1, 2, 5, 5)
weights = torch.tensor([
0.744, 0.745, 0.641,
0.164, 0.157, 0.127,
0.732, 0.761, 0.601,
0.475, 0.335, 0.499,
0.833, 0.793, 0.176,
0.822, 0.163, 0.175,
0.918, 0.340, 0.497,
0.233, 0.218, 0.847,
0.931, 0.926, 0.199,
0.510, 0.432, 0.567,
0.236, 0.397, 0.739,
0.939, 0.891, 0.006
], dtype=torch.float).reshape(2, 2, 3, 3)
# Print the output as cpp vector
output = torch.flatten(output)
print("std::vector<float> expected = {", end="")
for i in range(len(output)):
if i != 0:
print(", ", end="")
print(str(round(output[i].item(), 5)) + "f", end="")
print("};")
output = conv2d(in_channels, out_channels, kernel_size, stride, padding, input, weights)
print_cpp_vector(output)
if __name__ == "__main__":
print("Generating test results...")
print("Padded convolution test:")
gen_padded_test_result()
print("Strided convolution test:")
gen_strided_test_result()