Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-11-05 17:34:21 +00:00)
Add strided conv2d test
Changes to the Conv2d test suite:

@@ -12,9 +12,9 @@ class Conv2dTest : public ::testing::Test {
         int inputChannels,
         int kernelSize,
         int stride,
         Layers::Padding padding,
         int numFilters,
         Layers::Activation activation,
         std::vector<float>& input,
         float* kernels,
         float*& d_input,
@@ -61,12 +61,12 @@ class Conv2dTest : public ::testing::Test {
 };
 
 TEST_F(Conv2dTest, SimpleTest) {
     int inputSize = 4;
     int inputChannels = 1;
     int kernelSize = 2;
     int stride = 1;
     Layers::Padding padding = Layers::Padding::VALID;
     int numFilters = 1;
     Layers::Activation activation = Layers::Activation::NONE;
 
     std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
@@ -109,13 +109,13 @@ TEST_F(Conv2dTest, SimpleTest) {
     commonTestTeardown(d_input, d_output);
 }
 
-TEST_F(Conv2dTest, ComplexTest) {
+TEST_F(Conv2dTest, PaddedTest) {
     int inputSize = 5;
     int inputChannels = 3;
     int kernelSize = 3;
     int stride = 1;
     Layers::Padding padding = Layers::Padding::SAME;
     int numFilters = 2;
     Layers::Activation activation = Layers::Activation::NONE;
 
     // clang-format off
@@ -191,16 +191,110 @@ TEST_F(Conv2dTest, ComplexTest) {
 
     // Generated by tools/generate_conv2d_test.py
     std::vector<float> expected = {
-        2.29426f, 3.89173f, 4.17634f, 3.25501f, 2.07618f, 5.41483f, 7.09971f,
-        6.39811f, 5.71432f, 3.10928f, 5.12973f, 6.29638f, 5.26962f, 5.21997f,
-        3.05852f, 6.17517f, 7.19311f, 6.69771f, 6.2142f, 4.03242f, 3.3792f,
-        4.36444f, 4.396f, 4.69905f, 3.62061f, 2.87914f, 3.71743f, 3.51854f,
-        2.98413f, 1.46579f, 4.94951f, 6.18983f, 4.98187f, 4.38372f, 3.35386f,
-        5.0364f, 5.3756f, 4.05993f, 4.89299f, 2.78625f, 5.33763f, 5.80899f,
-        5.89785f, 5.51095f, 3.74287f, 2.64053f, 4.05895f, 3.96482f, 4.30177f,
-        1.94269f
+        // Channel 1
+        2.29426f, 3.89173f, 4.17634f, 3.25501f, 2.07618f,
+        5.41483f, 7.09971f, 6.39811f, 5.71432f, 3.10928f,
+        5.12973f, 6.29638f, 5.26962f, 5.21997f, 3.05852f,
+        6.17517f, 7.19311f, 6.69771f, 6.2142f, 4.03242f,
+        3.3792f, 4.36444f, 4.396f, 4.69905f, 3.62061f,
+        // Channel 2
+        2.87914f, 3.71743f, 3.51854f, 2.98413f, 1.46579f,
+        4.94951f, 6.18983f, 4.98187f, 4.38372f, 3.35386f,
+        5.0364f, 5.3756f, 4.05993f, 4.89299f, 2.78625f,
+        5.33763f, 5.80899f, 5.89785f, 5.51095f, 3.74287f,
+        2.64053f, 4.05895f, 3.96482f, 4.30177f, 1.94269f
     };
     for (int i = 0; i < output.size(); i++) {
         EXPECT_NEAR(output[i], expected[i], 0.0001f);
     }
-}
+    commonTestTeardown(d_input, d_output);
+}
+
+TEST_F(Conv2dTest, StridedPaddedConvolution) {
+    int inputSize = 5;
+    int inputChannels = 2;
+    int kernelSize = 3;
+    int stride = 2;
+    int numFilters = 2;
+    Layers::Padding padding = Layers::Padding::SAME;
+    Layers::Activation activation = Layers::Activation::RELU;
+
+    // clang-format off
+    std::vector<float> input = {
+        // Channel 1
+        0.946f, 0.879f, 0.382f, 0.542f, 0.453f,
+        0.128f, 0.860f, 0.778f, 0.049f, 0.974f,
+        0.400f, 0.874f, 0.161f, 0.271f, 0.580f,
+        0.373f, 0.078f, 0.366f, 0.396f, 0.181f,
+        0.246f, 0.112f, 0.179f, 0.979f, 0.026f,
+        // Channel 2
+        0.598f, 0.458f, 0.776f, 0.213f, 0.199f,
+        0.853f, 0.170f, 0.609f, 0.269f, 0.777f,
+        0.776f, 0.694f, 0.430f, 0.238f, 0.968f,
+        0.473f, 0.303f, 0.084f, 0.785f, 0.444f,
+        0.464f, 0.413f, 0.779f, 0.298f, 0.783f
+    };
+    std::vector<float> kernels = {
+        // Filter 1, Channel 1
+        0.744f, 0.745f, 0.641f,
+        0.164f, 0.157f, 0.127f,
+        0.732f, 0.761f, 0.601f,
+        // Filter 1, Channel 2
+        0.475f, 0.335f, 0.499f,
+        0.833f, 0.793f, 0.176f,
+        0.822f, 0.163f, 0.175f,
+        // Filter 2, Channel 1
+        0.918f, 0.340f, 0.497f,
+        0.233f, 0.218f, 0.847f,
+        0.931f, 0.926f, 0.199f,
+        // Filter 2, Channel 2
+        0.510f, 0.432f, 0.567f,
+        0.236f, 0.397f, 0.739f,
+        0.939f, 0.891f, 0.006f
+    };
+    // clang-format on
+
+    float* d_input;
+    float* d_output;
+
+    Layers::Conv2d conv2d = commonTestSetup(
+        inputSize, inputChannels, kernelSize, stride, padding, numFilters,
+        activation, input, kernels.data(), d_input, d_output
+    );
+
+    EXPECT_EQ(inputSize, conv2d.outputSize);
+
+    conv2d.forward(d_input, d_output);
+
+    std::vector<float> output(
+        conv2d.outputSize * conv2d.outputSize * numFilters
+    );
+    cudaMemcpy(
+        output.data(), d_output,
+        sizeof(float) * conv2d.outputSize * conv2d.outputSize * numFilters,
+        cudaMemcpyDeviceToHost
+    );
+
+    // Generated by tools/generate_conv2d_test.py
+    std::vector<float> expected = {
+        // Channel 1
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.59803f, 2.84444f, 1.6201f, 0.0f,
+        0.0f, 2.38937f, 3.80762f, 3.39679f, 0.0f,
+        0.0f, 1.13102f, 2.33335f, 1.98488f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        // Channel 2
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 2.57732f, 3.55543f, 2.24675f, 0.0f,
+        0.0f, 3.36842f, 3.41373f, 3.14804f, 0.0f,
+        0.0f, 1.17963f, 2.55005f, 1.63218f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    for (int i = 0; i < output.size(); i++) {
+        EXPECT_NEAR(output[i], expected[i], 0.0001f);
+    }
+
+    commonTestTeardown(d_input, d_output);
+}
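For reference, the 5x5-per-filter layout in the new StridedPaddedConvolution expected data is consistent with the standard convolution output-size relation; nothing here is CUDANet-specific, and the padding value of 3 is taken from gen_strided_test_result() in the generator script below. A minimal PyTorch sketch of the arithmetic, assuming only that torch is installed:

    import torch

    # H_out = floor((H + 2*padding - kernel) / stride) + 1
    #       = floor((5 + 2*3 - 3) / 2) + 1 = 5
    conv = torch.nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3,
                           stride=2, padding=3, bias=False)
    y = conv(torch.rand(1, 2, 5, 5))
    print(y.shape)  # torch.Size([1, 2, 5, 5])

    # The outermost output positions see only zero padding, and there is no
    # bias term, so the border rows and columns are exactly zero; ReLU leaves
    # zeros unchanged. This matches the 0.0f borders in the expected vector.
    print(y[0, :, 0, :].abs().max().item())  # 0.0

This is also why EXPECT_EQ(inputSize, conv2d.outputSize) holds for this configuration.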
Changes to tools/generate_conv2d_test.py:

@@ -1,63 +1,119 @@
 import torch
 import numpy as np
 
-# Define input and kernel data as tensors
-input_data = torch.tensor([
-    0.823, 0.217, 0.435, 0.981, 0.742,
-    0.109, 0.518, 0.374, 0.681, 0.147,
-    0.956, 0.729, 0.654, 0.087, 0.392,
-    0.784, 0.921, 0.543, 0.231, 0.816,
-    0.472, 0.614, 0.102, 0.987, 0.398,
-    0.051, 0.756, 0.841, 0.293, 0.128,
-    0.417, 0.632, 0.095, 0.184, 0.529,
-    0.871, 0.958, 0.213, 0.347, 0.725,
-    0.461, 0.012, 0.278, 0.195, 0.649,
-    0.853, 0.707, 0.988, 0.988, 0.322,
-    0.345, 0.123, 0.789, 0.123, 0.456,
-    0.456, 0.789, 0.123, 0.345, 0.123,
-    0.789, 0.123, 0.345, 0.123, 0.456,
-    0.123, 0.345, 0.123, 0.789, 0.123,
-    0.345, 0.123, 0.789, 0.123, 0.456
-], dtype=torch.float)
-
-kernel_data = torch.tensor([
-    0.128, 0.754, 0.987,
-    0.321, 0.412, 0.635,
-    0.298, 0.017, 0.845,
-    0.514, 0.729, 0.952,
-    0.684, 0.378, 0.159,
-    0.823, 0.547, 0.216,
-    0.983, 0.231, 0.456,
-    0.178, 0.654, 0.821,
-    0.345, 0.987, 0.123,
-    0.789, 0.543, 0.210,
-    0.012, 0.371, 0.638,
-    0.456, 0.198, 0.907,
-    0.101, 0.432, 0.759,
-    0.234, 0.567, 0.890,
-    0.543, 0.876, 0.219,
-    0.345, 0.678, 0.011,
-    0.678, 0.011, 0.345,
-    0.011, 0.345, 0.678
-], dtype=torch.float)
-
-# Reshape input data to a 4D tensor (batch_size, channels, height, width)
-input_data = input_data.reshape(1, 3, 5, 5)
-
-# Define the convolution layer
-conv2d = torch.nn.Conv2d(in_channels=3, out_channels=2, kernel_size=3, padding=1, bias=False)
-
-# Set the weights of the convolution layer
-conv2d.weight = torch.nn.Parameter(kernel_data.reshape(2, 3, 3, 3))
-
-# Perform the convolution
-output = conv2d(input_data)
-
-# Print the output as cpp vector
-output = torch.flatten(output)
-print("std::vector<float> expected = {", end="")
-for i in range(len(output)):
-    if i != 0:
-        print(", ", end="")
-    print(str(round(output[i].item(), 5)) + "f", end="")
-print("};")
+def conv2d(in_channels, out_channels, kernel_size, stride, padding, inputs, weights):
+    conv2d = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
+    conv2d.weight = torch.nn.Parameter(weights)
+
+    output = conv2d(inputs)
+
+    # Print the output as cpp vector
+    output = torch.flatten(output)
+    return output
+
+
+def print_cpp_vector(vector):
+    print("std::vector<float> expected = {", end="")
+    for i in range(len(vector)):
+        if i != 0:
+            print(", ", end="")
+        print(str(round(vector[i].item(), 5)) + "f", end="")
+    print("};")
+
+
+def gen_padded_test_result():
+    in_channels = 3
+    out_channels = 2
+    kernel_size = 3
+    stride = 1
+    padding = 1
+
+    # Define input and kernel data as tensors
+    inputs = torch.tensor([
+        0.823, 0.217, 0.435, 0.981, 0.742,
+        0.109, 0.518, 0.374, 0.681, 0.147,
+        0.956, 0.729, 0.654, 0.087, 0.392,
+        0.784, 0.921, 0.543, 0.231, 0.816,
+        0.472, 0.614, 0.102, 0.987, 0.398,
+        0.051, 0.756, 0.841, 0.293, 0.128,
+        0.417, 0.632, 0.095, 0.184, 0.529,
+        0.871, 0.958, 0.213, 0.347, 0.725,
+        0.461, 0.012, 0.278, 0.195, 0.649,
+        0.853, 0.707, 0.988, 0.988, 0.322,
+        0.345, 0.123, 0.789, 0.123, 0.456,
+        0.456, 0.789, 0.123, 0.345, 0.123,
+        0.789, 0.123, 0.345, 0.123, 0.456,
+        0.123, 0.345, 0.123, 0.789, 0.123,
+        0.345, 0.123, 0.789, 0.123, 0.456
+    ], dtype=torch.float).reshape(1, 3, 5, 5)
+
+    weights = torch.tensor([
+        0.128, 0.754, 0.987,
+        0.321, 0.412, 0.635,
+        0.298, 0.017, 0.845,
+        0.514, 0.729, 0.952,
+        0.684, 0.378, 0.159,
+        0.823, 0.547, 0.216,
+        0.983, 0.231, 0.456,
+        0.178, 0.654, 0.821,
+        0.345, 0.987, 0.123,
+        0.789, 0.543, 0.210,
+        0.012, 0.371, 0.638,
+        0.456, 0.198, 0.907,
+        0.101, 0.432, 0.759,
+        0.234, 0.567, 0.890,
+        0.543, 0.876, 0.219,
+        0.345, 0.678, 0.011,
+        0.678, 0.011, 0.345,
+        0.011, 0.345, 0.678
+    ], dtype=torch.float).reshape(2, 3, 3, 3)
+
+    output = conv2d(in_channels, out_channels, kernel_size, stride, padding, inputs, weights)
+    print_cpp_vector(output)
+
+
+def gen_strided_test_result():
+    in_channels = 2
+    out_channels = 2
+    kernel_size = 3
+    stride = 2
+    padding = 3
+
+    input = torch.tensor([
+        0.946, 0.879, 0.382, 0.542, 0.453,
+        0.128, 0.860, 0.778, 0.049, 0.974,
+        0.400, 0.874, 0.161, 0.271, 0.580,
+        0.373, 0.078, 0.366, 0.396, 0.181,
+        0.246, 0.112, 0.179, 0.979, 0.026,
+        0.598, 0.458, 0.776, 0.213, 0.199,
+        0.853, 0.170, 0.609, 0.269, 0.777,
+        0.776, 0.694, 0.430, 0.238, 0.968,
+        0.473, 0.303, 0.084, 0.785, 0.444,
+        0.464, 0.413, 0.779, 0.298, 0.783
+    ], dtype=torch.float).reshape(1, 2, 5, 5)
+    weights = torch.tensor([
+        0.744, 0.745, 0.641,
+        0.164, 0.157, 0.127,
+        0.732, 0.761, 0.601,
+        0.475, 0.335, 0.499,
+        0.833, 0.793, 0.176,
+        0.822, 0.163, 0.175,
+        0.918, 0.340, 0.497,
+        0.233, 0.218, 0.847,
+        0.931, 0.926, 0.199,
+        0.510, 0.432, 0.567,
+        0.236, 0.397, 0.739,
+        0.939, 0.891, 0.006
+    ], dtype=torch.float).reshape(2, 2, 3, 3)
+
+    output = conv2d(in_channels, out_channels, kernel_size, stride, padding, input, weights)
+    print_cpp_vector(output)
+
+
+if __name__ == "__main__":
+    print("Generating test results...")
+    print("Padded convolution test:")
+    gen_padded_test_result()
+    print("Strided convolution test:")
+    gen_strided_test_result()
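As a usage sketch only: the helper names below come from the refactored script, but importing it as a module (rather than running it directly, where the __main__ block prints both expected vectors) assumes the tools/ directory is on sys.path; the import name is inferred from the referenced path tools/generate_conv2d_test.py.

    import torch
    # Hypothetical interactive use to regenerate a single expected vector.
    from generate_conv2d_test import conv2d, print_cpp_vector

    inputs = torch.rand(1, 2, 5, 5)   # NCHW input
    weights = torch.rand(2, 2, 3, 3)  # out_channels x in_channels x kH x kW
    vec = conv2d(in_channels=2, out_channels=2, kernel_size=3, stride=2,
                 padding=3, inputs=inputs, weights=weights)
    print_cpp_vector(vec)  # prints "std::vector<float> expected = { ... };"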