Mirror of https://github.com/lordmathis/CUDANet.git
synced 2025-11-05 17:34:21 +00:00
Run black autoformatter
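The diff below comes from running black over the Python test generators. The scripts wrap their hand-aligned tensor literals in "# fmt: off" / "# fmt: on" markers, so black reflows the surrounding code (collapsing or exploding argument lists, adding trailing commas) but leaves the value grids alone. A minimal sketch of that behaviour; the names and values here are illustrative, not from the repository:

# Before black: hand-formatted call with aligned arguments
# result = helper(a,
#                 b,
#                 c)
#
# After black: collapsed onto one line (or exploded with a trailing comma if too long)
# result = helper(a, b, c)

# fmt: off
values = [
    1.0, 2.0,   # black does not touch this region,
    3.0, 4.0,   # so the 2x2 layout of the literal is preserved
]
# fmt: on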
@@ -2,12 +2,13 @@ import torch

from utils import print_cpp_vector


def gen_batch_norm_test_result(input):

    batch_norm = torch.nn.BatchNorm2d(2, track_running_stats=False)

    weights = torch.Tensor([0.63508, 0.64903])
    biases= torch.Tensor([0.25079, 0.66841])
    biases = torch.Tensor([0.25079, 0.66841])

    batch_norm.weight = torch.nn.Parameter(weights)
    batch_norm.bias = torch.nn.Parameter(biases)

@@ -15,11 +16,13 @@ def gen_batch_norm_test_result(input):

    output = batch_norm(input)
    print_cpp_vector(output.flatten())


if __name__ == "__main__":

    print("Generating test results...")
    print("Batch norm test:")

    # fmt: off
    input = torch.Tensor([
        # Channel 0
        0.38899, 0.80478, 0.48836, 0.97381,

@@ -32,11 +35,13 @@ if __name__ == "__main__":

        0.13449, 0.27367, 0.53036, 0.18962,
        0.57672, 0.48364, 0.10863, 0.0571
    ]).reshape(1, 2, 4, 4)
    # fmt: on

    gen_batch_norm_test_result(input)

    print("Batch norm test non square input:")

    # fmt: off
    input = torch.Tensor([
        0.38899, 0.80478, 0.48836, 0.97381, 0.21567, 0.92312,
        0.57508, 0.60835, 0.65467, 0.00168, 0.31567, 0.71345,

@@ -47,6 +52,6 @@ if __name__ == "__main__":

        0.13449, 0.27367, 0.53036, 0.18962, 0.45623, 0.14523,
        0.57672, 0.48364, 0.10863, 0.0571, 0.78934, 0.67545
    ]).reshape(1, 2, 4, 6)
    # fmt: on

    gen_batch_norm_test_result(input)
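For context, with track_running_stats=False the BatchNorm2d layer above normalizes each channel with the statistics of the batch itself and then applies the affine weight and bias. A rough sketch of the computation behind the expected C++ vectors; the helper name and the eps default are assumptions, not repository code:

import torch

def manual_batch_norm2d(x, weight, bias, eps=1e-5):
    # x: (N, C, H, W); per-channel mean and biased variance over N, H, W
    mean = x.mean(dim=(0, 2, 3), keepdim=True)
    var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)
    x_hat = (x - mean) / torch.sqrt(var + eps)
    # scale and shift with the learnable parameters
    return x_hat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)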
@@ -2,20 +2,17 @@ import torch

from utils import print_cpp_vector


def _conv2d(in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            inputs,
            weights):

    conv2d = torch.nn.Conv2d(in_channels=in_channels,
                             out_channels=out_channels,
                             kernel_size=kernel_size,
                             stride=stride,
                             padding=padding,
                             bias=False)
def _conv2d(in_channels, out_channels, kernel_size, stride, padding, inputs, weights):

    conv2d = torch.nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        bias=False,
    )
    conv2d.weight = torch.nn.Parameter(weights)

    output = conv2d(inputs)

@@ -24,6 +21,7 @@ def _conv2d(in_channels,

    output = torch.flatten(output)
    return output
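Because _conv2d builds the layer with bias=False and assigns the weights directly, the same flattened output can also be obtained with the functional API. A small equivalent sketch, not part of the repository:

import torch
import torch.nn.functional as F

def _conv2d_functional(stride, padding, inputs, weights):
    # weights has shape (out_channels, in_channels, kH, kW), so the
    # channel counts do not need to be passed separately
    out = F.conv2d(inputs, weights, bias=None, stride=stride, padding=padding)
    return torch.flatten(out)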
def gen_convd_padded_test_result():

    in_channels = 3

@@ -32,7 +30,7 @@ def gen_convd_padded_test_result():

    stride = 1
    padding = 1

    # Define input and kernel data as tensors
    # fmt: off
    inputs = torch.tensor([
        0.823, 0.217, 0.435, 0.981, 0.742,
        0.109, 0.518, 0.374, 0.681, 0.147,

@@ -71,15 +69,12 @@ def gen_convd_padded_test_result():

        0.678, 0.011, 0.345,
        0.011, 0.345, 0.678
    ], dtype=torch.float).reshape(2, 3, 3, 3)
    # fmt: on

    output = _conv2d(
        in_channels, out_channels, kernel_size, stride, padding, inputs, weights
    )

    output = _conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding,
                     inputs,
                     weights)

    print_cpp_vector(output)


@@ -91,6 +86,7 @@ def gen_convd_strided_test_result():

    stride = 2
    padding = 3

    # fmt: off
    input = torch.tensor([
        0.946, 0.879, 0.382, 0.542, 0.453,
        0.128, 0.860, 0.778, 0.049, 0.974,

@@ -103,6 +99,7 @@ def gen_convd_strided_test_result():

        0.473, 0.303, 0.084, 0.785, 0.444,
        0.464, 0.413, 0.779, 0.298, 0.783
    ], dtype=torch.float).reshape(1, 2, 5, 5)

    weights = torch.tensor([
        0.744, 0.745, 0.641,
        0.164, 0.157, 0.127,

@@ -117,15 +114,12 @@ def gen_convd_strided_test_result():

        0.236, 0.397, 0.739,
        0.939, 0.891, 0.006
    ], dtype=torch.float).reshape(2, 2, 3, 3)
    # fmt: on

    output = _conv2d(
        in_channels, out_channels, kernel_size, stride, padding, input, weights
    )

    output = _conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding,
                     input,
                     weights)

    print_cpp_vector(output)


@@ -137,6 +131,7 @@ def gen_convd_non_square_input_test_result():

    stride = 1
    padding = 0

    # fmt: off
    input = torch.tensor([
        0.946, 0.879, 0.382, 0.542, 0.453, 0.128,
        0.128, 0.860, 0.778, 0.049, 0.974, 0.400,

@@ -144,22 +139,19 @@ def gen_convd_non_square_input_test_result():

        0.078, 0.366, 0.396, 0.181, 0.246, 0.112,
    ]).reshape(1, 1, 4, 6)

    weights = torch.tensor([
        0.744, 0.745,
        0.164, 0.157,
    ]).reshape(1, 1, 2, 2)
    # fmt: on

    output = _conv2d(
        in_channels, out_channels, kernel_size, stride, padding, input, weights
    )

    output = _conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding,
                     input,
                     weights)

    print_cpp_vector(output)


def gen_convd_non_square_kernel_test_result():

    in_channels = 1

@@ -168,6 +160,7 @@ def gen_convd_non_square_kernel_test_result():

    stride = 1
    padding = 0

    # fmt: off
    input = torch.tensor([
        0.946, 0.879, 0.382, 0.542,
        0.128, 0.860, 0.778, 0.049,

@@ -178,17 +171,15 @@ def gen_convd_non_square_kernel_test_result():

    weights = torch.tensor([
        0.744, 0.745, 0.164
    ]).reshape(1, 1, 1, 3)
    # fmt: on

    output = _conv2d(
        in_channels, out_channels, kernel_size, stride, padding, input, weights
    )

    output = _conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding,
                     input,
                     weights)

    print_cpp_vector(output)


def gen_convd_non_square_stride_test_result():

    in_channels = 1

@@ -197,6 +188,7 @@ def gen_convd_non_square_stride_test_result():

    stride = (1, 2)
    padding = 0

    # fmt: off
    input = torch.tensor([
        0.946, 0.879, 0.382, 0.542,
        0.128, 0.860, 0.778, 0.049,

@@ -208,17 +200,15 @@ def gen_convd_non_square_stride_test_result():

        0.144, 0.745,
        0.964, 0.164
    ]).reshape(1, 1, 2, 2)
    # fmt: on

    output = _conv2d(
        in_channels, out_channels, kernel_size, stride, padding, input, weights
    )

    output = _conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding,
                     input,
                     weights)

    print_cpp_vector(output)


def gen_convd_non_square_padding_test_result():

    in_channels = 1

@@ -227,6 +217,7 @@ def gen_convd_non_square_padding_test_result():

    stride = 1
    padding = (1, 2)

    # fmt: off
    input = torch.tensor([
        0.946, 0.879, 0.382, 0.542,
        0.128, 0.860, 0.778, 0.049,

@@ -238,15 +229,12 @@ def gen_convd_non_square_padding_test_result():

        0.144, 0.745,
        0.964, 0.164
    ]).reshape(1, 1, 2, 2)
    # fmt: on

    output = _conv2d(
        in_channels, out_channels, kernel_size, stride, padding, input, weights
    )

    output = _conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding,
                     input,
                     weights)

    print_cpp_vector(output)


@@ -263,4 +251,4 @@ if __name__ == "__main__":

    print("Non square stride convolution test:")
    gen_convd_non_square_stride_test_result()
    print("Non square padding convolution test:")
    gen_convd_non_square_padding_test_result()
    gen_convd_non_square_padding_test_result()
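Each generator above fixes stride and padding, so the length of the expected vector follows from the usual convolution size formula, out = floor((in + 2*pad - kernel) / stride) + 1 per spatial dimension. A small sanity-check sketch under that assumption; the helper name is illustrative, not repository code:

def conv_output_size(in_size, kernel, stride=1, padding=0):
    # standard output-size formula for one spatial dimension (no dilation)
    return (in_size + 2 * padding - kernel) // stride + 1

# e.g. the strided test: 5x5 input, 3x3 kernel, stride 2, padding 3 -> 5x5 output
assert conv_output_size(5, 3, stride=2, padding=3) == 5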
@@ -1,8 +1,10 @@
import torch
from utils import print_cpp_vector


def gen_dense_softmax_test():

    # fmt: off
    input = torch.tensor([
        0.1, 0.2, 0.3, 0.4, 0.5
    ])

@@ -17,6 +19,7 @@ def gen_dense_softmax_test():

    biases = torch.tensor([
        0.1, 0.2, 0.3, 0.4
    ])
    # fmt: on

    dense = torch.nn.Linear(5, 4)
    dense.weight = torch.nn.Parameter(weights)

@@ -35,10 +38,9 @@ def gen_dense_softmax_test():

    souftmax_out = softmax_exp / softmax_sum
    print(souftmax_out)

    softmax = torch.nn.Softmax(dim=0)(output)
    print_cpp_vector(softmax)


if __name__ == "__main__":
    gen_dense_softmax_test()
    gen_dense_softmax_test()
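The manual softmax_exp / softmax_sum check above mirrors the plain definition exp(x_i) / sum_j exp(x_j). When inputs can be large, the standard trick is to subtract the maximum first, which leaves the result unchanged but avoids overflow. A short sketch, not repository code:

import torch

def stable_softmax(x):
    # subtracting max(x) keeps exp() in range without changing the ratios
    shifted = x - x.max()
    e = torch.exp(shifted)
    return e / e.sum()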
@@ -2,6 +2,7 @@ import torch

from utils import export_model_weights, print_cpp_vector


class TestModel(torch.nn.Module):

    def __init__(self, *args, **kwargs) -> None:

@@ -13,24 +14,18 @@ class TestModel(torch.nn.Module):

            kernel_size=3,
            stride=1,
            padding=0,
            bias=False
        )

        self.maxpool1 = torch.nn.MaxPool2d(
            kernel_size=2,
            stride=2
            bias=False,
        )

        self.maxpool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.activation = torch.nn.ReLU()

        self.linear = torch.nn.Linear(
            in_features=8,
            out_features=3,
            bias=False
        )
        self.linear = torch.nn.Linear(in_features=8, out_features=3, bias=False)
        self.softmax = torch.nn.Softmax(dim=0)

    def set_weights(self):

        # fmt: off
        conv2d_weights = torch.tensor([
            0.18313, 0.53363, 0.39527, 0.27575, 0.3433, 0.41746,
            0.16831, 0.61693, 0.54599, 0.99692, 0.77127, 0.25146,

@@ -40,8 +35,10 @@ class TestModel(torch.nn.Module):

            0.68407, 0.2684, 0.2855, 0.76195, 0.67828, 0.603
        ]).reshape(2, 2, 3, 3)
        # fmt: on
        self.conv1.weight = torch.nn.Parameter(conv2d_weights)

        # fmt: off
        linear_weights = torch.tensor([
            0.36032, 0.33115, 0.02948,
            0.09802, 0.45072, 0.56266,

@@ -52,6 +49,7 @@ class TestModel(torch.nn.Module):

            0.51559, 0.81916, 0.64915,
            0.03934, 0.87608, 0.68364,
        ]).reshape(3, 8)
        # fmt: on
        self.linear.weight = torch.nn.Parameter(linear_weights)

    def forward(self, x):

@@ -64,11 +62,13 @@ class TestModel(torch.nn.Module):

        x = self.softmax(x)

        return x


if __name__ == "__main__":
    model = TestModel()
    model.set_weights()

    # fmt: off
    input = torch.tensor([
        0.12762, 0.99056, 0.77565, 0.29058, 0.29787, 0.58415, 0.20484,
        0.05415, 0.60593, 0.3162, 0.08198, 0.92749, 0.72392, 0.91786,

@@ -82,14 +82,14 @@ if __name__ == "__main__":

        0.84854, 0.61415, 0.2466, 0.20017, 0.78952, 0.93797, 0.27884,
        0.30514, 0.23521
    ]).reshape(2, 6, 6)
    # input = torch.rand(2, 6, 6)
    # fmt: on

    print("Single test output:")
    out = model(input)
    print_cpp_vector(out)

    print("Multiple predict test output 1:")
    # fmt: off
    input = torch.tensor([
        0.81247, 0.03579, 0.26577, 0.80374, 0.64584, 0.19658, 0.04817,
        0.50769, 0.33502, 0.01739, 0.32263, 0.69625, 0.07433, 0.98283,

@@ -103,10 +103,12 @@ if __name__ == "__main__":

        0.16811, 0.72188, 0.08683, 0.66985, 0.62707, 0.4035, 0.51822,
        0.46545, 0.88722
    ]).reshape(2, 6, 6)
    # fmt: on
    out = model(input)
    print_cpp_vector(out)

    print("Multiple predict test output 2:")
    # fmt: off
    input = torch.tensor([
        0.83573, 0.19191, 0.16004, 0.27137, 0.64768, 0.38417, 0.02167,
        0.28834, 0.21401, 0.16624, 0.12037, 0.12706, 0.3588, 0.10685,

@@ -120,6 +122,7 @@ if __name__ == "__main__":

        0.66075, 0.64496, 0.1191, 0.66261, 0.63431, 0.7137, 0.14851,
        0.84456, 0.44482
    ]).reshape(2, 6, 6)
    # fmt: on
    out = model(input)
    print_cpp_vector(out)
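For orientation, the shapes implied by the layers above work out as follows: the (2, 6, 6) input goes through the 3x3 convolution with two output channels and no padding, then the 2x2 max pool, is flattened to 8 features for the linear layer, and ends in a 3-way softmax. A brief shape-flow sketch; the layer parameters are read off the definitions above, but this snippet itself is not repository code:

import torch

x = torch.rand(2, 6, 6)                      # two channels, 6x6 spatial
x = torch.nn.Conv2d(2, 2, 3, bias=False)(x)  # -> (2, 4, 4), 3x3 kernel, no padding
x = torch.nn.MaxPool2d(2, 2)(x)              # -> (2, 2, 2)
x = torch.flatten(x)                         # -> (8,)
x = torch.nn.Linear(8, 3, bias=False)(x)     # -> (3,)
x = torch.nn.Softmax(dim=0)(x)               # -> (3,), entries sum to 1
print(x.shape)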
@@ -3,6 +3,7 @@ from utils import print_cpp_vector

def _get_pool_input():
    # fmt: off
    return torch.tensor([
        0.573, 0.619, 0.732, 0.055,
        0.243, 0.316, 0.573, 0.619,

@@ -13,8 +14,11 @@ def _get_pool_input():

        0.473, 0.455, 0.283, 0.416,
        0.532, 0.819, 0.732, 0.850
    ]).reshape(1, 2, 4, 4)
    # fmt: on


def _get_pool_input_non_square():
    # fmt: off
    return torch.Tensor([
        0.573, 0.619, 0.732, 0.055, 0.123, 0.234,
        0.243, 0.316, 0.573, 0.619, 0.456, 0.789,

@@ -25,6 +29,7 @@ def _get_pool_input_non_square():

        0.473, 0.455, 0.283, 0.416, 0.789, 0.123,
        0.532, 0.819, 0.732, 0.850, 0.987, 0.321
    ]).reshape(1, 2, 4, 6)
    # fmt: on


def gen_max_pool_test_result():

@@ -62,6 +67,7 @@ def gen_max_pool_non_square_stride_test_result():

    print_cpp_vector(output)


def gen_max_pool_non_square_padding_test_result():
    input = _get_pool_input()
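The pooling generators all follow the same pattern: build a MaxPool2d with the parameters under test, run it on one of the shared inputs above, and print the flattened result. A representative sketch with illustrative parameters; the actual kernel sizes, strides and paddings live outside the hunks shown here:

import torch

def gen_example_max_pool_result(input):
    # kernel/stride/padding values below are placeholders, not the ones used in the tests
    pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=(1, 0))
    output = pool(input)
    print(output.flatten())

gen_example_max_pool_result(torch.rand(1, 2, 4, 4))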
@@ -3,27 +3,33 @@ from utils import print_cpp_vector

def gen_softmax1_test_result():
    # fmt: off
    input = torch.tensor([
        0.573, 0.619, 0.732, 0.055, 0.243
    ])
    # fmt: on

    output = torch.nn.Softmax(dim=0)(input)
    print_cpp_vector(output)


def gen_softmax2_test_result():
    # fmt: off
    input = torch.tensor([
        22.496, 36.9006, 30.9904, 28.4213, 26.4541, 31.7887
    ])
    # fmt: on

    output = torch.nn.Softmax(dim=0)(input)
    print_cpp_vector(output)


def gen_softmax_exp():
    # fmt: off
    input = torch.tensor([
        22.496, 36.9006, 30.9904, 28.4213, 26.4541, 31.7887
    ])
    # fmt: on

    output = torch.exp(input)
    print_cpp_vector(output)

@@ -36,4 +42,4 @@ if __name__ == "__main__":

    print("Softmax 2 test:")
    gen_softmax2_test_result()
    print("Softmax exp test:")
    gen_softmax_exp()
    gen_softmax_exp()
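gen_softmax_exp prints the raw exponentials of the same vector used in gen_softmax2_test_result, so the two expected outputs differ only by one normalization. A quick check of that identity, not repository code:

import torch

x = torch.tensor([22.496, 36.9006, 30.9904, 28.4213, 26.4541, 31.7887])
e = torch.exp(x)
# softmax equals the exponentials divided by their sum
assert torch.allclose(torch.nn.Softmax(dim=0)(x), e / e.sum())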
@@ -1,10 +1,25 @@
import torch


def gen_vector_mean_test_result():
    input = torch.tensor([0.44371, 0.20253, 0.73232, 0.40378, 0.93348, 0.72756, 0.63388, 0.5251, 0.23973, 0.52233])
    input = torch.tensor(
        [
            0.44371,
            0.20253,
            0.73232,
            0.40378,
            0.93348,
            0.72756,
            0.63388,
            0.5251,
            0.23973,
            0.52233,
        ]
    )
    output = torch.mean(input)

    print(output)


if __name__ == "__main__":
    gen_vector_mean_test_result()
    gen_vector_mean_test_result()
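As a quick cross-check of the expected value, the ten inputs sum to 5.36442, so torch.mean should print a value close to 0.536442 (up to float32 rounding). A minimal verification sketch, not repository code:

import torch

values = [0.44371, 0.20253, 0.73232, 0.40378, 0.93348, 0.72756, 0.63388, 0.5251, 0.23973, 0.52233]
expected = sum(values) / len(values)  # 0.536442
print(torch.isclose(torch.tensor(values).mean(), torch.tensor(expected)))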