# CUDANet/examples/inception_v3/tests/basic_conv2d.py

import sys
import torch
from torchvision.models.inception import BasicConv2d
sys.path.append("../../../tools")
from utils import print_cpp_vector
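
# Generate reference output for CUDANet's BasicConv2d test.
# torchvision's BasicConv2d is a Conv2d (bias=False) followed by
# BatchNorm2d and a ReLU activation.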
basic_conv2d = BasicConv2d(
    in_channels=3,
    out_channels=6,
    kernel_size=3,
    stride=1,
    padding=1,
)
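
# Fixed convolution weights, reshaped to
# (out_channels, in_channels, kernel_height, kernel_width).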
conv_weights = torch.tensor([
    0.18365, 0.08568, 0.08126, 0.68022, 0.41391, 0.71204, 0.66917,
    0.63586, 0.28914, 0.43624, 0.03018, 0.47986, 0.71336, 0.82706,
    0.587, 0.58516, 0.29813, 0.19312, 0.42975, 0.62522, 0.34256,
    0.28057, 0.37367, 0.54325, 0.63421, 0.46445, 0.56908, 0.95247,
    0.73934, 0.51263, 0.14464, 0.0956, 0.68846, 0.14675, 0.75427,
    0.50547, 0.37078, 0.03316, 0.42855, 0.94293, 0.73855, 0.86475,
    0.20687, 0.37793, 0.77947, 0.24402, 0.07547, 0.22212, 0.57188,
    0.5098, 0.71999, 0.63828, 0.53237, 0.42874, 0.43621, 0.87348,
    0.0073, 0.07752, 0.45232, 0.78307, 0.74813, 0.73456, 0.0378,
    0.78518, 0.6989, 0.50484, 0.74265, 0.39178, 0.91015, 0.11684,
    0.11499, 0.10394, 0.30637, 0.86116, 0.63743, 0.64142, 0.97882,
    0.30948, 0.32144, 0.76108, 0.81794, 0.50111, 0.82209, 0.49028,
    0.79417, 0.3257, 0.32221, 0.4007, 0.86371, 0.2271, 0.9414,
    0.66233, 0.60802, 0.65701, 0.41021, 0.1135, 0.21892, 0.93389,
    0.65786, 0.26068, 0.59535, 0.15048, 0.48185, 0.91072, 0.18252,
    0.64154, 0.89179, 0.54726, 0.60756, 0.31149, 0.30717, 0.79877,
    0.71727, 0.12418, 0.48471, 0.46097, 0.66898, 0.35467, 0.38027,
    0.16989, 0.88578, 0.84377, 0.26529, 0.26057, 0.30256, 0.84876,
    0.8849, 0.08982, 0.88191, 0.1944, 0.42052, 0.62898, 0.692,
    0.51155, 0.99903, 0.56947, 0.73144, 0.88091, 0.28472, 0.98895,
    0.41364, 0.1927, 0.07227, 0.421, 0.85347, 0.19329, 0.07098,
    0.19418, 0.06585, 0.49083, 0.85071, 0.96747, 0.45057, 0.54361,
    0.49552, 0.23454, 0.97412, 0.26663, 0.09274, 0.1662, 0.04784,
    0.76303
]).reshape(6, 3, 3, 3)
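
# Fixed BatchNorm scale (gamma) and shift (beta) parameters, one per
# output channel.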
bn_weights = torch.tensor([
    0.69298, 0.27049, 0.85854, 0.52973, 0.29644, 0.68932
])
bn_biases = torch.tensor([
    0.74976, 0.42745, 0.22132, 0.21262, 0.03726, 0.9719
])
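
# Overwrite the randomly initialised parameters with the fixed values
# above so the C++ implementation can reproduce the output exactly.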
basic_conv2d.conv.weight = torch.nn.Parameter(conv_weights)
basic_conv2d.bn.weight = torch.nn.Parameter(bn_weights)
basic_conv2d.bn.bias = torch.nn.Parameter(bn_biases)
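
# Note: the module is left in training mode, so BatchNorm normalises
# with batch statistics rather than its running estimates.
# Input batch of shape (N, C, H, W) = (1, 3, 8, 8).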
input = torch.tensor([
    0.75539, 0.17641, 0.8331, 0.80627, 0.51712, 0.87756, 0.97027,
    0.21354, 0.28498, 0.05118, 0.37124, 0.40528, 0.13661, 0.08692,
    0.73809, 0.57278, 0.73534, 0.31338, 0.15362, 0.80245, 0.49524,
    0.81208, 0.24074, 0.42534, 0.62236, 0.75915, 0.06382, 0.66723,
    0.13448, 0.96896, 0.87197, 0.67366, 0.67885, 0.49345, 0.08446,
    0.94116, 0.8659, 0.22848, 0.53262, 0.51307, 0.89661, 0.72223,
    0.90541, 0.47353, 0.85476, 0.04177, 0.04039, 0.7917, 0.56188,
    0.53777, 0.91714, 0.84847, 0.16995, 0.59803, 0.05454, 0.00365,
    0.01429, 0.42586, 0.31519, 0.222, 0.9149, 0.51885, 0.82969,
    0.42778, 0.82913, 0.01303, 0.92699, 0.09225, 0.00284, 0.75769,
    0.74072, 0.59012, 0.40777, 0.0469, 0.08751, 0.23163, 0.51327,
    0.67095, 0.31971, 0.97841, 0.82292, 0.58917, 0.31565, 0.4728,
    0.41885, 0.36524, 0.28194, 0.70945, 0.36008, 0.23199, 0.71093,
    0.33364, 0.34199, 0.42114, 0.40026, 0.77819, 0.79858, 0.93793,
    0.45238, 0.97922, 0.73814, 0.11831, 0.08414, 0.56552, 0.99841,
    0.53862, 0.71138, 0.42274, 0.48724, 0.48201, 0.5361, 0.97138,
    0.27607, 0.33018, 0.07456, 0.77788, 0.58824, 0.77027, 0.3938,
    0.28081, 0.14074, 0.06907, 0.75419, 0.11888, 0.35715, 0.34481,
    0.05669, 0.21063, 0.8664, 0.00087, 0.88281, 0.55202, 0.68655,
    0.96262, 0.53907, 0.9227, 0.74055, 0.84487, 0.22792, 0.83233,
    0.42938, 0.39054, 0.59604, 0.4141, 0.25982, 0.9311, 0.35475,
    0.71432, 0.29186, 0.16604, 0.90708, 0.00171, 0.11541, 0.35719,
    0.9221, 0.18793, 0.90198, 0.29281, 0.72144, 0.54645, 0.71165,
    0.59584, 0.24041, 0.60954, 0.64945, 0.8122, 0.34145, 0.92178,
    0.99894, 0.25076, 0.45067, 0.71997, 0.09573, 0.57334, 0.63273,
    0.49469, 0.72747, 0.33449, 0.13755, 0.49458, 0.50319, 0.91328,
    0.57269, 0.21927, 0.36831, 0.88708, 0.62277, 0.08318, 0.01425,
    0.17998, 0.34614, 0.82303
]).reshape(1, 3, 8, 8)
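
# Run the forward pass without tracking gradients and print the
# flattened output as a C++ vector initialiser.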
with torch.no_grad():
    output = basic_conv2d(input)
    output = torch.flatten(output)
    print_cpp_vector(output)