Mirror of https://github.com/lordmathis/CUDANet.git
Start implementing Inception v3
examples/inception_v3/inception_v3.cpp (new file, 48 lines)
@@ -0,0 +1,48 @@
#include <cudanet.cuh>
#include <iostream>

int main(int argc, const char *const argv[]) {
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " <model_weights_path> <image_path>"
                  << std::endl;
        return 1;  // Return error code indicating incorrect usage
    }

    std::cout << "Loading model..." << std::endl;
}

// Conv2d -> BatchNorm -> ReLU block, the basic building block of Inception v3
class BasicConv2d : public CUDANet::Module {
  public:
    BasicConv2d(
        const int inputSize,
        const int inputChannels,
        const int outputChannels,
        const int kernelSize,
        const int stride,
        const int padding,
        const std::string& prefix
    )
        : inputSize(inputSize),
          inputChannels(inputChannels),
          outputChannels(outputChannels) {
        // Create the convolution layer (no activation; ReLU is applied after batch norm)
        CUDANet::Layers::Conv2d *conv = new CUDANet::Layers::Conv2d(
            inputSize, inputChannels, kernelSize, stride, outputChannels,
            padding, CUDANet::Layers::ActivationType::NONE
        );

        int batchNormSize = conv->getOutputSize();

        // Batch norm with fused ReLU activation, eps = 1e-3 as in torchvision's Inception v3
        CUDANet::Layers::BatchNorm *batchNorm = new CUDANet::Layers::BatchNorm(
            batchNormSize, outputChannels, 1e-3f,
            CUDANet::Layers::ActivationType::RELU
        );

        // Register layers under prefixed names so they can be matched to exported weights
        addLayer(prefix + ".conv", conv);
        addLayer(prefix + ".bn", batchNorm);
    }

  private:
    int inputSize;
    int inputChannels;
    int outputChannels;
};
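For context, the C++ BasicConv2d above mirrors torchvision's module of the same name, which is why batch norm uses eps = 1e-3 and ReLU is applied after normalization rather than after the convolution. A rough sketch of the PyTorch-side definition, paraphrased from torchvision.models.inception (not part of this commit):

import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d(eps=1e-3) -> ReLU, used throughout Inception v3."""

    def __init__(self, in_channels: int, out_channels: int, **kwargs) -> None:
        super().__init__()
        # The submodule names .conv and .bn are what the prefix + ".conv" / ".bn"
        # layer names in the C++ module correspond to.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return F.relu(self.bn(self.conv(x)), inplace=True)

The parameter names printed by print_model_parameters in the script below follow this structure (e.g. Conv2d_1a_3x3.conv.weight, Conv2d_1a_3x3.bn.weight), which is what the prefixed addLayer calls are set up to match.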
examples/inception_v3/inception_v3.py (new file, 17 lines)
@@ -0,0 +1,17 @@
import torch
import torchvision
import sys

from torchsummary import summary

inception = torchvision.models.inception_v3(weights=torchvision.models.Inception_V3_Weights.DEFAULT)
inception.eval()

sys.path.append('../../tools')  # Ugly hack
from utils import export_model_weights, print_model_parameters

print_model_parameters(inception)  # print layer names and number of parameters

inception.cuda()

summary(inception, (3, 299, 299))
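The script imports export_model_weights but never calls it yet. A minimal sketch of the missing step, appended to the end of the script above and assuming the helper in tools/utils.py takes the model and an output path (the actual signature is not shown in this commit); the resulting file would be the <model_weights_path> argument expected by inception_v3.cpp:

# Assumed usage: export_model_weights(model, output_path).
# The real signature lives in tools/utils.py and is not part of this diff;
# the output filename below is a placeholder.
export_model_weights(inception, 'inception_v3_weights.bin')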