Mirror of https://github.com/lordmathis/CUDANet.git, synced 2025-11-05 17:34:21 +00:00
Update inception v3 readme
examples/inception_v3/README.md (new file, 43 lines added)

# Inception v3

Inception v3 Inference on CUDANet

## Usage

1. Export PyTorch Inception v3 weights pretrained on ImageNet (requires PyTorch and torchvision):

```sh
python inception_v3.py
```

2. Follow the instructions from the repository root to build the CUDANet library.

3. Build Inception v3 (requires [OpenCV](https://opencv.org/) for image loading and preprocessing):

```sh
mkdir build
cd build
cmake -S ..
make
```

4. (Optional) Run tests

Generate the test inputs/outputs and resources by running `inception_blocks.py` in the `test` folder.
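A minimal invocation sketch, assuming the script takes no arguments and is run from inside the `test` directory (the README does not spell this out):

```sh
cd test
python inception_blocks.py
```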

Build and run tests (requires [Google Test](https://github.com/google/googletest)):

```sh
cd build
make test_inception_v3
./tests/test_inception_v3
```

5. Run Inception v3 inference:

```sh
inception_v3 ../inception_v3_weights.bin ../image.jpg
```

## Note on Preprocessing

The image preprocessing in this implementation uses OpenCV, which may produce slightly different results compared to PyTorch's Pillow-based preprocessing due to differences in interpolation methods during resizing.
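For intuition, here is a small self-contained Python sketch (not part of the repository) contrasting a Pillow/torchvision pipeline with an OpenCV-style one. `image.jpg` is a placeholder path, the mean/std are the standard ImageNet statistics, and the OpenCV branch is only an approximation of what the C++ example does:

```python
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

MEAN = [0.485, 0.456, 0.406]  # standard ImageNet statistics
STD = [0.229, 0.224, 0.225]

# Pillow/torchvision pipeline, matching the resize=299 / crop=299 defaults of predict():
# resize the shorter edge with PIL's antialiased bilinear filter, then center-crop.
pil_pipeline = transforms.Compose([
    transforms.Resize(299),
    transforms.CenterCrop(299),
    transforms.ToTensor(),
    transforms.Normalize(MEAN, STD),
])
pil_input = pil_pipeline(Image.open("image.jpg").convert("RGB")).unsqueeze(0)

# OpenCV pipeline: BGR -> RGB, plain bilinear resize straight to 299x299
# (so the geometry also differs for non-square images), then the same normalization.
bgr = cv2.imread("image.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
rgb = cv2.resize(rgb, (299, 299), interpolation=cv2.INTER_LINEAR)
cv_input = (rgb - np.array(MEAN, dtype=np.float32)) / np.array(STD, dtype=np.float32)
cv_input = torch.from_numpy(cv_input).permute(2, 0, 1).unsqueeze(0)

# The tensors differ because of interpolation and resize/crop geometry,
# which propagates into slightly different logits.
print((pil_input - cv_input).abs().max())
```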
```diff
@@ -4,14 +4,21 @@ import sys
 sys.path.append("../../tools")  # Ugly hack
 from utils import export_model_weights, print_model_parameters, predict
 
+import torch
+
 if __name__ == "__main__":
-    inception = torchvision.models.inception_v3(
     weights = torchvision.models.Inception_V3_Weights.DEFAULT
+    inception = torchvision.models.inception_v3(
+        weights=weights,
+        transform_input=False
     )
 
+    inception.transform_input = False
     inception.eval()
 
-    # print_model_parameters(inception)  # print layer names and number of parameters
     export_model_weights(inception, "inception_v3_weights.bin")
 
-    # print(predict(inception, "bird.jpg"))
+    # class_labels = weights.meta["categories"]
+    # prediction = predict(inception, "bird.jpg")
+    # print(prediction, class_labels[prediction])
```
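The updated script (presumably `examples/inception_v3/inception_v3.py`, the one invoked in step 1 of the README) pins `transform_input=False`, both as a constructor argument and again as an attribute. For context, torchvision's `Inception3` applies an extra per-channel rescaling of its input when `transform_input` is enabled, so the same weights produce different outputs for the same tensor depending on that flag; keeping it off means the exported network sees exactly the externally preprocessed input. A small standalone sketch (randomly initialized models, not code from the repository) showing the effect:

```python
import torch
import torchvision

x = torch.randn(1, 3, 299, 299)

# Build two models, then copy weights so they differ only in the transform_input flag.
a = torchvision.models.inception_v3(weights=None, transform_input=False, init_weights=False).eval()
b = torchvision.models.inception_v3(weights=None, transform_input=True, init_weights=False).eval()
b.load_state_dict(a.state_dict())

with torch.no_grad():
    # False: the internal input rescaling changes the activations everywhere downstream.
    print(torch.allclose(a(x), b(x)))
```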
```diff
@@ -35,8 +35,6 @@ def export_model_weights(model: torch.nn.Module, filename):
 
         tensor_data += tensor_bytes
 
-    # print(model.named_buffers)
-
     # Add buffers (for running_mean and running_var)
     for name, buf in model.named_buffers():
         if "running_mean" not in name and "running_var" not in name:
```
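This hunk and the two that follow evidently touch the shared helpers (the `utils.py` imported via `from utils import ...` above). The filter shown here keeps only the BatchNorm running statistics among a module's buffers. A tiny standalone illustration (not repository code) of what `named_buffers()` yields and which buffer the check skips:

```python
import torch.nn as nn

# Toy model: the conv layer has no buffers, the BatchNorm layer has three.
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))

for name, buf in model.named_buffers():
    exported = "running_mean" in name or "running_var" in name
    print(f"{name:25s} shape={tuple(buf.shape)} exported={exported}")

# Prints 1.running_mean and 1.running_var (kept) plus 1.num_batches_tracked,
# a scalar step counter, which is what the running_mean/running_var check filters out.
```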
```diff
@@ -76,9 +74,7 @@ def predict(model, image_path, resize=299, crop=299, preprocess=None):
         )
 
     input_tensor = preprocess(input_image)
-    input_batch = input_tensor.unsqueeze(
-        0
-    )  # create a mini-batch as expected by the model
+    input_batch = input_tensor.unsqueeze(0)
 
     # move the input and model to GPU for speed if available
     if torch.cuda.is_available():
```
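Collapsing the call to `input_tensor.unsqueeze(0)` keeps the original intent: a single preprocessed image gains a leading batch dimension because the model expects NCHW input. A trivial illustration:

```python
import torch

input_tensor = torch.randn(3, 299, 299)  # one preprocessed image (C, H, W)
input_batch = input_tensor.unsqueeze(0)  # mini-batch of one (1, C, H, W)
print(input_tensor.shape, input_batch.shape)
```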
```diff
@@ -87,4 +83,4 @@ def predict(model, image_path, resize=299, crop=299, preprocess=None):
 
     with torch.no_grad():
         output = model(input_batch)
-    return torch.argmax(output)
+    return torch.argmax(output).item()
```
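Appending `.item()` makes `predict()` return a plain Python `int` rather than a zero-dimensional tensor, which is more convenient for indexing into a label list such as the `class_labels` lookup in the commented-out lines earlier. For example:

```python
import torch

output = torch.tensor([[0.1, 2.5, -0.3]])  # fake logits for 3 classes

print(torch.argmax(output))         # tensor(1), a 0-dim tensor
print(torch.argmax(output).item())  # 1, a plain Python int
```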