Fix weight/bias parsing and improve error logging

2024-04-20 18:36:53 +02:00
parent ecf7416f8e
commit d08567a563
6 changed files with 93 additions and 11 deletions


@@ -5,6 +5,8 @@
 #include <model.hpp>
 #include <conv2d.cuh>
+#include <max_pooling.cuh>
+#include <dense.cuh>
 std::vector<float> readAndNormalizeImage(const std::string& imagePath, int width, int height) {
     // Read the image using OpenCV
@@ -30,15 +32,63 @@ CUDANet::Model* createModel(const int inputSize, const int inputChannels, const
     CUDANet::Model *model =
         new CUDANet::Model(inputSize, inputChannels, outputSize);
-    // AlexNet
+    // Block 1
     CUDANet::Layers::Conv2d *conv1 = new CUDANet::Layers::Conv2d(
-        inputSize, inputChannels, 11, 4, 96, CUDANet::Layers::Padding::SAME, CUDANet::Layers::ActivationType::RELU
+        inputSize, inputChannels, 11, 4, 64, 2, CUDANet::Layers::ActivationType::RELU
     );
-    model->addLayer("conv1", conv1);
+    model->addLayer("features.0", conv1);  // Match pytorch naming
-    CUDANet::Layers::MaxPooling *pool1 = new CUDANet::Layers::MaxPooling(
+    CUDANet::Layers::MaxPooling2D *pool1 = new CUDANet::Layers::MaxPooling2D(
-        3, 3, 2
+        56, 64, 3, 2, CUDANet::Layers::ActivationType::NONE
-    )
+    );
+    model->addLayer("pool1", pool1);
+
+    // Block 2
+    CUDANet::Layers::Conv2d *conv2 = new CUDANet::Layers::Conv2d(
+        27, 64, 5, 1, 192, 2, CUDANet::Layers::ActivationType::RELU
+    );
+    model->addLayer("features.3", conv2);
+    CUDANet::Layers::MaxPooling2D *pool2 = new CUDANet::Layers::MaxPooling2D(
+        27, 192, 3, 2, CUDANet::Layers::ActivationType::NONE
+    );
+    model->addLayer("pool2", pool2);
+
+    // Block 3
+    CUDANet::Layers::Conv2d *conv3 = new CUDANet::Layers::Conv2d(
+        13, 192, 3, 1, 384, 1, CUDANet::Layers::ActivationType::RELU
+    );
+    model->addLayer("features.6", conv3);
+
+    // Block 4
+    CUDANet::Layers::Conv2d *conv4 = new CUDANet::Layers::Conv2d(
+        13, 384, 3, 1, 256, 1, CUDANet::Layers::ActivationType::RELU
+    );
+    model->addLayer("features.8", conv4);
+
+    // Block 5
+    CUDANet::Layers::Conv2d *conv5 = new CUDANet::Layers::Conv2d(
+        13, 256, 3, 1, 256, 1, CUDANet::Layers::ActivationType::RELU
+    );
+    model->addLayer("features.10", conv5);
+    CUDANet::Layers::MaxPooling2D *pool5 = new CUDANet::Layers::MaxPooling2D(
+        13, 256, 3, 2, CUDANet::Layers::ActivationType::NONE
+    );
+    model->addLayer("pool5", pool5);
+
+    // Classifier
+    CUDANet::Layers::Dense *dense1 = new CUDANet::Layers::Dense(
+        6 * 6 * 256, 4096, CUDANet::Layers::ActivationType::RELU
+    );
+    model->addLayer("classifier.1", dense1);
+    CUDANet::Layers::Dense *dense2 = new CUDANet::Layers::Dense(
+        4096, 4096, CUDANet::Layers::ActivationType::RELU
+    );
+    model->addLayer("classifier.4", dense2);
+    CUDANet::Layers::Dense *dense3 = new CUDANet::Layers::Dense(
+        4096, 1000, CUDANet::Layers::ActivationType::NONE
+    );
+    model->addLayer("classifier.6", dense3);
+
     return model;
 }
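For reference, the hard-coded spatial sizes above (56, 27, 13, 6 and the 6 * 6 * 256 dense input) follow from the standard convolution/pooling output-size formula, assuming a 227x227 AlexNet input and that the Conv2d arguments read (inputSize, inputChannels, kernelSize, stride, numFilters, padding, activation); that argument ordering is inferred from the values, not taken from CUDANet documentation. A minimal sketch of the arithmetic:

    #include <cassert>

    // out = floor((in + 2*pad - kernel) / stride) + 1  (standard conv/pool arithmetic)
    constexpr int outSize(int in, int kernel, int stride, int pad = 0) {
        return (in + 2 * pad - kernel) / stride + 1;
    }

    int main() {
        int s = 227;              // assumed AlexNet input size
        s = outSize(s, 11, 4, 2); // conv1 (11x11, stride 4, pad 2) -> 56
        s = outSize(s, 3, 2);     // pool1 (3x3, stride 2)          -> 27
        // conv2 (5x5, stride 1, pad 2) preserves 27
        s = outSize(s, 3, 2);     // pool2                          -> 13
        // conv3..conv5 (3x3, stride 1, pad 1) preserve 13
        s = outSize(s, 3, 2);     // pool5                          -> 6
        assert(s == 6);           // dense1 input: 6 * 6 * 256 = 9216
        return 0;
    }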
@@ -59,13 +109,22 @@ int main(int argc, const char* const argv[]) {
     const int outputSize = 1000;
     CUDANet::Model *model = createModel(inputSize, inputChannels, outputSize);
+    model->loadWeights(modelWeightsPath);
     // Read and normalize the image
     std::vector<float> imageData = readAndNormalizeImage(imagePath, inputSize, inputSize);
     // Print the size of the image data
-    std::cout << "Size of image data: " << imageData.size() << std::endl;
+    float* output = model->predict(imageData.data());
+
+    // Get max index
+    int maxIndex = 0;
+    for (int i = 0; i < outputSize; i++) {
+        if (output[i] > output[maxIndex]) {
+            maxIndex = i;
+        }
+    }
+    std::cout << "Prediction: " << maxIndex << std::endl;
     return 0;
 }
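The hand-rolled argmax loop above is correct; the same result can also be obtained with std::max_element. A sketch of the equivalent standard-library idiom, not part of this commit:

    #include <algorithm>
    #include <iterator>

    // Argmax over a raw float buffer, equivalent to the loop in main()
    int argmax(const float* output, int n) {
        return static_cast<int>(
            std::distance(output, std::max_element(output, output + n)));
    }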


@@ -1,6 +1,8 @@
 #ifndef CUDANET_ACTIVATION_FUNCTIONS_H
 #define CUDANET_ACTIVATION_FUNCTIONS_H
+#include <cuda_runtime.h>
 namespace CUDANet::Kernels {
 /**
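This hunk and the two identical header changes below add <cuda_runtime.h>, most likely so the kernel declarations still parse when the headers are included from plain C++ translation units (cuda_runtime.h brings in the definitions of CUDA specifiers such as __global__ and the CUDA runtime types). A header sketch of the situation; the kernel name here is hypothetical, not CUDANet's:

    // Without <cuda_runtime.h>, __global__ is undeclared for a host-only compiler.
    #include <cuda_runtime.h>

    namespace CUDANet::Kernels {
    __global__ void relu(float* data, int n);  // hypothetical kernel declaration
    }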


@@ -1,6 +1,8 @@
 #ifndef CUDANET_CONVOLUTION_H
 #define CUDANET_CONVOLUTION_H
+#include <cuda_runtime.h>
 namespace CUDANet::Kernels {
 /**


@@ -1,6 +1,8 @@
 #ifndef CUDANET_MATMUL_H
 #define CUDANET_MATMUL_H
+#include <cuda_runtime.h>
 namespace CUDANet::Kernels {
 /**


@@ -4,6 +4,7 @@
 #include <vector>
 #include "layer.cuh"
+#include "activation.cuh"
 namespace CUDANet::Layers {


@@ -89,11 +89,11 @@ void Model::loadWeights(const std::string& path) {
         // Parse tensor name into name and type
         std::string nameStr = line.substr(0, commaPos);
-        size_t dotPos = nameStr.find('.');
+        size_t dotPos = nameStr.find_last_of('.');
         if (dotPos == std::string::npos)
             continue;
         std::string name = nameStr.substr(0, dotPos);
-        TensorType type = nameStr.substr(dotPos + 1) == "w" ? TensorType::WEIGHT : TensorType::BIAS;
+        TensorType type = nameStr.substr(dotPos + 1) == "weight" ? TensorType::WEIGHT : TensorType::BIAS;
         line = line.substr(commaPos + 1);
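This is the parsing fix the commit title refers to. With PyTorch-style tensor names such as "features.0.weight", find('.') splits at the first dot, yielding layer name "features" and suffix "0.weight"; the suffix then never matched the old "w" marker, so every such tensor would have been classified as a bias. find_last_of('.') splits at the last dot instead. A standalone sketch of the two behaviours:

    #include <iostream>
    #include <string>

    int main() {
        std::string nameStr = "features.0.weight";  // PyTorch state_dict-style name

        size_t first = nameStr.find('.');           // old: split at FIRST dot
        std::cout << nameStr.substr(0, first) << " | "
                  << nameStr.substr(first + 1) << "\n";   // features | 0.weight

        size_t last = nameStr.find_last_of('.');    // fixed: split at LAST dot
        std::cout << nameStr.substr(0, last) << " | "
                  << nameStr.substr(last + 1) << "\n";    // features.0 | weight
        return 0;
    }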
@@ -118,15 +118,31 @@ void Model::loadWeights(const std::string& path) {
         Layers::WeightedLayer* wLayer = dynamic_cast<Layers::WeightedLayer*>(layerMap[tensorInfo.name]);
         if (wLayer == nullptr) {
-            std::cerr << "Layer: " << tensorInfo.name << "does not have weights, skipping" << std::endl;
+            std::cerr << "Layer: " << tensorInfo.name << " does not have weights" << std::endl;
             continue;
         }
         if (tensorInfo.type == TensorType::WEIGHT) {
+            if (wLayer->getWeights().size() != values.size()) {
+                std::cerr << "Layer: " << tensorInfo.name << " has incorrect number of weights, expected "
+                          << wLayer->getWeights().size() << " but got " << values.size() << ", skipping" << std::endl;
+                continue;
+            }
             wLayer->setWeights(values.data());
         } else if (tensorInfo.type == TensorType::BIAS) {
+            if (wLayer->getBiases().size() != values.size()) {
+                std::cerr << "Layer: " << tensorInfo.name << " has incorrect number of biases, expected "
+                          << wLayer->getBiases().size() << " but got " << values.size() << ", skipping" << std::endl;
+                continue;
+            }
             wLayer->setBiases(values.data());
         }
+    } else {
+        std::cerr << "Layer: " << tensorInfo.name << " does not exist, skipping" << std::endl;
     }
 }
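These size checks turn silently corrupted weight uploads into explicit, skippable errors. For interpreting the messages: a dense 2D convolution holds out_channels * in_channels * k * k weights and one bias per output channel, assuming that is what getWeights()/getBiases() report in CUDANet. A sketch of the expected counts for features.0 from the model above:

    #include <cstddef>
    #include <cstdio>

    // Expected parameter counts for a dense 2D convolution (standard formula)
    size_t convWeights(int outCh, int inCh, int k) {
        return static_cast<size_t>(outCh) * inCh * k * k;
    }

    int main() {
        // features.0: 64 filters over 3 input channels with 11x11 kernels
        std::printf("weights: %zu, biases: %d\n", convWeights(64, 3, 11), 64);
        // -> weights: 23232, biases: 64
        return 0;
    }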