mirror of https://github.com/lordmathis/CUDANet.git

Fix adding module layers

@@ -1,16 +1,6 @@
 #include <cudanet.cuh>
 #include <iostream>
 
-int main(int argc, const char *const argv[]) {
-    if (argc != 3) {
-        std::cerr << "Usage: " << argv[0] << "<model_weights_path> <image_path>"
-                  << std::endl;
-        return 1; // Return error code indicating incorrect usage
-    }
-
-    std::cout << "Loading model..." << std::endl;
-}
-
 class BasicConv2d : public CUDANet::Module {
   public:
     BasicConv2d(

@@ -702,7 +692,7 @@ class InceptionE : public CUDANet::Module {
     }
 
     shape2d getOutputDims() {
-        branch3x3_2a->getOutputDims();
+        return branch3x3_2a->getOutputDims();
     }
 
     int getOutputChannels() {

@@ -823,7 +813,7 @@ class InceptionV3 : public CUDANet::Model {
            Mixed_7c->getOutputSize(), 1000,
            CUDANet::Layers::ActivationType::SOFTMAX
        );
-        addLayer("", fc);
+        addLayer("fc", fc);
    }
 
    float* predict(const float* input) {

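The one-character change above matters because layers are stored in a name-keyed map (see the layerMap handling in the hunks below): an entry registered under the empty string is at best unreachable by a meaningful name and at worst silently overwritten by the next unnamed registration. A minimal standalone illustration of that overwrite, using a plain std::unordered_map rather than the CUDANet types:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    int main() {
        std::unordered_map<std::string, int> layerMap;

        // Two entries registered under the empty string: the second
        // silently overwrites the first.
        layerMap[""] = 1;
        layerMap[""] = 2;

        // A named entry stays addressable.
        layerMap["fc"] = 3;

        std::cout << layerMap.size() << '\n';    // prints 2, not 3
        std::cout << layerMap.at("fc") << '\n';  // prints 3
        return 0;
    }
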
@@ -905,4 +895,20 @@ class InceptionV3 : public CUDANet::Model {
     InceptionE *Mixed_7c;
 
     CUDANet::Layers::Dense *fc;
 };
+
+int main(int argc, const char *const argv[]) {
+
+    InceptionV3 *inception_v3 = new InceptionV3({299, 299}, 3, 1000);
+
+    inception_v3->printSummary();
+
+
+    if (argc != 3) {
+        std::cerr << "Usage: " << argv[0] << "<model_weights_path> <image_path>"
+                  << std::endl;
+        return 1; // Return error code indicating incorrect usage
+    }
+
+    std::cout << "Loading model..." << std::endl;
+}

@@ -11,8 +11,6 @@ namespace CUDANet {
 
 class Module : public Layers::SequentialLayer {
   public:
-    virtual ~Module() = 0;
-
     virtual float* forward(const float* d_input) = 0;
 
     int getOutputSize();

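A general C++ note on the removed declaration: a destructor declared pure virtual must still be defined, because every derived destructor implicitly calls it; with only the "= 0" declaration, destroying a derived object fails at link time. Dropping the declaration, as this hunk does, leaves the job to the implicitly-generated destructor. A small self-contained sketch with made-up names:

    #include <iostream>

    struct Base {
        // Pure virtual destructor: the declaration alone is not enough,
        // since ~Derived() always calls ~Base() on the way out.
        virtual ~Base() = 0;
    };

    // Required out-of-line definition; omit it and the program no longer links.
    Base::~Base() {}

    struct Derived : Base {
        ~Derived() override { std::cout << "Derived destroyed\n"; }
    };

    int main() {
        Base *b = new Derived();
        delete b;  // runs ~Derived(), then ~Base()
        return 0;
    }
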
@@ -56,17 +56,18 @@ float* Model::predict(const float* input) {
 }
 
 void Model::addLayer(const std::string& name, Layers::SequentialLayer* layer) {
-    Module* module = dynamic_cast<Module*>(layer);
+    const Module* module = dynamic_cast<Module*>(layer);
 
     if (module != nullptr) {
-        layers.push_back({name, module});
         for (const auto& moduleLayer : module->getLayers()) {
             layerMap[moduleLayer.first] = moduleLayer.second;
+            layers.push_back({moduleLayer.first, moduleLayer.second});
         }
 
         return;
     }
 
+
     layers.push_back({name, layer});
     layerMap[name] = layer;
 }

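This is the core of the fix: when the layer being added is itself a Module, Model::addLayer now registers each of the module's layers under its own name, in both layerMap and the ordered layers list, instead of pushing the module in as a single entry. The sketch below mirrors that flattening with plain standard-library containers; Layer, Module, and Model here are simplified stand-ins rather than the real CUDANet classes:

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // Stand-in for Layers::SequentialLayer (polymorphic so dynamic_cast works).
    struct Layer {
        virtual ~Layer() = default;
    };

    // Stand-in for CUDANet::Module: a composite exposing its sub-layers by name.
    struct Module : Layer {
        std::unordered_map<std::string, Layer*> layerMap;
        const std::unordered_map<std::string, Layer*>& getLayers() const {
            return layerMap;
        }
    };

    struct Model {
        std::vector<std::pair<std::string, Layer*>> layers;    // ordered list
        std::unordered_map<std::string, Layer*>     layerMap;  // lookup by name

        void addLayer(const std::string& name, Layer* layer) {
            const Module* module = dynamic_cast<Module*>(layer);
            if (module != nullptr) {
                // Flatten: register each sub-layer under its own name
                // rather than storing the module as one opaque entry.
                for (const auto& moduleLayer : module->getLayers()) {
                    layerMap[moduleLayer.first] = moduleLayer.second;
                    layers.push_back({moduleLayer.first, moduleLayer.second});
                }
                return;
            }
            layers.push_back({name, layer});
            layerMap[name] = layer;
        }
    };

    int main() {
        Layer conv, bn, fc;

        Module block;
        block.layerMap = {{"block.conv", &conv}, {"block.bn", &bn}};

        Model model;
        model.addLayer("block", &block);  // expands into block.conv and block.bn
        model.addLayer("fc", &fc);

        for (const auto& entry : model.layers) {
            std::cout << entry.first << '\n';  // sub-layer order follows the map's
        }                                      // (unordered) iteration order
        return 0;
    }
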
@@ -5,23 +5,29 @@
 using namespace CUDANet;
 
 void Module::addLayer(const std::string& name, Layers::SequentialLayer* layer) {
-    Module* module = dynamic_cast<Module*>(layer);
+    const Module* module = dynamic_cast<Module*>(layer);
 
     if (module != nullptr) {
-        layers.push_back({ name, module });
         for (const auto& moduleLayer : module->getLayers()) {
             layerMap[moduleLayer.first] = moduleLayer.second;
+            layers.push_back({moduleLayer.first, moduleLayer.second});
         }
 
         return;
     }
 
+    layers.push_back({name, layer});
+    layerMap[name] = layer;
+
+    // std::cout << "Wat?! - module" << name << std::endl;
 }
 
 Layers::SequentialLayer* Module::getLayer(const std::string& name) {
     return layerMap[name];
 }
 
-const std::unordered_map<std::string, Layers::SequentialLayer*>& Module::getLayers() const {
+const std::unordered_map<std::string, Layers::SequentialLayer*>&
+Module::getLayers() const {
     return layerMap;
 }
+

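Because Module::addLayer performs the same flattening as Model::addLayer above, a module nested inside another module (for instance, a BasicConv2d block inside one of the Inception modules in the example) also contributes its individual layers to the enclosing module's layerMap; by the time the outer module is added to the model, getLayers() already returns the flattened name-to-layer map.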