Migrate conv2d layer

2024-09-10 21:19:50 +02:00
parent 74f49d6a00
commit 757584544c
7 changed files with 208 additions and 163 deletions

View File

@@ -14,7 +14,7 @@
 #include "avg_pooling.hpp"
 #include "batch_norm.cuh"
 #include "concat.hpp"
-#include "conv2d.cuh"
+#include "conv2d.hpp"
 #include "dense.hpp"
 #include "input.hpp"
 #include "layer.hpp"

View File

@@ -4,7 +4,6 @@
 #include <vector>
 #include "activation.hpp"
-#include "convolution.cuh"
 #include "layer.hpp"

 namespace CUDANet::Layers {
@@ -122,11 +121,25 @@ class Conv2d : public WeightedLayer, public TwoDLayer {
     std::vector<float> weights;
     std::vector<float> biases;

-    // Cuda
+    float* forwardCPU(const float* input);
+
+    // Cuda
+#ifdef USE_CUDA
     float* d_output;
     float* d_weights;
     float* d_biases;
+
+    float* forwardCUDA(const float* d_input);
+
+    void initCUDA();
+    void delCUDA();
+
+    /**
+     * @brief Copy weights and biases to the device
+     *
+     */
+    void toCuda();
+#endif

     Activation* activation;

     /**
@@ -140,12 +153,6 @@ class Conv2d : public WeightedLayer, public TwoDLayer {
      *
      */
     void initializeBiases();
-
-    /**
-     * @brief Copy weights and biases to the device
-     *
-     */
-    void toCuda();
 };

 } // namespace CUDANet::Layers

View File

@@ -0,0 +1,73 @@
#include <vector>
#include "activation.hpp"
#include "conv2d.hpp"
#include "convolution.cuh"
#include "cuda_helper.cuh"
#include "layer.hpp"
#include "matmul.cuh"
#include "vector.cuh"
using namespace CUDANet::Layers;
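// Allocate device memory for the layer output, the filter weights and the
// per-filter biases; the weight and bias sizes mirror the host-side vectors.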
void Conv2d::initCUDA() {
    d_output = nullptr;
    CUDA_CHECK(cudaMalloc(
        (void**)&d_output,
        sizeof(float) * outputSize.first * outputSize.second * numFilters
    ));

    d_weights = nullptr;
    CUDA_CHECK(cudaMalloc(
        (void**)&d_weights, sizeof(float) * kernelSize.first *
                                kernelSize.second * inputChannels * numFilters
    ));

    d_biases = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&d_biases, sizeof(float) * numFilters));
}
void Conv2d::delCUDA() {
    cudaFree(d_output);
    cudaFree(d_weights);
    cudaFree(d_biases);
}
void Conv2d::toCuda() {
    CUDA_CHECK(cudaMemcpy(
        d_weights, weights.data(),
        sizeof(float) * kernelSize.first * kernelSize.second * inputChannels *
            numFilters,
        cudaMemcpyHostToDevice
    ));
    CUDA_CHECK(cudaMemcpy(
        d_biases, biases.data(), sizeof(float) * numFilters,
        cudaMemcpyHostToDevice
    ));
}
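// forwardCUDA: clear the output buffer, launch the convolution kernel over a
// 3-D grid (ceil-divided across the two output dimensions and the filter
// count so every output element gets a thread), apply the activation in
// place, then synchronize before handing back the device pointer.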
float* Conv2d::forwardCUDA(const float* d_input) {
    // Convolve
    dim3 block(8, 8, 8);
    dim3 grid(
        (outputSize.first + block.x - 1) / block.x,
        (outputSize.second + block.y - 1) / block.y,
        (numFilters + block.z - 1) / block.z
    );

    CUDANet::Utils::clear(
        d_output, outputSize.first * outputSize.second * numFilters
    );

    Kernels::convolution<<<grid, block>>>(
        d_input, d_weights, d_biases, d_output, inputSize, inputChannels,
        paddingSize, kernelSize, stride, numFilters, outputSize
    );
    CUDA_CHECK(cudaGetLastError());

    // Apply activation
    activation->activate(d_output);

    CUDA_CHECK(cudaDeviceSynchronize());

    return d_output;
}

src/layers/conv2d.cpp (new file, 111 lines)
View File

@@ -0,0 +1,111 @@
#include <stdexcept>
#include <vector>
#include "activation.hpp"
#include "conv2d.hpp"
#include "layer.hpp"
using namespace CUDANet::Layers;
Conv2d::Conv2d(
    shape2d inputSize,
    int inputChannels,
    shape2d kernelSize,
    shape2d stride,
    int numFilters,
    shape2d paddingSize,
    ActivationType activationType
)
    : inputSize(inputSize),
      inputChannels(inputChannels),
      kernelSize(kernelSize),
      stride(stride),
      numFilters(numFilters),
      paddingSize(paddingSize) {
    outputSize = {
        (inputSize.first - kernelSize.first + 2 * paddingSize.first) /
                stride.first +
            1,
        (inputSize.second - kernelSize.second + 2 * paddingSize.second) /
                stride.second +
            1
    };

    activation = new Activation(
        activationType, outputSize.first * outputSize.second * numFilters
    );

    weights.resize(
        kernelSize.first * kernelSize.second * inputChannels * numFilters
    );
    initializeWeights();

    biases.resize(numFilters);
    initializeBiases();

#ifdef USE_CUDA
    initCUDA();
    toCuda();
#endif
}
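// Sanity check of the output-size arithmetic above, with hypothetical values
// (not taken from this commit): a 32x32 input, 3x3 kernel, stride 1 and
// padding 1 keep the spatial size, since (32 - 3 + 2 * 1) / 1 + 1 = 32.
// In code form:
//
//     constexpr int in = 32, k = 3, p = 1, s = 1;
//     static_assert((in - k + 2 * p) / s + 1 == 32, "size-preserving");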
Conv2d::~Conv2d() {
#ifdef USE_CUDA
    delCUDA();
#endif
    delete activation;
}
void Conv2d::initializeWeights() {
    std::fill(weights.begin(), weights.end(), 0.0f);
}

void Conv2d::initializeBiases() {
    std::fill(biases.begin(), biases.end(), 0.0f);
}
void Conv2d::setWeights(const float* weights_input) {
    std::copy(weights_input, weights_input + weights.size(), weights.begin());
#ifdef USE_CUDA
    toCuda();
#endif
}

std::vector<float> Conv2d::getWeights() {
    return weights;
}

void Conv2d::setBiases(const float* biases_input) {
    std::copy(biases_input, biases_input + biases.size(), biases.begin());
#ifdef USE_CUDA
    toCuda();
#endif
}

std::vector<float> Conv2d::getBiases() {
    return biases;
}
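// The CPU fallback is not implemented yet; forward() only reaches it in
// non-CUDA builds.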
float* Conv2d::forwardCPU(const float* input) {
    throw std::logic_error("Not implemented");
}

float* Conv2d::forward(const float* input) {
#ifdef USE_CUDA
    return forwardCUDA(input);
#else
    return forwardCPU(input);
#endif
}

int Conv2d::getOutputSize() {
    return outputSize.first * outputSize.second * numFilters;
}

int Conv2d::getInputSize() {
    return inputSize.first * inputSize.second * inputChannels;
}

shape2d Conv2d::getOutputDims() {
    return outputSize;
}
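// Usage sketch (hypothetical, for illustration only; assumes an
// ActivationType::RELU enumerator exists and that shape2d accepts pair-style
// brace initialization; in CUDA builds forward() takes and returns device
// pointers):
//
//     Conv2d conv({32, 32}, 3, {3, 3}, {1, 1}, 16, {1, 1},
//                 ActivationType::RELU);
//     std::vector<float> w(conv.getWeights().size(), 0.01f);
//     conv.setWeights(w.data());          // re-syncs to device under USE_CUDA
//     float* d_out = conv.forward(d_in);  // d_in: device input buffer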

View File

@@ -1,144 +0,0 @@
#include <iostream>
#include <vector>
#include "activation.hpp"
#include "conv2d.cuh"
#include "convolution.cuh"
#include "cuda_helper.cuh"
#include "layer.hpp"
#include "matmul.cuh"
#include "vector.cuh"
using namespace CUDANet::Layers;
Conv2d::Conv2d(
    shape2d inputSize,
    int inputChannels,
    shape2d kernelSize,
    shape2d stride,
    int numFilters,
    shape2d paddingSize,
    ActivationType activationType
)
    : inputSize(inputSize),
      inputChannels(inputChannels),
      kernelSize(kernelSize),
      stride(stride),
      numFilters(numFilters),
      paddingSize(paddingSize) {
    outputSize = {
        (inputSize.first - kernelSize.first + 2 * paddingSize.first) /
            stride.first + 1,
        (inputSize.second - kernelSize.second + 2 * paddingSize.second) /
            stride.second + 1
    };

    activation = new Activation(
        activationType, outputSize.first * outputSize.second * numFilters
    );

    d_output = nullptr;
    CUDA_CHECK(cudaMalloc(
        (void**)&d_output,
        sizeof(float) * outputSize.first * outputSize.second * numFilters
    ));

    weights.resize(
        kernelSize.first * kernelSize.second * inputChannels * numFilters
    );
    initializeWeights();

    d_weights = nullptr;
    CUDA_CHECK(cudaMalloc(
        (void**)&d_weights,
        sizeof(float) * kernelSize.first * kernelSize.second * inputChannels *
            numFilters
    ));

    biases.resize(numFilters);
    initializeBiases();

    d_biases = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&d_biases, sizeof(float) * numFilters));

    toCuda();
}
Conv2d::~Conv2d() {
    cudaFree(d_output);
    cudaFree(d_weights);
    cudaFree(d_biases);
    delete activation;
}

void Conv2d::initializeWeights() {
    std::fill(weights.begin(), weights.end(), 0.0f);
}

void Conv2d::initializeBiases() {
    std::fill(biases.begin(), biases.end(), 0.0f);
}

void Conv2d::setWeights(const float* weights_input) {
    std::copy(weights_input, weights_input + weights.size(), weights.begin());
    toCuda();
}

std::vector<float> Conv2d::getWeights() {
    return weights;
}

void Conv2d::setBiases(const float* biases_input) {
    std::copy(biases_input, biases_input + biases.size(), biases.begin());
    toCuda();
}

std::vector<float> Conv2d::getBiases() {
    return biases;
}

void Conv2d::toCuda() {
    CUDA_CHECK(cudaMemcpy(
        d_weights, weights.data(),
        sizeof(float) * kernelSize.first * kernelSize.second * inputChannels *
            numFilters,
        cudaMemcpyHostToDevice
    ));
    CUDA_CHECK(cudaMemcpy(
        d_biases, biases.data(), sizeof(float) * numFilters,
        cudaMemcpyHostToDevice
    ));
}
float* Conv2d::forward(const float* d_input) {
    // Convolve
    dim3 block(8, 8, 8);
    dim3 grid(
        (outputSize.first + block.x - 1) / block.x,
        (outputSize.second + block.y - 1) / block.y,
        (numFilters + block.z - 1) / block.z
    );

    CUDANet::Utils::clear(
        d_output, outputSize.first * outputSize.second * numFilters
    );

    Kernels::convolution<<<grid, block>>>(
        d_input, d_weights, d_biases, d_output, inputSize, inputChannels,
        paddingSize, kernelSize, stride, numFilters, outputSize
    );
    CUDA_CHECK(cudaGetLastError());

    // Apply activation
    activation->activate(d_output);

    CUDA_CHECK(cudaDeviceSynchronize());

    return d_output;
}

int Conv2d::getOutputSize() {
    return outputSize.first * outputSize.second * numFilters;
}

int Conv2d::getInputSize() {
    return inputSize.first * inputSize.second * inputChannels;
}

shape2d Conv2d::getOutputDims() {
    return outputSize;
}

View File

@@ -3,7 +3,7 @@
 #include <iostream>
-#include "conv2d.cuh"
+#include "conv2d.hpp"

 class Conv2dTest : public ::testing::Test {
    protected:

View File

@@ -1,6 +1,6 @@
 #include <gtest/gtest.h>
-#include "conv2d.cuh"
+#include "conv2d.hpp"
 #include "dense.hpp"
 #include "max_pooling.hpp"
 #include "model.hpp"
@@ -85,8 +85,6 @@ class ModelTest : public ::testing::Test {
     void commonTestTeardown(CUDANet::Model *model) {
         delete model;
     }
-
-    cudaError_t cudaStatus;
 };
TEST_F(ModelTest, TestModelPredict) {