mirror of
https://github.com/lordmathis/CUDANet.git
synced 2025-11-05 17:34:21 +00:00
Add bias to conv2d
This commit is contained in:
@@ -42,15 +42,18 @@ class Conv2d {
|
||||
|
||||
// Kernels
|
||||
std::vector<float> kernels;
|
||||
std::vector<float> biases;
|
||||
|
||||
// Cuda
|
||||
float* d_kernels;
|
||||
float* d_biases;
|
||||
float* d_padded;
|
||||
|
||||
// Kernels
|
||||
Activation activation;
|
||||
|
||||
void initializeKernels();
|
||||
void initializeBiases();
|
||||
void toCuda();
|
||||
};
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include "conv2d.cuh"
|
||||
#include "convolution.cuh"
|
||||
#include "cuda_helper.cuh"
|
||||
#include "matrix_math.cuh"
|
||||
#include "padding.cuh"
|
||||
|
||||
Layers::Conv2d::Conv2d(
|
||||
@@ -33,25 +34,33 @@ Layers::Conv2d::Conv2d(
|
||||
}
|
||||
|
||||
kernels.resize(kernelSize * kernelSize * inputChannels * numFilters);
|
||||
initializeKernels();
|
||||
initializeKernels();
|
||||
|
||||
d_kernels = nullptr;
|
||||
|
||||
CUDA_CHECK(
|
||||
cudaMalloc((void**)&d_kernels, sizeof(float) * kernelSize * kernelSize * inputChannels * numFilters)
|
||||
);
|
||||
toCuda();
|
||||
|
||||
biases.resize(outputSize * outputSize * numFilters);
|
||||
initializeBiases();
|
||||
|
||||
d_biases = nullptr;
|
||||
CUDA_CHECK(
|
||||
cudaMalloc((void**)&d_biases, sizeof(float) * outputSize * outputSize * numFilters)
|
||||
);
|
||||
|
||||
d_padded = nullptr;
|
||||
|
||||
CUDA_CHECK(cudaMalloc(
|
||||
(void**)&d_padded, sizeof(float) * (inputSize + 2 * paddingSize) *
|
||||
(inputSize + 2 * paddingSize) * inputChannels
|
||||
));
|
||||
|
||||
toCuda();
|
||||
}
|
||||
|
||||
Layers::Conv2d::~Conv2d() {
    // Release the device-side buffers in reverse order of allocation.
    // cudaFree(nullptr) is a documented no-op, so a partially constructed
    // layer (pointers still initialized to nullptr) is safe to destroy.
    // Errors are deliberately ignored: a destructor has no useful way to
    // report them.
    cudaFree(d_padded);
    cudaFree(d_biases);
    cudaFree(d_kernels);
}
|
||||
|
||||
@@ -59,6 +68,10 @@ void Layers::Conv2d::initializeKernels() {
|
||||
std::fill(kernels.begin(), kernels.end(), 0.0f);
|
||||
}
|
||||
|
||||
void Layers::Conv2d::initializeBiases() {
    // Zero-initialize the host-side bias vector in place; the values are
    // pushed to the device buffer separately by toCuda().
    biases.assign(biases.size(), 0.0f);
}
|
||||
|
||||
void Layers::Conv2d::setKernels(const std::vector<float>& kernels_input) {
|
||||
std::copy(kernels_input.begin(), kernels_input.end(), kernels.begin());
|
||||
toCuda();
|
||||
@@ -69,6 +82,11 @@ void Layers::Conv2d::toCuda() {
|
||||
d_kernels, kernels.data(), sizeof(float) * kernelSize * kernelSize * numFilters,
|
||||
cudaMemcpyHostToDevice
|
||||
));
|
||||
|
||||
CUDA_CHECK(cudaMemcpy(
|
||||
d_biases, biases.data(), sizeof(float) * outputSize * outputSize * numFilters,
|
||||
cudaMemcpyHostToDevice
|
||||
));
|
||||
}
|
||||
|
||||
void Layers::Conv2d::forward(const float* d_input, float* d_output) {
|
||||
@@ -85,6 +103,9 @@ void Layers::Conv2d::forward(const float* d_input, float* d_output) {
|
||||
d_padded, d_kernels, d_output, inputSize + (2 * paddingSize), inputChannels, kernelSize, stride, numFilters, outputSize
|
||||
);
|
||||
|
||||
// Add bias
|
||||
vec_vec_add_kernel<<<1, biases.size()>>>(d_biases, d_output, d_output, biases.size());
|
||||
|
||||
CUDA_CHECK(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user