Restructure CUDA backend

This commit is contained in:
2024-09-05 22:23:47 +02:00
parent 65727dfee8
commit f8220f0ec1
19 changed files with 69 additions and 16 deletions

View File

@@ -1,26 +1,21 @@
#include "add.cuh"
#include "add.hpp"
#include "matmul.cuh"
#include "cuda_helper.cuh"
using namespace CUDANet::Layers;
Add::Add(int inputSize)
: inputSize(inputSize) {
// Device-side setup for the Add layer: allocates the output buffer on the GPU
// and precomputes the kernel launch grid size.
void Add::initCUDA() {
    // Null first so d_output holds a safe value if cudaMalloc aborts via CUDA_CHECK.
    d_output = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&d_output, sizeof(float) * inputSize));
    // Ceil-division: enough BLOCK_SIZE-thread blocks to cover inputSize elements.
    gridSize = (inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
}
Add::~Add() {
// Device-side teardown: releases the buffer allocated by initCUDA().
// cudaFree on a null pointer is a documented no-op, so this is safe even if
// initCUDA() never ran to completion.
void Add::delCUDA() {
    cudaFree(d_output);
}
void Add::forward(const float* d_inputA, const float* d_inputB) {
float* Add::forwardCUDA(const float* d_inputA, const float* d_inputB) {
Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
d_inputA, d_inputB, d_output, inputSize
@@ -28,4 +23,6 @@ void Add::forward(const float* d_inputA, const float* d_inputB) {
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
}
return d_output;
}

44
src/layers/add.cpp Normal file
View File

@@ -0,0 +1,44 @@
#include "add.hpp"
#include <stddef.h>
using namespace CUDANet::Layers;
// Host-side construction of the element-wise Add layer: allocates the CPU
// output buffer and, when built with CUDA support, the device-side resources.
//
// Both member initializers consume the constructor parameter (not another
// member), so class declaration order of the fields is irrelevant here.
Add::Add(int inputSize)
    : inputSize(inputSize),
      output(new float[inputSize]) {
#ifdef USE_CUDA
    initCUDA();
#endif
}
// Releases the host output buffer and, when built with CUDA support, the
// device-side resources as well.
Add::~Add() {
#ifdef USE_CUDA
    delCUDA();
#endif
    // Fix: the constructor allocates `output` with new[], but it was never
    // freed — every Add instance leaked inputSize floats.
    delete[] output;
}
// Element-wise sum of two inputSize-length vectors; dispatches to the CUDA
// implementation when compiled with USE_CUDA, otherwise to the CPU fallback.
//
// NOTE(review): under USE_CUDA the arguments and the returned buffer are
// presumably device pointers (forwardCUDA names them d_inputA/d_inputB and
// returns d_output) — confirm against callers before mixing with host memory.
float* Add::forward(const float* inputA, const float* inputB) {
#ifdef USE_CUDA
    return forwardCUDA(inputA, inputB);
#else
    return forwardCPU(inputA, inputB);
#endif
}
// CPU fallback: writes inputA[i] + inputB[i] into the layer-owned `output`
// buffer for i in [0, inputSize) and returns that buffer. Both inputs must
// point to at least inputSize readable floats; the returned pointer stays
// owned by the layer and is overwritten by the next call.
float* Add::forwardCPU(const float* inputA, const float* inputB) {
    // Fix: use an int counter to match inputSize's type. The original size_t
    // counter forced a signed/unsigned comparison, and a negative inputSize
    // would have wrapped into an effectively unbounded loop.
    for (int i = 0; i < inputSize; ++i) {
        output[i] = inputA[i] + inputB[i];
    }
    return output;
}