Migrate add layer to tensors

2025-11-22 00:12:20 +01:00
parent aeb1739c46
commit 4c8b2ef537
7 changed files with 56 additions and 101 deletions

View File

@@ -90,6 +90,12 @@ class Backend {
         CUDANet::Tensor& input_b,
         CUDANet::Tensor& output
     ) = 0;
+
+    virtual CUDANet::Tensor& add(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    ) = 0;
 };
 
 } // namespace CUDANet
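The contract mirrors `concat`: the caller hands in both inputs plus a preallocated output tensor, and the backend writes into that tensor and returns it. As a rough sketch of what a non-CUDA implementation of this interface could look like (no `CPU` backend appears in this commit; this is purely illustrative):

    CUDANet::Tensor& CPU::add(
        CUDANet::Tensor& input_a,
        CUDANet::Tensor& input_b,
        CUDANet::Tensor& output
    ) {
        float* a   = input_a.data<float>();
        float* b   = input_b.data<float>();
        float* out = output.data<float>();
        // Plain element-wise sum; the Add layer's constructor already
        // guarantees that all three tensors have the same element count.
        for (size_t i = 0; i < input_a.numel(); ++i) {
            out[i] = a[i] + b[i];
        }
        return output;
    }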

View File

@@ -86,6 +86,12 @@ class CUDA : public Backend {
         CUDANet::Tensor& input_b,
         CUDANet::Tensor& output
     ) override;
+
+    CUDANet::Tensor& add(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    ) override;
 };
 
 } // namespace CUDANet::Backend

View File

@@ -1,49 +1,24 @@
-#ifndef CUDANET_ADD_LAYER_H
-#define CUDANET_ADD_LAYER_H
+#pragma once
+
+#include "shape.hpp"
+#include "tensor.hpp"
 
 namespace CUDANet::Layers {
 
 class Add {
   public:
-    /**
-     * @brief Create a new Add layer
-     *
-     * @param inputSize Size of the input arrays
-     */
-    Add(int inputSize);
+    Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend);
 
-    /**
-     * @brief Destroy the Add layer
-     *
-     */
     ~Add();
 
-    /**
-     * @brief Adds first input to second input
-     *
-     * @param d_inputA Device pointer to the first input
-     * @param d_inputB Device pointer to the second input
-     *
-     */
-    float* forward(const float* inputA, const float* inputB);
+    CUDANet::Tensor&
+    forward(CUDANet::Tensor& input_a, CUDANet::Tensor& input_b);
 
   private:
-    int inputSize;
-
-    float* output;
-
-    float* forwardCPU(const float* inputA, const float* inputB);
-
-#ifdef USE_CUDA
-    float* d_output;
-    int gridSize;
-
-    float* forwardCUDA(const float* d_inputA, const float* d_inputB);
-
-    void initCUDA();
-    void delCUDA();
-#endif
+    CUDANet::Shape out_shape;
+    CUDANet::Tensor output;
+    CUDANet::Backend *backend;
 };
 
 } // namespace CUDANet::Layers
-
-#endif // CUDANET_ADD_LAYER_H
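Call sites construct the layer from shapes and a backend, then pass tensors to `forward` instead of raw pointers and a flat size. A minimal usage sketch, assuming brace-initializable shapes and the tensor constructor used by the implementation below (neither is confirmed beyond what this diff shows):

    // Hypothetical: obtain a backend, e.g. the CUDA backend extended above.
    CUDANet::Backend* backend = make_backend();

    CUDANet::Shape shape = {64, 64};
    CUDANet::Layers::Add add(shape, shape, backend);

    CUDANet::Tensor a(shape, CUDANet::DType::FLOAT32, backend);
    CUDANet::Tensor b(shape, CUDANet::DType::FLOAT32, backend);

    // forward() zeroes the layer's cached output tensor, delegates the
    // element-wise sum to the backend, and returns the same tensor.
    CUDANet::Tensor& sum = add.forward(a, b);

The layer owns its output tensor for its whole lifetime, so the raw `new float[]`/`cudaMalloc` bookkeeping of the old implementation disappears.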

View File

@@ -211,6 +211,7 @@ CUDANet::Tensor& CUDA::batch_norm(
         );
         CUDA_CHECK(cudaGetLastError());
     }
+    CUDA_CHECK(cudaDeviceSynchronize());
 }

@@ -228,6 +229,23 @@ CUDANet::Tensor& CUDA::concat(
         cudaMemcpyDeviceToDevice
     ));
+    CUDA_CHECK(cudaGetLastError());
+    CUDA_CHECK(cudaDeviceSynchronize());
+    return output;
+}
+
+CUDANet::Tensor& CUDA::add(
+    CUDANet::Tensor& input_a,
+    CUDANet::Tensor& input_b,
+    CUDANet::Tensor& output
+) {
+    auto gridSize = (input_a.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
+
+    Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
+        input_a.data<float>(), input_b.data<float>(), output.data<float>(), input_a.numel()
+    );
+    CUDA_CHECK(cudaGetLastError());
 
     CUDA_CHECK(cudaDeviceSynchronize());
     return output;
 }
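`gridSize` is a ceiling division, so the last, possibly partial, block still covers the tail: with `input_a.numel()` of 1000 and a `BLOCK_SIZE` of 256 (the constant's real value is defined elsewhere in the repository), `(1000 + 255) / 256 = 4` blocks, i.e. 1024 threads for 1000 elements. The `vec_vec_add` kernel itself is not part of this diff (it was already shared with the per-layer file deleted below), but an element-wise kernel consistent with this launch would look roughly like:

    __global__ void vec_vec_add(
        const float* a, const float* b, float* out, const unsigned int n
    ) {
        unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
        // Guard the tail: the grid may contain more threads than elements.
        if (i < n) {
            out[i] = a[i] + b[i];
        }
    }

Note that the grid size is now recomputed from the input on every call, whereas the deleted layer code cached it once in `initCUDA()`; that keeps the backend stateless at the cost of one integer division per invocation.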

View File

@@ -1,28 +0,0 @@
#include "add.hpp"
#include "matmul.cuh"
#include "cuda_helper.cuh"
using namespace CUDANet::Layers;
void Add::initCUDA() {
d_output = nullptr;
CUDA_CHECK(cudaMalloc((void**)&d_output, sizeof(float) * inputSize));
gridSize = (inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
}
void Add::delCUDA() {
cudaFree(d_output);
}
float* Add::forwardCUDA(const float* d_inputA, const float* d_inputB) {
Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
d_inputA, d_inputB, d_output, inputSize
);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return d_output;
}

View File

@@ -1,44 +1,22 @@
#include "add.hpp" #include "add.hpp"
#include <stddef.h>
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
Add::Add(int inputSize) Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend) : backend(backend) {
: inputSize(inputSize) { if (a_shape != b_shape) {
throw InvalidShapeException(
output = new float[inputSize]; "Add requires matching dimensions", a_shape, b_shape
);
#ifdef USE_CUDA
initCUDA();
#endif
} }
out_shape = a_shape;
Add::~Add() { output = CUDANet::Tensor(out_shape, CUDANet::DType::FLOAT32, backend);
#ifdef USE_CUDA
delCUDA();
#endif
} }
Add::~Add() {}
float* Add::forward(const float* inputA, const float* inputB) { CUDANet::Tensor&
Add::forward(CUDANet::Tensor& input_a, CUDANet::Tensor& input_b) {
#ifdef USE_CUDA output.zero();
return forwardCUDA(inputA, inputB);
#else
return forwardCPU(inputA, inputB);
#endif
}
float* Add::forwardCPU(const float* inputA, const float* inputB) {
for (size_t i = 0; i < inputSize; i++)
{
output[i] = inputA[i] + inputB[i];
}
return output;
} }
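Where the old constructor trusted a caller-supplied `inputSize`, the new one validates up front. A sketch of the failure path (the exception type's namespace and interface are inferred from its use here and in `concat`, not from a header in this diff):

    CUDANet::Shape a = {32, 32};
    CUDANet::Shape b = {32, 16};

    try {
        // Shapes differ, so this throws before the output tensor is created.
        CUDANet::Layers::Add bad(a, b, backend);
    } catch (const InvalidShapeException& e) {
        // The exception carries the message plus both offending shapes.
    }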

View File

@@ -6,7 +6,7 @@ Concat::Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDAN
     : a_shape(a_shape), b_shape(b_shape), backend(backend) {
     if (a_shape[0] != b_shape[0] || a_shape[1] != b_shape[1]) {
         throw InvalidShapeException(
-            "Concat requires matching batch and height dimensions", a_shape,
+            "Concat requires matching height and width dimensions", a_shape,
             b_shape
         );
     }