Migrate add layer to tensors

This commit is contained in:
2025-11-22 00:12:20 +01:00
parent aeb1739c46
commit 4c8b2ef537
7 changed files with 56 additions and 101 deletions

View File

@@ -211,6 +211,7 @@ CUDANet::Tensor& CUDA::batch_norm(
);
CUDA_CHECK(cudaGetLastError());
}
CUDA_CHECK(cudaDeviceSynchronize());
}
CUDANet::Tensor& CUDA::concat(
@@ -228,6 +229,23 @@ CUDANet::Tensor& CUDA::concat(
cudaMemcpyDeviceToDevice
));
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return output;
}
// Elementwise tensor addition on the GPU: output = input_a + input_b.
// NOTE(review): only input_a.numel() is used for the launch bounds and the
// kernel length — assumes input_b and output have at least as many elements;
// confirm callers guarantee matching sizes.
CUDANet::Tensor& CUDA::add(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) {
// Ceil-divide so the tail is covered when numel is not a multiple of BLOCK_SIZE.
auto gridSize = (input_a.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
input_a.data<float>(), input_b.data<float>(), output.data<float>(), input_a.numel()
);
// cudaGetLastError catches launch-configuration errors; the synchronize
// surfaces in-kernel execution errors before the result is handed back.
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return output;

View File

@@ -1,28 +0,0 @@
#include "add.hpp"
#include "matmul.cuh"
#include "cuda_helper.cuh"
using namespace CUDANet::Layers;
// One-time CUDA setup for the Add layer: precompute the launch grid and
// allocate the device-side output buffer (inputSize floats).
void Add::initCUDA() {
    // Ceil-divided grid size, computed once and reused on every forward pass.
    gridSize = (inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;

    d_output = nullptr;
    const size_t outputBytes = sizeof(float) * inputSize;
    CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&d_output), outputBytes));
}
// Release the device output buffer allocated in initCUDA().
void Add::delCUDA() {
    // Fix: the cudaFree return code was silently ignored, unlike every other
    // CUDA call in this file — route it through CUDA_CHECK for consistency.
    // cudaFree(nullptr) is a documented no-op, so a second call is safe once
    // the pointer is nulled below.
    CUDA_CHECK(cudaFree(d_output));
    d_output = nullptr;  // avoid a dangling pointer / double-free
}
// Run the elementwise vector-add kernel over the two device input buffers and
// return the device pointer to the preallocated result buffer. The result is
// valid once the synchronize below completes.
float* Add::forwardCUDA(const float* d_inputA, const float* d_inputB) {
    // gridSize and inputSize were fixed in initCUDA(): one thread per element.
    Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(d_inputA, d_inputB, d_output, inputSize);
    CUDA_CHECK(cudaGetLastError());       // launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());  // in-kernel execution errors
    return d_output;
}