Restructure include paths

This commit is contained in:
2025-11-23 20:57:08 +01:00
parent 4161caf3e1
commit 38cb0c9ac0
13 changed files with 15 additions and 15 deletions

View File

@@ -0,0 +1,118 @@
#pragma once

#include <cstdio>
#include <cstdlib>

#include "backend.hpp"
#include "tensor.hpp"
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 128
#endif // BLOCK_SIZE
/**
 * @brief CUDA error checking macro.
 *
 * Evaluates the wrapped expression exactly once; if it yields anything
 * other than cudaSuccess, prints the file/line, numeric error code, the
 * runtime's error string and the failing expression to stderr, then
 * terminates the process with EXIT_FAILURE.
 *
 * Usage: CUDA_CHECK(cudaMalloc(&ptr, bytes));
 */
#define CUDA_CHECK(call) \
    do { \
        /* Parenthesize the argument so comma/ternary expressions bind */ \
        /* correctly; use an unlikely local name so the macro cannot    */ \
        /* shadow a `result` variable referenced inside `call`.         */ \
        cudaError_t cuda_check_err_ = (call); \
        if (cuda_check_err_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", \
                __FILE__, __LINE__, \
                static_cast<unsigned int>(cuda_check_err_), \
                cudaGetErrorString(cuda_check_err_), #call); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
namespace CUDANet::Backends {
/**
 * @brief CUDA implementation of the abstract Backend interface.
 *
 * Declares device memory management plus tensor, activation and layer
 * operations. Tensor arguments presumably reference device memory
 * (copy_to_device exists for host-to-device transfer) — TODO confirm
 * against the implementation file.
 */
class CUDA : public Backend {
public:
// Memory management
/// Allocate a raw buffer of `bytes` bytes; pair with deallocate().
void* allocate(size_t bytes) override;
/// Release a buffer previously returned by allocate().
void deallocate(void* ptr) override;
// Tensor ops
/// Print the contents of `input` (debugging aid).
void print(const CUDANet::Tensor& input) override;
/// Set every element of `input` to zero.
void zero(CUDANet::Tensor& input) override;
/// Fill `input` with `value`.
void fill(CUDANet::Tensor &input, int value) override;
/// Copy `size` bytes from the host buffer `data` into `tensor`.
void
copy_to_device(CUDANet::Tensor& tensor, void* data, size_t size) override;
/// Reduce `input` to its sum, written into `sum`.
void sum(const CUDANet::Tensor& input, CUDANet::Tensor& sum) override;
/// Reduce `input` to its maximum, written into `max`.
void max(const CUDANet::Tensor& input, CUDANet::Tensor& max) override;
// Layer ops
/// In-place ReLU activation on `tensor`.
void relu(CUDANet::Tensor& tensor) override;
/// In-place sigmoid activation on `tensor`.
void sigmoid(CUDANet::Tensor& tensor) override;
/// In-place softmax on `tensor`; `temp_max` and `temp_sum` are scratch
/// tensors used for the intermediate max/sum reductions.
void softmax(
CUDANet::Tensor& tensor,
CUDANet::Tensor& temp_max,
CUDANet::Tensor& temp_sum
) override;
/// Fully-connected layer: applies `weights` (input_size x output_size —
/// orientation not visible here, confirm in the .cu file) and `biases`
/// to `input`, writing into `output`. Returns a reference to `output`.
CUDANet::Tensor& dense(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const size_t input_size,
const size_t output_size
) override;
/// 2D convolution of `input` with `weights` plus `biases`; shapes
/// describe input, padding, kernel, stride and output geometry.
/// Returns a reference to `output`.
CUDANet::Tensor& conv2d(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const CUDANet::Shape in_shape,
const CUDANet::Shape padding_shape,
const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape
) override;
/// 2D max pooling over `input`. Returns a reference to `output`.
CUDANet::Tensor& max_pool2d(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) override;
/// 2D average pooling over `input`. Returns a reference to `output`.
CUDANet::Tensor& avg_pool2d(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) override;
/// Batch normalization using `running_mean`/`running_var` statistics,
/// scale `weights`, shift `biases` and stabilizer `epsilon` (passed as
/// a tensor). Returns a reference to `output`.
CUDANet::Tensor& batch_norm(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Tensor& weights,
CUDANet::Tensor& biases,
CUDANet::Tensor& running_mean,
CUDANet::Tensor& running_var,
CUDANet::Tensor& epsilon
) override;
/// Concatenate `input_a` and `input_b` into `output` (axis/order not
/// visible here — confirm in the implementation). Returns `output`.
CUDANet::Tensor& concat(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) override;
/// Element-wise addition: output = input_a + input_b. Returns `output`.
CUDANet::Tensor& add(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) override;
};
} // namespace CUDANet::Backends

View File

@@ -0,0 +1,33 @@
#pragma once
#include <cuda_runtime.h>
namespace CUDANet::Kernels {
/**
 * @brief Sigmoid activation function kernel
 *
 * `src` and `dst` are declared __restrict__ and therefore must not alias.
 *
 * @param src Device pointer to the source array
 * @param dst Device pointer to the destination array
 * @param len Length of the arrays
 */
__global__ void sigmoid(
    const float* __restrict__ src,
    float* __restrict__ dst,
    const unsigned int len
);
/**
 * @brief Relu activation function kernel
 *
 * `src` and `dst` are declared __restrict__ and therefore must not alias.
 *
 * @param src Device pointer to the source array
 * @param dst Device pointer to the destination array
 * @param len Length of the arrays
 */
__global__ void relu(
    const float* __restrict__ src,
    float* __restrict__ dst,
    const unsigned int len
);
} // namespace CUDANet::Kernels

View File

@@ -0,0 +1,20 @@
#pragma once
#include <cuda_runtime.h>
#include "layer.hpp"
namespace CUDANet::Kernels {
/**
 * @brief 2D convolution kernel
 *
 * All pointer arguments are device pointers and, being __restrict__,
 * must not alias one another.
 *
 * @param d_input  Device pointer to the input tensor
 * @param d_kernel Device pointer to the convolution weights
 * @param d_bias   Device pointer to the bias values
 * @param d_output Device pointer to the output tensor
 * @param input_shape   Shape of the input
 * @param padding_shape Padding applied to the input
 * @param kernel_shape  Shape of the convolution kernel
 * @param stride_shape  Stride of the convolution
 * @param output_shape  Shape of the output
 */
__global__ void convolution(
    const float* __restrict__ d_input,
    const float* __restrict__ d_kernel,
    const float* __restrict__ d_bias,
    float* __restrict__ d_output,
    const Shape input_shape,
    const Shape padding_shape,
    const Shape kernel_shape,
    const Shape stride_shape,
    const Shape output_shape
);
} // namespace CUDANet::Kernels

View File

@@ -0,0 +1,192 @@
#pragma once
#include <cuda_runtime.h>
namespace CUDANet::Kernels {
/**
 * @brief Matrix vector multiplication kernel
 *
 * @param d_matrix Device pointer to matrix
 * @param d_vector Device pointer to vector
 * @param d_output Device pointer to output vector
 * @param w Width of the matrix
 * @param h Height of the matrix
 */
__global__ void mat_vec_mul(
    const float* __restrict__ d_matrix,
    const float* __restrict__ d_vector,
    float* __restrict__ d_output,
    const unsigned int w,
    const unsigned int h
);
/**
 * @brief Vector vector addition kernel
 *
 * @param d_vector1 Device pointer to first vector
 * @param d_vector2 Device pointer to second vector
 * @param d_output Device pointer to output vector
 * @param w Length of the vectors
 */
__global__ void vec_vec_add(
    const float* __restrict__ d_vector1,
    const float* __restrict__ d_vector2,
    float* __restrict__ d_output,
    const unsigned int w
);
/**
 * @brief Vector vector subtraction kernel
 *
 * @param d_vector1 Device pointer to first vector
 * @param d_vector2 Device pointer to second vector
 * @param d_output Device pointer to output vector
 * @param w Length of the vectors
 */
__global__ void vec_vec_sub(
    const float* __restrict__ d_vector1,
    const float* __restrict__ d_vector2,
    float* __restrict__ d_output,
    const unsigned int w
);
/**
 * @brief Element-wise vector vector multiplication kernel
 *
 * @param d_vector1 Device pointer to first vector
 * @param d_vector2 Device pointer to second vector
 * @param d_output Device pointer to output vector
 * @param w Length of the vectors
 */
__global__ void vec_vec_mul(
    const float* __restrict__ d_vector1,
    const float* __restrict__ d_vector2,
    float* __restrict__ d_output,
    const unsigned int w
);
/**
 * @brief Sub scalar from each element of the vector
 *
 * @param d_src Device pointer to source vector
 * @param d_out Device pointer to output vector
 * @param d_scalar Device pointer to the scalar value
 * @param len Length of the vectors
 */
__global__ void vec_scalar_sub(
    const float* __restrict__ d_src,
    float* __restrict__ d_out,
    const float* __restrict__ d_scalar,
    const unsigned int len
);
/**
 * @brief Add scalar to each element of the vector
 *
 * @param d_src Device pointer to source vector
 * @param d_out Device pointer to output vector
 * @param d_scalar Device pointer to the scalar value
 * @param len Length of the vectors
 */
__global__ void vec_scalar_add(
    const float* __restrict__ d_src,
    float* __restrict__ d_out,
    const float* __restrict__ d_scalar,
    const unsigned int len
);
/**
 * @brief Divide each element of the vector by a scalar
 *
 * @param d_src Device pointer to source vector
 * @param d_out Device pointer to output vector
 * @param d_scalar Device pointer to the scalar value
 * @param len Length of the vectors
 */
__global__ void vec_scalar_div(
    const float* __restrict__ d_src,
    float* __restrict__ d_out,
    const float* __restrict__ d_scalar,
    const unsigned int len
);
/**
 * @brief Multiply each element of the vector by a scalar
 *
 * @param d_src Device pointer to source vector
 * @param d_out Device pointer to output vector
 * @param d_scalar Device pointer to the scalar value
 * @param len Length of the vectors
 */
__global__ void vec_scalar_mul(
    const float* __restrict__ d_src,
    float* __restrict__ d_out,
    const float* __restrict__ d_scalar,
    const unsigned int len
);
/**
 * @brief Exponentiate each element of the vector
 *
 * @param src Device pointer to source vector
 * @param dst Device pointer to destination vector
 * @param len Length of the arrays
 */
__global__ void vec_exp(
    const float* __restrict__ src,
    float* __restrict__ dst,
    const unsigned int len
);
/**
 * @brief Compute the square root of each element of the vector
 *
 * @param src Device pointer to source vector
 * @param dst Device pointer to destination vector
 * @param len Length of the vector
 */
__global__ void vec_sqrt(
    const float* __restrict__ src,
    float* __restrict__ dst,
    const unsigned int len
);
/**
 * @brief Scales the vector by 1/sqrt(scale + epsilon)
 *
 * @param src Device pointer to source vector
 * @param dst Device pointer to destination vector
 * @param scale Device pointer to the scale value
 * @param epsilon Device pointer to the epsilon value
 * @param len Length of the vector
 */
__global__ void vec_scale(
    const float* __restrict__ src,
    float* __restrict__ dst,
    const float* __restrict__ scale,
    const float* epsilon,
    const unsigned int len
);
/**
 * @brief Max reduction kernel
 *
 * @param d_vector Device pointer to vector
 * @param d_output Device pointer to output vector
 * @param len Length of the vector
 */
__global__ void max_reduce(
    const float* __restrict__ d_vector,
    float* __restrict__ d_output,
    const unsigned int len
);
/**
 * @brief Sum reduction kernel
 *
 * @param d_vector Device pointer to vector
 * @param d_output Device pointer to output vector
 * @param len Length of the vector
 */
__global__ void sum_reduce(
    const float* __restrict__ d_vector,
    float* __restrict__ d_output,
    const unsigned int len
);
} // namespace CUDANet::Kernels

View File

@@ -0,0 +1,28 @@
#pragma once
#include <cuda_runtime.h>
#include "layer.hpp"
namespace CUDANet::Kernels {
/**
 * @brief 2D max pooling kernel
 *
 * @param d_input  Device pointer to the input tensor
 * @param d_output Device pointer to the output tensor
 * @param input_shape   Shape of the input
 * @param output_shape  Shape of the output
 * @param pool_shape    Shape of the pooling window
 * @param stride_shape  Stride of the pooling window
 * @param padding_shape Padding applied to the input
 */
__global__ void max_pool(
    const float* __restrict__ d_input,
    float* __restrict__ d_output,
    const Shape input_shape,
    const Shape output_shape,
    const Shape pool_shape,
    const Shape stride_shape,
    const Shape padding_shape
);
/**
 * @brief 2D average pooling kernel
 *
 * @param d_input  Device pointer to the input tensor
 * @param d_output Device pointer to the output tensor
 * @param input_shape   Shape of the input
 * @param output_shape  Shape of the output
 * @param pool_shape    Shape of the pooling window
 * @param stride_shape  Stride of the pooling window
 * @param padding_shape Padding applied to the input
 */
__global__ void avg_pool(
    const float* __restrict__ d_input,
    float* __restrict__ d_output,
    const Shape input_shape,
    const Shape output_shape,
    const Shape pool_shape,
    const Shape stride_shape,
    const Shape padding_shape
);
} // namespace CUDANet::Kernels