Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-12-23 14:54:28 +00:00)
Refactor CUDA kernels and tensor operations for type generality
@@ -8,53 +8,60 @@
 #ifndef BLOCK_SIZE
 #define BLOCK_SIZE 128
-#endif // BLOCK_SIZE
+#endif // BLOCK_SIZE
 
 /**
  * @brief CUDA error checking macro
  *
  *
  */
-#define CUDA_CHECK(call) \
-    do { \
-        cudaError_t result = call; \
-        if (result != cudaSuccess) { \
-            fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", \
-                    __FILE__, __LINE__, static_cast<unsigned int>(result), \
-                    cudaGetErrorString(result), #call); \
-            exit(EXIT_FAILURE); \
-        } \
-    } while (0)
+#define CUDA_CHECK(call) \
+    do { \
+        cudaError_t result = call; \
+        if (result != cudaSuccess) { \
+            fprintf( \
+                stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", __FILE__, \
+                __LINE__, static_cast<unsigned int>(result), \
+                cudaGetErrorString(result), #call \
+            ); \
+            exit(EXIT_FAILURE); \
+        } \
+    } while (0)
 
 namespace CUDANet::Backends {
 
+template <DType dtype>
+struct cuda_dtype_map;
+
+template <>
+struct cuda_dtype_map<DType::FLOAT32> {
+    using type = float;
+};
+
 class CUDA : public Backend {
-  private:
-    int device_id;
-    std::set<DType> supported_dtypes;
   public:
     CUDA(const BackendConfig& config);
 
-    bool supports_dtype(DType dtype) const override;
-    void set_default_dtype(DType dtype) override;
+    bool supports_dtype(DType dtype) const override;
+    void set_default_dtype(DType dtype) override;
     DType get_default_dtype() const override;
 
     static bool is_cuda_available();
-    void initialize();
+    void initialize();
 
     // Memory management
     void* allocate(size_t bytes) override;
    void deallocate(void* ptr) override;
 
-    // Tensor ops
+    // Tensor ops dispatchers
     void print(const CUDANet::Tensor& input) override;
     void zero(CUDANet::Tensor& input) override;
-    void fill(CUDANet::Tensor &input, int value) override;
+    void fill(CUDANet::Tensor& input, int value) override;
     void
     copy_to_device(CUDANet::Tensor& tensor, void* data, size_t size) override;
     void sum(const CUDANet::Tensor& input, CUDANet::Tensor& sum) override;
     void max(const CUDANet::Tensor& input, CUDANet::Tensor& max) override;
 
-    // Layer ops
+    // Layer ops dispatchers
     void relu(CUDANet::Tensor& tensor) override;
     void sigmoid(CUDANet::Tensor& tensor) override;
     void softmax(
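
Note: the reformatted CUDA_CHECK macro and the new cuda_dtype_map trait are meant to be used together. The sketch below is illustrative only and is not part of the commit; the qualified name CUDANet::DType and the allocation shown are assumptions.

    // Illustrative only: cuda_dtype_map resolves a DType tag to the concrete
    // element type, and CUDA_CHECK aborts with file, line and error string if
    // the wrapped runtime call fails.
    using T = CUDANet::Backends::cuda_dtype_map<CUDANet::DType::FLOAT32>::type;  // float

    void* ptr = nullptr;
    CUDA_CHECK(cudaMalloc(&ptr, 256 * sizeof(T)));
    CUDA_CHECK(cudaFree(ptr));
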
@@ -67,7 +74,7 @@ class CUDA : public Backend {
         const CUDANet::Tensor& weights,
         const CUDANet::Tensor& biases,
         const CUDANet::Tensor& input,
-        CUDANet::Tensor& output,
+        CUDANet::Tensor& output,
         const size_t input_size,
         const size_t output_size
     ) override;
@@ -76,43 +83,43 @@ class CUDA : public Backend {
         const CUDANet::Tensor& weights,
         const CUDANet::Tensor& biases,
         const CUDANet::Tensor& input,
-        CUDANet::Tensor& output,
-        const CUDANet::Shape in_shape,
-        const CUDANet::Shape padding_shape,
-        const CUDANet::Shape kernel_shape,
-        const CUDANet::Shape stride_shape,
-        const CUDANet::Shape out_shape
+        CUDANet::Tensor& output,
+        const CUDANet::Shape in_shape,
+        const CUDANet::Shape padding_shape,
+        const CUDANet::Shape kernel_shape,
+        const CUDANet::Shape stride_shape,
+        const CUDANet::Shape out_shape
     ) override;
 
     CUDANet::Tensor& max_pool2d(
         const CUDANet::Tensor& input,
-        CUDANet::Tensor& output,
-        CUDANet::Shape input_shape,
-        CUDANet::Shape pool_shape,
-        CUDANet::Shape stride_shape,
-        CUDANet::Shape padding_shape,
-        CUDANet::Shape output_shape
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
     ) override;
 
     CUDANet::Tensor& avg_pool2d(
         const CUDANet::Tensor& input,
-        CUDANet::Tensor& output,
-        CUDANet::Shape input_shape,
-        CUDANet::Shape pool_shape,
-        CUDANet::Shape stride_shape,
-        CUDANet::Shape padding_shape,
-        CUDANet::Shape output_shape
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
     ) override;
 
     CUDANet::Tensor& batch_norm(
         const CUDANet::Tensor& input,
-        CUDANet::Tensor& output,
-        CUDANet::Shape input_shape,
-        CUDANet::Tensor& weights,
-        CUDANet::Tensor& biases,
-        CUDANet::Tensor& running_mean,
-        CUDANet::Tensor& running_var,
-        CUDANet::Tensor& epsilon
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Tensor& weights,
+        CUDANet::Tensor& biases,
+        CUDANet::Tensor& running_mean,
+        CUDANet::Tensor& running_var,
+        CUDANet::Tensor& epsilon
     ) override;
 
     CUDANet::Tensor& concat(
@@ -126,6 +133,111 @@ class CUDA : public Backend {
         CUDANet::Tensor& input_b,
         CUDANet::Tensor& output
     ) override;
+
+  private:
+    int device_id;
+    std::set<DType> supported_dtypes;
+
+    // Tensor ops template impls
+    template <typename T>
+    void print_impl(const CUDANet::Tensor& input);
+
+    template <typename T>
+    void fill_impl(CUDANet::Tensor& input, int value);
+
+    template <typename T>
+    void copy_to_device_impl(CUDANet::Tensor& tensor, void* data, size_t size);
+
+    template <typename T>
+    void sum_impl(const CUDANet::Tensor& input, CUDANet::Tensor& sum);
+
+    template <typename T>
+    void max_impl(const CUDANet::Tensor& input, CUDANet::Tensor& max);
+
+    // Layer ops template impls
+    template <typename T>
+    void relu_impl(CUDANet::Tensor& tensor);
+
+    template <typename T>
+    void sigmoid_impl(CUDANet::Tensor& tensor);
+
+    template <typename T>
+    void softmax_impl(
+        CUDANet::Tensor& tensor,
+        CUDANet::Tensor& temp_max,
+        CUDANet::Tensor& temp_sum
+    );
+
+    template <typename T>
+    CUDANet::Tensor& dense_impl(
+        const CUDANet::Tensor& weights,
+        const CUDANet::Tensor& biases,
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        const size_t input_size,
+        const size_t output_size
+    );
+
+    template <typename T>
+    CUDANet::Tensor& conv2d_impl(
+        const CUDANet::Tensor& weights,
+        const CUDANet::Tensor& biases,
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        const CUDANet::Shape in_shape,
+        const CUDANet::Shape padding_shape,
+        const CUDANet::Shape kernel_shape,
+        const CUDANet::Shape stride_shape,
+        const CUDANet::Shape out_shape
+    );
+
+    template <typename T>
+    CUDANet::Tensor& max_pool2d_impl(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    );
+
+    template <typename T>
+    CUDANet::Tensor& avg_pool2d_impl(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    );
+
+    template <typename T>
+    CUDANet::Tensor& batch_norm_impl(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Tensor& weights,
+        CUDANet::Tensor& biases,
+        CUDANet::Tensor& running_mean,
+        CUDANet::Tensor& running_var,
+        CUDANet::Tensor& epsilon
+    );
+
+    template <typename T>
+    CUDANet::Tensor& concat_impl(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    );
+
+    template <typename T>
+    CUDANet::Tensor& add_impl(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    );
 };
 
-} // namespace CUDANet::Backend
+} // namespace CUDANet::Backends
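
Note: the split into public overrides ("dispatchers") and private *_impl<T> templates suggests a dtype-dispatch pattern, where each override inspects the tensor's dtype and forwards to the instantiation selected through cuda_dtype_map. A hedged sketch of what the relu dispatcher might look like in the implementation file; the Tensor::dtype() accessor and the error path are assumptions, since the .cu file is not part of this diff:

    // Hypothetical dispatcher body; only FLOAT32 is mapped by the header so far.
    void CUDA::relu(CUDANet::Tensor& tensor) {
        switch (tensor.dtype()) {  // accessor assumed, not shown in the diff
            case DType::FLOAT32:
                relu_impl<cuda_dtype_map<DType::FLOAT32>::type>(tensor);
                break;
            default:
                throw std::runtime_error("CUDA backend: unsupported dtype");
        }
    }
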
@@ -4,29 +4,18 @@
 
 namespace CUDANet::Kernels {
 
-/**
- * @brief Sigmoid activation function kernel
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
-
+template <typename T>
 __global__ void sigmoid(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );
 
-/**
- * @brief Relu activation function kernel
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void relu(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );
 
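
Note: the activation declarations only swap the element type for a template parameter; the kernel bodies are not shown in this diff. A minimal sketch of a matching relu definition, assuming one thread per element and T(0) as the zero value:

    // Sketch only; the actual definition lives in the corresponding .cu file,
    // which this commit view does not include.
    template <typename T>
    __global__ void relu(
        const T* __restrict__ src, T* __restrict__ dst, const unsigned int len
    ) {
        unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < len) {
            dst[i] = src[i] > T(0) ? src[i] : T(0);
        }
    }
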
@@ -5,11 +5,12 @@
 
 namespace CUDANet::Kernels {
 
+template <typename T>
 __global__ void convolution(
-    const float* __restrict__ d_input,
-    const float* __restrict__ d_kernel,
-    const float* __restrict__ d_bias,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    const T* __restrict__ d_kernel,
+    const T* __restrict__ d_bias,
+    T* __restrict__ d_output,
     const Shape input_shape,
     const Shape padding_shape,
     const Shape kernel_shape,
@@ -4,188 +4,105 @@
 
 namespace CUDANet::Kernels {
 
-/**
- * @brief Matrix vector multiplication kernel
- *
- * @param d_matrix Device pointer to matrix
- * @param d_vector Device pointer to vector
- * @param d_output Device pointer to output vector
- * @param w Width of the matrix
- * @param h Height of the matrix
- */
+template <typename T>
 __global__ void mat_vec_mul(
-    const float* __restrict__ d_matrix,
-    const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_matrix,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
     const unsigned int w,
     const unsigned int h
 );
 
-/**
- * @brief Vector vector addition kernel
- *
- * @param d_vector1 Device pointer to first vector
- * @param d_vector2 Device pointer to second vector
- * @param d_output Device pointer to output vector
- * @param w Length of the vectors
- */
+template <typename T>
 __global__ void vec_vec_add(
-    const float* __restrict__ d_vector1,
-    const float* __restrict__ d_vector2,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
     const unsigned int w
 );
 
-/**
- * @brief Vector vector subtraction kernel
- *
- * @param d_vector1
- * @param d_vector2
- * @param d_output
- * @param w
- * @return __global__
- */
+template <typename T>
 __global__ void vec_vec_sub(
-    const float* __restrict__ d_vector1,
-    const float* __restrict__ d_vector2,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
     const unsigned int w
 );
 
+template <typename T>
 __global__ void vec_vec_mul(
-    const float* __restrict__ d_vector1,
-    const float* __restrict__ d_vector2,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
     const unsigned int w
 );
 
-/**
- * @brief Sub scalar from each element of the vector
- *
- * @param d_vector
- * @param d_scalar
- * @param d_output
- * @param w
- * @return __global__
- */
+template <typename T>
 __global__ void vec_scalar_sub(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );
 
-/**
- * @brief Add scalar to each element of the vector
- *
- * @param d_src
- * @param d_out
- * @param d_scalar
- * @param len
- * @return __global__
- */
+template <typename T>
 __global__ void vec_scalar_add(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );
 
-/**
- * @brief Divide each element of the vector by a scalar
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void vec_scalar_div(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );
 
-/**
- * @brief Multiply each element of the vector by a scalar
- *
- * @param d_src
- * @param d_out
- * @param d_scalar
- * @param len
- * @return __global__
- */
+template <typename T>
 __global__ void vec_scalar_mul(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );
 
-/**
- * @brief Exponentiate each element of the vector
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void vec_exp(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );
 
-/**
- * @brief Compute the square root of each element of the vector
- *
- * @param src Device pointer to source vector
- * @param dst Device pointer to destination vector
- * @param len Length of the vector
- */
+template <typename T>
 __global__ void vec_sqrt(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );
 
-/**
- * @brief Scales the vector by 1/sqrt(scale + epsilon)
- *
- * @param src Device pointer to source vector
- * @param dst Device pointer to destination vector
- * @param scale Scale
- * @param epsilon Epsilon
- * @param len Length of the vector
- */
+template <typename T>
 __global__ void vec_scale(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
-    const float* __restrict__ scale,
-    const float* epsilon,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const T* __restrict__ scale,
+    const T* epsilon,
     const unsigned int len
 );
 
-/**
- * @brief Max reduction kernel
- *
- * @param d_vector Device pointer to vector
- * @param d_output Device pointer to output vector
- */
+template <typename T>
 __global__ void max_reduce(
-    const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
     const unsigned int len
 );
 
-/**
- * @brief
- *
- * @param d_vector Device pointer to vector
- * @param d_output Device pointer to output vector
- * @param len Length of the vector
- */
+template <typename T>
 __global__ void sum_reduce(
-    const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
     const unsigned int len
 );
 
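
Note: max_reduce and sum_reduce look like block-level reductions in the usual style, where each block writes one partial result, so producing a single scalar takes more than one launch. A hedged sketch of a host-side driver for the float case; the buffer names and the two-pass limit are illustrative, not taken from the commit:

    // Pass 1: each block reduces up to BLOCK_SIZE elements into one partial sum.
    unsigned int blocks = (len + BLOCK_SIZE - 1) / BLOCK_SIZE;
    CUDANet::Kernels::sum_reduce<float><<<blocks, BLOCK_SIZE>>>(d_in, d_partial, len);

    // Pass 2: a single block folds the partials into d_partial[0]
    // (assumes blocks <= BLOCK_SIZE; larger inputs need further passes).
    CUDANet::Kernels::sum_reduce<float><<<1, BLOCK_SIZE>>>(d_partial, d_partial, blocks);
    CUDA_CHECK(cudaGetLastError());
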
@@ -5,9 +5,10 @@
 
 namespace CUDANet::Kernels {
 
+template <typename T>
 __global__ void max_pool(
-    const float* __restrict__ d_input,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
     const Shape input_shape,
     const Shape output_shape,
     const Shape pool_shape,
@@ -15,9 +16,10 @@ __global__ void max_pool(
     const Shape padding_shape
 );
 
+template <typename T>
 __global__ void avg_pool(
-    const float* __restrict__ d_input,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
     const Shape input_shape,
     const Shape output_shape,
     const Shape pool_shape,
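
Note: one consequence of turning these __global__ kernels into templates while keeping the declarations in headers is that each defining .cu file must either explicitly instantiate every supported element type or move the definitions into the headers. A minimal sketch of explicit instantiations for the float case (hypothetical; not shown in this diff):

    // Hypothetical explicit instantiations at the end of the defining .cu file.
    template __global__ void CUDANet::Kernels::relu<float>(
        const float* __restrict__, float* __restrict__, const unsigned int);
    template __global__ void CUDANet::Kernels::sigmoid<float>(
        const float* __restrict__, float* __restrict__, const unsigned int);
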