Compare commits

...

27 Commits

Author SHA1 Message Date
6318d52f12 Use const T* for input tensors in layer and tensor operations 2025-11-28 21:41:38 +01:00
71dc5a924d Move dtype size implementation to cpp file 2025-11-27 23:29:54 +01:00
7e27c87673 Fix compilation errors and warnings 2025-11-27 22:41:49 +01:00
e79667671a Refactor size calculations in layers and backend 2025-11-27 22:01:09 +01:00
c855ae89ec Refactor Tensor methods to use void* for data handling and add device_ptr method 2025-11-27 21:18:51 +01:00
9ff214d759 Refactor CUDA kernels and tensor operations for type generality 2025-11-26 20:47:55 +01:00
13d3d38b68 Add dtype parameter to layer constructors 2025-11-26 00:19:33 +01:00
84153ac49c Add default dtype to backend 2025-11-25 23:42:19 +01:00
ad079560ff Update CMakeLists.txt 2025-11-25 19:08:55 +01:00
60964cf294 Move factory implementation out of header 2025-11-24 22:01:54 +01:00
a40ba96d4f Implement backend factory 2025-11-24 21:53:47 +01:00
a97ff8e1f6 Update main include file 2025-11-23 21:07:34 +01:00
38cb0c9ac0 Restructure include paths 2025-11-23 20:57:08 +01:00
4161caf3e1 Update BatchNorm2d to return sizes for running mean and var 2025-11-23 20:48:41 +01:00
9f1a56c699 Refactor Layer interface to return size of weights and biases instead of Tensor references 2025-11-23 20:44:25 +01:00
547cd0c224 Remove unnecessary inclusion of cuda_helper.cuh in pool.cu 2025-11-23 19:21:22 +01:00
1102aef293 Implement custom Shape struct with __device__ support 2025-11-23 19:21:06 +01:00
82a0e7c19d Fix some compilation errors 2025-11-23 18:50:57 +01:00
51bcee01ab Migrate model class to Tensor 2025-11-22 22:40:38 +01:00
ca44ea4436 Migrate module to tensors 2025-11-22 18:02:42 +01:00
104d6ea33d Fix small layer issues 2025-11-22 00:33:51 +01:00
4c8b2ef537 Migrate add layer to tensors 2025-11-22 00:12:20 +01:00
aeb1739c46 Migrate concat layer 2025-11-21 23:52:58 +01:00
fd4775faa4 Migrate batch norm layer 2025-11-21 23:24:14 +01:00
5679dc0a50 Add avgPool2d implementation 2025-11-21 19:39:30 +01:00
c83e1f0c45 Implement InvalidShapeException 2025-11-21 18:54:45 +01:00
6685aa6629 WIP Migrate AvgPool2d 2025-11-19 23:21:18 +01:00
56 changed files with 2294 additions and 1692 deletions

View File

@@ -23,8 +23,8 @@ endif()
file(GLOB_RECURSE CPU_SOURCES
+    src/*.cpp
    src/layers/*.cpp
+    src/model/*.cpp
)
set(LIBRARY_SOURCES ${CPU_SOURCES})
@@ -32,10 +32,7 @@ set(LIBRARY_SOURCES ${CPU_SOURCES})
if(USE_CUDA)
    file(GLOB_RECURSE CUDA_SOURCES
        src/backends/cuda/*.cu
-        src/backends/cuda/utils/*.cu
        src/backends/cuda/kernels/*.cu
-        src/backends/cuda/layers/*.cu
-        src/layers/*.cu # To be removed
    )
    set(LIBRARY_SOURCES ${LIBRARY_SOURCES} ${CUDA_SOURCES})
endif()
@@ -46,17 +43,17 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
add_library(${PROJECT_NAME} STATIC ${LIBRARY_SOURCES})
if(USE_CUDA)
+    # Enable relocatable device code for proper template instantiation across translation units
+    set_target_properties(${PROJECT_NAME} PROPERTIES
+        CUDA_SEPARABLE_COMPILATION ON
+        CUDA_RUNTIME_LIBRARY Shared
+    )
    target_link_libraries(${PROJECT_NAME} CUDA::cudart)
endif()
# Set include directories for the library
target_include_directories(${PROJECT_NAME} PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}/include
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/utils
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/kernels
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/layers
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/model
-    ${CMAKE_CURRENT_SOURCE_DIR}/src
)
set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20)

View File

@@ -1,14 +1,39 @@
#pragma once
-#include <cstddef>
+#include <memory>
+#include <optional>
+#include "shape.hpp"
+#include "tensor.hpp"
namespace CUDANet {
-// Forward declaration
+// Forward declarations
+class Backend;
class Tensor;
+enum class DType;
+enum BackendType { CUDA_BACKEND, CPU_BACKEND };
+struct BackendConfig {
+    int device_id = 0;
+};
+class BackendFactory {
+  public:
+    static std::unique_ptr<Backend> create(BackendType backend_type, const BackendConfig& config);
+};
class Backend {
+  protected:
+    std::optional<DType> default_dtype;
  public:
+    // Dtypes
+    virtual bool supports_dtype(DType dtype) const = 0;
+    virtual void set_default_dtype(DType dtype) = 0;
+    virtual DType get_default_dtype() const = 0;
    // Memory management
    virtual void* allocate(size_t bytes) = 0;
    virtual void deallocate(void* ptr) = 0;
@@ -16,6 +41,7 @@ class Backend {
    // Tensor ops
    virtual void print(const CUDANet::Tensor& input) = 0;
    virtual void zero(CUDANet::Tensor& input) = 0;
+    virtual void fill(CUDANet::Tensor& input, int data) = 0;
    virtual void
    copy_to_device(CUDANet::Tensor& tensor, void* data, size_t size) = 0;
@@ -53,7 +79,7 @@ class Backend {
        const CUDANet::Shape out_shape
    ) = 0;
-    virtual CUDANet::Tensor& maxPool2d(
+    virtual CUDANet::Tensor& max_pool2d(
        const CUDANet::Tensor& input,
        CUDANet::Tensor& output,
        CUDANet::Shape input_shape,
@@ -62,6 +88,39 @@ class Backend {
        CUDANet::Shape padding_shape,
        CUDANet::Shape output_shape
    ) = 0;
+    virtual CUDANet::Tensor& avg_pool2d(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    ) = 0;
+    virtual CUDANet::Tensor& batch_norm(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Tensor& weights,
+        CUDANet::Tensor& biases,
+        CUDANet::Tensor& running_mean,
+        CUDANet::Tensor& running_var,
+        CUDANet::Tensor& epsilon
+    ) = 0;
+    virtual CUDANet::Tensor& concat(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    ) = 0;
+    virtual CUDANet::Tensor& add(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    ) = 0;
};
} // namespace CUDANet
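The factory and dtype methods above change how callers obtain and configure a backend. A minimal usage sketch of the new interface (illustrative only, not part of this changeset; assumes the library was built with USE_CUDA):

#include "cudanet.hpp"

int main() {
    // Request a CUDA backend on device 0; BackendFactory::create throws if
    // CUDA support was compiled out or no device is available.
    CUDANet::BackendConfig config{.device_id = 0};
    auto backend = CUDANet::BackendFactory::create(CUDANet::CUDA_BACKEND, config);

    // FLOAT32 is the only dtype the CUDA backend registers in this changeset.
    if (backend->supports_dtype(CUDANet::DType::FLOAT32)) {
        backend->set_default_dtype(CUDANet::DType::FLOAT32);
    }

    // Raw device memory still goes through the same allocate/deallocate virtuals.
    void* buffer = backend->allocate(1024);
    backend->deallocate(buffer);
    return 0;
}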

View File

@@ -3,7 +3,7 @@
#include "backend.hpp" #include "backend.hpp"
#include "tensor.hpp" #include "tensor.hpp"
namespace CUDANet::Backend { namespace CUDANet::Backends {
class CPU : public Backend { class CPU : public Backend {
public: public:

View File

@@ -1,63 +0,0 @@
#pragma once
#include "backend.hpp"
#include "tensor.hpp"
namespace CUDANet::Backend {
class CUDA : public Backend {
public:
// Memory management
void* allocate(size_t bytes) override;
void deallocate(void* ptr) override;
// Tensor ops
void print(const CUDANet::Tensor& input) override;
void zero(CUDANet::Tensor& input) override;
void
copy_to_device(CUDANet::Tensor& tensor, void* data, size_t size) override;
void sum(const CUDANet::Tensor& input, CUDANet::Tensor& sum) override;
void max(const CUDANet::Tensor& input, CUDANet::Tensor& max) override;
// Layer ops
void relu(CUDANet::Tensor& tensor) override;
void sigmoid(CUDANet::Tensor& tensor) override;
void softmax(
CUDANet::Tensor& tensor,
CUDANet::Tensor& temp_max,
CUDANet::Tensor& temp_sum
) override;
CUDANet::Tensor& dense(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const size_t input_size,
const size_t output_size
) override;
CUDANet::Tensor& conv2d(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const CUDANet::Shape in_shape,
const CUDANet::Shape padding_shape,
const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape
) override;
CUDANet::Tensor& CUDA::maxPool2d(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) override;
};
} // namespace CUDANet::Backend

View File

@@ -0,0 +1,11 @@
#pragma once
// CUDA Backend Implementation
#include "backend/cuda/cuda.cuh"
// CUDA Kernels
#include "backend/cuda/kernels/activation_functions.cuh"
#include "backend/cuda/kernels/convolution.cuh"
#include "backend/cuda/kernels/matmul.cuh"
#include "backend/cuda/kernels/pool.cuh"

View File

@@ -0,0 +1,243 @@
#pragma once
#include <cstdio>
#include <set>
#include "backend.hpp"
#include "tensor.hpp"
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 128
#endif // BLOCK_SIZE
/**
* @brief CUDA error checking macro
*
*/
#define CUDA_CHECK(call) \
do { \
cudaError_t result = call; \
if (result != cudaSuccess) { \
fprintf( \
stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", __FILE__, \
__LINE__, static_cast<unsigned int>(result), \
cudaGetErrorString(result), #call \
); \
exit(EXIT_FAILURE); \
} \
} while (0)
namespace CUDANet::Backends {
template <DType dtype>
struct cuda_dtype_map;
template <>
struct cuda_dtype_map<DType::FLOAT32> {
using type = float;
};
class CUDA : public Backend {
public:
CUDA(const BackendConfig& config);
bool supports_dtype(DType dtype) const override;
void set_default_dtype(DType dtype) override;
DType get_default_dtype() const override;
static bool is_cuda_available();
void initialize();
// Memory management
void* allocate(size_t bytes) override;
void deallocate(void* ptr) override;
// Tensor ops dispatchers
void print(const CUDANet::Tensor& input) override;
void zero(CUDANet::Tensor& input) override;
void fill(CUDANet::Tensor& input, int value) override;
void
copy_to_device(CUDANet::Tensor& tensor, void* data, size_t size) override;
void sum(const CUDANet::Tensor& input, CUDANet::Tensor& sum) override;
void max(const CUDANet::Tensor& input, CUDANet::Tensor& max) override;
// Layer ops dispatchers
void relu(CUDANet::Tensor& tensor) override;
void sigmoid(CUDANet::Tensor& tensor) override;
void softmax(
CUDANet::Tensor& tensor,
CUDANet::Tensor& temp_max,
CUDANet::Tensor& temp_sum
) override;
CUDANet::Tensor& dense(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const size_t input_size,
const size_t output_size
) override;
CUDANet::Tensor& conv2d(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const CUDANet::Shape in_shape,
const CUDANet::Shape padding_shape,
const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape
) override;
CUDANet::Tensor& max_pool2d(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) override;
CUDANet::Tensor& avg_pool2d(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) override;
CUDANet::Tensor& batch_norm(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Tensor& weights,
CUDANet::Tensor& biases,
CUDANet::Tensor& running_mean,
CUDANet::Tensor& running_var,
CUDANet::Tensor& epsilon
) override;
CUDANet::Tensor& concat(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) override;
CUDANet::Tensor& add(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) override;
private:
int device_id;
std::set<DType> supported_dtypes;
// Tensor ops template impls
template <typename T>
void print_impl(const CUDANet::Tensor& input);
template <typename T>
void fill_impl(CUDANet::Tensor& input, int value);
template <typename T>
void copy_to_device_impl(CUDANet::Tensor& tensor, void* data, size_t size);
template <typename T>
void sum_impl(const CUDANet::Tensor& input, CUDANet::Tensor& sum);
template <typename T>
void max_impl(const CUDANet::Tensor& input, CUDANet::Tensor& max);
// Layer ops template impls
template <typename T>
void relu_impl(CUDANet::Tensor& tensor);
template <typename T>
void sigmoid_impl(CUDANet::Tensor& tensor);
template <typename T>
void softmax_impl(
CUDANet::Tensor& tensor,
CUDANet::Tensor& temp_max,
CUDANet::Tensor& temp_sum
);
template <typename T>
CUDANet::Tensor& dense_impl(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const size_t input_size,
const size_t output_size
);
template <typename T>
CUDANet::Tensor& conv2d_impl(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const CUDANet::Shape in_shape,
const CUDANet::Shape padding_shape,
const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape
);
template <typename T>
CUDANet::Tensor& max_pool2d_impl(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
);
template <typename T>
CUDANet::Tensor& avg_pool2d_impl(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
);
template <typename T>
CUDANet::Tensor& batch_norm_impl(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Tensor& weights,
CUDANet::Tensor& biases,
CUDANet::Tensor& running_mean,
CUDANet::Tensor& running_var,
CUDANet::Tensor& epsilon
);
template <typename T>
CUDANet::Tensor& concat_impl(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
);
template <typename T>
CUDANet::Tensor& add_impl(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
);
};
} // namespace CUDANet::Backends
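The cuda_dtype_map trait together with the private *_impl templates implies a dtype-dispatch pattern in the public overrides. A hypothetical sketch of one such dispatcher, written inside namespace CUDANet::Backends (the real definitions live in the backend .cu files, most of which are not part of this compare):

// Hypothetical dispatcher; illustrative only.
void CUDA::relu(CUDANet::Tensor& tensor) {
    switch (tensor.get_dtype()) {
        case DType::FLOAT32:
            // cuda_dtype_map<DType::FLOAT32>::type is float
            relu_impl<cuda_dtype_map<DType::FLOAT32>::type>(tensor);
            break;
        default:
            throw std::runtime_error("Unsupported dtype");
    }
}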

View File

@@ -0,0 +1,22 @@
#pragma once
#include <cuda_runtime.h>
namespace CUDANet::Kernels {
template <typename T>
__global__ void sigmoid(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
);
template <typename T>
__global__ void relu(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
);
} // namespace CUDANet::Kernels

View File

@@ -5,11 +5,12 @@
namespace CUDANet::Kernels {
+template <typename T>
__global__ void convolution(
-    const float* __restrict__ d_input,
-    const float* __restrict__ d_kernel,
-    const float* __restrict__ d_bias,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    const T* __restrict__ d_kernel,
+    const T* __restrict__ d_bias,
+    T* __restrict__ d_output,
    const Shape input_shape,
    const Shape padding_shape,
    const Shape kernel_shape,

View File

@@ -0,0 +1,109 @@
#pragma once
#include <cuda_runtime.h>
namespace CUDANet::Kernels {
template <typename T>
__global__ void mat_vec_mul(
const T* __restrict__ d_matrix,
const T* __restrict__ d_vector,
T* __restrict__ d_output,
const unsigned int w,
const unsigned int h
);
template <typename T>
__global__ void vec_vec_add(
const T* __restrict__ d_vector1,
const T* __restrict__ d_vector2,
T* __restrict__ d_output,
const unsigned int w
);
template <typename T>
__global__ void vec_vec_sub(
const T* __restrict__ d_vector1,
const T* __restrict__ d_vector2,
T* __restrict__ d_output,
const unsigned int w
);
template <typename T>
__global__ void vec_vec_mul(
const T* __restrict__ d_vector1,
const T* __restrict__ d_vector2,
T* __restrict__ d_output,
const unsigned int w
);
template <typename T>
__global__ void vec_scalar_sub(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
);
template <typename T>
__global__ void vec_scalar_add(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
);
template <typename T>
__global__ void vec_scalar_div(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
);
template <typename T>
__global__ void vec_scalar_mul(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
);
template <typename T>
__global__ void vec_exp(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
);
template <typename T>
__global__ void vec_sqrt(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
);
template <typename T>
__global__ void vec_scale(
const T* __restrict__ src,
T* __restrict__ dst,
const T* __restrict__ scale,
const T* epsilon,
const unsigned int len
);
template <typename T>
__global__ void max_reduce(
const T* __restrict__ d_vector,
T* __restrict__ d_output,
const unsigned int len
);
template <typename T>
__global__ void sum_reduce(
const T* __restrict__ d_vector,
T* __restrict__ d_output,
const unsigned int len
);
} // namespace CUDANet::Kernels

View File

@@ -5,9 +5,10 @@
namespace CUDANet::Kernels {
+template <typename T>
__global__ void max_pool(
-    const float* __restrict__ d_input,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
    const Shape input_shape,
    const Shape output_shape,
    const Shape pool_shape,
@@ -15,9 +16,10 @@ __global__ void max_pool(
    const Shape padding_shape
);
+template <typename T>
__global__ void avg_pool(
-    const float* __restrict__ d_input,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
    const Shape input_shape,
    const Shape output_shape,
    const Shape pool_shape,

View File

@@ -1,35 +0,0 @@
#ifndef CUDANET_H
#define CUDANET_H
#ifdef USE_CUDA
#include "activation_functions.cuh"
#include "convolution.cuh"
#include "matmul.cuh"
#include "pooling.cuh"
#endif
// Layers
#include "activation.hpp"
#include "add.hpp"
#include "avg_pooling.hpp"
#include "batch_norm.hpp"
#include "concat.hpp"
#include "conv2d.hpp"
#include "dense.hpp"
#include "input.hpp"
#include "layer.hpp"
#include "max_pooling.hpp"
#include "output.hpp"
// Models
#include "model.hpp"
#include "module.hpp"
// Utils
#include "imagenet.hpp"
#ifdef USE_CUDA
#include "cuda_helper.cuh"
#include "vector.cuh"
#endif
#endif // CUDANET_H

include/cudanet.hpp (new file, 55 lines)
View File

@@ -0,0 +1,55 @@
#pragma once
// ============================================================================
// Core Data Structures & Abstractions (BACKEND-INDEPENDENT)
// ============================================================================
#include "shape.hpp"
#include "backend.hpp"
#include "tensor.hpp"
#include "layer.hpp"
// ============================================================================
// Container Classes
// ============================================================================
#include "module.hpp"
#include "model.hpp"
// ============================================================================
// Layer Implementations
// ============================================================================
// Activation
#include "layers/activation.hpp"
// Normalization
#include "layers/batch_norm.hpp"
// Linear
#include "layers/dense.hpp"
// Convolutional
#include "layers/conv2d.hpp"
// Pooling
#include "layers/max_pool.hpp"
#include "layers/avg_pool.hpp"
// Composition (element-wise operations)
#include "layers/add.hpp"
#include "layers/concat.hpp"
// ============================================================================
// Dataset Labels
// ============================================================================
#include "datasets/imagenet.hpp"
// ============================================================================
// Backend-Specific Includes (conditionally compiled)
// ============================================================================
#ifdef USE_CUDA
#include "backend/cuda/all.cuh"
#endif

View File

@@ -1,5 +1,4 @@
-#ifndef CUDANET_IMAGENET_H
-#define CUDANET_IMAGENET_H
+#pragma once
#include <map>
#include <string>
@@ -1012,5 +1011,3 @@ const std::map <int, std::string> IMAGENET_CLASS_MAP = {
// clang-format on
}
-#endif // CUDANET_IMAGENET_H

View File

@@ -1,36 +0,0 @@
#ifndef CUDANET_ACTIVATION_FUNCTIONS_H
#define CUDANET_ACTIVATION_FUNCTIONS_H
#include <cuda_runtime.h>
namespace CUDANet::Kernels {
/**
* @brief Sigmoid activation function kernel
*
* @param src Pointer to the source array
* @param dst Pointer to the destination array
* @param len Length of the arrays
*/
__global__ void sigmoid(
const float* __restrict__ src,
float* __restrict__ dst,
const unsigned int len
);
/**
* @brief Relu activation function kernel
*
* @param src Pointer to the source array
* @param dst Pointer to the destination array
* @param len Length of the arrays
*/
__global__ void relu(
const float* __restrict__ src,
float* __restrict__ dst,
const unsigned int len
);
} // namespace CUDANet::Kernels
#endif // CUDANET_ACTIVATION_FUNCTIONS_H

View File

@@ -1,195 +0,0 @@
#ifndef CUDANET_MATMUL_H
#define CUDANET_MATMUL_H
#include <cuda_runtime.h>
namespace CUDANet::Kernels {
/**
* @brief Matrix vector multiplication kernel
*
* @param d_matrix Device pointer to matrix
* @param d_vector Device pointer to vector
* @param d_output Device pointer to output vector
* @param w Width of the matrix
* @param h Height of the matrix
*/
__global__ void mat_vec_mul(
const float* __restrict__ d_matrix,
const float* __restrict__ d_vector,
float* __restrict__ d_output,
const unsigned int w,
const unsigned int h
);
/**
* @brief Vector vector addition kernel
*
* @param d_vector1 Device pointer to first vector
* @param d_vector2 Device pointer to second vector
* @param d_output Device pointer to output vector
* @param w Length of the vectors
*/
__global__ void vec_vec_add(
const float* __restrict__ d_vector1,
const float* __restrict__ d_vector2,
float* __restrict__ d_output,
const unsigned int w
);
/**
* @brief Vector vector subtraction kernel
*
* @param d_vector1
* @param d_vector2
* @param d_output
* @param w
* @return __global__
*/
__global__ void vec_vec_sub(
const float* __restrict__ d_vector1,
const float* __restrict__ d_vector2,
float* __restrict__ d_output,
const unsigned int w
);
__global__ void vec_vec_mul(
const float* __restrict__ d_vector1,
const float* __restrict__ d_vector2,
float* __restrict__ d_output,
const unsigned int w
);
/**
* @brief Sub scalar from each element of the vector
*
* @param d_vector
* @param d_scalar
* @param d_output
* @param w
* @return __global__
*/
__global__ void vec_scalar_sub(
const float* __restrict__ d_src,
float* __restrict__ d_out,
const float* __restrict__ d_scalar,
const unsigned int len
);
/**
* @brief Add scalar to each element of the vector
*
* @param d_src
* @param d_out
* @param d_scalar
* @param len
* @return __global__
*/
__global__ void vec_scalar_add(
const float* __restrict__ d_src,
float* __restrict__ d_out,
const float* __restrict__ d_scalar,
const unsigned int len
);
/**
* @brief Divide each element of the vector by a scalar
*
* @param src Pointer to the source array
* @param dst Pointer to the destination array
* @param len Length of the arrays
*/
__global__ void vec_scalar_div(
const float* __restrict__ d_src,
float* __restrict__ d_out,
const float* __restrict__ d_scalar,
const unsigned int len
);
/**
* @brief Multiply each element of the vector by a scalar
*
* @param d_src
* @param d_out
* @param d_scalar
* @param len
* @return __global__
*/
__global__ void vec_scalar_mul(
const float* __restrict__ d_src,
float* __restrict__ d_out,
const float* __restrict__ d_scalar,
const unsigned int len
);
/**
* @brief Exponentiate each element of the vector
*
* @param src Pointer to the source array
* @param dst Pointer to the destination array
* @param len Length of the arrays
*/
__global__ void vec_exp(
const float* __restrict__ src,
float* __restrict__ dst,
const unsigned int len
);
/**
* @brief Compute the square root of each element of the vector
*
* @param src Device pointer to source vector
* @param dst Device pointer to destination vector
* @param len Length of the vector
*/
__global__ void vec_sqrt(
const float* __restrict__ src,
float* __restrict__ dst,
const unsigned int len
);
/**
* @brief Scales the vector by 1/sqrt(scale + epsilon)
*
* @param src Device pointer to source vector
* @param dst Device pointer to destination vector
* @param scale Scale
* @param epsilon Epsilon
* @param len Length of the vector
*/
__global__ void vec_scale(
const float* __restrict__ src,
float* __restrict__ dst,
const float* __restrict__ scale,
const float* epsilon,
const unsigned int len
);
/**
* @brief Max reduction kernel
*
* @param d_vector Device pointer to vector
* @param d_output Device pointer to output vector
*/
__global__ void max_reduce(
const float* __restrict__ d_vector,
float* __restrict__ d_output,
const unsigned int len
);
/**
* @brief
*
* @param d_vector Device pointer to vector
* @param d_output Device pointer to output vector
* @param len Length of the vector
*/
__global__ void sum_reduce(
const float* __restrict__ d_vector,
float* __restrict__ d_output,
const unsigned int len
);
} // namespace CUDANet::Kernels
#endif // CUDANET_MATMUL_H

View File

@@ -12,10 +12,12 @@
namespace CUDANet {
/**
- * @brief Basic Sequential Layer
+ * @brief Basic Layer
 *
 */
class Layer {
+  protected:
+    CUDANet::DType dtype;
  public:
    virtual ~Layer(){};
@@ -32,11 +34,11 @@ class Layer {
    virtual void set_weights(void *input) = 0;
-    virtual CUDANet::Tensor& get_weights() = 0;
+    virtual size_t get_weights_size() = 0;
    virtual void set_biases(void *input) = 0;
-    virtual CUDANet::Tensor& get_biases() = 0;
+    virtual size_t get_biases_size() = 0;
};
-} // namespace CUDANet::Layers
+} // namespace CUDANet
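Since get_weights()/get_biases() no longer hand back Tensor references, callers now push host data in through set_weights/set_biases and only query sizes. A hedged sketch of the calling pattern (the layer pointer, the buffers, and the element-count reading of the sizes are assumptions, not taken from this diff):

// Illustrative only: load host-side parameters into a concrete layer.
std::vector<float> host_weights(layer->get_weights_size());
std::vector<float> host_biases(layer->get_biases_size());
// ... fill the vectors from a checkpoint ...
layer->set_weights(host_weights.data());  // void* in, size implied by the layer
layer->set_biases(host_biases.data());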

View File

@@ -20,12 +20,13 @@ enum ActivationType { SIGMOID, RELU, SOFTMAX, NONE };
 * @brief Utility class that performs activation
 *
 */
-class Activation : public Layer {
+class Activation : public CUDANet::Layer {
  public:
    Activation() = default;
-    Activation(CUDANet::Backend* backend, ActivationType activation, const CUDANet::Shape &shape);
+    Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::Backend* backend);
+    Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::DType dtype, CUDANet::Backend* backend);
    ~Activation() = default;
@@ -41,16 +42,16 @@ class Activation : public Layer {
    void set_weights(void *input) override;
-    CUDANet::Tensor& get_weights() override;
+    size_t get_weights_size() override;
    void set_biases(void *input) override;
-    CUDANet::Tensor& get_biases() override;
+    size_t get_biases_size() override;
  private:
    CUDANet::Backend* backend;
-    ActivationType activationType;
+    ActivationType activation_type;
    CUDANet::Shape shape;
    CUDANet::Tensor softmax_sum;

View File

@@ -1,49 +1,27 @@
-#ifndef CUDANET_ADD_LAYER_H
-#define CUDANET_ADD_LAYER_H
+#pragma once
+#include "shape.hpp"
+#include "tensor.hpp"
namespace CUDANet::Layers {
class Add {
  public:
-    /**
-     * @brief Create a new Add layer
-     *
-     * @param inputSize Size of the input arrays
-     */
-    Add(int inputSize);
-    /**
-     * @brief Destroy the Add layer
-     *
-     */
+    Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend);
+    Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend* backend);
    ~Add();
-    /**
-     * @brief Adds first input to second input
-     *
-     * @param d_inputA Device pointer to the first input
-     * @param d_inputB Device pointer to the second input
-     *
-     */
-    float* forward(const float* inputA, const float* inputB);
+    CUDANet::Tensor&
+    forward(CUDANet::Tensor& input_a, CUDANet::Tensor& input_b);
  private:
-    int inputSize;
-    float* output;
-    float* forwardCPU(const float* inputA, const float* inputB);
-#ifdef USE_CUDA
-    float* d_output;
-    int gridSize;
-    float* forwardCUDA(const float* d_inputA, const float* d_inputB);
-    void initCUDA();
-    void delCUDA();
-#endif
+    CUDANet::Shape out_shape;
+    CUDANet::Tensor output;
+    CUDANet::Backend *backend;
+    CUDANet::DType dtype;
};
} // namespace CUDANet::Layers
-#endif // CUDANET_ADD_LAYER_H
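The migrated Add layer now takes shapes and a backend up front and operates on tensors. A small usage sketch (the shape, the backend smart pointer, and the two input tensors are illustrative, not from this diff):

CUDANet::Shape shape = {3, 224, 224};
// Uses the backend's default dtype; the second constructor pins a dtype explicitly.
CUDANet::Layers::Add add(shape, shape, backend.get());
CUDANet::Tensor& out = add.forward(input_a, input_b);  // reference to the Tensor member owned by the layer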

View File

@@ -0,0 +1,64 @@
#pragma once
#include "layer.hpp"
namespace CUDANet::Layers {
class AvgPool2d : public CUDANet::Layer {
public:
AvgPool2d(
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Backend *backend
);
AvgPool2d(
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::DType dtype,
CUDANet::Backend *backend
);
~AvgPool2d();
CUDANet::Tensor& forward(CUDANet::Tensor& input) override;
CUDANet::Shape input_shape() override;
CUDANet::Shape output_shape() override;
size_t input_size() override;
size_t output_size() override;
void set_weights(void* input) override;
size_t get_weights_size() override;
void set_biases(void* input) override;
size_t get_biases_size() override;
protected:
CUDANet::Shape in_shape;
CUDANet::Shape pool_shape;
CUDANet::Shape stride_shape;
CUDANet::Shape padding_shape;
CUDANet::Shape out_shape;
CUDANet::Tensor output;
CUDANet::Backend *backend;
};
class AdaptiveAvgPool2d : public AvgPool2d {
public:
AdaptiveAvgPool2d(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::Backend *backend);
AdaptiveAvgPool2d(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::DType dtype, CUDANet::Backend *backend);
};
} // namespace CUDANet::Layers

View File

@@ -1,78 +0,0 @@
#ifndef CUDANET_AVG_POOLING_H
#define CUDANET_AVG_POOLING_H
#include "activation.hpp"
#include "layer.hpp"
namespace CUDANet::Layers {
class AvgPooling2d : public Layer, public TwoDLayer {
public:
AvgPooling2d(
shape2d inputSize,
int nChannels,
shape2d poolingSize,
shape2d stride,
shape2d padding,
ActivationType activationType
);
~AvgPooling2d();
float* forward(const float* input);
/**
* @brief Get output size
*
* @return int output size
*/
int get_output_size();
/**
* @brief Get input size
*
* @return int input size
*/
int getInputSize();
shape2d getOutputDims();
protected:
shape2d inputSize;
int nChannels;
shape2d poolingSize;
shape2d stride;
shape2d padding;
shape2d outputSize;
Activation* activation;
float* forwardCPU(const float* input);
#ifdef USE_CUDA
float* d_output;
float* forwardCUDA(const float* d_input);
void initCUDA();
void delCUDA();
#endif
};
class AdaptiveAvgPooling2d : public AvgPooling2d {
public:
AdaptiveAvgPooling2d(
shape2d inputShape,
int nChannels,
shape2d outputShape,
ActivationType activationType
);
private:
#ifdef USE_CUDA
void initCUDA();
#endif
};
} // namespace CUDANet::Layers
#endif // CUDANET_AVG_POOLING_H

View File

@@ -1,170 +1,55 @@
-#ifndef CUDANET_BATCH_NORM_H
-#define CUDANET_BATCH_NORM_H
-#include <vector>
-#include "activation.hpp"
+#pragma once
#include "layer.hpp"
namespace CUDANet::Layers {
-class BatchNorm2d : public WeightedLayer, public TwoDLayer {
+class BatchNorm2d : public CUDANet::Layer {
  public:
-    BatchNorm2d(
-        shape2d inputSize,
-        int inputChannels,
-        float epsilon,
-        ActivationType activationType
-    );
+    BatchNorm2d(CUDANet::Shape input_shape, float epsilon, CUDANet::Backend *backend);
+    BatchNorm2d(CUDANet::Shape input_shape, float epsilon, CUDANet::DType dtype, CUDANet::Backend *backend);
    ~BatchNorm2d();
-    /**
-     * @brief Compute the forward pass of the batchnorm layer
-     *
-     * @param d_input Device pointer to the input
-     * @return float* Device pointer to the output
-     */
-    float* forward(const float* d_input);
-    /**
-     * @brief Set the weights of the batchnorm layer
-     *
-     * @param weights_input Pointer to the weights
-     */
-    void setWeights(const float* weights_input);
-    /**
-     * @brief Get the weights of the batchnorm layer
-     *
-     * @return std::vector<float>
-     */
-    std::vector<float> getWeights();
-    /**
-     * @brief Set the biases of the batchnorm layer
-     *
-     * @param biases_input Pointer to the biases
-     */
-    void setBiases(const float* biases_input);
-    /**
-     * @brief Get the biases of the batchnorm layer
-     *
-     * @return std::vector<float>
-     */
-    std::vector<float> getBiases();
-    /**
-     * @brief Set the Running Mean
-     *
-     * @param running_mean_input
-     */
-    void setRunningMean(const float* running_mean_input);
-    /**
-     * @brief Get the Running Mean
-     *
-     */
-    std::vector<float> getRunningMean();
-    /**
-     * @brief Set the Running Var
-     *
-     * @param running_mean_input
-     */
-    void setRunningVar(const float* running_mean_input);
-    /**
-     * @brief Get the Running Var
-     *
-     */
-    std::vector<float> getRunningVar();
-    /**
-     * @brief Get output size
-     *
-     * @return int output size
-     */
-    int getOutputSize();
-    /**
-     * @brief Get input size
-     *
-     * @return int input size
-     */
-    int getInputSize();
-    shape2d getOutputDims();
+    CUDANet::Tensor& forward(CUDANet::Tensor& input) override;
+    CUDANet::Shape input_shape() override;
+    CUDANet::Shape output_shape() override;
+    size_t input_size() override;
+    size_t output_size() override;
+    void set_weights(void* input) override;
+    size_t get_weights_size() override;
+    void set_biases(void* input) override;
+    size_t get_biases_size() override;
+    void set_running_mean(void* input);
+    size_t get_running_mean_size();
+    void set_running_var(void* input);
+    size_t get_running_var_size();
  private:
-    shape2d inputSize;
-    int inputChannels;
-    float epsilon;
-    int gridSize;
-#ifdef USE_CUDA
-    float* d_output;
-    float* d_running_mean;
-    float* d_running_var;
-    float* d_length;
-    float* d_epsilon;
-    float* d_weights;
-    float* d_biases;
-    void initCUDA();
-    void delCUDA();
-    /**
-     * @brief Copy weights and biases to the device
-     *
-     */
-    void toCuda();
-    float* forwardCUDA(const float* d_input);
-#endif
-    std::vector<float> weights;
-    std::vector<float> biases;
-    std::vector<float> running_mean;
-    std::vector<float> running_var;
-    Activation* activation;
-    float* forwardCPU(const float* input);
-    /**
-     * @brief Initialize weights of the batchnorm layer with zeros
-     *
-     */
-    void initializeWeights();
-    /**
-     * @brief Initialize biases of the batchnorm layer with zeros
-     *
-     */
-    void initializeBiases();
-    /**
-     * @brief Initialize mean of the batchnorm layer with zeros
-     *
-     */
-    void initializeRunningMean();
-    /**
-     * @brief Initialize sqrt of variance of the batchnorm layer with ones
-     *
-     */
-    void initializeRunningVar();
+    CUDANet::Shape in_shape;
+    CUDANet::Tensor epsilon;
+    CUDANet::Tensor running_mean;
+    CUDANet::Tensor running_var;
+    CUDANet::Tensor weights;
+    CUDANet::Tensor biases;
+    CUDANet::Tensor output;
+    CUDANet::Backend *backend;
};
} // namespace CUDANet::Layers
-#endif // CUDANET_BATCH_NORM_H
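Besides weights and biases, the rewritten BatchNorm2d exposes its running statistics through the same raw-pointer setters added here. A hedged initialization sketch (the shape and the host buffers are illustrative):

CUDANet::Layers::BatchNorm2d bn({64, 56, 56}, 1e-5f, backend.get());
std::vector<float> mean(bn.get_running_mean_size(), 0.0f);
std::vector<float> var(bn.get_running_var_size(), 1.0f);
bn.set_running_mean(mean.data());
bn.set_running_var(var.data());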

View File

@@ -1,5 +1,4 @@
-#ifndef CUDANET_CONCAT_LAYER_H
-#define CUDANET_CONCAT_LAYER_H
+#pragma once
#include "layer.hpp"
@@ -11,47 +10,27 @@ namespace CUDANet::Layers {
 */
class Concat {
  public:
-    /**
-     * @brief Create a new Concat layer
-     *
-     * @param inputASize Size of the first input
-     * @param inputBSize Size of the second input
-     */
-    Concat(const int inputASize, const int inputBSize);
-    /**
-     * @brief Destroy the Concat layer
-     *
-     */
+    Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::Backend *backend);
+    Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend *backend);
    ~Concat();
-    /**
-     * @brief Concatenates the two inputs
-     *
-     * @param d_input_A Device pointer to the first input
-     * @param d_input_B Device pointer to the second input
-     *
-     * @return Device pointer to the output
-     */
-    float* forward(const float* d_input_A, const float* d_input_B);
-    int getOutputSize();
+    CUDANet::Tensor& forward(CUDANet::Tensor& input_a, CUDANet::Tensor& input_b);
+    CUDANet::Shape output_shape();
  private:
-    int inputASize;
-    int inputBSize;
-    float* forwardCPU(const float* input_A, const float* input_B);
-#ifdef USE_CUDA
-    float* d_output;
-    float* forwardCUDA(const float* d_input_A, const float* d_input_B);
-    void initCUDA();
-    void delCUDA();
-#endif
+    CUDANet::Shape a_shape;
+    CUDANet::Shape b_shape;
+    CUDANet::Shape out_shape;
+    CUDANet::Tensor output;
+    CUDANet::Backend *backend;
+    CUDANet::DType dtype;
};
} // namespace CUDANet::Layers
-#endif // CUDANET_CONCAT_LAYER_H

View File

@@ -1,8 +1,5 @@
#pragma once
-#include <vector>
-#include "activation.hpp"
#include "layer.hpp"
namespace CUDANet::Layers {
@@ -11,7 +8,7 @@ namespace CUDANet::Layers {
 * @brief 2D convolutional layer
 *
 */
-class Conv2d : public Layer {
+class Conv2d : public CUDANet::Layer {
  public:
    Conv2d(
        CUDANet::Shape input_shape,
@@ -20,8 +17,16 @@ class Conv2d : public Layer {
        CUDANet::Shape padding_shape,
        CUDANet::Backend* backend
    );
+    Conv2d(
+        CUDANet::Shape input_shape,
+        CUDANet::Shape kernel_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::DType dtype,
+        CUDANet::Backend* backend
+    );
-    ~Conv2d() {};
+    ~Conv2d();
    CUDANet::Tensor& forward(CUDANet::Tensor& input) override;
@@ -35,11 +40,11 @@ class Conv2d : public Layer {
    void set_weights(void* input) override;
-    CUDANet::Tensor& get_weights() override;
+    size_t get_weights_size() override;
    void set_biases(void* input) override;
-    CUDANet::Tensor& get_biases() override;
+    size_t get_biases_size() override;
    CUDANet::Shape get_padding_shape();

View File

@@ -1,7 +1,5 @@
#pragma once
-#include <vector>
#include "backend.hpp"
#include "layer.hpp"
@@ -11,10 +9,11 @@ namespace CUDANet::Layers {
 * @brief Dense (fully connected) layer
 *
 */
-class Dense : public Layer {
+class Dense : public CUDANet::Layer {
  public:
    Dense(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::Backend *backend);
+    Dense(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::DType dtype, CUDANet::Backend *backend);
    ~Dense();
@@ -30,11 +29,11 @@ class Dense : public Layer {
    void set_weights(void *input) override;
-    CUDANet::Tensor& get_weights() override;
+    size_t get_weights_size() override;
    void set_biases(void *input) override;
-    CUDANet::Tensor& get_biases() override;
+    size_t get_biases_size() override;
  private:
    CUDANet::Backend *backend;

View File

@@ -4,15 +4,23 @@
namespace CUDANet::Layers {
-class MaxPool2d : public Layer {
+class MaxPool2d : public CUDANet::Layer {
  public:
    MaxPool2d(
        CUDANet::Shape input_shape,
-        CUDANet::Shape pooling_shape,
+        CUDANet::Shape pool_shape,
        CUDANet::Shape stride_shape,
        CUDANet::Shape padding_shape,
        CUDANet::Backend* backend
    );
+    MaxPool2d(
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::DType dtype,
+        CUDANet::Backend* backend
+    );
    ~MaxPool2d();
    CUDANet::Tensor& forward(CUDANet::Tensor &input) override;
@@ -27,18 +35,18 @@ class MaxPool2d : public Layer {
    void set_weights(void *input) override;
-    CUDANet::Tensor& get_weights() override;
+    size_t get_weights_size() override;
    void set_biases(void *input) override;
-    CUDANet::Tensor& get_biases() override;
+    size_t get_biases_size() override;
  private:
    CUDANet::Shape in_shape;
-    CUDANet::Shape pooling_shape;
+    CUDANet::Shape pool_shape;
    CUDANet::Shape stride_shape;
    CUDANet::Shape padding_shape;

include/model.hpp (new file, 55 lines)
View File

@@ -0,0 +1,55 @@
#pragma once
#include <string>
#include <unordered_map>
#include <vector>
#include "layer.hpp"
#include "module.hpp"
namespace CUDANet {
enum TensorType {
WEIGHT,
BIAS,
RUNNING_MEAN,
RUNNING_VAR
};
struct TensorInfo {
std::string name;
TensorType type;
int size;
int offset;
};
class Model {
public:
Model(const CUDANet::Shape input_shape, const CUDANet::Shape output_shape);
~Model();
virtual CUDANet::Tensor& predict(CUDANet::Tensor& input);
CUDANet::Layer* get_layer(const std::string& name);
void register_layer(const std::string& name, Layer* layer);
void register_module(Module& module);
void load_weights(const std::string& path);
bool validate();
void print_summary();
protected:
CUDANet::Shape in_shape;
CUDANet::Shape out_shape;
CUDANet::Tensor output;
std::vector<std::pair<std::string, Layer*>> layers;
std::unordered_map<std::string, Layer*> layer_map;
};
} // namespace CUDANet
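Layers are registered on the model by name, much like the old addLayer API but against the new Layer interface. A hedged sketch of a Model subclass (the class, layer choices, and shapes are all illustrative, not from this diff):

class TinyNet : public CUDANet::Model {
  public:
    TinyNet(CUDANet::Backend* backend)
        : Model({1, 28, 28}, {10}),
          conv({1, 28, 28}, {5, 5}, {1, 1}, {0, 0}, backend),
          fc({400}, {10}, backend) {
        // Names are used later for weight loading and lookup via get_layer().
        register_layer("conv1", &conv);
        register_layer("fc", &fc);
    }
  private:
    CUDANet::Layers::Conv2d conv;
    CUDANet::Layers::Dense fc;
};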

View File

@@ -1,61 +0,0 @@
#ifndef CUDANET_MODEL_H
#define CUDANET_MODEL_H
#include <string>
#include <unordered_map>
#include <vector>
#include "input.hpp"
#include "layer.hpp"
#include "module.hpp"
#include "output.hpp"
namespace CUDANet {
enum TensorType {
WEIGHT,
BIAS,
RUNNING_MEAN,
RUNNING_VAR
};
struct TensorInfo {
std::string name;
TensorType type;
int size;
int offset;
};
class Model {
public:
Model(const shape2d inputSize, const int inputChannels, const int outputSize);
Model(const Model& other);
~Model();
virtual float* predict(const float* input);
void addLayer(const std::string& name, Layers::SequentialLayer* layer);
Layers::SequentialLayer* getLayer(const std::string& name);
void loadWeights(const std::string& path);
bool validate();
void printSummary();
protected:
Layers::Input* inputLayer;
Layers::Output* outputLayer;
shape2d inputSize;
int inputChannels;
int outputSize;
std::vector<std::pair<std::string, Layers::SequentialLayer*>> layers;
std::unordered_map<std::string, Layers::SequentialLayer*> layerMap;
};
} // namespace CUDANet
#endif // CUDANET_MODEL_H

View File

@@ -1,32 +0,0 @@
#ifndef CUDANET_MODULE_H
#define CUDANET_MODULE_H
#include <string>
#include <unordered_map>
#include <vector>
#include "layer.hpp"
namespace CUDANet {
class Module : public Layers::SequentialLayer {
public:
virtual float* forward(const float* d_input) = 0;
int getOutputSize();
int getInputSize();
void addLayer(const std::string& name, Layers::SequentialLayer* layer);
const std::vector<std::pair<std::string, Layers::SequentialLayer*>>& getLayers() const;
protected:
std::vector<std::pair<std::string, Layers::SequentialLayer*>> layers;
int outputSize;
int inputSize;
};
} // namespace CUDANet
#endif

include/module.hpp (new file, 31 lines)
View File

@@ -0,0 +1,31 @@
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "layer.hpp"
namespace CUDANet {
class Module {
public:
CUDANet::Shape input_shape();
CUDANet::Shape output_shape();
void register_layer(const std::string& name, Layer* layer);
void register_module(Module& module);
const std::vector<std::pair<std::string, Layer*>>& get_layers() const;
protected:
std::vector<std::pair<std::string, Layer*>> layers;
CUDANet::Shape in_shape;
CUDANet::Shape out_shape;
};
} // namespace CUDANet

View File

@@ -1,9 +1,117 @@
#pragma once
#ifndef __host__
#define __host__
#endif
#ifndef __device__
#define __device__
#endif
#include <format>
#include <stdexcept>
#include <vector>
namespace CUDANet {
-typedef std::vector<size_t> Shape;
-} // namespace CUDANet
+struct Shape {
+    static constexpr size_t MAX_DIMS = 8;
+    size_t dims[MAX_DIMS];
size_t ndim;
__host__ __device__ Shape() : ndim(0) {
for (int i = 0; i < MAX_DIMS; i++) dims[i] = 0;
}
__host__ Shape(std::initializer_list<size_t> list) : ndim(list.size()) {
if (ndim > MAX_DIMS) {
throw std::runtime_error("Too many dimensions");
}
size_t i = 0;
for (auto val : list) {
dims[i++] = val;
}
for (; i < MAX_DIMS; i++) dims[i] = 0;
}
__host__ Shape(const std::vector<size_t>& vec) : ndim(vec.size()) {
if (ndim > MAX_DIMS) {
throw std::runtime_error("Too many dimensions");
}
for (size_t i = 0; i < ndim; i++) {
dims[i] = vec[i];
}
for (size_t i = ndim; i < MAX_DIMS; i++) dims[i] = 0;
}
__host__ __device__ size_t operator[](size_t idx) const {
return dims[idx];
}
__host__ __device__ size_t& operator[](size_t idx) {
return dims[idx];
}
__host__ __device__ size_t size() const { return ndim; }
__host__ bool operator==(const Shape& other) const {
if (ndim != other.ndim) return false;
for (size_t i = 0; i < ndim; i++) {
if (dims[i] != other.dims[i]) return false;
}
return true;
}
__host__ bool operator!=(const Shape& other) const {
return !(*this == other);
}
__host__ __device__ bool empty() const {
return ndim == 0;
}
};
std::string format_shape(const Shape& shape) {
std::string result;
for (size_t i = 0; i < shape.size(); ++i) {
if (i > 0) result += ", ";
result += std::to_string(shape[i]);
}
return result;
}
class InvalidShapeException : public std::runtime_error {
public:
InvalidShapeException(
const std::string& param_name,
size_t expected,
size_t actual
)
: std::runtime_error(
std::format(
"Invalid {} shape. Expected {}, actual {}",
param_name,
expected,
actual
)
) {}
InvalidShapeException(
const std::string& message,
const Shape& shape_a,
const Shape& shape_b
)
: std::runtime_error(
std::format(
"{}. Shape A: [{}], Shape B: [{}]",
message,
format_shape(shape_a),
format_shape(shape_b)
)
) {}
};
} // namespace CUDANet
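A short sketch of how the fixed-capacity Shape compares to the old std::vector typedef (values are illustrative):

CUDANet::Shape s = {32, 64, 64};                    // ndim == 3, remaining dims zeroed
size_t channels = s[0];                              // operator[] is usable on host and device
CUDANet::Shape t(std::vector<size_t>{32, 64, 64});   // conversion from the old representation
if (s != t) {
    throw CUDANet::InvalidShapeException("Shapes must match", s, t);
}
// Constructing with more than Shape::MAX_DIMS (8) entries throws std::runtime_error.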

View File

@@ -16,11 +16,17 @@ enum class DType
// INT32, // Not implemented yet
};
+size_t dtype_size(DType dtype);
+// Forward declaration
+class Backend;
class Tensor
{
public:
    Tensor() = default;
+    Tensor(Shape shape, CUDANet::Backend* backend);
    Tensor(Shape shape, DType dtype, CUDANet::Backend* backend);
    Tensor(Tensor&& other) noexcept;
@@ -30,25 +36,19 @@ public:
    ~Tensor();
+    DType get_dtype() const;
    size_t size() const;
    size_t numel() const;
-    template <typename T>
-    const T* data() const {
-        return static_cast<T*>(d_ptr);
-    }
-    template <typename T>
-    T* data() {
-        return static_cast<T*>(d_ptr);
-    }
+    void* device_ptr() const;
+    void* device_ptr();
    void zero();
-    template <typename T>
-    void set_data(T *data) {
-        backend->copy_to_device(*this, data, total_size);
-    }
+    void fill(int value);
+    void set_data(void *data);
private:
    Shape shape;
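With the typed data<T>() accessors removed, callers copy host data in through set_data and cast the raw device pointer themselves. Sketch (assumes a FLOAT32 tensor and a backend obtained from the factory; buffer contents are illustrative):

CUDANet::Tensor t({1, 3, 8, 8}, CUDANet::DType::FLOAT32, backend.get());
t.zero();
std::vector<float> host(t.numel(), 0.5f);
t.set_data(host.data());                                // host -> device copy via the backend
float* dev = static_cast<float*>(t.device_ptr());       // caller performs the cast now
size_t bytes = t.numel() * CUDANet::dtype_size(t.get_dtype());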

View File

@@ -1,26 +0,0 @@
#ifndef CUDANET_HELPER_H
#define CUDANET_HELPER_H
#include <cuda_runtime.h>
#include <cstdio>
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 128
#endif // BLOCK_SIZE
/**
* @brief CUDA error checking macro
*
*/
#define CUDA_CHECK(call) \
do { \
cudaError_t result = call; \
if (result != cudaSuccess) { \
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", \
__FILE__, __LINE__, static_cast<unsigned int>(result), \
cudaGetErrorString(result), #call); \
exit(EXIT_FAILURE); \
} \
} while (0)
#endif // CUDANET_HELPER_H

src/backend_factory.cpp (new file, 40 lines)
View File

@@ -0,0 +1,40 @@
#include <stdexcept>
#include <memory>
#ifdef USE_CUDA
#include "backend/cuda/cuda.cuh"
#endif
#include "backend.hpp"
namespace CUDANet {
std::unique_ptr<Backend> BackendFactory::create(BackendType backend_type, const BackendConfig& config) {
switch (backend_type)
{
case BackendType::CUDA_BACKEND:
{
#ifdef USE_CUDA
if (!CUDANet::Backends::CUDA::is_cuda_available()) {
throw std::runtime_error("No CUDA devices found");
}
auto cuda = std::make_unique<CUDANet::Backends::CUDA>(config);
return cuda;
#else
throw std::runtime_error("Library was compiled without CUDA support.");
#endif
}
break;
default:
throw std::runtime_error("Invalid backend");
break;
}
return nullptr;
}
} // namespace CUDANet

src/backends/cuda/cuda.cu (new file, 76 lines)
View File

@@ -0,0 +1,76 @@
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <format>
#include "backend/cuda/cuda.cuh"
#include "tensor.hpp"
using namespace CUDANet::Backends;
CUDA::CUDA(const BackendConfig& config) {
device_id = config.device_id < 0 ? 0 : config.device_id;
supported_dtypes = {DType::FLOAT32};
default_dtype = DType::FLOAT32;
initialize();
}
bool CUDA::is_cuda_available() {
int device_count;
cudaError_t result = cudaGetDeviceCount(&device_count);
// Return false instead of crashing
if (result != cudaSuccess || device_count == 0) {
return false;
}
return true;
}
void CUDA::initialize() {
int device_count;
CUDA_CHECK(cudaGetDeviceCount(&device_count));
if (device_id >= device_count) {
throw std::runtime_error(std::format("Invalid device id {}, only {} devices available", device_id, device_count));
}
CUDA_CHECK(cudaSetDevice(device_id));
cudaDeviceProp deviceProp;
CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, device_id));
std::printf("Using CUDA device %d: %s\n", device_id, deviceProp.name);
}
bool CUDA::supports_dtype(DType dtype) const {
return supported_dtypes.contains(dtype);
}
void CUDA::set_default_dtype(DType dtype) {
if (!supported_dtypes.contains(dtype)) {
throw std::runtime_error("Unsupported dtype");
}
default_dtype = dtype;
}
CUDANet::DType CUDA::get_default_dtype() const {
if (default_dtype) {
return default_dtype.value();
}
const_cast<CUDA*>(this)->default_dtype = DType::FLOAT32;
return DType::FLOAT32;
}
void* CUDA::allocate(size_t bytes) {
void* d_ptr = nullptr;
CUDA_CHECK(cudaMalloc(&d_ptr, bytes));
return d_ptr;
}
void CUDA::deallocate(void* ptr) {
CUDA_CHECK(cudaFree(ptr));
}

View File

@@ -1,39 +0,0 @@
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <cuda_helper.cuh>
#include "backend/cuda.cuh"
cudaDeviceProp initializeCUDA() {
int deviceCount;
CUDA_CHECK(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
std::fprintf(stderr, "No CUDA devices found. Exiting.\n");
std::exit(EXIT_FAILURE);
}
int device = 0;
CUDA_CHECK(cudaSetDevice(device));
cudaDeviceProp deviceProp;
CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, device));
std::printf("Using CUDA device %d: %s\n", device, deviceProp.name);
return deviceProp;
}
using namespace CUDANet::Backend;
void* CUDA::allocate(size_t bytes) {
void* d_ptr = nullptr;
CUDA_CHECK(cudaMalloc(&d_ptr, bytes));
return d_ptr;
}
void CUDA::deallocate(void* ptr) {
CUDA_CHECK(cudaFree(ptr));
}

View File

@@ -1,12 +1,19 @@
#include "activation_functions.cuh" #include "backend/cuda/kernels/activation_functions.cuh"
#include "cuda_helper.cuh"
using namespace CUDANet; using namespace CUDANet;
__global__ void Kernels::sigmoid( template
__global__ void Kernels::sigmoid<float>(
const float* __restrict__ src, const float* __restrict__ src,
float* __restrict__ dst, float* __restrict__ dst,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::sigmoid(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
) { ) {
int stride = gridDim.x * blockDim.x; int stride = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
@@ -16,10 +23,17 @@ __global__ void Kernels::sigmoid(
    }
}
-__global__ void Kernels::relu(
+template __global__ void Kernels::relu<float>(
    const float* __restrict__ src,
    float* __restrict__ dst,
    const unsigned int len
+);
+template <typename T>
+__global__ void Kernels::relu(
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const unsigned int len
) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;

View File

@@ -1,10 +1,10 @@
#include <iostream>
-#include "convolution.cuh"
+#include "backend/cuda/kernels/convolution.cuh"
using namespace CUDANet;
-__global__ void Kernels::convolution(
+template __global__ void Kernels::convolution<float>(
    const float* __restrict__ d_input,
    const float* __restrict__ d_kernel,
    const float* __restrict__ d_bias,
@@ -14,6 +14,19 @@ __global__ void Kernels::convolution(
    const Shape kernel_shape,
    const Shape stride_shape,
    const Shape output_shape
+);
+template <typename T>
+__global__ void Kernels::convolution(
+    const T* __restrict__ d_input,
+    const T* __restrict__ d_kernel,
+    const T* __restrict__ d_bias,
+    T* __restrict__ d_output,
+    const Shape input_shape,
+    const Shape padding_shape,
+    const Shape kernel_shape,
+    const Shape stride_shape,
+    const Shape output_shape
) {
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    int i = blockDim.y * blockIdx.y + threadIdx.y;
@@ -23,7 +36,7 @@ __global__ void Kernels::convolution(
        return;
    }
-    float sum = 0.0f;
+    T sum = static_cast<T>(0);
    // Iterate over kernel and input matrix
    for (int c = 0; c < input_shape[2]; c++) {
@@ -39,7 +52,7 @@ __global__ void Kernels::convolution(
                    continue;
                }
-                int kernelIndex =
+                int kernel_idx =
                    f * kernel_shape[0] * kernel_shape[1] * input_shape[2] +
                    c * kernel_shape[0] * kernel_shape[1] +
                    k * kernel_shape[1] + l;
@@ -48,7 +61,7 @@ __global__ void Kernels::convolution(
                        input_shape[1] +
                    (j * stride_shape[1] + l - padding_shape[1]);
-                sum += d_kernel[kernelIndex] * d_input[inputIndex];
+                sum += d_kernel[kernel_idx] * d_input[inputIndex];
            }
        }
    }

View File

@@ -1,19 +1,28 @@
#include "cuda_helper.cuh" #include "backend/cuda/cuda.cuh"
#include "matmul.cuh" #include "backend/cuda/kernels/matmul.cuh"
using namespace CUDANet; using namespace CUDANet;
__global__ void Kernels::mat_vec_mul( template __global__ void Kernels::mat_vec_mul<float>(
const float* __restrict__ d_matrix, const float* __restrict__ d_matrix,
const float* __restrict__ d_vector, const float* __restrict__ d_vector,
float* __restrict__ d_output, float* __restrict__ d_output,
const unsigned int w, const unsigned int w,
const unsigned int h const unsigned int h
);
template <typename T>
__global__ void Kernels::mat_vec_mul(
const T* __restrict__ d_matrix,
const T* __restrict__ d_vector,
T* __restrict__ d_output,
const unsigned int w,
const unsigned int h
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < h) { if (tid < h) {
float temp = 0.0f; T temp = static_cast<T>(0);
for (unsigned int j = 0; j < w; j++) { for (unsigned int j = 0; j < w; j++) {
temp += d_matrix[tid * w + j] * d_vector[j]; temp += d_matrix[tid * w + j] * d_vector[j];
@@ -23,11 +32,19 @@ __global__ void Kernels::mat_vec_mul(
    }
}
-__global__ void Kernels::vec_vec_add(
+template __global__ void Kernels::vec_vec_add<float>(
    const float* __restrict__ d_vector1,
    const float* __restrict__ d_vector2,
    float* __restrict__ d_output,
    const unsigned int w
+);
+template <typename T>
+__global__ void Kernels::vec_vec_add(
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
+    const unsigned int w
) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= w) {
@@ -36,11 +53,19 @@ __global__ void Kernels::vec_vec_add(
d_output[tid] = d_vector1[tid] + d_vector2[tid]; d_output[tid] = d_vector1[tid] + d_vector2[tid];
} }
__global__ void Kernels::vec_vec_sub( template __global__ void Kernels::vec_vec_sub<float>(
const float* __restrict__ d_vector1, const float* __restrict__ d_vector1,
const float* __restrict__ d_vector2, const float* __restrict__ d_vector2,
float* __restrict__ d_output, float* __restrict__ d_output,
const unsigned int w const unsigned int w
);
template <typename T>
__global__ void Kernels::vec_vec_sub(
const T* __restrict__ d_vector1,
const T* __restrict__ d_vector2,
T* __restrict__ d_output,
const unsigned int w
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= w) { if (tid >= w) {
@@ -49,11 +74,19 @@ __global__ void Kernels::vec_vec_sub(
d_output[tid] = d_vector1[tid] - d_vector2[tid]; d_output[tid] = d_vector1[tid] - d_vector2[tid];
} }
__global__ void Kernels::vec_vec_mul( template __global__ void Kernels::vec_vec_mul<float>(
const float* __restrict__ d_vector1, const float* __restrict__ d_vector1,
const float* __restrict__ d_vector2, const float* __restrict__ d_vector2,
float* __restrict__ d_output, float* __restrict__ d_output,
const unsigned int w const unsigned int w
);
template <typename T>
__global__ void Kernels::vec_vec_mul(
const T* __restrict__ d_vector1,
const T* __restrict__ d_vector2,
T* __restrict__ d_output,
const unsigned int w
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= w) { if (tid >= w) {
@@ -62,11 +95,19 @@ __global__ void Kernels::vec_vec_mul(
d_output[tid] = d_vector1[tid] * d_vector2[tid]; d_output[tid] = d_vector1[tid] * d_vector2[tid];
} }
__global__ void Kernels::vec_scalar_sub( template __global__ void Kernels::vec_scalar_sub<float>(
const float* __restrict__ d_src, const float* __restrict__ d_src,
float* __restrict__ d_out, float* __restrict__ d_out,
const float* __restrict__ d_scalar, const float* __restrict__ d_scalar,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_scalar_sub(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) { if (tid >= len) {
@@ -75,11 +116,19 @@ __global__ void Kernels::vec_scalar_sub(
d_out[tid] = d_src[tid] - *d_scalar; d_out[tid] = d_src[tid] - *d_scalar;
} }
__global__ void Kernels::vec_scalar_add( template __global__ void Kernels::vec_scalar_add<float>(
const float* __restrict__ d_src, const float* __restrict__ d_src,
float* __restrict__ d_out, float* __restrict__ d_out,
const float* __restrict__ d_scalar, const float* __restrict__ d_scalar,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_scalar_add(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) { if (tid >= len) {
@@ -88,11 +137,19 @@ __global__ void Kernels::vec_scalar_add(
d_out[tid] = d_src[tid] + *d_scalar; d_out[tid] = d_src[tid] + *d_scalar;
} }
__global__ void Kernels::vec_scalar_div( template __global__ void Kernels::vec_scalar_div<float>(
const float* __restrict__ d_src, const float* __restrict__ d_src,
float* __restrict__ d_out, float* __restrict__ d_out,
const float* __restrict__ d_scalar, const float* __restrict__ d_scalar,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_scalar_div(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) { if (tid >= len) {
@@ -101,11 +158,19 @@ __global__ void Kernels::vec_scalar_div(
d_out[tid] = d_src[tid] / *d_scalar; d_out[tid] = d_src[tid] / *d_scalar;
} }
__global__ void Kernels::vec_scalar_mul( template __global__ void Kernels::vec_scalar_mul<float>(
const float* __restrict__ d_src, const float* __restrict__ d_src,
float* __restrict__ d_out, float* __restrict__ d_out,
const float* __restrict__ d_scalar, const float* __restrict__ d_scalar,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_scalar_mul(
const T* __restrict__ d_src,
T* __restrict__ d_out,
const T* __restrict__ d_scalar,
const unsigned int len
) { ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= len) { if (tid >= len) {
@@ -114,52 +179,85 @@ __global__ void Kernels::vec_scalar_mul(
d_out[tid] = d_src[tid] * *d_scalar; d_out[tid] = d_src[tid] * *d_scalar;
} }
__global__ void Kernels::vec_exp( template __global__ void Kernels::vec_exp<float>(
const float* __restrict__ src, const float* __restrict__ src,
float* __restrict__ dst, float* __restrict__ dst,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_exp(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
) { ) {
int stride = gridDim.x * blockDim.x; int stride = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < len; i += stride) { for (int i = tid; i < len; i += stride) {
// TODO: separate implementation for __half
dst[i] = expf(src[i]); dst[i] = expf(src[i]);
} }
} }
__global__ void Kernels::vec_sqrt( template __global__ void Kernels::vec_sqrt<float>(
const float* __restrict__ src, const float* __restrict__ src,
float* __restrict__ dst, float* __restrict__ dst,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_sqrt(
const T* __restrict__ src,
T* __restrict__ dst,
const unsigned int len
) { ) {
int stride = gridDim.x * blockDim.x; int stride = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < len; i += stride) { for (int i = tid; i < len; i += stride) {
// TODO: separate implementation for __half
dst[i] = sqrtf(src[i]); dst[i] = sqrtf(src[i]);
} }
} }
__global__ void Kernels::vec_scale( template __global__ void Kernels::vec_scale<float>(
const float* __restrict__ src, const float* __restrict__ src,
float* __restrict__ dst, float* __restrict__ dst,
const float* __restrict__ scale, const float* __restrict__ scale,
const float* epsilon, const float* epsilon,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::vec_scale(
const T* __restrict__ src,
T* __restrict__ dst,
const T* __restrict__ scale,
const T* epsilon,
const unsigned int len
) { ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) { if (idx < len) {
// TODO: separate implementation for __half
float inv_std = rsqrtf(*scale + *epsilon); float inv_std = rsqrtf(*scale + *epsilon);
dst[idx] = src[idx] * inv_std; dst[idx] = src[idx] * inv_std;
} }
} }
__global__ void Kernels::max_reduce( template __global__ void Kernels::max_reduce<float>(
const float* __restrict__ d_vector, const float* __restrict__ d_vector,
float* __restrict__ d_output, float* __restrict__ d_output,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::max_reduce(
const T* __restrict__ d_vector,
T* __restrict__ d_output,
const unsigned int len
) { ) {
__shared__ float shared_max[BLOCK_SIZE]; __shared__ T shared_max[BLOCK_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) { if (i < len) {
@@ -172,6 +270,7 @@ __global__ void Kernels::max_reduce(
for (int s = blockDim.x / 2; s > 0; s >>= 1) { for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) { if (threadIdx.x < s) {
// TODO: separate implementation for __half
shared_max[threadIdx.x] = fmaxf(shared_max[threadIdx.x], shared_max[threadIdx.x + s]); shared_max[threadIdx.x] = fmaxf(shared_max[threadIdx.x], shared_max[threadIdx.x + s]);
} }
__syncthreads(); __syncthreads();
@@ -182,18 +281,25 @@ __global__ void Kernels::max_reduce(
} }
} }
__global__ void Kernels::sum_reduce( template __global__ void Kernels::sum_reduce<float>(
const float* __restrict__ d_vector, const float* __restrict__ d_vector,
float* __restrict__ d_output, float* __restrict__ d_output,
const unsigned int len const unsigned int len
);
template <typename T>
__global__ void Kernels::sum_reduce(
const T* __restrict__ d_vector,
T* __restrict__ d_output,
const unsigned int len
) { ) {
__shared__ float partial_sum[BLOCK_SIZE]; __shared__ T partial_sum[BLOCK_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) { if (i < len) {
partial_sum[threadIdx.x] = d_vector[i]; partial_sum[threadIdx.x] = d_vector[i];
} else { } else {
partial_sum[threadIdx.x] = 0.0f; partial_sum[threadIdx.x] = static_cast<T>(0);
} }
__syncthreads(); __syncthreads();
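sum_reduce and max_reduce above both use the classic shared-memory tree reduction: each block loads up to BLOCK_SIZE elements, halves the active stride until one partial result per block remains, and the host relaunches the kernel over the partials until a single value is left. A compact sketch of the block-level step (BLOCK_SIZE here stands in for the project's own constant; this is not the repository's exact kernel):

#define BLOCK_SIZE 256  // assumption: placeholder for the project's BLOCK_SIZE

template <typename T>
__global__ void block_sum(const T* __restrict__ in, T* __restrict__ out, unsigned int len) {
    __shared__ T buf[BLOCK_SIZE];
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    buf[threadIdx.x] = (i < len) ? in[i] : static_cast<T>(0);
    __syncthreads();
    // Fold the upper half of the active range into the lower half each step.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            buf[threadIdx.x] += buf[threadIdx.x + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        out[blockIdx.x] = buf[0];  // one partial per block; reduce again on the next pass
    }
}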

View File

@@ -1,10 +1,9 @@
#include "cuda_helper.cuh"
#include "layer.hpp" #include "layer.hpp"
#include "pooling.cuh" #include "backend/cuda/kernels/pool.cuh"
using namespace CUDANet; using namespace CUDANet;
__global__ void Kernels::max_pool( template __global__ void Kernels::max_pool<float>(
const float* __restrict__ d_input, const float* __restrict__ d_input,
float* __restrict__ d_output, float* __restrict__ d_output,
const Shape input_shape, const Shape input_shape,
@@ -12,6 +11,17 @@ __global__ void Kernels::max_pool(
const Shape pool_shape, const Shape pool_shape,
const Shape stride_shape, const Shape stride_shape,
const Shape padding_shape const Shape padding_shape
);
template <typename T>
__global__ void Kernels::max_pool(
const T* __restrict__ d_input,
T* __restrict__ d_output,
const Shape input_shape,
const Shape output_shape,
const Shape pool_shape,
const Shape stride_shape,
const Shape padding_shape
) { ) {
int j = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockDim.y * blockIdx.y + threadIdx.y; int i = blockDim.y * blockIdx.y + threadIdx.y;
@@ -21,7 +31,7 @@ __global__ void Kernels::max_pool(
return; return;
} }
float max = 0.0f; T max = static_cast<T>(0);
for (int k = 0; k < pool_shape[0]; k++) { for (int k = 0; k < pool_shape[0]; k++) {
for (int l = 0; l < pool_shape[1]; l++) { for (int l = 0; l < pool_shape[1]; l++) {
@@ -44,7 +54,7 @@ __global__ void Kernels::max_pool(
max; max;
} }
__global__ void Kernels::avg_pool( template __global__ void Kernels::avg_pool<float>(
const float* __restrict__ d_input, const float* __restrict__ d_input,
float* __restrict__ d_output, float* __restrict__ d_output,
const Shape input_shape, const Shape input_shape,
@@ -52,6 +62,17 @@ __global__ void Kernels::avg_pool(
const Shape pool_shape, const Shape pool_shape,
const Shape stride_shape, const Shape stride_shape,
const Shape padding_shape const Shape padding_shape
);
template <typename T>
__global__ void Kernels::avg_pool(
const T* __restrict__ d_input,
T* __restrict__ d_output,
const Shape input_shape,
const Shape output_shape,
const Shape pool_shape,
const Shape stride_shape,
const Shape padding_shape
) { ) {
int j = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockDim.y * blockIdx.y + threadIdx.y; int i = blockDim.y * blockIdx.y + threadIdx.y;
@@ -61,7 +82,7 @@ __global__ void Kernels::avg_pool(
return; return;
} }
float sum = 0.0f; T sum = static_cast<T>(0);
for (int k = 0; k < pool_shape[0]; k++) { for (int k = 0; k < pool_shape[0]; k++) {
for (int l = 0; l < pool_shape[1]; l++) { for (int l = 0; l < pool_shape[1]; l++) {
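A plain host-side reference is useful when testing the templated pooling kernels above. The sketch below computes a single average-pool output element, assuming CHW layout and shapes ordered {H, W, C} as in the rest of the diff; whether padded positions count toward the divisor is not visible in the truncated hunk, so dividing by the full window size here is an assumption:

#include <vector>

// Reference for one avg-pool output element at (c, out_i, out_j); sketch only.
float avg_pool_ref(const std::vector<float>& in, int H, int W, int c,
                   int out_i, int out_j, int pool_h, int pool_w,
                   int stride_h, int stride_w, int pad_h, int pad_w) {
    float sum = 0.0f;
    for (int k = 0; k < pool_h; ++k) {
        for (int l = 0; l < pool_w; ++l) {
            int r   = out_i * stride_h + k - pad_h;
            int col = out_j * stride_w + l - pad_w;
            if (r < 0 || r >= H || col < 0 || col >= W) {
                continue;  // padded positions contribute zero
            }
            sum += in[c * H * W + r * W + col];
        }
    }
    return sum / (pool_h * pool_w);  // assumed divisor: full window, padding included
}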

View File

@@ -1,31 +1,76 @@
#include "backend/cuda.cuh" #include "backend/cuda/cuda.cuh"
#include "kernels/activation_functions.cuh" #include "backend/cuda/kernels/activation_functions.cuh"
#include "kernels/convolution.cuh" #include "backend/cuda/kernels/convolution.cuh"
#include "kernels/matmul.cuh" #include "backend/cuda/kernels/matmul.cuh"
#include "kernels/pooling.cuh" #include "backend/cuda/kernels/pool.cuh"
#include "utils/cuda_helper.cuh"
using namespace CUDANet::Backend; using namespace CUDANet::Backends;
void CUDA::relu(Tensor& tensor) { void CUDA::relu(Tensor& tensor) {
switch (tensor.get_dtype()) {
case DType::FLOAT32:
relu_impl<float>(tensor);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::relu_impl<float>(Tensor& tensor);
template <typename T>
void CUDA::relu_impl(Tensor& tensor) {
int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE; int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
Kernels::relu<<<gridSize, BLOCK_SIZE>>>( Kernels::relu<<<gridSize, BLOCK_SIZE>>>(
tensor.data<float>(), tensor.data<float>(), tensor.numel() static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaDeviceSynchronize());
} }
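Every backend entry point in this file now follows the same shape: a public, non-templated function switches on the tensor's DType and forwards to a templated *_impl, with an explicit instantiation placed next to the definition. Adding another precision would mean adding a case plus an instantiation; a hedged sketch of that extension (DType::FLOAT16 and the __half path are assumptions, not something this change introduces):

// Sketch of how the dispatch above would grow with a second dtype.
// DType::FLOAT16 and half-aware kernels are hypothetical (see the __half TODOs above).
void CUDA::relu(Tensor& tensor) {
    switch (tensor.get_dtype()) {
        case DType::FLOAT32:
            relu_impl<float>(tensor);
            break;
        case DType::FLOAT16:             // assumed enum value
            relu_impl<__half>(tensor);   // assumed instantiation
            break;
        default:
            throw std::runtime_error("Unsupported dtype");
    }
}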
void CUDA::sigmoid(Tensor& tensor) { void CUDA::sigmoid(CUDANet::Tensor& tensor) {
switch (tensor.get_dtype()) {
case DType::FLOAT32:
sigmoid_impl<float>(tensor);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::sigmoid_impl<float>(Tensor& tensor);
template <typename T>
void CUDA::sigmoid_impl(CUDANet::Tensor& tensor) {
int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE; int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
Kernels::sigmoid<<<gridSize, BLOCK_SIZE>>>( Kernels::sigmoid<<<gridSize, BLOCK_SIZE>>>(
tensor.data<float>(), tensor.data<float>(), tensor.numel() static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaDeviceSynchronize());
} }
void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) { void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
switch (tensor.get_dtype()) {
case DType::FLOAT32:
softmax_impl<float>(tensor, temp_max, temp_sum);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void
CUDA::softmax_impl<float>(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum);
template <typename T>
void CUDA::softmax_impl(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE; int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
// Find max value // Find max value
@@ -33,14 +78,13 @@ void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
// Subtract max value to improve numerical stability // Subtract max value to improve numerical stability
Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>( Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
tensor.data<float>(), tensor.data<float>(), temp_max.data<float>(), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(temp_max.device_ptr()), tensor.numel()
tensor.numel()
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
// Compute exponentials // Compute exponentials
Kernels::vec_exp<<<gridSize, BLOCK_SIZE>>>( Kernels::vec_exp<<<gridSize, BLOCK_SIZE>>>(
tensor.data<float>(), tensor.data<float>(), tensor.numel() static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
@@ -48,8 +92,7 @@ void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
sum(tensor, temp_sum); sum(tensor, temp_sum);
Kernels::vec_scalar_div<<<gridSize, BLOCK_SIZE>>>( Kernels::vec_scalar_div<<<gridSize, BLOCK_SIZE>>>(
tensor.data<float>(), tensor.data<float>(), temp_sum.data<float>(), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(temp_sum.device_ptr()), tensor.numel()
tensor.numel()
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaDeviceSynchronize());
@@ -62,20 +105,50 @@ CUDANet::Tensor& CUDA::dense(
CUDANet::Tensor& output, CUDANet::Tensor& output,
const size_t input_size, const size_t input_size,
const size_t output_size const size_t output_size
) {
switch (input.get_dtype()) {
case DType::FLOAT32:
return dense_impl<float>(
weights, biases, input, output, input_size, output_size
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::dense_impl<float>(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const size_t input_size,
const size_t output_size
);
template <typename T>
CUDANet::Tensor& CUDA::dense_impl(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const size_t input_size,
const size_t output_size
) { ) {
auto forwardGridSize = auto forwardGridSize =
(std::max(input_size, output_size) + BLOCK_SIZE - 1) / BLOCK_SIZE; (std::max(input_size, output_size) + BLOCK_SIZE - 1) / BLOCK_SIZE;
auto biasGridSize = (output_size + BLOCK_SIZE - 1) / BLOCK_SIZE; auto biasGridSize = (output_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>( Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>(
weights.data<float>(), input.data<float>(), output.data<float>(), static_cast<const T*>(weights.device_ptr()), static_cast<const T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_size,
input_size, output_size output_size
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
Kernels::vec_vec_add<<<biasGridSize, BLOCK_SIZE>>>( Kernels::vec_vec_add<<<biasGridSize, BLOCK_SIZE>>>(
biases.data<float>(), output.data<float>(), output.data<float>(), static_cast<const T*>(biases.device_ptr()), static_cast<T*>(output.device_ptr()), static_cast<T*>(output.device_ptr()), output_size
output_size
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaDeviceSynchronize());
@@ -93,18 +166,55 @@ CUDANet::Tensor& CUDA::conv2d(
const CUDANet::Shape kernel_shape, const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape, const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape const CUDANet::Shape out_shape
) {
switch (input.get_dtype()) {
case DType::FLOAT32:
return conv2d_impl<float>(
weights, biases, input, output, in_shape, padding_shape,
kernel_shape, stride_shape, out_shape
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::conv2d_impl<float>(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const CUDANet::Shape in_shape,
const CUDANet::Shape padding_shape,
const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape
);
template <typename T>
CUDANet::Tensor& CUDA::conv2d_impl(
const CUDANet::Tensor& weights,
const CUDANet::Tensor& biases,
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
const CUDANet::Shape in_shape,
const CUDANet::Shape padding_shape,
const CUDANet::Shape kernel_shape,
const CUDANet::Shape stride_shape,
const CUDANet::Shape out_shape
) { ) {
dim3 block(8, 8, 8); dim3 block(8, 8, 8);
dim3 grid( dim3 grid(
(out_shape[0] + block.x - 1) / block.x, (out_shape[0] + block.x - 1) / block.x,
(out_shape[1] + block.y - 1) / block.y, (out_shape[1] + block.y - 1) / block.y,
(out_shape[3] + block.z - 1) / block.z (out_shape[2] + block.z - 1) / block.z
); );
Kernels::convolution<<<grid, block>>>( Kernels::convolution<<<grid, block>>>(
input.data<float>(), weights.data<float>(), biases.data<float>(), static_cast<const T*>(input.device_ptr()), static_cast<const T*>(weights.device_ptr()), static_cast<const T*>(biases.device_ptr()), static_cast<T*>(output.device_ptr()),
output.data<float>(), in_shape, padding_shape, kernel_shape, in_shape, padding_shape, kernel_shape, stride_shape, out_shape
stride_shape, out_shape
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaDeviceSynchronize());
@@ -112,14 +222,48 @@ CUDANet::Tensor& CUDA::conv2d(
return output; return output;
} }
CUDANet::Tensor& CUDA::maxPool2d( CUDANet::Tensor& CUDA::max_pool2d(
const CUDANet::Tensor& input, const CUDANet::Tensor& input,
CUDANet::Tensor& output, CUDANet::Tensor& output,
CUDANet::Shape input_shape, CUDANet::Shape input_shape,
CUDANet::Shape pool_shape, CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape, CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape, CUDANet::Shape padding_shape,
CUDANet::Shape output_shape CUDANet::Shape output_shape
) {
switch (input.get_dtype()) {
case DType::FLOAT32:
return max_pool2d_impl<float>(
input, output, input_shape, pool_shape, stride_shape,
padding_shape, output_shape
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::max_pool2d_impl<float>(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
);
template <typename T>
CUDANet::Tensor& CUDA::max_pool2d_impl(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) { ) {
dim3 block(8, 8, 8); dim3 block(8, 8, 8);
dim3 grid( dim3 grid(
@@ -129,8 +273,242 @@ CUDANet::Tensor& CUDA::maxPool2d(
); );
Kernels::max_pool<<<grid, block>>>( Kernels::max_pool<<<grid, block>>>(
input.data<float>(), output.data<float>(), input_shape, output_shape, pool_shape, static_cast<const T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_shape, output_shape,
stride_shape, padding_shape pool_shape, stride_shape, padding_shape
);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return output;
}
CUDANet::Tensor& CUDA::avg_pool2d(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) {
switch (input.get_dtype()) {
case DType::FLOAT32:
return avg_pool2d_impl<float>(
input, output, input_shape, pool_shape, stride_shape,
padding_shape, output_shape
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::avg_pool2d_impl<float>(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
);
template <typename T>
CUDANet::Tensor& CUDA::avg_pool2d_impl(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::Shape output_shape
) {
dim3 block(8, 8, 8);
dim3 grid(
(output_shape[0] + block.x - 1) / block.x,
(output_shape[1] + block.y - 1) / block.y,
(output_shape[2] + block.z - 1) / block.z
);
Kernels::avg_pool<<<grid, block>>>(
static_cast<const T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_shape, output_shape,
pool_shape, stride_shape, padding_shape
);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return output;
}
CUDANet::Tensor& CUDA::batch_norm(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Tensor& weights,
CUDANet::Tensor& biases,
CUDANet::Tensor& running_mean,
CUDANet::Tensor& running_var,
CUDANet::Tensor& epsilon
) {
switch (input.get_dtype()) {
case DType::FLOAT32:
return batch_norm_impl<float>(
input, output, input_shape, weights, biases, running_mean,
running_var, epsilon
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::batch_norm_impl<float>(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Tensor& weights,
CUDANet::Tensor& biases,
CUDANet::Tensor& running_mean,
CUDANet::Tensor& running_var,
CUDANet::Tensor& epsilon
);
template <typename T>
CUDANet::Tensor& CUDA::batch_norm_impl(
const CUDANet::Tensor& input,
CUDANet::Tensor& output,
CUDANet::Shape input_shape,
CUDANet::Tensor& weights,
CUDANet::Tensor& biases,
CUDANet::Tensor& running_mean,
CUDANet::Tensor& running_var,
CUDANet::Tensor& epsilon
) {
auto gridSize =
(input_shape[0] * input_shape[1] + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int i = 0; i < input_shape[2]; i++) {
// Subtract mean from input
Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
static_cast<const T*>(input.device_ptr()) + i * input_shape[0] * input_shape[1],
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
&static_cast<T*>(running_mean.device_ptr())[i], input_shape[0] * input_shape[1]
);
CUDA_CHECK(cudaGetLastError());
// Divide by sqrt(running_var + epsilon)
Kernels::vec_scale<<<gridSize, BLOCK_SIZE>>>(
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
&static_cast<T*>(running_var.device_ptr())[i], static_cast<T*>(epsilon.device_ptr()),
input_shape[0] * input_shape[1]
);
CUDA_CHECK(cudaGetLastError());
// Multiply by weights
Kernels::vec_scalar_mul<<<gridSize, BLOCK_SIZE>>>(
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
&static_cast<T*>(weights.device_ptr())[i], input_shape[0] * input_shape[1]
);
CUDA_CHECK(cudaGetLastError());
// Add biases
Kernels::vec_scalar_add<<<gridSize, BLOCK_SIZE>>>(
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
&static_cast<T*>(biases.device_ptr())[i], input_shape[0] * input_shape[1]
);
CUDA_CHECK(cudaGetLastError());
}
CUDA_CHECK(cudaDeviceSynchronize());
return output;
}
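batch_norm_impl stages the per-channel computation as four launches: subtract the running mean, scale by rsqrt(var + eps), multiply by the learned weight, add the bias. Per element that composes to the usual inference-time formula; a host-side reference, as a sketch for float data:

#include <cmath>

// What the four kernel launches above compute for one element x of a channel.
float batch_norm_ref(float x, float running_mean, float running_var,
                     float eps, float weight, float bias) {
    float inv_std = 1.0f / std::sqrt(running_var + eps);  // mirrors rsqrtf in vec_scale
    return (x - running_mean) * inv_std * weight + bias;
}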
CUDANet::Tensor& CUDA::concat(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) {
switch (input_a.get_dtype()) {
case DType::FLOAT32:
return concat_impl<float>(
input_a, input_b, output
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::concat_impl<float>(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
);
template <typename T>
CUDANet::Tensor& CUDA::concat_impl(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) {
CUDA_CHECK(cudaMemcpy(
static_cast<T*>(output.device_ptr()), static_cast<const T*>(input_a.device_ptr()), input_a.size(),
cudaMemcpyDeviceToDevice
));
CUDA_CHECK(cudaMemcpy(
static_cast<T*>(output.device_ptr()) + input_a.numel(), static_cast<const T*>(input_b.device_ptr()), input_b.size(),
cudaMemcpyDeviceToDevice
));
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return output;
}
CUDANet::Tensor& CUDA::add(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) {
switch (input_a.get_dtype()) {
case DType::FLOAT32:
return add_impl<float>(
input_a, input_b, output
);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template CUDANet::Tensor& CUDA::add_impl<float>(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
);
template <typename T>
CUDANet::Tensor& CUDA::add_impl(
CUDANet::Tensor& input_a,
CUDANet::Tensor& input_b,
CUDANet::Tensor& output
) {
auto gridSize = (input_a.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
static_cast<const T*>(input_a.device_ptr()), static_cast<const T*>(input_b.device_ptr()), static_cast<T*>(output.device_ptr()), input_a.numel()
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaDeviceSynchronize());

View File

@@ -1,28 +0,0 @@
#include "add.hpp"
#include "matmul.cuh"
#include "cuda_helper.cuh"
using namespace CUDANet::Layers;
void Add::initCUDA() {
d_output = nullptr;
CUDA_CHECK(cudaMalloc((void**)&d_output, sizeof(float) * inputSize));
gridSize = (inputSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
}
void Add::delCUDA() {
cudaFree(d_output);
}
float* Add::forwardCUDA(const float* d_inputA, const float* d_inputB) {
Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
d_inputA, d_inputB, d_output, inputSize
);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return d_output;
}

View File

@@ -1,45 +0,0 @@
#include "avg_pooling.hpp"
#include "cuda_helper.cuh"
#include "pooling.cuh"
using namespace CUDANet::Layers;
void AvgPooling2d::initCUDA() {
d_output = nullptr;
CUDA_CHECK(cudaMalloc(
(void**)&d_output,
sizeof(float) * outputSize.first * outputSize.second * nChannels
));
}
void AvgPooling2d::delCUDA() {
cudaFree(d_output);
}
float* AvgPooling2d::forwardCUDA(const float* d_input) {
dim3 block(8, 8, 8);
dim3 grid(
(outputSize.first + block.x - 1) / block.x,
(outputSize.second + block.y - 1) / block.y,
(nChannels + block.z - 1) / block.z
);
Kernels::avg_pooling<<<grid, block>>>(
d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
stride, padding
);
CUDA_CHECK(cudaGetLastError());
activation->activate(d_output);
CUDA_CHECK(cudaDeviceSynchronize());
return d_output;
}
void AdaptiveAvgPooling2d::initCUDA() {
cudaFree(d_output);
cudaMalloc(
(void**)&d_output,
sizeof(float) * outputSize.first * outputSize.second * nChannels
);
}

View File

@@ -1,120 +0,0 @@
#include <vector>
#include "activation.hpp"
#include "batch_norm.hpp"
#include "cuda_helper.cuh"
#include "layer.hpp"
#include "matmul.cuh"
#include "vector.cuh"
using namespace CUDANet::Layers;
void BatchNorm2d::initCUDA() {
d_output = nullptr;
CUDA_CHECK(cudaMalloc(
(void **)&d_output,
sizeof(float) * inputSize.first * inputSize.second * inputChannels
));
d_running_mean = nullptr;
CUDA_CHECK(
cudaMalloc((void **)&d_running_mean, sizeof(float) * inputChannels)
);
d_running_var = nullptr;
CUDA_CHECK(
cudaMalloc((void **)&d_running_var, sizeof(float) * inputChannels)
);
d_weights = nullptr;
CUDA_CHECK(cudaMalloc((void **)&d_weights, sizeof(float) * inputChannels));
d_biases = nullptr;
CUDA_CHECK(cudaMalloc((void **)&d_biases, sizeof(float) * inputChannels));
d_length = nullptr;
float length = (float)inputSize.first * inputSize.second;
CUDA_CHECK(cudaMalloc((void **)&d_length, sizeof(float)));
CUDA_CHECK(
cudaMemcpy(d_length, &length, sizeof(float), cudaMemcpyHostToDevice)
);
d_epsilon = nullptr;
CUDA_CHECK(cudaMalloc((void **)&d_epsilon, sizeof(float)));
CUDA_CHECK(
cudaMemcpy(d_epsilon, &epsilon, sizeof(float), cudaMemcpyHostToDevice)
);
gridSize =
(inputSize.first * inputSize.second + BLOCK_SIZE - 1) / BLOCK_SIZE;
}
void BatchNorm2d::delCUDA() {
cudaFree(d_output);
cudaFree(d_running_mean);
cudaFree(d_running_var);
cudaFree(d_weights);
cudaFree(d_biases);
cudaFree(d_length);
cudaFree(d_epsilon);
}
void BatchNorm2d::toCuda() {
CUDA_CHECK(cudaMemcpy(
d_weights, weights.data(), sizeof(float) * inputChannels,
cudaMemcpyHostToDevice
));
CUDA_CHECK(cudaMemcpy(
d_biases, biases.data(), sizeof(float) * inputChannels,
cudaMemcpyHostToDevice
));
CUDA_CHECK(cudaMemcpy(
d_running_mean, running_mean.data(), sizeof(float) * inputChannels,
cudaMemcpyHostToDevice
));
CUDA_CHECK(cudaMemcpy(
d_running_var, running_var.data(), sizeof(float) * inputChannels,
cudaMemcpyHostToDevice
));
}
float *BatchNorm2d::forwardCUDA(const float *d_input) {
// Compute per-channel batch normalization
for (int i = 0; i < inputChannels; i++) {
// Subtract mean from input
Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
d_input + i * inputSize.first * inputSize.second,
d_output + i * inputSize.first * inputSize.second,
&d_running_mean[i], inputSize.first * inputSize.second
);
CUDA_CHECK(cudaGetLastError());
// Divide by sqrt(running_var + epsilon)
Kernels::vec_scale<<<gridSize, BLOCK_SIZE>>>(
d_output + i * inputSize.first * inputSize.second,
d_output + i * inputSize.first * inputSize.second,
&d_running_var[i], d_epsilon, inputSize.first * inputSize.second
);
CUDA_CHECK(cudaGetLastError());
// Multiply by weights
Kernels::vec_scalar_mul<<<gridSize, BLOCK_SIZE>>>(
d_output + i * inputSize.first * inputSize.second,
d_output + i * inputSize.first * inputSize.second, &d_weights[i],
inputSize.first * inputSize.second
);
CUDA_CHECK(cudaGetLastError());
// Add biases
Kernels::vec_scalar_add<<<gridSize, BLOCK_SIZE>>>(
d_output + i * inputSize.first * inputSize.second,
d_output + i * inputSize.first * inputSize.second, &d_biases[i],
inputSize.first * inputSize.second
);
CUDA_CHECK(cudaGetLastError());
}
activation->activate(d_output);
return d_output;
}

View File

@@ -1,31 +0,0 @@
#include "concat.hpp"
#include "cuda_helper.cuh"
using namespace CUDANet::Layers;
void Concat::initCUDA() {
d_output = nullptr;
CUDA_CHECK(
cudaMalloc((void**)&d_output, sizeof(float) * (inputASize + inputBSize))
);
}
void Concat::delCUDA() {
cudaFree(d_output);
}
float* Concat::forwardCUDA(const float* d_input_A, const float* d_input_B) {
CUDA_CHECK(cudaMemcpy(
d_output, d_input_A, sizeof(float) * inputASize,
cudaMemcpyDeviceToDevice
));
CUDA_CHECK(cudaMemcpy(
d_output + inputASize, d_input_B, sizeof(float) * inputBSize,
cudaMemcpyDeviceToDevice
));
CUDA_CHECK(cudaDeviceSynchronize());
return d_output;
}

View File

@@ -1,18 +1,32 @@
#include <iostream> #include <iostream>
#include "backend.hpp" #include "backend.hpp"
#include "backend/cuda.cuh" #include "backend/cuda/cuda.cuh"
#include "utils/cuda_helper.cuh" #include "backend/cuda/kernels/matmul.cuh"
#include "kernels/matmul.cuh"
using namespace CUDANet::Backend; using namespace CUDANet::Backends;
void CUDA::print(const CUDANet::Tensor &input) { void CUDA::print(const CUDANet::Tensor &input) {
switch (input.get_dtype()) {
case DType::FLOAT32:
print_impl<float>(input);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::print_impl<float> (const CUDANet::Tensor &input);
template <typename T>
void CUDA::print_impl(const CUDANet::Tensor &input) {
auto length = input.numel(); auto length = input.numel();
std::vector<float> h_vec(input.numel()); std::vector<T> h_vec(input.numel());
CUDA_CHECK(cudaMemcpy( CUDA_CHECK(cudaMemcpy(
h_vec.data(), input.data<float>(), sizeof(float) * length, cudaMemcpyDeviceToHost h_vec.data(), static_cast<const T*>(input.device_ptr()), sizeof(T) * length, cudaMemcpyDeviceToHost
)); ));
for (int i = 0; i < length; ++i) { for (int i = 0; i < length; ++i) {
@@ -23,26 +37,75 @@ void CUDA::print(const CUDANet::Tensor &input) {
} }
void CUDA::zero(CUDANet::Tensor &input) { void CUDA::zero(CUDANet::Tensor &input) {
CUDA_CHECK(cudaMemset(input.data<float>(), 0, sizeof(float) * input.numel())); fill(input, 0);
}
void CUDA::fill(CUDANet::Tensor &input, int value) {
switch (input.get_dtype()) {
case DType::FLOAT32:
fill_impl<float>(input, value);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::fill_impl<float>(CUDANet::Tensor &input, int value);
template <typename T>
void CUDA::fill_impl(CUDANet::Tensor &input, int value) {
CUDA_CHECK(cudaMemset(static_cast<T*>(input.device_ptr()), value, sizeof(T) * input.numel()));
} }
void CUDA::copy_to_device(CUDANet::Tensor &tensor, void *data, size_t size) { void CUDA::copy_to_device(CUDANet::Tensor &tensor, void *data, size_t size) {
CUDA_CHECK(cudaMemcpy(tensor.data<float>(), data, size, cudaMemcpyHostToDevice)); switch (tensor.get_dtype()) {
case DType::FLOAT32:
copy_to_device_impl<float>(tensor, data, size);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::copy_to_device_impl<float>(CUDANet::Tensor &tensor, void *data, size_t size);
template <typename T>
void CUDA::copy_to_device_impl(CUDANet::Tensor &tensor, void *data, size_t size) {
CUDA_CHECK(cudaMemcpy(static_cast<T*>(tensor.device_ptr()), data, size, cudaMemcpyHostToDevice));
} }
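copy_to_device now dispatches on dtype before the cudaMemcpy, so callers hand over a raw host pointer and a byte count. A usage sketch, assuming a live CUDA backend pointer and the Tensor constructor used throughout this diff (includes and setup omitted):

// Sketch: upload host weights into a device tensor through the backend.
std::vector<float> h_weights(64, 0.5f);
CUDANet::Tensor w({64}, CUDANet::DType::FLOAT32, backend);
backend->copy_to_device(w, h_weights.data(), sizeof(float) * h_weights.size());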
void CUDA::sum(const CUDANet::Tensor &input, CUDANet::Tensor &sum) { void CUDA::sum(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
switch (input.get_dtype()) {
case DType::FLOAT32:
sum_impl<float>(input, sum);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::sum_impl<float>(const CUDANet::Tensor &input, CUDANet::Tensor &sum);
template <typename T>
void CUDA::sum_impl(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
auto length = input.numel(); auto length = input.numel();
const int gridSize = ( + BLOCK_SIZE - 1) / BLOCK_SIZE; const int gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
CUDANet::Kernels::sum_reduce<<<gridSize, BLOCK_SIZE>>>( CUDANet::Kernels::sum_reduce<<<gridSize, BLOCK_SIZE>>>(
input.data<float>(), sum.data<float>(), length static_cast<const T*>(input.device_ptr()), static_cast<T*>(sum.device_ptr()), length
); );
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
int remaining = gridSize; int remaining = gridSize;
while (remaining > 1) { while (remaining > 1) {
int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE; int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE;
CUDANet::Kernels::sum_reduce<<<blocks_needed, BLOCK_SIZE>>>(sum.data<float>(), sum.data<float>(), remaining); CUDANet::Kernels::sum_reduce<<<blocks_needed, BLOCK_SIZE>>>(static_cast<T*>(sum.device_ptr()), static_cast<T*>(sum.device_ptr()), remaining);
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
remaining = blocks_needed; remaining = blocks_needed;
@@ -50,17 +113,32 @@ void CUDA::sum(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
} }
void CUDA::max(const CUDANet::Tensor &input, CUDANet::Tensor &max) { void CUDA::max(const CUDANet::Tensor &input, CUDANet::Tensor &max) {
switch (input.get_dtype()) {
case DType::FLOAT32:
max_impl<float>(input, max);
break;
default:
throw std::runtime_error("Unsupported dtype");
break;
}
}
template void CUDA::max_impl<float>(const CUDANet::Tensor &input, CUDANet::Tensor &max);
template <typename T>
void CUDA::max_impl(const CUDANet::Tensor &input, CUDANet::Tensor &max) {
auto length = input.numel(); auto length = input.numel();
const int grid_size = (length + BLOCK_SIZE - 1) / BLOCK_SIZE; const int grid_size = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
Kernels::max_reduce<<<grid_size, BLOCK_SIZE>>>(input.data<float>(), max.data<float>(), length); Kernels::max_reduce<<<grid_size, BLOCK_SIZE>>>(static_cast<const T*>(input.device_ptr()), static_cast<T*>(max.device_ptr()), length);
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
int remaining = grid_size; int remaining = grid_size;
while (remaining > 1) { while (remaining > 1) {
int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE; int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE;
CUDANet::Kernels::max_reduce<<<blocks_needed, BLOCK_SIZE>>>(max.data<float>(), max.data<float>(), remaining); CUDANet::Kernels::max_reduce<<<blocks_needed, BLOCK_SIZE>>>(static_cast<T*>(max.device_ptr()), static_cast<T*>(max.device_ptr()), remaining);
CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaGetLastError());
remaining = blocks_needed; remaining = blocks_needed;

View File

@@ -2,40 +2,57 @@
#include <stdexcept> #include <stdexcept>
#include <vector> #include <vector>
#include "activation.hpp" #include "layers/activation.hpp"
#include "tensor.hpp" #include "tensor.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
Activation::Activation(CUDANet::Backend* backend, ActivationType activation, const CUDANet::Shape &shape) Activation::Activation(
: backend(backend), activationType(activation), shape(shape) { ActivationType activation,
const CUDANet::Shape& shape,
CUDANet::Backend* backend
)
: Activation(activation, shape, backend->get_default_dtype(), backend) {}
Activation::Activation(
ActivationType activation,
const CUDANet::Shape& shape,
CUDANet::DType dtype,
CUDANet::Backend* backend
)
: activation_type(activation),
shape(shape),
backend(backend) {
this->dtype = dtype;
if (shape.size() != 1) { if (shape.size() != 1) {
throw std::runtime_error(std::format("Invalid shape. Expected [1], got {}", shape)); throw InvalidShapeException("input", 1, shape.size());
} }
auto length = shape[0]; auto length = shape[0];
if (activationType == SOFTMAX) { if (activation_type == SOFTMAX) {
softmax_sum = CUDANet::Tensor({static_cast<size_t>(length)}, CUDANet::DType::FLOAT32, backend); softmax_sum =
tensor_max = CUDANet::Tensor({static_cast<size_t>(length)}, CUDANet::DType::FLOAT32, backend); CUDANet::Tensor({static_cast<size_t>(length)}, dtype, backend);
tensor_max =
CUDANet::Tensor({static_cast<size_t>(length)}, dtype, backend);
} }
} }
CUDANet::Tensor& Activation::forward(CUDANet::Tensor &input) { CUDANet::Tensor& Activation::forward(CUDANet::Tensor& input) {
switch (activationType) switch (activation_type) {
{ case ActivationType::SIGMOID:
case ActivationType::SIGMOID: backend->sigmoid(input);
backend->sigmoid(input); break;
break; case ActivationType::RELU:
case ActivationType::RELU: backend->relu(input);
backend->relu(input); break;
break; case ActivationType::SOFTMAX:
case ActivationType::SOFTMAX: backend->softmax(input, tensor_max, softmax_sum);
backend->softmax(input, tensor_max, softmax_sum); break;
break; default:
default: break;
break;
} }
return input; return input;
@@ -57,10 +74,14 @@ size_t Activation::output_size() {
return shape[0]; return shape[0];
} }
void Activation::set_weights(void *input) {} void Activation::set_weights(void* input) {}
CUDANet::Tensor& Activation::get_weights() {} size_t Activation::get_weights_size() {
return 0;
}
void Activation::set_biases(void *input) {} void Activation::set_biases(void* input) {}
CUDANet::Tensor& Activation::get_biases() {} size_t Activation::get_biases_size() {
return 0;
}
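With the reordered constructor (type, shape, optional dtype, backend) and the size-based weight/bias accessors, driving the layer looks roughly like the sketch below; the 10-element shape and the backend pointer are assumptions, and forward() modifies the input in place:

// Usage sketch for the refactored Activation layer (backend setup omitted).
CUDANet::Layers::Activation softmax(
    CUDANet::Layers::ActivationType::SOFTMAX, CUDANet::Shape{10}, backend
);
CUDANet::Tensor logits({10}, CUDANet::DType::FLOAT32, backend);
CUDANet::Tensor& probs = softmax.forward(logits);  // returns the same tensor, now normalized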

View File

@@ -1,44 +1,32 @@
#include "add.hpp" #include "layers/add.hpp"
#include <stddef.h>
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
Add::Add(int inputSize) Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend)
: inputSize(inputSize) { : Add(a_shape, b_shape, backend->get_default_dtype(), backend) {}
output = new float[inputSize]; Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend* backend)
: backend(backend), dtype(dtype) {
#ifdef USE_CUDA if (a_shape != b_shape) {
initCUDA(); throw InvalidShapeException(
#endif "Add requires matching dimensions", a_shape, b_shape
);
}
Add::~Add() {
#ifdef USE_CUDA
delCUDA();
#endif
}
float* Add::forward(const float* inputA, const float* inputB) {
#ifdef USE_CUDA
return forwardCUDA(inputA, inputB);
#else
return forwardCPU(inputA, inputB);
#endif
}
float* Add::forwardCPU(const float* inputA, const float* inputB) {
for (size_t i = 0; i < inputSize; i++)
{
output[i] = inputA[i] + inputB[i];
} }
out_shape = a_shape;
output = CUDANet::Tensor(out_shape, dtype, backend);
}
Add::~Add() {}
CUDANet::Tensor&
Add::forward(CUDANet::Tensor& input_a, CUDANet::Tensor& input_b) {
output.zero();
backend->add(
input_a,
input_b,
output
);
return output; return output;
} }

View File

@@ -1,101 +1,146 @@
#include <format>
#include <stdexcept> #include <stdexcept>
#include "avg_pooling.hpp" #include "layers/avg_pool.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
AvgPooling2d::AvgPooling2d( AvgPool2d::AvgPool2d(
shape2d inputSize, CUDANet::Shape input_shape,
int nChannels, CUDANet::Shape pool_shape,
shape2d poolingSize, CUDANet::Shape stride_shape,
shape2d stride, CUDANet::Shape padding_shape,
shape2d padding, CUDANet::Backend* backend
ActivationType activationType
) )
: inputSize(inputSize), : AvgPool2d(input_shape, pool_shape, stride_shape, padding_shape, backend->get_default_dtype(), backend) {}
nChannels(nChannels),
poolingSize(poolingSize), AvgPool2d::AvgPool2d(
stride(stride), CUDANet::Shape input_shape,
padding(padding) { CUDANet::Shape pool_shape,
outputSize = { CUDANet::Shape stride_shape,
(inputSize.first + 2 * padding.first - poolingSize.first) / CUDANet::Shape padding_shape,
stride.first + CUDANet::DType dtype,
CUDANet::Backend* backend
)
: in_shape(input_shape),
pool_shape(pool_shape),
stride_shape(stride_shape),
padding_shape(padding_shape),
backend(backend) {
if (in_shape.size() != 3) {
throw InvalidShapeException("input", 3, in_shape.size());
}
if (pool_shape.size() != 2) {
throw InvalidShapeException("pool", 2, pool_shape.size());
}
if (stride_shape.size() != 2) {
throw InvalidShapeException("stride", 2, stride_shape.size());
}
if (padding_shape.size() != 2) {
throw InvalidShapeException("padding", 2, padding_shape.size());
}
this->dtype = dtype;
out_shape = {
(in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
1, 1,
(inputSize.second + 2 * padding.second - poolingSize.second) / (in_shape[1] + 2 * padding_shape[1] - pool_shape[1]) / stride_shape[1] +
stride.second + 1,
1 in_shape[2]
}; };
activation = new Activation( output = CUDANet::Tensor(
activationType, outputSize.first * outputSize.second * nChannels Shape{out_shape[0] * out_shape[1] * out_shape[2]},
dtype, backend
); );
#ifdef USE_CUDA
initCUDA();
#endif
} }
AvgPooling2d::~AvgPooling2d() { AvgPool2d::~AvgPool2d() {}
#ifdef USE_CUDA
delCUDA(); CUDANet::Tensor& AvgPool2d::forward(CUDANet::Tensor& input) {
#endif output.zero();
delete activation; backend->avg_pool2d(
input,
output,
in_shape,
pool_shape,
stride_shape,
padding_shape,
out_shape
);
return output;
} }
float* AvgPooling2d::forwardCPU(const float* input) { CUDANet::Shape AvgPool2d::input_shape() {
throw std::logic_error("Not implemented"); return in_shape;
} }
float* AvgPooling2d::forward(const float* input) { CUDANet::Shape AvgPool2d::output_shape() {
#ifdef USE_CUDA return out_shape;
return forwardCUDA(input);
#else
return forwardCPU(input);
#endif
} }
int AvgPooling2d::get_output_size() { size_t AvgPool2d::input_size() {
return outputSize.first * outputSize.second * nChannels; return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
} }
int AvgPooling2d::getInputSize() { size_t AvgPool2d::output_size() {
return inputSize.first * inputSize.second * nChannels; return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
} }
shape2d AvgPooling2d::getOutputDims() { void AvgPool2d::set_weights(void* input) {}
return outputSize;
size_t AvgPool2d::get_weights_size() {
return 0;
} }
AdaptiveAvgPooling2d::AdaptiveAvgPooling2d( void AvgPool2d::set_biases(void* input) {}
shape2d inputShape,
int nChannels, size_t AvgPool2d::get_biases_size() {
shape2d outputShape, return 0;
ActivationType activationType }
AdaptiveAvgPool2d::AdaptiveAvgPool2d(
CUDANet::Shape input_shape,
CUDANet::Shape output_shape,
CUDANet::Backend *backend
) )
: AvgPooling2d( : AdaptiveAvgPool2d(input_shape, output_shape, backend->get_default_dtype(), backend) {}
inputShape,
nChannels,
{1, 1},
{1, 1},
{0, 0},
activationType
) {
stride = {
inputShape.first / outputShape.first,
inputShape.second / outputShape.second
};
poolingSize = {
inputShape.first - (outputShape.first - 1) * stride.first,
inputShape.second - (outputShape.second - 1) * stride.second
};
padding = {(poolingSize.first - 1) / 2, (poolingSize.second - 1) / 2};
outputSize = outputShape;
activation = new Activation( AdaptiveAvgPool2d::AdaptiveAvgPool2d(
activationType, outputSize.first * outputSize.second * nChannels CUDANet::Shape input_shape,
CUDANet::Shape output_shape,
CUDANet::DType dtype,
CUDANet::Backend *backend
)
: AvgPool2d(
input_shape,
// pool_shape
{
input_shape[0] - (output_shape[0] - 1) * (input_shape[0] / output_shape[0]),
input_shape[1] - (output_shape[1] - 1) * (input_shape[1] / output_shape[1])
},
// stride_shape
{
input_shape[0] / output_shape[0],
input_shape[1] / output_shape[1]
},
// padding_shape
{
(input_shape[0] - (output_shape[0] - 1) * (input_shape[0] / output_shape[0]) - 1) / 2,
(input_shape[1] - (output_shape[1] - 1) * (input_shape[1] / output_shape[1]) - 1) / 2
},
dtype,
backend
) {
out_shape = output_shape;
output = CUDANet::Tensor(
Shape{out_shape[0] * out_shape[1] * out_shape[2]},
dtype, backend
); );
#ifdef USE_CUDA
initCUDA();
#endif
} }
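The AdaptiveAvgPool2d constructor above derives a regular pooling configuration from the requested output size. Working the formulas for a concrete case, an assumed 6x6xC input adapted to 3x3xC:

stride  = 6 / 3                          = 2
pool    = 6 - (3 - 1) * 2                = 2
padding = (2 - 1) / 2                    = 0
AvgPool2d output = (6 + 2*0 - 2) / 2 + 1 = 3   per spatial dimension, as requested

When the input size is not an integer multiple of the output size this derivation only approximates true adaptive pooling, which is worth keeping in mind when porting weights.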

View File

@@ -1,133 +1,111 @@
#include "batch_norm.hpp"
#include <stdexcept> #include <stdexcept>
#include <vector> #include <vector>
#include "activation.hpp" #include "layers/batch_norm.hpp"
#include "layer.hpp" #include "layer.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
BatchNorm2d::BatchNorm2d( BatchNorm2d::BatchNorm2d(
shape2d inputSize, CUDANet::Shape input_shape,
int inputChannels, float eps,
float epsilon, CUDANet::Backend *backend
ActivationType activationType
) )
: inputSize(inputSize), inputChannels(inputChannels), epsilon(epsilon) { : BatchNorm2d(input_shape, eps, backend->get_default_dtype(), backend) {}
activation = new Activation(
activationType, inputSize.first * inputSize.second * inputChannels BatchNorm2d::BatchNorm2d(
CUDANet::Shape input_shape,
float eps,
CUDANet::DType dtype,
CUDANet::Backend *backend
)
: in_shape(input_shape), backend(backend) {
if (in_shape.size() != 3) {
throw InvalidShapeException("input", 3, in_shape.size());
}
this->dtype = dtype;
epsilon = CUDANet::Tensor({1}, dtype, backend);
epsilon.set_data(&eps);
running_mean = CUDANet::Tensor({in_shape[2]}, dtype, backend);
running_mean.zero();
running_var = CUDANet::Tensor({in_shape[2]}, dtype, backend);
running_var.fill(1);
weights = CUDANet::Tensor({in_shape[2]}, dtype, backend);
weights.fill(1);
biases = CUDANet::Tensor({in_shape[2]}, dtype, backend);
biases.zero();
output = CUDANet::Tensor(in_shape, dtype, backend);
}
BatchNorm2d::~BatchNorm2d() {}
CUDANet::Tensor& BatchNorm2d::forward(CUDANet::Tensor& input) {
output.zero();
backend->batch_norm(
input,
output,
in_shape,
weights,
biases,
running_mean,
running_var,
epsilon
); );
return output;
weights.resize(inputChannels);
biases.resize(inputChannels);
running_mean.resize(inputChannels);
running_var.resize(inputChannels);
initializeWeights();
initializeBiases();
initializeRunningMean();
initializeRunningVar();
#ifdef USE_CUDA
initCUDA();
toCuda();
#endif
} }
BatchNorm2d::~BatchNorm2d() { CUDANet::Shape BatchNorm2d::input_shape() {
#ifdef USE_CUDA return in_shape;
delCUDA();
#endif
} }
void BatchNorm2d::initializeWeights() { CUDANet::Shape BatchNorm2d::output_shape() {
std::fill(weights.begin(), weights.end(), 1.0f); return in_shape;
} }
void BatchNorm2d::initializeBiases() { size_t BatchNorm2d::input_size() {
std::fill(biases.begin(), biases.end(), 0.0f); return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
} }
void BatchNorm2d::initializeRunningMean() { size_t BatchNorm2d::output_size() {
std::fill(running_mean.begin(), running_mean.end(), 0.0f); return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
} }
void BatchNorm2d::initializeRunningVar() { void BatchNorm2d::set_weights(void* input) {
std::fill(running_var.begin(), running_var.end(), 1.0f); weights.set_data(input);
} }
void BatchNorm2d::setWeights(const float* weights_input) { size_t BatchNorm2d::get_weights_size() {
std::copy(weights_input, weights_input + weights.size(), weights.begin()); return weights.size();
#ifdef USE_CUDA
toCuda();
#endif
} }
std::vector<float> BatchNorm2d::getWeights() { void BatchNorm2d::set_biases(void* input) {
return weights; biases.set_data(input);
} }
void BatchNorm2d::setBiases(const float* biases_input) { size_t BatchNorm2d::get_biases_size() {
std::copy(biases_input, biases_input + biases.size(), biases.begin()); return biases.size();
#ifdef USE_CUDA
toCuda();
#endif
} }
std::vector<float> BatchNorm2d::getBiases() { void BatchNorm2d::set_running_mean(void* input) {
return biases; running_mean.set_data(input);
} }
void BatchNorm2d::setRunningMean(const float* running_mean_input) { size_t BatchNorm2d::get_running_mean_size() {
std::copy( return running_mean.size();
running_mean_input, running_mean_input + inputChannels,
running_mean.begin()
);
#ifdef USE_CUDA
toCuda();
#endif
} }
std::vector<float> BatchNorm2d::getRunningMean() { void BatchNorm2d::set_running_var(void* input) {
return running_mean; running_var.set_data(input);
} }
void BatchNorm2d::setRunningVar(const float* running_var_input) { size_t BatchNorm2d::get_running_var_size() {
std::copy( return running_var.size();
running_var_input, running_var_input + inputChannels,
running_var.begin()
);
#ifdef USE_CUDA
toCuda();
#endif
}
std::vector<float> BatchNorm2d::getRunningVar() {
return running_var;
}
int BatchNorm2d::getInputSize() {
return inputSize.first * inputSize.second * inputChannels;
}
int BatchNorm2d::getOutputSize() {
return inputSize.first * inputSize.second * inputChannels;
}
shape2d BatchNorm2d::getOutputDims() {
return inputSize;
}
float* BatchNorm2d::forwardCPU(const float* input) {
throw std::logic_error("Not implemented");
}
float* BatchNorm2d::forward(const float* input) {
#ifdef USE_CUDA
return forwardCUDA(input);
#else
return forwardCPU(input);
#endif
} }
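BatchNorm2d now owns its parameter tensors and exposes raw-pointer setters plus byte-size getters instead of returning Tensor references. Loading pretrained statistics would look roughly like this sketch (the 56x56x64 shape and host vectors are made up for illustration; includes and backend setup omitted):

// Usage sketch for the refactored BatchNorm2d.
CUDANet::Layers::BatchNorm2d bn({56, 56, 64}, 1e-5f, backend);
std::vector<float> gamma(64, 1.0f), beta(64, 0.0f), mean(64, 0.0f), var(64, 1.0f);
bn.set_weights(gamma.data());
bn.set_biases(beta.data());
bn.set_running_mean(mean.data());
bn.set_running_var(var.data());
size_t weight_bytes = bn.get_weights_size();  // Tensor::size() reports bytes, so 64 * sizeof(float) here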

View File

@@ -1,34 +1,35 @@
#include <stdexcept> #include "layers/concat.hpp"
#include "concat.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
Concat::Concat(const int inputASize, const int inputBSize) Concat::Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::Backend *backend)
: inputASize(inputASize), inputBSize(inputBSize) { : Concat(a_shape, b_shape, backend->get_default_dtype(), backend) {}
#ifdef USE_CUDA
initCUDA(); Concat::Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend *backend)
#endif : a_shape(a_shape), b_shape(b_shape), backend(backend), dtype(dtype) {
if (a_shape[0] != b_shape[0] || a_shape[1] != b_shape[1]) {
throw InvalidShapeException(
"Concat requires matching height and width dimensions", a_shape,
b_shape
);
}
out_shape = {a_shape[0], a_shape[1], a_shape[2] + b_shape[2]};
output = CUDANet::Tensor(out_shape, dtype, backend);
} }
Concat::~Concat() { Concat::~Concat() {}
#ifdef USE_CUDA
delCUDA(); CUDANet::Tensor& Concat::forward(CUDANet::Tensor& input_a, CUDANet::Tensor& input_b) {
#endif output.zero();
backend->concat(
input_a,
input_b,
output
);
return output;
} }
float* Concat::forwardCPU(const float* input_A, const float* input_B) { CUDANet::Shape Concat::output_shape() {
throw std::logic_error("Not implemented"); return out_shape;
} }
float* Concat::forward(const float* input_A, const float* input_B) {
#ifdef USE_CUDA
return forwardCUDA(input_A, input_B);
#else
return forwardCPU(input_A, input_B);
#endif
}
int Concat::getOutputSize() {
return inputASize + inputBSize;
};

View File

@@ -1,8 +1,7 @@
#include "conv2d.hpp"
#include <format> #include <format>
#include <stdexcept> #include <stdexcept>
#include "layers/conv2d.hpp"
#include "layer.hpp" #include "layer.hpp"
#include "tensor.hpp" #include "tensor.hpp"
@@ -14,6 +13,16 @@ Conv2d::Conv2d(
CUDANet::Shape stride_shape, CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape, CUDANet::Shape padding_shape,
CUDANet::Backend* backend CUDANet::Backend* backend
)
: Conv2d(input_shape, kernel_shape, stride_shape, padding_shape, backend->get_default_dtype(), backend) {}
Conv2d::Conv2d(
CUDANet::Shape input_shape,
CUDANet::Shape kernel_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::DType dtype,
CUDANet::Backend* backend
) )
: in_shape(input_shape), : in_shape(input_shape),
kernel_shape(kernel_shape), kernel_shape(kernel_shape),
@@ -21,60 +30,46 @@ Conv2d::Conv2d(
padding_shape(padding_shape), padding_shape(padding_shape),
backend(backend) { backend(backend) {
if (in_shape.size() != 3) { if (in_shape.size() != 3) {
throw std::runtime_error( throw InvalidShapeException("input", 3, in_shape.size());
std::format(
"Invalid input shape. Expected 3 dims, got {}", in_shape
)
);
} }
if (kernel_shape.size() != 3) { if (kernel_shape.size() != 3) {
throw std::runtime_error( throw InvalidShapeException("kernel", 3, kernel_shape.size());
std::format(
"Invalid kernel shape. Expected 3 dims, got {}", kernel_shape
)
);
} }
if (stride_shape.size() != 2) { if (stride_shape.size() != 2) {
throw std::runtime_error( throw InvalidShapeException("stride", 2, stride_shape.size());
std::format(
"Invalid stride shape. Expected 2 dims, got {}", stride_shape
)
);
} }
if (padding_shape.size() != 2) { if (padding_shape.size() != 2) {
throw std::runtime_error( throw InvalidShapeException("padding", 2, padding_shape.size());
std::format(
"Invalid padding shape. Expected 2 dims, got {}", padding_shape
)
);
} }
size_t out_h = (in_shape[0] - kernel_shape[0] + 2 * padding_shape[0]) / this->dtype = dtype;
stride_shape[0] +
1; out_shape = {
size_t out_w = (in_shape[1] - kernel_shape[1] + 2 * padding_shape[1]) / (in_shape[0] - kernel_shape[0] + 2 * padding_shape[0]) /
stride_shape[1] + stride_shape[0] +
1; 1,
out_shape.resize(3); (in_shape[1] - kernel_shape[1] + 2 * padding_shape[1]) /
out_shape[0] = out_h; stride_shape[1] +
out_shape[1] = out_w; 1,
out_shape[2] = kernel_shape[2]; kernel_shape[2]
};
output = CUDANet::Tensor( output = CUDANet::Tensor(
Shape{out_shape[0] * out_shape[1] * out_shape[3]}, Shape{out_shape[0], out_shape[1], out_shape[2]},
CUDANet::DType::FLOAT32, backend dtype, backend
); );
weights = CUDANet::Tensor( weights = CUDANet::Tensor(
Shape{ Shape{
kernel_shape[0] * kernel_shape[1] * kernel_shape[2] * in_shape[2] kernel_shape[0], kernel_shape[1], kernel_shape[2], in_shape[2]
}, },
CUDANet::DType::FLOAT32, backend dtype, backend
); );
biases = CUDANet::Tensor( biases = CUDANet::Tensor(
Shape{kernel_shape[2]}, CUDANet::DType::FLOAT32, backend Shape{kernel_shape[2]}, dtype, backend
); );
weights.zero(); weights.zero();
@@ -83,18 +78,11 @@ Conv2d::Conv2d(
Conv2d::~Conv2d() {} Conv2d::~Conv2d() {}
CUDANet::Tensor& Conv2d::forward( CUDANet::Tensor& input) { CUDANet::Tensor& Conv2d::forward(CUDANet::Tensor& input) {
output.zero(); output.zero();
backend->conv2d( backend->conv2d(
weights, weights, biases, input, output, in_shape, padding_shape, kernel_shape,
biases, stride_shape, out_shape
input,
output,
in_shape,
padding_shape,
kernel_shape,
stride_shape,
out_shape
); );
return output; return output;
} }
@@ -108,27 +96,27 @@ CUDANet::Shape Conv2d::output_shape() {
} }
size_t Conv2d::input_size() { size_t Conv2d::input_size() {
return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2]; return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
} }
size_t Conv2d::output_size() { size_t Conv2d::output_size() {
return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2]; return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
} }
void Conv2d::set_weights(void* input) { void Conv2d::set_weights(void* input) {
weights.set_data<float>(static_cast<float*>(input)); weights.set_data(input);
} }
CUDANet::Tensor& Conv2d::get_weights() { size_t Conv2d::get_weights_size() {
return weights; return weights.size();
} }
void Conv2d::set_biases(void* input) { void Conv2d::set_biases(void* input) {
biases.set_data<float>(static_cast<float*>(input)); biases.set_data(input);
} }
CUDANet::Tensor& Conv2d::get_biases() { size_t Conv2d::get_biases_size() {
return biases; return biases.size();
} }
CUDANet::Shape Conv2d::get_padding_shape() { CUDANet::Shape Conv2d::get_padding_shape() {
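
The output spatial size in the constructor above follows the usual convolution arithmetic, (in − kernel + 2·padding) / stride + 1 per dimension. A hedged construction sketch (backend obtained elsewhere, include path as in the diff):

    #include "layers/conv2d.hpp"

    void conv_example(CUDANet::Backend* backend) {  // assumed valid backend
        // 224x224x3 input, 7x7 kernel with 64 output channels, stride 2, padding 3:
        //   (224 - 7 + 2 * 3) / 2 + 1 = 112
        CUDANet::Layers::Conv2d conv(
            CUDANet::Shape{224, 224, 3},  // input  {H, W, C}
            CUDANet::Shape{7, 7, 64},     // kernel {H, W, out channels}
            CUDANet::Shape{2, 2},         // stride
            CUDANet::Shape{3, 3},         // padding
            backend                       // default dtype via the delegating constructor
        );
        // conv.output_shape() should report {112, 112, 64}.
        // Weights are stored as a {7, 7, 64, 3} tensor (kernel H, kernel W,
        // out channels, in channels) per the constructor above, and are uploaded
        // through set_weights(void*) from a host buffer matching that layout.
    }
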


@@ -1,30 +1,31 @@
#include "dense.hpp"
#include <format> #include <format>
#include <stdexcept> #include <stdexcept>
#include "layers/dense.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
Dense::Dense(CUDANet::Shape in, CUDANet::Shape out, CUDANet::Backend* backend) Dense::Dense(CUDANet::Shape in_shape, CUDANet::Shape out_shape, CUDANet::Backend* backend)
: Dense(in_shape, out_shape, backend->get_default_dtype(), backend) {}
Dense::Dense(CUDANet::Shape in_shape, CUDANet::Shape out_shape, CUDANet::DType dtype, CUDANet::Backend* backend)
: backend(backend), : backend(backend),
in_shape(in), in_shape(in_shape),
out_shape(out) { out_shape(out_shape) {
if (in.size() != 1) { if (in_shape.size() != 1) {
throw std::runtime_error( throw InvalidShapeException("input", 1, in_shape.size());
std::format("Invalid shape. Expected [1], got {}", in_shape)
);
} }
if (out.size() != 1) { if (out_shape.size() != 1) {
throw std::runtime_error( throw InvalidShapeException("output", 1, out_shape.size());
std::format("Invalid shape. Expected [1], got {}", out_shape)
);
} }
weights = CUDANet::Tensor(Shape{in[0] * out[0]}, CUDANet::DType::FLOAT32, backend); this->dtype = dtype;
biases = CUDANet::Tensor(Shape{out[0]}, CUDANet::DType::FLOAT32, backend);
output = CUDANet::Tensor(Shape{out[0]}, CUDANet::DType::FLOAT32, backend); weights = CUDANet::Tensor(Shape{out_shape[0], in_shape[0]}, dtype, backend);
biases = CUDANet::Tensor(Shape{out_shape[0]}, dtype, backend);
output = CUDANet::Tensor(Shape{out_shape[0]}, dtype, backend);
weights.zero(); weights.zero();
biases.zero(); biases.zero();
@@ -34,6 +35,7 @@ Dense::Dense(CUDANet::Shape in, CUDANet::Shape out, CUDANet::Backend* backend)
Dense::~Dense() {} Dense::~Dense() {}
CUDANet::Tensor& Dense::forward(CUDANet::Tensor& input) { CUDANet::Tensor& Dense::forward(CUDANet::Tensor& input) {
output.zero();
backend->dense(weights, biases, input, output, in_shape[0], out_shape[0]); backend->dense(weights, biases, input, output, in_shape[0], out_shape[0]);
return output; return output;
} }
@@ -54,18 +56,19 @@ size_t Dense::output_size() {
return out_shape[0]; return out_shape[0];
}; };
// TODO: Use dtype
void Dense::set_weights(void* input) { void Dense::set_weights(void* input) {
weights.set_data<float>(static_cast<float*>(input)); weights.set_data(input);
} }
CUDANet::Tensor& Dense::get_weights() { size_t Dense::get_weights_size() {
return weights; return weights.size();
} }
void Dense::set_biases(void* input) { void Dense::set_biases(void* input) {
biases.set_data<float>(static_cast<float*>(input)); biases.set_data(input);
} }
CUDANet::Tensor& Dense::get_biases() { size_t Dense::get_biases_size() {
return biases; return biases.size();
} }
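
A usage sketch for the new Dense interface above, assuming a valid backend and that the backend's default dtype is FLOAT32 (the only dtype dtype_size currently handles):

    #include "layers/dense.hpp"
    #include "tensor.hpp"
    #include <vector>

    void dense_example(CUDANet::Backend* backend) {  // assumed valid backend
        // Shapes must be 1-D; anything else now throws InvalidShapeException.
        CUDANet::Layers::Dense fc(CUDANet::Shape{512}, CUDANet::Shape{10}, backend);

        // Weights are a {10, 512} tensor (out x in) per the constructor above;
        // set_weights takes a raw host pointer, so the buffer must match that
        // layout and the layer's dtype.
        std::vector<float> w(10 * 512, 0.0f);
        std::vector<float> b(10, 0.0f);
        fc.set_weights(w.data());
        fc.set_biases(b.data());

        CUDANet::Tensor x(CUDANet::Shape{512}, backend);
        x.zero();
        CUDANet::Tensor& y = fc.forward(x);  // shape {10}
        (void)y;
    }
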


@@ -1,36 +1,60 @@
#include "max_pool.hpp"
#include <stdexcept> #include <stdexcept>
#include "layers/max_pool.hpp"
using namespace CUDANet::Layers; using namespace CUDANet::Layers;
MaxPool2d::MaxPool2d( MaxPool2d::MaxPool2d(
CUDANet::Shape input_shape, CUDANet::Shape input_shape,
CUDANet::Shape pooling_shape, CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape, CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape, CUDANet::Shape padding_shape,
CUDANet::Backend* backend CUDANet::Backend* backend
)
: MaxPool2d(input_shape, pool_shape, stride_shape, padding_shape, backend->get_default_dtype(), backend) {}
MaxPool2d::MaxPool2d(
CUDANet::Shape input_shape,
CUDANet::Shape pool_shape,
CUDANet::Shape stride_shape,
CUDANet::Shape padding_shape,
CUDANet::DType dtype,
CUDANet::Backend* backend
) )
: in_shape(input_shape), : in_shape(input_shape),
pooling_shape(pooling_shape), pool_shape(pool_shape),
stride_shape(stride_shape), stride_shape(stride_shape),
padding_shape(padding_shape), padding_shape(padding_shape),
backend(backend) { backend(backend) {
size_t out_h = (in_shape[0] + 2 * padding_shape[0] - pooling_shape[0]) / if (in_shape.size() != 3) {
stride_shape[0] + throw InvalidShapeException("input", 3, in_shape.size());
1; }
size_t out_w = (in_shape[1] + 2 * padding_shape[1] - pooling_shape[1]) /
stride_shape[1] +
1;
out_shape.resize(3); if (pool_shape.size() != 2) {
out_shape[0] = out_h; throw InvalidShapeException("pool", 2, pool_shape.size());
out_shape[1] = out_w; }
out_shape[2] = in_shape[2];
if (stride_shape.size() != 2) {
throw InvalidShapeException("stride", 2, stride_shape.size());
}
if (padding_shape.size() != 2) {
throw InvalidShapeException("padding", 2, padding_shape.size());
}
this->dtype = dtype;
out_shape = {
(in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
1,
(in_shape[1] + 2 * padding_shape[1] - pool_shape[1]) / stride_shape[1] +
1,
in_shape[2]
};
output = CUDANet::Tensor( output = CUDANet::Tensor(
Shape{out_shape[0] * out_shape[1] * out_shape[3]}, Shape{out_shape[0] * out_shape[1] * out_shape[2]},
CUDANet::DType::FLOAT32, backend dtype, backend
); );
} }
@@ -38,8 +62,8 @@ MaxPool2d::~MaxPool2d() {}
CUDANet::Tensor& MaxPool2d::forward(CUDANet::Tensor& input) { CUDANet::Tensor& MaxPool2d::forward(CUDANet::Tensor& input) {
output.zero(); output.zero();
backend->maxPool2d( backend->max_pool2d(
input, output, in_shape, pooling_shape, stride_shape, padding_shape, input, output, in_shape, pool_shape, stride_shape, padding_shape,
out_shape out_shape
); );
return output; return output;
@@ -54,17 +78,21 @@ CUDANet::Shape MaxPool2d::output_shape() {
} }
size_t MaxPool2d::input_size() { size_t MaxPool2d::input_size() {
return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2]; return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
} }
size_t MaxPool2d::output_size() { size_t MaxPool2d::output_size() {
return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2]; return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
} }
void MaxPool2d::set_weights(void* input) {} void MaxPool2d::set_weights(void* input) {}
CUDANet::Tensor& MaxPool2d::get_weights() {} size_t MaxPool2d::get_weights_size() {
return 0;
}
void MaxPool2d::set_biases(void* input) {} void MaxPool2d::set_biases(void* input) {}
CUDANet::Tensor& MaxPool2d::get_biases() {} size_t MaxPool2d::get_biases_size() {
return 0;
}
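
The pooling output size uses the same arithmetic as the constructor above, (in + 2·padding − pool) / stride + 1. A short sketch, with the same assumptions as the other examples (valid backend, include path from the diff):

    #include "layers/max_pool.hpp"

    void pool_example(CUDANet::Backend* backend) {  // assumed valid backend
        // 112x112x64 input, 3x3 window, stride 2, padding 1:
        //   (112 + 2 * 1 - 3) / 2 + 1 = 56
        CUDANet::Layers::MaxPool2d pool(
            CUDANet::Shape{112, 112, 64},  // input {H, W, C}
            CUDANet::Shape{3, 3},          // pool window
            CUDANet::Shape{2, 2},          // stride
            CUDANet::Shape{1, 1},          // padding
            backend
        );
        // pool.output_shape() should report {56, 56, 64}; forward() now routes
        // through backend->max_pool2d (renamed from maxPool2d in this change).
    }
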


@@ -1,5 +1,3 @@
#include "model.hpp"
#include <fstream> #include <fstream>
#include <iostream> #include <iostream>
#include <iomanip> #include <iomanip>
@@ -7,76 +5,51 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "input.hpp"
#include "layer.hpp" #include "layer.hpp"
#include "batch_norm.hpp" #include "layers/batch_norm.hpp"
#include "model.hpp"
using namespace CUDANet; using namespace CUDANet;
Model::Model( Model::Model(
const shape2d inputSize, const CUDANet::Shape input_shape,
const int inputChannels, const CUDANet::Shape output_shape
const int outputSize
) )
: inputSize(inputSize), : in_shape(input_shape),
    inputChannels(inputChannels), out_shape(output_shape),
outputSize(outputSize), layers(std::vector<std::pair<std::string, Layer*>>()),
layers(std::vector<std::pair<std::string, Layers::SequentialLayer*>>()), layer_map(std::unordered_map<std::string, Layer*>()) {};
layerMap(std::unordered_map<std::string, Layers::SequentialLayer*>()) {
inputLayer =
new Layers::Input(inputSize.first * inputSize.second * inputChannels);
outputLayer = new Layers::Output(outputSize);
};
Model::Model(const Model& other) Model::~Model() {};
: inputSize(other.inputSize),
inputChannels(other.inputChannels), CUDANet::Tensor& Model::predict(CUDANet::Tensor& input) {
outputSize(other.outputSize), CUDANet::Tensor* current = &input;
layers(std::vector<std::pair<std::string, Layers::SequentialLayer*>>()), for (const auto& [name, layer_ptr] : layers) {
layerMap(std::unordered_map<std::string, Layers::SequentialLayer*>()) { current = &(layer_ptr->forward(*current));
inputLayer = new Layers::Input(*other.inputLayer); }
outputLayer = new Layers::Output(*other.outputLayer); return *current;
} }
Model::~Model() { void Model::register_layer(const std::string& name, Layer* layer) {
delete inputLayer;
delete outputLayer;
for (const auto& layer : layers) {
delete layer.second;
}
};
float* Model::predict(const float* input) {
float* d_input = inputLayer->forward(input);
for (auto& layer : layers) {
d_input = layer.second->forward(d_input);
}
return outputLayer->forward(d_input);
}
void Model::addLayer(const std::string& name, Layers::SequentialLayer* layer) {
const Module* module = dynamic_cast<Module*>(layer);
if (module != nullptr) {
for (const auto& moduleLayer : module->getLayers()) {
layerMap[moduleLayer.first] = moduleLayer.second;
layers.push_back({moduleLayer.first, moduleLayer.second});
}
return;
}
layers.push_back({name, layer}); layers.push_back({name, layer});
layerMap[name] = layer; layer_map[name] = layer;
} }
Layers::SequentialLayer* Model::getLayer(const std::string& name) { void Model::register_module(Module& module) {
return layerMap[name]; for (const auto& [name, layer_ptr] : module.get_layers()) {
layer_map[name] = layer_ptr;
layers.push_back({name, layer_ptr});
}
return;
} }
void Model::loadWeights(const std::string& path) { Layer* Model::get_layer(const std::string& name) {
return layer_map[name];
}
void Model::load_weights(const std::string& path) {
std::ifstream file(path, std::ios::binary); std::ifstream file(path, std::ios::binary);
if (!file.is_open()) { if (!file.is_open()) {
@@ -92,120 +65,114 @@ void Model::loadWeights(const std::string& path) {
return; return;
} }
auto getTensorType = [](const std::string& typeStr) { auto get_tensor_type = [](const std::string& type_str) {
if (typeStr == "weight") return TensorType::WEIGHT; if (type_str == "weight") return TensorType::WEIGHT;
if (typeStr == "bias") return TensorType::BIAS; if (type_str == "bias") return TensorType::BIAS;
if (typeStr == "running_mean") return TensorType::RUNNING_MEAN; if (type_str == "running_mean") return TensorType::RUNNING_MEAN;
if (typeStr == "running_var") return TensorType::RUNNING_VAR; if (type_str == "running_var") return TensorType::RUNNING_VAR;
throw std::runtime_error("Unknown tensor type: " + typeStr); throw std::runtime_error("Unknown tensor type: " + type_str);
}; };
u_int64_t headerSize; u_int64_t header_size;
file.read(reinterpret_cast<char*>(&headerSize), sizeof(headerSize)); file.read(reinterpret_cast<char*>(&header_size), sizeof(header_size));
std::string header(headerSize, '\0'); std::string header(header_size, '\0');
file.read(&header[0], headerSize); file.read(&header[0], header_size);
std::vector<TensorInfo> tensorInfos; std::vector<TensorInfo> tensor_infos;
size_t pos = 0; size_t pos = 0;
while (pos < header.size()) { while (pos < header.size()) {
size_t nextPos = header.find('\n', pos); size_t next_pos = header.find('\n', pos);
if (nextPos == std::string::npos) break; if (next_pos == std::string::npos) break;
std::string line = header.substr(pos, nextPos - pos); std::string line = header.substr(pos, next_pos - pos);
pos = nextPos + 1; pos = next_pos + 1;
size_t commaPos = line.find(','); size_t comma_pos = line.find(',');
if (commaPos == std::string::npos) continue; if (comma_pos == std::string::npos) continue;
// Parse tensor name into name and type // Parse tensor name into name and type
std::string nameStr = line.substr(0, commaPos); std::string name_str = line.substr(0, comma_pos);
size_t dotPos = nameStr.find_last_of('.'); size_t dot_pos = name_str.find_last_of('.');
if (dotPos == std::string::npos) continue; if (dot_pos == std::string::npos) continue;
std::string name = nameStr.substr(0, dotPos); std::string name = name_str.substr(0, dot_pos);
TensorType type = getTensorType(nameStr.substr(dotPos + 1)); TensorType type = get_tensor_type(name_str.substr(dot_pos + 1));
line = line.substr(commaPos + 1); line = line.substr(comma_pos + 1);
commaPos = line.find(','); comma_pos = line.find(',');
if (commaPos == std::string::npos) continue; if (comma_pos == std::string::npos) continue;
int size = std::stoi(line.substr(0, commaPos)); int size = std::stoi(line.substr(0, comma_pos));
int offset = std::stoi(line.substr(commaPos + 1)); int offset = std::stoi(line.substr(comma_pos + 1));
tensorInfos.push_back({name, type, size, offset}); tensor_infos.push_back({name, type, size, offset});
} }
for (const auto& tensorInfo : tensorInfos) { for (const auto& tensor_info : tensor_infos) {
std::vector<float> values(tensorInfo.size); std::vector<float> values(tensor_info.size);
file.seekg( file.seekg(
sizeof(version) + sizeof(headerSize) + header.size() + sizeof(version) + sizeof(header_size) + header.size() +
tensorInfo.offset tensor_info.offset
); );
file.read( file.read(
reinterpret_cast<char*>(values.data()), reinterpret_cast<char*>(values.data()),
tensorInfo.size * sizeof(float) tensor_info.size * sizeof(float)
); );
if (layerMap.find(tensorInfo.name) != layerMap.end()) { if (layer_map.find(tensor_info.name) != layer_map.end()) {
Layers::WeightedLayer* wLayer =
dynamic_cast<Layers::WeightedLayer*>(layerMap[tensorInfo.name]);
if (wLayer == nullptr) { Layer* layer = layer_map[tensor_info.name];
std::cerr << "Layer: " << tensorInfo.name
<< " does not have weights" << std::endl;
continue;
}
if (tensorInfo.type == TensorType::WEIGHT) { if (tensor_info.type == TensorType::WEIGHT) {
if (wLayer->getWeights().size() != values.size()) { if (layer->get_weights_size() != values.size()) {
std::cerr << "Layer: " << tensorInfo.name std::cerr << "Layer: " << tensor_info.name
<< " has incorrect number of weights, expected " << " has incorrect number of weights, expected "
<< wLayer->getWeights().size() << " but got " << layer->get_weights_size() << " but got "
<< values.size() << ", skipping" << std::endl; << values.size() << ", skipping" << std::endl;
continue; continue;
} }
wLayer->setWeights(values.data()); layer->set_weights(values.data());
} else if (tensorInfo.type == TensorType::BIAS) { } else if (tensor_info.type == TensorType::BIAS) {
if (wLayer->getBiases().size() != values.size()) { if (layer->get_biases_size() != values.size()) {
std::cerr << "Layer: " << tensorInfo.name std::cerr << "Layer: " << tensor_info.name
<< " has incorrect number of biases, expected " << " has incorrect number of biases, expected "
<< wLayer->getBiases().size() << " but got " << layer->get_biases_size() << " but got "
<< values.size() << ", skipping" << std::endl; << values.size() << ", skipping" << std::endl;
continue; continue;
} }
wLayer->setBiases(values.data()); layer->set_biases(values.data());
} }
Layers::BatchNorm2d* bnLayer = dynamic_cast<Layers::BatchNorm2d*>(wLayer); Layers::BatchNorm2d* bn_layer = dynamic_cast<Layers::BatchNorm2d*>(layer);
if (bnLayer == nullptr) { if (bn_layer == nullptr) {
continue; continue;
} }
if (tensorInfo.type == TensorType::RUNNING_MEAN) { if (tensor_info.type == TensorType::RUNNING_MEAN) {
if (bnLayer->getRunningMean().size() != values.size()) { if (bn_layer->get_running_mean_size() != values.size()) {
std::cerr << "Layer: " << tensorInfo.name << " has incorrect number of running mean values, expected " std::cerr << "Layer: " << tensor_info.name << " has incorrect number of running mean values, expected "
<< bnLayer->getRunningMean().size() << " but got " << values.size() << ", skipping" << std::endl; << bn_layer->get_running_mean_size() << " but got " << values.size() << ", skipping" << std::endl;
continue; continue;
} }
bnLayer->setRunningMean(values.data()); bn_layer->set_running_mean(values.data());
} else if (tensorInfo.type == TensorType::RUNNING_VAR) { } else if (tensor_info.type == TensorType::RUNNING_VAR) {
if (bnLayer->getRunningVar().size() != values.size()) { if (bn_layer->get_running_var_size() != values.size()) {
std::cerr << "Layer: " << tensorInfo.name << " has incorrect number of running var values, expected " std::cerr << "Layer: " << tensor_info.name << " has incorrect number of running var values, expected "
<< bnLayer->getRunningVar().size() << " but got " << values.size() << ", skipping" << std::endl; << bn_layer->get_running_var_size() << " but got " << values.size() << ", skipping" << std::endl;
continue; continue;
} }
bnLayer->setRunningVar(values.data()); bn_layer->set_running_var(values.data());
} }
} else { } else {
std::cerr << "Layer: " << tensorInfo.name std::cerr << "Layer: " << tensor_info.name
<< " does not exist, skipping" << std::endl; << " does not exist, skipping" << std::endl;
} }
} }
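
For reference, the loader above expects a u64 header length followed by a newline-separated text header of "name.type,size,offset" records (size in float32 elements, offset in bytes into the raw payload that follows the header), preceded by a version field whose exact width is not visible in this hunk. A hedged sketch of composing such a header; assembling the full file is not shown because of that version field:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Record {
        std::string name;   // e.g. "features.conv1"
        std::string type;   // "weight", "bias", "running_mean" or "running_var"
        uint64_t    count;  // number of float32 values
    };

    // Builds one "<name>.<type>,<count>,<byte offset>" line per tensor, with
    // offsets accumulated over the float32 payload, matching what the parser
    // above splits on '.' and ','.
    std::string build_header(const std::vector<Record>& records) {
        std::string header;
        uint64_t offset = 0;
        for (const auto& r : records) {
            header += r.name + "." + r.type + "," +
                      std::to_string(r.count) + "," + std::to_string(offset) + "\n";
            offset += r.count * sizeof(float);
        }
        return header;
    }
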
@@ -215,63 +182,63 @@ void Model::loadWeights(const std::string& path) {
bool Model::validate() { bool Model::validate() {
bool valid = true; bool valid = true;
int size = inputLayer->getInputSize(); CUDANet::Shape shape = in_shape;
for (const auto& layer : layers) { for (const auto& [name, layer_ptr] : layers) {
if (layer.second->getInputSize() != size) { if (layer_ptr->input_shape() != shape) {
valid = false; valid = false;
std::cerr << "Layer: " << layer.first std::cerr << "Layer: " << name
<< " has incorrect input size, expected " << size << " has incorrect input shape, expected " << format_shape(shape)
<< " but got " << layer.second->getInputSize() << " but got " << format_shape(layer_ptr->input_shape())
<< std::endl; << std::endl;
break; break;
} }
size = layer.second->getOutputSize(); shape = layer_ptr->output_shape();
} }
return valid; return valid;
} }
void Model::printSummary() { void Model::print_summary() {
struct layer_info { struct layer_info {
std::string name; std::string name;
std::string inputSize; std::string input_shape;
std::string outputSize; std::string output_shape;
}; };
std::vector<layer_info> layerInfos; std::vector<layer_info> layer_infos;
int maxNameLength = 0; int max_name_length = 0;
int maxInputLength = 0; int max_input_length = 0;
int maxOutputLength = 0; int max_output_length = 0;
for (const auto& layer : layers) { for (const auto& [name, layer_ptr] : layers) {
layer_info layerInfo = { layer_info li = {
layer.first, std::to_string(layer.second->getInputSize()), name, format_shape(layer_ptr->input_shape()),
std::to_string(layer.second->getOutputSize()) format_shape(layer_ptr->output_shape())
}; };
layerInfos.push_back(layerInfo); layer_infos.push_back(li);
maxNameLength = std::max(maxNameLength, (int)layerInfo.name.size()); max_name_length = std::max(max_name_length, (int)li.name.size());
maxInputLength = max_input_length =
std::max(maxInputLength, (int)layerInfo.inputSize.size()); std::max(max_input_length, (int)li.input_shape.size());
maxOutputLength = max_output_length =
std::max(maxOutputLength, (int)layerInfo.outputSize.size()); std::max(max_output_length, (int)li.output_shape.size());
} }
int rowLength = maxNameLength + maxInputLength + maxOutputLength + 6; int row_length = max_name_length + max_input_length + max_output_length + 6;
std::cout << "Model Summary:" << std::endl std::cout << "Model Summary:" << std::endl
<< std::string(rowLength, '-') << std::endl; << std::string(row_length, '-') << std::endl;
for (const auto& layerInfo : layerInfos) { for (const auto& li : layer_infos) {
std::cout << std::left std::cout << std::left
<< std::setw(maxNameLength) << layerInfo.name << std::setw(max_name_length) << li.name
<< " | " << std::right << " | " << std::right
<< std::setw(maxInputLength) << layerInfo.inputSize << std::setw(max_input_length) << li.input_shape
<< " | " << " | "
<< std::setw(maxOutputLength) << layerInfo.outputSize << std::setw(max_output_length) << li.output_shape
<< std::endl; << std::endl;
} }
} }
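
A usage sketch of the renamed Model API above (register_layer, validate, load_weights, predict, print_summary), assuming a valid backend and a weight file whose entries are keyed by the registered layer names:

    #include "model.hpp"
    #include "tensor.hpp"
    #include "layers/dense.hpp"

    void model_example(CUDANet::Backend* backend) {  // assumed valid backend
        CUDANet::Model model(CUDANet::Shape{512}, CUDANet::Shape{10});

        CUDANet::Layers::Dense fc1(CUDANet::Shape{512}, CUDANet::Shape{128}, backend);
        CUDANet::Layers::Dense fc2(CUDANet::Shape{128}, CUDANet::Shape{10}, backend);

        // Registered names become the "<name>.weight" / "<name>.bias" keys that
        // load_weights looks up in layer_map.
        model.register_layer("fc1", &fc1);
        model.register_layer("fc2", &fc2);

        if (!model.validate()) {
            return;  // consecutive layer shapes do not line up
        }
        model.load_weights("model.bin");  // hypothetical path
        model.print_summary();

        CUDANet::Tensor input(CUDANet::Shape{512}, backend);
        input.zero();
        CUDANet::Tensor& out = model.predict(input);  // runs each forward in order
        (void)out;
    }
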


@@ -1,32 +1,28 @@
#include "module.hpp"
#include <algorithm> #include <algorithm>
#include "module.hpp"
using namespace CUDANet; using namespace CUDANet;
void Module::addLayer(const std::string& name, Layers::SequentialLayer* layer) { CUDANet::Shape Module::input_shape() {
const Module* module = dynamic_cast<Module*>(layer); return in_shape;
}
if (module != nullptr) { CUDANet::Shape Module::output_shape() {
for (const auto& moduleLayer : module->getLayers()) { return out_shape;
layers.push_back({moduleLayer.first, moduleLayer.second}); }
}
return;
}
void Module::register_layer(const std::string& name, Layer* layer) {
layers.push_back({name, layer}); layers.push_back({name, layer});
} }
const std::vector<std::pair<std::string, Layers::SequentialLayer*>>& void Module::register_module(Module& module) {
Module::getLayers() const { for (const auto& moduleLayer : module.get_layers()) {
layers.push_back({moduleLayer.first, moduleLayer.second});
}
}
const std::vector<std::pair<std::string, Layer*>>&
Module::get_layers() const {
return layers; return layers;
} }
int Module::getInputSize() {
return inputSize;
}
int Module::getOutputSize() {
return outputSize;
}
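
A sketch of how a composite block might use the Module interface above. This assumes Module lets subclasses assign in_shape/out_shape (not shown in this hunk) and that registered layers outlive the module, since raw Layer* pointers are stored:

    #include "module.hpp"
    #include "layers/conv2d.hpp"

    // Hypothetical composite block, not part of the library.
    class ConvBlock : public CUDANet::Module {
      public:
        ConvBlock(CUDANet::Shape input, CUDANet::Backend* backend)
            : conv(input, CUDANet::Shape{3, 3, 16}, CUDANet::Shape{1, 1},
                   CUDANet::Shape{1, 1}, backend) {
            in_shape  = input;
            out_shape = conv.output_shape();
            register_layer("conv", &conv);  // name becomes the weight-file key
        }

      private:
        CUDANet::Layers::Conv2d conv;
    };
    // Model::register_module then flattens the block's layers into the model.
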


@@ -4,17 +4,37 @@
using namespace CUDANet; using namespace CUDANet;
size_t dtype_size(DType dtype) {
switch (dtype)
{
case DType::FLOAT32:
return 4;
break;
default:
throw std::runtime_error("Unknown DType");
break;
}
}
Tensor::Tensor(Shape shape, CUDANet::Backend* backend)
: Tensor(shape, backend->get_default_dtype(), backend) {}
Tensor::Tensor(Shape shape, DType dtype, Backend* backend) Tensor::Tensor(Shape shape, DType dtype, Backend* backend)
: shape(shape), dtype(dtype), backend(backend), d_ptr(nullptr) { : shape(shape), dtype(dtype), backend(backend), d_ptr(nullptr) {
if (shape.empty()) { if (shape.empty()) {
throw std::runtime_error("Tensor shape cannot be empty"); throw std::runtime_error("Tensor shape cannot be empty");
} }
// Check if backend supports DType
if (!backend->supports_dtype(dtype)) {
throw std::runtime_error("Unsupported DType");
}
// Count total elements // Count total elements
size_t count = 1; size_t count = 1;
for (const auto& dim : shape) { for (size_t i = 0; i < shape.size(); ++i) {
count *= dim; count *= shape[i];
} }
total_elms = count; total_elms = count;
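
A small sketch of the constructors added above: the two-argument form delegates to the backend's default dtype, unsupported dtypes are rejected at construction, and dtype_size currently maps only FLOAT32 to 4 bytes. Assumes a valid backend, as in the other examples:

    #include "tensor.hpp"

    void tensor_ctor_example(CUDANet::Backend* backend) {  // assumed valid backend
        // Picks up backend->get_default_dtype() via the delegating constructor.
        CUDANet::Tensor a(CUDANet::Shape{4, 4}, backend);

        // Explicit dtype; throws if backend->supports_dtype(dtype) is false,
        // and dtype_size() throws for anything other than FLOAT32 for now.
        CUDANet::Tensor b(CUDANet::Shape{4, 4}, CUDANet::DType::FLOAT32, backend);

        // numel() counts elements; size() returns the stored total_size.
        (void)a.numel();
        (void)b.size();
    }
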
@@ -39,9 +59,8 @@ Tensor::Tensor(Tensor&& other) noexcept
total_elms(other.total_elms), total_elms(other.total_elms),
total_size(other.total_size), total_size(other.total_size),
backend(other.backend), backend(other.backend),
d_ptr(other.d_ptr) d_ptr(other.d_ptr) {
{ other.d_ptr = nullptr;
other.d_ptr = nullptr;
other.backend = nullptr; other.backend = nullptr;
} }
@@ -53,15 +72,15 @@ Tensor& Tensor::operator=(Tensor&& other) noexcept {
} }
// Steal other's resources // Steal other's resources
shape = std::move(other.shape); shape = std::move(other.shape);
dtype = other.dtype; dtype = other.dtype;
total_elms = other.total_elms; total_elms = other.total_elms;
total_size = other.total_size; total_size = other.total_size;
backend = other.backend; backend = other.backend;
d_ptr = other.d_ptr; d_ptr = other.d_ptr;
// Leave other in valid but empty state // Leave other in valid but empty state
other.d_ptr = nullptr; other.d_ptr = nullptr;
other.backend = nullptr; other.backend = nullptr;
} }
return *this; return *this;
@@ -74,6 +93,10 @@ Tensor::~Tensor() {
} }
} }
DType Tensor::get_dtype() const {
return dtype;
}
size_t Tensor::numel() const { size_t Tensor::numel() const {
return total_elms; return total_elms;
} }
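
The hunk below adds the data-movement helpers device_ptr, fill and set_data. A short usage sketch of those helpers, again assuming a valid backend and a FLOAT32 tensor:

    #include "tensor.hpp"
    #include <vector>

    void tensor_data_example(CUDANet::Backend* backend) {  // assumed valid backend
        CUDANet::Tensor t(CUDANet::Shape{2, 3}, CUDANet::DType::FLOAT32, backend);

        t.zero();   // delegates to backend->zero
        t.fill(1);  // delegates to backend->fill

        // set_data copies t.size() bytes from host to the tensor's allocation,
        // so the host buffer must match the tensor's dtype and element count.
        std::vector<float> host(t.numel(), 0.5f);
        t.set_data(host.data());

        void* raw = t.device_ptr();  // raw allocation for backend kernels
        (void)raw;
    }
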
@@ -82,6 +105,22 @@ size_t Tensor::size() const {
return total_size; return total_size;
} }
void* Tensor::device_ptr() const {
return d_ptr;
}
void* Tensor::device_ptr() {
return d_ptr;
}
void Tensor::zero() { void Tensor::zero() {
backend->zero(*this); backend->zero(*this);
} }
void Tensor::fill(int value) {
backend->fill(*this, value);
}
void Tensor::set_data(void *data) {
backend->copy_to_device(*this, data, total_size);
}