Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-12-23 14:54:28 +00:00)

Compare commits: a97ff8e1f6...main (11 commits)

6318d52f12
71dc5a924d
7e27c87673
e79667671a
c855ae89ec
9ff214d759
13d3d38b68
84153ac49c
ad079560ff
60964cf294
a40ba96d4f
@@ -23,8 +23,8 @@ endif()


 file(GLOB_RECURSE CPU_SOURCES
+    src/*.cpp
     src/layers/*.cpp
-    src/model/*.cpp
 )

 set(LIBRARY_SOURCES ${CPU_SOURCES})
@@ -32,10 +32,7 @@ set(LIBRARY_SOURCES ${CPU_SOURCES})
 if(USE_CUDA)
     file(GLOB_RECURSE CUDA_SOURCES
         src/backends/cuda/*.cu
-        src/backends/cuda/utils/*.cu
         src/backends/cuda/kernels/*.cu
-        src/backends/cuda/layers/*.cu
-        src/layers/*.cu # To be removed
     )
     set(LIBRARY_SOURCES ${LIBRARY_SOURCES} ${CUDA_SOURCES})
 endif()
@@ -46,17 +43,17 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 add_library(${PROJECT_NAME} STATIC ${LIBRARY_SOURCES})

 if(USE_CUDA)
+    # Enable relocatable device code for proper template instantiation across translation units
+    set_target_properties(${PROJECT_NAME} PROPERTIES
+        CUDA_SEPARABLE_COMPILATION ON
+        CUDA_RUNTIME_LIBRARY Shared
+    )
     target_link_libraries(${PROJECT_NAME} CUDA::cudart)
 endif()

 # Set include directories for the library
 target_include_directories(${PROJECT_NAME} PUBLIC
     ${CMAKE_CURRENT_SOURCE_DIR}/include
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/utils
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/kernels
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/layers
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/model
-    ${CMAKE_CURRENT_SOURCE_DIR}/src
 )

 set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 20)
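Note: CUDA_SEPARABLE_COMPILATION ON (relocatable device code, nvcc -rdc=true) is what lets a __global__ template be defined and explicitly instantiated in one .cu file yet launched from another translation unit, which the kernel changes below depend on. A minimal sketch of the pattern, with hypothetical file names not taken from this compare:

    // kernels.cuh: declaration only
    template <typename T>
    __global__ void scale(T* data, T factor, unsigned int len);

    // kernels.cu: definition plus an explicit instantiation for float
    template <typename T>
    __global__ void scale(T* data, T factor, unsigned int len) {
        unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < len) data[i] *= factor;
    }
    template __global__ void scale<float>(float*, float, unsigned int);

    // caller.cu: a different translation unit can launch the instantiation;
    // the device linker enabled by separable compilation resolves the symbol:
    // scale<float><<<grid, block>>>(d_ptr, 2.0f, n);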
@@ -1,16 +1,39 @@
 #pragma once

-#include <cstddef>
+#include <memory>
+#include <optional>

 #include "shape.hpp"
+#include "tensor.hpp"

 namespace CUDANet {

-// Forward declaration
+// Forward declarations
+class Backend;
 class Tensor;
+enum class DType;
+
+enum BackendType { CUDA_BACKEND, CPU_BACKEND };
+
+struct BackendConfig {
+    int device_id = 0;
+};
+
+class BackendFactory {
+   public:
+    static std::unique_ptr<Backend> create(BackendType backend_type, const BackendConfig& config);
+};

 class Backend {
+   protected:
+    std::optional<DType> default_dtype;
+
   public:
+    // Dtypes
+    virtual bool supports_dtype(DType dtype) const = 0;
+    virtual void set_default_dtype(DType dtype) = 0;
+    virtual DType get_default_dtype() const = 0;
+
     // Memory management
     virtual void* allocate(size_t bytes) = 0;
     virtual void deallocate(void* ptr) = 0;
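Note: the header above now sketches the whole construction flow: a BackendConfig selects the device, BackendFactory::create returns an abstract Backend, and later allocations go through that interface. A usage sketch (caller code assumed, not part of this compare):

    #include <memory>
    #include "backend.hpp"

    int main() {
        CUDANet::BackendConfig config;
        config.device_id = 0;

        // Throws if the library was built without CUDA or no device is present
        // (see src/backend_factory.cpp later in this compare).
        std::unique_ptr<CUDANet::Backend> backend =
            CUDANet::BackendFactory::create(CUDANet::CUDA_BACKEND, config);

        void* buf = backend->allocate(1024);   // raw device allocation
        backend->deallocate(buf);
        return 0;
    }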
@@ -1,6 +1,7 @@
 #pragma once

 #include <cstdio>
+#include <set>

 #include "backend.hpp"
 #include "tensor.hpp"
@@ -17,22 +18,41 @@
     do { \
         cudaError_t result = call; \
         if (result != cudaSuccess) { \
-            fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", \
-                __FILE__, __LINE__, static_cast<unsigned int>(result), \
-                cudaGetErrorString(result), #call); \
+            fprintf( \
+                stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", __FILE__, \
+                __LINE__, static_cast<unsigned int>(result), \
+                cudaGetErrorString(result), #call \
+            ); \
             exit(EXIT_FAILURE); \
         } \
     } while (0)

 namespace CUDANet::Backends {

+template <DType dtype>
+struct cuda_dtype_map;
+
+template <>
+struct cuda_dtype_map<DType::FLOAT32> {
+    using type = float;
+};
+
 class CUDA : public Backend {
   public:
+    CUDA(const BackendConfig& config);
+
+    bool supports_dtype(DType dtype) const override;
+    void set_default_dtype(DType dtype) override;
+    DType get_default_dtype() const override;
+
+    static bool is_cuda_available();
+    void initialize();
+
     // Memory management
     void* allocate(size_t bytes) override;
     void deallocate(void* ptr) override;

-    // Tensor ops
+    // Tensor ops dispatchers
     void print(const CUDANet::Tensor& input) override;
     void zero(CUDANet::Tensor& input) override;
     void fill(CUDANet::Tensor& input, int value) override;
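Note: the CUDA_CHECK reformat keeps the behavior identical: report file, line, numeric code, error string, and the stringified failing call, then exit. A usage sketch:

    // Wrap any CUDA runtime call; on failure this prints something like
    //   CUDA error at cuda.cu:42 code=2(cudaErrorMemoryAllocation) "cudaMalloc(&d_data, bytes)"
    // and terminates the process.
    float* d_data = nullptr;
    size_t bytes = 1024 * sizeof(float);
    CUDA_CHECK(cudaMalloc(&d_data, bytes));
    CUDA_CHECK(cudaMemset(d_data, 0, bytes));
    CUDA_CHECK(cudaFree(d_data));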
@@ -41,7 +61,7 @@ class CUDA : public Backend {
     void sum(const CUDANet::Tensor& input, CUDANet::Tensor& sum) override;
     void max(const CUDANet::Tensor& input, CUDANet::Tensor& max) override;

-    // Layer ops
+    // Layer ops dispatchers
     void relu(CUDANet::Tensor& tensor) override;
     void sigmoid(CUDANet::Tensor& tensor) override;
     void softmax(
@@ -113,6 +133,111 @@ class CUDA : public Backend {
         CUDANet::Tensor& input_b,
         CUDANet::Tensor& output
     ) override;
+
+   private:
+    int device_id;
+    std::set<DType> supported_dtypes;
+
+    // Tensor ops template impls
+    template <typename T>
+    void print_impl(const CUDANet::Tensor& input);
+
+    template <typename T>
+    void fill_impl(CUDANet::Tensor& input, int value);
+
+    template <typename T>
+    void copy_to_device_impl(CUDANet::Tensor& tensor, void* data, size_t size);
+
+    template <typename T>
+    void sum_impl(const CUDANet::Tensor& input, CUDANet::Tensor& sum);
+
+    template <typename T>
+    void max_impl(const CUDANet::Tensor& input, CUDANet::Tensor& max);
+
+    // Layer ops template impls
+    template <typename T>
+    void relu_impl(CUDANet::Tensor& tensor);
+
+    template <typename T>
+    void sigmoid_impl(CUDANet::Tensor& tensor);
+
+    template <typename T>
+    void softmax_impl(
+        CUDANet::Tensor& tensor,
+        CUDANet::Tensor& temp_max,
+        CUDANet::Tensor& temp_sum
+    );
+
+    template <typename T>
+    CUDANet::Tensor& dense_impl(
+        const CUDANet::Tensor& weights,
+        const CUDANet::Tensor& biases,
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        const size_t input_size,
+        const size_t output_size
+    );
+
+    template <typename T>
+    CUDANet::Tensor& conv2d_impl(
+        const CUDANet::Tensor& weights,
+        const CUDANet::Tensor& biases,
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        const CUDANet::Shape in_shape,
+        const CUDANet::Shape padding_shape,
+        const CUDANet::Shape kernel_shape,
+        const CUDANet::Shape stride_shape,
+        const CUDANet::Shape out_shape
+    );
+
+    template <typename T>
+    CUDANet::Tensor& max_pool2d_impl(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    );
+
+    template <typename T>
+    CUDANet::Tensor& avg_pool2d_impl(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    );
+
+    template <typename T>
+    CUDANet::Tensor& batch_norm_impl(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Tensor& weights,
+        CUDANet::Tensor& biases,
+        CUDANet::Tensor& running_mean,
+        CUDANet::Tensor& running_var,
+        CUDANet::Tensor& epsilon
+    );
+
+    template <typename T>
+    CUDANet::Tensor& concat_impl(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    );
+
+    template <typename T>
+    CUDANet::Tensor& add_impl(
+        CUDANet::Tensor& input_a,
+        CUDANet::Tensor& input_b,
+        CUDANet::Tensor& output
+    );
 };

-} // namespace CUDANet::Backend
+} // namespace CUDANet::Backends
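Note: cuda_dtype_map is a compile-time bridge from the runtime DType tag to the element type the *_impl templates are instantiated with; only FLOAT32 is specialized so far, so any other tag fails at compile time. A small sketch:

    #include <type_traits>

    using F32 = CUDANet::Backends::cuda_dtype_map<CUDANet::DType::FLOAT32>::type;
    static_assert(std::is_same_v<F32, float>, "FLOAT32 maps to float");
    // A DType tag without a specialization would not compile.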
@@ -4,29 +4,18 @@

 namespace CUDANet::Kernels {

-/**
- * @brief Sigmoid activation function kernel
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void sigmoid(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );

-/**
- * @brief Relu activation function kernel
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void relu(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );

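Note: with the Doxygen blocks dropped, these headers now only declare the templates; the definitions and their float instantiations live in the matching .cu files further down in this compare. A launch sketch, assuming device pointers d_src and d_dst of length n:

    const unsigned int block = 256;
    const unsigned int grid  = (n + block - 1) / block;
    // Links because the <float> instantiation is emitted in the .cu file
    // and relocatable device code is enabled in CMake.
    CUDANet::Kernels::relu<float><<<grid, block>>>(d_src, d_dst, n);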
@@ -5,11 +5,12 @@

 namespace CUDANet::Kernels {

+template <typename T>
 __global__ void convolution(
-    const float* __restrict__ d_input,
-    const float* __restrict__ d_kernel,
-    const float* __restrict__ d_bias,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    const T* __restrict__ d_kernel,
+    const T* __restrict__ d_bias,
+    T* __restrict__ d_output,
     const Shape input_shape,
     const Shape padding_shape,
     const Shape kernel_shape,
@@ -4,188 +4,105 @@

 namespace CUDANet::Kernels {

-/**
- * @brief Matrix vector multiplication kernel
- *
- * @param d_matrix Device pointer to matrix
- * @param d_vector Device pointer to vector
- * @param d_output Device pointer to output vector
- * @param w Width of the matrix
- * @param h Height of the matrix
- */
+template <typename T>
 __global__ void mat_vec_mul(
-    const float* __restrict__ d_matrix,
-    const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_matrix,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
     const unsigned int w,
     const unsigned int h
 );

-/**
- * @brief Vector vector addition kernel
- *
- * @param d_vector1 Device pointer to first vector
- * @param d_vector2 Device pointer to second vector
- * @param d_output Device pointer to output vector
- * @param w Length of the vectors
- */
+template <typename T>
 __global__ void vec_vec_add(
-    const float* __restrict__ d_vector1,
-    const float* __restrict__ d_vector2,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
     const unsigned int w
 );

-/**
- * @brief Vector vector subtraction kernel
- *
- * @param d_vector1
- * @param d_vector2
- * @param d_output
- * @param w
- * @return __global__
- */
+template <typename T>
 __global__ void vec_vec_sub(
-    const float* __restrict__ d_vector1,
-    const float* __restrict__ d_vector2,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
    const unsigned int w
 );

+template <typename T>
 __global__ void vec_vec_mul(
-    const float* __restrict__ d_vector1,
-    const float* __restrict__ d_vector2,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
     const unsigned int w
 );

-/**
- * @brief Sub scalar from each element of the vector
- *
- * @param d_vector
- * @param d_scalar
- * @param d_output
- * @param w
- * @return __global__
- */
+template <typename T>
 __global__ void vec_scalar_sub(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );

-/**
- * @brief Add scalar to each element of the vector
- *
- * @param d_src
- * @param d_out
- * @param d_scalar
- * @param len
- * @return __global__
- */
+template <typename T>
 __global__ void vec_scalar_add(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );

-/**
- * @brief Divide each element of the vector by a scalar
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void vec_scalar_div(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );

-/**
- * @brief Multiply each element of the vector by a scalar
- *
- * @param d_src
- * @param d_out
- * @param d_scalar
- * @param len
- * @return __global__
- */
+template <typename T>
 __global__ void vec_scalar_mul(
-    const float* __restrict__ d_src,
-    float* __restrict__ d_out,
-    const float* __restrict__ d_scalar,
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
     const unsigned int len
 );

-/**
- * @brief Exponentiate each element of the vector
- *
- * @param src Pointer to the source array
- * @param dst Pointer to the destination array
- * @param len Length of the arrays
- */
+template <typename T>
 __global__ void vec_exp(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );

-/**
- * @brief Compute the square root of each element of the vector
- *
- * @param src Device pointer to source vector
- * @param dst Device pointer to destination vector
- * @param len Length of the vector
- */
+template <typename T>
 __global__ void vec_sqrt(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
     const unsigned int len
 );

-/**
- * @brief Scales the vector by 1/sqrt(scale + epsilon)
- *
- * @param src Device pointer to source vector
- * @param dst Device pointer to destination vector
- * @param scale Scale
- * @param epsilon Epsilon
- * @param len Length of the vector
- */
+template <typename T>
 __global__ void vec_scale(
-    const float* __restrict__ src,
-    float* __restrict__ dst,
-    const float* __restrict__ scale,
-    const float* epsilon,
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const T* __restrict__ scale,
+    const T* epsilon,
     const unsigned int len
 );

-/**
- * @brief Max reduction kernel
- *
- * @param d_vector Device pointer to vector
- * @param d_output Device pointer to output vector
- */
+template <typename T>
 __global__ void max_reduce(
-    const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
     const unsigned int len
 );

-/**
- * @brief
- *
- * @param d_vector Device pointer to vector
- * @param d_output Device pointer to output vector
- * @param len Length of the vector
- */
+template <typename T>
 __global__ void sum_reduce(
-    const float* __restrict__ d_vector,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
     const unsigned int len
 );

@@ -5,9 +5,10 @@

 namespace CUDANet::Kernels {

+template <typename T>
 __global__ void max_pool(
-    const float* __restrict__ d_input,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
     const Shape input_shape,
     const Shape output_shape,
     const Shape pool_shape,
@@ -15,9 +16,10 @@ __global__ void max_pool(
     const Shape padding_shape
 );

+template <typename T>
 __global__ void avg_pool(
-    const float* __restrict__ d_input,
-    float* __restrict__ d_output,
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
     const Shape input_shape,
     const Shape output_shape,
     const Shape pool_shape,
@@ -41,15 +41,15 @@
 #include "layers/concat.hpp"

 // ============================================================================
-// Utilities
+// Dataset Labels
 // ============================================================================

-#include "utils/imagenet.hpp"
+#include "datasets/imagenet.hpp"

 // ============================================================================
 // Backend-Specific Includes (conditionally compiled)
 // ============================================================================

 #ifdef USE_CUDA
-#include "backend/cuda/cuda_backend.cuh"
+#include "backend/cuda/all.cuh"
 #endif
@@ -16,6 +16,8 @@ namespace CUDANet {
  *
  */
 class Layer {
+   protected:
+    CUDANet::DType dtype;
   public:

    virtual ~Layer(){};
@@ -39,4 +41,4 @@ class Layer {
     virtual size_t get_biases_size() = 0;
 };

-} // namespace CUDANet::Layers
+} // namespace CUDANet
@@ -20,12 +20,13 @@ enum ActivationType { SIGMOID, RELU, SOFTMAX, NONE };
  * @brief Utility class that performs activation
  *
  */
-class Activation : public Layer {
+class Activation : public CUDANet::Layer {
   public:

     Activation() = default;

     Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::Backend* backend);
+    Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::DType dtype, CUDANet::Backend* backend);

     ~Activation() = default;

@@ -50,7 +51,7 @@ class Activation : public Layer {

   private:
     CUDANet::Backend* backend;
-    ActivationType activationType;
+    ActivationType activation_type;
     CUDANet::Shape shape;

     CUDANet::Tensor softmax_sum;
@@ -8,6 +8,7 @@ namespace CUDANet::Layers {
 class Add {
   public:
     Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend);
+    Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend* backend);

     ~Add();

@@ -19,6 +20,8 @@ class Add {
     CUDANet::Tensor output;

     CUDANet::Backend *backend;
+
+    CUDANet::DType dtype;
 };

 } // namespace CUDANet::Layers
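Note: the same two-constructor pattern repeats for every layer touched by this compare: the existing signature stays source-compatible, and a new overload adds an explicit CUDANet::DType. A usage sketch (shapes and backend assumed to exist):

    // Without a dtype the layer presumably falls back to the backend default;
    // the new overload pins it explicitly.
    CUDANet::Layers::Add add_default(a_shape, b_shape, backend);
    CUDANet::Layers::Add add_f32(a_shape, b_shape, CUDANet::DType::FLOAT32, backend);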
@@ -4,7 +4,7 @@

 namespace CUDANet::Layers {

-class AvgPool2d : public Layer {
+class AvgPool2d : public CUDANet::Layer {
   public:
     AvgPool2d(
         CUDANet::Shape input_shape,

@@ -13,6 +13,14 @@ class AvgPool2d : public Layer {
         CUDANet::Shape padding_shape,
         CUDANet::Backend *backend
     );
+    AvgPool2d(
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::DType dtype,
+        CUDANet::Backend *backend
+    );

     ~AvgPool2d();

@@ -50,6 +58,7 @@ class AvgPool2d : public Layer {
 class AdaptiveAvgPool2d : public AvgPool2d {
   public:
     AdaptiveAvgPool2d(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::Backend *backend);
+    AdaptiveAvgPool2d(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::DType dtype, CUDANet::Backend *backend);
 };

 } // namespace CUDANet::Layers
@@ -4,9 +4,10 @@

 namespace CUDANet::Layers {

-class BatchNorm2d : public Layer {
+class BatchNorm2d : public CUDANet::Layer {
   public:
     BatchNorm2d(CUDANet::Shape input_shape, float epsilon, CUDANet::Backend *backend);
+    BatchNorm2d(CUDANet::Shape input_shape, float epsilon, CUDANet::DType dtype, CUDANet::Backend *backend);

     ~BatchNorm2d();

@@ -12,6 +12,7 @@ class Concat {
   public:

     Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::Backend *backend);
+    Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend *backend);

     ~Concat();

@@ -27,6 +28,8 @@ class Concat {
     CUDANet::Tensor output;

     CUDANet::Backend *backend;
+
+    CUDANet::DType dtype;
 };

 } // namespace CUDANet::Layers
@@ -8,7 +8,7 @@ namespace CUDANet::Layers {
  * @brief 2D convolutional layer
  *
  */
-class Conv2d : public Layer {
+class Conv2d : public CUDANet::Layer {
   public:
     Conv2d(
         CUDANet::Shape input_shape,

@@ -17,6 +17,14 @@ class Conv2d : public Layer {
         CUDANet::Shape padding_shape,
         CUDANet::Backend* backend
     );
+    Conv2d(
+        CUDANet::Shape input_shape,
+        CUDANet::Shape kernel_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::DType dtype,
+        CUDANet::Backend* backend
+    );

     ~Conv2d();

@@ -9,10 +9,11 @@ namespace CUDANet::Layers {
  * @brief Dense (fully connected) layer
  *
  */
-class Dense : public Layer {
+class Dense : public CUDANet::Layer {
   public:

     Dense(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::Backend *backend);
+    Dense(CUDANet::Shape input_shape, CUDANet::Shape output_shape, CUDANet::DType dtype, CUDANet::Backend *backend);

     ~Dense();

@@ -4,7 +4,7 @@

 namespace CUDANet::Layers {

-class MaxPool2d : public Layer {
+class MaxPool2d : public CUDANet::Layer {
   public:
     MaxPool2d(
         CUDANet::Shape input_shape,

@@ -13,6 +13,14 @@ class MaxPool2d : public Layer {
         CUDANet::Shape padding_shape,
         CUDANet::Backend* backend
     );
+    MaxPool2d(
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::DType dtype,
+        CUDANet::Backend* backend
+    );
     ~MaxPool2d();

     CUDANet::Tensor& forward(CUDANet::Tensor &input) override;
@@ -15,10 +15,6 @@ class Module {

     CUDANet::Shape output_shape();

-    size_t input_size();
-
-    size_t output_size();
-
     void register_layer(const std::string& name, Layer* layer);

     void register_module(Module& module);
@@ -9,6 +9,7 @@
 #endif

 #include <format>
+#include <stdexcept>
 #include <vector>

 namespace CUDANet {

@@ -65,6 +66,12 @@ struct Shape {
     __host__ bool operator!=(const Shape& other) const {
         return !(*this == other);
     }
+
+    __host__ __device__ bool empty() const {
+        return ndim == 0;
+    }
+
+
 };

 std::string format_shape(const Shape& shape) {
@@ -16,11 +16,17 @@ enum class DType
     // INT32, // Not implemented yet
 };

+size_t dtype_size(DType dtype);
+
+// Forward declaration
+class Backend;
+
 class Tensor
 {
 public:

     Tensor() = default;
+    Tensor(Shape shape, CUDANet::Backend* backend);
     Tensor(Shape shape, DType dtype, CUDANet::Backend* backend);

     Tensor(Tensor&& other) noexcept;

@@ -30,30 +36,19 @@

     ~Tensor();

+    DType get_dtype() const;
+
     size_t size() const;
     size_t numel() const;

-    template <typename T>
-    const T* data() const {
-        return static_cast<T*>(d_ptr);
-    }
-
-    template <typename T>
-    T* data() {
-        return static_cast<T*>(d_ptr);
-    }
+    void* device_ptr() const;
+    void* device_ptr();

     void zero();

-    template <typename T>
-    void fill(T value) {
-        backend->fill(*this, value);
-    }
+    void fill(int value);

-    template <typename T>
-    void set_data(T *data) {
-        backend->copy_to_device(*this, data, total_size);
-    }
+    void set_data(void *data);

 private:
     Shape shape;
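Note: Tensor drops its header-inlined data<T>()/fill<T>()/set_data<T>() templates for a type-erased surface; the dtype travels with the tensor and the backend does the typed work. A usage sketch, given a valid shape and backend (their construction is not shown in this compare):

    CUDANet::Tensor t(shape, CUDANet::DType::FLOAT32, backend);

    t.zero();                 // forwarded to the backend
    t.fill(1);                // type-erased fill; takes an int now, not T
    float host[6] = {0, 1, 2, 3, 4, 5};
    t.set_data(host);         // copy host data to the device buffer

    // Callers that need the raw buffer cast the void* themselves:
    float* d = static_cast<float*>(t.device_ptr());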
src/backend_factory.cpp (new file, 40 lines)
@@ -0,0 +1,40 @@
+#include <stdexcept>
+#include <memory>
+
+#ifdef USE_CUDA
+#include "backend/cuda/cuda.cuh"
+#endif
+
+#include "backend.hpp"
+
+namespace CUDANet {
+
+std::unique_ptr<Backend> BackendFactory::create(BackendType backend_type, const BackendConfig& config) {
+    switch (backend_type)
+    {
+        case BackendType::CUDA_BACKEND:
+        {
+#ifdef USE_CUDA
+
+            if (!CUDANet::Backends::CUDA::is_cuda_available()) {
+                throw std::runtime_error("No CUDA devices found");
+            }
+
+            auto cuda = std::make_unique<CUDANet::Backends::CUDA>(config);
+            return cuda;
+
+#else
+            throw std::runtime_error("Library was compiled without CUDA support.");
+#endif
+        }
+        break;
+
+        default:
+            throw std::runtime_error("Invalid backend");
+            break;
+    }
+
+    return nullptr;
+}
+
+} // namespace CUDANet
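Note: the factory centralizes both failure modes: a CUDA-less build throws at the #else branch instead of failing at link time, and is_cuda_available() keeps a machine without a GPU from crashing inside device setup. A defensive caller might look like this (hypothetical code, not from the repository):

    #include <cstdio>
    #include <memory>
    #include <stdexcept>
    #include "backend.hpp"

    std::unique_ptr<CUDANet::Backend> make_backend() {
        try {
            return CUDANet::BackendFactory::create(CUDANet::CUDA_BACKEND, {});
        } catch (const std::runtime_error& e) {
            // "No CUDA devices found" or "Library was compiled without CUDA support."
            std::fprintf(stderr, "backend init failed: %s\n", e.what());
            throw;
        }
    }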
src/backends/cuda/cuda.cu (new file, 76 lines)
@@ -0,0 +1,76 @@
+#include <cuda_runtime.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <format>
+
+#include "backend/cuda/cuda.cuh"
+#include "tensor.hpp"
+
+using namespace CUDANet::Backends;
+
+CUDA::CUDA(const BackendConfig& config) {
+    device_id = config.device_id < 0 ? 0 : config.device_id;
+    supported_dtypes = {DType::FLOAT32};
+    default_dtype = DType::FLOAT32;
+    initialize();
+}
+
+bool CUDA::is_cuda_available() {
+    int device_count;
+    cudaError_t result = cudaGetDeviceCount(&device_count);
+
+    // Return false instead of crashing
+    if (result != cudaSuccess || device_count == 0) {
+        return false;
+    }
+    return true;
+}
+
+void CUDA::initialize() {
+    int device_count;
+    CUDA_CHECK(cudaGetDeviceCount(&device_count));
+    if (device_id >= device_count) {
+        throw std::runtime_error(std::format("Invalid device id {}, only {} devices available", device_id, device_count));
+    }
+
+    CUDA_CHECK(cudaSetDevice(device_id));
+
+    cudaDeviceProp deviceProp;
+    CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, device_id));
+
+    std::printf("Using CUDA device %d: %s\n", device_id, deviceProp.name);
+}
+
+bool CUDA::supports_dtype(DType dtype) const {
+    return supported_dtypes.contains(dtype);
+}
+
+void CUDA::set_default_dtype(DType dtype) {
+    if (!supported_dtypes.contains(dtype)) {
+        throw std::runtime_error("Unsupported dtype");
+    }
+
+    default_dtype = dtype;
+}
+
+CUDANet::DType CUDA::get_default_dtype() const {
+    if (default_dtype) {
+        return default_dtype.value();
+    }
+
+    const_cast<CUDA*>(this)->default_dtype = DType::FLOAT32;
+    return DType::FLOAT32;
+}
+
+void* CUDA::allocate(size_t bytes) {
+    void* d_ptr = nullptr;
+    CUDA_CHECK(cudaMalloc(&d_ptr, bytes));
+    return d_ptr;
+}
+
+void CUDA::deallocate(void* ptr) {
+    CUDA_CHECK(cudaFree(ptr));
+}
@@ -1,38 +0,0 @@
-#include <cuda_runtime.h>
-
-#include <cstdio>
-#include <cstdlib>
-
-#include "backend/cuda/cuda.cuh"
-
-cudaDeviceProp initializeCUDA() {
-    int deviceCount;
-    CUDA_CHECK(cudaGetDeviceCount(&deviceCount));
-
-    if (deviceCount == 0) {
-        std::fprintf(stderr, "No CUDA devices found. Exiting.\n");
-        std::exit(EXIT_FAILURE);
-    }
-
-    int device = 0;
-    CUDA_CHECK(cudaSetDevice(device));
-
-    cudaDeviceProp deviceProp;
-    CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, device));
-
-    std::printf("Using CUDA device %d: %s\n", device, deviceProp.name);
-
-    return deviceProp;
-}
-
-using namespace CUDANet::Backends;
-
-void* CUDA::allocate(size_t bytes) {
-    void* d_ptr = nullptr;
-    CUDA_CHECK(cudaMalloc(&d_ptr, bytes));
-    return d_ptr;
-}
-
-void CUDA::deallocate(void* ptr) {
-    CUDA_CHECK(cudaFree(ptr));
-}
@@ -2,10 +2,18 @@

 using namespace CUDANet;

-__global__ void Kernels::sigmoid(
+template
+__global__ void Kernels::sigmoid<float>(
     const float* __restrict__ src,
     float* __restrict__ dst,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::sigmoid(
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const unsigned int len
 ) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;

@@ -15,10 +23,17 @@ __global__ void Kernels::sigmoid(
     }
 }

-__global__ void Kernels::relu(
+template __global__ void Kernels::relu<float>(
     const float* __restrict__ src,
     float* __restrict__ dst,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::relu(
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const unsigned int len
 ) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
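Note: each kernel file now pairs the generic definition with an explicit instantiation, so nvcc emits concrete float device code in this translation unit while the header carries only declarations. The same idiom, condensed into a sketch (with the instantiation placed after the definition):

    // header: declaration only
    template <typename T>
    __global__ void relu(const T* __restrict__ src, T* __restrict__ dst,
                         const unsigned int len);

    // .cu file: one definition, one explicit instantiation per supported dtype
    template <typename T>
    __global__ void relu(const T* __restrict__ src, T* __restrict__ dst,
                         const unsigned int len) {
        int stride = gridDim.x * blockDim.x;
        int tid    = blockDim.x * blockIdx.x + threadIdx.x;
        for (int i = tid; i < len; i += stride) {
            // max(src, 0) without relying on float-only intrinsics
            dst[i] = src[i] > static_cast<T>(0) ? src[i] : static_cast<T>(0);
        }
    }
    template __global__ void relu<float>(const float* __restrict__,
                                         float* __restrict__, const unsigned int);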
@@ -4,7 +4,7 @@

 using namespace CUDANet;

-__global__ void Kernels::convolution(
+template __global__ void Kernels::convolution<float>(
     const float* __restrict__ d_input,
     const float* __restrict__ d_kernel,
     const float* __restrict__ d_bias,

@@ -14,6 +14,19 @@ __global__ void Kernels::convolution(
     const Shape kernel_shape,
     const Shape stride_shape,
     const Shape output_shape
+);
+
+template <typename T>
+__global__ void Kernels::convolution(
+    const T* __restrict__ d_input,
+    const T* __restrict__ d_kernel,
+    const T* __restrict__ d_bias,
+    T* __restrict__ d_output,
+    const Shape input_shape,
+    const Shape padding_shape,
+    const Shape kernel_shape,
+    const Shape stride_shape,
+    const Shape output_shape
 ) {
     int j = blockDim.x * blockIdx.x + threadIdx.x;
     int i = blockDim.y * blockIdx.y + threadIdx.y;

@@ -23,7 +36,7 @@ __global__ void Kernels::convolution(
         return;
     }

-    float sum = 0.0f;
+    T sum = static_cast<T>(0);

     // Iterate over kernel and input matrix
     for (int c = 0; c < input_shape[2]; c++) {
@@ -3,17 +3,26 @@

 using namespace CUDANet;

-__global__ void Kernels::mat_vec_mul(
+template __global__ void Kernels::mat_vec_mul<float>(
     const float* __restrict__ d_matrix,
     const float* __restrict__ d_vector,
     float* __restrict__ d_output,
     const unsigned int w,
     const unsigned int h
+);
+
+template <typename T>
+__global__ void Kernels::mat_vec_mul(
+    const T* __restrict__ d_matrix,
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
+    const unsigned int w,
+    const unsigned int h
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;

     if (tid < h) {
-        float temp = 0.0f;
+        T temp = static_cast<T>(0);

         for (unsigned int j = 0; j < w; j++) {
             temp += d_matrix[tid * w + j] * d_vector[j];

@@ -23,11 +32,19 @@ __global__ void Kernels::mat_vec_mul(
     }
 }

-__global__ void Kernels::vec_vec_add(
+template __global__ void Kernels::vec_vec_add<float>(
     const float* __restrict__ d_vector1,
     const float* __restrict__ d_vector2,
     float* __restrict__ d_output,
     const unsigned int w
+);
+
+template <typename T>
+__global__ void Kernels::vec_vec_add(
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
+    const unsigned int w
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= w) {

@@ -36,11 +53,19 @@ __global__ void Kernels::vec_vec_add(
     d_output[tid] = d_vector1[tid] + d_vector2[tid];
 }

-__global__ void Kernels::vec_vec_sub(
+template __global__ void Kernels::vec_vec_sub<float>(
     const float* __restrict__ d_vector1,
     const float* __restrict__ d_vector2,
     float* __restrict__ d_output,
     const unsigned int w
+);
+
+template <typename T>
+__global__ void Kernels::vec_vec_sub(
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
+    const unsigned int w
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= w) {

@@ -49,11 +74,19 @@ __global__ void Kernels::vec_vec_sub(
     d_output[tid] = d_vector1[tid] - d_vector2[tid];
 }

-__global__ void Kernels::vec_vec_mul(
+template __global__ void Kernels::vec_vec_mul<float>(
     const float* __restrict__ d_vector1,
     const float* __restrict__ d_vector2,
     float* __restrict__ d_output,
     const unsigned int w
+);
+
+template <typename T>
+__global__ void Kernels::vec_vec_mul(
+    const T* __restrict__ d_vector1,
+    const T* __restrict__ d_vector2,
+    T* __restrict__ d_output,
+    const unsigned int w
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= w) {

@@ -62,11 +95,19 @@ __global__ void Kernels::vec_vec_mul(
     d_output[tid] = d_vector1[tid] * d_vector2[tid];
 }

-__global__ void Kernels::vec_scalar_sub(
+template __global__ void Kernels::vec_scalar_sub<float>(
     const float* __restrict__ d_src,
     float* __restrict__ d_out,
     const float* __restrict__ d_scalar,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_scalar_sub(
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
+    const unsigned int len
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= len) {

@@ -75,11 +116,19 @@ __global__ void Kernels::vec_scalar_sub(
     d_out[tid] = d_src[tid] - *d_scalar;
 }

-__global__ void Kernels::vec_scalar_add(
+template __global__ void Kernels::vec_scalar_add<float>(
     const float* __restrict__ d_src,
     float* __restrict__ d_out,
     const float* __restrict__ d_scalar,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_scalar_add(
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
+    const unsigned int len
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= len) {

@@ -88,11 +137,19 @@ __global__ void Kernels::vec_scalar_add(
     d_out[tid] = d_src[tid] + *d_scalar;
 }

-__global__ void Kernels::vec_scalar_div(
+template __global__ void Kernels::vec_scalar_div<float>(
     const float* __restrict__ d_src,
     float* __restrict__ d_out,
     const float* __restrict__ d_scalar,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_scalar_div(
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
+    const unsigned int len
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= len) {

@@ -101,11 +158,19 @@ __global__ void Kernels::vec_scalar_div(
     d_out[tid] = d_src[tid] / *d_scalar;
 }

-__global__ void Kernels::vec_scalar_mul(
+template __global__ void Kernels::vec_scalar_mul<float>(
     const float* __restrict__ d_src,
     float* __restrict__ d_out,
     const float* __restrict__ d_scalar,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_scalar_mul(
+    const T* __restrict__ d_src,
+    T* __restrict__ d_out,
+    const T* __restrict__ d_scalar,
+    const unsigned int len
 ) {
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
     if (tid >= len) {

@@ -114,52 +179,85 @@ __global__ void Kernels::vec_scalar_mul(
     d_out[tid] = d_src[tid] * *d_scalar;
 }

-__global__ void Kernels::vec_exp(
+template __global__ void Kernels::vec_exp<float>(
     const float* __restrict__ src,
     float* __restrict__ dst,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_exp(
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const unsigned int len
 ) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;

     for (int i = tid; i < len; i += stride) {
+        // TODO: separate implementation for __half
         dst[i] = expf(src[i]);
     }
 }

-__global__ void Kernels::vec_sqrt(
+template __global__ void Kernels::vec_sqrt<float>(
     const float* __restrict__ src,
     float* __restrict__ dst,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_sqrt(
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const unsigned int len
 ) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;

     for (int i = tid; i < len; i += stride) {
+        // TODO: separate implementation for __half
         dst[i] = sqrtf(src[i]);
     }
 }

-__global__ void Kernels::vec_scale(
+template __global__ void Kernels::vec_scale<float>(
     const float* __restrict__ src,
     float* __restrict__ dst,
     const float* __restrict__ scale,
     const float* epsilon,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::vec_scale(
+    const T* __restrict__ src,
+    T* __restrict__ dst,
+    const T* __restrict__ scale,
+    const T* epsilon,
+    const unsigned int len
 ) {
     int idx = blockIdx.x * blockDim.x + threadIdx.x;
     if (idx < len) {
+        // TODO: separate implementation for __half
         float inv_std = rsqrtf(*scale + *epsilon);
         dst[idx] = src[idx] * inv_std;
     }
 }

-__global__ void Kernels::max_reduce(
+template __global__ void Kernels::max_reduce<float>(
     const float* __restrict__ d_vector,
     float* __restrict__ d_output,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::max_reduce(
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
+    const unsigned int len
 ) {
-    __shared__ float shared_max[BLOCK_SIZE];
+    __shared__ T shared_max[BLOCK_SIZE];
     int i = blockIdx.x * blockDim.x + threadIdx.x;

     if (i < len) {

@@ -172,6 +270,7 @@ __global__ void Kernels::max_reduce(

     for (int s = blockDim.x / 2; s > 0; s >>= 1) {
         if (threadIdx.x < s) {
+            // TODO: separate implementation for __half
             shared_max[threadIdx.x] = fmaxf(shared_max[threadIdx.x], shared_max[threadIdx.x + s]);
         }
         __syncthreads();

@@ -182,18 +281,25 @@
     }
 }

-__global__ void Kernels::sum_reduce(
+template __global__ void Kernels::sum_reduce<float>(
     const float* __restrict__ d_vector,
     float* __restrict__ d_output,
     const unsigned int len
+);
+
+template <typename T>
+__global__ void Kernels::sum_reduce(
+    const T* __restrict__ d_vector,
+    T* __restrict__ d_output,
+    const unsigned int len
 ) {
-    __shared__ float partial_sum[BLOCK_SIZE];
+    __shared__ T partial_sum[BLOCK_SIZE];
     int i = blockIdx.x * blockDim.x + threadIdx.x;

     if (i < len) {
         partial_sum[threadIdx.x] = d_vector[i];
     } else {
-        partial_sum[threadIdx.x] = 0.0f;
+        partial_sum[threadIdx.x] = static_cast<T>(0);
     }

     __syncthreads();
|
|||||||
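The explicit `template __global__ void ...<float>(...);` lines exist because a templated kernel defined in a .cu file is invisible to other translation units unless each supported type is instantiated there. A minimal sketch of the pattern, with file names and the free-function form chosen for illustration only (not the repository's actual layout):

// kernels.cuh -- declaration only, visible to callers
template <typename T>
__global__ void vec_scale(const T* src, T* dst, const T* scale,
                          const T* epsilon, unsigned int len);

// kernels.cu -- definition plus explicit instantiation; without the
// instantiation the linker has no float version to resolve against.
template <typename T>
__global__ void vec_scale(const T* src, T* dst, const T* scale,
                          const T* epsilon, unsigned int len) {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        dst[idx] = src[idx] * rsqrtf(*scale + *epsilon);
    }
}

template __global__ void vec_scale<float>(
    const float*, float*, const float*, const float*, unsigned int);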
@@ -3,7 +3,7 @@
 
 using namespace CUDANet;
 
-__global__ void Kernels::max_pool(
+template __global__ void Kernels::max_pool<float>(
     const float* __restrict__ d_input,
     float* __restrict__ d_output,
     const Shape input_shape,
@@ -11,6 +11,17 @@ __global__ void Kernels::max_pool(
     const Shape pool_shape,
     const Shape stride_shape,
     const Shape padding_shape
+);
+
+template <typename T>
+__global__ void Kernels::max_pool(
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
+    const Shape input_shape,
+    const Shape output_shape,
+    const Shape pool_shape,
+    const Shape stride_shape,
+    const Shape padding_shape
 ) {
     int j = blockDim.x * blockIdx.x + threadIdx.x;
     int i = blockDim.y * blockIdx.y + threadIdx.y;
@@ -20,7 +31,7 @@ __global__ void Kernels::max_pool(
         return;
     }
 
-    float max = 0.0f;
+    T max = static_cast<T>(0);
 
     for (int k = 0; k < pool_shape[0]; k++) {
         for (int l = 0; l < pool_shape[1]; l++) {
@@ -43,7 +54,7 @@ __global__ void Kernels::max_pool(
             max;
 }
 
-__global__ void Kernels::avg_pool(
+template __global__ void Kernels::avg_pool<float>(
     const float* __restrict__ d_input,
     float* __restrict__ d_output,
     const Shape input_shape,
@@ -51,6 +62,17 @@ __global__ void Kernels::avg_pool(
     const Shape pool_shape,
     const Shape stride_shape,
     const Shape padding_shape
+);
+
+template <typename T>
+__global__ void Kernels::avg_pool(
+    const T* __restrict__ d_input,
+    T* __restrict__ d_output,
+    const Shape input_shape,
+    const Shape output_shape,
+    const Shape pool_shape,
+    const Shape stride_shape,
+    const Shape padding_shape
 ) {
     int j = blockDim.x * blockIdx.x + threadIdx.x;
     int i = blockDim.y * blockIdx.y + threadIdx.y;
@@ -60,7 +82,7 @@ __global__ void Kernels::avg_pool(
         return;
     }
 
-    float sum = 0.0f;
+    T sum = static_cast<T>(0);
 
     for (int k = 0; k < pool_shape[0]; k++) {
         for (int l = 0; l < pool_shape[1]; l++) {
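Each pooling thread owns one output position and returns early when it falls outside the output bounds, so the host grid only has to tile the output shape. A sketch of a matching launch; the 8x8x8 block mirrors the dim3 block(8, 8, 8) used by the backend further down, while the parameter names and Shape indexing here are assumptions for illustration:

void launch_max_pool_f32(const float* d_in, float* d_out,
                         CUDANet::Shape in_shape, CUDANet::Shape out_shape,
                         CUDANet::Shape pool, CUDANet::Shape stride,
                         CUDANet::Shape pad) {
    dim3 block(8, 8, 8);
    dim3 grid((out_shape[0] + block.x - 1) / block.x,   // output columns
              (out_shape[1] + block.y - 1) / block.y,   // output rows
              (out_shape[2] + block.z - 1) / block.z);  // channels
    CUDANet::Kernels::max_pool<float><<<grid, block>>>(
        d_in, d_out, in_shape, out_shape, pool, stride, pad);
}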
@@ -7,24 +7,70 @@
 using namespace CUDANet::Backends;
 
 void CUDA::relu(Tensor& tensor) {
+    switch (tensor.get_dtype()) {
+        case DType::FLOAT32:
+            relu_impl<float>(tensor);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::relu_impl<float>(Tensor& tensor);
+
+template <typename T>
+void CUDA::relu_impl(Tensor& tensor) {
     int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
     Kernels::relu<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<float>(), tensor.data<float>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
 }
 
-void CUDA::sigmoid(Tensor& tensor) {
+void CUDA::sigmoid(CUDANet::Tensor& tensor) {
+    switch (tensor.get_dtype()) {
+        case DType::FLOAT32:
+            sigmoid_impl<float>(tensor);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::sigmoid_impl<float>(Tensor& tensor);
+
+template <typename T>
+void CUDA::sigmoid_impl(CUDANet::Tensor& tensor) {
     int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
     Kernels::sigmoid<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<float>(), tensor.data<float>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
 }
 
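Every public entry point now follows the same shape: a switch maps the tensor's runtime DType onto a compile-time template parameter, with one case per supported dtype. If more dtypes land, this boilerplate could be factored into a helper; a minimal sketch, where the CUDANET_DISPATCH macro is hypothetical and not part of the repository:

// Hypothetical helper: map a runtime DType onto a templated call.
// New dtypes would add a case here once the kernels are explicitly
// instantiated for them.
#define CUDANET_DISPATCH(dtype, fn, ...)                      \
    switch (dtype) {                                          \
        case DType::FLOAT32:                                  \
            fn<float>(__VA_ARGS__);                           \
            break;                                            \
        default:                                              \
            throw std::runtime_error("Unsupported dtype");    \
    }

void CUDA::relu(Tensor& tensor) {
    CUDANET_DISPATCH(tensor.get_dtype(), relu_impl, tensor);
}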
 void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
+    switch (tensor.get_dtype()) {
+        case DType::FLOAT32:
+            softmax_impl<float>(tensor, temp_max, temp_sum);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void
+CUDA::softmax_impl<float>(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum);
+
+template <typename T>
+void CUDA::softmax_impl(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
     int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     // Find max value
@@ -32,14 +78,13 @@ void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
 
     // Subtract max value to improve numerical stability
     Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<float>(), tensor.data<float>(), temp_max.data<float>(),
-        tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(temp_max.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
 
     // Compute exponentials
     Kernels::vec_exp<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<float>(), tensor.data<float>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
 
@@ -47,8 +92,7 @@ void CUDA::softmax(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
     sum(tensor, temp_sum);
 
     Kernels::vec_scalar_div<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<float>(), tensor.data<float>(), temp_sum.data<float>(),
-        tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(temp_sum.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
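The launch sequence above is the numerically stable softmax, softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)); subtracting the max first keeps exp from overflowing. A single-threaded CPU reference of the same max-subtract / exponentiate / normalize order, useful for checking the device path:

#include <algorithm>
#include <cmath>
#include <vector>

// CPU reference for the device softmax pipeline.
std::vector<float> softmax_ref(std::vector<float> x) {
    float mx = *std::max_element(x.begin(), x.end());
    float sum = 0.0f;
    for (float& v : x) {
        v = std::exp(v - mx);  // shift by max for stability
        sum += v;
    }
    for (float& v : x) v /= sum;
    return x;
}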
@@ -61,20 +105,50 @@ CUDANet::Tensor& CUDA::dense(
     CUDANet::Tensor& output,
     const size_t input_size,
     const size_t output_size
+) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            return dense_impl<float>(
+                weights, biases, input, output, input_size, output_size
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::dense_impl<float>(
+    const CUDANet::Tensor& weights,
+    const CUDANet::Tensor& biases,
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    const size_t input_size,
+    const size_t output_size
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::dense_impl(
+    const CUDANet::Tensor& weights,
+    const CUDANet::Tensor& biases,
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    const size_t input_size,
+    const size_t output_size
 ) {
     auto forwardGridSize =
         (std::max(input_size, output_size) + BLOCK_SIZE - 1) / BLOCK_SIZE;
     auto biasGridSize = (output_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>(
-        weights.data<float>(), input.data<float>(), output.data<float>(),
-        input_size, output_size
+        static_cast<const T*>(weights.device_ptr()), static_cast<const T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_size,
+        output_size
     );
     CUDA_CHECK(cudaGetLastError());
 
     Kernels::vec_vec_add<<<biasGridSize, BLOCK_SIZE>>>(
-        biases.data<float>(), output.data<float>(), output.data<float>(),
-        output_size
+        static_cast<const T*>(biases.device_ptr()), static_cast<T*>(output.device_ptr()), static_cast<T*>(output.device_ptr()), output_size
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
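dense_impl computes the usual affine map y = Wx + b as two launches: a matrix-vector product followed by an element-wise bias add. A CPU reference of the intended semantics, assuming W is stored row-major as output_size x input_size (the storage order is an assumption here, not something this diff confirms):

#include <cstddef>
#include <vector>

// y = W x + b, W row-major (out x in). Mirrors the two device
// launches: mat_vec_mul, then vec_vec_add for the bias.
void dense_ref(const std::vector<float>& W, const std::vector<float>& b,
               const std::vector<float>& x, std::vector<float>& y,
               std::size_t in, std::size_t out) {
    for (std::size_t o = 0; o < out; ++o) {
        float acc = 0.0f;
        for (std::size_t i = 0; i < in; ++i) {
            acc += W[o * in + i] * x[i];
        }
        y[o] = acc + b[o];
    }
}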
@@ -92,6 +166,44 @@ CUDANet::Tensor& CUDA::conv2d(
     const CUDANet::Shape kernel_shape,
     const CUDANet::Shape stride_shape,
     const CUDANet::Shape out_shape
+) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            return conv2d_impl<float>(
+                weights, biases, input, output, in_shape, padding_shape,
+                kernel_shape, stride_shape, out_shape
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::conv2d_impl<float>(
+    const CUDANet::Tensor& weights,
+    const CUDANet::Tensor& biases,
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    const CUDANet::Shape in_shape,
+    const CUDANet::Shape padding_shape,
+    const CUDANet::Shape kernel_shape,
+    const CUDANet::Shape stride_shape,
+    const CUDANet::Shape out_shape
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::conv2d_impl(
+    const CUDANet::Tensor& weights,
+    const CUDANet::Tensor& biases,
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    const CUDANet::Shape in_shape,
+    const CUDANet::Shape padding_shape,
+    const CUDANet::Shape kernel_shape,
+    const CUDANet::Shape stride_shape,
+    const CUDANet::Shape out_shape
 ) {
     dim3 block(8, 8, 8);
     dim3 grid(
@@ -101,9 +213,8 @@ CUDANet::Tensor& CUDA::conv2d(
     );
 
     Kernels::convolution<<<grid, block>>>(
-        input.data<float>(), weights.data<float>(), biases.data<float>(),
-        output.data<float>(), in_shape, padding_shape, kernel_shape,
-        stride_shape, out_shape
+        static_cast<const T*>(input.device_ptr()), static_cast<const T*>(weights.device_ptr()), static_cast<const T*>(biases.device_ptr()), static_cast<T*>(output.device_ptr()),
+        in_shape, padding_shape, kernel_shape, stride_shape, out_shape
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
@@ -119,6 +230,40 @@ CUDANet::Tensor& CUDA::max_pool2d(
     CUDANet::Shape stride_shape,
     CUDANet::Shape padding_shape,
     CUDANet::Shape output_shape
+) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            return max_pool2d_impl<float>(
+                input, output, input_shape, pool_shape, stride_shape,
+                padding_shape, output_shape
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::max_pool2d_impl<float>(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::Shape output_shape
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::max_pool2d_impl(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::Shape output_shape
 ) {
     dim3 block(8, 8, 8);
     dim3 grid(
@@ -128,8 +273,8 @@ CUDANet::Tensor& CUDA::max_pool2d(
     );
 
     Kernels::max_pool<<<grid, block>>>(
-        input.data<float>(), output.data<float>(), input_shape, output_shape, pool_shape,
-        stride_shape, padding_shape
+        static_cast<const T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_shape, output_shape,
+        pool_shape, stride_shape, padding_shape
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
@@ -145,6 +290,40 @@ CUDANet::Tensor& CUDA::avg_pool2d(
     CUDANet::Shape stride_shape,
     CUDANet::Shape padding_shape,
     CUDANet::Shape output_shape
+) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            return avg_pool2d_impl<float>(
+                input, output, input_shape, pool_shape, stride_shape,
+                padding_shape, output_shape
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::avg_pool2d_impl<float>(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::Shape output_shape
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::avg_pool2d_impl(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::Shape output_shape
 ) {
     dim3 block(8, 8, 8);
     dim3 grid(
@@ -154,8 +333,8 @@ CUDANet::Tensor& CUDA::avg_pool2d(
     );
 
     Kernels::avg_pool<<<grid, block>>>(
-        input.data<float>(), output.data<float>(), input_shape, output_shape, pool_shape,
-        stride_shape, padding_shape
+        static_cast<const T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_shape, output_shape,
+        pool_shape, stride_shape, padding_shape
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
@@ -172,41 +351,77 @@ CUDANet::Tensor& CUDA::batch_norm(
     CUDANet::Tensor& running_mean,
     CUDANet::Tensor& running_var,
     CUDANet::Tensor& epsilon
+) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            return batch_norm_impl<float>(
+                input, output, input_shape, weights, biases, running_mean,
+                running_var, epsilon
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::batch_norm_impl<float>(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Tensor& weights,
+    CUDANet::Tensor& biases,
+    CUDANet::Tensor& running_mean,
+    CUDANet::Tensor& running_var,
+    CUDANet::Tensor& epsilon
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::batch_norm_impl(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Tensor& weights,
+    CUDANet::Tensor& biases,
+    CUDANet::Tensor& running_mean,
+    CUDANet::Tensor& running_var,
+    CUDANet::Tensor& epsilon
 ) {
     auto gridSize =
         (input_shape[0] * input_shape[1] + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
 
     for (int i = 0; i < input_shape[2]; i++) {
         // Subtract mean from input
         Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
-            input.data<float>() + i * input_shape[0] * input_shape[1],
-            output.data<float>() + i * input_shape[0] * input_shape[1],
-            &running_mean.data<float>()[i], input_shape[0] * input_shape[1]
+            static_cast<const T*>(input.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(running_mean.device_ptr())[i], input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
 
         // Divide by sqrt(running_var + epsilon)
         Kernels::vec_scale<<<gridSize, BLOCK_SIZE>>>(
-            output.data<float>() + i * input_shape[0] * input_shape[1],
-            output.data<float>() + i * input_shape[0] * input_shape[1],
-            &running_var.data<float>()[i], epsilon.data<float>(), input_shape[0] * input_shape[1]
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(running_var.device_ptr())[i], static_cast<T*>(epsilon.device_ptr()),
+            input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
 
         // Multiply by weights
         Kernels::vec_scalar_mul<<<gridSize, BLOCK_SIZE>>>(
-            output.data<float>() + i * input_shape[0] * input_shape[1],
-            output.data<float>() + i * input_shape[0] * input_shape[1], &weights.data<float>()[i],
-            input_shape[0] * input_shape[1]
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(weights.device_ptr())[i], input_shape[0] * input_shape[1]
        );
         CUDA_CHECK(cudaGetLastError());
 
         // Add biases
         Kernels::vec_scalar_add<<<gridSize, BLOCK_SIZE>>>(
-            output.data<float>() + i * input_shape[0] * input_shape[1],
-            output.data<float>() + i * input_shape[0] * input_shape[1], &biases.data<float>()[i],
-            input_shape[0] * input_shape[1]
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(biases.device_ptr())[i], input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
     }
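The per-channel loop is inference-mode batch norm, y = gamma * (x - mu) / sqrt(sigma^2 + eps) + beta, decomposed into four element-wise launches (subtract, scale, multiply, add). A scalar reference for one element of channel c:

#include <cmath>

// The four kernel launches compose to exactly this expression.
float batch_norm_ref(float x, float mean_c, float var_c,
                     float gamma_c, float beta_c, float eps) {
    float inv_std = 1.0f / std::sqrt(var_c + eps);
    return gamma_c * (x - mean_c) * inv_std + beta_c;
}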
@@ -218,14 +433,39 @@ CUDANet::Tensor& CUDA::concat(
     CUDANet::Tensor& input_a,
     CUDANet::Tensor& input_b,
     CUDANet::Tensor& output
+) {
+    switch (input_a.get_dtype()) {
+        case DType::FLOAT32:
+            return concat_impl<float>(
+                input_a, input_b, output
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::concat_impl<float>(
+    CUDANet::Tensor& input_a,
+    CUDANet::Tensor& input_b,
+    CUDANet::Tensor& output
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::concat_impl(
+    CUDANet::Tensor& input_a,
+    CUDANet::Tensor& input_b,
+    CUDANet::Tensor& output
 ) {
     CUDA_CHECK(cudaMemcpy(
-        output.data<float>(), input_a.data<float>(), input_a.size(),
+        static_cast<T*>(output.device_ptr()), static_cast<const T*>(input_a.device_ptr()), input_a.size(),
         cudaMemcpyDeviceToDevice
     ));
 
     CUDA_CHECK(cudaMemcpy(
-        output.data<float>() + input_a.numel(), input_b.data<float>(), input_b.size(),
+        static_cast<T*>(output.device_ptr()) + input_a.numel(), static_cast<const T*>(input_b.device_ptr()), input_b.size(),
         cudaMemcpyDeviceToDevice
     ));
 
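One detail worth flagging in concat_impl: the destination offset is in elements (T* pointer arithmetic with numel()) while the copy length is in bytes (size()); the two agree because T* + n advances n * sizeof(T) bytes. A small helper capturing the invariant, written as a sketch rather than repository code:

#include <cstddef>
#include <cuda_runtime.h>

// Copy tensor b immediately after tensor a inside dst.
// Offset in elements, length in bytes; consistent because
// (dst + numel_a) advances numel_a * sizeof(T) bytes.
template <typename T>
void append_device(T* dst, std::size_t numel_a,
                   const T* b, std::size_t bytes_b) {
    cudaMemcpy(dst + numel_a, b, bytes_b, cudaMemcpyDeviceToDevice);
}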
@@ -239,11 +479,36 @@ CUDANet::Tensor& CUDA::add(
     CUDANet::Tensor& input_a,
     CUDANet::Tensor& input_b,
     CUDANet::Tensor& output
+) {
+    switch (input_a.get_dtype()) {
+        case DType::FLOAT32:
+            return add_impl<float>(
+                input_a, input_b, output
+            );
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template CUDANet::Tensor& CUDA::add_impl<float>(
+    CUDANet::Tensor& input_a,
+    CUDANet::Tensor& input_b,
+    CUDANet::Tensor& output
+);
+
+template <typename T>
+CUDANet::Tensor& CUDA::add_impl(
+    CUDANet::Tensor& input_a,
+    CUDANet::Tensor& input_b,
+    CUDANet::Tensor& output
 ) {
     auto gridSize = (input_a.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
-        input_a.data<float>(), input_b.data<float>(), output.data<float>(), input_a.numel()
+        static_cast<const T*>(input_a.device_ptr()), static_cast<const T*>(input_b.device_ptr()), static_cast<T*>(output.device_ptr()), input_a.numel()
    );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
@@ -7,11 +7,26 @@
 using namespace CUDANet::Backends;
 
 void CUDA::print(const CUDANet::Tensor &input) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            print_impl<float>(input);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::print_impl<float> (const CUDANet::Tensor &input);
+
+template <typename T>
+void CUDA::print_impl(const CUDANet::Tensor &input) {
     auto length = input.numel();
-    std::vector<float> h_vec(input.numel());
+    std::vector<T> h_vec(input.numel());
 
     CUDA_CHECK(cudaMemcpy(
-        h_vec.data(), input.data<float>(), sizeof(float) * length, cudaMemcpyDeviceToHost
+        h_vec.data(), static_cast<const T*>(input.device_ptr()), sizeof(T) * length, cudaMemcpyDeviceToHost
     ));
 
     for (int i = 0; i < length; ++i) {
@@ -26,27 +41,71 @@ void CUDA::zero(CUDANet::Tensor &input) {
 }
 
 void CUDA::fill(CUDANet::Tensor &input, int value) {
-    CUDA_CHECK(cudaMemset(input.data<float>(), value, sizeof(float) * input.numel()));
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            fill_impl<float>(input, value);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::fill_impl<float>(CUDANet::Tensor &input, int value);
+
+template <typename T>
+void CUDA::fill_impl(CUDANet::Tensor &input, int value) {
+    CUDA_CHECK(cudaMemset(static_cast<T*>(input.device_ptr()), value, sizeof(T) * input.numel()));
 }
 
 void CUDA::copy_to_device(CUDANet::Tensor &tensor, void *data, size_t size) {
-    CUDA_CHECK(cudaMemcpy(tensor.data<float>(), data, size, cudaMemcpyHostToDevice));
+    switch (tensor.get_dtype()) {
+        case DType::FLOAT32:
+            copy_to_device_impl<float>(tensor, data, size);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::copy_to_device_impl<float>(CUDANet::Tensor &tensor, void *data, size_t size);
+
+template <typename T>
+void CUDA::copy_to_device_impl(CUDANet::Tensor &tensor, void *data, size_t size) {
+    CUDA_CHECK(cudaMemcpy(static_cast<T*>(tensor.device_ptr()), data, size, cudaMemcpyHostToDevice));
 }
 
 void CUDA::sum(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            sum_impl<float>(input, sum);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::sum_impl<float>(const CUDANet::Tensor &input, CUDANet::Tensor &sum);
+
+template <typename T>
+void CUDA::sum_impl(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
     auto length = input.numel();
-    const int gridSize = ( + BLOCK_SIZE - 1) / BLOCK_SIZE;
+    const int gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     CUDANet::Kernels::sum_reduce<<<gridSize, BLOCK_SIZE>>>(
-        input.data<float>(), sum.data<float>(), length
+        static_cast<const T*>(input.device_ptr()), static_cast<T*>(sum.device_ptr()), length
     );
     CUDA_CHECK(cudaGetLastError());
 
     int remaining = gridSize;
     while (remaining > 1) {
         int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE;
-        CUDANet::Kernels::sum_reduce<<<blocks_needed, BLOCK_SIZE>>>(sum.data<float>(), sum.data<float>(), remaining);
+        CUDANet::Kernels::sum_reduce<<<blocks_needed, BLOCK_SIZE>>>(static_cast<T*>(sum.device_ptr()), static_cast<T*>(sum.device_ptr()), remaining);
         CUDA_CHECK(cudaGetLastError());
 
         remaining = blocks_needed;
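sum (and max below) run a multi-pass tree reduction: the first launch leaves one partial result per block, then the same kernel is re-launched over the partials until a single value remains, converging in O(log n / log BLOCK_SIZE) passes. A CPU analogue of the in-place multi-pass scheme:

#include <algorithm>
#include <cstddef>
#include <vector>

// Each "pass" collapses groups of BLOCK elements into one partial,
// in place, until a single value remains at index 0.
constexpr std::size_t BLOCK = 128;

float sum_multipass(std::vector<float> v) {
    std::size_t remaining = v.size();
    while (remaining > 1) {
        std::size_t blocks = (remaining + BLOCK - 1) / BLOCK;
        for (std::size_t b = 0; b < blocks; ++b) {
            float acc = 0.0f;
            std::size_t end = std::min(remaining, (b + 1) * BLOCK);
            for (std::size_t i = b * BLOCK; i < end; ++i) acc += v[i];
            v[b] = acc;  // partial for block b
        }
        remaining = blocks;
    }
    return v[0];
}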
@@ -54,17 +113,32 @@ void CUDA::sum(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
 }
 
 void CUDA::max(const CUDANet::Tensor &input, CUDANet::Tensor &max) {
+    switch (input.get_dtype()) {
+        case DType::FLOAT32:
+            max_impl<float>(input, max);
+            break;
+
+        default:
+            throw std::runtime_error("Unsupported dtype");
+            break;
+    }
+}
+
+template void CUDA::max_impl<float>(const CUDANet::Tensor &input, CUDANet::Tensor &max);
+
+template <typename T>
+void CUDA::max_impl(const CUDANet::Tensor &input, CUDANet::Tensor &max) {
     auto length = input.numel();
     const int grid_size = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
-    Kernels::max_reduce<<<grid_size, BLOCK_SIZE>>>(input.data<float>(), max.data<float>(), length);
+    Kernels::max_reduce<<<grid_size, BLOCK_SIZE>>>(static_cast<const T*>(input.device_ptr()), static_cast<T*>(max.device_ptr()), length);
     CUDA_CHECK(cudaGetLastError());
 
     int remaining = grid_size;
 
     while (remaining > 1) {
         int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE;
-        CUDANet::Kernels::max_reduce<<<blocks_needed, BLOCK_SIZE>>>(max.data<float>(), max.data<float>(), remaining);
+        CUDANet::Kernels::max_reduce<<<blocks_needed, BLOCK_SIZE>>>(static_cast<T*>(max.device_ptr()), static_cast<T*>(max.device_ptr()), remaining);
         CUDA_CHECK(cudaGetLastError());
 
         remaining = blocks_needed;
@@ -2,13 +2,29 @@
 #include <stdexcept>
 #include <vector>
 
-#include "activation.hpp"
+#include "layers/activation.hpp"
 #include "tensor.hpp"
 
 
 using namespace CUDANet::Layers;
 
-Activation::Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::Backend* backend)
-    : backend(backend), activationType(activation), shape(shape) {
+Activation::Activation(
+    ActivationType activation,
+    const CUDANet::Shape& shape,
+    CUDANet::Backend* backend
+)
+    : Activation(activation, shape, backend->get_default_dtype(), backend) {}
+
+Activation::Activation(
+    ActivationType activation,
+    const CUDANet::Shape& shape,
+    CUDANet::DType dtype,
+    CUDANet::Backend* backend
+)
+    : activation_type(activation),
+      shape(shape),
+      backend(backend) {
+    this->dtype = dtype;
 
     if (shape.size() != 1) {
         throw InvalidShapeException("input", 1, shape.size());
@@ -16,15 +32,16 @@ Activation::Activation(ActivationType activation, const CUDANet::Shape &shape, C
 
     auto length = shape[0];
 
-    if (activationType == SOFTMAX) {
-        softmax_sum = CUDANet::Tensor({static_cast<size_t>(length)}, CUDANet::DType::FLOAT32, backend);
-        tensor_max = CUDANet::Tensor({static_cast<size_t>(length)}, CUDANet::DType::FLOAT32, backend);
+    if (activation_type == SOFTMAX) {
+        softmax_sum =
+            CUDANet::Tensor({static_cast<size_t>(length)}, dtype, backend);
+        tensor_max =
+            CUDANet::Tensor({static_cast<size_t>(length)}, dtype, backend);
     }
 }
 
 CUDANet::Tensor& Activation::forward(CUDANet::Tensor& input) {
-    switch (activationType)
-    {
+    switch (activation_type) {
     case ActivationType::SIGMOID:
         backend->sigmoid(input);
         break;
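Every layer in this change gains a second constructor taking an explicit DType, with the old signature delegating to it through backend->get_default_dtype(), so existing call sites keep compiling unchanged. The shape of the pattern, reduced to a self-contained toy (all names here are illustrative):

#include <stdexcept>

enum class DType { FLOAT32 };

struct Backend {
    DType get_default_dtype() const { return DType::FLOAT32; }
};

// Old signature stays source-compatible by delegating to the new
// dtype-aware constructor.
struct Layer {
    Layer(Backend* backend)
        : Layer(backend->get_default_dtype(), backend) {}

    Layer(DType dtype, Backend* backend)
        : dtype(dtype), backend(backend) {}

    DType dtype;
    Backend* backend;
};

int main() {
    Backend b;
    Layer implicit(&b);                     // picks the backend default
    Layer explicit_fp32(DType::FLOAT32, &b);
}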
@@ -1,9 +1,13 @@
-#include "add.hpp"
+#include "layers/add.hpp"
 
 using namespace CUDANet::Layers;
 
 
-Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend) : backend(backend) {
+Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backend)
+    : Add(a_shape, b_shape, backend->get_default_dtype(), backend) {}
+
+Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend* backend)
+    : backend(backend), dtype(dtype) {
     if (a_shape != b_shape) {
         throw InvalidShapeException(
             "Add requires matching dimensions", a_shape, b_shape
@@ -11,7 +15,7 @@ Add::Add(CUDANet::Shape a_shape, CUDANet::Shape b_shape, CUDANet::Backend* backe
     }
 
     out_shape = a_shape;
-    output = CUDANet::Tensor(out_shape, CUDANet::DType::FLOAT32, backend);
+    output = CUDANet::Tensor(out_shape, dtype, backend);
 }
 
 Add::~Add() {}
@@ -1,7 +1,7 @@
+#include <format>
 #include <stdexcept>
 
-#include "avg_pool.hpp"
-#include <format>
+#include "layers/avg_pool.hpp"
 
 using namespace CUDANet::Layers;
 
@@ -11,6 +11,16 @@ AvgPool2d::AvgPool2d(
     CUDANet::Shape stride_shape,
     CUDANet::Shape padding_shape,
     CUDANet::Backend* backend
+)
+    : AvgPool2d(input_shape, pool_shape, stride_shape, padding_shape, backend->get_default_dtype(), backend) {}
+
+AvgPool2d::AvgPool2d(
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::DType dtype,
+    CUDANet::Backend* backend
 )
     : in_shape(input_shape),
       pool_shape(pool_shape),
@@ -33,6 +43,8 @@ AvgPool2d::AvgPool2d(
         throw InvalidShapeException("padding", 2, padding_shape.size());
     }
 
+    this->dtype = dtype;
+
     out_shape = {
         (in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
             1,
@@ -43,7 +55,7 @@ AvgPool2d::AvgPool2d(
 
     output = CUDANet::Tensor(
         Shape{out_shape[0] * out_shape[1] * out_shape[2]},
-        CUDANet::DType::FLOAT32, backend
+        dtype, backend
     );
 }
 
@@ -72,11 +84,11 @@ CUDANet::Shape AvgPool2d::output_shape() {
 }
 
 size_t AvgPool2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t AvgPool2d::output_size() {
-    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+    return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
 }
 
 void AvgPool2d::set_weights(void* input) {}
@@ -96,6 +108,14 @@ AdaptiveAvgPool2d::AdaptiveAvgPool2d(
     CUDANet::Shape input_shape,
     CUDANet::Shape output_shape,
     CUDANet::Backend *backend
+)
+    : AdaptiveAvgPool2d(input_shape, output_shape, backend->get_default_dtype(), backend) {}
+
+AdaptiveAvgPool2d::AdaptiveAvgPool2d(
+    CUDANet::Shape input_shape,
+    CUDANet::Shape output_shape,
+    CUDANet::DType dtype,
+    CUDANet::Backend *backend
 )
     : AvgPool2d(
           input_shape,
@@ -114,12 +134,13 @@ AdaptiveAvgPool2d::AdaptiveAvgPool2d(
           (input_shape[0] - (output_shape[0] - 1) * (input_shape[0] / output_shape[0]) - 1) / 2,
           (input_shape[1] - (output_shape[1] - 1) * (input_shape[1] / output_shape[1]) - 1) / 2
       },
+      dtype,
       backend
   ) {
     out_shape = output_shape;
 
     output = CUDANet::Tensor(
         Shape{out_shape[0] * out_shape[1] * out_shape[2]},
-        CUDANet::DType::FLOAT32, backend
+        dtype, backend
     );
 }
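The out_shape arithmetic in the constructor is the standard pooling size formula, out = (in + 2p - k) / s + 1 with integer division. A compile-time check of two common cases:

#include <cstddef>

// Pooling output size: out = (in + 2*pad - k) / stride + 1.
constexpr std::size_t pool_out(std::size_t in, std::size_t k,
                               std::size_t s, std::size_t p) {
    return (in + 2 * p - k) / s + 1;
}
static_assert(pool_out(32, 2, 2, 0) == 16, "2x2, stride 2 halves the map");
static_assert(pool_out(7, 3, 1, 1) == 7,  "3x3, stride 1, pad 1 preserves size");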
@@ -1,9 +1,7 @@
-#include "batch_norm.hpp"
-
 #include <stdexcept>
 #include <vector>
 
-#include "activation.hpp"
+#include "layers/batch_norm.hpp"
 #include "layer.hpp"
 
 using namespace CUDANet::Layers;
@@ -12,6 +10,14 @@ BatchNorm2d::BatchNorm2d(
     CUDANet::Shape input_shape,
     float eps,
     CUDANet::Backend *backend
+)
+    : BatchNorm2d(input_shape, eps, backend->get_default_dtype(), backend) {}
+
+BatchNorm2d::BatchNorm2d(
+    CUDANet::Shape input_shape,
+    float eps,
+    CUDANet::DType dtype,
+    CUDANet::Backend *backend
 )
     : in_shape(input_shape), backend(backend) {
 
@@ -19,22 +25,24 @@ BatchNorm2d::BatchNorm2d(
         throw InvalidShapeException("input", 3, in_shape.size());
     }
 
-    epsilon = CUDANet::Tensor({1}, CUDANet::DType::FLOAT32, backend);
-    epsilon.set_data<float>(&eps);
+    this->dtype = dtype;
 
-    running_mean = CUDANet::Tensor({in_shape[2]}, CUDANet::DType::FLOAT32, backend);
+    epsilon = CUDANet::Tensor({1}, dtype, backend);
+    epsilon.set_data(&eps);
+
+    running_mean = CUDANet::Tensor({in_shape[2]}, dtype, backend);
     running_mean.zero();
 
-    running_var = CUDANet::Tensor({in_shape[2]}, CUDANet::DType::FLOAT32, backend);
+    running_var = CUDANet::Tensor({in_shape[2]}, dtype, backend);
     running_var.fill(1);
 
-    weights = CUDANet::Tensor({in_shape[2]}, CUDANet::DType::FLOAT32, backend);
+    weights = CUDANet::Tensor({in_shape[2]}, dtype, backend);
     weights.fill(1);
 
-    biases = CUDANet::Tensor({in_shape[2]}, CUDANet::DType::FLOAT32, backend);
+    biases = CUDANet::Tensor({in_shape[2]}, dtype, backend);
     biases.zero();
 
-    output = CUDANet::Tensor(in_shape, CUDANet::DType::FLOAT32, backend);
+    output = CUDANet::Tensor(in_shape, dtype, backend);
 }
 
 BatchNorm2d::~BatchNorm2d() {}
@@ -63,15 +71,15 @@ CUDANet::Shape BatchNorm2d::output_shape() {
 }
 
 size_t BatchNorm2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t BatchNorm2d::output_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 void BatchNorm2d::set_weights(void* input) {
-    weights.set_data<float>(static_cast<float*>(input));
+    weights.set_data(input);
 }
 
 size_t BatchNorm2d::get_weights_size() {
@@ -79,7 +87,7 @@ size_t BatchNorm2d::get_weights_size() {
 }
 
 void BatchNorm2d::set_biases(void* input) {
-    biases.set_data<float>(static_cast<float*>(input));
+    biases.set_data(input);
 }
 
 size_t BatchNorm2d::get_biases_size() {
@@ -87,7 +95,7 @@ size_t BatchNorm2d::get_biases_size() {
 }
 
 void BatchNorm2d::set_running_mean(void* input) {
-    running_mean.set_data<float>(static_cast<float*>(input));
+    running_mean.set_data(input);
 }
 
 size_t BatchNorm2d::get_running_mean_size() {
@@ -95,7 +103,7 @@ size_t BatchNorm2d::get_running_mean_size() {
 }
 
 void BatchNorm2d::set_running_var(void* input) {
-    running_var.set_data<float>(static_cast<float*>(input));
+    running_var.set_data(input);
 }
 
 size_t BatchNorm2d::get_running_var_size() {
@@ -1,9 +1,12 @@
-#include "concat.hpp"
+#include "layers/concat.hpp"
 
 using namespace CUDANet::Layers;
 
 Concat::Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::Backend *backend)
-    : a_shape(a_shape), b_shape(b_shape), backend(backend) {
+    : Concat(a_shape, b_shape, backend->get_default_dtype(), backend) {}
+
+Concat::Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDANet::DType dtype, CUDANet::Backend *backend)
+    : a_shape(a_shape), b_shape(b_shape), backend(backend), dtype(dtype) {
     if (a_shape[0] != b_shape[0] || a_shape[1] != b_shape[1]) {
         throw InvalidShapeException(
             "Concat requires matching height and width dimensions", a_shape,
@@ -12,7 +15,7 @@ Concat::Concat(const CUDANet::Shape a_shape, const CUDANet::Shape b_shape, CUDAN
     }
 
     out_shape = {a_shape[0], a_shape[1], a_shape[2] + b_shape[2]};
-    output = CUDANet::Tensor(out_shape, CUDANet::DType::FLOAT32, backend);
+    output = CUDANet::Tensor(out_shape, dtype, backend);
 }
 
 Concat::~Concat() {}
@@ -1,8 +1,7 @@
-#include "conv2d.hpp"
-
 #include <format>
 #include <stdexcept>
 
+#include "layers/conv2d.hpp"
 #include "layer.hpp"
 #include "tensor.hpp"
 
@@ -14,6 +13,16 @@ Conv2d::Conv2d(
     CUDANet::Shape stride_shape,
     CUDANet::Shape padding_shape,
     CUDANet::Backend* backend
+)
+    : Conv2d(input_shape, kernel_shape, stride_shape, padding_shape, backend->get_default_dtype(), backend) {}
+
+Conv2d::Conv2d(
+    CUDANet::Shape input_shape,
+    CUDANet::Shape kernel_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::DType dtype,
+    CUDANet::Backend* backend
 )
     : in_shape(input_shape),
       kernel_shape(kernel_shape),
@@ -36,6 +45,8 @@ Conv2d::Conv2d(
         throw InvalidShapeException("padding", 3, padding_shape.size());
     }
 
+    this->dtype = dtype;
+
     out_shape = {
         (in_shape[0] - kernel_shape[0] + 2 * padding_shape[0]) /
                 stride_shape[0] +
@@ -48,17 +59,17 @@ Conv2d::Conv2d(
 
     output = CUDANet::Tensor(
         Shape{out_shape[0], out_shape[1], out_shape[2]},
-        CUDANet::DType::FLOAT32, backend
+        dtype, backend
     );
 
     weights = CUDANet::Tensor(
         Shape{
             kernel_shape[0], kernel_shape[1], kernel_shape[2], in_shape[2]
         },
-        CUDANet::DType::FLOAT32, backend
+        dtype, backend
     );
     biases = CUDANet::Tensor(
-        Shape{kernel_shape[2]}, CUDANet::DType::FLOAT32, backend
+        Shape{kernel_shape[2]}, dtype, backend
     );
 
     weights.zero();
@@ -85,15 +96,15 @@ CUDANet::Shape Conv2d::output_shape() {
 }
 
 size_t Conv2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t Conv2d::output_size() {
-    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+    return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
 }
 
 void Conv2d::set_weights(void* input) {
-    weights.set_data<float>(static_cast<float*>(input));
+    weights.set_data(input);
 }
 
 size_t Conv2d::get_weights_size() {
@@ -101,7 +112,7 @@ size_t Conv2d::get_weights_size() {
 }
 
 void Conv2d::set_biases(void* input) {
-    biases.set_data<float>(static_cast<float*>(input));
+    biases.set_data(input);
 }
 
 size_t Conv2d::get_biases_size() {
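From the allocations above, the weight tensor is laid out as kernel_h x kernel_w x out_channels x in_channels, and biases get one entry per kernel_shape[2], so that index carries the output-channel count. A quick parameter-count check for a hypothetical 3x3 layer mapping 32 to 64 channels:

// Parameters of a 3x3 conv, 32 -> 64 channels:
// weights 3 * 3 * 64 * 32 entries, biases 64 entries.
static_assert(3 * 3 * 64 * 32 == 18432, "conv weight element count");
static_assert(64 == 64, "one bias per output channel");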
@@ -1,11 +1,14 @@
-#include "dense.hpp"
-
 #include <format>
 #include <stdexcept>
 
+#include "layers/dense.hpp"
+
 using namespace CUDANet::Layers;
 
 Dense::Dense(CUDANet::Shape in_shape, CUDANet::Shape out_shape, CUDANet::Backend* backend)
+    : Dense(in_shape, out_shape, backend->get_default_dtype(), backend) {}
+
+Dense::Dense(CUDANet::Shape in_shape, CUDANet::Shape out_shape, CUDANet::DType dtype, CUDANet::Backend* backend)
     : backend(backend),
       in_shape(in_shape),
       out_shape(out_shape) {
@@ -18,9 +21,11 @@ Dense::Dense(CUDANet::Shape in_shape, CUDANet::Shape out_shape, CUDANet::Backend
         throw InvalidShapeException("output", 1, out_shape.size());
     }
 
-    weights = CUDANet::Tensor(Shape{out_shape[0], in_shape[0]}, CUDANet::DType::FLOAT32, backend);
-    biases = CUDANet::Tensor(Shape{out_shape[0]}, CUDANet::DType::FLOAT32, backend);
-    output = CUDANet::Tensor(Shape{out_shape[0]}, CUDANet::DType::FLOAT32, backend);
+    this->dtype = dtype;
+
+    weights = CUDANet::Tensor(Shape{out_shape[0], in_shape[0]}, dtype, backend);
+    biases = CUDANet::Tensor(Shape{out_shape[0]}, dtype, backend);
+    output = CUDANet::Tensor(Shape{out_shape[0]}, dtype, backend);
 
     weights.zero();
     biases.zero();
@@ -51,8 +56,9 @@ size_t Dense::output_size() {
     return out_shape[0];
 };
 
+// TODO: Use dtype
 void Dense::set_weights(void* input) {
-    weights.set_data<float>(static_cast<float*>(input));
+    weights.set_data(input);
 }
 
 size_t Dense::get_weights_size() {
@@ -60,7 +66,7 @@ size_t Dense::get_weights_size() {
 }
 
 void Dense::set_biases(void* input) {
-    biases.set_data<float>(static_cast<float*>(input));
+    biases.set_data(input);
 }
 
 size_t Dense::get_biases_size() {
@@ -1,7 +1,7 @@
-#include "max_pool.hpp"
-
 #include <stdexcept>
 
+#include "layers/max_pool.hpp"
+
 using namespace CUDANet::Layers;
 
 MaxPool2d::MaxPool2d(
@@ -10,6 +10,16 @@ MaxPool2d::MaxPool2d(
     CUDANet::Shape stride_shape,
     CUDANet::Shape padding_shape,
     CUDANet::Backend* backend
+)
+    : MaxPool2d(input_shape, pool_shape, stride_shape, padding_shape, backend->get_default_dtype(), backend) {}
+
+MaxPool2d::MaxPool2d(
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::DType dtype,
+    CUDANet::Backend* backend
 )
     : in_shape(input_shape),
       pool_shape(pool_shape),
@@ -32,6 +42,8 @@ MaxPool2d::MaxPool2d(
         throw InvalidShapeException("padding", 2, padding_shape.size());
     }
 
+    this->dtype = dtype;
+
     out_shape = {
         (in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
             1,
@@ -42,7 +54,7 @@ MaxPool2d::MaxPool2d(
 
     output = CUDANet::Tensor(
         Shape{out_shape[0] * out_shape[1] * out_shape[2]},
-        CUDANet::DType::FLOAT32, backend
+        dtype, backend
     );
 }
 
@@ -66,11 +78,11 @@ CUDANet::Shape MaxPool2d::output_shape() {
 }
 
 size_t MaxPool2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t MaxPool2d::output_size() {
-    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+    return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
 }
 
 void MaxPool2d::set_weights(void* input) {}
@@ -1,5 +1,3 @@
-#include "model.hpp"
-
 #include <fstream>
 #include <iostream>
 #include <iomanip>
@@ -8,7 +6,9 @@
 #include <vector>
 
 #include "layer.hpp"
-#include "batch_norm.hpp"
+#include "layers/batch_norm.hpp"
 
+#include "model.hpp"
+
 using namespace CUDANet;
@@ -1,7 +1,7 @@
-#include "module.hpp"
-
 #include <algorithm>
 
+#include "module.hpp"
+
 using namespace CUDANet;
 
 CUDANet::Shape Module::input_shape() {
@@ -12,22 +12,6 @@ CUDANet::Shape Module::output_shape() {
     return out_shape;
 }
 
-size_t Module::input_size() {
-    size_t count = 1;
-    for (const auto& dim : in_shape) {
-        count *= dim;
-    }
-    return sizeof(float) * count;
-}
-
-size_t Module::output_size() {
-    size_t count = 1;
-    for (const auto& dim : out_shape) {
-        count *= dim;
-    }
-    return sizeof(float) * count;
-}
-
 void Module::register_layer(const std::string& name, Layer* layer) {
     layers.push_back({name, layer});
 }
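The Module hunks above delete input_size()/output_size(), which multiplied the shape out and scaled by a hard-coded sizeof(float); that float-only math no longer fits now that tensors carry a DType. A caller that still needs a byte count can compute it per dtype. The helper below is a hypothetical sketch, not an API added by this changeset, and it assumes dtype_size() is declared in tensor.hpp:

    #include <cstddef>

    #include "shape.hpp"
    #include "tensor.hpp"

    // Hypothetical replacement for the removed Module::input_size().
    static size_t byte_size(const CUDANet::Shape& shape, CUDANet::DType dtype) {
        size_t count = 1;
        for (size_t i = 0; i < shape.size(); ++i) {
            count *= shape[i];
        }
        return dtype_size(dtype) * count;  // was: sizeof(float) * count
    }
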
@@ -4,17 +4,37 @@
 
 using namespace CUDANet;
 
+size_t dtype_size(DType dtype) {
+    switch (dtype)
+    {
+    case DType::FLOAT32:
+        return 4;
+        break;
+
+    default:
+        throw std::runtime_error("Unknown DType");
+        break;
+    }
+}
+
+Tensor::Tensor(Shape shape, CUDANet::Backend* backend)
+    : Tensor(shape, backend->get_default_dtype(), backend) {}
+
 Tensor::Tensor(Shape shape, DType dtype, Backend* backend)
     : shape(shape), dtype(dtype), backend(backend), d_ptr(nullptr) {
 
     if (shape.empty()) {
         throw std::runtime_error("Tensor shape cannot be empty");
     }
 
+    // Check if backend supports DType
+    if (!backend->supports_dtype(dtype)) {
+        throw std::runtime_error("Unsupported DType");
+    }
+
     // Count total elements
     size_t count = 1;
-    for (const auto& dim : shape) {
-        count *= dim;
+    for (size_t i = 0; i < shape.size(); ++i) {
+        count *= shape[i];
     }
     total_elms = count;
 
@@ -39,8 +59,7 @@ Tensor::Tensor(Tensor&& other) noexcept
       total_elms(other.total_elms),
      total_size(other.total_size),
       backend(other.backend),
-      d_ptr(other.d_ptr)
-{
+      d_ptr(other.d_ptr) {
     other.d_ptr = nullptr;
     other.backend = nullptr;
 }
@@ -74,6 +93,10 @@ Tensor::~Tensor() {
     }
 }
 
+DType Tensor::get_dtype() const {
+    return dtype;
+}
+
 size_t Tensor::numel() const {
     return total_elms;
 }
@@ -82,6 +105,22 @@ size_t Tensor::size() const {
     return total_size;
 }
 
+void* Tensor::device_ptr() const {
+    return d_ptr;
+}
+
+void* Tensor::device_ptr() {
+    return d_ptr;
+}
+
 void Tensor::zero() {
     backend->zero(*this);
 }
+
+void Tensor::fill(int value) {
+    backend->fill(*this, value);
+}
+
+void Tensor::set_data(void *data) {
+    backend->copy_to_device(*this, data, total_size);
+}
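Taken together, the Tensor hunks give tensors a queryable dtype (get_dtype), const and non-const device_ptr() accessors, and fill()/set_data() that forward to the backend, alongside the dtype_size() helper and a constructor overload that falls back to the backend's default dtype. A short usage sketch; the host buffer and the FLOAT32 choice are illustrative:

    #include <vector>

    #include "tensor.hpp"

    using namespace CUDANet;

    void tensor_example(Backend* backend) {
        Tensor t(Shape{4, 4}, DType::FLOAT32, backend);

        t.zero();    // forwards to backend->zero(t)
        t.fill(0);   // forwards to backend->fill(t, 0)

        // Copy host data to the device; set_data() copies t.size() bytes,
        // which for FLOAT32 should be 4 * t.numel() per dtype_size().
        std::vector<float> host(t.numel(), 1.0f);
        t.set_data(host.data());

        void* raw = t.device_ptr();  // raw device pointer for kernels
    }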