Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-12-22 22:34:22 +00:00)
Refactor size calculations in layers and backend
@@ -28,6 +28,7 @@ class Backend {
     std::optional<DType> default_dtype;
   public:
 
+    // Dtypes
     virtual bool supports_dtype(DType dtype) const = 0;
     virtual void set_default_dtype(DType dtype) = 0;
     virtual DType get_default_dtype() const = 0;
@@ -15,10 +15,6 @@ class Module {
 
     CUDANet::Shape output_shape();
 
-    size_t input_size();
-
-    size_t output_size();
-
     void register_layer(const std::string& name, Layer* layer);
 
     void register_module(Module& module);
@@ -16,6 +16,19 @@ enum class DType
     // INT32, // Not implemented yet
 };
 
+size_t dtype_size(DType dtype) {
+    switch (dtype)
+    {
+        case DType::FLOAT32:
+            return 4;
+            break;
+
+        default:
+            throw std::runtime_error("Unknown DType");
+            break;
+    }
+}
+
 class Tensor
 {
   public:
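Note: dtype_size maps a DType tag to its width in bytes, so buffer sizes can be derived from an element count and the tensor's dtype instead of a hard-coded sizeof(float). A minimal usage sketch (the shape values are illustrative, not taken from the commit):

    // Bytes for a FLOAT32 tensor with 64 x 64 x 3 elements.
    size_t numel = 64 * 64 * 3;                         // 12288 elements
    size_t bytes = dtype_size(DType::FLOAT32) * numel;  // 4 * 12288 = 49152 bytes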
@@ -16,7 +16,7 @@ std::unique_ptr<Backend> BackendFactory::create(BackendType backend_type, const
 #ifdef USE_CUDA
 
             if (!CUDANet::Backends::CUDA::is_cuda_available()) {
-                throw std::runtime_error("No CUDA devices found")
+                throw std::runtime_error("No CUDA devices found");
             }
 
             auto cuda = std::make_unique<CUDANet::Backends::CUDA>(config);
@@ -31,6 +31,7 @@ std::unique_ptr<Backend> BackendFactory::create(BackendType backend_type, const
             break;
 
         default:
+            throw std::runtime_error("Invalid backend");
             break;
     }
 
@@ -65,7 +65,6 @@ CUDANet::DType CUDA::get_default_dtype() const {
     return DType::FLOAT32;
 }
 
-
 void* CUDA::allocate(size_t bytes) {
     void* d_ptr = nullptr;
     CUDA_CHECK(cudaMalloc(&d_ptr, bytes));
@@ -36,7 +36,7 @@ __global__ void Kernels::convolution(
         return;
     }
 
-    T sum = static_cast<t>(0);
+    T sum = static_cast<T>(0);
 
     // Iterate over kernel and input matrix
     for (int c = 0; c < input_shape[2]; c++) {
@@ -24,7 +24,7 @@ template <typename T>
 void CUDA::relu_impl(Tensor& tensor) {
     int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
     Kernels::relu<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<T>(), tensor.data<T>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
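Note: this hunk and the ones that follow swap the typed Tensor::data<T>() accessor for an explicit cast of the untyped device_ptr(). A hypothetical sketch of how the two accessors could relate; the real CUDANet::Tensor class is not shown in this diff, so everything here other than device_ptr() and data<T>() is an assumption:

    class Tensor {
      public:
        // Untyped device pointer; what the refactored call sites cast from.
        void* device_ptr() const { return d_ptr; }

        // Typed convenience accessor used by the old call sites, presumably
        // equivalent to static_cast<T*>(device_ptr()).
        template <typename T>
        T* data() const { return static_cast<T*>(d_ptr); }

      private:
        void* d_ptr = nullptr;   // assumed member name
    };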
@@ -48,7 +48,7 @@ template <typename T>
 void CUDA::sigmoid_impl(CUDANet::Tensor& tensor) {
     int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
     Kernels::sigmoid<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<T>(), tensor.data<T>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
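Note: the gridSize expression used throughout these launches is plain ceil-division — enough BLOCK_SIZE-thread blocks to cover every element. A worked example (the BLOCK_SIZE value is assumed for illustration):

    // With BLOCK_SIZE = 256 and tensor.numel() = 1000:
    // (1000 + 256 - 1) / 256 = 1255 / 256 = 4 blocks of 256 threads.
    int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;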
@@ -78,13 +78,13 @@ void CUDA::softmax_impl(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
 
     // Subtract max value to improve numerical stability
     Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<T>(), tensor.data<T>(), temp_max.data<T>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(temp_max.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
 
     // Compute exponentials
     Kernels::vec_exp<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<T>(), tensor.data<T>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
 
@@ -92,7 +92,7 @@ void CUDA::softmax_impl(Tensor& tensor, Tensor& temp_max, Tensor& temp_sum) {
     sum(tensor, temp_sum);
 
     Kernels::vec_scalar_div<<<gridSize, BLOCK_SIZE>>>(
-        tensor.data<T>(), tensor.data<T>(), temp_sum.data<T>(), tensor.numel()
+        static_cast<T*>(tensor.device_ptr()), static_cast<T*>(tensor.device_ptr()), static_cast<T*>(temp_sum.device_ptr()), tensor.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
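Note: taken together, the two softmax hunks implement the numerically stable formulation softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)); subtracting the maximum before exponentiating keeps exp() from overflowing without changing the result.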
@@ -142,13 +142,13 @@ CUDANet::Tensor& CUDA::dense_impl(
     auto biasGridSize = (output_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     Kernels::mat_vec_mul<<<forwardGridSize, BLOCK_SIZE>>>(
-        weights.data<T>(), input.data<T>(), output.data<T>(), input_size,
+        static_cast<T*>(weights.device_ptr()), static_cast<T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_size,
         output_size
     );
     CUDA_CHECK(cudaGetLastError());
 
     Kernels::vec_vec_add<<<biasGridSize, BLOCK_SIZE>>>(
-        biases.data<T>(), output.data<T>(), output.data<T>(), output_size
+        static_cast<T*>(biases.device_ptr()), static_cast<T*>(output.device_ptr()), static_cast<T*>(output.device_ptr()), output_size
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
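Note: the two launches above compute the usual dense layer y = W * x + b — mat_vec_mul produces the matrix-vector product and vec_vec_add then adds the bias in place. A host-side reference sketch of that math; the row-major weight layout is an assumption, the kernels define the real one:

    #include <cstddef>

    // Reference semantics only; not the library's API.
    void dense_reference(const float* W, const float* x, const float* b,
                         float* y, size_t input_size, size_t output_size) {
        for (size_t o = 0; o < output_size; ++o) {
            float acc = 0.0f;
            for (size_t i = 0; i < input_size; ++i) {
                acc += W[o * input_size + i] * x[i];   // assumed row-major W
            }
            y[o] = acc + b[o];                          // bias add (vec_vec_add step)
        }
    }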
@@ -213,7 +213,7 @@ CUDANet::Tensor& CUDA::conv2d_impl(
     );
 
     Kernels::convolution<<<grid, block>>>(
-        input.data<T>(), weights.data<T>(), biases.data<T>(), output.data<T>(),
+        static_cast<T*>(input.device_ptr()), static_cast<T*>(weights.device_ptr()), static_cast<T*>(biases.device_ptr()), static_cast<T*>(output.device_ptr()),
         in_shape, padding_shape, kernel_shape, stride_shape, out_shape
     );
     CUDA_CHECK(cudaGetLastError());
@@ -273,7 +273,7 @@ CUDANet::Tensor& CUDA::max_pool2d_impl(
     );
 
     Kernels::max_pool<<<grid, block>>>(
-        input.data<T>(), output.data<T>(), input_shape, output_shape,
+        static_cast<T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_shape, output_shape,
         pool_shape, stride_shape, padding_shape
     );
     CUDA_CHECK(cudaGetLastError());
@@ -333,7 +333,7 @@ CUDANet::Tensor& CUDA::avg_pool2d_impl(
     );
 
     Kernels::avg_pool<<<grid, block>>>(
-        input.data<T>(), output.data<T>(), input_shape, output_shape,
+        static_cast<T*>(input.device_ptr()), static_cast<T*>(output.device_ptr()), input_shape, output_shape,
         pool_shape, stride_shape, padding_shape
     );
     CUDA_CHECK(cudaGetLastError());
@@ -394,34 +394,34 @@ CUDANet::Tensor& CUDA::batch_norm_impl(
     for (int i = 0; i < input_shape[2]; i++) {
         // Subtract mean from input
         Kernels::vec_scalar_sub<<<gridSize, BLOCK_SIZE>>>(
-            input.data<T>() + i * input_shape[0] * input_shape[1],
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            &running_mean.data<T>()[i], input_shape[0] * input_shape[1]
+            static_cast<T*>(input.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(running_mean.device_ptr())[i], input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
 
         // Divide by sqrt(running_var + epsilon)
         Kernels::vec_scale<<<gridSize, BLOCK_SIZE>>>(
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            &running_var.data<T>()[i], epsilon.data<T>(),
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(running_var.device_ptr())[i], static_cast<T*>(epsilon.device_ptr()),
             input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
 
         // Multiply by weights
         Kernels::vec_scalar_mul<<<gridSize, BLOCK_SIZE>>>(
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            &weights.data<T>()[i], input_shape[0] * input_shape[1]
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(weights.device_ptr())[i], input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
 
         // Add biases
         Kernels::vec_scalar_add<<<gridSize, BLOCK_SIZE>>>(
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            output.data<T>() + i * input_shape[0] * input_shape[1],
-            &biases.data<T>()[i], input_shape[0] * input_shape[1]
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            static_cast<T*>(output.device_ptr()) + i * input_shape[0] * input_shape[1],
+            &static_cast<T*>(biases.device_ptr())[i], input_shape[0] * input_shape[1]
         );
         CUDA_CHECK(cudaGetLastError());
     }
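Note: per channel i, the four launches in the loop above apply the standard inference-time batch-norm transform. A CPU reference sketch of that math, assuming channel-major planes of H * W elements (a layout assumption; the kernels define the real one):

    #include <cmath>
    #include <cstddef>

    // Reference semantics only; not the library's API.
    void batch_norm_reference(const float* in, float* out,
                              const float* running_mean, const float* running_var,
                              const float* weights, const float* biases,
                              float epsilon, size_t channels, size_t plane) {
        for (size_t c = 0; c < channels; ++c) {
            for (size_t j = 0; j < plane; ++j) {
                float x = in[c * plane + j];
                out[c * plane + j] =
                    (x - running_mean[c]) / std::sqrt(running_var[c] + epsilon)
                    * weights[c] + biases[c];
            }
        }
    }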
@@ -460,12 +460,12 @@ CUDANet::Tensor& CUDA::concat_impl(
     CUDANet::Tensor& output
 ) {
     CUDA_CHECK(cudaMemcpy(
-        output.data<T>(), input_a.data<T>(), input_a.size(),
+        static_cast<T*>(output.device_ptr()), static_cast<T*>(input_a.device_ptr()), input_a.size(),
         cudaMemcpyDeviceToDevice
     ));
 
     CUDA_CHECK(cudaMemcpy(
-        output.data<T>() + input_a.numel(), input_b.data<T>(), input_b.size(),
+        static_cast<T*>(output.device_ptr()) + input_a.numel(), static_cast<T*>(input_b.device_ptr()), input_b.size(),
         cudaMemcpyDeviceToDevice
     ));
 
@@ -508,7 +508,7 @@ CUDANet::Tensor& CUDA::add_impl(
     auto gridSize = (input_a.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     Kernels::vec_vec_add<<<gridSize, BLOCK_SIZE>>>(
-        input_a.data<T>(), input_b.data<T>(), output.data<T>(), input_a.numel()
+        static_cast<T*>(input_a.device_ptr()), static_cast<T*>(input_b.device_ptr()), static_cast<T*>(output.device_ptr()), input_a.numel()
     );
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
@@ -26,7 +26,7 @@ void CUDA::print_impl(const CUDANet::Tensor &input) {
     std::vector<T> h_vec(input.numel());
 
     CUDA_CHECK(cudaMemcpy(
-        h_vec.data(), input.data<T>(), sizeof(T) * length, cudaMemcpyDeviceToHost
+        h_vec.data(), static_cast<T*>(input.device_ptr()), sizeof(T) * length, cudaMemcpyDeviceToHost
     ));
 
     for (int i = 0; i < length; ++i) {
@@ -56,7 +56,7 @@ template void CUDA::fill_impl<float>(CUDANet::Tensor &input, int value);
 
 template <typename T>
 void CUDA::fill_impl(CUDANet::Tensor &input, int value) {
-    CUDA_CHECK(cudaMemset(input.data<T>(), value, sizeof(T) * input.numel()));
+    CUDA_CHECK(cudaMemset(static_cast<T*>(input.device_ptr()), value, sizeof(T) * input.numel()));
 }
 
 void CUDA::copy_to_device(CUDANet::Tensor &tensor, void *data, size_t size) {
@@ -75,7 +75,7 @@ template void CUDA::copy_to_device_impl<float>(CUDANet::Tensor &tensor, void *da
 
 template <typename T>
 void CUDA::copy_to_device_impl(CUDANet::Tensor &tensor, void *data, size_t size) {
-    CUDA_CHECK(cudaMemcpy(tensor.data<T>(), data, size, cudaMemcpyHostToDevice));
+    CUDA_CHECK(cudaMemcpy(static_cast<T*>(tensor.device_ptr()), data, size, cudaMemcpyHostToDevice));
 }
 
 void CUDA::sum(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
@@ -95,17 +95,17 @@ template void CUDA::sum_impl<float>(const CUDANet::Tensor &input, CUDANet::Tenso
 template <typename T>
 void CUDA::sum_impl(const CUDANet::Tensor &input, CUDANet::Tensor &sum) {
     auto length = input.numel();
-    const int gridSize = ( + BLOCK_SIZE - 1) / BLOCK_SIZE;
+    const int gridSize = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
     CUDANet::Kernels::sum_reduce<<<gridSize, BLOCK_SIZE>>>(
-        input.data<T>(), sum.data<T>(), length
+        static_cast<T*>(input.device_ptr()), static_cast<T*>(sum.device_ptr()), length
     );
     CUDA_CHECK(cudaGetLastError());
 
     int remaining = gridSize;
     while (remaining > 1) {
         int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE;
-        CUDANet::Kernels::sum_reduce<<<blocks_needed, BLOCK_SIZE>>>(sum.data<T>(), sum.data<T>(), remaining);
+        CUDANet::Kernels::sum_reduce<<<blocks_needed, BLOCK_SIZE>>>(static_cast<T*>(sum.device_ptr()), static_cast<T*>(sum.device_ptr()), remaining);
         CUDA_CHECK(cudaGetLastError());
 
         remaining = blocks_needed;
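Note: sum_impl reduces in passes — the first launch collapses length elements into gridSize per-block partial sums, and the while loop keeps reducing those partials until a single value remains. For example, with BLOCK_SIZE = 256 (an assumed value) and length = 100000: pass 1 produces (100000 + 255) / 256 = 391 partials, pass 2 reduces them to 2, and pass 3 leaves the final sum. max_impl below follows the same scheme with max_reduce.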
@@ -131,14 +131,14 @@ void CUDA::max_impl(const CUDANet::Tensor &input, CUDANet::Tensor &max) {
     auto length = input.numel();
     const int grid_size = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;
 
-    Kernels::max_reduce<<<grid_size, BLOCK_SIZE>>>(input.data<T>(), max.data<T>(), length);
+    Kernels::max_reduce<<<grid_size, BLOCK_SIZE>>>(static_cast<T*>(input.device_ptr()), static_cast<T*>(max.device_ptr()), length);
     CUDA_CHECK(cudaGetLastError());
 
     int remaining = grid_size;
 
     while (remaining > 1) {
         int blocks_needed = (remaining + BLOCK_SIZE - 1) / BLOCK_SIZE;
-        CUDANet::Kernels::max_reduce<<<blocks_needed, BLOCK_SIZE>>>(max.data<T>(), max.data<T>(), remaining);
+        CUDANet::Kernels::max_reduce<<<blocks_needed, BLOCK_SIZE>>>(static_cast<T*>(max.device_ptr()), static_cast<T*>(max.device_ptr()), remaining);
         CUDA_CHECK(cudaGetLastError());
 
         remaining = blocks_needed;
@@ -84,11 +84,11 @@ CUDANet::Shape AvgPool2d::output_shape() {
 }
 
 size_t AvgPool2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t AvgPool2d::output_size() {
-    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+    return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
 }
 
 void AvgPool2d::set_weights(void* input) {}
@@ -73,11 +73,11 @@ CUDANet::Shape BatchNorm2d::output_shape() {
 }
 
 size_t BatchNorm2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t BatchNorm2d::output_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 void BatchNorm2d::set_weights(void* input) {
@@ -97,11 +97,11 @@ CUDANet::Shape Conv2d::output_shape() {
 }
 
 size_t Conv2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t Conv2d::output_size() {
-    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+    return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
 }
 
 void Conv2d::set_weights(void* input) {
@@ -78,11 +78,11 @@ CUDANet::Shape MaxPool2d::output_shape() {
 }
 
 size_t MaxPool2d::input_size() {
-    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+    return dtype_size(dtype) * in_shape[0] * in_shape[1] * in_shape[2];
 }
 
 size_t MaxPool2d::output_size() {
-    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+    return dtype_size(dtype) * out_shape[0] * out_shape[1] * out_shape[2];
 }
 
 void MaxPool2d::set_weights(void* input) {}
@@ -12,22 +12,6 @@ CUDANet::Shape Module::output_shape() {
     return out_shape;
 }
 
-size_t Module::input_size() {
-    size_t count = 1;
-    for (const auto& dim : in_shape) {
-        count *= dim;
-    }
-    return sizeof(float) * count;
-}
-
-size_t Module::output_size() {
-    size_t count = 1;
-    for (const auto& dim : out_shape) {
-        count *= dim;
-    }
-    return sizeof(float) * count;
-}
-
 void Module::register_layer(const std::string& name, Layer* layer) {
     layers.push_back({name, layer});
 }
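Note: the removed Module::input_size() and Module::output_size() computed byte sizes by multiplying the shape dimensions and hard-coding sizeof(float); the per-layer overrides in the earlier hunks now do the same arithmetic with dtype_size(dtype). A hypothetical dtype-aware equivalent of the removed helpers, shown only for contrast (not part of this commit):

    size_t shape_bytes(const CUDANet::Shape& shape, DType dtype) {
        size_t count = 1;
        for (const auto& dim : shape) {
            count *= dim;                   // element count over all dimensions
        }
        return dtype_size(dtype) * count;   // bytes = elements * dtype width
    }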