Compare commits


2 Commits

SHA1        Message                           Date
5679dc0a50  Add avgPool2d implementation      2025-11-21 19:39:30 +01:00
c83e1f0c45  Implement InvalidShapeException   2025-11-21 18:54:45 +01:00
12 changed files with 179 additions and 158 deletions

View File

@@ -62,6 +62,16 @@ class Backend {
         CUDANet::Shape padding_shape,
         CUDANet::Shape output_shape
     ) = 0;
+
+    virtual CUDANet::Tensor& avgPool2d(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    ) = 0;
 };
 
 } // namespace CUDANet

View File

@@ -49,7 +49,7 @@ class CUDA : public Backend {
         const CUDANet::Shape out_shape
     ) override;
 
-    CUDANet::Tensor& CUDA::maxPool2d(
+    CUDANet::Tensor& maxPool2d(
         const CUDANet::Tensor& input,
         CUDANet::Tensor& output,
         CUDANet::Shape input_shape,
@@ -58,6 +58,16 @@ class CUDA : public Backend {
         CUDANet::Shape padding_shape,
         CUDANet::Shape output_shape
     ) override;
+
+    CUDANet::Tensor& avgPool2d(
+        const CUDANet::Tensor& input,
+        CUDANet::Tensor& output,
+        CUDANet::Shape input_shape,
+        CUDANet::Shape pool_shape,
+        CUDANet::Shape stride_shape,
+        CUDANet::Shape padding_shape,
+        CUDANet::Shape output_shape
+    ) override;
 };
 
 } // namespace CUDANet::Backend

View File

@@ -8,7 +8,7 @@ class MaxPool2d : public Layer {
   public:
     MaxPool2d(
         CUDANet::Shape input_shape,
-        CUDANet::Shape pooling_shape,
+        CUDANet::Shape pool_shape,
         CUDANet::Shape stride_shape,
         CUDANet::Shape padding_shape,
         CUDANet::Backend* backend
@@ -38,7 +38,7 @@ class MaxPool2d : public Layer {
   private:
     CUDANet::Shape in_shape;
-    CUDANet::Shape pooling_shape;
+    CUDANet::Shape pool_shape;
     CUDANet::Shape stride_shape;
     CUDANet::Shape padding_shape;

View File

@@ -6,4 +6,21 @@ namespace CUDANet {
 typedef std::vector<size_t> Shape;
 
+class InvalidShapeException : public std::runtime_error {
+  public:
+    InvalidShapeException(
+        const std::string& param_name,
+        size_t expected,
+        size_t actual
+    )
+        : std::runtime_error(
+              std::format(
+                  "Invalid {} shape. Expected {}, actual {}",
+                  param_name,
+                  expected,
+                  actual
+              )
+          ) {}
+};
+
 } // namespace CUDANet
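
Note: the constructor builds its message with std::format, so the header also needs <stdexcept>, <string>, and <format> (not visible in this hunk). A minimal usage sketch with hypothetical values, not taken from the changeset:

    // A 4-dimensional shape arriving where a layer expects 3 dimensions:
    CUDANet::Shape bad_shape = {1, 3, 224, 224};
    if (bad_shape.size() != 3) {
        // what() yields: "Invalid input shape. Expected 3, actual 4"
        throw CUDANet::InvalidShapeException("input", 3, bad_shape.size());
    }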

View File

@@ -1,6 +1,6 @@
#include "cuda_helper.cuh" #include "cuda_helper.cuh"
#include "layer.hpp" #include "layer.hpp"
#include "pooling.cuh" #include "pool.cuh"
using namespace CUDANet; using namespace CUDANet;

View File

@@ -135,5 +135,31 @@ CUDANet::Tensor& CUDA::maxPool2d(
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize());
 
+    return output;
+}
+
+CUDANet::Tensor& CUDA::avgPool2d(
+    const CUDANet::Tensor& input,
+    CUDANet::Tensor& output,
+    CUDANet::Shape input_shape,
+    CUDANet::Shape pool_shape,
+    CUDANet::Shape stride_shape,
+    CUDANet::Shape padding_shape,
+    CUDANet::Shape output_shape
+) {
+    dim3 block(8, 8, 8);
+    dim3 grid(
+        (output_shape[0] + block.x - 1) / block.x,
+        (output_shape[1] + block.y - 1) / block.y,
+        (output_shape[2] + block.z - 1) / block.z
+    );
+
+    Kernels::avg_pool<<<grid, block>>>(
+        input.data<float>(), output.data<float>(), input_shape, output_shape, pool_shape,
+        stride_shape, padding_shape
+    );
+    CUDA_CHECK(cudaGetLastError());
+    CUDA_CHECK(cudaDeviceSynchronize());
+
     return output;
 }
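
Note: the avg_pool kernel itself is not part of this diff. As an illustration only (not the repository's Kernels::avg_pool, whose signature and memory layout are not shown here), an average-pooling kernel over the same block/grid decomposition could look like the sketch below, assuming flat float buffers in height-width-channel order and scalar dimensions:

    __global__ void avg_pool_sketch(
        const float* input, float* output,
        int in_h, int in_w, int channels,
        int pool_h, int pool_w, int stride_h, int stride_w,
        int pad_h, int pad_w, int out_h, int out_w
    ) {
        int oh = blockIdx.x * blockDim.x + threadIdx.x;  // output row
        int ow = blockIdx.y * blockDim.y + threadIdx.y;  // output column
        int c  = blockIdx.z * blockDim.z + threadIdx.z;  // channel
        if (oh >= out_h || ow >= out_w || c >= channels) return;

        float sum = 0.0f;
        for (int i = 0; i < pool_h; ++i) {
            for (int j = 0; j < pool_w; ++j) {
                int ih = oh * stride_h + i - pad_h;
                int iw = ow * stride_w + j - pad_w;
                if (ih >= 0 && ih < in_h && iw >= 0 && iw < in_w) {
                    sum += input[(ih * in_w + iw) * channels + c];
                }
            }
        }
        // Padded positions contribute zero; divide by the full window size.
        output[(oh * out_w + ow) * channels + c] = sum / (pool_h * pool_w);
    }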

View File

@@ -1,45 +0,0 @@
#include "avg_pooling.hpp"
#include "cuda_helper.cuh"
#include "pooling.cuh"
using namespace CUDANet::Layers;
void AvgPooling2d::initCUDA() {
d_output = nullptr;
CUDA_CHECK(cudaMalloc(
(void**)&d_output,
sizeof(float) * outputSize.first * outputSize.second * nChannels
));
}
void AvgPooling2d::delCUDA() {
cudaFree(d_output);
}
float* AvgPooling2d::forwardCUDA(const float* d_input) {
dim3 block(8, 8, 8);
dim3 grid(
(outputSize.first + block.x - 1) / block.x,
(outputSize.second + block.y - 1) / block.y,
(nChannels + block.z - 1) / block.z
);
Kernels::avg_pooling<<<grid, block>>>(
d_input, d_output, inputSize, outputSize, nChannels, poolingSize,
stride, padding
);
CUDA_CHECK(cudaGetLastError());
activation->activate(d_output);
CUDA_CHECK(cudaDeviceSynchronize());
return d_output;
}
void AdaptiveAvgPooling2d::initCUDA() {
cudaFree(d_output);
cudaMalloc(
(void**)&d_output,
sizeof(float) * outputSize.first * outputSize.second * nChannels
);
}

View File

@@ -18,35 +18,19 @@ AvgPool2d::AvgPool2d(
       padding_shape(padding_shape),
       backend(backend) {
     if (in_shape.size() != 3) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid input shape. Expected 3 dims, got {}", input_shape.size()
-            )
-        );
+        throw InvalidShapeException("input", 3, in_shape.size());
     }
 
     if (pool_shape.size() != 2) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid pool shape. Expected 2 dims, got {}", pool_shape.size()
-            )
-        );
+        throw InvalidShapeException("pool", 2, pool_shape.size());
     }
 
     if (stride_shape.size() != 2) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid stride shape. Expected 2 dims, got {}", stride_shape.size()
-            )
-        );
+        throw InvalidShapeException("stride", 2, stride_shape.size());
     }
 
     if (padding_shape.size() != 2) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid padding shape. Expected 2 dims, got {}", padding_shape.size()
-            )
-        );
+        throw InvalidShapeException("padding", 2, padding_shape.size());
     }
 
     out_shape = {
@@ -65,23 +49,43 @@ AvgPool2d::AvgPool2d(
 AvgPool2d::~AvgPool2d() {}
 
-CUDANet::Tensor& AvgPool2d::forward(CUDANet::Tensor& input);
+CUDANet::Tensor& AvgPool2d::forward(CUDANet::Tensor& input) {
+    output.zero();
+
+    backend->avgPool2d(
+        input,
+        output,
+        in_shape,
+        pool_shape,
+        stride_shape,
+        padding_shape,
+        out_shape
+    );
+
+    return output;
+}
 
-CUDANet::Shape AvgPool2d::input_shape();
+CUDANet::Shape AvgPool2d::input_shape() {
+    return in_shape;
+}
 
-CUDANet::Shape AvgPool2d::output_shape();
+CUDANet::Shape AvgPool2d::output_shape() {
+    return out_shape;
+}
 
-size_t AvgPool2d::input_size();
+size_t AvgPool2d::input_size() {
+    return sizeof(float) * in_shape[0] * in_shape[1] * in_shape[2];
+}
 
-size_t AvgPool2d::output_size();
+size_t AvgPool2d::output_size() {
+    return sizeof(float) * out_shape[0] * out_shape[1] * out_shape[2];
+}
 
-void AvgPool2d::set_weights(void* input);
+void AvgPool2d::set_weights(void* input) {}
 
-CUDANet::Tensor& AvgPool2d::get_weights();
+CUDANet::Tensor& AvgPool2d::get_weights() {}
 
-void AvgPool2d::set_biases(void* input);
+void AvgPool2d::set_biases(void* input) {}
 
-CUDANet::Tensor& AvgPool2d::get_biases();
+CUDANet::Tensor& AvgPool2d::get_biases() {}
 
 AdaptiveAvgPool2d::AdaptiveAvgPool2d(
@@ -89,15 +93,29 @@ AdaptiveAvgPool2d::AdaptiveAvgPool2d(
     CUDANet::Shape output_shape,
     CUDANet::Backend *backend
 )
-    : AvgPool2d(input_shape, {1, 1}, {1, 1}, {0, 0}, backend) {
-    stride_shape = {
-        input_shape[0] / output_shape[0],
-        input_shape[1] / output_shape[1]
-    };
-    pool_shape = {
-        input_shape[0] - (output_shape[0] - 1) * stride_shape[0],
-        input_shape[1] - (output_shape[1] - 1) * stride_shape[1]
-    };
-    padding_shape = {(pool_shape[0] - 1) / 2, (pool_shape[1] - 1) / 2};
+    : AvgPool2d(
+          input_shape,
+          // pool_shape
+          {
+              input_shape[0] - (output_shape[0] - 1) * (input_shape[0] / output_shape[0]),
+              input_shape[1] - (output_shape[1] - 1) * (input_shape[1] / output_shape[1])
+          },
+          // stride_shape
+          {
+              input_shape[0] / output_shape[0],
+              input_shape[1] / output_shape[1]
+          },
+          // padding_shape
+          {
+              (input_shape[0] - (output_shape[0] - 1) * (input_shape[0] / output_shape[0]) - 1) / 2,
+              (input_shape[1] - (output_shape[1] - 1) * (input_shape[1] / output_shape[1]) - 1) / 2
+          },
+          backend
+      ) {
     out_shape = output_shape;
+
+    output = CUDANet::Tensor(
+        Shape{out_shape[0] * out_shape[1] * out_shape[2]},
+        CUDANet::DType::FLOAT32, backend
+    );
 }
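
Note: the refactor folds the old post-construction assignments into the AvgPool2d base call, with stride = input / output, pool = input - (output - 1) * stride, and padding = (pool - 1) / 2. A quick arithmetic check with hypothetical values where the input divides evenly:

    // input 6x6, requested output 2x2 (hypothetical values)
    size_t in = 6, target = 2;
    size_t stride = in / target;                 // 3
    size_t pool   = in - (target - 1) * stride;  // 6 - 1 * 3 = 3
    size_t pad    = (pool - 1) / 2;              // 1
    // AvgPool2d's formula then gives (6 + 2 * 1 - 3) / 3 + 1 = 2, the requested size.

When the sizes do not divide evenly the base formula can land off the requested size, which is presumably why the constructor still overwrites out_shape with output_shape afterwards.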

View File

@@ -21,47 +21,31 @@ Conv2d::Conv2d(
       padding_shape(padding_shape),
       backend(backend) {
     if (in_shape.size() != 3) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid input shape. Expected 3 dims, got {}", in_shape.size()
-            )
-        );
+        throw InvalidShapeException("input", 3, in_shape.size());
     }
 
     if (kernel_shape.size() != 3) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid kernel shape. Expected 3 dims, got {}", kernel_shape.size()
-            )
-        );
+        throw InvalidShapeException("kernel", 3, kernel_shape.size());
     }
 
     if (stride_shape.size() != 2) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid stride shape. Expected 2 dims, got {}", stride_shape.size()
-            )
-        );
+        throw InvalidShapeException("stride", 2, stride_shape.size());
     }
 
     if (padding_shape.size() != 2) {
-        throw std::runtime_error(
-            std::format(
-                "Invalid padding shape. Expected 2 dims, got {}", padding_shape.size()
-            )
-        );
+        throw InvalidShapeException("padding", 2, padding_shape.size());
     }
 
-    size_t out_h = (in_shape[0] - kernel_shape[0] + 2 * padding_shape[0]) /
-                       stride_shape[0] +
-                   1;
-    size_t out_w = (in_shape[1] - kernel_shape[1] + 2 * padding_shape[1]) /
-                       stride_shape[1] +
-                   1;
-
-    out_shape.resize(3);
-    out_shape[0] = out_h;
-    out_shape[1] = out_w;
-    out_shape[2] = kernel_shape[2];
+    out_shape = {
+        (in_shape[0] - kernel_shape[0] + 2 * padding_shape[0]) /
+                stride_shape[0] +
+            1,
+        (in_shape[1] - kernel_shape[1] + 2 * padding_shape[1]) /
+                stride_shape[1] +
+            1,
+        kernel_shape[2]
+    };
 
     output = CUDANet::Tensor(
         Shape{out_shape[0] * out_shape[1] * out_shape[3]},
         CUDANet::DType::FLOAT32, backend
@@ -69,7 +53,7 @@ Conv2d::Conv2d(
     weights = CUDANet::Tensor(
         Shape{
-            kernel_shape[0] * kernel_shape[1] * kernel_shape[2] * in_shape[2]
+            kernel_shape[0], kernel_shape[1], kernel_shape[2], in_shape[2]
         },
         CUDANet::DType::FLOAT32, backend
     );
@@ -83,18 +67,11 @@ Conv2d::Conv2d(
 Conv2d::~Conv2d() {}
 
-CUDANet::Tensor& Conv2d::forward( CUDANet::Tensor& input) {
+CUDANet::Tensor& Conv2d::forward(CUDANet::Tensor& input) {
     output.zero();
 
     backend->conv2d(
-        weights,
-        biases,
-        input,
-        output,
-        in_shape,
-        padding_shape,
-        kernel_shape,
-        stride_shape,
-        out_shape
+        weights, biases, input, output, in_shape, padding_shape, kernel_shape,
+        stride_shape, out_shape
     );
 
     return output;
 }
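
Note: the rewritten out_shape initializer is the standard convolution output size, (in - kernel + 2 * padding) / stride + 1 per spatial dimension, with kernel_shape[2] carried over as the number of output channels. Worked through with hypothetical values:

    // 32x32x3 input, 3x3 kernel with 16 filters, stride 1, padding 1
    // out_h = (32 - 3 + 2 * 1) / 1 + 1 = 32
    // out_w = (32 - 3 + 2 * 1) / 1 + 1 = 32
    // out_shape = {32, 32, 16}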

View File

@@ -5,26 +5,22 @@
 using namespace CUDANet::Layers;
 
-Dense::Dense(CUDANet::Shape in, CUDANet::Shape out, CUDANet::Backend* backend)
+Dense::Dense(CUDANet::Shape in_shape, CUDANet::Shape out_shape, CUDANet::Backend* backend)
     : backend(backend),
-      in_shape(in),
-      out_shape(out) {
-    if (in.size() != 1) {
-        throw std::runtime_error(
-            std::format("Invalid shape. Expected [1], got {}", in_shape)
-        );
+      in_shape(in_shape),
+      out_shape(out_shape) {
+    if (in_shape.size() != 1) {
+        throw InvalidShapeException("input", 1, in_shape.size());
     }
 
-    if (out.size() != 1) {
-        throw std::runtime_error(
-            std::format("Invalid shape. Expected [1], got {}", out_shape)
-        );
+    if (out_shape.size() != 1) {
+        throw InvalidShapeException("output", 1, out_shape.size());
     }
 
-    weights = CUDANet::Tensor(Shape{in[0] * out[0]}, CUDANet::DType::FLOAT32, backend);
-    biases = CUDANet::Tensor(Shape{out[0]}, CUDANet::DType::FLOAT32, backend);
-    output = CUDANet::Tensor(Shape{out[0]}, CUDANet::DType::FLOAT32, backend);
+    weights = CUDANet::Tensor(Shape{out_shape[0], in_shape[0]}, CUDANet::DType::FLOAT32, backend);
+    biases = CUDANet::Tensor(Shape{out_shape[0]}, CUDANet::DType::FLOAT32, backend);
+    output = CUDANet::Tensor(Shape{out_shape[0]}, CUDANet::DType::FLOAT32, backend);
 
     weights.zero();
     biases.zero();
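
Note: the weights tensor goes from a flat Shape{in[0] * out[0]} to an explicit Shape{out_shape[0], in_shape[0]}, i.e. one row per output unit. Assuming row-major storage (an assumption; the backend's dense call is not part of this diff), the weight feeding output o from input i would sit at:

    // hypothetical indexing helper, not part of the changeset
    size_t weight_index(size_t o, size_t i, size_t in_features) {
        return o * in_features + i;
    }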

View File

@@ -6,27 +6,39 @@ using namespace CUDANet::Layers;
 MaxPool2d::MaxPool2d(
     CUDANet::Shape input_shape,
-    CUDANet::Shape pooling_shape,
+    CUDANet::Shape pool_shape,
     CUDANet::Shape stride_shape,
     CUDANet::Shape padding_shape,
     CUDANet::Backend* backend
 )
     : in_shape(input_shape),
-      pooling_shape(pooling_shape),
+      pool_shape(pool_shape),
       stride_shape(stride_shape),
       padding_shape(padding_shape),
       backend(backend) {
-    size_t out_h = (in_shape[0] + 2 * padding_shape[0] - pooling_shape[0]) /
-                       stride_shape[0] +
-                   1;
-    size_t out_w = (in_shape[1] + 2 * padding_shape[1] - pooling_shape[1]) /
-                       stride_shape[1] +
-                   1;
-
-    out_shape.resize(3);
-    out_shape[0] = out_h;
-    out_shape[1] = out_w;
-    out_shape[2] = in_shape[2];
+    if (in_shape.size() != 3) {
+        throw InvalidShapeException("input", 3, in_shape.size());
+    }
+
+    if (pool_shape.size() != 2) {
+        throw InvalidShapeException("pool", 2, pool_shape.size());
+    }
+
+    if (stride_shape.size() != 2) {
+        throw InvalidShapeException("stride", 2, stride_shape.size());
+    }
+
+    if (padding_shape.size() != 2) {
+        throw InvalidShapeException("padding", 2, padding_shape.size());
+    }
+
+    out_shape = {
+        (in_shape[0] + 2 * padding_shape[0] - pool_shape[0]) / stride_shape[0] +
+            1,
+        (in_shape[1] + 2 * padding_shape[1] - pool_shape[1]) / stride_shape[1] +
+            1,
+        in_shape[2]
+    };
 
     output = CUDANet::Tensor(
         Shape{out_shape[0] * out_shape[1] * out_shape[3]},
@@ -39,7 +51,7 @@ MaxPool2d::~MaxPool2d() {}
 CUDANet::Tensor& MaxPool2d::forward(CUDANet::Tensor& input) {
     output.zero();
 
     backend->maxPool2d(
-        input, output, in_shape, pooling_shape, stride_shape, padding_shape,
+        input, output, in_shape, pool_shape, stride_shape, padding_shape,
         out_shape
     );
 
     return output;
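
Note: MaxPool2d now validates its shapes the same way as AvgPool2d and builds out_shape from the usual pooling formula, (in + 2 * padding - pool) / stride + 1 per spatial dimension, with channels passed through unchanged. With hypothetical values, a 28x28x8 input with a 2x2 pool, stride 2 and no padding gives (28 + 0 - 2) / 2 + 1 = 14 per side, so out_shape = {14, 14, 8}.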