Format source code using clang-format

2024-02-27 18:51:22 +01:00
parent fb454de053
commit 48ba09b28d
12 changed files with 229 additions and 138 deletions

.clang-format Normal file

@@ -0,0 +1,20 @@
Language: Cpp
Standard: c++20
BasedOnStyle: Google
AccessModifierOffset: -2
AlignAfterOpenBracket: BlockIndent
AlignArrayOfStructures: Left
AlignConsecutiveAssignments:
Enabled: true
AlignConsecutiveBitFields:
Enabled: true
AlignConsecutiveDeclarations:
Enabled: true
AlignConsecutiveMacros:
Enabled: true
AllowAllArgumentsOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AlwaysBreakBeforeMultilineStrings: false
BinPackParameters: false
IndentWidth: 4
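
For illustration, a minimal sketch (the function name and parameters below are hypothetical, not from this repository) of how a long declaration is wrapped under this configuration: BinPackParameters: false gives each parameter its own line, AlignAfterOpenBracket: BlockIndent breaks after the opening parenthesis and places the closing parenthesis on its own line, and IndentWidth: 4 sets the continuation indent.

// Before formatting: a single over-long declaration.
void forward_layer(const float* input, float* output, int inputSize, int outputSize, cublasHandle_t handle);

// After clang-format with the configuration above.
void forward_layer(
    const float* input,
    float* output,
    int inputSize,
    int outputSize,
    cublasHandle_t handle
);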


@@ -7,8 +7,11 @@ __device__ float sigmoid(float a);
 __device__ float relu(float a);
 __device__ float linear(float a);
 
-__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
-__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
-__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
 
 #endif  // ACTIVATIONS_H


@@ -7,7 +7,12 @@ namespace Layers {
 class Conv {
   public:
-    Conv(int inputSize, int outputSize, int kernelSize, cublasHandle_t cublasHandle);
+    Conv(
+        int inputSize,
+        int outputSize,
+        int kernelSize,
+        cublasHandle_t cublasHandle
+    );
     ~Conv();
     void forward(const float* input, float* output);


@@ -1,17 +1,24 @@
 #ifndef DENSE_LAYER_H
 #define DENSE_LAYER_H
 
-#include <functional>
-#include <vector>
 #include <cublas_v2.h>
+
+#include <functional>
 #include <string>
+#include <vector>
 
 #include "ilayer.cuh"
 
 namespace Layers {
 
 class Dense : public ILayer {
   public:
-    Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle);
+    Dense(
+        int inputSize,
+        int outputSize,
+        std::string activation,
+        cublasHandle_t cublasHandle
+    );
     ~Dense();
     void forward(const float* input, float* output);


@@ -3,6 +3,7 @@
 #define I_LAYER_H
 
 #include <cublas_v2.h>
+
 #include <vector>
 
 namespace Layers {


@@ -1,22 +1,24 @@
#include "activations.cuh"
#include <functional> #include <functional>
__device__ float sigmoid(float a) #include "activations.cuh"
{
__device__ float sigmoid(float a) {
return 1.0 / (1.0 + exp(-a)); return 1.0 / (1.0 + exp(-a));
} }
__device__ float relu(float a) __device__ float relu(float a) {
{
return a < 0.0 ? 0.0 : a; return a < 0.0 ? 0.0 : a;
} }
__device__ float linear(float a) __device__ float linear(float a) {
{
return a; return a;
} }
__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) { __global__ void sigmoid_kernel(
const float* __restrict__ src,
float* __restrict__ dst,
int len
) {
int stride = gridDim.x * blockDim.x; int stride = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x;
@@ -25,7 +27,8 @@ __global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict_
     }
 }
 
-__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void
+relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
@@ -34,7 +37,8 @@ __global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ d
     }
 }
 
-__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void
+linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
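
The hunks above are cut off before the loop bodies, but the stride and tid variables indicate the usual CUDA grid-stride loop. A minimal sketch of that pattern, assuming a body along these lines (the kernel name and body are illustrative, not taken verbatim from the commit):

__global__ void
example_sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid    = blockDim.x * blockIdx.x + threadIdx.x;

    // Grid-stride loop: each thread processes elements tid, tid + stride, ...
    for (int i = tid; i < len; i += stride) {
        dst[i] = 1.0f / (1.0f + expf(-src[i]));
    }
}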


@@ -1,16 +1,25 @@
#include "dense.cuh"
#include "cuda_helper.cuh"
#include "activations.cuh"
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h> #include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio> #include <cstdio>
#include <iostream> #include <cstdlib>
#include <functional> #include <functional>
#include <iostream>
Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle) #include "activations.cuh"
: inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle), activation(activation) { #include "cuda_helper.cuh"
#include "dense.cuh"
Layers::Dense::Dense(
int inputSize,
int outputSize,
std::string activation,
cublasHandle_t cublasHandle
)
: inputSize(inputSize),
outputSize(outputSize),
cublasHandle(cublasHandle),
activation(activation) {
// Allocate memory for weights and biases // Allocate memory for weights and biases
weights.resize(outputSize * inputSize); weights.resize(outputSize * inputSize);
biases.resize(outputSize); biases.resize(outputSize);
@@ -22,7 +31,9 @@ Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cubl
     d_biases = nullptr;
 
     // Allocate GPU memory for weights and biases
-    CUDA_CHECK(cudaMalloc((void**)&d_weights, sizeof(float) * inputSize * outputSize));
+    CUDA_CHECK(
+        cudaMalloc((void**)&d_weights, sizeof(float) * inputSize * outputSize)
+    );
     CUDA_CHECK(cudaMalloc((void**)&d_biases, sizeof(float) * outputSize));
 
     toCuda();
@@ -46,28 +57,45 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
     const float alpha = 1.0f;
     const float beta = 1.0f;
 
-    CUBLAS_CHECK(cublasSgemv(cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights, inputSize, d_input, 1, &beta, d_output, 1));
-    CUBLAS_CHECK(cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1));
+    CUBLAS_CHECK(cublasSgemv(
+        cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights,
+        inputSize, d_input, 1, &beta, d_output, 1
+    ));
+    CUBLAS_CHECK(
+        cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1)
+    );
 
     int threadsPerBlock = 256;
     int blocksPerGrid = (outputSize + threadsPerBlock - 1) / threadsPerBlock;
 
     if (activation == "sigmoid") {
-        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     } else if (activation == "relu") {
-        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     } else {
-        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     }
 }
 
 void Layers::Dense::toCuda() {
-    CUBLAS_CHECK(cublasSetMatrix(outputSize, inputSize, sizeof(float), weights.data(), outputSize, d_weights, outputSize));
-    CUBLAS_CHECK(cublasSetVector(biases.size(), sizeof(float), biases.data(), 1, d_biases, 1));
+    CUBLAS_CHECK(cublasSetMatrix(
+        outputSize, inputSize, sizeof(float), weights.data(), outputSize,
+        d_weights, outputSize
+    ));
+    CUBLAS_CHECK(cublasSetVector(
+        biases.size(), sizeof(float), biases.data(), 1, d_biases, 1
+    ));
 }
 
-void Layers::Dense::setWeights(const std::vector<std::vector<float>>& weights_input) {
+void Layers::Dense::setWeights(
+    const std::vector<std::vector<float>>& weights_input
+) {
     int numWeights = inputSize * outputSize;
 
     if (weights.size() != numWeights) {
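
Read together, the reformatted forward() amounts to output = activation(weights * input + biases): cublasSgemv computes the matrix-vector product into d_output, cublasSaxpy adds the biases, and one of the kernels applies the activation in place. A rough usage sketch under that reading (the wrapper function and sizes are illustrative, not part of the commit):

#include <cublas_v2.h>

#include "dense.cuh"

// Hypothetical helper; d_input and d_output are device buffers holding
// inputSize and outputSize floats respectively.
void run_dense_example(const float* d_input, float* d_output) {
    int inputSize  = 4;
    int outputSize = 5;

    cublasHandle_t handle;
    cublasCreate(&handle);

    // "sigmoid" and "relu" are handled explicitly; any other string falls
    // back to the linear kernel.
    Layers::Dense dense(inputSize, outputSize, "sigmoid", handle);
    dense.forward(d_input, d_output);  // d_output = sigmoid(W * d_input + b)

    cublasDestroy(handle);
}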


@@ -1,8 +1,10 @@
+#include <cublas_v2.h>
+#include <cuda_runtime.h>
+
 #include <cstdio>
 #include <cstdlib>
+
 #include "cuda_helper.cuh"
-#include <cuda_runtime.h>
-#include <cublas_v2.h>
 
 cudaDeviceProp initializeCUDA(cublasHandle_t& cublasHandle) {
     int deviceCount;


@@ -1,14 +1,24 @@
#include "gtest/gtest.h"
#include <cuda_runtime_api.h> #include <cuda_runtime_api.h>
#include <driver_types.h> #include <driver_types.h>
#include <iostream> #include <iostream>
#include "activations.cuh" #include "activations.cuh"
#include "dense.cuh" #include "dense.cuh"
#include "gtest/gtest.h"
#include "test_cublas_fixture.cuh" #include "test_cublas_fixture.cuh"
class DenseLayerTest : public CublasTestFixture { class DenseLayerTest : public CublasTestFixture {
protected: protected:
Layers::Dense commonTestSetup(int inputSize, int outputSize, std::vector<float>& input, std::vector<std::vector<float>>& weights, std::vector<float>& biases, float*& d_input, float*& d_output) { Layers::Dense commonTestSetup(
int inputSize,
int outputSize,
std::vector<float>& input,
std::vector<std::vector<float>>& weights,
std::vector<float>& biases,
float*& d_input,
float*& d_output
) {
// Create Dense layer // Create Dense layer
Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle); Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
@@ -24,7 +34,9 @@ protected:
         EXPECT_EQ(cudaStatus, cudaSuccess);
 
         // Copy input to device
-        cublasStatus = cublasSetVector(input.size(), sizeof(float), input.data(), 1, d_input, 1);
+        cublasStatus = cublasSetVector(
+            input.size(), sizeof(float), input.data(), 1, d_input, 1
+        );
         EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
 
         return denseLayer;
@@ -41,22 +53,21 @@ protected:
 };
 
 TEST_F(DenseLayerTest, Init) {
     for (int i = 1; i < 100; ++i) {
         for (int j = 1; j < 100; ++j) {
             int inputSize = i;
             int outputSize = j;
 
-            // std::cout << "Dense layer: input size = " << inputSize << ", output size = " << outputSize << std::endl;
-            Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
+            // std::cout << "Dense layer: input size = " << inputSize << ",
+            // output size = " << outputSize << std::endl;
+            Layers::Dense denseLayer(
+                inputSize, outputSize, "linear", cublasHandle
+            );
         }
     }
 }
 
 TEST_F(DenseLayerTest, setWeights) {
     int inputSize = 4;
     int outputSize = 5;
@@ -71,17 +82,17 @@ TEST_F(DenseLayerTest, setWeights) {
     Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
 
     denseLayer.setWeights(weights);
 }
 
 TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
     int inputSize = 3;
     int outputSize = 3;
 
     std::vector<float> input = {1.0f, 2.0f, 3.0f};
-    std::vector<std::vector<float>> weights(inputSize, std::vector<float>(outputSize, 0.0f));
+    std::vector<std::vector<float>> weights(
+        inputSize, std::vector<float>(outputSize, 0.0f)
+    );
 
     for (int i = 0; i < inputSize; ++i) {
         for (int j = 0; j < outputSize; ++j) {
             if (i == j) {
@@ -94,11 +105,15 @@ TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
     float* d_input;
     float* d_output;
 
-    Layers::Dense denseLayer = commonTestSetup(inputSize, outputSize, input, weights, biases, d_input, d_output);
+    Layers::Dense denseLayer = commonTestSetup(
+        inputSize, outputSize, input, weights, biases, d_input, d_output
+    );
 
     denseLayer.forward(d_input, d_output);
 
     std::vector<float> output(outputSize);
-    cublasStatus = cublasGetVector(outputSize, sizeof(float), d_output, 1, output.data(), 1);
+    cublasStatus = cublasGetVector(
+        outputSize, sizeof(float), d_output, 1, output.data(), 1
+    );
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
 
     // Check if the output is a zero vector
@@ -126,17 +141,23 @@ TEST_F(DenseLayerTest, ForwardRandomWeightMatrix) {
     float* d_input;
     float* d_output;
 
-    Layers::Dense denseLayer = commonTestSetup(inputSize, outputSize, input, weights, biases, d_input, d_output);
+    Layers::Dense denseLayer = commonTestSetup(
+        inputSize, outputSize, input, weights, biases, d_input, d_output
+    );
 
     denseLayer.forward(d_input, d_output);
 
     std::vector<float> output(outputSize);
-    cublasStatus = cublasGetVector(outputSize, sizeof(float), d_output, 1, output.data(), 1);
+    cublasStatus = cublasGetVector(
+        outputSize, sizeof(float), d_output, 1, output.data(), 1
+    );
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
 
     std::vector<float> expectedOutput = {10.4f, 13.0f, 8.9f, 9.3f};
     for (int i = 0; i < outputSize; ++i) {
-        EXPECT_NEAR(output[i], expectedOutput[i], 1e-4);  // Allow small tolerance for floating-point comparison
+        EXPECT_NEAR(
+            output[i], expectedOutput[i], 1e-4
+        );  // Allow small tolerance for floating-point comparison
     }
 
     commonTestTeardown(d_input, d_output);


@@ -1,5 +1,5 @@
#include "gtest/gtest.h"
#include "cublas_v2.h" #include "cublas_v2.h"
#include "gtest/gtest.h"
#include "test_cublas_fixture.cuh" #include "test_cublas_fixture.cuh"
cublasHandle_t CublasTestFixture::cublasHandle; cublasHandle_t CublasTestFixture::cublasHandle;


@@ -1,5 +1,5 @@
#include "gtest/gtest.h"
#include "cublas_v2.h" #include "cublas_v2.h"
#include "gtest/gtest.h"
class CublasTestFixture : public ::testing::Test { class CublasTestFixture : public ::testing::Test {
protected: protected:


@@ -1,8 +1,10 @@
#include "gtest/gtest.h"
#include <cuda_runtime_api.h> #include <cuda_runtime_api.h>
#include <driver_types.h> #include <driver_types.h>
#include <iostream> #include <iostream>
#include "functions.cuh" #include "functions.cuh"
#include "gtest/gtest.h"
#include "test_cublas_fixture.cuh" #include "test_cublas_fixture.cuh"
class FunctionsTest : public CublasTestFixture { class FunctionsTest : public CublasTestFixture {
@@ -11,6 +13,4 @@ protected:
     cublasStatus_t cublasStatus;
 };
 
-TEST_F(FunctionsTest, sigmoid) {
-}
+TEST_F(FunctionsTest, sigmoid) {}