Format source code using clang-format

2024-02-27 18:51:22 +01:00
parent fb454de053
commit 48ba09b28d
12 changed files with 229 additions and 138 deletions

.clang-format (new file)

@@ -0,0 +1,20 @@
+Language: Cpp
+Standard: c++20
+BasedOnStyle: Google
+AccessModifierOffset: -2
+AlignAfterOpenBracket: BlockIndent
+AlignArrayOfStructures: Left
+AlignConsecutiveAssignments:
+  Enabled: true
+AlignConsecutiveBitFields:
+  Enabled: true
+AlignConsecutiveDeclarations:
+  Enabled: true
+AlignConsecutiveMacros:
+  Enabled: true
+AllowAllArgumentsOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AlwaysBreakBeforeMultilineStrings: false
+BinPackParameters: false
+IndentWidth: 4
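
For illustration, here is roughly how a long declaration comes out under these options (a hypothetical snippet, not from the commit): BinPackParameters: false places one parameter per line once the declaration overflows, AlignAfterOpenBracket: BlockIndent pushes the closing parenthesis onto its own line, and AlignConsecutiveDeclarations lines up the parameter names.

void cross_correlate(
    const float* input,   // BinPackParameters: false -> one parameter per line
    float*       kernel,  // AlignConsecutiveDeclarations aligns these names
    float*       output,
    int          length
);  // AlignAfterOpenBracket: BlockIndent -> ')' on its own line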

activations.cuh

@@ -7,8 +7,11 @@ __device__ float sigmoid(float a);
 __device__ float relu(float a);
 __device__ float linear(float a);
-__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
-__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
-__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
 #endif // ACTIVATIONS_H

conv.cuh

@@ -5,9 +5,14 @@
 namespace Layers {
-class Conv {
+class Conv {
   public:
-    Conv(int inputSize, int outputSize, int kernelSize, cublasHandle_t cublasHandle);
+    Conv(
+        int inputSize,
+        int outputSize,
+        int kernelSize,
+        cublasHandle_t cublasHandle
+    );
     ~Conv();
     void forward(const float* input, float* output);
@@ -19,7 +24,7 @@ namespace Layers {
     cublasHandle_t cublasHandle;
     float* d_weights;
     float* d_biases;
-};
+};
 } // namespace Layers

dense.cuh

@@ -1,17 +1,24 @@
 #ifndef DENSE_LAYER_H
 #define DENSE_LAYER_H
-#include <functional>
-#include <vector>
 #include <cublas_v2.h>
+#include <functional>
+#include <string>
+#include <vector>
 #include "ilayer.cuh"
 namespace Layers {
-class Dense : public ILayer {
+class Dense : public ILayer {
   public:
-    Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle);
+    Dense(
+        int inputSize,
+        int outputSize,
+        std::string activation,
+        cublasHandle_t cublasHandle
+    );
     ~Dense();
     void forward(const float* input, float* output);
@@ -35,7 +42,7 @@ namespace Layers {
     void initializeWeights();
     void initializeBiases();
     void toCuda();
-};
+};
 } // namespace Layers

ilayer.cuh

@@ -3,18 +3,19 @@
 #define I_LAYER_H
 #include <cublas_v2.h>
 #include <vector>
 namespace Layers {
-class ILayer {
+class ILayer {
   public:
     virtual ~ILayer() {}
     virtual void forward(const float* input, float* output) = 0;
     virtual void setWeights(const std::vector<std::vector<float>>& weights) = 0;
     virtual void setBiases(const std::vector<float>& biases) = 0;
-};
+};
 } // namespace Layers

activations.cu

@@ -1,22 +1,24 @@
#include "activations.cuh"
#include <functional>
__device__ float sigmoid(float a)
{
return 1.0 / (1.0 + exp (-a));
#include "activations.cuh"
__device__ float sigmoid(float a) {
return 1.0 / (1.0 + exp(-a));
}
__device__ float relu(float a)
{
__device__ float relu(float a) {
return a < 0.0 ? 0.0 : a;
}
__device__ float linear(float a)
{
__device__ float linear(float a) {
return a;
}
__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
__global__ void sigmoid_kernel(
const float* __restrict__ src,
float* __restrict__ dst,
int len
) {
int stride = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
@@ -25,7 +27,8 @@ __global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict_
     }
 }
-__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void
+relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
@@ -34,7 +37,8 @@ __global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ d
     }
 }
-__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void
+linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
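
The kernel hunks above are cut off before their loop bodies. For context, a grid-stride body consistent with the stride/tid setup shown would look like this (a sketch, not the commit's verbatim code):

__global__ void
sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;

    // Each thread handles elements tid, tid + stride, tid + 2*stride, ...,
    // so the kernel is correct for any grid size.
    for (int i = tid; i < len; i += stride) {
        dst[i] = sigmoid(src[i]);
    }
}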

dense.cu

@@ -1,16 +1,25 @@
#include "dense.cuh"
#include "cuda_helper.cuh"
#include "activations.cuh"
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <iostream>
#include <cstdlib>
#include <functional>
#include <iostream>
Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle)
: inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle), activation(activation) {
#include "activations.cuh"
#include "cuda_helper.cuh"
#include "dense.cuh"
Layers::Dense::Dense(
int inputSize,
int outputSize,
std::string activation,
cublasHandle_t cublasHandle
)
: inputSize(inputSize),
outputSize(outputSize),
cublasHandle(cublasHandle),
activation(activation) {
// Allocate memory for weights and biases
weights.resize(outputSize * inputSize);
biases.resize(outputSize);
@@ -22,7 +31,9 @@ Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cubl
     d_biases = nullptr;
     // Allocate GPU memory for weights and biases
-    CUDA_CHECK(cudaMalloc((void**)&d_weights, sizeof(float) * inputSize * outputSize));
+    CUDA_CHECK(
+        cudaMalloc((void**)&d_weights, sizeof(float) * inputSize * outputSize)
+    );
     CUDA_CHECK(cudaMalloc((void**)&d_biases, sizeof(float) * outputSize));
     toCuda();
@@ -46,28 +57,45 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
     const float alpha = 1.0f;
     const float beta = 1.0f;
-    CUBLAS_CHECK(cublasSgemv(cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights, inputSize, d_input, 1, &beta, d_output, 1));
-    CUBLAS_CHECK(cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1));
+    CUBLAS_CHECK(cublasSgemv(
+        cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights,
+        inputSize, d_input, 1, &beta, d_output, 1
+    ));
+    CUBLAS_CHECK(
+        cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1)
+    );
     int threadsPerBlock = 256;
     int blocksPerGrid = (outputSize + threadsPerBlock - 1) / threadsPerBlock;
     if (activation == "sigmoid") {
-        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     } else if (activation == "relu") {
-        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     } else {
-        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     }
 }
 void Layers::Dense::toCuda() {
-    CUBLAS_CHECK(cublasSetMatrix(outputSize, inputSize, sizeof(float), weights.data(), outputSize, d_weights, outputSize));
-    CUBLAS_CHECK(cublasSetVector(biases.size(), sizeof(float), biases.data(), 1, d_biases, 1));
+    CUBLAS_CHECK(cublasSetMatrix(
+        outputSize, inputSize, sizeof(float), weights.data(), outputSize,
+        d_weights, outputSize
+    ));
+    CUBLAS_CHECK(cublasSetVector(
+        biases.size(), sizeof(float), biases.data(), 1, d_biases, 1
+    ));
 }
-void Layers::Dense::setWeights(const std::vector<std::vector<float>>& weights_input) {
+void Layers::Dense::setWeights(
+    const std::vector<std::vector<float>>& weights_input
+) {
     int numWeights = inputSize * outputSize;
     if (weights.size() != numWeights) {
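
For reference, Dense::forward above is the standard two-step cuBLAS affine transform: cublasSgemv computes y = alpha*W*x + beta*y and cublasSaxpy adds the bias. A self-contained sketch of the same sequence (hypothetical sizes and data, error checks omitted; W stored column-major as cuBLAS expects, with m = rows = output size):

#include <cublas_v2.h>
#include <cuda_runtime.h>

#include <cstdio>
#include <vector>

int main() {
    const int in = 3, out = 2;
    // W is out x in, column-major: columns (1,0), (0,1), (1,1)
    std::vector<float> W = {1, 0, 0, 1, 1, 1};
    std::vector<float> x = {1, 2, 3};
    std::vector<float> b = {0.5f, -0.5f};

    cublasHandle_t handle;
    cublasCreate(&handle);

    float *d_W, *d_x, *d_b, *d_y;
    cudaMalloc((void**)&d_W, sizeof(float) * in * out);
    cudaMalloc((void**)&d_x, sizeof(float) * in);
    cudaMalloc((void**)&d_b, sizeof(float) * out);
    cudaMalloc((void**)&d_y, sizeof(float) * out);
    cudaMemset(d_y, 0, sizeof(float) * out);

    cublasSetMatrix(out, in, sizeof(float), W.data(), out, d_W, out);
    cublasSetVector(in, sizeof(float), x.data(), 1, d_x, 1);
    cublasSetVector(out, sizeof(float), b.data(), 1, d_b, 1);

    const float alpha = 1.0f, beta = 1.0f;
    // y = alpha * W * x + beta * y  (W is m x n = out x in under CUBLAS_OP_N)
    cublasSgemv(
        handle, CUBLAS_OP_N, out, in, &alpha, d_W, out, d_x, 1, &beta, d_y, 1
    );
    cublasSaxpy(handle, out, &alpha, d_b, 1, d_y, 1);  // y += b

    std::vector<float> y(out);
    cublasGetVector(out, sizeof(float), d_y, 1, y.data(), 1);
    printf("y = [%f, %f]\n", y[0], y[1]);  // expect [4.5, 4.5]

    cudaFree(d_W); cudaFree(d_x); cudaFree(d_b); cudaFree(d_y);
    cublasDestroy(handle);
    return 0;
}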

(file name not captured: initializeCUDA)

@@ -1,8 +1,10 @@
+#include <cublas_v2.h>
+#include <cuda_runtime.h>
 #include <cstdio>
 #include <cstdlib>
 #include "cuda_helper.cuh"
-#include <cuda_runtime.h>
-#include <cublas_v2.h>
 cudaDeviceProp initializeCUDA(cublasHandle_t& cublasHandle) {
     int deviceCount;

(file name not captured: DenseLayerTest)

@@ -1,14 +1,24 @@
#include "gtest/gtest.h"
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <iostream>
#include "activations.cuh"
#include "dense.cuh"
#include "gtest/gtest.h"
#include "test_cublas_fixture.cuh"
class DenseLayerTest : public CublasTestFixture {
protected:
Layers::Dense commonTestSetup(int inputSize, int outputSize, std::vector<float>& input, std::vector<std::vector<float>>& weights, std::vector<float>& biases, float*& d_input, float*& d_output) {
protected:
Layers::Dense commonTestSetup(
int inputSize,
int outputSize,
std::vector<float>& input,
std::vector<std::vector<float>>& weights,
std::vector<float>& biases,
float*& d_input,
float*& d_output
) {
// Create Dense layer
Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
@@ -24,7 +34,9 @@ protected:
         EXPECT_EQ(cudaStatus, cudaSuccess);
         // Copy input to device
-        cublasStatus = cublasSetVector(input.size(), sizeof(float), input.data(), 1, d_input, 1);
+        cublasStatus = cublasSetVector(
+            input.size(), sizeof(float), input.data(), 1, d_input, 1
+        );
         EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
         return denseLayer;
@@ -41,22 +53,21 @@ protected:
 };
 TEST_F(DenseLayerTest, Init) {
     for (int i = 1; i < 100; ++i) {
         for (int j = 1; j < 100; ++j) {
             int inputSize = i;
             int outputSize = j;
-            // std::cout << "Dense layer: input size = " << inputSize << ", output size = " << outputSize << std::endl;
-            Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
+            // std::cout << "Dense layer: input size = " << inputSize << ",
+            // output size = " << outputSize << std::endl;
+            Layers::Dense denseLayer(
+                inputSize, outputSize, "linear", cublasHandle
+            );
         }
     }
 }
 TEST_F(DenseLayerTest, setWeights) {
     int inputSize = 4;
     int outputSize = 5;
@@ -71,17 +82,17 @@ TEST_F(DenseLayerTest, setWeights) {
     Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
     denseLayer.setWeights(weights);
 }
 TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
     int inputSize = 3;
     int outputSize = 3;
     std::vector<float> input = {1.0f, 2.0f, 3.0f};
-    std::vector<std::vector<float>> weights(inputSize, std::vector<float>(outputSize, 0.0f));
+    std::vector<std::vector<float>> weights(
+        inputSize, std::vector<float>(outputSize, 0.0f)
+    );
     for (int i = 0; i < inputSize; ++i) {
         for (int j = 0; j < outputSize; ++j) {
             if (i == j) {
@@ -94,11 +105,15 @@ TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
     float* d_input;
     float* d_output;
-    Layers::Dense denseLayer = commonTestSetup(inputSize, outputSize, input, weights, biases, d_input, d_output);
+    Layers::Dense denseLayer = commonTestSetup(
+        inputSize, outputSize, input, weights, biases, d_input, d_output
+    );
     denseLayer.forward(d_input, d_output);
     std::vector<float> output(outputSize);
-    cublasStatus = cublasGetVector(outputSize, sizeof(float), d_output, 1, output.data(), 1);
+    cublasStatus = cublasGetVector(
+        outputSize, sizeof(float), d_output, 1, output.data(), 1
+    );
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
     // Check if the output is a zero vector
@@ -126,17 +141,23 @@ TEST_F(DenseLayerTest, ForwardRandomWeightMatrix) {
     float* d_input;
     float* d_output;
-    Layers::Dense denseLayer = commonTestSetup(inputSize, outputSize, input, weights, biases, d_input, d_output);
+    Layers::Dense denseLayer = commonTestSetup(
+        inputSize, outputSize, input, weights, biases, d_input, d_output
+    );
     denseLayer.forward(d_input, d_output);
     std::vector<float> output(outputSize);
-    cublasStatus = cublasGetVector(outputSize, sizeof(float), d_output, 1, output.data(), 1);
+    cublasStatus = cublasGetVector(
+        outputSize, sizeof(float), d_output, 1, output.data(), 1
+    );
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
     std::vector<float> expectedOutput = {10.4f, 13.0f, 8.9f, 9.3f};
     for (int i = 0; i < outputSize; ++i) {
-        EXPECT_NEAR(output[i], expectedOutput[i], 1e-4); // Allow small tolerance for floating-point comparison
+        EXPECT_NEAR(
+            output[i], expectedOutput[i], 1e-4
+        );  // Allow small tolerance for floating-point comparison
     }
     commonTestTeardown(d_input, d_output);

test_cublas_fixture.cu

@@ -1,5 +1,5 @@
#include "gtest/gtest.h"
#include "cublas_v2.h"
#include "gtest/gtest.h"
#include "test_cublas_fixture.cuh"
cublasHandle_t CublasTestFixture::cublasHandle;

test_cublas_fixture.cuh

@@ -1,8 +1,8 @@
#include "gtest/gtest.h"
#include "cublas_v2.h"
#include "gtest/gtest.h"
class CublasTestFixture : public ::testing::Test {
protected:
protected:
static cublasHandle_t cublasHandle;
static void SetUpTestSuite();
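
SetUpTestSuite's body is not part of this diff; a typical implementation that creates one shared handle for the whole suite would look something like this (a sketch; the TearDownTestSuite counterpart is assumed, not shown in the commit):

void CublasTestFixture::SetUpTestSuite() {
    // One cuBLAS handle shared by every test in the suite
    cublasCreate(&cublasHandle);
}

void CublasTestFixture::TearDownTestSuite() {
    cublasDestroy(cublasHandle);
}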

(file name not captured: FunctionsTest)

@@ -1,16 +1,16 @@
#include "gtest/gtest.h"
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <iostream>
#include "functions.cuh"
#include "gtest/gtest.h"
#include "test_cublas_fixture.cuh"
class FunctionsTest : public CublasTestFixture {
protected:
protected:
cudaError_t cudaStatus;
cublasStatus_t cublasStatus;
};
TEST_F(FunctionsTest, sigmoid) {
}
TEST_F(FunctionsTest, sigmoid) {}