Mirror of https://github.com/lordmathis/CUDANet.git (synced 2025-11-05 17:34:21 +00:00)

Format source code using clang-format

.clang-format (new file, 20 lines):

@@ -0,0 +1,20 @@
+Language: Cpp
+Standard: c++20
+BasedOnStyle: Google
+AccessModifierOffset: -2
+AlignAfterOpenBracket: BlockIndent
+AlignArrayOfStructures: Left
+AlignConsecutiveAssignments:
+  Enabled: true
+AlignConsecutiveBitFields:
+  Enabled: true
+AlignConsecutiveDeclarations:
+  Enabled: true
+AlignConsecutiveMacros:
+  Enabled: true
+AllowAllArgumentsOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AlwaysBreakBeforeMultilineStrings: false
+BinPackParameters: false
+IndentWidth: 4
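
Two of these options drive most of the reformatting below: AlignAfterOpenBracket: BlockIndent breaks an over-long argument or parameter list after the opening parenthesis and puts the closing parenthesis on its own line, and BinPackParameters: false forces one parameter per line once a declaration wraps. A minimal sketch of the combined effect (hypothetical function, not from this repo):

    // Before: a long declaration that exceeds the column limit
    void launchKernel(const float* src, float* dst, int len, cudaStream_t stream);

    // After: BlockIndent with one parameter per line and the closing
    // parenthesis on its own line
    void launchKernel(
        const float* src,
        float* dst,
        int len,
        cudaStream_t stream
    );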

activations.cuh:

@@ -7,8 +7,11 @@ __device__ float sigmoid(float a);
 __device__ float relu(float a);
 __device__ float linear(float a);
 
-__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
-__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
-__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
+__global__ void
+linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len);
 
 #endif // ACTIVATIONS_H

Conv layer header:

@@ -5,22 +5,27 @@
 
 namespace Layers {
 
 class Conv {
   public:
-    Conv(int inputSize, int outputSize, int kernelSize, cublasHandle_t cublasHandle);
+    Conv(
+        int inputSize,
+        int outputSize,
+        int kernelSize,
+        cublasHandle_t cublasHandle
+    );
     ~Conv();
 
     void forward(const float* input, float* output);
 
   private:
     int inputSize;
     int outputSize;
     int kernelSize;
     cublasHandle_t cublasHandle;
     float* d_weights;
     float* d_biases;
 };
 
 } // namespace Layers
 
 #endif // CONV_LAYER_H

dense.cuh:

@@ -1,42 +1,49 @@
 #ifndef DENSE_LAYER_H
 #define DENSE_LAYER_H
 
-#include <functional>
-#include <vector>
 #include <cublas_v2.h>
+
+#include <functional>
 #include <string>
+#include <vector>
 
 #include "ilayer.cuh"
 
 namespace Layers {
 
 class Dense : public ILayer {
   public:
-    Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle);
+    Dense(
+        int inputSize,
+        int outputSize,
+        std::string activation,
+        cublasHandle_t cublasHandle
+    );
     ~Dense();
 
     void forward(const float* input, float* output);
     void setWeights(const std::vector<std::vector<float>>& weights);
     void setBiases(const std::vector<float>& biases);
 
   private:
     int inputSize;
     int outputSize;
 
     cublasHandle_t cublasHandle;
 
     float* d_weights;
     float* d_biases;
 
     std::vector<float> weights;
     std::vector<float> biases;
 
     std::string activation;
 
     void initializeWeights();
     void initializeBiases();
     void toCuda();
 };
 
 } // namespace Layers
 
 #endif // DENSE_LAYER_H

ilayer.cuh:

@@ -3,19 +3,20 @@
 #define I_LAYER_H
 
 #include <cublas_v2.h>
+
 #include <vector>
 
 namespace Layers {
 
 class ILayer {
   public:
    virtual ~ILayer() {}
 
    virtual void forward(const float* input, float* output) = 0;
    virtual void setWeights(const std::vector<std::vector<float>>& weights) = 0;
    virtual void setBiases(const std::vector<float>& biases) = 0;
 };
 
 } // namespace Layers
 
 #endif // I_LAYERH

Activation function implementations:

@@ -1,44 +1,48 @@
-#include "activations.cuh"
 #include <functional>
 
-__device__ float sigmoid(float a)
-{
-    return 1.0 / (1.0 + exp (-a));
+#include "activations.cuh"
+
+__device__ float sigmoid(float a) {
+    return 1.0 / (1.0 + exp(-a));
 }
 
-__device__ float relu(float a)
-{
+__device__ float relu(float a) {
     return a < 0.0 ? 0.0 : a;
 }
 
-__device__ float linear(float a)
-{
+__device__ float linear(float a) {
     return a;
 }
 
-__global__ void sigmoid_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void sigmoid_kernel(
+    const float* __restrict__ src,
+    float* __restrict__ dst,
+    int len
+) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
 
     for (int i = tid; i < len; i += stride) {
         dst[i] = sigmoid(src[i]);
     }
 }
 
-__global__ void relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void
+relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
 
     for (int i = tid; i < len; i += stride) {
         dst[i] = relu(src[i]);
     }
 }
 
-__global__ void linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
+__global__ void
+linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
     int stride = gridDim.x * blockDim.x;
     int tid = blockDim.x * blockIdx.x + threadIdx.x;
 
     for (int i = tid; i < len; i += stride) {
         dst[i] = linear(src[i]);
     }
 }
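
Each kernel above uses a grid-stride loop: a thread starts at its global index and advances by the total thread count, so a launch of any grid size covers the whole array. A minimal launch sketch; the buffer names d_src and d_dst and the size n are assumptions, not code from the repo:

    // Hypothetical usage: d_src and d_dst are device buffers of n floats
    // previously allocated with cudaMalloc. The stride loop picks up any
    // elements the grid does not cover in a single pass.
    int n = 1 << 20;
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_src, d_dst, n);
    cudaDeviceSynchronize();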

Dense layer implementation:

@@ -1,16 +1,25 @@
-#include "dense.cuh"
-#include "cuda_helper.cuh"
-#include "activations.cuh"
-#include <cstdlib>
-#include <cuda_runtime.h>
 #include <cublas_v2.h>
+#include <cuda_runtime.h>
 
 #include <cstdio>
-#include <iostream>
+#include <cstdlib>
 #include <functional>
+#include <iostream>
+
+#include "activations.cuh"
+#include "cuda_helper.cuh"
+#include "dense.cuh"
 
-Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cublasHandle_t cublasHandle)
-    : inputSize(inputSize), outputSize(outputSize), cublasHandle(cublasHandle), activation(activation) {
+Layers::Dense::Dense(
+    int inputSize,
+    int outputSize,
+    std::string activation,
+    cublasHandle_t cublasHandle
+)
+    : inputSize(inputSize),
+      outputSize(outputSize),
+      cublasHandle(cublasHandle),
+      activation(activation) {
     // Allocate memory for weights and biases
     weights.resize(outputSize * inputSize);
     biases.resize(outputSize);
@@ -19,10 +28,12 @@ Layers::Dense::Dense(int inputSize, int outputSize, std::string activation, cubl
     initializeBiases();
 
     d_weights = nullptr;
     d_biases = nullptr;
 
     // Allocate GPU memory for weights and biases
-    CUDA_CHECK(cudaMalloc((void**)&d_weights, sizeof(float) * inputSize * outputSize));
+    CUDA_CHECK(
+        cudaMalloc((void**)&d_weights, sizeof(float) * inputSize * outputSize)
+    );
     CUDA_CHECK(cudaMalloc((void**)&d_biases, sizeof(float) * outputSize));
 
     toCuda();
@@ -44,30 +55,47 @@ void Layers::Dense::initializeBiases() {
 
 void Layers::Dense::forward(const float* d_input, float* d_output) {
     const float alpha = 1.0f;
     const float beta = 1.0f;
 
-    CUBLAS_CHECK(cublasSgemv(cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights, inputSize, d_input, 1, &beta, d_output, 1));
-    CUBLAS_CHECK(cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1));
+    CUBLAS_CHECK(cublasSgemv(
+        cublasHandle, CUBLAS_OP_N, inputSize, outputSize, &alpha, d_weights,
+        inputSize, d_input, 1, &beta, d_output, 1
+    ));
+    CUBLAS_CHECK(
+        cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1)
+    );
 
     int threadsPerBlock = 256;
     int blocksPerGrid = (outputSize + threadsPerBlock - 1) / threadsPerBlock;
 
     if (activation == "sigmoid") {
-        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     } else if (activation == "relu") {
-        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     } else {
-        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_output, d_output, outputSize);
+        linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            d_output, d_output, outputSize
+        );
     }
 
 }
 
 void Layers::Dense::toCuda() {
-    CUBLAS_CHECK(cublasSetMatrix(outputSize, inputSize, sizeof(float), weights.data(), outputSize, d_weights, outputSize));
-    CUBLAS_CHECK(cublasSetVector(biases.size(), sizeof(float), biases.data(), 1, d_biases, 1));
+    CUBLAS_CHECK(cublasSetMatrix(
+        outputSize, inputSize, sizeof(float), weights.data(), outputSize,
+        d_weights, outputSize
+    ));
+    CUBLAS_CHECK(cublasSetVector(
+        biases.size(), sizeof(float), biases.data(), 1, d_biases, 1
+    ));
 }
 
-void Layers::Dense::setWeights(const std::vector<std::vector<float>>& weights_input) {
+void Layers::Dense::setWeights(
+    const std::vector<std::vector<float>>& weights_input
+) {
     int numWeights = inputSize * outputSize;
 
     if (weights.size() != numWeights) {

@@ -77,7 +105,7 @@ void Layers::Dense::setWeights(const std::vector<std::vector<float>>& weights_in
 
     for (int j = 0; j < inputSize; ++j) {
         for (int i = 0; i < outputSize; ++i) {
-            int idx = IDX2C(i, j, outputSize);
-            weights[idx] = weights_input[i][j];
+            int idx      = IDX2C(i, j, outputSize);
+            weights[idx] = weights_input[i][j];
         }
     }
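
setWeights copies a row-major std::vector<std::vector<float>> into the flat, column-major buffer that cuBLAS expects. IDX2C is not defined anywhere in this diff; presumably it is the standard column-major indexing macro from the cuBLAS examples, along these lines (an assumption about the repo's helper, likely in cuda_helper.cuh):

    // Assumed definition: element (i, j) of a column-major matrix
    // with leading dimension ld
    #define IDX2C(i, j, ld) (((j) * (ld)) + (i))

With ld = outputSize, consecutive row indices i for a fixed column j are adjacent in memory, which is the layout cublasSgemv reads.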

CUDA helper implementation:

@@ -1,8 +1,10 @@
+#include <cublas_v2.h>
+#include <cuda_runtime.h>
+
 #include <cstdio>
 #include <cstdlib>
 
 #include "cuda_helper.cuh"
-#include <cuda_runtime.h>
-#include <cublas_v2.h>
 
 cudaDeviceProp initializeCUDA(cublasHandle_t& cublasHandle) {
     int deviceCount;
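
CUDA_CHECK and CUBLAS_CHECK, used throughout the Dense layer implementation above, come from cuda_helper.cuh, which this commit does not show. A typical shape for such a macro, offered only as an assumption rather than the repo's actual definition:

    // Hypothetical CUDA_CHECK: abort with the error string and source
    // location when a CUDA runtime call fails. Requires <cstdio>,
    // <cstdlib>, and <cuda_runtime.h>.
    #define CUDA_CHECK(call)                                               \
        do {                                                               \
            cudaError_t err = (call);                                      \
            if (err != cudaSuccess) {                                      \
                std::fprintf(stderr, "CUDA error %s at %s:%d\n",           \
                             cudaGetErrorString(err), __FILE__, __LINE__); \
                std::exit(EXIT_FAILURE);                                   \
            }                                                              \
        } while (0)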

Dense layer tests:

@@ -1,14 +1,24 @@
-#include "gtest/gtest.h"
 #include <cuda_runtime_api.h>
 #include <driver_types.h>
 
 #include <iostream>
 
 #include "activations.cuh"
 #include "dense.cuh"
+#include "gtest/gtest.h"
 #include "test_cublas_fixture.cuh"
 
 class DenseLayerTest : public CublasTestFixture {
   protected:
-    Layers::Dense commonTestSetup(int inputSize, int outputSize, std::vector<float>& input, std::vector<std::vector<float>>& weights, std::vector<float>& biases, float*& d_input, float*& d_output) {
+    Layers::Dense commonTestSetup(
+        int inputSize,
+        int outputSize,
+        std::vector<float>& input,
+        std::vector<std::vector<float>>& weights,
+        std::vector<float>& biases,
+        float*& d_input,
+        float*& d_output
+    ) {
         // Create Dense layer
         Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);

@@ -24,7 +34,9 @@ protected:
         EXPECT_EQ(cudaStatus, cudaSuccess);
 
         // Copy input to device
-        cublasStatus = cublasSetVector(input.size(), sizeof(float), input.data(), 1, d_input, 1);
+        cublasStatus = cublasSetVector(
+            input.size(), sizeof(float), input.data(), 1, d_input, 1
+        );
         EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
 
         return denseLayer;

@@ -36,28 +48,27 @@ protected:
         cudaFree(d_output);
     }
 
     cudaError_t cudaStatus;
     cublasStatus_t cublasStatus;
 };
 
 TEST_F(DenseLayerTest, Init) {
-
     for (int i = 1; i < 100; ++i) {
         for (int j = 1; j < 100; ++j) {
             int inputSize = i;
             int outputSize = j;
 
-            // std::cout << "Dense layer: input size = " << inputSize << ", output size = " << outputSize << std::endl;
-            Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
+            // std::cout << "Dense layer: input size = " << inputSize << ",
+            // output size = " << outputSize << std::endl;
+            Layers::Dense denseLayer(
+                inputSize, outputSize, "linear", cublasHandle
+            );
         }
     }
 }
 
 TEST_F(DenseLayerTest, setWeights) {
-
     int inputSize = 4;
     int outputSize = 5;
 
     std::vector<std::vector<float>> weights = {

@@ -71,17 +82,17 @@ TEST_F(DenseLayerTest, setWeights) {
     Layers::Dense denseLayer(inputSize, outputSize, "linear", cublasHandle);
 
     denseLayer.setWeights(weights);
 }
 
 TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
-
     int inputSize = 3;
     int outputSize = 3;
 
     std::vector<float> input = {1.0f, 2.0f, 3.0f};
 
-    std::vector<std::vector<float>> weights(inputSize, std::vector<float>(outputSize, 0.0f));
+    std::vector<std::vector<float>> weights(
+        inputSize, std::vector<float>(outputSize, 0.0f)
+    );
     for (int i = 0; i < inputSize; ++i) {
         for (int j = 0; j < outputSize; ++j) {
             if (i == j) {

@@ -94,11 +105,15 @@ TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
     float* d_input;
     float* d_output;
 
-    Layers::Dense denseLayer = commonTestSetup(inputSize, outputSize, input, weights, biases, d_input, d_output);
+    Layers::Dense denseLayer = commonTestSetup(
+        inputSize, outputSize, input, weights, biases, d_input, d_output
+    );
     denseLayer.forward(d_input, d_output);
 
     std::vector<float> output(outputSize);
-    cublasStatus = cublasGetVector(outputSize, sizeof(float), d_output, 1, output.data(), 1);
+    cublasStatus = cublasGetVector(
+        outputSize, sizeof(float), d_output, 1, output.data(), 1
+    );
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
 
     // Check if the output is a zero vector

@@ -110,7 +125,7 @@ TEST_F(DenseLayerTest, ForwardUnitWeightMatrix) {
 }
 
 TEST_F(DenseLayerTest, ForwardRandomWeightMatrix) {
-    int inputSize = 5;
-    int outputSize = 4;
+    int inputSize  = 5;
+    int outputSize = 4;
 
     std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};

@@ -126,17 +141,23 @@ TEST_F(DenseLayerTest, ForwardRandomWeightMatrix) {
     float* d_input;
     float* d_output;
 
-    Layers::Dense denseLayer = commonTestSetup(inputSize, outputSize, input, weights, biases, d_input, d_output);
+    Layers::Dense denseLayer = commonTestSetup(
+        inputSize, outputSize, input, weights, biases, d_input, d_output
+    );
 
     denseLayer.forward(d_input, d_output);
 
     std::vector<float> output(outputSize);
-    cublasStatus = cublasGetVector(outputSize, sizeof(float), d_output, 1, output.data(), 1);
+    cublasStatus = cublasGetVector(
+        outputSize, sizeof(float), d_output, 1, output.data(), 1
+    );
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
 
     std::vector<float> expectedOutput = {10.4f, 13.0f, 8.9f, 9.3f};
     for (int i = 0; i < outputSize; ++i) {
-        EXPECT_NEAR(output[i], expectedOutput[i], 1e-4); // Allow small tolerance for floating-point comparison
+        EXPECT_NEAR(
+            output[i], expectedOutput[i], 1e-4
+        ); // Allow small tolerance for floating-point comparison
     }
 
     commonTestTeardown(d_input, d_output);

cuBLAS test fixture implementation:

@@ -1,5 +1,5 @@
-#include "gtest/gtest.h"
 #include "cublas_v2.h"
+#include "gtest/gtest.h"
 #include "test_cublas_fixture.cuh"
 
 cublasHandle_t CublasTestFixture::cublasHandle;

test_cublas_fixture.cuh:

@@ -1,8 +1,8 @@
-#include "gtest/gtest.h"
 #include "cublas_v2.h"
+#include "gtest/gtest.h"
 
 class CublasTestFixture : public ::testing::Test {
   protected:
     static cublasHandle_t cublasHandle;
 
     static void SetUpTestSuite();
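
SetUpTestSuite is gtest's once-per-suite hook, so every test derived from the fixture shares the single static cublasHandle instead of creating one per test. The definitions are not part of this diff; a sketch of what the fixture implementation presumably does:

    // Assumed implementation: create one shared cuBLAS handle for the
    // whole suite and destroy it when the suite finishes.
    void CublasTestFixture::SetUpTestSuite() { cublasCreate(&cublasHandle); }
    void CublasTestFixture::TearDownTestSuite() { cublasDestroy(cublasHandle); }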

Tests for functions.cuh:

@@ -1,16 +1,16 @@
-#include "gtest/gtest.h"
 #include <cuda_runtime_api.h>
 #include <driver_types.h>
 
 #include <iostream>
 
 #include "functions.cuh"
+#include "gtest/gtest.h"
 #include "test_cublas_fixture.cuh"
 
 class FunctionsTest : public CublasTestFixture {
   protected:
     cudaError_t cudaStatus;
     cublasStatus_t cublasStatus;
 };
 
-TEST_F(FunctionsTest, sigmoid) {
-
-}
+TEST_F(FunctionsTest, sigmoid) {}