Remove cublas dependency

2024-03-05 18:41:35 +01:00
parent 98ad84c659
commit f4257afd5a
16 changed files with 65 additions and 141 deletions


@@ -1,4 +1,3 @@
-#include <cublas_v2.h>
 #include <cuda_runtime.h>
 #include <cstdio>
@@ -9,16 +8,15 @@
 #include "activations.cuh"
 #include "cuda_helper.cuh"
 #include "dense.cuh"
+#include "matrix_math.cuh"

 Layers::Dense::Dense(
     int inputSize,
     int outputSize,
-    Activation activation,
-    cublasHandle_t cublasHandle
+    Activation activation
 )
     : inputSize(inputSize),
       outputSize(outputSize),
-      cublasHandle(cublasHandle),
       activation(activation) {
     // Allocate memory for weights and biases
     weights.resize(outputSize * inputSize);
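With the handle parameter gone, the matching declaration in dense.cuh (one of the 16 changed files, not shown in this excerpt) would shrink as well. A minimal sketch of the class shape implied by this diff; member names beyond those visible in the hunks are assumptions:

// dense.cuh -- sketch only, reconstructed from the call sites in this diff
#include <vector>
#include "activations.cuh"

namespace Layers {
class Dense {
   public:
    Dense(int inputSize, int outputSize, Activation activation);
    void forward(const float* d_input, float* d_output);
    void toCuda();
    void setWeights(const std::vector<std::vector<float>>& weights_input);

   private:
    int inputSize;
    int outputSize;
    Activation activation;
    std::vector<float> weights;  // host-side, row-major after this commit
    std::vector<float> biases;
    float* d_weights;            // device copies filled by toCuda()
    float* d_biases;
};
}  // namespace Layers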
@@ -54,35 +52,30 @@ void Layers::Dense::initializeBiases() {
 }

 void Layers::Dense::forward(const float* d_input, float* d_output) {
-    const float alpha = 1.0f;
-    const float beta = 0.0f;
-    CUBLAS_CHECK(cublasSgemv(
-        cublasHandle, CUBLAS_OP_N, outputSize, inputSize, &alpha, d_weights,
-        outputSize, d_input, 1, &beta, d_output, 1
-    ));
-    CUBLAS_CHECK(
-        cublasSaxpy(cublasHandle, outputSize, &alpha, d_biases, 1, d_output, 1)
+    mat_vec_mul_kernel<<<1, outputSize>>>(
+        d_weights, d_input, d_output, inputSize, outputSize
     );
-    int threadsPerBlock = 256;
-    int blocksPerGrid = (outputSize + threadsPerBlock - 1) / threadsPerBlock;
+    vec_vec_add_kernel<<<1, outputSize>>>(
+        d_biases, d_output, d_output, outputSize
+    );

     switch (activation) {
         case SIGMOID:
-            sigmoid_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            sigmoid_kernel<<<1, outputSize>>>(
                 d_output, d_output, outputSize
             );
             break;
         case RELU:
-            relu_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            relu_kernel<<<1, outputSize>>>(
                 d_output, d_output, outputSize
             );
             break;
         default:
-            linear_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+            linear_kernel<<<1, outputSize>>>(
                 d_output, d_output, outputSize
             );
             break;
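Both replacement kernels come from the new matrix_math.cuh, whose bodies are not part of this excerpt. A minimal sketch consistent with the call sites above and the row-major weight layout used in setWeights below; only the names and argument orders are taken from the diff, the bodies are assumptions:

__global__ void mat_vec_mul_kernel(
    const float* d_matrix, const float* d_vector, float* d_output,
    int inputSize, int outputSize
) {
    // One thread per output element: row i of the matrix dotted with the vector.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < outputSize) {
        float sum = 0.0f;
        for (int j = 0; j < inputSize; ++j) {
            sum += d_matrix[i * inputSize + j] * d_vector[j];  // row-major
        }
        d_output[i] = sum;
    }
}

__global__ void vec_vec_add_kernel(
    const float* d_a, const float* d_b, float* d_output, int length
) {
    // Elementwise add; safe even when d_output aliases d_b, as in forward().
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < length) {
        d_output[i] = d_a[i] + d_b[i];
    }
}

Note that every launch here uses a single block of outputSize threads, so this only works while outputSize stays at or below the device limit of 1024 threads per block; the removed blocksPerGrid computation handled larger sizes.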
@@ -92,12 +85,13 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
 }

 void Layers::Dense::toCuda() {
-    CUBLAS_CHECK(cublasSetMatrix(
-        outputSize, inputSize, sizeof(float), weights.data(), outputSize,
-        d_weights, outputSize
+    CUDA_CHECK(cudaMemcpy(
+        d_weights, weights.data(), sizeof(float) * inputSize * outputSize,
+        cudaMemcpyHostToDevice
     ));
-    CUBLAS_CHECK(cublasSetVector(
-        biases.size(), sizeof(float), biases.data(), 1, d_biases, 1
+    CUDA_CHECK(cudaMemcpy(
+        d_biases, biases.data(), sizeof(float) * outputSize,
+        cudaMemcpyHostToDevice
     ));
 }
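CUDA_CHECK replaces CUBLAS_CHECK and presumably lives in the already-included cuda_helper.cuh. Its definition is not shown; a typical shape for such a macro, offered as an assumption, is:

// Sketch of an error-checking macro like CUDA_CHECK; the real definition
// in cuda_helper.cuh may differ.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                           \
    do {                                                           \
        cudaError_t err_ = (call);                                 \
        if (err_ != cudaSuccess) {                                 \
            fprintf(stderr, "CUDA error %s at %s:%d\n",            \
                    cudaGetErrorString(err_), __FILE__, __LINE__); \
            exit(EXIT_FAILURE);                                    \
        }                                                          \
    } while (0)

For contiguous data the cudaMemcpy calls are drop-in equivalents of cublasSetMatrix and cublasSetVector: both end up copying the same sizeof(float) * rows * cols (or * n) bytes host-to-device.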
@@ -111,10 +105,9 @@ void Layers::Dense::setWeights(
         exit(EXIT_FAILURE);
     }

-    for (int j = 0; j < inputSize; ++j) {
-        for (int i = 0; i < outputSize; ++i) {
-            int idx = IDX2C(i, j, outputSize);
-            weights[idx] = weights_input[i][j];
+    for (int i = 0; i < outputSize; ++i) {
+        for (int j = 0; j < inputSize; ++j) {
+            weights[i * inputSize + j] = weights_input[i][j];
         }
     }
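The swapped loop order is a storage-order change, not a style fix: cuBLAS assumes column-major matrices, while the new kernels index the weights row-major. IDX2C is the indexing helper from the standard cuBLAS examples:

// Old layout (column-major, cuBLAS convention):
//   element (i, j) lives at IDX2C(i, j, outputSize) = j * outputSize + i
#define IDX2C(i, j, ld) (((j) * (ld)) + (i))

// New layout (row-major):
//   element (i, j) lives at i * inputSize + j
// e.g. a 2x3 matrix {{a, b, c}, {d, e, f}} is stored as
//   column-major: a d b e c f
//   row-major:    a b c d e f

Any code that still reads weights with the old column-major index after this commit would silently transpose the matrix.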