Fix compilation errors: rename namespace CUDANet::Backend to CUDANet::Backends, replace include guards with #pragma once, reorder Activation constructor parameters, and update Conv2d destructor and shape-error handling

This commit is contained in:
2025-11-23 18:50:57 +01:00
parent 51bcee01ab
commit 82a0e7c19d
14 changed files with 15 additions and 23 deletions

View File

@@ -2,6 +2,8 @@
#include <cstddef>
#include "shape.hpp"
namespace CUDANet {
// Forward declaration

View File

@@ -3,7 +3,7 @@
#include "backend.hpp"
#include "tensor.hpp"
namespace CUDANet::Backend {
namespace CUDANet::Backends {
class CPU : public Backend {
public:

View File

@@ -24,7 +24,7 @@ do { \
} \
} while (0)
namespace CUDANet::Backend {
namespace CUDANet::Backends {
class CUDA : public Backend {
public:

View File

@@ -1,5 +1,4 @@
#ifndef CUDANET_ACTIVATION_FUNCTIONS_H
#define CUDANET_ACTIVATION_FUNCTIONS_H
#pragma once
#include <cuda_runtime.h>
@@ -32,5 +31,3 @@ __global__ void relu(
);
} // namespace CUDANet::Kernels
#endif // CUDANET_ACTIVATION_FUNCTIONS_H

View File

@@ -1,5 +1,4 @@
#ifndef CUDANET_MATMUL_H
#define CUDANET_MATMUL_H
#pragma once
#include <cuda_runtime.h>
@@ -191,5 +190,3 @@ __global__ void sum_reduce(
);
} // namespace CUDANet::Kernels
#endif // CUDANET_MATMUL_H

View File

@@ -25,7 +25,7 @@ class Activation : public Layer {
Activation() = default;
Activation(CUDANet::Backend* backend, ActivationType activation, const CUDANet::Shape &shape);
Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::Backend* backend);
~Activation() = default;

View File

@@ -18,7 +18,7 @@ class Conv2d : public Layer {
CUDANet::Backend* backend
);
~Conv2d() {};
~Conv2d();
CUDANet::Tensor& forward(CUDANet::Tensor& input) override;

View File

@@ -1,5 +1,4 @@
#ifndef CUDANET_IMAGENET_H
#define CUDANET_IMAGENET_H
#pragma once
#include <map>
#include <string>
@@ -1012,5 +1011,3 @@ const std::map <int, std::string> IMAGENET_CLASS_MAP = {
// clang-format on
}
#endif // CUDANET_IMAGENET_H

View File

@@ -25,7 +25,7 @@ cudaDeviceProp initializeCUDA() {
return deviceProp;
}
using namespace CUDANet::Backend;
using namespace CUDANet::Backends;
void* CUDA::allocate(size_t bytes) {
void* d_ptr = nullptr;

View File

@@ -1,5 +1,4 @@
#include "activation_functions.cuh"
#include "cuda_helper.cuh"
using namespace CUDANet;

View File

@@ -1,4 +1,4 @@
#include "cuda_helper.cuh"
#include "backend/cuda.cuh"
#include "matmul.cuh"
using namespace CUDANet;

View File

@@ -4,7 +4,7 @@
#include "kernels/matmul.cuh"
#include "kernels/pool.cuh"
using namespace CUDANet::Backend;
using namespace CUDANet::Backends;
void CUDA::relu(Tensor& tensor) {
int gridSize = (tensor.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;

View File

@@ -4,7 +4,7 @@
#include "backend/cuda.cuh"
#include "kernels/matmul.cuh"
using namespace CUDANet::Backend;
using namespace CUDANet::Backends;
void CUDA::print(const CUDANet::Tensor &input) {
auto length = input.numel();

View File

@@ -7,11 +7,11 @@
using namespace CUDANet::Layers;
Activation::Activation(CUDANet::Backend* backend, ActivationType activation, const CUDANet::Shape &shape)
Activation::Activation(ActivationType activation, const CUDANet::Shape &shape, CUDANet::Backend* backend)
: backend(backend), activationType(activation), shape(shape) {
if (shape.size() != 1) {
throw std::runtime_error(std::format("Invalid shape. Expected [1], got {}", shape));
throw InvalidShapeException("input", 1, shape.size());
}
auto length = shape[0];