diff --git a/include/backend/backend.hpp b/include/backend/backend.hpp
new file mode 100644
index 0000000..473a7b9
--- /dev/null
+++ b/include/backend/backend.hpp
@@ -0,0 +1,21 @@
+#pragma once
+
+#include <cstddef>
+
+namespace CUDANet::Backend
+{
+
+class IBackend
+{
+public:
+
+    // Memory management
+    virtual void* allocate(size_t bytes) = 0;
+    virtual void deallocate(void* ptr) = 0;
+
+    virtual void copyToDevice(void* devicePtr, const void* hostPtr, size_t bytes) = 0;
+    virtual void copyToHost(void* hostPtr, const void* devicePtr, size_t bytes) = 0;
+
+};
+
+} // namespace CUDANet::Backend
\ No newline at end of file
diff --git a/include/backend/tensor.hpp b/include/backend/tensor.hpp
new file mode 100644
index 0000000..87c2e04
--- /dev/null
+++ b/include/backend/tensor.hpp
@@ -0,0 +1,38 @@
+#pragma once
+#include <cstddef>
+#include "backend/backend.hpp"
+#include <vector>
+
+namespace CUDANet::Backend
+{
+
+enum class DType
+{
+    FLOAT32,
+    // FLOAT16, // Not implemented yet
+    // INT32,   // Not implemented yet
+};
+
+typedef std::vector<size_t> Shape;
+
+class Tensor
+{
+public:
+    Tensor(Shape shape, DType dtype, IBackend* backend);
+    ~Tensor();
+
+    void* allocate();
+    void deallocate();
+
+    void toDevice(const void* hostPtr);
+    void toHost(void* hostPtr);
+
+private:
+    Shape shape;
+    DType dtype;
+    IBackend* backend;
+    void* devicePtr;
+    void* hostPtr;
+};
+
+} // namespace CUDANet::Backend
\ No newline at end of file
diff --git a/src/backends/tensor.cpp b/src/backends/tensor.cpp
new file mode 100644
index 0000000..9413b5b
--- /dev/null
+++ b/src/backends/tensor.cpp
@@ -0,0 +1,11 @@
+#include "backend/tensor.hpp"
+
+using namespace CUDANet::Backend;
+
+Tensor::Tensor(Shape shape, DType dtype, IBackend* backend)
+    : shape(shape), dtype(dtype), backend(backend), devicePtr(nullptr), hostPtr(nullptr) {}
+
+Tensor::~Tensor() {
+    deallocate();
+}
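
A minimal sketch (not part of the patch) of how a concrete CUDA backend might implement the IBackend interface above, using the CUDA runtime API. The class name CudaBackend is an assumption, and error checking is omitted for brevity:

#include <cstddef>
#include <cuda_runtime.h>

#include "backend/backend.hpp"

namespace CUDANet::Backend
{

class CudaBackend : public IBackend
{
public:
    void* allocate(size_t bytes) override {
        void* ptr = nullptr;
        cudaMalloc(&ptr, bytes);  // raw device allocation
        return ptr;
    }

    void deallocate(void* ptr) override {
        cudaFree(ptr);
    }

    void copyToDevice(void* devicePtr, const void* hostPtr, size_t bytes) override {
        cudaMemcpy(devicePtr, hostPtr, bytes, cudaMemcpyHostToDevice);
    }

    void copyToHost(void* hostPtr, const void* devicePtr, size_t bytes) override {
        cudaMemcpy(hostPtr, devicePtr, bytes, cudaMemcpyDeviceToHost);
    }
};

} // namespace CUDANet::Backend

Note that IBackend declares no virtual destructor in this patch, so deleting a CudaBackend through an IBackend* would be undefined behaviour; adding virtual ~IBackend() = default; to the interface would be a natural follow-up.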
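
The destructor in src/backends/tensor.cpp calls deallocate(), so the remaining Tensor methods need definitions for the translation unit to link. Below is a minimal sketch of what they might look like in the same file, assuming FLOAT32 elements occupy 4 bytes and the element count is the product of the shape dimensions; the file-local helper sizeInBytes is hypothetical and not part of the patch:

#include "backend/tensor.hpp"

using namespace CUDANet::Backend;

// Hypothetical helper: total size of the tensor in bytes,
// assuming the only supported dtype (FLOAT32) is 4 bytes per element.
static size_t sizeInBytes(const Shape& shape) {
    size_t count = 1;
    for (size_t dim : shape) {
        count *= dim;
    }
    return count * sizeof(float);
}

void* Tensor::allocate() {
    devicePtr = backend->allocate(sizeInBytes(shape));
    return devicePtr;
}

void Tensor::deallocate() {
    // Safe to call more than once; the destructor relies on this.
    if (devicePtr != nullptr) {
        backend->deallocate(devicePtr);
        devicePtr = nullptr;
    }
}

void Tensor::toDevice(const void* src) {
    // Copy a full tensor's worth of bytes from the caller's host buffer to device memory.
    backend->copyToDevice(devicePtr, src, sizeInBytes(shape));
}

void Tensor::toHost(void* dst) {
    backend->copyToHost(dst, devicePtr, sizeInBytes(shape));
}

With those definitions a caller could, for example, construct a backend, create a Tensor with shape {2, 3} and DType::FLOAT32, call allocate(), and then move data with toDevice()/toHost().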