Remove const from layer input

This commit is contained in:
2025-11-19 20:37:41 +01:00
parent dfdfa19022
commit 7896ff0e24
6 changed files with 5 additions and 58 deletions

View File

@@ -20,7 +20,7 @@ class Layer {
     virtual ~Layer(){};
-    virtual CUDANet::Tensor& forward(const CUDANet::Tensor &input) = 0;
+    virtual CUDANet::Tensor& forward(CUDANet::Tensor &input) = 0;
     virtual CUDANet::Shape input_shape() = 0;

View File

@@ -23,7 +23,7 @@ class Conv2d : public Layer {
     ~Conv2d() {};
-    CUDANet::Tensor& forward(const CUDANet::Tensor& input) override;
+    CUDANet::Tensor& forward(CUDANet::Tensor& input) override;
     CUDANet::Shape input_shape() override;

View File

@@ -18,7 +18,7 @@ class Dense : public Layer {
     ~Dense();
-    CUDANet::Tensor& forward(const CUDANet::Tensor &input) override;
+    CUDANet::Tensor& forward(CUDANet::Tensor &input) override;
     CUDANet::Shape input_shape() override;

View File

@@ -1,53 +0,0 @@
#include <vector>
#include "activation.hpp"
#include "conv2d.hpp"
#include "convolution.cuh"
#include "cuda_helper.cuh"
#include "layer.hpp"
#include "matmul.cuh"
#include "vector.cuh"
using namespace CUDANet::Layers;
void Conv2d::initCUDA() {
d_output = nullptr;
CUDA_CHECK(cudaMalloc(
(void**)&d_output,
sizeof(float) * outputSize.first * outputSize.second * numFilters
));
d_weights = nullptr;
CUDA_CHECK(cudaMalloc(
(void**)&d_weights, sizeof(float) * kernelSize.first *
kernelSize.second * inputChannels * numFilters
));
d_biases = nullptr;
CUDA_CHECK(cudaMalloc((void**)&d_biases, sizeof(float) * numFilters));
}
// Release every device buffer owned by this layer. The frees are
// independent of one another; cudaFree on a null pointer is a documented
// no-op, so a partially-initialized layer tears down safely.
void Conv2d::delCUDA() {
    cudaFree(d_biases);
    cudaFree(d_weights);
    cudaFree(d_output);
}
void Conv2d::toCuda() {
CUDA_CHECK(cudaMemcpy(
d_weights, weights.data(),
sizeof(float) * kernelSize.first * kernelSize.second * inputChannels *
numFilters,
cudaMemcpyHostToDevice
));
CUDA_CHECK(cudaMemcpy(
d_biases, biases.data(), sizeof(float) * numFilters,
cudaMemcpyHostToDevice
));
}
float* Conv2d::forwardCUDA(const float* d_input) {
// Convolve
}

View File

@@ -83,7 +83,7 @@ Conv2d::Conv2d(
 Conv2d::~Conv2d() {}
-CUDANet::Tensor& Conv2d::forward(const CUDANet::Tensor& input) {
+CUDANet::Tensor& Conv2d::forward(CUDANet::Tensor& input) {
     output.zero();
     backend->conv2d(
         weights,

View File

@@ -33,7 +33,7 @@ Dense::Dense(CUDANet::Shape in, CUDANet::Shape out, CUDANet::Backend* backend)
 Dense::~Dense() {}
-CUDANet::Tensor& Dense::forward(const CUDANet::Tensor& input) {
+CUDANet::Tensor& Dense::forward(CUDANet::Tensor& input) {
     backend->dense(weights, biases, input, output, in_shape[0], out_shape[0]);
     return output;
 }