Mirror of https://github.com/lordmathis/CUDANet.git
Update concat layer

This commit switches the Concat layer's size parameters and members from unsigned int to int and adds a getOutputSize() accessor that reports the concatenated length, inputASize + inputBSize.
In the header file:

@@ -15,7 +15,7 @@ class Concat {
      * @param inputASize Size of the first input
      * @param inputBSize Size of the second input
      */
-    Concat(const unsigned int inputASize, const unsigned int inputBSize);
+    Concat(const int inputASize, const int inputBSize);
 
     /**
      * @brief Destroy the Concat layer
@@ -33,9 +33,11 @@ class Concat {
      */
     float* forward(const float* d_input_A, const float* d_input_B);
 
+    int getOutputSize();
+
   private:
-    unsigned int inputASize;
-    unsigned int inputBSize;
+    int inputASize;
+    int inputBSize;
 
     float* d_output;
 };
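For context, a minimal sketch of how this interface might be driven from host code. The sizes, buffer names, and include path below are hypothetical, and error checking on the raw CUDA calls is elided:

    #include <cuda_runtime.h>
    #include "concat.cuh"  // hypothetical include path for the declaration above

    int main() {
        const int sizeA = 1024, sizeB = 512;  // hypothetical input lengths

        // Device inputs; contents left unset for brevity.
        float *d_a = nullptr, *d_b = nullptr;
        cudaMalloc((void**)&d_a, sizeof(float) * sizeA);
        cudaMalloc((void**)&d_b, sizeof(float) * sizeB);

        // Concat owns an output buffer of sizeA + sizeB floats.
        CUDANet::Layers::Concat concat(sizeA, sizeB);
        float* d_out = concat.forward(d_a, d_b);  // [A | B], on the device
        (void)d_out;

        cudaFree(d_a);
        cudaFree(d_b);
        return 0;
    }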
In the implementation file:
@@ -4,21 +4,19 @@
 using namespace CUDANet::Layers;
 
-
-Concat::Concat(const unsigned int inputASize, const unsigned int inputBSize)
+Concat::Concat(const int inputASize, const int inputBSize)
     : inputASize(inputASize), inputBSize(inputBSize) {
 
     d_output = nullptr;
     CUDA_CHECK(cudaMalloc(
         (void**)&d_output, sizeof(float) * (inputASize + inputBSize)
     ));
 
 }
 
 Concat::~Concat() {
     cudaFree(d_output);
 }
 
-
 float* Concat::forward(const float* d_input_A, const float* d_input_B) {
     CUDA_CHECK(cudaMemcpy(
         d_output, d_input_A, sizeof(float) * inputASize, cudaMemcpyDeviceToDevice
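This hunk cuts off mid-function, so the rest of forward() does not appear in the extract. Given that d_output was allocated to hold inputASize + inputBSize floats, the omitted lines presumably append the second input after the first; an assumed continuation, not taken from the diff:

    // Assumption: copy d_input_B in right after the first input, so that
    // d_output ends up holding [A | B].
    CUDA_CHECK(cudaMemcpy(
        d_output + inputASize, d_input_B, sizeof(float) * inputBSize,
        cudaMemcpyDeviceToDevice
    ));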
@@ -33,3 +31,7 @@ float* Concat::forward(const float* d_input_A, const float* d_input_B) {
 
     return d_output;
 }
+
+int Concat::getOutputSize() {
+    return inputASize + inputBSize;
+};
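With the new accessor, callers can size downstream buffers without re-deriving inputASize + inputBSize themselves. Continuing the hypothetical sketch above, a host-side readback might look like this (requires <vector>):

    // concat and d_out are the hypothetical names from the earlier sketch.
    std::vector<float> host(concat.getOutputSize());
    CUDA_CHECK(cudaMemcpy(
        host.data(), d_out, sizeof(float) * host.size(), cudaMemcpyDeviceToHost
    ));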