CUDANet (mirror of https://github.com/lordmathis/CUDANet.git)

Commit: Implement getOutputSize and getInputSize for seq layers
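The hunks below update the unit tests for the new seq-layer size accessors: getOutputSize() is now treated as the total number of output elements (width * height * channels) rather than the spatial width alone, so host buffers and cudaMemcpy sizes are computed from outputSize directly. A minimal sketch of the assumed semantics follows; the class and member names are hypothetical and not taken from the repository:

    // Sketch only: illustrates the assumed meaning of getOutputSize() after this
    // commit, i.e. the flattened element count of the layer's output tensor.
    class SeqLayerSketch {
      public:
        SeqLayerSketch(int outputWidth, int outputChannels)
            : outputWidth_(outputWidth), outputChannels_(outputChannels) {}

        // Total number of floats produced by forward(): width * height * channels
        int getOutputSize() const {
            return outputWidth_ * outputWidth_ * outputChannels_;
        }

      private:
        int outputWidth_;     // spatial width (outputs assumed square)
        int outputChannels_;  // channels / filters in the output
    };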
@@ -51,10 +51,10 @@ TEST(AvgPoolingLayerTest, AvgPoolForwardTest) {
 
     int outputSize = avgPoolingLayer.getOutputSize();
 
-    std::vector<float> output(outputSize * outputSize * nChannels);
+    std::vector<float> output(outputSize);
     cudaStatus = cudaMemcpy(
         output.data(), d_output,
-        sizeof(float) * outputSize * outputSize * nChannels,
+        sizeof(float) * outputSize,
         cudaMemcpyDeviceToHost
     );
     EXPECT_EQ(cudaStatus, cudaSuccess);
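The same convention applies to this average-pooling test and to the max-pooling hunk at the end: the host buffer is sized with outputSize alone. Assuming the usual non-padded pooling arithmetic (a sketch, not code from the repository), that value would be computed roughly as:

    // Hedged sketch: flattened pooling output size, assuming a square input,
    // a square pooling window and no padding (names are assumptions).
    inline int pooledOutputSize(int inputSize, int poolingSize, int stride, int nChannels) {
        int pooledWidth = (inputSize - poolingSize) / stride + 1;
        return pooledWidth * pooledWidth * nChannels;
    }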
@@ -82,14 +82,15 @@ TEST_F(Conv2dTest, SimpleTest) {
         activationType, input, kernels.data(), d_input
     );
 
-    int outputSize = (inputSize - kernelSize) / stride + 1;
+    int outputWidth = (inputSize - kernelSize) / stride + 1;
+    int outputSize = outputWidth * outputWidth * numFilters;
     EXPECT_EQ(outputSize, conv2d.getOutputSize());
 
     d_output = conv2d.forward(d_input);
 
     std::vector<float> expected = {44.0f, 54.0f, 64.0f, 84.0f, 94.0f,
                                    104.0f, 124.0f, 134.0f, 144.0f};
-    std::vector<float> output(outputSize * outputSize * numFilters);
+    std::vector<float> output(outputSize);
 
     cudaStatus = cudaMemcpy(
         output.data(), d_output, sizeof(float) * output.size(),
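The SimpleTest hunk splits the old outputSize into an explicit spatial width and a flattened element count. As a worked illustration with assumed parameters (not stated in the diff): inputSize = 4, kernelSize = 2 and stride = 1 give outputWidth = (4 - 2) / 1 + 1 = 3, and with numFilters = 1 the flattened size is outputSize = 3 * 3 * 1 = 9, consistent with the nine expected values listed in the test.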
@@ -172,18 +173,16 @@ TEST_F(Conv2dTest, PaddedTest) {
         activationType, input, kernels.data(), d_input
     );
 
-    EXPECT_EQ(inputSize, conv2d.getOutputSize());
+    EXPECT_EQ(inputSize * inputSize * numFilters, conv2d.getOutputSize());
 
     d_output = conv2d.forward(d_input);
 
     std::vector<float> output(
-        conv2d.getOutputSize() * conv2d.getOutputSize() * numFilters
+        conv2d.getOutputSize()
     );
     cudaMemcpy(
         output.data(), d_output,
-        sizeof(float) * conv2d.getOutputSize() * conv2d.getOutputSize() *
-            numFilters,
-        cudaMemcpyDeviceToHost
+        sizeof(float) * conv2d.getOutputSize(), cudaMemcpyDeviceToHost
     );
 
     // Generated by tools/generate_conv2d_test.py
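In PaddedTest above and the StridedPaddedConvolution hunk below, the expectation changes from the spatial width (inputSize) to the full flattened size (inputSize * inputSize * numFilters); the test parameters are evidently chosen so that padding keeps the spatial output equal to the input. Assuming the standard padded, strided convolution arithmetic (a sketch, not code from the repository), the flattened size would be:

    // Hedged sketch: flattened output size of a padded, strided convolution on a
    // square input (parameter names mirror the tests but are assumptions here).
    inline int convOutputSize(int inputSize, int kernelSize, int stride,
                              int paddingSize, int numFilters) {
        int outputWidth = (inputSize + 2 * paddingSize - kernelSize) / stride + 1;
        return outputWidth * outputWidth * numFilters;
    }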
@@ -259,17 +258,16 @@ TEST_F(Conv2dTest, StridedPaddedConvolution) {
         activationType, input, kernels.data(), d_input
     );
 
-    EXPECT_EQ(inputSize, conv2d.getOutputSize());
+    EXPECT_EQ(inputSize * inputSize * numFilters, conv2d.getOutputSize());
 
     d_output = conv2d.forward(d_input);
 
     std::vector<float> output(
-        conv2d.getOutputSize() * conv2d.getOutputSize() * numFilters
+        conv2d.getOutputSize()
    );
    cudaMemcpy(
        output.data(), d_output,
-        sizeof(float) * conv2d.getOutputSize() * conv2d.getOutputSize() *
-            numFilters,
+        sizeof(float) * conv2d.getOutputSize(),
        cudaMemcpyDeviceToHost
    );
 
@@ -51,10 +51,10 @@ TEST(MaxPoolingLayerTest, MaxPoolForwardTest) {
 
     int outputSize = maxPoolingLayer.getOutputSize();
 
-    std::vector<float> output(outputSize * outputSize * nChannels);
+    std::vector<float> output(outputSize);
     cudaStatus = cudaMemcpy(
         output.data(), d_output,
-        sizeof(float) * outputSize * outputSize * nChannels,
+        sizeof(float) * outputSize,
         cudaMemcpyDeviceToHost
     );
     EXPECT_EQ(cudaStatus, cudaSuccess);