Implement padding kernel

2024-03-03 15:24:39 +01:00
parent 019ccc33d9
commit 7e4460cc5e
2 changed files with 40 additions and 27 deletions

View File

@@ -1,3 +1,5 @@
+#include <vector>
+
 /*
 Pads matrix width x height x n_channels to width + 2 * padding x height + 2 *
 padding x n_channels Matrix is represented as a pointer to column major vector
@@ -22,10 +24,12 @@ Is represented as:
 0 2 4 1 3 5 6 8 10 7 9 11
-Padded result:
-0 0 0 0 0 0 0 2 4 0 0 1 3 5 0 0 0 0 0 0 0 0 0 0 0 0 6 8 10 0 0 7 9 11 0 0 0 0 0 0
+Padded result (as a continuous vector):
+0 0 0 0 0 0 0 2 4 0
+0 1 3 5 0 0 0 0 0 0
+0 0 0 0 0 0 6 8 10 0
+0 7 9 11 0 0 0 0 0 0
 Args:
     d_input: Pointer to input vector representing matrix
@@ -41,17 +45,29 @@ __global__ void pad_matrix_kernel(
     int n,
     int p
 ) {
-    int stride = gridDim.x * blockDim.x;
-    int tid = blockDim.x * blockIdx.x + threadIdx.x;
-    for (int i = tid; i < (w + 2 * p) * (h + 2 * p) * n; i += stride) {
-
-        // if i is in the padding region
-        if (i < p * (h + 2 * p) * n || i >= (w + p) * (h + 2 * p) * n) {
-            d_padded[i] = 0.0f;
-        } else {
-            // if i is in the original region
-            d_padded[i] = d_input[(i - p * (h + 2 * p) * n) / (h + 2 * p) * w + (i - p * (h + 2 * p) * n) % (h + 2 * p)];
-        }
-    }
-}
+    int tid = blockDim.x * blockIdx.x + threadIdx.x;
+
+    if (tid >= (w + 2 * p) * (h + 2 * p) * n) {
+        return;
+    }
+
+    int idx = tid;
+
+    // unravel index
+    int i_h = idx % (h + 2 * p);
+    idx /= (h + 2 * p);
+
+    int i_w = idx % (w + 2 * p);
+    idx /= (w + 2 * p);
+    int i_n = idx % n;
+
+    // if i is in the padding region
+    if (i_w < p || i_w >= (w + p) || i_h < p || i_h >= (h + p)) {
+        d_padded[tid] = 0.0f;
+    } else {
+        // Get index into input vector
+        int i_orig = i_n * w * h + (i_w - p) * h + (i_h - p);
+        d_padded[tid] = d_input[i_orig];
+    }
+}
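
The rewritten kernel assigns one thread to each element of the padded output and unravels the flat column-major index into row, column, and channel before either writing a zero or copying from d_input. A minimal host-side sketch of the same index arithmetic, run on the 2 x 3 x 2, padding = 1 example from the comment (pad_matrix_host and main are illustrative and not part of this commit):

    #include <cstdio>
    #include <vector>

    // Host reference of the kernel's unravel-and-copy logic (illustrative only).
    void pad_matrix_host(const std::vector<float>& input, std::vector<float>& padded,
                         int w, int h, int n, int p) {
        int pw = w + 2 * p, ph = h + 2 * p;
        for (int tid = 0; tid < pw * ph * n; tid++) {
            int idx = tid;
            int i_h = idx % ph; idx /= ph;  // row inside the padded column
            int i_w = idx % pw; idx /= pw;  // padded column
            int i_n = idx % n;              // channel
            if (i_w < p || i_w >= w + p || i_h < p || i_h >= h + p) {
                padded[tid] = 0.0f;  // padding region
            } else {
                padded[tid] = input[i_n * w * h + (i_w - p) * h + (i_h - p)];
            }
        }
    }

    int main() {
        int w = 2, h = 3, n = 2, p = 1;
        std::vector<float> input = {0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11};
        std::vector<float> padded((w + 2 * p) * (h + 2 * p) * n);
        pad_matrix_host(input, padded, w, h, n, p);
        for (float v : padded) std::printf("%g ", v);  // matches the padded result in the comment
        std::printf("\n");
        return 0;
    }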

View File

@@ -27,9 +27,7 @@ TEST_F(PaddingTest, SimplePaddingTest) {
     cudaStatus = cudaMalloc((void**)&d_input, sizeof(float) * inputSize);
     EXPECT_EQ(cudaStatus, cudaSuccess);
-    cudaStatus = cudaMalloc(
-        (void**)&d_padded, sizeof(float) * paddedSize
-    );
+    cudaStatus = cudaMalloc((void**)&d_padded, sizeof(float) * paddedSize);
     EXPECT_EQ(cudaStatus, cudaSuccess);
     /*
@@ -54,7 +52,12 @@ TEST_F(PaddingTest, SimplePaddingTest) {
     cublasSetVector(inputSize, sizeof(float), input.data(), 1, d_input, 1);
     EXPECT_EQ(cublasStatus, CUBLAS_STATUS_SUCCESS);
-    pad_matrix_kernel<<<1, 1>>>(d_input, d_padded, w, h, n, p);
+    int THREADS_PER_BLOCK = 64;
+    int BLOCKS = paddedSize / THREADS_PER_BLOCK + 1;
+    pad_matrix_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(
+        d_input, d_padded, w, h, n, p
+    );
     cudaStatus = cudaDeviceSynchronize();
     EXPECT_EQ(cudaStatus, cudaSuccess);
@@ -70,13 +73,7 @@ TEST_F(PaddingTest, SimplePaddingTest) {
         paddedSize, sizeof(float), d_padded, 1, output.data(), 1
     );
-    std::cout << "Actual output: " << std::endl;
-    for (int i = 0; i < paddedSize; i++) {
-        std::cout << output[i] << " ";
-    }
-    std::cout << std::endl;
     for (int i = 0; i < paddedSize; i++) {
         EXPECT_NEAR(expectedOutput[i], output[i], 1e-5);
     }
 }
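
The test sizes the grid as paddedSize / THREADS_PER_BLOCK + 1 blocks and relies on the early-return guard in the kernel to discard the overshoot; when paddedSize is an exact multiple of the block size this launches one fully idle extra block. A small sketch of the arithmetic in the usual ceiling-division form (grid_size is an illustrative helper, not part of the test):

    #include <cassert>

    // Smallest number of blocks of size `block` that covers `total` threads.
    static inline int grid_size(int total, int block) {
        return (total + block - 1) / block;
    }

    int main() {
        assert(grid_size(40, 64) == 1);  // e.g. a padded size of 40 (the 2 x 3 x 2, p = 1 example) -> one block
        assert(grid_size(64, 64) == 1);  // exact multiple: no idle extra block
        assert(grid_size(65, 64) == 2);
        return 0;
    }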