Remove linear activation kernel

2024-03-09 22:54:23 +01:00
parent a3d85a10fc
commit fceef07a9b
2 changed files with 0 additions and 13 deletions


@@ -24,13 +24,3 @@ relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
         dst[i] = src[i] < 0.0 ? 0.0 : src[i];
     }
 }
-
-__global__ void
-linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
-    int stride = gridDim.x * blockDim.x;
-    int tid = blockDim.x * blockIdx.x + threadIdx.x;
-    for (int i = tid; i < len; i += stride) {
-        dst[i] = src[i];
-    }
-}
-


@@ -75,9 +75,6 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
         break;
    default:
-        linear_kernel<<<1, outputSize>>>(
-            d_output, d_output, outputSize
-        );
        break;
    }
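
For context: the removed default branch launched linear_kernel with d_output as both source and destination, so the kernel performed an element-wise self-copy with no observable effect. Below is a minimal sketch of that equivalence; the kernel body is the one removed by this commit, while apply_linear is a hypothetical wrapper (not part of this codebase) added only to illustrate why the launch could be dropped.

// The removed kernel, reproduced for illustration: a linear (identity)
// activation is a plain element-wise copy.
__global__ void
linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    for (int i = tid; i < len; i += stride) {
        dst[i] = src[i];
    }
}

// Hypothetical wrapper, not in the codebase: skip the launch when source and
// destination alias, which is exactly the case the removed call site hit
// (d_output copied onto d_output). Launch config mirrors the original
// <<<1, outputSize>>> call, which assumes len fits in one thread block.
void apply_linear(const float* d_src, float* d_dst, int len) {
    if (d_src == d_dst) return;  // identity onto itself: nothing to do
    linear_kernel<<<1, len>>>(d_src, d_dst, len);
}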