mirror of https://github.com/lordmathis/CUDANet.git
Remove linear activation kernel
@@ -24,13 +24,3 @@ relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
         dst[i] = src[i] < 0.0 ? 0.0 : src[i];
     }
 }
-
-__global__ void
-linear_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
-    int stride = gridDim.x * blockDim.x;
-    int tid = blockDim.x * blockIdx.x + threadIdx.x;
-
-    for (int i = tid; i < len; i += stride) {
-        dst[i] = src[i];
-    }
-}
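The kernel removed above is an identity copy written as a grid-stride loop, the same pattern relu_kernel uses. Below is a minimal sketch of that pattern with a host-side launch; the launch_relu helper and the block size of 256 are illustrative assumptions, not code from the repository. The grid-stride form also avoids the implicit requirement of a <<<1, outputSize>>> launch that the element count fit inside a single thread block (at most 1024 threads).

// Sketch only: an element-wise activation kernel launched with a
// grid-stride loop. launch_relu and the block size are assumptions,
// not code from the CUDANet repository.
#include <cuda_runtime.h>

__global__ void
relu_kernel(const float* __restrict__ src, float* __restrict__ dst, int len) {
    int stride = gridDim.x * blockDim.x;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;

    // Each thread handles every stride-th element, so the kernel is
    // correct for any len regardless of the launch configuration.
    for (int i = tid; i < len; i += stride) {
        dst[i] = src[i] < 0.0f ? 0.0f : src[i];
    }
}

void launch_relu(const float* d_src, float* d_dst, int len) {
    const int blockSize = 256;  // assumed; any reasonable block size works
    const int gridSize = (len + blockSize - 1) / blockSize;
    relu_kernel<<<gridSize, blockSize>>>(d_src, d_dst, len);
}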
@@ -75,9 +75,6 @@ void Layers::Dense::forward(const float* d_input, float* d_output) {
         break;
 
     default:
-        linear_kernel<<<1, outputSize>>>(
-            d_output, d_output, outputSize
-        );
         break;
     }
 
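With the kernel gone, the default (linear) activation in Layers::Dense::forward simply falls through. The removed call passed d_output as both source and destination, so it only copied the buffer onto itself; dropping it does not change the layer's output. A hypothetical sketch of the dispatch after this commit, assuming an activation member and a RELU enumerator that do not appear in the diff:

// Hypothetical sketch of the activation dispatch in Layers::Dense::forward
// after this commit. The <<<1, outputSize>>> launch shape and the default
// case come from the diff; the activation member, the RELU enumerator and
// the relu_kernel case are illustrative assumptions.
switch (activation) {
case RELU:
    relu_kernel<<<1, outputSize>>>(d_output, d_output, outputSize);
    break;

default:
    // Linear (identity) activation: the removed linear_kernel only copied
    // d_output onto itself, so doing nothing here is equivalent.
    break;
}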