Fix the gelu kernel for f16.

laurent
2023-06-23 13:38:54 +01:00
parent db5526d51a
commit f8848db001
2 changed files with 26 additions and 42 deletions


@@ -1,42 +1,27 @@
 #include "cuda_utils.cuh"
 #include<stdint.h>
-extern "C" __global__ void affine_f32(
-    const size_t numel,
-    const size_t num_dims,
-    const size_t *info,
-    const float *x,
-    float *y,
-    const float mul,
-    const float add
-) {
-    const size_t *dims = info;
-    const size_t *strides = info + num_dims;
-    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
-    if (i >= numel) {
-        return;
-    }
-    // This is likely to be very very slow, we should either optimize the contiguous case
-    // as a separate kernel, proceed by block, improve the stride computations (and probably
-    // do all of these).
-    unsigned strided_i = get_strided_index(i, num_dims, dims, strides);
-    y[strided_i] = x[i] * mul + add;
-}
+#define AFFINE_OP(TYPENAME, FN_NAME) \
+extern "C" __global__ void FN_NAME( \
+    const size_t numel, \
+    const size_t num_dims, \
+    const size_t *info, \
+    const TYPENAME *x, \
+    TYPENAME *y, \
+    const TYPENAME mul, \
+    const TYPENAME add \
+) { \
+    const size_t *dims = info; \
+    const size_t *strides = info + num_dims; \
+    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; \
+    if (i >= numel) { \
+        return; \
+    } \
+    unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
+    y[strided_i] = x[i] * mul + add; \
+} \
+
+AFFINE_OP(float, affine_f32)
+AFFINE_OP(double, affine_f64)
+AFFINE_OP(uint32_t, affine_u32)
-extern "C" __global__ void affine_f64(
-    const size_t numel,
-    const size_t num_dims,
-    const size_t *info,
-    const double *x,
-    double *y,
-    const double mul,
-    const double add
-) {
-    const size_t *dims = info;
-    const size_t *strides = info + num_dims;
-    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
-    if (i >= numel) {
-        return;
-    }
-    unsigned strided_i = get_strided_index(i, num_dims, dims, strides);
-    y[strided_i] = x[i] * mul + add;
-}
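Aside (not part of the commit): the removed TODO comment above notes that the strided index walk is wasteful when the tensor is contiguous, since in that case get_strided_index(i, ...) is just i. A minimal sketch of the separate fast-path kernel it suggests, with the hypothetical name affine_f32_contiguous:

#include "cuda_utils.cuh"
#include <stdint.h>

// Hypothetical contiguous fast path (an illustration, not in this commit):
// for a contiguous layout the strided index equals i, so dims, strides and
// get_strided_index can be dropped entirely.
extern "C" __global__ void affine_f32_contiguous(
    const size_t numel,
    const float *x,
    float *y,
    const float mul,
    const float add
) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numel) {
        return;
    }
    y[i] = x[i] * mul + add;
}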
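Also as an illustration only: a host-side sketch of how one of the macro-generated kernels could be launched. The info layout (dims followed by strides) mirrors the device code above; the launch_affine_f32 name and the 256-thread block size are assumptions, not taken from the repository.

#include <cuda_runtime.h>
#include <stddef.h>

// Declaration matching the AFFINE_OP(float, affine_f32) instantiation above.
extern "C" __global__ void affine_f32(
    const size_t numel, const size_t num_dims, const size_t *info,
    const float *x, float *y, const float mul, const float add);

// Hypothetical launcher: one thread per element, rounded up to whole blocks.
void launch_affine_f32(size_t numel, size_t num_dims, const size_t *info_dev,
                       const float *x_dev, float *y_dev, float mul, float add) {
    const unsigned block_dim = 256;  // assumed block size
    const unsigned grid_dim = (unsigned)((numel + block_dim - 1) / block_dim);
    affine_f32<<<grid_dim, block_dim>>>(numel, num_dims, info_dev, x_dev, y_dev, mul, add);
}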


@@ -19,11 +19,10 @@ extern "C" __global__ void FN_NAME( \
 template<typename T>
 __device__ T gelu_fwd(T x) {
-    constexpr T fastCoeff = 0.044715;
     T x_sq = x * x;
     T x_cube = x_sq * x;
-    T alpha = x + fastCoeff * x_cube;
-    return 0.5 * x * (1.0 + tanhg(M_2_SQRTPI * M_SQRT1_2 * alpha));
+    T alpha = x + static_cast<T>(0.044715) * x_cube;
+    return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + tanhg(static_cast<T>(M_2_SQRTPI * M_SQRT1_2) * alpha));
 }
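Why this fixes f16 (my reading; the commit message does not spell it out): with T = __half, constexpr T fastCoeff = 0.044715; is not valid, since __half has no constexpr constructor from a double literal, and the bare 0.5 and 1.0 literals pull the arithmetic up to double, which __half operands either reject outright or silently promote through. Routing every constant through static_cast<T>(...) keeps the whole expression in T. Note that tanhg is presumably a type-dispatching tanh helper from the included cuda_utils.cuh, not a typo for tanh. For reference, gelu_fwd computes the usual tanh approximation of GELU, and the constant product M_2_SQRTPI * M_SQRT1_2 folds to sqrt(2/pi):

\[
\mathrm{gelu}(x) \approx \tfrac{1}{2}\,x\left(1 + \tanh\!\left(\sqrt{\tfrac{2}{\pi}}\,\bigl(x + 0.044715\,x^{3}\bigr)\right)\right),
\qquad
\frac{2}{\sqrt{\pi}} \cdot \frac{1}{\sqrt{2}} = \sqrt{\frac{2}{\pi}}.
\]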