From d4e75d582506520ba6a76330bba4c14dcdcc19d8 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Fri, 25 Aug 2023 12:01:12 +0000
Subject: [PATCH] Let's keep the dirty code on its own.

---
 candle-kernels/src/cast.cu | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/candle-kernels/src/cast.cu b/candle-kernels/src/cast.cu
index 03ca1ec7..ab2045a3 100644
--- a/candle-kernels/src/cast.cu
+++ b/candle-kernels/src/cast.cu
@@ -2,6 +2,29 @@
 #include <stdint.h>
 
 #define CAST_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \
+extern "C" __global__ void FN_NAME( \
+    const size_t numel, \
+    const size_t num_dims, \
+    const size_t *info, \
+    const SRC_TYPENAME *inp, \
+    DST_TYPENAME *out \
+) { \
+    const size_t *dims = info; \
+    const size_t *strides = info + num_dims; \
+    if (is_contiguous(num_dims, dims, strides)) { \
+        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
+            out[i] = inp[i]; \
+        } \
+    } \
+    else { \
+        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
+            unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
+            out[i] = inp[strided_i]; \
+        } \
+    } \
+} \
+
+#define CAST_BF_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \
 extern "C" __global__ void FN_NAME( \
     const size_t numel, \
     const size_t num_dims, \
@@ -29,14 +52,14 @@
 CAST_OP(__nv_bfloat16, __nv_bfloat16, cast_bf16_bf16)
 // CAST_OP(__nv_bfloat16, uint8_t, cast_bf16_u8)
 CAST_OP(__nv_bfloat16, uint32_t, cast_bf16_u32)
-CAST_OP(__nv_bfloat16, __half, cast_bf16_f16)
 CAST_OP(__nv_bfloat16, float, cast_bf16_f32)
 CAST_OP(__nv_bfloat16, double, cast_bf16_f64)
 CAST_OP(uint8_t, __nv_bfloat16, cast_u8_bf16)
 CAST_OP(uint32_t, __nv_bfloat16, cast_u32_bf16)
-CAST_OP(__half, __nv_bfloat16, cast_f16_bf16)
 CAST_OP(float, __nv_bfloat16, cast_f32_bf16)
 CAST_OP(double, __nv_bfloat16, cast_f64_bf16)
+CAST_BF_OP(__nv_bfloat16, __half, cast_bf16_f16)
+CAST_BF_OP(__half, __nv_bfloat16, cast_f16_bf16)
 #endif
 
 #if __CUDA_ARCH__ >= 530
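
Note: the part of CAST_BF_OP visible in the hunk is identical to CAST_OP; the lines that actually differ (the "dirty" bf16/f16 handling this commit isolates) fall outside the diff context. Below is a minimal standalone sketch of one plausible way such a cast can be implemented, assuming the __nv_bfloat16 <-> __half pairs are converted by widening to float first. The macro name CAST_VIA_F32_OP and the *_sketch kernel names are illustrative only and are not part of this patch, and the sketch handles just the contiguous case to stay short.

// Sketch only: a plain CUDA translation unit, not the candle kernel file.
// Assumption: no direct __nv_bfloat16 <-> __half conversion is relied on;
// the value is widened to float and then narrowed to the destination type.
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <stddef.h>

#define CAST_VIA_F32_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
    const size_t numel, \
    const SRC_TYPENAME *inp, \
    DST_TYPENAME *out \
) { \
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
        /* Widen to float, then narrow to the destination type. */ \
        out[i] = (DST_TYPENAME)((float)inp[i]); \
    } \
}

// The two cast pairs the patch routes through CAST_BF_OP (kernel names are
// hypothetical; the real kernels also handle strided, non-contiguous inputs).
CAST_VIA_F32_OP(__nv_bfloat16, __half, cast_bf16_f16_sketch)
CAST_VIA_F32_OP(__half, __nv_bfloat16, cast_f16_bf16_sketch)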