Simplify the binary kernels.

This commit is contained in:
laurent
2023-06-22 13:16:03 +01:00
parent 4b1c3405e9
commit 83d6198009
5 changed files with 18 additions and 142 deletions

View File

@@ -1,20 +1,8 @@
#include "binary_op_macros.cuh"
struct BinaryAddOp {};
#if __CUDA_ARCH__ >= 530
BINARY_OP(__half, badd_fwd_f16, badd_bwd_lhs_f16, badd_bwd_rhs_f16, BinaryAddOp,
x + y,
1.0,
1.0)
BINARY_OP(__half, badd_f16, x + y)
#endif
BINARY_OP(float, badd_fwd_f32, badd_bwd_lhs_f32, badd_bwd_rhs_f32, BinaryAddOp,
x + y,
1.0,
1.0)
BINARY_OP(double, badd_fwd_f64, badd_bwd_lhs_f64, badd_bwd_rhs_f64, BinaryAddOp,
x + y,
1.0,
1.0)
BINARY_OP(float, badd_f32, x + y)
BINARY_OP(double, badd_f64, x + y);

View File

@@ -1,21 +1,8 @@
#include "binary_op_macros.cuh"
struct BinaryDivOp {};
#if __CUDA_ARCH__ >= 530
BINARY_OP(__half, bdiv_fwd_f16, bdiv_bwd_lhs_f16, bdiv_bwd_rhs_f16, BinaryDivOp,
x / y,
recipg(y),
-x / (y * y))
BINARY_OP(__half, bdiv_f16, x / y)
#endif
BINARY_OP(float, bdiv_fwd_f32, bdiv_bwd_lhs_f32, bdiv_bwd_rhs_f32, BinaryDivOp,
x / y,
recipg(y),
-x / (y * y))
BINARY_OP(double, bdiv_fwd_f64, bdiv_bwd_lhs_f64, bdiv_bwd_rhs_f64, BinaryDivOp,
x / y,
recipg(y),
-x / (y * y))
BINARY_OP(float, bdiv_f32, x / y)
BINARY_OP(double, bdiv_f64, x / y);

View File

@@ -1,21 +1,8 @@
#include "binary_op_macros.cuh"
struct BinaryMulKernalOp {};
#if __CUDA_ARCH__ >= 530
BINARY_OP(__half, bmul_fwd_f16, bmul_bwd_lhs_f16, bmul_bwd_rhs_f16, BinaryMulKernalOp,
x * y,
y,
x)
BINARY_OP(__half, bmul_f16, x * y)
#endif
BINARY_OP(float, bmul_fwd_f32, bmul_bwd_lhs_f32, bmul_bwd_rhs_f32, BinaryMulKernalOp,
x * y,
y,
x)
BINARY_OP(double, bmul_fwd_f64, bmul_bwd_lhs_f64, bmul_bwd_rhs_f64, BinaryMulKernalOp,
x * y,
y,
x)
BINARY_OP(float, bmul_f32, x * y)
BINARY_OP(double, bmul_f64, x * y);

View File

@@ -1,18 +1,16 @@
#include "cuda_utils.cuh"
#define LONG_BINARY_OP(TYPENAME, FORWARD, BACKWARD_LHS, BACKWARD_RHS, OP_STRUCT, FUNC, DFDX, DFDY) \
extern "C" __global__ void FORWARD( \
const OP_STRUCT op, \
#define BINARY_OP(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const size_t *dims, \
const size_t *lhs_strides, \
const size_t *rhs_strides, \
const TYPENAME *lhs, \
const TYPENAME *rhs, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *lhs_strides = info + num_dims; \
const size_t *rhs_strides = info + 2 * num_dims; \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned int tmp_i = i; \
unsigned int lhs_i = 0; \
@@ -25,77 +23,6 @@ extern "C" __global__ void FORWARD( \
} \
TYPENAME x = lhs ? lhs[lhs_i] : out[i]; \
TYPENAME y = rhs ? rhs[rhs_i] : out[i]; \
TYPENAME fx; \
FUNC\
out[i] = fx; \
out[i] = FUNC; \
} \
} \
\
extern "C" __global__ void BACKWARD_LHS( \
const OP_STRUCT op, \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *lhs, \
TYPENAME *grad_lhs, \
const size_t chunk_len, \
const TYPENAME *rhs, \
const TYPENAME *grad_out \
) { \
const size_t *dims = info + 0 * num_dims; \
const size_t *out_strides = info + 1 * num_dims; \
const size_t *rhs_strides = info + 2 * num_dims; \
TYPENAME zero = 0.0; \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned int tmp_i = i; \
unsigned int out_i = 0; \
unsigned int rhs_i = 0; \
for (int d = num_dims - 1; d >= 0; d--) { \
unsigned int i_dim = tmp_i % dims[d]; \
out_i += i_dim * out_strides[d]; \
rhs_i += i_dim * rhs_strides[d]; \
tmp_i /= dims[d]; \
} \
TYPENAME x = lhs ? lhs[i / chunk_len] : zero; \
TYPENAME y = rhs ? rhs[rhs_i] : zero; \
TYPENAME go = grad_out[out_i]; \
TYPENAME dfdx = (DFDX); \
chunk_sum(chunk_len, dfdx * go, grad_lhs); \
} \
} \
\
extern "C" __global__ void BACKWARD_RHS( \
const OP_STRUCT op, \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *lhs, \
const TYPENAME *rhs, \
TYPENAME *grad_rhs, \
const size_t chunk_len, \
const TYPENAME *grad_out \
) { \
const size_t *dims = info + 3 * num_dims; \
const size_t *out_strides = info + 4 * num_dims; \
const size_t *lhs_strides = info + 5 * num_dims; \
TYPENAME zero = 0.0; \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned int tmp_i = i; \
unsigned int lhs_i = 0; \
unsigned int out_i = 0; \
for (int d = num_dims - 1; d >= 0; d--) { \
unsigned int i_dim = tmp_i % dims[d]; \
lhs_i += i_dim * lhs_strides[d]; \
out_i += i_dim * out_strides[d]; \
tmp_i /= dims[d]; \
} \
TYPENAME x = lhs ? lhs[lhs_i] : zero; \
TYPENAME y = rhs ? rhs[i / chunk_len] : zero; \
TYPENAME go = grad_out[out_i]; \
TYPENAME dfdy = (DFDY); \
chunk_sum(chunk_len, dfdy * go, grad_rhs); \
} \
}
#define BINARY_OP(TYPENAME, FORWARD, BACKWARD_LHS, BACKWARD_RHS, OP_STRUCT, FUNC, DFDX, DFDY) \
LONG_BINARY_OP(TYPENAME, FORWARD, BACKWARD_LHS, BACKWARD_RHS, OP_STRUCT, fx = (FUNC);, DFDX, DFDY)

View File

@@ -1,21 +1,8 @@
#include "binary_op_macros.cuh"
struct BinarySubKernelOp {};
#if __CUDA_ARCH__ >= 530
BINARY_OP(__half, bsub_fwd_f16, bsub_bwd_lhs_f16, bsub_bwd_rhs_f16, BinarySubKernelOp,
x - y,
1.0,
-1.0)
BINARY_OP(__half, bsub_f16, x - y)
#endif
BINARY_OP(float, bsub_fwd_f32, bsub_bwd_lhs_f32, bsub_bwd_rhs_f32, BinarySubKernelOp,
x - y,
1.0,
-1.0)
BINARY_OP(double, bsub_fwd_f64, bsub_bwd_lhs_f64, bsub_bwd_rhs_f64, BinarySubKernelOp,
x - y,
1.0,
-1.0)
BINARY_OP(float, bsub_f32, x - y)
BINARY_OP(double, bsub_f64, x - y);