Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 02:38:10 +00:00

* Again set a few extra params.
* Use the appropriate kernel sizes.
* Add all the kernel sizes.
* Parallel compiling.
* Reduce the amount of parallelism.
* Add the missing kernel.
* Fix a typo.
* Remove bf16 support for now.
23 lines
1.3 KiB
Plaintext
// Copyright (c) 2023, Tri Dao.

// Splitting the different head dimensions to different files to speed up compilation.

#include "flash_fwd_launch_template.h"

// template<>
// void run_mha_fwd_<cutlass::half_t, 96>(Flash_fwd_params &params, cudaStream_t stream) {
//     using elem_type = cutlass::half_t;
//     BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
//         run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 64, 4, true, false, elem_type>, Is_dropout>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 64, 4, true, true, elem_type>, Is_dropout>(params, stream);
//         // This 3rd one is good for H100, and A100, A6000
//         run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 64, 4, false, false, elem_type>, Is_dropout>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 64, 4, false, true, elem_type>, Is_dropout>(params, stream);
//         // These two are always slower
//         // run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 128, 4, true, elem_type>>(params, stream);
//         // run_flash_fwd<Flash_fwd_kernel_traits<96, 64, 128, 4, true, elem_type>>(params, stream);
//     });
// }
template<> void run_mha_fwd_<cutlass::half_t, 96>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim96<cutlass::half_t>(params, stream);
}
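For context, the pattern this file relies on is declaring a templated launcher once in a shared header and putting each explicit specialization in its own translation unit, so the heavy per-head-dimension kernel instantiations can be compiled in parallel. Below is a minimal, self-contained C++ sketch of that pattern; the names Params, run_fwd_, and run_fwd are illustrative placeholders, not the actual flash-attention API.

#include <cstdio>

// Illustrative stand-in for the kernel parameter struct (not the real API).
struct Params {
    int head_dim;
};

// Generic launcher: declared once, shared by every translation unit.
template <typename T, int kHeadDim>
void run_fwd_(const Params &params);

// Explicit specialization: in the real code each (dtype, head_dim) pair lives
// in its own .cu file, which is what keeps per-file compile times manageable.
template <>
void run_fwd_<float, 96>(const Params &params) {
    std::printf("head_dim=96 path selected (head_dim=%d)\n", params.head_dim);
}

// Runtime dispatcher: picks the specialization that was compiled elsewhere.
void run_fwd(const Params &params) {
    if (params.head_dim <= 96) {
        run_fwd_<float, 96>(params);
    }
    // ... other head dimensions would dispatch to other translation units.
}

int main() {
    Params p{96};
    run_fwd(p);
    return 0;
}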