mirror of
https://github.com/huggingface/candle.git
synced 2025-06-15 18:28:24 +00:00

* Again set a few extra params. * Use the appropriate kernel sizes. * Add all the kernel sizes. * Parallel compiling. * Reduce the amount of parallelism. * Add the missing kernel. * Fix a typo. * Remove bf16 support for now.
10 lines
326 B
Plaintext
10 lines
326 B
Plaintext
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include "flash_fwd_launch_template.h"
template<>
|
|
void run_mha_fwd_<cutlass::bfloat16_t, 32>(Flash_fwd_params ¶ms, cudaStream_t stream) {
|
|
run_mha_fwd_hdim32<cutlass::bfloat16_t>(params, stream);
|
|
} |