mirror of
https://github.com/huggingface/candle.git
synced 2025-06-16 02:38:10 +00:00

* chore: update flash attention kernels * fmt * remove unused kernels * force f32 * correct stride
11 lines
378 B
Plaintext
11 lines
378 B
Plaintext
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"
template<>
|
|
void run_mha_fwd_<cutlass::half_t, 256>(Flash_fwd_params ¶ms, cudaStream_t stream) {
|
|
run_mha_fwd_hdim256<cutlass::half_t>(params, stream);
|
|
}
|