Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 18:48:51 +00:00

* Add some flash-attn kernel, import the code for flash-attn v2 from Dao-AILab.
* More flash attn.
* Set up the flash attn parameters.
* Get things to compile locally.
* Move the flash attention files in a different directory.
* Build the static C library with nvcc.
* Add more flash attention.
* Update the build part.
* Better caching.
* Exclude flash attention from the default workspace.
* Put flash-attn behind a feature gate.
* Get the flash attn kernel to run.
* Move the flags to a more appropriate place.
* Enable flash attention in llama.
* Use flash attention in llama.
67 lines
2.5 KiB
C++
// Inspired by
// https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h

#pragma once

/// @param COND - a boolean expression to switch by
/// @param CONST_NAME - a name given for the constexpr bool variable.
/// @param ... - code to execute for true and false
///
/// Usage:
/// ```
/// BOOL_SWITCH(flag, BoolConst, [&] {
///     some_function<BoolConst>(...);
/// });
/// ```
#define BOOL_SWITCH(COND, CONST_NAME, ...) \
  [&] { \
    if (COND) { \
      constexpr static bool CONST_NAME = true; \
      return __VA_ARGS__(); \
    } else { \
      constexpr static bool CONST_NAME = false; \
      return __VA_ARGS__(); \
    } \
  }()
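
/// FP16_SWITCH - selects the kernel element type from a runtime flag:
/// elem_type is cutlass::half_t when COND is true and cutlass::bfloat16_t
/// otherwise. The alias is visible inside the callback after macro expansion.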
#define FP16_SWITCH(COND, ...) \
  [&] { \
    if (COND) { \
      using elem_type = cutlass::half_t; \
      return __VA_ARGS__(); \
    } else { \
      using elem_type = cutlass::bfloat16_t; \
      return __VA_ARGS__(); \
    } \
  }()
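
/// FWD_HEADDIM_SWITCH - rounds the runtime head dimension up to the nearest
/// supported compile-time value (32, 64, 96, 128, 160, 192, 224 or 256) and
/// exposes it to the callback as kHeadDim. Head dimensions above 256 are not
/// handled and fall through without invoking the callback.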
#define FWD_HEADDIM_SWITCH(HEADDIM, ...) \
  [&] { \
    if (HEADDIM <= 32) { \
      constexpr static int kHeadDim = 32; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 64) { \
      constexpr static int kHeadDim = 64; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 96) { \
      constexpr static int kHeadDim = 96; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 128) { \
      constexpr static int kHeadDim = 128; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 160) { \
      constexpr static int kHeadDim = 160; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 192) { \
      constexpr static int kHeadDim = 192; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 224) { \
      constexpr static int kHeadDim = 224; \
      return __VA_ARGS__(); \
    } else if (HEADDIM <= 256) { \
      constexpr static int kHeadDim = 256; \
      return __VA_ARGS__(); \
    } \
  }()
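
Taken together, these macros turn runtime flags (element type, head dimension, causal masking) into compile-time template parameters: each switch expands to an immediately invoked lambda, and because the caller's callback is substituted textually into that lambda, the using/constexpr names declared by the switch are in scope inside the callback body. Below is a minimal sketch of how the switches compose into a single kernel dispatch; Params, run_fwd_kernel, and dispatch_fwd are hypothetical placeholders for illustration, not names defined by this header.

// Illustrative sketch only: Params, run_fwd_kernel, and dispatch_fwd are
// hypothetical; the real launchers live elsewhere in the flash-attn sources.
#include <cutlass/numeric_types.h>  // cutlass::half_t / cutlass::bfloat16_t
#include "static_switch.h"          // assuming the header above is available under this name

struct Params {
  bool is_bf16;    // true if inputs are bf16, false for fp16
  bool is_causal;  // whether to apply a causal mask
  int d;           // head dimension, known only at runtime
};

// Hypothetical launcher, templated on the compile-time choices made below.
template <typename elem_type, int kHeadDim, bool Is_causal>
void run_fwd_kernel(const Params &params) {
  (void)params;  // stub: launch the matching CUDA kernel instantiation here
}

void dispatch_fwd(const Params &params) {
  // After expansion, elem_type, kHeadDim, and Is_causal are all in scope in
  // the innermost callback, so one call site covers every instantiation.
  FP16_SWITCH(!params.is_bf16, [&] {
    FWD_HEADDIM_SWITCH(params.d, [&] {
      BOOL_SWITCH(params.is_causal, Is_causal, [&] {
        run_fwd_kernel<elem_type, kHeadDim, Is_causal>(params);
      });
    });
  });
}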