Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 18:48:51 +00:00
Add flash attention (#241)
* Add some flash-attn kernel, import the code for flash-attn v2 from Dao-AILab.
* More flash attn.
* Set up the flash attn parameters.
* Get things to compile locally.
* Move the flash attention files in a different directory.
* Build the static C library with nvcc.
* Add more flash attention.
* Update the build part.
* Better caching.
* Exclude flash attention from the default workspace.
* Put flash-attn behind a feature gate.
* Get the flash attn kernel to run.
* Move the flags to a more appropriate place.
* Enable flash attention in llama.
* Use flash attention in llama.
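Two of the steps above ("Build the static C library with nvcc", "Better caching") concern the build script that turns the imported flash-attn v2 CUDA sources into a static library the Rust crate links against. The sketch below shows one conventional way to do this with the cc crate; the kernel file names, the architecture flag, and the use of cc at all are illustrative assumptions, not the commit's actual build script.

// build.rs (sketch only, not the commit's actual script)
fn main() {
    // Recompile only when the kernel sources change; the commit's
    // "better caching" step goes beyond this simple rerun rule.
    println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.cu"); // illustrative path

    cc::Build::new()
        .cuda(true) // drive nvcc instead of the host C/C++ compiler
        .flag("-O3")
        .flag("-arch=sm_80") // Ampere; adjust for the target GPU
        .file("kernels/flash_fwd_kernel.cu") // illustrative path
        .compile("flashattn"); // emits and links libflashattn.a

    // The compiled objects still need the CUDA runtime at link time.
    println!("cargo:rustc-link-lib=dylib=cudart");
}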
@@ -116,6 +116,9 @@ struct Args {
 
     #[arg(long)]
     v2: bool,
+
+    #[arg(long)]
+    use_flash_attn: bool,
 }
 
 fn main() -> Result<()> {
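With clap's derive macro, a bool field annotated with #[arg(long)] becomes an optional switch named after the field, so the example gains a --use-flash-attn flag that stays false unless passed on the command line.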
@@ -124,7 +127,7 @@ fn main() -> Result<()> {
     let args = Args::parse();
 
     let device = candle_examples::device(args.cpu)?;
-    let config = Config::config_7b();
+    let config = Config::config_7b(args.use_flash_attn);
     let cache = model::Cache::new(!args.no_kv_cache, &config, &device)?;
     let dtype = if args.use_f32 { DType::F32 } else { DType::F16 };
     let (llama, tokenizer_filename) = match args.npy {
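The second hunk passes the new flag into the model configuration. Below is a minimal sketch of how such a flag can reach the attention computation, pairing a compile-time cargo feature (so the default workspace builds without nvcc, matching the "exclude from the default workspace" and "feature gate" steps) with the runtime switch carried by the config. The crate paths (candle_core, candle_nn, candle_flash_attn), the flash_attn signature, and the attend helper are assumptions for illustration; the commit's model code is not reproduced here.

use candle_core::{D, Result, Tensor};

// Only compiled when the crate is built with `--features flash-attn`; the
// fallback keeps the default (no-nvcc) build working.
#[cfg(feature = "flash-attn")]
fn flash_attn(q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool) -> Result<Tensor> {
    candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}

#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
    unimplemented!("recompile with '--features flash-attn'")
}

// The flag added in the diff rides on the config; the real Config also
// carries the model hyperparameters, omitted here.
pub struct Config {
    pub use_flash_attn: bool,
}

impl Config {
    pub fn config_7b(use_flash_attn: bool) -> Self {
        Self { use_flash_attn }
    }
}

// Schematic attention call site: pick the fused kernel or a plain
// matmul + softmax path. The real layer also applies the causal mask and
// reshapes q/k/v into the layout each path expects.
fn attend(cfg: &Config, q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f64) -> Result<Tensor> {
    if cfg.use_flash_attn {
        flash_attn(q, k, v, softmax_scale as f32, /* causal= */ true)
    } else {
        let att = (q.matmul(&k.t()?)? * softmax_scale)?;
        let att = candle_nn::ops::softmax(&att, D::Minus1)?;
        att.matmul(v)
    }
}

Under this kind of wiring, a build without the flash-attn feature compiles and runs as before; the fused kernel is only reachable when the example is built with the feature and started with --use-flash-attn.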