Add flash attention (#241)

* Add an initial flash-attn kernel; import the flash-attn v2 code from Dao-AILab.

* More flash attn.

* Set up the flash attn parameters.

* Get things to compile locally.

* Move the flash attention files into a different directory.

* Build the static C library with nvcc (a simplified build-script sketch follows below).

* Add more flash attention.

* Update the build setup.

* Better caching.

* Exclude flash attention from the default workspace.

* Put flash-attn behind a feature gate.

* Get the flash attn kernel to run.

* Move the flags to a more appropriate place.

* Enable flash attention in llama.

* Use flash attention in llama.
Laurent Mazare, 2023-07-26 07:48:10 +01:00 (committed by GitHub)
commit d9f9c859af, parent c97d51243c
22 changed files with 2699 additions and 9 deletions
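The "build the static C library with nvcc" step above is driven by a Cargo build script. As a rough illustration only (file names, flags, and library names here are hypothetical and much simpler than the real script in this PR), such a build.rs can look like this:

// build.rs sketch (hypothetical names, heavily simplified): compile one CUDA
// source with nvcc, archive it into a static library, and tell Cargo to link
// against it together with the CUDA runtime.
use std::{env, path::PathBuf, process::Command};

fn main() {
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    println!("cargo:rerun-if-changed=kernels/flash_fwd.cu");

    // Compile the kernel for an Ampere (sm_80) target as position-independent code.
    let obj = out_dir.join("flash_fwd.o");
    let status = Command::new("nvcc")
        .args(["-O3", "--gpu-architecture=sm_80", "-Xcompiler", "-fPIC", "-c"])
        .arg("kernels/flash_fwd.cu")
        .arg("-o")
        .arg(&obj)
        .status()
        .expect("failed to spawn nvcc");
    assert!(status.success(), "nvcc failed");

    // Bundle the object file into libflashattention.a.
    let status = Command::new("ar")
        .arg("crs")
        .arg(out_dir.join("libflashattention.a"))
        .arg(&obj)
        .status()
        .expect("failed to spawn ar");
    assert!(status.success(), "ar failed");

    println!("cargo:rustc-link-search=native={}", out_dir.display());
    println!("cargo:rustc-link-lib=static=flashattention");
    println!("cargo:rustc-link-lib=dylib=cudart");
    println!("cargo:rustc-link-lib=dylib=stdc++");
}

The real candle-flash-attn build script is more involved (many kernel files, caching of the compiled objects, as the "Better caching" item hints), but the Cargo side works along these lines.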


@@ -116,6 +116,9 @@ struct Args {
     #[arg(long)]
     v2: bool,
+
+    #[arg(long)]
+    use_flash_attn: bool,
 }
 
 fn main() -> Result<()> {
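This first hunk adds a command-line switch. With clap's derive API, as used by the example, a bare bool field annotated with #[arg(long)] becomes an off-by-default --use-flash-attn flag; a self-contained sketch of just that part (not the example's full Args):

// Minimal reproduction of the new flag: a `bool` field with #[arg(long)]
// turns into a --use-flash-attn switch that is false unless passed on the
// command line.
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    #[arg(long)]
    use_flash_attn: bool,
}

fn main() {
    let args = Args::parse();
    println!("use_flash_attn = {}", args.use_flash_attn);
}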
@@ -124,7 +127,7 @@ fn main() -> Result<()> {
     let args = Args::parse();
 
     let device = candle_examples::device(args.cpu)?;
-    let config = Config::config_7b();
+    let config = Config::config_7b(args.use_flash_attn);
     let cache = model::Cache::new(!args.no_kv_cache, &config, &device)?;
     let dtype = if args.use_f32 { DType::F32 } else { DType::F16 };
     let (llama, tokenizer_filename) = match args.npy {
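The second hunk threads the flag into Config::config_7b. One variation worth noting (a sketch, not what the example does): the request can be combined with the compile-time feature so that --use-flash-attn can never reach the unimplemented!() stub shown further down.

// Hypothetical helper: only ask for the fused kernel when the binary was
// actually built with the `flash-attn` feature enabled.
fn resolve_flash_attn(requested: bool) -> bool {
    requested && cfg!(feature = "flash-attn")
}

// In main(), this would replace the direct call:
// let config = Config::config_7b(resolve_flash_attn(args.use_flash_attn));

With the code as committed, both switches have to be flipped explicitly, along the lines of `cargo run --example llama --release --features flash-attn -- --use-flash-attn` (the exact invocation depends on how the examples crate forwards the feature).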


@@ -13,10 +13,11 @@ pub struct Config {
     pub n_head: usize,
     pub n_embd: usize,
     pub n_key_value_head: usize,
+    pub use_flash_attn: bool,
 }
 
 impl Config {
-    pub fn config_7b() -> Self {
+    pub fn config_7b(use_flash_attn: bool) -> Self {
         Self {
             hidden_size: 4096,
             intermediate_size: 11008,
@@ -25,6 +26,7 @@ impl Config {
             n_head: 32,
             n_embd: 4096,
             n_key_value_head: 32,
+            use_flash_attn,
         }
     }
 }
@@ -140,6 +142,17 @@ struct CausalSelfAttention {
     n_key_value_head: usize,
     head_dim: usize,
     cache: Cache,
+    use_flash_attn: bool,
 }
 
+#[cfg(feature = "flash-attn")]
+fn flash_attn(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
+    q.custom_op3(k, v, candle_flash_attn::FlashHdim32Sm80)
+}
+
+#[cfg(not(feature = "flash-attn"))]
+fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor) -> Result<Tensor> {
+    unimplemented!("compile with '--features flash-attn'")
+}
+
 impl CausalSelfAttention {
@@ -202,12 +215,17 @@ impl CausalSelfAttention {
         let k = self.repeat_kv(k)?;
         let v = self.repeat_kv(v)?;
 
-        let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
-        let mask = self.cache.mask(seq_len)?.broadcast_as(att.shape())?;
-        let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?;
-        let att = att.softmax(D::Minus1)?;
-        // Convert to contiguous as matmul doesn't support strided vs for now.
-        let y = att.matmul(&v.contiguous()?)?;
+        let y = if self.use_flash_attn {
+            flash_attn(&q, &k, &v)?
+        } else {
+            let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
+            let mask = self.cache.mask(seq_len)?.broadcast_as(att.shape())?;
+            let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?;
+            let att = att.softmax(D::Minus1)?;
+            // Convert to contiguous as matmul doesn't support strided vs for now.
+            att.matmul(&v.contiguous()?)?
+        };
         let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
         let y = y.to_dtype(x_dtype)?;
         let y = self.o_proj.forward(&y)?;
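Both branches of the new if are intended to compute the same thing. The fallback spells out masked scaled-dot-product attention,

y = \mathrm{softmax}\!\left(\frac{q k^{\top}}{\sqrt{d_{\mathrm{head}}}} + M\right) v

where M is the causal mask (0 on visible positions, -inf on future ones), while the flash-attn custom op is expected to return the same result from a single fused CUDA kernel that never materializes the full seq_len x seq_len attention matrix. As its name suggests, FlashHdim32Sm80 targets head dimension 32 on sm80-class GPUs.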
@@ -245,6 +263,7 @@ impl CausalSelfAttention {
             n_key_value_head: cfg.n_key_value_head,
             head_dim: cfg.hidden_size / cfg.n_head,
             cache: cache.clone(),
+            use_flash_attn: cfg.use_flash_attn,
         })
     }
 }