Fix the llama causal mask inversion.
@@ -289,7 +289,7 @@ impl CausalSelfAttention {
         let device = x.device();
         // TODO: If we support bool or u8 tensors, this would be better.
         let mask: Vec<_> = (0..t)
-            .flat_map(|i| (0..t).map(move |j| u32::from(j <= i)))
+            .flat_map(|i| (0..t).map(move |j| u32::from(j > i)))
             .collect();
         // Once lower_triangle is available, use the following:
         //let mask = Tensor::new(1u32, &device)?
|
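For reference, here is a standalone sketch of what the corrected expression produces (plain Rust, no candle dependency; the small sequence length `t`, the `main` function, and the printing are illustrative only). Assuming the mask is used to select the attention entries that get set to negative infinity before the softmax, as is typical for causal attention, row `i` should have 1s only at the future columns `j > i`; the previous `j <= i` marked the visible past instead, which is the inversion the commit fixes.

    fn main() {
        // Sequence length; in the model this is the current number of tokens.
        let t = 4usize;

        // Same construction as in the patched example: 1 marks a future position
        // (j > i) that must be excluded from attention, 0 marks a visible one.
        let mask: Vec<u32> = (0..t)
            .flat_map(|i| (0..t).map(move |j| u32::from(j > i)))
            .collect();

        // Print the t x t mask row by row; each row i only forbids columns j > i.
        for i in 0..t {
            let row = &mask[i * t..(i + 1) * t];
            println!("{row:?}");
        }
        // Expected output (strictly upper-triangular):
        // [0, 1, 1, 1]
        // [0, 0, 1, 1]
        // [0, 0, 0, 1]
        // [0, 0, 0, 0]
    }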