https://github.com/huggingface/candle.git
Softmax numerical stability. (#267)
* Softmax numerical stability.
* Fix the flash-attn test.
candle-nn/src/ops.rs
@@ -1,5 +1,29 @@
use candle::{Result, Tensor};

/// Applies the softmax function to the input tensor, rescaling the elements so that values on
/// a slice of fixed index on dimension `dim` are between 0 and 1 and sum to 1.
///
/// ```rust
/// use candle::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1., 0., 1.], [-2., 2., 3., -3.]], &Device::Cpu)?;
/// let a = candle_nn::ops::softmax(&a, 1)?;
/// assert_eq!(
///     a.to_vec2::<f32>()?,
///     &[
///         [0.13447072, 0.3655293, 0.13447072, 0.3655293],
///         [0.0048928666, 0.26714146, 0.7261658, 0.0017999851]
///     ]);
/// # Ok::<(), candle::Error>(())
/// ```
pub fn softmax<D: candle::shape::Dim>(xs: &Tensor, dim: D) -> Result<Tensor> {
    let dim = dim.to_index(xs.shape(), "softmax")?;
    let max = xs.max_keepdim(dim)?;
    let diff = xs.broadcast_sub(&max)?;
    let num = diff.exp()?;
    let den = num.sum_keepdim(dim)?;
    num.broadcast_div(&den)
}
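The `broadcast_sub(&max)` above is the whole stability fix: softmax(x)_i = exp(x_i - m) / Σ_j exp(x_j - m) for any constant m, and choosing m = max_j x_j keeps every exponent at or below zero, so `exp` can no longer overflow. A minimal scalar sketch of the naive vs. stabilized computation (hypothetical helper names, plain `f32` slices rather than candle tensors, illustrative only and not part of this diff):

// Naive softmax: exp() overflows f32 for inputs above ~88.7, so one large
// entry turns the denominator into +inf and poisons the output with NaN.
fn softmax_naive(xs: &[f32]) -> Vec<f32> {
    let den: f32 = xs.iter().map(|x| x.exp()).sum();
    xs.iter().map(|x| x.exp() / den).collect()
}

// Stabilized softmax: softmax(x) == softmax(x - m) for any constant m, and
// m = max(x) keeps every exponent <= 0, so exp() stays within [0, 1].
fn softmax_stable(xs: &[f32]) -> Vec<f32> {
    let max = xs.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let den: f32 = xs.iter().map(|x| (x - max).exp()).sum();
    xs.iter().map(|x| (x - max).exp() / den).collect()
}

fn main() {
    // exp(1234f32) is +inf, so inf / inf yields NaN in the first slot: [NaN, 0.0].
    println!("{:?}", softmax_naive(&[1234., 0.]));
    // The stable version matches the test added below: [1.0, 0.0].
    println!("{:?}", softmax_stable(&[1234., 0.]));
}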

pub fn log_softmax<D: candle::shape::Dim>(xs: &Tensor, d: D) -> Result<Tensor> {
    let d = d.to_index(xs.shape(), "log-softmax")?;
    let max = xs.max_keepdim(d)?;
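The hunk ends here; the remainder of `log_softmax` is unchanged context outside the diff. For completeness, a sketch of how the same max-subtraction trick completes the function, using only tensor ops already seen in this diff (log-softmax(x) = (x - m) - ln Σ_j exp(x_j - m)) — an assumption about the surrounding code, not the verbatim upstream lines:

    // Assumed continuation: subtract the max, then subtract the log-sum-exp.
    let diff = xs.broadcast_sub(&max)?;
    let sum_log = diff.exp()?.sum_keepdim(d)?.log()?;
    diff.broadcast_sub(&sum_log)
}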
candle-nn/tests/ops.rs
Normal file, 62 lines
@@ -0,0 +1,62 @@
use candle::{Device, Result, Tensor};

pub fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
    let b = 10f32.powi(digits);
    let t = t.to_vec3::<f32>()?;
    let t = t
        .iter()
        .map(|t| {
            t.iter()
                .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect())
                .collect()
        })
        .collect();
    Ok(t)
}
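This helper rounds every `f32` entry to `digits` decimal places (e.g. with digits = 4, 4/11 ≈ 0.36363637 rounds to 0.3636), so the `assert_eq!` calls below can compare against short literal expectations instead of doing approximate float comparisons.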

#[test]
fn softmax() -> Result<()> {
    let device = &Device::Cpu;
    let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
    let tensor = Tensor::new(data, device)?;
    let t0 = candle_nn::ops::softmax(&tensor.log()?, 0)?;
    let t1 = candle_nn::ops::softmax(&tensor.log()?, 1)?;
    let t2 = candle_nn::ops::softmax(&tensor.log()?, 2)?;
    assert_eq!(
        to_vec3_round(t0, 4)?,
        &[
            // 3/5, 1/2, 4/11
            [[0.6, 0.5, 0.3636], [0.1111, 0.7143, 0.5294]],
            // 2/5, 1/2, 7/11
            [[0.4, 0.5, 0.6364], [0.8889, 0.2857, 0.4706]]
        ]
    );
    assert_eq!(
        to_vec3_round(t1, 4)?,
        &[
            // 3/4, 1/6, 4/13
            [[0.75, 0.1667, 0.3077], [0.25, 0.8333, 0.6923]],
            // 2/10, 1/3, 7/15
            [[0.2, 0.3333, 0.4667], [0.8, 0.6667, 0.5333]]
        ]
    );
    assert_eq!(
        to_vec3_round(t2, 4)?,
        &[
            // (3, 1, 4) / 8, (1, 5, 9) / 15
            [[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]],
            // (2, 1, 7) / 10, (8, 2, 8) / 18
            [[0.2, 0.1, 0.7], [0.4444, 0.1111, 0.4444]]
        ]
    );
    Ok(())
}
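A note on the expected values: softmax(ln x)_i = exp(ln x_i) / Σ_j exp(ln x_j) = x_i / Σ_j x_j, so running softmax over `tensor.log()?` simply normalizes the original entries along the chosen dimension. That is why the inline comments read as plain ratios, e.g. the first entry of `t0` is 3 / (3 + 2) = 0.6.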

#[test]
fn softmax_numerical_stability() -> Result<()> {
    let dev = &Device::Cpu;
    let xs = Tensor::new(&[1234f32, 0.], dev)?;
    let softmax = candle_nn::ops::softmax(&xs, 0)?;
    assert_eq!(softmax.to_vec1::<f32>()?, &[1f32, 0.]);
    Ok(())
}
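The input [1234, 0] is what makes this a stability test: f32::MAX ≈ 3.4e38 ≈ e^88.7, so exp(1234f32) evaluates to +inf and a naive softmax would return inf / inf = NaN for the first element. With the max subtracted, the computation becomes exp(0) / (exp(0) + exp(-1234)) = 1 / (1 + 0), giving the exact [1., 0.] asserted above.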