Optimize Tensor::new when called on nested Vec<..>. (#2927)

* Optimize Tensor::new when called on nested Vec<..>.

* Improve performance.

* Similar flattening for the 4d case.

* More tweaks.

* Add some dummy test.
Laurent Mazare
2025-04-28 09:19:45 +02:00
committed by GitHub
parent e3db30021f
commit e98754fc5a
5 changed files with 174 additions and 7 deletions
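
The new test below exercises Tensor::new on flat and nested Vec inputs; the optimization itself avoids building intermediate per-row tensors by flattening the nested Vec into one contiguous buffer plus a shape before the storage is created. A minimal standalone sketch of that idea for the 2-D case (flatten_2d and its error handling are illustrative only, not the actual candle code):

    // Sketch of the flattening idea: collect the nested rows into one contiguous
    // buffer and record the shape, so the tensor can be created from a single flat
    // Vec instead of row-by-row copies.
    fn flatten_2d(rows: Vec<Vec<f32>>) -> Result<(Vec<f32>, (usize, usize)), String> {
        let dim0 = rows.len();
        let dim1 = rows.first().map_or(0, |row| row.len());
        let mut data = Vec::with_capacity(dim0 * dim1);
        for row in rows {
            // Reject ragged input rather than silently producing a malformed tensor.
            if row.len() != dim1 {
                return Err(format!("ragged input: expected {dim1} elements, got {}", row.len()));
            }
            data.extend(row);
        }
        Ok((data, (dim0, dim1)))
    }

    fn main() {
        let (data, shape) = flatten_2d(vec![vec![1f32, 2., 3.], vec![4., 5., 6.]]).unwrap();
        assert_eq!(shape, (2, 3));
        assert_eq!(data, [1., 2., 3., 4., 5., 6.]);
    }

The 3-D and 4-D cases mentioned in the commit message follow the same pattern, with one more level of length checks and buffer extension per nesting depth.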

@@ -1811,3 +1811,26 @@ fn test_flip_3d_channels() -> Result<()> {
    candle_core::test_utils::assert_tensor_eq(&flipped, &expected)?;
    Ok(())
}

#[test]
fn tensor_new() -> Result<()> {
    let t1 = Tensor::new(vec![1f32, 2.0, 3.0], &Device::Cpu)?;
    assert_eq!(t1.to_vec1::<f32>()?, [1.0, 2.0, 3.0]);
    let t2 = Tensor::new(vec![vec![1f32, 2., 3.], vec![4., 5., 6.]], &Device::Cpu)?;
    assert_eq!(t2.to_vec2::<f32>()?, [[1., 2., 3.], [4., 5., 6.]]);
    let t3 = Tensor::new(
        vec![
            vec![vec![1f32, 2., 3.], vec![4., 5., 6.]],
            vec![vec![3f32, 1., 4.], vec![1., 5., 9.]],
        ],
        &Device::Cpu,
    )?;
    assert_eq!(
        t3.to_vec3::<f32>()?,
        [
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
            [[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]
        ]
    );
    Ok(())
}