diff --git a/candle-core/src/lib.rs b/candle-core/src/lib.rs
index ddd446ee..fa85f6e0 100644
--- a/candle-core/src/lib.rs
+++ b/candle-core/src/lib.rs
@@ -63,6 +63,7 @@ pub mod shape;
 mod storage;
 mod strided_index;
 mod tensor;
+pub mod test_utils;
 pub mod utils;
 mod variable;
 
diff --git a/candle-core/tests/test_utils.rs b/candle-core/src/test_utils.rs
similarity index 82%
rename from candle-core/tests/test_utils.rs
rename to candle-core/src/test_utils.rs
index 327e88c6..8ff73fc0 100644
--- a/candle-core/tests/test_utils.rs
+++ b/candle-core/src/test_utils.rs
@@ -1,9 +1,4 @@
-#![allow(dead_code)]
-
-#[cfg(feature = "accelerate")]
-extern crate accelerate_src;
-
-use candle_core::{Result, Tensor};
+use crate::{Result, Tensor};
 
 #[macro_export]
 macro_rules! test_device {
@@ -23,6 +18,12 @@ macro_rules! test_device {
     };
 }
 
+pub fn to_vec0_round(t: &Tensor, digits: i32) -> Result<f32> {
+    let b = 10f32.powi(digits);
+    let t = t.to_vec0::<f32>()?;
+    Ok(f32::round(t * b) / b)
+}
+
 pub fn to_vec1_round(t: &Tensor, digits: i32) -> Result<Vec<f32>> {
     let b = 10f32.powi(digits);
     let t = t.to_vec1::<f32>()?;
@@ -40,7 +41,7 @@ pub fn to_vec2_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<f32>>> {
     Ok(t)
 }
 
-pub fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
+pub fn to_vec3_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
     let b = 10f32.powi(digits);
     let t = t.to_vec3::<f32>()?;
     let t = t
diff --git a/candle-core/tests/conv_tests.rs b/candle-core/tests/conv_tests.rs
index 7a4c2956..2fdd4c74 100644
--- a/candle-core/tests/conv_tests.rs
+++ b/candle-core/tests/conv_tests.rs
@@ -1,6 +1,5 @@
-mod test_utils;
 use anyhow::Result;
-use candle_core::{Device, Tensor};
+use candle_core::{test_device, test_utils, Device, Tensor};
 
 /* This test is based on the following script.
 import torch
diff --git a/candle-core/tests/custom_op_tests.rs b/candle-core/tests/custom_op_tests.rs
index 7ec04c6a..cff0aebe 100644
--- a/candle-core/tests/custom_op_tests.rs
+++ b/candle-core/tests/custom_op_tests.rs
@@ -1,10 +1,8 @@
 use candle_core::backend::BackendStorage;
 use candle_core::cpu_backend;
+use candle_core::test_utils::to_vec1_round;
 use candle_core::{CpuStorage, CustomOp1, DType, Device, Error, Layout, Result, Shape, Tensor};
 
-mod test_utils;
-use test_utils::to_vec1_round;
-
 fn fwd<T: num_traits::Float>(v: T, alpha: f64) -> T {
     if v.is_sign_positive() {
         v
diff --git a/candle-core/tests/grad_tests.rs b/candle-core/tests/grad_tests.rs
index c44a7ea7..81b1338a 100644
--- a/candle-core/tests/grad_tests.rs
+++ b/candle-core/tests/grad_tests.rs
@@ -1,6 +1,5 @@
 use anyhow::{Context, Result};
-use candle_core::{Device, Shape, Tensor, Var};
-mod test_utils;
+use candle_core::{test_device, test_utils, Device, Shape, Tensor, Var};
 
 fn simple_grad(device: &Device) -> Result<()> {
     let x = Var::new(&[3f32, 1., 4.], device)?;
diff --git a/candle-core/tests/indexing_tests.rs b/candle-core/tests/indexing_tests.rs
index 1a77b32d..9c88f319 100644
--- a/candle-core/tests/indexing_tests.rs
+++ b/candle-core/tests/indexing_tests.rs
@@ -1,8 +1,6 @@
 use anyhow::Result;
 use candle_core::{Device, IndexOp, Tensor};
 
-mod test_utils;
-
 #[test]
 fn integer_index() -> Result<()> {
     let dev = Device::Cpu;
diff --git a/candle-core/tests/layout_tests.rs b/candle-core/tests/layout_tests.rs
index d7b89605..1b29476f 100644
--- a/candle-core/tests/layout_tests.rs
+++ b/candle-core/tests/layout_tests.rs
@@ -1,5 +1,4 @@
-mod test_utils;
-use candle::{Device, IndexOp, Result, Tensor};
+use candle::{test_device, Device, IndexOp, Result, Tensor};
 use candle_core as candle;
 
 fn contiguous(device: &Device) -> Result<()> {
diff --git a/candle-core/tests/pool_tests.rs b/candle-core/tests/pool_tests.rs
index bfdbc40d..b8c007b8 100644
--- a/candle-core/tests/pool_tests.rs
+++ b/candle-core/tests/pool_tests.rs
@@ -1,5 +1,4 @@
-mod test_utils;
-use candle_core::{Device, IndexOp, Result, Tensor};
+use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor};
 
 // https://github.com/huggingface/candle/issues/364
 fn avg_pool2d(dev: &Device) -> Result<()> {
@@ -56,14 +55,17 @@ fn avg_pool2d_pytorch(dev: &Device) -> Result<()> {
         .reshape((1, 2, 4, 4))?;
     let pool = t.avg_pool2d((2, 2), (2, 2))?.squeeze(0)?;
     assert_eq!(
-        test_utils::to_vec3_round(pool, 4)?,
+        test_utils::to_vec3_round(&pool, 4)?,
         [
             [[-1.1926, -0.0395], [0.2688, 0.1871]],
             [[0.1835, -0.1606], [0.6249, 0.3217]]
         ]
     );
     let pool = t.avg_pool2d((3, 3), (3, 3))?.squeeze(0)?;
-    assert_eq!(test_utils::to_vec3_round(pool, 4)?, [[[0.085]], [[0.0078]]]);
+    assert_eq!(
+        test_utils::to_vec3_round(&pool, 4)?,
+        [[[0.085]], [[0.0078]]]
+    );
     let t = t.reshape((1, 1, 4, 8))?;
     let pool = t.avg_pool2d((2, 2), (2, 2))?.squeeze(0)?.squeeze(0)?;
diff --git a/candle-core/tests/quantized_tests.rs b/candle-core/tests/quantized_tests.rs
index d178b89b..4fd5e90c 100644
--- a/candle-core/tests/quantized_tests.rs
+++ b/candle-core/tests/quantized_tests.rs
@@ -1,11 +1,10 @@
 use candle_core::{
     quantized::{self, GgmlDType},
+    test_utils::to_vec2_round,
     Device, Result, Tensor,
 };
 use quantized::{k_quants, GgmlType};
-mod test_utils;
 use rand::prelude::*;
-use test_utils::to_vec2_round;
 
 const GGML_TEST_SIZE: usize = 32 * 128;
 
diff --git a/candle-core/tests/tensor_tests.rs b/candle-core/tests/tensor_tests.rs
index 1038efdd..6af43196 100644
--- a/candle-core/tests/tensor_tests.rs
+++ b/candle-core/tests/tensor_tests.rs
@@ -1,5 +1,4 @@
-mod test_utils;
-use candle_core::{DType, Device, IndexOp, Result, Tensor};
+use candle_core::{test_device, DType, Device, IndexOp, Result, Tensor};
 
 fn zeros(device: &Device) -> Result<()> {
     let tensor = Tensor::zeros((5, 2), DType::F32, device)?;
diff --git a/candle-nn/tests/batch_norm.rs b/candle-nn/tests/batch_norm.rs
index 7a3cfc18..209fc10a 100644
--- a/candle-nn/tests/batch_norm.rs
+++ b/candle-nn/tests/batch_norm.rs
@@ -4,10 +4,8 @@ extern crate intel_mkl_src;
 #[cfg(feature = "accelerate")]
 extern crate accelerate_src;
 
-mod test_utils;
-
 use anyhow::Result;
-use candle::{DType, Device, Tensor};
+use candle::{test_utils, DType, Device, Tensor};
 use candle_nn::BatchNorm;
 
 /* The test below has been generated using the following PyTorch code:
diff --git a/candle-nn/tests/group_norm.rs b/candle-nn/tests/group_norm.rs
index eff66d17..8145a220 100644
--- a/candle-nn/tests/group_norm.rs
+++ b/candle-nn/tests/group_norm.rs
@@ -25,10 +25,9 @@ extern crate intel_mkl_src;
 extern crate accelerate_src;
 
 use anyhow::Result;
+use candle::test_utils::to_vec3_round;
 use candle::{Device, Tensor};
 use candle_nn::{GroupNorm, Module};
-mod test_utils;
-use test_utils::to_vec3_round;
 
 #[test]
 fn group_norm() -> Result<()> {
@@ -60,7 +59,7 @@ fn group_norm() -> Result<()> {
         device,
     )?;
     assert_eq!(
-        to_vec3_round(gn2.forward(&input)?, 4)?,
+        to_vec3_round(&gn2.forward(&input)?, 4)?,
         &[
             [
                 [-0.1653, 0.3748, -0.7866],
@@ -81,7 +80,7 @@ fn group_norm() -> Result<()> {
         ]
     );
     assert_eq!(
-        to_vec3_round(gn3.forward(&input)?, 4)?,
+        to_vec3_round(&gn3.forward(&input)?, 4)?,
         &[
             [
                 [0.4560, 1.4014, -0.6313],
diff --git a/candle-nn/tests/layer_norm.rs b/candle-nn/tests/layer_norm.rs
index 0f43d804..f81c29bd 100644
--- a/candle-nn/tests/layer_norm.rs
+++ b/candle-nn/tests/layer_norm.rs
@@ -5,11 +5,9 @@ extern crate intel_mkl_src;
 extern crate accelerate_src;
 
 use anyhow::Result;
-use candle::{Device, Tensor};
+use candle::{test_utils, Device, Tensor};
 use candle_nn::{LayerNorm, Module};
 
-mod test_utils;
-
 #[test]
 fn layer_norm() -> Result<()> {
     let device = &Device::Cpu;
@@ -28,7 +26,7 @@ fn layer_norm() -> Result<()> {
     let inp = Tensor::new(&[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], device)?;
     let res = ln.forward(&inp)?;
     assert_eq!(
-        test_utils::to_vec3_round(res.clone(), 4)?,
+        test_utils::to_vec3_round(&res, 4)?,
         [[
             [-3.1742, 0.5, 4.1742],
             [-3.1742, 0.5, 4.1742],
@@ -41,7 +39,7 @@ fn layer_norm() -> Result<()> {
     let std = (res.broadcast_sub(&mean)?.sqr()?.sum_keepdim(2)?.sqrt()? / 3.0)?;
     // The standard deviation should be sqrt(`w`).
     assert_eq!(
-        test_utils::to_vec3_round(std, 4)?,
+        test_utils::to_vec3_round(&std, 4)?,
         [[[1.7321], [1.7321], [1.7321]]]
     );
     Ok(())
diff --git a/candle-nn/tests/loss.rs b/candle-nn/tests/loss.rs
index c075c7fb..d772f176 100644
--- a/candle-nn/tests/loss.rs
+++ b/candle-nn/tests/loss.rs
@@ -4,9 +4,8 @@ extern crate intel_mkl_src;
 #[cfg(feature = "accelerate")]
 extern crate accelerate_src;
 
+use candle::test_utils::to_vec0_round;
 use candle::{Device, Result, Tensor};
-mod test_utils;
-use test_utils::to_vec0_round;
 
 /* Equivalent python code:
 import torch
diff --git a/candle-nn/tests/ops.rs b/candle-nn/tests/ops.rs
index fcf39fd8..4ba8cfcc 100644
--- a/candle-nn/tests/ops.rs
+++ b/candle-nn/tests/ops.rs
@@ -4,10 +4,7 @@ extern crate intel_mkl_src;
 #[cfg(feature = "accelerate")]
 extern crate accelerate_src;
 
-mod test_utils;
-use test_utils::to_vec3_round;
-
-use candle::{Device, Result, Tensor};
+use candle::{test_utils::to_vec3_round, Device, Result, Tensor};
 
 #[test]
 fn softmax() -> Result<()> {
@@ -18,7 +15,7 @@ fn softmax() -> Result<()> {
     let t1 = candle_nn::ops::softmax(&tensor.log()?, 1)?;
     let t2 = candle_nn::ops::softmax(&tensor.log()?, 2)?;
     assert_eq!(
-        to_vec3_round(t0, 4)?,
+        to_vec3_round(&t0, 4)?,
         &[
             // 3/5, 1/2, 4/11
             [[0.6, 0.5, 0.3636], [0.1111, 0.7143, 0.5294]],
@@ -27,7 +24,7 @@ fn softmax() -> Result<()> {
         ]
     );
     assert_eq!(
-        to_vec3_round(t1, 4)?,
+        to_vec3_round(&t1, 4)?,
         &[
             // 3/4, 1/6, 4/13
             [[0.75, 0.1667, 0.3077], [0.25, 0.8333, 0.6923]],
@@ -36,7 +33,7 @@ fn softmax() -> Result<()> {
         ]
     );
     assert_eq!(
-        to_vec3_round(t2, 4)?,
+        to_vec3_round(&t2, 4)?,
         &[
             // (3, 1, 4) / 8, (1, 5, 9) / 15
             [[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]],
diff --git a/candle-nn/tests/optim.rs b/candle-nn/tests/optim.rs
index f1d3b3f5..673d0455 100644
--- a/candle-nn/tests/optim.rs
+++ b/candle-nn/tests/optim.rs
@@ -4,8 +4,7 @@ extern crate intel_mkl_src;
 #[cfg(feature = "accelerate")]
 extern crate accelerate_src;
 
-mod test_utils;
-use test_utils::{to_vec0_round, to_vec2_round};
+use candle::test_utils::{to_vec0_round, to_vec2_round};
 
 use anyhow::Result;
 use candle::{Device, Tensor, Var};
diff --git a/candle-nn/tests/test_utils.rs b/candle-nn/tests/test_utils.rs
deleted file mode 100644
index bb422cd9..00000000
--- a/candle-nn/tests/test_utils.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-#![allow(dead_code)]
-use candle::{Result, Tensor};
-
-pub fn to_vec0_round(t: &Tensor, digits: i32) -> Result<f32> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec0::<f32>()?;
-    Ok(f32::round(t * b) / b)
-}
-
-pub fn to_vec1_round(t: &Tensor, digits: i32) -> Result<Vec<f32>> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec1::<f32>()?;
-    let t = t.iter().map(|t| f32::round(t * b) / b).collect();
-    Ok(t)
-}
-
-pub fn to_vec2_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<f32>>> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec2::<f32>()?;
-    let t = t
-        .iter()
-        .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect())
-        .collect();
-    Ok(t)
-}
-
-pub fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec3::<f32>()?;
-    let t = t
-        .iter()
-        .map(|t| {
-            t.iter()
-                .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect())
-                .collect()
-        })
-        .collect();
-    Ok(t)
-}