Move the test-utils bits to a shared place. (#619)

commit 5320aa6b7d (parent a8b39dd7b7)
Author: Laurent Mazare
Date: 2023-08-27 09:42:22 +01:00
Committed by: GitHub

17 changed files with 34 additions and 88 deletions
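With the helpers now exported from candle_core itself, each test file imports
test_utils and the test_device! macro instead of declaring its own local
test_utils module. A minimal sketch of the new usage; the test names
(rounding, rounding_cpu, rounding_gpu) and values are hypothetical, chosen
for illustration:

use candle_core::{test_device, test_utils, Device, Result, Tensor};

// Device-generic test body; `test_device!` below expands it into a CPU test
// and, behind the `cuda` feature, a CUDA test.
fn rounding(device: &Device) -> Result<()> {
    let t = Tensor::new(&[0.1234f32, 0.5678], device)?;
    // Round each element to two digits before comparing.
    assert_eq!(test_utils::to_vec1_round(&t, 2)?, [0.12, 0.57]);
    Ok(())
}

test_device!(rounding, rounding_cpu, rounding_gpu);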

@@ -1,6 +1,5 @@
-mod test_utils;
 use anyhow::Result;
-use candle_core::{Device, Tensor};
+use candle_core::{test_device, test_utils, Device, Tensor};
 
 /* This test is based on the following script.
 import torch

@@ -1,10 +1,8 @@
 use candle_core::backend::BackendStorage;
 use candle_core::cpu_backend;
+use candle_core::test_utils::to_vec1_round;
 use candle_core::{CpuStorage, CustomOp1, DType, Device, Error, Layout, Result, Shape, Tensor};
-mod test_utils;
-use test_utils::to_vec1_round;
 
 fn fwd<T: num_traits::Float>(v: T, alpha: f64) -> T {
     if v.is_sign_positive() {
         v

@@ -1,6 +1,5 @@
 use anyhow::{Context, Result};
-use candle_core::{Device, Shape, Tensor, Var};
-mod test_utils;
+use candle_core::{test_device, test_utils, Device, Shape, Tensor, Var};
 
 fn simple_grad(device: &Device) -> Result<()> {
     let x = Var::new(&[3f32, 1., 4.], device)?;

@@ -1,8 +1,6 @@
 use anyhow::Result;
 use candle_core::{Device, IndexOp, Tensor};
 
-mod test_utils;
-
 #[test]
 fn integer_index() -> Result<()> {
     let dev = Device::Cpu;

@@ -1,5 +1,4 @@
-mod test_utils;
-use candle::{Device, IndexOp, Result, Tensor};
+use candle::{test_device, Device, IndexOp, Result, Tensor};
 use candle_core as candle;
 
 fn contiguous(device: &Device) -> Result<()> {

@@ -1,5 +1,4 @@
-mod test_utils;
-use candle_core::{Device, IndexOp, Result, Tensor};
+use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor};
 
 // https://github.com/huggingface/candle/issues/364
 fn avg_pool2d(dev: &Device) -> Result<()> {
@@ -56,14 +55,17 @@ fn avg_pool2d_pytorch(dev: &Device) -> Result<()> {
         .reshape((1, 2, 4, 4))?;
     let pool = t.avg_pool2d((2, 2), (2, 2))?.squeeze(0)?;
     assert_eq!(
-        test_utils::to_vec3_round(pool, 4)?,
+        test_utils::to_vec3_round(&pool, 4)?,
         [
             [[-1.1926, -0.0395], [0.2688, 0.1871]],
             [[0.1835, -0.1606], [0.6249, 0.3217]]
         ]
     );
     let pool = t.avg_pool2d((3, 3), (3, 3))?.squeeze(0)?;
-    assert_eq!(test_utils::to_vec3_round(pool, 4)?, [[[0.085]], [[0.0078]]]);
+    assert_eq!(
+        test_utils::to_vec3_round(&pool, 4)?,
+        [[[0.085]], [[0.0078]]]
+    );
     let t = t.reshape((1, 1, 4, 8))?;
     let pool = t.avg_pool2d((2, 2), (2, 2))?.squeeze(0)?.squeeze(0)?;
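Note that the callsites above now pass &pool rather than pool: the shared
helper borrows the tensor instead of consuming it, matching the by-reference
signatures of to_vec1_round and to_vec2_round in the deleted local module
below. A sketch of the presumed shared version (an assumption, inferred from
the callsite change; only the borrow differs from the deleted copy):

use candle_core::{Result, Tensor};

// Presumed shared version in candle_core::test_utils: takes `&Tensor`,
// so the caller keeps ownership of the pooled tensor.
pub fn to_vec3_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
    let b = 10f32.powi(digits);
    let t = t.to_vec3::<f32>()?;
    Ok(t.iter()
        .map(|v| {
            v.iter()
                .map(|v| v.iter().map(|x| f32::round(x * b) / b).collect())
                .collect()
        })
        .collect())
}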

@@ -1,11 +1,10 @@
 use candle_core::{
     quantized::{self, GgmlDType},
+    test_utils::to_vec2_round,
     Device, Result, Tensor,
 };
 use quantized::{k_quants, GgmlType};
-mod test_utils;
 use rand::prelude::*;
-use test_utils::to_vec2_round;
 
 const GGML_TEST_SIZE: usize = 32 * 128;

@@ -1,5 +1,4 @@
-mod test_utils;
-use candle_core::{DType, Device, IndexOp, Result, Tensor};
+use candle_core::{test_device, DType, Device, IndexOp, Result, Tensor};
 
 fn zeros(device: &Device) -> Result<()> {
     let tensor = Tensor::zeros((5, 2), DType::F32, device)?;

@@ -1,55 +0,0 @@
-#![allow(dead_code)]
-#[cfg(feature = "accelerate")]
-extern crate accelerate_src;
-
-use candle_core::{Result, Tensor};
-
-#[macro_export]
-macro_rules! test_device {
-    // TODO: Switch to generating the two last arguments automatically once concat_idents is
-    // stable. https://github.com/rust-lang/rust/issues/29599
-    ($fn_name: ident, $test_cpu: ident, $test_cuda: ident) => {
-        #[test]
-        fn $test_cpu() -> Result<()> {
-            $fn_name(&Device::Cpu)
-        }
-
-        #[cfg(feature = "cuda")]
-        #[test]
-        fn $test_cuda() -> Result<()> {
-            $fn_name(&Device::new_cuda(0)?)
-        }
-    };
-}
-
-pub fn to_vec1_round(t: &Tensor, digits: i32) -> Result<Vec<f32>> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec1::<f32>()?;
-    let t = t.iter().map(|t| f32::round(t * b) / b).collect();
-    Ok(t)
-}
-
-pub fn to_vec2_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<f32>>> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec2::<f32>()?;
-    let t = t
-        .iter()
-        .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect())
-        .collect();
-    Ok(t)
-}
-
-pub fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
-    let b = 10f32.powi(digits);
-    let t = t.to_vec3::<f32>()?;
-    let t = t
-        .iter()
-        .map(|t| {
-            t.iter()
-                .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect())
-                .collect()
-        })
-        .collect();
-    Ok(t)
-}
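For reference, all three helpers share one rounding rule, applied
elementwise: round(x * 10^digits) / 10^digits. A quick standalone check of
that rule in plain Rust, with illustrative values:

fn round_to(x: f32, digits: i32) -> f32 {
    let b = 10f32.powi(digits);
    f32::round(x * b) / b
}

fn main() {
    // 0.08512 * 10^4 = 851.2, which rounds to 851, giving 0.0851.
    assert_eq!(round_to(0.08512, 4), 0.0851);
    // round() rounds half away from zero, so negatives behave symmetrically.
    assert_eq!(round_to(-0.03949, 4), -0.0395);
}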