diff --git a/candle-core/tests/grad_tests.rs b/candle-core/tests/grad_tests.rs
index 612dffee..8632ec21 100644
--- a/candle-core/tests/grad_tests.rs
+++ b/candle-core/tests/grad_tests.rs
@@ -1,9 +1,9 @@
 use anyhow::{Context, Result};
 use candle::{Device, Shape, Tensor};
+mod test_utils;
 
-#[test]
-fn simple_grad() -> Result<()> {
-    let x = Tensor::var(&[3f32, 1., 4.], &Device::Cpu)?;
+fn simple_grad(device: &Device) -> Result<()> {
+    let x = Tensor::var(&[3f32, 1., 4.], device)?;
     let y = (((&x * &x)? + &x * 5f64)? + 4f64)?;
     let grads = y.backward()?;
     let grad_x = grads.get(&x).context("no grad for x")?;
@@ -15,12 +15,11 @@ fn simple_grad() -> Result<()> {
     Ok(())
 }
 
-#[test]
-fn matmul_grad() -> Result<()> {
+fn matmul_grad(device: &Device) -> Result<()> {
     let data: Vec<_> = (0..12).map(|i| i as f32).collect();
-    let x = Tensor::var_from_slice(&data, (2, 2, 3), &Device::Cpu)?;
+    let x = Tensor::var_from_slice(&data, (2, 2, 3), device)?;
     let data: Vec<_> = (0..12).map(|i| i as f32).collect();
-    let y = Tensor::var_from_slice(&data, (2, 3, 2), &Device::Cpu)?;
+    let y = Tensor::var_from_slice(&data, (2, 3, 2), device)?;
     let c = x.matmul(&y)?;
 
     let grads = c.backward()?;
@@ -38,3 +37,6 @@ fn matmul_grad() -> Result<()> {
     );
     Ok(())
 }
+
+test_device!(simple_grad, simple_grad_cpu, simple_grad_gpu);
+test_device!(matmul_grad, matmul_grad_cpu, matmul_grad_gpu);
diff --git a/candle-core/tests/tensor_tests.rs b/candle-core/tests/tensor_tests.rs
index 958c8d98..286c12e3 100644
--- a/candle-core/tests/tensor_tests.rs
+++ b/candle-core/tests/tensor_tests.rs
@@ -1,4 +1,3 @@
-// TODO: Also test the cuda backend.
 mod test_utils;
 use candle::{DType, Device, Result, Tensor};
 use test_utils::to_vec3_round;
diff --git a/candle-core/tests/test_utils.rs b/candle-core/tests/test_utils.rs
index 1d6a7dfd..9667b3e8 100644
--- a/candle-core/tests/test_utils.rs
+++ b/candle-core/tests/test_utils.rs
@@ -1,3 +1,4 @@
+#![allow(dead_code)]
 use candle::{Result, Tensor};
 
 #[macro_export]
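
For context: the test_device! invocations added at the bottom of grad_tests.rs rely on a macro defined in candle-core/tests/test_utils.rs, whose body is not part of this diff (only its first lines appear, to add #![allow(dead_code)]). The following is a minimal sketch of the pattern such a macro typically implements, assuming candle's Device::new_cuda(0) constructor and a cargo feature named "cuda"; it is an illustration, not the repository's actual definition.

// Sketch only: generates a CPU #[test] and a feature-gated CUDA #[test]
// from one device-parameterized test function. The real macro lives in
// test_utils.rs and may differ in detail.
#[macro_export]
macro_rules! test_device {
    ($fn_name: ident, $test_cpu: ident, $test_cuda: ident) => {
        #[test]
        fn $test_cpu() -> Result<()> {
            // Run the shared test body against the CPU backend.
            $fn_name(&Device::Cpu)
        }

        #[cfg(feature = "cuda")]
        #[test]
        fn $test_cuda() -> Result<()> {
            // Run the same body on GPU 0 when the "cuda" feature is enabled.
            $fn_name(&Device::new_cuda(0)?)
        }
    };
}

Under this reading, test_device!(simple_grad, simple_grad_cpu, simple_grad_gpu); expands to a plain CPU test plus a CUDA variant that only compiles with the "cuda" feature, which is what lets each #[test]-free helper like simple_grad(device: &Device) exercise both backends and resolves the removed "TODO: Also test the cuda backend." comment.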