diff --git a/examples/basics.rs b/examples/basics.rs
index f01f7871..d5d4f72b 100644
--- a/examples/basics.rs
+++ b/examples/basics.rs
@@ -2,7 +2,7 @@ use anyhow::Result;
 use candle::{Device, Tensor};

 fn main() -> Result<()> {
-    let x = Tensor::var(&[3f32, 1., 4.], Device::Cpu)?;
+    let x = Tensor::var(&[3f32, 1., 4.], &Device::Cpu)?;
     let y = (((&x * &x)? + &x * 5f64)? + 4f64)?;
     println!("{:?}", y.to_vec1::<f32>()?);
     Ok(())
diff --git a/examples/cuda_basics.rs b/examples/cuda_basics.rs
index e1dca6a9..a9647469 100644
--- a/examples/cuda_basics.rs
+++ b/examples/cuda_basics.rs
@@ -1,9 +1,12 @@
 use anyhow::Result;
-use candle::{DType, Device, Tensor};
+use candle::{Device, Tensor};

 fn main() -> Result<()> {
     let device = Device::new_cuda(0)?;
-    let x = Tensor::zeros(4, DType::F32, device)?;
+    let x = Tensor::new(&[3f32, 1., 4., 1., 5.], &device)?;
+    let y = Tensor::new(&[2f32, 7., 1., 8., 2.], &device)?;
     println!("{:?}", x.to_vec1::<f32>()?);
+    let z = (x + y)?;
+    println!("{:?}", z.to_vec1::<f32>()?);
     Ok(())
 }
diff --git a/src/tensor.rs b/src/tensor.rs
index be642329..a1262334 100644
--- a/src/tensor.rs
+++ b/src/tensor.rs
@@ -84,7 +84,7 @@ impl Tensor {
     fn ones_impl<S: Into<Shape>>(
         shape: S,
         dtype: DType,
-        device: Device,
+        device: &Device,
         is_variable: bool,
     ) -> Result<Self> {
         let shape = shape.into();
@@ -101,22 +101,22 @@ impl Tensor {
         Ok(Self(Arc::new(tensor_)))
     }

-    pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: Device) -> Result<Self> {
+    pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
         Self::ones_impl(shape, dtype, device, false)
     }

-    pub fn ones_var<S: Into<Shape>>(shape: S, dtype: DType, device: Device) -> Result<Self> {
+    pub fn ones_var<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
         Self::ones_impl(shape, dtype, device, true)
     }

     pub fn ones_like(&self) -> Result<Self> {
-        Tensor::ones(self.shape(), self.dtype(), self.device())
+        Tensor::ones(self.shape(), self.dtype(), &self.device())
     }

     fn zeros_impl<S: Into<Shape>>(
         shape: S,
         dtype: DType,
-        device: Device,
+        device: &Device,
         is_variable: bool,
     ) -> Result<Self> {
         let shape = shape.into();
@@ -133,21 +133,21 @@ impl Tensor {
         Ok(Self(Arc::new(tensor_)))
     }

-    pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: Device) -> Result<Self> {
+    pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
         Self::zeros_impl(shape, dtype, device, false)
     }

-    pub fn zeros_var<S: Into<Shape>>(shape: S, dtype: DType, device: Device) -> Result<Self> {
+    pub fn zeros_var<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
         Self::zeros_impl(shape, dtype, device, true)
     }

     pub fn zeros_like(&self) -> Result<Self> {
-        Tensor::zeros(self.shape(), self.dtype(), self.device())
+        Tensor::zeros(self.shape(), self.dtype(), &self.device())
     }

     pub fn new_impl<A: crate::device::NdArray>(
         array: A,
-        device: Device,
+        device: &Device,
         is_variable: bool,
     ) -> Result<Self> {
         let shape = array.shape()?;
@@ -164,11 +164,11 @@ impl Tensor {
         Ok(Self(Arc::new(tensor_)))
     }

-    pub fn new<A: crate::device::NdArray>(array: A, device: Device) -> Result<Self> {
+    pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> {
         Self::new_impl(array, device, false)
     }

-    pub fn var<A: crate::device::NdArray>(array: A, device: Device) -> Result<Self> {
+    pub fn var<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> {
         Self::new_impl(array, device, true)
     }

diff --git a/tests/grad_tests.rs b/tests/grad_tests.rs
index 432b1520..56186e5d 100644
--- a/tests/grad_tests.rs
+++ b/tests/grad_tests.rs
@@ -3,7 +3,7 @@ use candle::{Device, Tensor};

 #[test]
 fn simple_grad() -> Result<()> {
-    let x = Tensor::var(&[3f32, 1., 4.], Device::Cpu)?;
+    let x = Tensor::var(&[3f32, 1., 4.], &Device::Cpu)?;
     let y = (((&x * &x)? + &x * 5f64)? + 4f64)?;
     let grads = y.backward()?;
     let grad_x = grads.get(&x).context("no grad for x")?;
diff --git a/tests/tensor_tests.rs b/tests/tensor_tests.rs
index fb2d84d9..81c2e801 100644
--- a/tests/tensor_tests.rs
+++ b/tests/tensor_tests.rs
@@ -2,7 +2,7 @@ use candle::{DType, Device, Result, Tensor};

 #[test]
 fn zeros() -> Result<()> {
-    let tensor = Tensor::zeros((5, 2), DType::F32, Device::Cpu)?;
+    let tensor = Tensor::zeros((5, 2), DType::F32, &Device::Cpu)?;
     let (dim1, dim2) = tensor.shape().r2()?;
     assert_eq!(dim1, 5);
     assert_eq!(dim2, 2);
@@ -11,7 +11,7 @@ fn zeros() -> Result<()> {

 #[test]
 fn add_mul() -> Result<()> {
-    let tensor = Tensor::new(&[3f32, 1., 4.], Device::Cpu)?;
+    let tensor = Tensor::new(&[3f32, 1., 4.], &Device::Cpu)?;
     let dim1 = tensor.shape().r1()?;
     assert_eq!(dim1, 3);
     let content: Vec<f32> = tensor.to_vec1()?;
@@ -28,7 +28,7 @@ fn add_mul() -> Result<()> {
 #[test]
 fn tensor_2d() -> Result<()> {
     let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
-    let tensor = Tensor::new(data, Device::Cpu)?;
+    let tensor = Tensor::new(data, &Device::Cpu)?;
     let dims = tensor.shape().r2()?;
     assert_eq!(dims, (2, 5));
     let content: Vec<Vec<f32>> = tensor.to_vec2()?;
@@ -39,9 +39,9 @@ fn tensor_2d() -> Result<()> {
 #[test]
 fn binary_op() -> Result<()> {
     let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
-    let tensor = Tensor::new(data, Device::Cpu)?;
+    let tensor = Tensor::new(data, &Device::Cpu)?;
     let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]];
-    let tensor2 = Tensor::new(data2, Device::Cpu)?;
+    let tensor2 = Tensor::new(data2, &Device::Cpu)?;
     let tensor = (&tensor + (&tensor * &tensor)? / (&tensor + &tensor2))?;
     let dims = tensor.shape().r2()?;
     assert_eq!(dims, (2, 5));
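
For reference, a minimal sketch of the calling convention after this change: a `Device` is now borrowed (`&Device`) instead of moved, so a single handle can be reused across every constructor. The function name `demo` is a placeholder; everything else reuses only the APIs that appear in the diff above (`Tensor::new`, `Tensor::var`, `Tensor::zeros`, `shape().r2()`, `to_vec1`).

use candle::{DType, Device, Result, Tensor};

fn demo() -> Result<()> {
    // One device handle, borrowed by every constructor below.
    let device = Device::Cpu;
    let zeros = Tensor::zeros((5, 2), DType::F32, &device)?;
    let (dim1, dim2) = zeros.shape().r2()?;
    assert_eq!((dim1, dim2), (5, 2));
    // `new` builds a plain tensor, `var` a trainable one; both borrow the device.
    let x = Tensor::new(&[3f32, 1., 4.], &device)?;
    let y = Tensor::var(&[2f32, 7., 1.], &device)?;
    let z = (x + y)?;
    println!("{:?}", z.to_vec1::<f32>()?);
    Ok(())
}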