From 944d70bd9a4d65dcef38c6f6706f0e3d54d7230b Mon Sep 17 00:00:00 2001
From: Laurent Mazare
Date: Tue, 25 Jul 2023 09:12:14 +0100
Subject: [PATCH] Add a test for scatter add. (#238)

* Add a test for scatter add (segfaults on gpus for now).

* Bugfix for the scatter add cuda kernel.
---
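Reviewer note (not part of the commit message or the diff): before this patch the host-side launch and the kernel signature disagreed. The kernel expected an ids_dim_size argument that the Rust side never passed, and the slot where the kernel reads dst_dim_size received the ids dimension size instead, so the remaining arguments were shifted and the kernel indexed out of bounds, hence the GPU segfault the new test exposes. The fix drops the redundant ids_dim_size (ids and the source share a shape, so src_dim_size already bounds the loop) and passes the destination's size along the scatter dimension (dst_dim_sz taken from dst_shape). The sketch below is a minimal CPU reference of the index arithmetic the fixed kernel performs; it is plain Rust with no candle dependency, and the name scatter_add_ref plus the flat-buffer layout are illustrative assumptions, not candle API.

// Minimal CPU sketch of the index arithmetic the fixed CUDA kernel performs.
// `scatter_add_ref` and the flat-buffer layout are illustrative only, not
// candle API. `ids` has the same shape as `src`; `dst` may be larger along the
// scatter dimension, which is why the destination offset is built from
// `dst_dim_size` rather than the ids dimension size.
fn scatter_add_ref(
    dst: &mut [f32],
    ids: &[usize],
    src: &[f32],
    left_size: usize,
    src_dim_size: usize,
    dst_dim_size: usize,
    right_size: usize,
) {
    for pre in 0..left_size {
        for j in 0..src_dim_size {
            for post in 0..right_size {
                // Same formulas as the kernel body after the fix.
                let src_i = (pre * src_dim_size + j) * right_size + post;
                let idx = ids[src_i];
                let dst_i = (pre * dst_dim_size + idx) * right_size + post;
                dst[dst_i] += src[src_i];
            }
        }
    }
}

fn main() {
    // Mirrors the dim = 1 case from the new test: a (4, 3) source scattered
    // into a (4, 5) destination of ones, so left_size = 4 and right_size = 1.
    let src: Vec<f32> = (0..12).map(|v| v as f32).collect();
    let ids = vec![0, 1, 2, 3, 4, 0, 3, 3, 1, 2, 0, 4];
    let mut dst = vec![1f32; 4 * 5];
    scatter_add_ref(&mut dst, &ids, &src, 4, 3, 5, 1);
    assert_eq!(&dst[..5], &[1.0, 2.0, 3.0, 1.0, 1.0]);
    assert_eq!(&dst[5..10], &[6.0, 1.0, 1.0, 4.0, 5.0]);
}

With left_size = 1, src_dim_size = 4, dst_dim_size = 6 and right_size = 3, the same flattened ids reproduce the (6, 3) expectations of the dim = 0 test case.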
 candle-core/src/cuda_backend.rs   |  6 ++---
 candle-core/tests/tensor_tests.rs | 41 +++++++++++++++++++++++++++++++
 candle-kernels/src/indexing.cu    |  8 +++---
 3 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/candle-core/src/cuda_backend.rs b/candle-core/src/cuda_backend.rs
index c550d982..9cc454f1 100644
--- a/candle-core/src/cuda_backend.rs
+++ b/candle-core/src/cuda_backend.rs
@@ -846,7 +846,7 @@ impl<'a> Map2InPlace for ScatterAdd<'a> {
     fn f(
         &self,
         dst: &mut CudaSlice<T>,
-        _dst_shape: &Shape,
+        dst_shape: &Shape,
         src: &CudaSlice<T>,
         src_l: &Layout,
         dev: &CudaDevice,
@@ -874,11 +874,11 @@
         let left_sz: usize = src_l.dims()[..dim].iter().product();
         let right_sz: usize = src_l.dims()[dim + 1..].iter().product();
         let src_dim_sz = src_l.dims()[dim];
-        let ids_dim_sz = ids_l.dims()[dim];
+        let dst_dim_sz = dst_shape.dims()[dim];
         let cfg = LaunchConfig::for_num_elems((left_sz * right_sz) as u32);
         let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?;
         // SAFETY: Set later by running the kernel.
-        let params = (ids, &src, dst, left_sz, src_dim_sz, ids_dim_sz, right_sz);
+        let params = (ids, &src, dst, left_sz, src_dim_sz, dst_dim_sz, right_sz);
         // SAFETY: ffi.
         unsafe { func.launch(cfg, params) }.w()?;
         Ok(())
diff --git a/candle-core/tests/tensor_tests.rs b/candle-core/tests/tensor_tests.rs
index 356e64d3..a38b6d3d 100644
--- a/candle-core/tests/tensor_tests.rs
+++ b/candle-core/tests/tensor_tests.rs
@@ -389,6 +389,46 @@ fn index_add(device: &Device) -> Result<()> {
     Ok(())
 }
 
+fn scatter_add(device: &Device) -> Result<()> {
+    let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
+    assert_eq!(
+        t.to_vec2::<f32>()?,
+        &[
+            [0.0, 1.0, 2.0],
+            [3.0, 4.0, 5.0],
+            [6.0, 7.0, 8.0],
+            [9.0, 10.0, 11.0]
+        ]
+    );
+    let ids = Tensor::new(&[[0u32, 1, 2], [3, 4, 0], [3, 3, 1], [2, 0, 4]], device)?;
+    let init = Tensor::ones((4, 5), DType::F32, device)?;
+    let hs = init.scatter_add(&ids, &t, 1)?;
+    assert_eq!(
+        hs.to_vec2::<f32>()?,
+        &[
+            [1.0, 2.0, 3.0, 1.0, 1.0],
+            [6.0, 1.0, 1.0, 4.0, 5.0],
+            [1.0, 9.0, 1.0, 14.0, 1.0],
+            [11.0, 1.0, 10.0, 1.0, 12.0]
+        ]
+    );
+
+    let init = Tensor::ones((6, 3), DType::F32, device)?;
+    let hs = init.scatter_add(&ids, &t, 0)?;
+    assert_eq!(
+        hs.to_vec2::<f32>()?,
+        &[
+            [1.0, 11.0, 6.0],
+            [1.0, 2.0, 9.0],
+            [10.0, 1.0, 3.0],
+            [10.0, 8.0, 1.0],
+            [1.0, 5.0, 12.0],
+            [1.0, 1.0, 1.0]
+        ]
+    );
+    Ok(())
+}
+
 fn gather(device: &Device) -> Result<()> {
     let ids = Tensor::new(&[[0u32], [2u32], [1u32], [0u32]], device)?;
     let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
@@ -588,3 +628,4 @@ test_device!(broadcasting, broadcasting_cpu, broadcasting_gpu);
 test_device!(index_select, index_select_cpu, index_select_gpu);
 test_device!(index_add, index_add_cpu, index_add_gpu);
 test_device!(gather, gather_cpu, gather_gpu);
+test_device!(scatter_add, scatter_add_cpu, scatter_add_gpu);
diff --git a/candle-kernels/src/indexing.cu b/candle-kernels/src/indexing.cu
index fb2d56b2..359db498 100644
--- a/candle-kernels/src/indexing.cu
+++ b/candle-kernels/src/indexing.cu
@@ -144,7 +144,6 @@ extern "C" __global__ void FN_NAME( \
 template <typename T, typename I>
 __device__ void scatter_add(
     const I *ids,
-    const size_t ids_dim_size,
     const T *inp,
     T *out,
     const size_t left_size,
@@ -156,8 +155,8 @@ __device__ void scatter_add(
     for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
         const size_t pre = i / right_size;
         const size_t post = i % right_size;
-        for (unsigned int j = 0; j < ids_dim_size; ++j) {
-            const size_t src_i = (pre * ids_dim_size + j) * right_size + post;
+        for (unsigned int j = 0; j < src_dim_size; ++j) {
+            const size_t src_i = (pre * src_dim_size + j) * right_size + post;
             const size_t idx = ids[src_i];
             const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
             out[dst_i] += inp[src_i];
@@ -168,14 +167,13 @@ __device__ void scatter_add(
 #define SA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
 extern "C" __global__ void FN_NAME(  \
     const INDEX_TYPENAME *ids, \
-    const size_t ids_dim_size, \
     const TYPENAME *inp, \
     TYPENAME *out, \
     const size_t left_size, \
     const size_t src_dim_size, \
     const size_t dst_dim_size, \
     const size_t right_size \
-) { scatter_add(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); } \
+) { scatter_add(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); } \
 
 #if __CUDA_ARCH__ >= 800