Embedding bugfix.

This commit is contained in:
laurent
2023-06-27 09:56:19 +01:00
parent 18707891b7
commit 6c4a960b15
2 changed files with 4 additions and 4 deletions

View File

@@ -3,8 +3,8 @@ use candle::{Device, Tensor};
 fn main() -> Result<()> {
     let device = Device::new_cuda(0)?;
-    let ids = Tensor::new(&[0u32, 3u32, 1u32], &device)?;
-    let t = Tensor::new(&[[0f32, 1f32], [1f32, 2f32], [2f32, 3f32]], &device)?;
+    let ids = Tensor::new(&[0u32, 2u32, 1u32], &device)?;
+    let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], &device)?;
     let hs = Tensor::embedding(&ids, &t)?;
     println!("> {:?}", hs.to_vec2::<f32>());

View File

@@ -18,13 +18,13 @@ extern "C" __global__ void FN_NAME( \
     const size_t *strides = info + num_dims; \
     if (is_contiguous(num_dims, dims, strides)) { \
         for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
-            memcpy(out + i * h_size, inp + ids[i], h_size); \
+            memcpy(&out[i * h_size], &inp[ids[i] * h_size], h_size * sizeof(TYPENAME)); \
         } \
     } \
     else { \
         for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
             unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
-            memcpy(out + i * h_size, inp + ids[i], h_size); \
+            memcpy(&out[i * h_size], &inp[ids[strided_i] * h_size], h_size * sizeof(TYPENAME)); \
         } \
     } \
 } \