Mirror of https://github.com/huggingface/candle.git (synced 2025-06-19 19:58:35 +00:00)

Compare commits
25 commits:
03641293ee
064ba17bd7
e8ee253ee0
8bd3d6b94b
6a3ca7da0c
586b6f6fff
e4b0cc59f5
0a6e0a8c9a
972903021c
6bc92e63cb
aa04015098
8b5059e951
26540641c1
34d83377f6
77197379cc
916a8c5464
243e83f2b9
cf27868b57
40c3e1bd5a
ece4c69a68
4eeaf205d6
f419a38e1a
361f2ad2af
931432ed55
0404a3eb5b
@@ -61,7 +61,7 @@ tracing-subscriber = "0.3.7"
 wav = "1.0.0"
 yoke = { version = "0.7.2", features = ["derive"] }
 zip = { version = "0.6.6", default-features = false }
-metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }
+metal = { version = "0.27.0", features = ["mps"]}
 
 [profile.release-with-debug]
 inherits = "release"
@@ -201,10 +201,9 @@ impl Device {
                     Ok(Storage::Cuda(storage))
                 }
             }
-            Device::Metal(_device) => {
-                // let storage = device.rand_uniform(shape, dtype, lo, up)?;
-                // Ok(Storage::Metal(storage))
-                crate::bail!("Metal rand_uniform not implemented")
+            Device::Metal(device) => {
+                let storage = device.rand_uniform(shape, dtype, lo, up)?;
+                Ok(Storage::Metal(storage))
             }
         }
     }
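With this change uniform random initialization runs on Metal through the same per-device dispatch as CPU and CUDA instead of bailing. A minimal caller-side sketch (the fallback-to-CPU logic here is illustrative, not candle's API; Device::new_metal and Tensor::rand are the real entry points):

    use candle_core::{Device, Tensor};

    // Sketch: draw a uniform tensor on the Metal device when one exists,
    // falling back to the CPU backend otherwise.
    fn rand_on_best_device() -> candle_core::Result<Tensor> {
        let device = Device::new_metal(0).unwrap_or(Device::Cpu);
        // rand_uniform now reaches the Metal backend instead of bailing.
        Tensor::rand(0f32, 1f32, (2, 3), &device)
    }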
(File diff suppressed because it is too large.)
@@ -1863,10 +1863,7 @@ impl Tensor {
                 Storage::Metal(metal.storage_from_cpu_storage(storage)?)
             }
             (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
-            (Storage::Metal(storage), Device::Cpu) => {
-                // println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
-                Storage::Cpu(storage.to_cpu_storage()?)
-            }
+            (Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
             (Storage::Cuda(storage), Device::Cuda(cuda)) => {
                 // TODO: Avoid passing through the cpu storage here, especially if the gpu ids
                 // are the same.
@@ -10,7 +10,7 @@ categories = ["science"]
 license = "MIT OR Apache-2.0"
 
 [dependencies]
-metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }
+metal = { version = "0.27.0", features = ["mps"]}
 once_cell = "1.18.0"
 thiserror = "1"
 tracing = "0.1.37"
@@ -109,16 +109,16 @@ kernel void FN_NAME##_strided( \
 } \
 
 
-AFFINE(affine_float, float)
-AFFINE(affine_half, half)
-POWF(powf_float, float)
-POWF(powf_half, half)
-ELU(elu_float, float)
-ELU(elu_half, half)
+AFFINE(affine_f32, float)
+AFFINE(affine_f16, half)
+POWF(powf_f32, float)
+POWF(powf_f16, half)
+ELU(elu_f32, float)
+ELU(elu_f16, half)
 
 
 #if __METAL_VERSION__ >= 310
-AFFINE(affine_bfloat, bfloat);
-POWF(powf_bfloat, bfloat);
-ELU(elu_bfloat, bfloat);
+AFFINE(affine_bf16, bfloat);
+POWF(powf_bf16, bfloat);
+ELU(elu_bf16, bfloat);
 #endif
@@ -1,5 +1,8 @@
 #include <metal_stdlib>
 
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
 METAL_FUNC uint get_strided_index(
     uint idx,
     constant size_t &num_dims,
@@ -22,15 +25,15 @@ kernel void FN_NAME( \
     constant size_t &dim, \
     device const TYPENAME *left, \
     device const TYPENAME *right, \
-    device TYPENAME *output, \
-    uint thread_position_in_grid [[ thread_position_in_grid ]] \
+    device OUT_TYPENAME *output, \
+    uint tid [[ thread_position_in_grid ]] \
 ) { \
-    if (thread_position_in_grid >= dim) { \
+    if (tid >= dim) { \
         return; \
     } \
-    TYPENAME x = left[thread_position_in_grid]; \
-    TYPENAME y = right[thread_position_in_grid]; \
-    output[thread_position_in_grid] = OUT_TYPENAME(FN); \
+    TYPENAME x = left[tid]; \
+    TYPENAME y = right[tid]; \
+    output[tid] = OUT_TYPENAME(FN); \
 }\
 kernel void FN_NAME_STRIDED( \
     constant size_t &dim, \
@@ -40,33 +43,48 @@ kernel void FN_NAME_STRIDED( \
     constant size_t *right_strides, \
     device const TYPENAME *left, \
     device const TYPENAME *right, \
-    device TYPENAME *output, \
-    uint thread_position_in_grid [[ thread_position_in_grid ]] \
+    device OUT_TYPENAME *output, \
+    uint tid [[ thread_position_in_grid ]] \
 ) { \
-    if (thread_position_in_grid >= dim) { \
+    if (tid >= dim) { \
         return; \
     } \
-    TYPENAME x = left[get_strided_index(thread_position_in_grid, num_dims, dims, left_strides)]; \
-    TYPENAME y = right[get_strided_index(thread_position_in_grid, num_dims, dims, right_strides)]; \
-    output[thread_position_in_grid] = OUT_TYPENAME(FN); \
+    TYPENAME x = left[get_strided_index(tid, num_dims, dims, left_strides)]; \
+    TYPENAME y = right[get_strided_index(tid, num_dims, dims, right_strides)]; \
+    output[tid] = OUT_TYPENAME(FN); \
 }
 
 #define BINARY_OP(FN, NAME) \
-BINARY(FN, float, float, NAME##_float, NAME##_float_strided); \
-BINARY(FN, half, half, NAME##_half, NAME##_half_strided);
+BINARY(FN, float, float, NAME##_f32, NAME##_f32_strided); \
+BINARY(FN, half, half, NAME##_f16, NAME##_f16_strided);
 
 #define BFLOAT_BINARY_OP(FN, NAME) \
-BINARY(FN, bfloat, bfloat, NAME##_bfloat, NAME##_bfloat_strided);
+BINARY(FN, bfloat, bfloat, NAME##_bf16, NAME##_bf16_strided);
 
+#define BINARY_OP_OUT(NAME, FN) \
+BINARY(FN, float, uint8_t, NAME##_f32, NAME##_f32_strided); \
+BINARY(FN, half, uint8_t, NAME##_f16, NAME##_f16_strided);
+
 BINARY_OP(x + y, add)
 BINARY_OP(x - y, sub)
 BINARY_OP(x * y, mul)
 BINARY_OP(x / y, div)
+BINARY_OP(MIN(x, y), min)
+BINARY_OP(MAX(x, y), max)
+
+BINARY_OP_OUT(eq, x == y)
+BINARY_OP_OUT(ne, x != y)
+BINARY_OP_OUT(le, x <= y)
+BINARY_OP_OUT(lt, x < y)
+BINARY_OP_OUT(ge, x >= y)
+BINARY_OP_OUT(gt, x > y)
 
 #if __METAL_VERSION__ >= 310
 BFLOAT_BINARY_OP(x + y, add)
 BFLOAT_BINARY_OP(x - y, sub)
 BFLOAT_BINARY_OP(x * y, mul)
 BFLOAT_BINARY_OP(x / y, div)
+BFLOAT_BINARY_OP(MIN(x, y), min)
+BFLOAT_BINARY_OP(MAX(x, y), max)
 #endif
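The BINARY_OP_OUT kernels always write uint8_t, which is how comparison results come back to the host as boolean masks. A pure-Rust reference model of what the generated eq_f32 kernel computes; it is not crate code, just a known-good value to test the Metal output against:

    // Reference model of the generated `eq_f32` kernel: compares two f32
    // slices elementwise and emits a u8 mask, matching the uint8_t output
    // type that BINARY_OP_OUT gives the comparison kernels.
    fn eq_f32_reference(left: &[f32], right: &[f32]) -> Vec<u8> {
        left.iter()
            .zip(right.iter())
            .map(|(x, y)| u8::from(x == y))
            .collect()
    }

    fn main() {
        let mask = eq_f32_reference(&[1.0, 2.0, 3.0], &[1.0, 0.0, 3.0]);
        assert_eq!(mask, vec![1, 0, 1]);
    }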
@@ -48,6 +48,7 @@ kernel void FN_NAME_STRIDED( \
 CAST(cast_u32_f32, cast_u32_f32_strided, uint32_t, float)
 CAST(cast_u32_u8, cast_u32_u8_strided, uint32_t, uint8_t)
 CAST(cast_u8_u32, cast_u8_u32_strided, uint8_t, uint32_t)
+CAST(cast_u8_f32, cast_u8_f32_strided, uint8_t, float)
 CAST(cast_f16_f32, cast_f16_f32_strided, half, float)
 CAST(cast_f32_f16, cast_f32_f16_strided, float, half)
 
@@ -1,6 +1,34 @@
 #include <metal_stdlib>
 using namespace metal;
 
+template<typename TYPENAME, typename INDEX_TYPENAME>
+METAL_FUNC void index(
+    constant size_t &dst_size,
+    constant size_t &left_size,
+    constant size_t &src_dim_size,
+    constant size_t &right_size,
+    constant size_t &ids_size,
+    const device TYPENAME *input,
+    const device INDEX_TYPENAME *input_ids,
+    device TYPENAME *output,
+    uint tid [[ thread_position_in_grid ]]
+) {
+    if (tid >= dst_size) {
+        return;
+    }
+    const size_t id_i = (tid / right_size) % ids_size;
+    const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1));
+    const size_t right_rank_i = tid % right_size;
+    const size_t left_rank_i = tid / right_size / ids_size;
+    /*
+    // Force prevent out of bounds indexing
+    // since there doesn't seem to be a good way to force crash
+    // No need to check for zero we're only allowing unsized.
+    */
+    const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i;
+    output[tid] = input[src_i];
+}
+
 # define INDEX_OP(NAME, INDEX_TYPENAME, TYPENAME) \
 kernel void NAME( \
     constant size_t &dst_size, \
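The flat thread id decomposes as (left, id, right): dividing by right_size strips the fastest-moving axis, and the modulo by ids_size isolates the position in the index list; out-of-range ids are clamped rather than trapped. A small host-side check of that arithmetic (plain Rust mirroring the kernel's formulas; not crate code):

    // Mirror of the kernel's index arithmetic for an index_select along the
    // middle dim of a [left_size, ids_size, right_size] output.
    fn src_index(tid: usize, right_size: usize, ids_size: usize, src_dim_size: usize, ids: &[usize]) -> usize {
        let id_i = (tid / right_size) % ids_size;
        let input_i = ids[id_i].min(src_dim_size - 1); // clamp like the kernel
        let right_rank_i = tid % right_size;
        let left_rank_i = tid / right_size / ids_size;
        left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i
    }

    fn main() {
        // src dim has 5 rows; we select rows [4, 0]; right_size = 3.
        let ids = [4usize, 0];
        // output element (left=0, id=1, right=2) -> tid = 1 * 3 + 2 = 5
        assert_eq!(src_index(5, 3, ids.len(), 5, &ids), 2);
    }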
@@ -11,93 +39,160 @@ kernel void NAME( \
     const device TYPENAME *input, \
     const device INDEX_TYPENAME *input_ids, \
     device TYPENAME *output, \
-    uint gid [[ thread_position_in_grid ]] \
+    uint tid [[ thread_position_in_grid ]] \
 ) { \
-    if (gid >= dst_size) { \
-        return; \
-    } \
-    const size_t id_i = (gid / right_size) % ids_size; \
-    const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1)); \
-    const size_t right_rank_i = gid % right_size; \
-    const size_t left_rank_i = gid / right_size / ids_size; \
-    /* \
-    // Force prevent out of bounds indexing \
-    // since there doesn't seem to be a good way to force crash \
-    // No need to check for zero we're only allowing unsized. \
-    */ \
-    const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i; \
-    output[gid] = input[src_i]; \
+    index<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \
 }
 
 
-template <typename T, typename I>
-void index_add(
-    device I *ids [[buffer(0)]],
-    device T *inp [[buffer(1)]],
-    device T *out [[buffer(2)]],
-
-    constant uint &ids_dim_size,
-    constant uint &left_size,
-    constant uint &dst_dim_size,
-    constant uint &right_size,
-
-    uint gid [[ thread_position_in_grid ]] \
+template<typename TYPENAME, typename INDEX_TYPENAME>
+METAL_FUNC void gather(
+    constant size_t &dst_size,
+    constant size_t &left_size,
+    constant size_t &src_dim_size,
+    constant size_t &right_size,
+    constant size_t &ids_size,
+    const device TYPENAME *input,
+    const device INDEX_TYPENAME *input_ids,
+    device TYPENAME *output,
+    uint tid [[ thread_position_in_grid ]]
 ) {
-
-    if (gid >= left_size * right_size) {
+    if (tid >= dst_size) {
         return;
     }
+    const INDEX_TYPENAME input_i = input_ids[tid];
+    const size_t right_rank_i = tid % right_size;
+    const size_t left_rank_i = tid / right_size / ids_size;
+    const size_t src_i = (left_rank_i * src_dim_size + input_i) * right_size + right_rank_i;
+    output[tid] = input[src_i];
+}
 
-    const uint i = gid;
-    const uint pre = i / right_size;
-    const uint post = i % right_size;
+# define GATHER_OP(NAME, INDEX_TYPENAME, TYPENAME) \
+kernel void NAME( \
+    constant size_t &dst_size, \
+    constant size_t &left_size, \
+    constant size_t &src_dim_size, \
+    constant size_t &right_size, \
+    constant size_t &ids_size, \
+    const device TYPENAME *input, \
+    const device INDEX_TYPENAME *input_ids, \
+    device TYPENAME *output, \
+    uint tid [[ thread_position_in_grid ]] \
+) { \
+    gather<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \
+}
 
-    for (uint j = 0; j < ids_dim_size; j++) {
-        const uint idx = ids[j];
-        const uint src_i = (pre * ids_dim_size + j) * right_size + post;
-        const uint dst_i = (pre * dst_dim_size + idx) * right_size + post;
-        out[dst_i] += inp[src_i];
+template<typename TYPENAME, typename INDEX_TYPENAME>
+METAL_FUNC void scatter_add(
+    constant size_t &dst_size,
+    constant size_t &left_size,
+    constant size_t &src_dim_size,
+    constant size_t &right_size,
+    constant size_t &dst_dim_size,
+    const device TYPENAME *input,
+    const device INDEX_TYPENAME *input_ids,
+    device TYPENAME *output,
+    uint tid [[ thread_position_in_grid ]]
+) {
+    if (tid >= dst_size) {
+        return;
+    }
+    const size_t right_rank_i = tid % right_size;
+    const size_t left_rank_i = tid / right_size;
+    for (unsigned int j = 0; j < src_dim_size; ++j) {
+        const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
+        const INDEX_TYPENAME idx = input_ids[src_i];
+        const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
+        output[dst_i] += input[src_i];
     }
 }
 
-#define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
-kernel void FN_NAME( \
-    device INDEX_TYPENAME *ids [[buffer(0)]], \
-    device TYPENAME *inp [[buffer(1)]], \
-    device TYPENAME *out [[buffer(2)]], \
-    constant uint &ids_dim_size, \
-    constant uint &left_size, \
-    constant uint &dst_dim_size, \
-    constant uint &right_size, \
-    uint gid [[ thread_position_in_grid ]] \
-) { index_add<TYPENAME, INDEX_TYPENAME>(ids, inp, out, ids_dim_size, left_size, dst_dim_size, right_size, gid); } \
+# define SCATTER_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
+kernel void NAME( \
+    constant size_t &dst_size, \
+    constant size_t &left_size, \
+    constant size_t &src_dim_size, \
+    constant size_t &right_size, \
+    constant size_t &dst_dim_size, \
+    const device TYPENAME *input, \
+    const device INDEX_TYPENAME *input_ids, \
+    device TYPENAME *output, \
+    uint tid [[ thread_position_in_grid ]] \
+) { \
+    scatter_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, input, input_ids, output, tid); \
+}
+
+template<typename TYPENAME, typename INDEX_TYPENAME>
+METAL_FUNC void index_add(
+    constant size_t &dst_size,
+    constant size_t &left_size,
+    constant size_t &src_dim_size,
+    constant size_t &right_size,
+    constant size_t &dst_dim_size,
+    constant size_t &ids_dim_size,
+    const device TYPENAME *input,
+    const device INDEX_TYPENAME *input_ids,
+    device TYPENAME *output,
+    uint tid [[ thread_position_in_grid ]]
+) {
+    if (tid >= dst_size) {
+        return;
+    }
+    const size_t right_rank_i = tid % right_size;
+    const size_t left_rank_i = tid / right_size;
+    for (unsigned int j = 0; j < ids_dim_size; ++j) {
+        const INDEX_TYPENAME idx = input_ids[j];
+        const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
+        const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
+        output[dst_i] += input[src_i];
+    }
+}
+
+# define INDEX_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
+kernel void NAME( \
+    constant size_t &dst_size, \
+    constant size_t &left_size, \
+    constant size_t &src_dim_size, \
+    constant size_t &right_size, \
+    constant size_t &dst_dim_size, \
+    constant size_t &ids_dim_size, \
+    const device TYPENAME *input, \
+    const device INDEX_TYPENAME *input_ids, \
+    device TYPENAME *output, \
+    uint tid [[ thread_position_in_grid ]] \
+) { \
+    index_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, ids_dim_size, input, input_ids, output, tid); \
+}
 
 INDEX_OP(is_u32_f32, uint, float)
 INDEX_OP(is_u32_f16, uint, half)
+GATHER_OP(gather_u32_f32, uint, float)
+GATHER_OP(gather_u32_f16, uint, half)
+SCATTER_ADD_OP(sa_u32_f32, uint, float)
+SCATTER_ADD_OP(sa_u32_f16, uint, half)
 
 
 #if __METAL_VERSION__ >= 310
-IA_OP(bfloat, int64_t, ia_i64_bf16)
-IA_OP(bfloat, uint32_t, ia_u32_bf16)
-IA_OP(bfloat, uint8_t, ia_u8_bf16)
+INDEX_ADD_OP(ia_i64_bf16, int64_t, bfloat)
+INDEX_ADD_OP(ia_u32_bf16, uint32_t, bfloat)
+INDEX_ADD_OP(ia_u8_bf16, uint8_t, bfloat)
 #endif
 
-IA_OP(half, uint32_t, ia_u32_f16)
-IA_OP(half, uint8_t, ia_u8_f16)
+INDEX_ADD_OP(ia_u32_f16, uint32_t, half)
+INDEX_ADD_OP(ia_u8_f16, uint8_t, half)
 
-IA_OP(float, int64_t, ia_i64_f32)
-IA_OP(uint8_t, int64_t, ia_i64_u8)
-IA_OP(int64_t, int64_t, ia_i64_i64)
-IA_OP(uint32_t, int64_t, ia_i64_u32)
+INDEX_ADD_OP(ia_i64_f32, int64_t, float)
+INDEX_ADD_OP(ia_i64_u8, int64_t, uint8_t)
+INDEX_ADD_OP(ia_i64_i64, int64_t, int64_t)
+INDEX_ADD_OP(ia_i64_u32, int64_t, uint32_t)
 
-IA_OP(float, uint32_t, ia_u32_f32)
-IA_OP(uint8_t, uint32_t, ia_u32_u8)
-IA_OP(int64_t, uint32_t, ia_u32_i64)
-IA_OP(uint32_t, uint32_t, ia_u32_u32)
+INDEX_ADD_OP(ia_u32_f32, uint32_t, float)
+INDEX_ADD_OP(ia_u32_u8, uint32_t, uint8_t)
+INDEX_ADD_OP(ia_u32_i64, uint32_t, int64_t)
+INDEX_ADD_OP(ia_u32_u32, uint32_t, uint32_t)
 
-IA_OP(float, uint8_t, ia_u8_f32)
-IA_OP(uint8_t, uint8_t, ia_u8_u8)
-IA_OP(uint32_t, uint8_t, ia_u8_u32)
-IA_OP(int64_t, uint8_t, ia_u8_i64)
+INDEX_ADD_OP(ia_u8_f32, uint8_t, float)
+INDEX_ADD_OP(ia_u8_u8, uint8_t, uint8_t)
+INDEX_ADD_OP(ia_u8_u32, uint8_t, uint32_t)
+INDEX_ADD_OP(ia_u8_i64, uint8_t, int64_t)
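Where gather reads exactly one source element per output element, scatter_add and index_add accumulate: several source rows may land on one destination row, so each thread loops over the scattered dimension to keep the += free of races between threads. A host-side reference of scatter_add's index arithmetic (plain Rust, for checking kernel results; not crate code):

    // Reference scatter_add over the middle dim: src is [left, src_dim, right]
    // flattened, ids gives a destination row for every src element along that
    // dim, dst is [left, dst_dim, right].
    fn scatter_add_ref(src: &[f32], ids: &[usize], dst: &mut [f32],
                       left: usize, src_dim: usize, dst_dim: usize, right: usize) {
        for l in 0..left {
            for j in 0..src_dim {
                for r in 0..right {
                    let src_i = (l * src_dim + j) * right + r;
                    let dst_i = (l * dst_dim + ids[src_i]) * right + r;
                    dst[dst_i] += src[src_i];
                }
            }
        }
    }

    fn main() {
        // two src rows both scattered into dst row 0
        let src = [1.0f32, 2.0, 10.0, 20.0]; // left=1, src_dim=2, right=2
        let ids = [0usize, 0, 0, 0];
        let mut dst = [0.0f32; 6]; // dst_dim=3
        scatter_add_ref(&src, &ids, &mut dst, 1, 2, 3, 2);
        assert_eq!(dst, [11.0, 22.0, 0.0, 0.0, 0.0, 0.0]);
    }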
@@ -1,6 +1,6 @@
 use metal::{
     Buffer, CommandBufferRef, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState,
-    Device, Function, Library, MTLSize,
+    Device, Function, FunctionConstantValues, Library, MTLDataType, MTLSize, NSUInteger,
 };
 use std::collections::HashMap;
 use std::ffi::c_void;
@@ -13,7 +13,12 @@ const BINARY: &str = include_str!("binary.metal");
 const TERNARY: &str = include_str!("ternary.metal");
 const CAST: &str = include_str!("cast.metal");
 const REDUCE: &str = include_str!("reduce.metal");
+const MFA: &[u8] = include_bytes!("libMetalFlashAttention.metallib");
 
+/// Most kernels apply similarly across the tensors
+/// This creates a strategy that uses the maximum amount of threads per threadgroup (capped at the
+/// actual total buffer length).
+/// Then kernels can just do their op on their single point in the buffer.
 fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) {
     let size = length as u64;
     let width = std::cmp::min(pipeline.max_total_threads_per_threadgroup(), size);
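Only the first lines of linear_split appear in this hunk; the doc comment above describes the intent: clamp the threadgroup width to the pipeline's maximum and launch enough groups to cover the buffer. A sketch of the full computation under that assumption (the ceiling division and MTLSize packaging are inferred, not quoted from the crate):

    use metal::{ComputePipelineState, MTLSize};

    // Sketch of the split the doc comment describes: width is the
    // per-threadgroup thread count, and enough groups are launched to cover
    // `length` (the last group may be partially idle, hence the bounds check
    // in every kernel).
    fn linear_split_sketch(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) {
        let size = length as u64;
        let width = std::cmp::min(pipeline.max_total_threads_per_threadgroup(), size);
        let count = (size + width - 1) / width; // ceiling division
        (
            MTLSize { width: count, height: 1, depth: 1 },
            MTLSize { width, height: 1, depth: 1 },
        )
    }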
@@ -35,6 +40,10 @@ fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTL
 fn set_param<P: EncoderParam>(encoder: &ComputeCommandEncoderRef, position: u64, data: P) {
     <P as EncoderParam>::set_param(encoder, position, data)
 }
+
+/// Helper functions to create the various objects on the compute command encoder
+/// on a single line.
+/// Prevents getting wrong some arguments number and mixing length and size in bytes.
 trait EncoderParam {
     fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self);
 }
@@ -105,6 +114,7 @@ pub enum Source {
     Ternary,
     Cast,
     Reduce,
+    Mfa,
 }
 
 macro_rules! ops{
@@ -115,16 +125,16 @@ macro_rules! ops{
         $(
         pub mod $name {
             use super::Kernel;
-            pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_float"));
-            pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_half"));
-            pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bfloat"));
+            pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32"));
+            pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16"));
+            pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16"));
         }
         )+
         pub mod copy {
             use super::Kernel;
-            pub const FLOAT: Kernel = Kernel("copy_float");
-            pub const HALF: Kernel = Kernel("copy_half");
-            pub const BFLOAT: Kernel = Kernel("copy_bfloat");
+            pub const FLOAT: Kernel = Kernel("copy_f32");
+            pub const HALF: Kernel = Kernel("copy_f16");
+            pub const BFLOAT: Kernel = Kernel("copy_bf16");
             pub const U32: Kernel = Kernel("copy_u32");
             pub const U8: Kernel = Kernel("copy_u8");
         }
@@ -135,16 +145,16 @@ macro_rules! ops{
         $(
         pub mod $name {
             use super::Kernel;
-            pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_float_strided"));
-            pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_half_strided"));
-            pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bfloat_strided"));
+            pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32_strided"));
+            pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16_strided"));
+            pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16_strided"));
         }
         )+
         pub mod copy {
             use super::Kernel;
-            pub const FLOAT: Kernel = Kernel("copy_float_strided");
-            pub const HALF: Kernel = Kernel("copy_half_strided");
-            pub const BFLOAT: Kernel = Kernel("copy_bfloat_strided");
+            pub const FLOAT: Kernel = Kernel("copy_f32_strided");
+            pub const HALF: Kernel = Kernel("copy_f16_strided");
+            pub const BFLOAT: Kernel = Kernel("copy_bf16_strided");
             pub const U32: Kernel = Kernel("copy_u32_strided");
             pub const U8: Kernel = Kernel("copy_u8_strided");
         }
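After the rename, the generated constants match the dtype suffixes now used in the .metal sources: unary::cos::FLOAT resolves to "cos_f32" and the strided variant to "cos_f32_strided". A standalone illustration of the convention (not crate code):

    // The naming scheme the ops! macro now generates:
    // op name + dtype suffix (+ optional "_strided").
    fn kernel_name(op: &str, dtype: &str, strided: bool) -> String {
        if strided {
            format!("{op}_{dtype}_strided")
        } else {
            format!("{op}_{dtype}")
        }
    }

    fn main() {
        assert_eq!(kernel_name("cos", "f32", false), "cos_f32");
        assert_eq!(kernel_name("add", "bf16", true), "add_bf16_strided");
    }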
@@ -156,7 +166,7 @@ pub mod unary {
 ops!(cos, sin, exp, sqr, sqrt, neg, log, gelu, ceil, floor, round, erf, gelu_erf, tanh);
 }
 pub mod binary {
-ops!(add, sub, mul, div);
+ops!(add, sub, mul, div, min, max, eq, ne, le, lt, ge, gt);
 }
 
 #[derive(thiserror::Error, Debug)]
@@ -171,6 +181,12 @@ pub enum MetalKernelError {
     FailedToCreateComputeFunction,
     #[error("Failed to create pipeline")]
     FailedToCreatePipeline(String),
+    #[error("Invalid matmul arguments {lhs_stride:?} {rhs_stride:?} {mnk:?}")]
+    MatMulNonContiguous {
+        lhs_stride: Vec<usize>,
+        rhs_stride: Vec<usize>,
+        mnk: (usize, usize, usize),
+    },
 }
 
 impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
@@ -179,23 +195,24 @@ impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
     }
 }
 
-type KernelMap<T> = HashMap<&'static str, T>;
 type Libraries = HashMap<Source, Library>;
-type Pipelines = KernelMap<ComputePipelineState>;
+type Pipelines = HashMap<(&'static str, Option<ConstantValues>), ComputePipelineState>;
 
-#[derive(Debug, Default)]
+#[derive(Debug)]
 pub struct Kernels {
     libraries: RwLock<Libraries>,
     pipelines: RwLock<Pipelines>,
+    fence: metal::Fence,
 }
 
 impl Kernels {
-    pub fn new() -> Self {
+    pub fn new(fence: metal::Fence) -> Self {
        let libraries = RwLock::new(Libraries::new());
        let pipelines = RwLock::new(Pipelines::new());
        Self {
            libraries,
            pipelines,
+           fence,
        }
     }
 
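Pipelines are now cached per (kernel name, constant values) rather than per name alone: the same Metal function specialized with different function constants yields a different pipeline state, so the key must carry both. A minimal sketch of that cache shape (ConstantValues and Pipeline here are stand-ins for the crate's types; the real ConstantValues must be Eq + Hash to serve as a key):

    use std::collections::HashMap;
    use std::sync::RwLock;

    // Stand-ins, just to show the cache shape.
    #[derive(Debug, Clone, PartialEq, Eq, Hash)]
    struct ConstantValues(Vec<u64>);
    #[derive(Debug, Clone)]
    struct Pipeline(&'static str);

    struct Cache {
        pipelines: RwLock<HashMap<(&'static str, Option<ConstantValues>), Pipeline>>,
    }

    impl Cache {
        fn get_or_build(&self, name: &'static str, consts: Option<ConstantValues>) -> Pipeline {
            let mut map = self.pipelines.write().unwrap();
            map.entry((name, consts))
                .or_insert_with(|| Pipeline(name)) // real code compiles the function here
                .clone()
        }
    }

    fn main() {
        let cache = Cache { pipelines: RwLock::new(HashMap::new()) };
        let p1 = cache.get_or_build("gemm", None);
        let p2 = cache.get_or_build("gemm", Some(ConstantValues(vec![64])));
        println!("{p1:?} {p2:?}"); // distinct cache entries for the same name
    }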
@@ -208,9 +225,12 @@ impl Kernels {
             Source::Indexing => INDEXING,
             Source::Cast => CAST,
             Source::Reduce => REDUCE,
+            Source::Mfa => panic!("Invalid lib"),
         }
     }
 
+    /// Load the give library from its [`source`].
+    /// If this has been previously loaded it will just fetch it from cache.
     pub fn load_library(
         &self,
         device: &Device,
@@ -220,10 +240,22 @@ impl Kernels {
         if let Some(lib) = libraries.get(&source) {
             Ok(lib.clone())
         } else {
+            let lib = match source {
+                Source::Mfa => {
+                    let source_data = MFA;
+                    device.new_library_with_data(source_data).map_err(|e| {
+                        MetalKernelError::LoadLibraryError(format!(
+                            "Candle metal requires macosx > 13.0 or higher, cannot load mfa: {e}"
+                        ))
+                    })?
+                }
+                source => {
                     let source_content = self.get_library_source(source);
-            let lib = device
+                    device
                         .new_library_with_source(source_content, &CompileOptions::new())
-                .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?;
+                        .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
+                }
+            };
             libraries.insert(source, lib.clone());
             Ok(lib)
         }
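The Mfa entry is the one source that ships as a precompiled .metallib blob (pulled in with include_bytes! above) rather than as .metal source text, which is presumably why get_library_source panics for it and load_library routes it through new_library_with_data instead of new_library_with_source.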
@@ -234,39 +266,56 @@ impl Kernels {
         device: &Device,
         source: Source,
         name: &'static str,
+        constants: Option<FunctionConstantValues>,
     ) -> Result<Function, MetalKernelError> {
         let func = self
             .load_library(device, source)?
-            .get_function(name, None)
+            .get_function(name, constants)
             .map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?;
         Ok(func)
-        // let mut funcs = self.funcs.write()?;
-        // if let Some(func) = funcs.get(name) {
-        //     Ok(func.clone())
-        // } else {
-        //     funcs.insert(name, func.clone());
-        //     Ok(func)
-        // }
     }
 
+    /// Load the give pipeline
+    /// loads the library from source, then gets the function [`name`] from
+    /// that source
+    fn load_pipeline_with_constants(
+        &self,
+        device: &Device,
+        source: Source,
+        name: &'static str,
+        constants: Option<ConstantValues>,
+    ) -> Result<ComputePipelineState, MetalKernelError> {
+        let mut pipelines = self.pipelines.write()?;
+        let key = (name, constants);
+        if let Some(pipeline) = pipelines.get(&key) {
+            Ok(pipeline.clone())
+        } else {
+            let (name, constants) = key;
+            let func = self.load_function(
+                device,
+                source,
+                name,
+                constants.as_ref().map(|c| c.function_constant_values()),
+            )?;
+            let pipeline = device
+                .new_compute_pipeline_state_with_function(&func)
+                .map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
+            pipelines.insert((name, constants), pipeline.clone());
+
+            Ok(pipeline)
+        }
+    }
+
+    /// Load the give pipeline
+    /// loads the library from source, then gets the function [`name`] from
+    /// that source (without constants)
     pub fn load_pipeline(
         &self,
         device: &Device,
         source: Source,
         name: &'static str,
     ) -> Result<ComputePipelineState, MetalKernelError> {
-        let mut pipelines = self.pipelines.write()?;
-        if let Some(pipeline) = pipelines.get(name) {
-            Ok(pipeline.clone())
-        } else {
-            let func = self.load_function(device, source, name)?;
-            let pipeline = device
-                .new_compute_pipeline_state_with_function(&func)
-                .map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
-            pipelines.insert(name, pipeline.clone());
-
-            Ok(pipeline)
-        }
+        self.load_pipeline_with_constants(device, source, name, None)
     }
 }
 
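Callers that don't need function constants keep the old entry point; load_pipeline simply forwards with None. A hedged usage sketch against the crate as patched here (assumes a macOS machine with a Metal device; Kernels::new now takes the shared fence):

    use metal::Device;

    // Sketch: build a Kernels cache and fetch one pipeline by name.
    fn main() {
        let device = Device::system_default().expect("no Metal device");
        let fence = device.new_fence();
        let kernels = candle_metal_kernels::Kernels::new(fence);
        let pipeline = kernels
            .load_pipeline(&device, candle_metal_kernels::Source::Unary, "cos_f32")
            .expect("pipeline");
        println!("max threads/group: {}", pipeline.max_total_threads_per_threadgroup());
    }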
@@ -282,12 +331,16 @@ pub fn call_unary_contiguous(
 ) -> Result<(), MetalKernelError> {
     let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?;
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(encoder, (length, input, output));
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
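Each dispatch is now bracketed by wait_for_fence and update_fence on the shared kernels.fence, serializing encoders that touch the same buffers, and declares its buffers with use_resource so Metal keeps them resident even without hazard tracking. The same pattern repeats in every call_* helper below; condensed in isolation (a sketch, not crate code; pipeline setup and parameter binding omitted):

    use metal::{Buffer, ComputeCommandEncoderRef, Fence, MTLResourceUsage, MTLSize};

    // The encoding pattern used by every call_* helper in this diff:
    // wait on the shared fence, declare buffer usage, dispatch, signal.
    fn encode_with_fence(
        encoder: &ComputeCommandEncoderRef,
        fence: &Fence,
        input: &Buffer,
        output: &Buffer,
        groups: MTLSize,
        group_size: MTLSize,
    ) {
        encoder.wait_for_fence(fence); // order after prior writers
        encoder.use_resource(input, MTLResourceUsage::Read);
        encoder.use_resource(output, MTLResourceUsage::Write);
        encoder.dispatch_thread_groups(groups, group_size);
        encoder.update_fence(fence); // signal the next encoder
        encoder.end_encoding();
    }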
@@ -309,6 +362,7 @@ pub fn call_unary_strided(
 
     let num_dims: usize = shape.len();
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     let length: usize = shape.iter().product();
@@ -327,7 +381,10 @@ pub fn call_unary_strided(
     let width: usize = shape.iter().product();
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, width);
 
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -346,13 +403,18 @@ pub fn call_binary_contiguous(
     let pipeline = kernels.load_pipeline(device, Source::Binary, kernel_name.0)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(encoder, (length, left, right, output));
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
 
+    encoder.use_resource(left, metal::MTLResourceUsage::Read);
+    encoder.use_resource(right, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -377,6 +439,7 @@ pub fn call_binary_strided(
     let num_dims: usize = shape.len();
     let encoder = command_buffer.new_compute_command_encoder();
     let width: usize = shape.iter().product();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     let length: usize = shape.iter().product();
@@ -397,7 +460,11 @@ pub fn call_binary_strided(
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, width);
 
+    encoder.use_resource(left_input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(right_input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -416,12 +483,16 @@ pub fn call_cast_contiguous(
     let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(encoder, (length, (input, input_offset), output));
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -441,6 +512,7 @@ pub fn call_cast_strided(
     let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     let length: usize = shape.iter().product();
@@ -459,7 +531,10 @@ pub fn call_cast_strided(
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
 
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -479,6 +554,7 @@ pub fn call_reduce_contiguous(
     let elements_to_sum = length / out_length;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(
@@ -504,7 +580,68 @@ pub fn call_reduce_contiguous(
         depth: 1,
     };
 
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
+    encoder.end_encoding();
+    Ok(())
+}
+
+pub fn call_reduce_strided(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    kernel_name: &'static str,
+    shape: &[usize],
+    strides: &[usize],
+    out_length: usize,
+    input: &Buffer,
+    input_offset: usize,
+    output: &Buffer,
+) -> Result<(), MetalKernelError> {
+    let length: usize = shape.iter().product();
+    let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
+    let elements_to_sum = length / out_length;
+
+    let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
+    encoder.set_compute_pipeline_state(&pipeline);
+
+    set_params!(
+        encoder,
+        (
+            shape.len(),
+            shape,
+            strides,
+            elements_to_sum,
+            (input, input_offset),
+            output
+        )
+    );
+
+    let thread_group_count = MTLSize {
+        width: out_length as u64,
+        height: 1,
+        depth: 1,
+    };
+
+    let width = std::cmp::min(
+        pipeline.max_total_threads_per_threadgroup(),
+        elements_to_sum as u64,
+    )
+    .next_power_of_two();
+
+    let thread_group_size = MTLSize {
+        width,
+        height: 1,
+        depth: 1,
+    };
+
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
+    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
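call_reduce_strided launches one threadgroup per output element and sizes the group to the per-output reduction length rounded up to a power of two (starting from the pipeline's thread limit), the shape a tree-style reduction wants. For example, 300 strided elements per output gives a group width of 512:

    fn main() {
        let max_threads: u64 = 1024; // typical pipeline.max_total_threads_per_threadgroup()
        let elements_to_sum: u64 = 300;
        let width = std::cmp::min(max_threads, elements_to_sum).next_power_of_two();
        assert_eq!(width, 512); // 300 rounds up to the next power of two
    }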
@@ -518,13 +655,18 @@ pub fn call_last_softmax(
     length: usize,
     elements_to_sum: usize,
     input: &Buffer,
+    input_offset: usize,
     output: &Buffer,
 ) -> Result<(), MetalKernelError> {
     let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
-    set_params!(encoder, (length, elements_to_sum, input, output));
+    set_params!(
+        encoder,
+        (length, elements_to_sum, (input, input_offset), output)
+    );
 
     let out_length = length / elements_to_sum;
 
@@ -546,7 +688,10 @@ pub fn call_last_softmax(
         depth: 1,
     };
 
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -566,12 +711,16 @@ pub fn call_affine(
     let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(encoder, (size, mul, add, input, output));
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -594,6 +743,7 @@ pub fn call_affine_strided(
     let size: usize = shape.iter().product();
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(
@@ -611,7 +761,10 @@ pub fn call_affine_strided(
     );
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -630,12 +783,16 @@ pub fn call_powf(
     let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(encoder, (size, mul, input, output));
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -657,6 +814,7 @@ pub fn call_powf_strided(
     let size: usize = shape.iter().product();
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(
@@ -673,7 +831,10 @@ pub fn call_powf_strided(
     );
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -692,12 +853,16 @@ pub fn call_elu(
     let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(encoder, (size, mul, input, output));
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -719,6 +884,7 @@ pub fn call_elu_strided(
     let size: usize = shape.iter().product();
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     set_params!(
@@ -735,7 +901,10 @@ pub fn call_elu_strided(
     );
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@@ -757,6 +926,7 @@ pub fn call_where_cond_strided(
     let pipeline = kernels.load_pipeline(device, Source::Ternary, name)?;
 
     let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
     encoder.set_compute_pipeline_state(&pipeline);
 
     let size: usize = shape.iter().product();
@@ -780,7 +950,12 @@ pub fn call_where_cond_strided(
 
     let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
 
+    encoder.use_resource(cond, metal::MTLResourceUsage::Read);
+    encoder.use_resource(left, metal::MTLResourceUsage::Read);
+    encoder.use_resource(right, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }
@ -807,6 +982,7 @@ pub fn call_index_select(
|
|||||||
|
|
||||||
let encoder = command_buffer.new_compute_command_encoder();
|
let encoder = command_buffer.new_compute_command_encoder();
|
||||||
|
|
||||||
|
encoder.wait_for_fence(&kernels.fence);
|
||||||
encoder.set_compute_pipeline_state(&pipeline);
|
encoder.set_compute_pipeline_state(&pipeline);
|
||||||
|
|
||||||
set_params!(
|
set_params!(
|
||||||
@@ -825,10 +1001,416 @@ pub fn call_index_select(

     let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);

+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(ids, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
     encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
     encoder.end_encoding();
     Ok(())
 }

+#[allow(clippy::too_many_arguments)]
+pub fn call_gather(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    name: &'static str,
+    shape: &[usize],
+    ids_size: usize,
+    dim: usize,
+    input: &Buffer,
+    input_offset: usize,
+    ids: &Buffer,
+    ids_offset: usize,
+    output: &Buffer,
+) -> Result<(), MetalKernelError> {
+    let left_size: usize = shape[..dim].iter().product();
+    let right_size: usize = shape[dim + 1..].iter().product();
+    let src_dim_size = shape[dim];
+    let dst_el = ids_size * left_size * right_size;
+
+    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
+
+    let encoder = command_buffer.new_compute_command_encoder();
+
+    encoder.wait_for_fence(&kernels.fence);
+    encoder.set_compute_pipeline_state(&pipeline);
+
+    set_params!(
+        encoder,
+        (
+            dst_el,
+            left_size,
+            src_dim_size,
+            right_size,
+            ids_size,
+            (input, input_offset),
+            (ids, ids_offset),
+            output
+        )
+    );
+
+    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
+
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(ids, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
+    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
+    encoder.end_encoding();
+    Ok(())
+}
+
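call_gather flattens the tensor around dim into (left, ids, right) blocks, so the kernel computes dst[l, i, r] = src[l, ids[i], r]. A CPU sketch of the same index arithmetic (illustrative only, not the Metal kernel):

// CPU reference for gather along `dim`, using the same left/right
// decomposition as call_gather above. `src` is contiguous with shape
// [left, src_dim, right]; the output has shape [left, ids.len(), right].
fn gather_ref<T: Copy>(src: &[T], ids: &[u32], shape: &[usize], dim: usize) -> Vec<T> {
    let left: usize = shape[..dim].iter().product();
    let right: usize = shape[dim + 1..].iter().product();
    let src_dim = shape[dim];
    let mut dst = Vec::with_capacity(left * ids.len() * right);
    for l in 0..left {
        for &id in ids {
            for r in 0..right {
                dst.push(src[(l * src_dim + id as usize) * right + r]);
            }
        }
    }
    dst
}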
+pub fn call_scatter_add(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    name: &'static str,
+    src_shape: &[usize],
+    dst_shape: &[usize],
+    dim: usize,
+    input: &Buffer,
+    input_offset: usize,
+    ids: &Buffer,
+    ids_offset: usize,
+    output: &Buffer,
+) -> Result<(), MetalKernelError> {
+    let left_size: usize = src_shape[..dim].iter().product();
+    let right_size: usize = src_shape[dim + 1..].iter().product();
+    let src_dim_size = src_shape[dim];
+    let dst_el = left_size * right_size;
+    let dst_dim_size = dst_shape[dim];
+
+    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
+
+    let encoder = command_buffer.new_compute_command_encoder();
+
+    encoder.wait_for_fence(&kernels.fence);
+    encoder.set_compute_pipeline_state(&pipeline);
+
+    set_params!(
+        encoder,
+        (
+            dst_el,
+            left_size,
+            src_dim_size,
+            right_size,
+            dst_dim_size,
+            (input, input_offset),
+            (ids, ids_offset),
+            output
+        )
+    );
+
+    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
+
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(ids, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
+    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
+    encoder.end_encoding();
+    Ok(())
+}
+
+pub fn call_index_add(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    name: &'static str,
+    src_shape: &[usize],
+    dst_shape: &[usize],
+    ids_shape: &[usize],
+    dim: usize,
+    input: &Buffer,
+    input_offset: usize,
+    ids: &Buffer,
+    ids_offset: usize,
+    output: &Buffer,
+) -> Result<(), MetalKernelError> {
+    let left_size: usize = src_shape[..dim].iter().product();
+    let right_size: usize = src_shape[dim + 1..].iter().product();
+    let src_dim_size = src_shape[dim];
+    let dst_el = left_size * right_size;
+    let dst_dim_size = dst_shape[dim];
+    let ids_dim_size = ids_shape[0];
+
+    let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
+    let encoder = command_buffer.new_compute_command_encoder();
+
+    encoder.wait_for_fence(&kernels.fence);
+    encoder.set_compute_pipeline_state(&pipeline);
+
+    set_params!(
+        encoder,
+        (
+            dst_el,
+            left_size,
+            src_dim_size,
+            right_size,
+            dst_dim_size,
+            ids_dim_size,
+            (input, input_offset),
+            (ids, ids_offset),
+            output
+        )
+    );
+
+    let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
+
+    encoder.use_resource(input, metal::MTLResourceUsage::Read);
+    encoder.use_resource(ids, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
+    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.update_fence(&kernels.fence);
+    encoder.end_encoding();
+    Ok(())
+}
+
+#[derive(Debug, PartialEq)]
+pub enum Value {
+    USize(usize),
+    Bool(bool),
+    F32(f32),
+    U16(u16),
+}
+
+impl std::hash::Hash for Value {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        match self {
+            Value::F32(v) => v.to_bits().hash(state),
+            Value::USize(v) => v.hash(state),
+            Value::U16(v) => v.hash(state),
+            Value::Bool(v) => v.hash(state),
+        }
+    }
+}
+
+impl Value {
+    fn data_type(&self) -> MTLDataType {
+        match self {
+            Value::USize(_) => MTLDataType::UInt,
+            Value::F32(_) => MTLDataType::Float,
+            Value::U16(_) => MTLDataType::UShort,
+            Value::Bool(_) => MTLDataType::Bool,
+        }
+    }
+}
+
+/// Not true, good enough for our purposes.
+impl Eq for Value {}
+
+#[derive(Debug, Eq, PartialEq, Hash)]
+struct ConstantValues(Vec<(usize, Value)>);
+
+impl ConstantValues {
+    pub fn new(values: Vec<(usize, Value)>) -> Self {
+        Self(values)
+    }
+
+    fn function_constant_values(&self) -> FunctionConstantValues {
+        let f = FunctionConstantValues::new();
+        for (index, value) in &self.0 {
+            let ty = value.data_type();
+            match value {
+                Value::USize(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const usize as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+                Value::F32(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const f32 as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+                Value::U16(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const u16 as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+                Value::Bool(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const bool as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+            }
+        }
+        f
+    }
+}
+
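ConstantValues wraps Metal function specialization constants: each (index, value) pair is baked into the pipeline when the function is compiled, which is why Value needs Hash and Eq, since loaded pipelines are cached per constant set. A hedged usage sketch, with index assignments taken from call_gemm below:

// Sketch: specialize the MFA gemm pipeline for one problem size. The cache
// key includes the constants, hence the Hash/Eq impls on Value above.
let constants = Some(ConstantValues::new(vec![
    (0, Value::USize(m)),  // M
    (1, Value::USize(n)),  // N
    (2, Value::USize(k)),  // K
    (10, Value::Bool(a_trans)),
    (11, Value::Bool(b_trans)),
]));
let pipeline = kernels.load_pipeline_with_constants(device, Source::Mfa, "sgemm", constants)?;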
+#[allow(clippy::too_many_arguments)]
+pub fn call_gemm(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    name: &'static str,
+    (b, m, n, k): (usize, usize, usize, usize),
+    lhs_stride: &[usize],
+    lhs_offset: usize,
+    lhs_buffer: &Buffer,
+    rhs_stride: &[usize],
+    rhs_offset: usize,
+    rhs_buffer: &Buffer,
+    output: &Buffer,
+) -> Result<(), MetalKernelError> {
+    assert!(rhs_stride.len() >= 2);
+    assert!(lhs_stride.len() >= 2);
+    let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
+    let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
+    let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
+    let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
+    let a_trans = if lhs_m1 == 1 && lhs_m2 == k {
+        false
+    } else if lhs_m1 == m && lhs_m2 == 1 {
+        true
+    } else {
+        return Err(MetalKernelError::MatMulNonContiguous {
+            lhs_stride: lhs_stride.to_vec(),
+            rhs_stride: rhs_stride.to_vec(),
+            mnk: (m, n, k),
+        })?;
+    };
+    let b_trans = if rhs_m1 == 1 && rhs_m2 == n {
+        false
+    } else if rhs_m1 == k && rhs_m2 == 1 {
+        true
+    } else {
+        return Err(MetalKernelError::MatMulNonContiguous {
+            lhs_stride: lhs_stride.to_vec(),
+            rhs_stride: rhs_stride.to_vec(),
+            mnk: (m, n, k),
+        })?;
+    };
+    let d_trans = false;
+    let alpha = 1.0f32;
+    let beta = 0.0f32;
+    let batched = b > 1;
+    let fused_activation = false;
+    let fused_bias = false;
+    let m_simd = 16;
+    let n_simd = 16;
+    let k_simd = 16;
+    let m_splits = 2;
+    let n_splits = 2;
+    let constants = Some(ConstantValues::new(vec![
+        (0, Value::USize(m)),
+        (1, Value::USize(n)),
+        (2, Value::USize(k)),
+        (10, Value::Bool(a_trans)),
+        (11, Value::Bool(b_trans)),
+        (13, Value::Bool(d_trans)),
+        (20, Value::F32(alpha)),
+        (21, Value::F32(beta)),
+        (100, Value::Bool(batched)),
+        (101, Value::Bool(fused_activation)),
+        // Garbage
+        (102, Value::Bool(false)),
+        (103, Value::Bool(false)),
+        (113, Value::Bool(false)),
+        (50_000, Value::Bool(false)),
+        // End garbage
+        (200, Value::U16(m_simd)),
+        (201, Value::U16(n_simd)),
+        (202, Value::U16(k_simd)),
+        (210, Value::U16(m_splits)),
+        (211, Value::U16(n_splits)),
+        (50_001, Value::Bool(fused_bias)),
+    ]));
+    let pipeline = kernels.load_pipeline_with_constants(device, Source::Mfa, name, constants)?;
+    let m_group = m_simd * m_splits;
+    let n_group = n_simd * n_splits;
+
+    let a_block_length = m_group * k_simd;
+    let b_block_length = k_simd * n_group;
+
+    let mut block_elements = a_block_length + b_block_length;
+    if (m % 8 != 0) && (n % 8 != 0) {
+        let c_block_length = m_group * n_group;
+        block_elements = std::cmp::max(c_block_length, block_elements)
+    }
+    if fused_bias {
+        if d_trans {
+            block_elements = std::cmp::max(block_elements, m_group);
+        } else {
+            block_elements = std::cmp::max(block_elements, n_group);
+        }
+    }
+    let bytes = match name {
+        "sgemm" => 4,
+        "hgemm" => 2,
+        other => {
+            return Err(MetalKernelError::LoadLibraryError(format!(
+                "{other} is not a valid kernel for gemm"
+            )));
+        }
+    };
+    let block_bytes = block_elements * bytes;
+
+    let encoder = command_buffer.new_compute_command_encoder();
+    encoder.wait_for_fence(&kernels.fence);
+    encoder.set_compute_pipeline_state(&pipeline);
+    encoder.set_threadgroup_memory_length(0, block_bytes.into());
+    encoder.set_buffer(0, Some(lhs_buffer), lhs_offset as NSUInteger);
+    encoder.set_buffer(1, Some(rhs_buffer), rhs_offset as NSUInteger);
+    encoder.set_buffer(2, Some(output), 0);
+    // TODO Tensor D
+
+    let grid_z = b;
+    if batched {
+        let byte_stride_a: usize = lhs_stride[lhs_stride.len() - 3] * bytes as usize;
+        let byte_stride_b: usize = rhs_stride[rhs_stride.len() - 3] * bytes as usize;
+        let byte_stride_c = m * n * bytes as usize;
+        // TODO byte_stride_d
+        let byte_stride_d = 0;
+
+        let mut buffer: Vec<u64> = Vec::with_capacity(b * 4);
+        for i in 0..b {
+            buffer.push((i * byte_stride_a) as u64);
+            buffer.push((i * byte_stride_b) as u64);
+            buffer.push((i * byte_stride_c) as u64);
+            buffer.push((i * byte_stride_d) as u64);
+        }
+        encoder.set_bytes(
+            10,
+            (buffer.len() * core::mem::size_of::<u64>()) as NSUInteger,
+            buffer.as_ptr() as *const NSUInteger as *const c_void,
+        );
+    }
+
+    let grid_size = MTLSize {
+        width: divide(n, n_group.into()),
+        height: divide(m, m_group.into()),
+        depth: grid_z as NSUInteger,
+    };
+    let group_size = MTLSize {
+        width: 32 * (m_splits as u64) * (n_splits as u64),
+        height: 1,
+        depth: 1,
+    };
+    encoder.use_resource(lhs_buffer, metal::MTLResourceUsage::Read);
+    encoder.use_resource(rhs_buffer, metal::MTLResourceUsage::Read);
+    encoder.use_resource(output, metal::MTLResourceUsage::Write);
+    encoder.dispatch_thread_groups(grid_size, group_size);
+    encoder.update_fence(&kernels.fence);
+    encoder.end_encoding();
+
+    Ok(())
+}
+
+fn divide(m: usize, b: usize) -> NSUInteger {
+    ((m + b - 1) / b) as NSUInteger
+}
+
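call_gemm takes no transpose flags; it classifies each operand from its two innermost strides, and divide is plain ceiling division for sizing the grid. A standalone sketch of the stride check (hypothetical helper, same logic as above):

// For a rows x cols matrix: innermost stride 1 with the next stride equal to
// the row length means row-major storage; the swapped pattern means the data
// is stored transposed. Anything else is non-contiguous, which call_gemm
// rejects with MatMulNonContiguous.
fn is_transposed(stride: &[usize], rows: usize, cols: usize) -> Option<bool> {
    assert!(stride.len() >= 2);
    let m1 = stride[stride.len() - 1];
    let m2 = stride[stride.len() - 2];
    if m1 == 1 && m2 == cols {
        Some(false) // contiguous row-major
    } else if m1 == rows && m2 == 1 {
        Some(true) // column-major / transposed
    } else {
        None
    }
}

For the row-major lhs_stride = [m * k, k, 1] used in the tests further down, this returns Some(false), so the kernel is specialized with a_trans = false.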
 #[cfg(test)]
 mod tests;
BIN candle-metal-kernels/src/libMetalFlashAttention.metallib (new file, binary not shown)
@@ -2,6 +2,7 @@
 using namespace metal;

 #define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define MIN(x, y) ((x) < (y) ? (x) : (y))

 METAL_FUNC uint get_strided_index(
     uint idx,
@@ -20,9 +21,130 @@ METAL_FUNC uint get_strided_index(

 constant int THREADGROUP_SIZE = 2048;

-#define REDUCE(FN, NAME, T) \
+#define ARGMIN(NAME, T, MAXVALUE) \
 kernel void NAME( \
-    constant size_t &src_numel, \
+    constant size_t &num_dims, \
+    constant size_t *dims, \
+    constant size_t *strides, \
+    constant size_t &el_to_sum_per_block, \
+    device const T *src, \
+    device uint *dst, \
+    uint id [[ thread_position_in_grid ]], \
+    uint tid [[ thread_index_in_threadgroup ]], \
+    uint dst_id [[ threadgroup_position_in_grid ]], \
+    uint block_dim [[ threads_per_threadgroup ]] \
+) { \
+    \
+    threadgroup T shared_memory[THREADGROUP_SIZE]; \
+    threadgroup uint shared_indices[THREADGROUP_SIZE]; \
+    \
+    shared_memory[tid] = MAXVALUE; \
+    shared_indices[tid] = 0xFFFFFFFF; \
+    bool notset = true; \
+    /* \
+    // Elements summed in this block range from dst_id * el_to_sum_per_block \
+    // to (dst_id + 1) * el_to_sum_per_block. \
+    */ \
+    size_t start_idx = dst_id * el_to_sum_per_block; \
+    size_t stop_idx = start_idx + el_to_sum_per_block; \
+    size_t idx = start_idx + tid; \
+    while (idx < stop_idx) { \
+        /* \
+        // TODO: Fast version for the contiguous case. \
+        */ \
+        size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
+        if (notset || src[strided_i] < shared_memory[tid]) { \
+            shared_memory[tid] = src[strided_i]; \
+            /* Assume that the reduction takes place over the last dimension which is contiguous. */ \
+            shared_indices[tid] = idx % dims[num_dims - 1]; \
+            notset = false; \
+        } \
+        idx += block_dim; \
+    } \
+    \
+    threadgroup_barrier(mem_flags::mem_none); \
+    \
+    /* \
+    // reduction in shared memory \
+    */ \
+    for (uint s = block_dim / 2; s > 0; s >>= 1) { \
+        if (tid < s && shared_memory[tid + s] < shared_memory[tid]) { \
+            shared_indices[tid] = shared_indices[tid + s]; \
+            shared_memory[tid] = shared_memory[tid + s]; \
+        } \
+        threadgroup_barrier(mem_flags::mem_none); \
+    } \
+    \
+    if (tid == 0){ \
+        dst[dst_id] = shared_indices[0]; \
+    } \
+} \
+
+
+#define ARGMAX(NAME, T, MINVALUE) \
+kernel void NAME( \
+    constant size_t &num_dims, \
+    constant size_t *dims, \
+    constant size_t *strides, \
+    constant size_t &el_to_sum_per_block, \
+    device const T *src, \
+    device uint *dst, \
+    uint id [[ thread_position_in_grid ]], \
+    uint tid [[ thread_index_in_threadgroup ]], \
+    uint dst_id [[ threadgroup_position_in_grid ]], \
+    uint block_dim [[ threads_per_threadgroup ]] \
+) { \
+    \
+    threadgroup T shared_memory[THREADGROUP_SIZE]; \
+    threadgroup uint shared_indices[THREADGROUP_SIZE]; \
+    \
+    shared_memory[tid] = MINVALUE; \
+    shared_indices[tid] = 0xFFFFFFFF; \
+    /* \
+    // Elements summed in this block range from dst_id * el_to_sum_per_block \
+    // to (dst_id + 1) * el_to_sum_per_block. \
+    */ \
+    size_t start_idx = dst_id * el_to_sum_per_block; \
+    size_t stop_idx = start_idx + el_to_sum_per_block; \
+    size_t idx = start_idx + tid; \
+    bool notset = true; \
+    while (idx < stop_idx) { \
+        /* \
+        // TODO: Fast version for the contiguous case. \
+        */ \
+        size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
+        if (notset || shared_memory[tid] < src[strided_i]) { \
+            shared_memory[tid] = src[strided_i]; \
+            shared_indices[tid] = idx % dims[num_dims - 1]; \
+            notset = false; \
+        } \
+        idx += block_dim; \
+    } \
+    \
+    threadgroup_barrier(mem_flags::mem_none); \
+    \
+    /* \
+    // reduction in shared memory \
+    */ \
+    for (uint s = block_dim / 2; s > 0; s >>= 1) { \
+        if (tid < s && shared_memory[tid + s] > shared_memory[tid]) { \
+            shared_indices[tid] = shared_indices[tid + s]; \
+            shared_memory[tid] = shared_memory[tid + s]; \
+        } \
+        threadgroup_barrier(mem_flags::mem_none); \
+    } \
+    \
+    if (tid == 0){ \
+        dst[dst_id] = shared_indices[0]; \
+    } \
+} \
+
+#define REDUCE(FN, NAME, T, START) \
+kernel void NAME( \
+    constant size_t &num_dims, \
+    constant size_t *dims, \
+    constant size_t *strides, \
     constant size_t &el_to_sum_per_block, \
     device const T *src, \
     device T *dst, \
@@ -32,23 +154,23 @@ kernel void NAME( \
     uint block_dim [[ threads_per_threadgroup ]] \
 ) { \
     \
-    threadgroup float shared_memory[THREADGROUP_SIZE]; \
+    threadgroup T shared_memory[THREADGROUP_SIZE]; \
     \
-    shared_memory[tid] = 0; \
+    shared_memory[tid] = START; \
     /* \
     // Elements summed in this block range from dst_id * el_to_sum_per_block \
     // to (dst_id + 1) * el_to_sum_per_block. \
     */ \
     size_t start_idx = dst_id * el_to_sum_per_block; \
-    size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \
+    size_t stop_idx = start_idx + el_to_sum_per_block; \
     size_t idx = start_idx + tid; \
     while (idx < stop_idx) { \
         /* \
         // TODO: Fast version for the contiguous case. \
-        // size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
         */ \
+        size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
         T x = shared_memory[tid]; \
-        T y = src[idx]; \
+        T y = src[strided_i]; \
         shared_memory[tid] = FN; \
         idx += block_dim; \
     } \
@@ -71,10 +193,6 @@ kernel void NAME( \
 } \

-REDUCE(x + y, fast_sum_float, float)
-REDUCE(x * y, fast_mul_float, float)
-REDUCE(max(x, y), fast_max_float, float)
-
 #define SOFTMAX(NAME, T) \
 kernel void NAME( \
     constant size_t &src_numel, \
@@ -93,12 +211,13 @@ kernel void NAME(
     size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \
     size_t idx = start_idx + tid; \
     \
-    threadgroup_barrier(mem_flags::mem_threadgroup); \
     \
+    float tmp = -INFINITY; \
     while (idx < stop_idx) { \
-        shared_memory[tid] = MAX(shared_memory[tid], src[idx]); \
+        tmp = MAX(tmp, float(src[idx])); \
         idx += block_dim; \
     } \
+    shared_memory[tid] = tmp; \
     \
     threadgroup_barrier(mem_flags::mem_threadgroup); \
     \
@@ -106,21 +225,26 @@ kernel void NAME(
         if (tid < s) { \
             shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]); \
         } \
+        threadgroup_barrier(mem_flags::mem_threadgroup); \
     } \
     \
+    /* wait for shared_memory[0] to be filled */ \
     threadgroup_barrier(mem_flags::mem_threadgroup); \
     \
     float _max = shared_memory[0]; \
     \
+    /* prevent tid=0 from overwriting _max before other threads have written it */ \
+    threadgroup_barrier(mem_flags::mem_threadgroup); \
     shared_memory[tid] = 0; \
     \
     idx = start_idx + tid; \
     while (idx < stop_idx) { \
-        const T val = T(exp(src[idx] - _max)); \
-        dst[idx] = val; \
+        const float val = exp(float(src[idx]) - _max); \
+        dst[idx] = T(val); \
         shared_memory[tid] += val; \
         idx += block_dim; \
     } \
+    threadgroup_barrier(mem_flags::mem_threadgroup); \
     for (uint s = block_dim / 2; s > 0; s >>= 1) { \
         if (tid < s) { \
             shared_memory[tid] += shared_memory[tid + s]; \
@@ -128,7 +252,7 @@ kernel void NAME(
         threadgroup_barrier(mem_flags::mem_threadgroup); \
     } \
     \
-    const T inv_acc = T(1/shared_memory[0]); \
+    const T inv_acc = T(1.0/shared_memory[0]); \
     idx = start_idx + tid; \
     while (idx < stop_idx) { \
         dst[idx] *= inv_acc; \
@@ -136,8 +260,33 @@ kernel void NAME(
     } \
 } \

-SOFTMAX(softmax_float, float)
-SOFTMAX(softmax_half, half)
+REDUCE(x + y, fast_sum_f32_strided, float, 0)
+REDUCE(x + y, fast_sum_u32_strided, uint, 0)
+REDUCE(x + y, fast_sum_f16_strided, half, 0)
+REDUCE(x * y, fast_mul_f32_strided, float, 1)
+REDUCE(x * y, fast_mul_u32_strided, uint, 1)
+REDUCE(x * y, fast_mul_f16_strided, half, 1)
+REDUCE(MAX(x, y), fast_max_f32_strided, float, -HUGE_VALF)
+REDUCE(MAX(x, y), fast_max_u32_strided, uint, 0)
+REDUCE(MAX(x, y), fast_max_f16_strided, half, -HUGE_VALH)
+REDUCE(MIN(x, y), fast_min_f32_strided, float, HUGE_VALF)
+REDUCE(MIN(x, y), fast_min_u32_strided, uint, 0xFFFFFFFF)
+REDUCE(MIN(x, y), fast_min_f16_strided, half, HUGE_VALH)
+ARGMIN(fast_argmin_f32_strided, float, HUGE_VALF)
+ARGMIN(fast_argmin_f16_strided, half, HUGE_VALH)
+ARGMIN(fast_argmin_u32_strided, uint, 0xFFFFFFFF)
+ARGMAX(fast_argmax_f32_strided, float, -HUGE_VALF)
+ARGMAX(fast_argmax_f16_strided, half, -HUGE_VALH)
+ARGMAX(fast_argmax_u32_strided, uint, 0)
+
+SOFTMAX(softmax_f32, float)
+SOFTMAX(softmax_f16, half)
 #if __METAL_VERSION__ >= 310
-SOFTMAX(softmax_bfloat, bfloat)
+REDUCE(x + y, fast_sum_bf16, bfloat, 0)
+REDUCE(x * y, fast_mul_bf16, bfloat, 1)
+REDUCE(MAX(x, y), fast_max_bf16, bfloat, -HUGE_VALBF)
+REDUCE(MIN(x, y), fast_min_bf16, bfloat, HUGE_VALBF)
+ARGMIN(fast_argmin_bf16, bfloat, HUGE_VALBF)
+ARGMAX(fast_argmax_bf16, bfloat, -HUGE_VALBF)
+SOFTMAX(softmax_bf16, bfloat)
 #endif
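For reference, here is what the new strided REDUCE and the reworked SOFTMAX compute, sketched on the CPU (illustrative Rust, not part of the diff): the reduce folds every element reachable through (dims, strides) starting from the identity START, mirroring get_strided_index, and softmax subtracts the row max before exponentiating so exp stays finite even for f16/bf16 inputs.

// CPU reference for the strided reduction: `f` plays the role of FN and
// `start` the role of START in the Metal macro above.
fn reduce_strided_ref(
    src: &[f32],
    dims: &[usize],
    strides: &[usize],
    start: f32,
    f: impl Fn(f32, f32) -> f32,
) -> f32 {
    let numel: usize = dims.iter().product();
    (0..numel).fold(start, |acc, i| {
        // mirror get_strided_index: peel dimensions from the innermost out
        let mut idx = i;
        let mut off = 0;
        for (d, s) in dims.iter().zip(strides).rev() {
            off += (idx % d) * s;
            idx /= d;
        }
        f(acc, src[off])
    })
}

// CPU reference for the numerically stable softmax: max, exp-and-sum,
// then normalize, the same three passes as the kernel.
fn softmax_ref(row: &mut [f32]) {
    let max = row.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let sum: f32 = row
        .iter_mut()
        .map(|x| {
            *x = (*x - max).exp();
            *x
        })
        .sum();
    for x in row.iter_mut() {
        *x /= sum;
    }
}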
@@ -2,6 +2,13 @@ use super::*;
 use half::{bf16, f16};
 use metal::{CompileOptions, Device, MTLResourceOptions, MTLSize, NSUInteger};

+fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
+    let ptr = buffer.contents() as *const T;
+    assert!(!ptr.is_null());
+    let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
+    slice.to_vec()
+}
+
 fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer {
     let options = MTLResourceOptions::StorageModeManaged;
     let ptr = data.as_ptr() as *const core::ffi::c_void;
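read_to_vec replaces the old Buffer::read_to_vec extension used throughout these tests. It dereferences buffer.contents() directly, so it is only defined once the GPU work writing the buffer has completed and only for CPU-visible storage (StorageModeManaged here). The usage pattern in every test below (a sketch, not new API):

// The contents pointer is only meaningful after the command buffer finishes.
command_buffer.commit();
command_buffer.wait_until_completed();
let result: Vec<f32> = read_to_vec(&output, n);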
@@ -30,7 +37,8 @@ fn approx_bf16(v: Vec<bf16>, digits: i32) -> Vec<f32> {

 fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -47,12 +55,13 @@ fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    output.read_to_vec::<T>(v.len())
+    read_to_vec(&output, v.len())
 }

 fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let options = MTLResourceOptions::StorageModeManaged;
@@ -72,7 +81,7 @@ fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    output.read_to_vec::<T>(x.len())
+    read_to_vec(&output, x.len())
 }

 fn run_strided<T: Clone>(
@@ -87,7 +96,8 @@ fn run_strided<T: Clone>(
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
     let output = new_buffer(&device, v);
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     call_unary_strided(
         &device,
         command_buffer,
@@ -103,7 +113,7 @@ fn run_strided<T: Clone>(
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    output.read_to_vec::<T>(v.len())
+    read_to_vec(&output, v.len())
 }

 #[test]
@@ -240,7 +250,8 @@ fn binary_add_f32() {

 fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -261,7 +272,7 @@ fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    output.read_to_vec::<U>(v.len())
+    read_to_vec(&output, v.len())
 }

 #[test]
@@ -287,7 +298,8 @@ fn cast_u32_f32() {

 fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();

@@ -300,7 +312,7 @@ fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
         &device,
         command_buffer,
         &kernels,
-        "affine_float",
+        "affine_f32",
         size,
         &input,
         &output,
@@ -311,7 +323,7 @@ fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    output.read_to_vec::<T>(v.len())
+    read_to_vec(&output, v.len())
 }

 fn run_affine_strided<T: Clone>(
@@ -322,7 +334,8 @@ fn run_affine_strided<T: Clone>(
     add: f64,
 ) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();

@@ -333,7 +346,7 @@ fn run_affine_strided<T: Clone>(
         &device,
         command_buffer,
         &kernels,
-        "affine_float_strided",
+        "affine_f32_strided",
         shape,
         &input,
         strides,
@@ -347,7 +360,7 @@ fn run_affine_strided<T: Clone>(
     command_buffer.wait_until_completed();

     let len: usize = shape.iter().product();
-    output.read_to_vec::<T>(len)
+    read_to_vec(&output, len)
 }

 #[test]
@@ -450,7 +463,8 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
         _ => unimplemented!(),
     };

-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     call_index_select(
         &device,
         &command_buffer,
@@ -468,7 +482,7 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    dst_buffer.read_to_vec::<T>(dst_el)
+    read_to_vec(&dst_buffer, dst_el)
 }

 #[test]
@@ -534,7 +548,7 @@ fn index_add() {
     let expected = vec![
         2.0, 3.0, 4.0, 1.0, 1.0, 1.0, 8.0, 9.0, 10.0, 1.0, 1.0, 1.0, 5.0, 6.0, 7.0,
     ];
-    let result = outputs_buffer.read_to_vec::<f32>(right.len());
+    let result: Vec<f32> = read_to_vec(&outputs_buffer, right.len());
     assert_eq!(result, expected);
 }

@@ -552,19 +566,23 @@ fn cos_f16() {

 fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);

     let options = MTLResourceOptions::StorageModeManaged;
     let output = device.new_buffer((out_length * core::mem::size_of::<T>()) as u64, options);
-    call_reduce_contiguous(
+    let dims = vec![v.len()];
+    let strides = vec![1];
+    call_reduce_strided(
         &device,
         command_buffer,
         &kernels,
         name,
-        v.len(),
+        &dims,
+        &strides,
         out_length,
         &input,
         0,
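Note that the contiguous entry point disappears from the tests: a flat buffer is just the 1-D strided case, since with dims = [v.len()] and strides = [1] the kernel's get_strided_index(i, ...) reduces to i, so call_reduce_strided reproduces the old contiguous behavior exactly.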
@@ -574,12 +592,13 @@ fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    output.read_to_vec::<T>(out_length)
+    read_to_vec(&output, out_length)
 }

 fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -592,13 +611,14 @@ fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
         v.len(),
         last_dim,
         &input,
+        0,
         &output,
     )
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    output.read_to_vec::<T>(v.len())
+    read_to_vec(&output, v.len())
 }

 #[test]
@@ -606,7 +626,7 @@ fn reduce_sum() {
     let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
     let out_length = 1;

-    let results = run_reduce(&v, out_length, "fast_sum_float");
+    let results = run_reduce(&v, out_length, "fast_sum_f32_strided");
     assert_eq!(approx(results, 4), vec![21.0]);
 }

@@ -615,7 +635,7 @@ fn reduce_sum2() {
     let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
     let out_length = 2;

-    let results = run_reduce(&v, out_length, "fast_sum_float");
+    let results = run_reduce(&v, out_length, "fast_sum_f32_strided");
     assert_eq!(approx(results, 4), vec![6.0, 15.0]);
 }

@@ -623,15 +643,33 @@
 fn softmax() {
     let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
     let last_dim = 6;
-    let results = run_softmax(&v, last_dim, "softmax_float");
+    let results = run_softmax(&v, last_dim, "softmax_f32");
     assert_eq!(
         approx(results, 4),
         vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
     );

+    let last_dim = 4096;
+    let n = 200;
+    let mut v = vec![0.0; n * last_dim];
+    for i in 0..n {
+        v[i * last_dim] = 20.0;
+    }
+    let results = run_softmax(&v, last_dim, "softmax_f32");
+    let results = approx(results, 4);
+    println!("{results:?}");
+    assert_eq!(
+        results.iter().map(|&s| s.round() as usize).sum::<usize>(),
+        n
+    );
+    assert_eq!(results[0], 1.0);
+    assert_eq!(results[1], 0.0);
+    assert_eq!(results[last_dim], 1.0);
+    assert_eq!(results[2 * last_dim], 1.0);
+
     let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0];
     let last_dim = 6;
-    let results = run_softmax(&v, last_dim, "softmax_float");
+    let results = run_softmax(&v, last_dim, "softmax_f32");
     assert_eq!(
         approx(results, 4),
         vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
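The new large-input check is easy to verify by hand: each row of length 4096 holds one 20.0 spike among 4095 zeros, so the spike's probability is e^20 / (e^20 + 4095) which is about 1 - 8.4e-6, and every zero entry maps to about 2.1e-9. Rounded to four digits that is exactly one 1.0 and 4095 zeros per row, so the per-row sums and the spot checks at results[0], results[last_dim] and results[2 * last_dim] all follow.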
@@ -639,7 +677,7 @@ fn softmax() {

     let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
     let last_dim = 3;
-    let results = run_softmax(&v, last_dim, "softmax_float");
+    let results = run_softmax(&v, last_dim, "softmax_f32");
     assert_eq!(
         approx(results, 4),
         vec![0.0900, 0.2447, 0.6652, 0.0900, 0.2447, 0.6652]
@@ -650,7 +688,7 @@ fn softmax() {
         .map(|v| f16::from_f32(*v))
         .collect::<Vec<_>>();
     let last_dim = 6;
-    let results = run_softmax(&v, last_dim, "softmax_half");
+    let results = run_softmax(&v, last_dim, "softmax_f16");
     assert_eq!(
         approx_f16(results, 4),
         vec![0.0043, 0.0116, 0.0316, 0.0858, 0.2332, 0.6338]
@@ -661,7 +699,7 @@ fn softmax() {
         .map(|v| bf16::from_f32(*v))
         .collect::<Vec<_>>();
     let last_dim = 6;
-    let results = run_softmax(&v, last_dim, "softmax_bfloat");
+    let results = run_softmax(&v, last_dim, "softmax_bf16");
     assert_eq!(
         approx_bf16(results, 4),
         vec![0.0043, 0.0116, 0.0315, 0.0859, 0.2324, 0.6328]
@@ -679,7 +717,8 @@ fn run_where_cond<I: Clone, T: Clone>(
     name: &'static str,
 ) -> Vec<T> {
     let device = device();
-    let kernels = Kernels::new();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let options = MTLResourceOptions::StorageModeManaged;
@@ -720,7 +759,7 @@ fn run_where_cond<I: Clone, T: Clone>(
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    output.read_to_vec::<T>(length)
+    read_to_vec(&output, length)
 }

 #[test]
@@ -744,3 +783,93 @@ fn where_cond() {
     );
     assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]);
 }
+
+fn run_gemm<T: Clone>(
+    (b, m, n, k): (usize, usize, usize, usize),
+    lhs: &[T],
+    lhs_stride: Vec<usize>,
+    lhs_offset: usize,
+    rhs: &[T],
+    rhs_stride: Vec<usize>,
+    rhs_offset: usize,
+) -> Vec<T> {
+    let device = device();
+    let fence = device.new_fence();
+    let kernels = Kernels::new(fence);
+    let command_queue = device.new_command_queue();
+    let command_buffer = command_queue.new_command_buffer();
+    let options = MTLResourceOptions::StorageModeManaged;
+
+    let lhs = device.new_buffer_with_data(
+        lhs.as_ptr() as *const core::ffi::c_void,
+        std::mem::size_of_val(lhs) as u64,
+        options,
+    );
+    let rhs = device.new_buffer_with_data(
+        rhs.as_ptr() as *const core::ffi::c_void,
+        std::mem::size_of_val(rhs) as u64,
+        options,
+    );
+    let length = b * m * n;
+    let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options);
+    call_gemm(
+        &device,
+        command_buffer,
+        &kernels,
+        "sgemm",
+        (b, m, n, k),
+        &lhs_stride,
+        lhs_offset,
+        &lhs,
+        &rhs_stride,
+        rhs_offset,
+        &rhs,
+        &output,
+    )
+    .unwrap();
+    command_buffer.commit();
+    command_buffer.wait_until_completed();
+
+    read_to_vec(&output, length)
+}
+
+#[test]
+fn gemm() {
+    let (b, m, n, k) = (1, 2, 4, 3);
+    let lhs_stride = vec![m * k, k, 1];
+    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
+    let rhs_stride = vec![n * k, n, 1];
+    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
+    let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
+    assert_eq!(
+        approx(results, 4),
+        vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
+    );
+
+    let (b, m, n, k) = (2, 2, 4, 3);
+    let lhs_stride = vec![m * k, k, 1];
+    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
+    let rhs_stride = vec![n * k, n, 1];
+    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
+    let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
+    assert_eq!(
+        approx(results, 4),
+        vec![
+            20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0, 344.0, 365.0, 386.0, 407.0, 488.0,
+            518.0, 548.0, 578.0
+        ]
+    );
+
+    // OFFSET
+    let (b, m, n, k) = (2, 2, 4, 3);
+    let lhs_stride = vec![m * k, k, 1];
+    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
+    let rhs_stride = vec![n * k, n, 1];
+    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
+    // Manually set batch_size=1 and offset 12 elements * 4 the number of bytes for f32
+    let results = run_gemm((1, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 12 * 4);
+    assert_eq!(
+        approx(results, 4),
+        vec![56.0, 59.0, 62.0, 65.0, 200.0, 212.0, 224.0, 236.0]
+    );
+}
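Sanity-checking the expected values: with the row-major layouts above, lhs is [[0, 1, 2], [3, 4, 5]] and rhs is [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], so the first output is 0*0 + 1*4 + 2*8 = 20.0, matching the vector. In the offset case, 12 * 4 bytes skips exactly one 3x4 f32 rhs matrix, so batch 0's lhs is multiplied by batch 1's rhs [[12..23]]: 0*12 + 1*16 + 2*20 = 56.0, the first expected value there.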
@@ -87,11 +87,11 @@ kernel void FN_NAME_STRIDED( \
 }

 #define UNARY_OP(NAME) \
-UNARY(NAME, float, NAME##_float, NAME##_float_strided); \
-UNARY(NAME, half, NAME##_half, NAME##_half_strided);
+UNARY(NAME, float, NAME##_f32, NAME##_f32_strided); \
+UNARY(NAME, half, NAME##_f16, NAME##_f16_strided);

 #define BFLOAT_UNARY_OP(NAME) \
-UNARY(NAME, bfloat, NAME##_bfloat, NAME##_bfloat_strided);
+UNARY(NAME, bfloat, NAME##_bf16, NAME##_bf16_strided);


 UNARY_OP(cos)
@@ -108,8 +108,8 @@ UNARY_OP(round)
 UNARY_OP(gelu_erf)
 UNARY_OP(erf)
 UNARY_OP(tanh)
-UNARY(id, float, copy_float, copy_float_strided)
-UNARY(id, half, copy_half, copy_half_strided)
+UNARY(id, float, copy_f32, copy_f32_strided)
+UNARY(id, half, copy_f16, copy_f16_strided)
 UNARY(id, uint8_t, copy_u8, copy_u8_strided)
 UNARY(id, uint32_t, copy_u32, copy_u32_strided)

@@ -129,5 +129,5 @@ BFLOAT_UNARY_OP(gelu_erf)
 BFLOAT_UNARY_OP(erf)
 BFLOAT_UNARY_OP(tanh)

-UNARY(id, bfloat, copy_bfloat, copy_bfloat_strided)
+UNARY(id, bfloat, copy_bf16, copy_bf16_strided)
 #endif
@@ -210,36 +210,35 @@ impl candle::CustomOp1 for SoftmaxLastDim {
     ) -> Result<(candle::MetalStorage, Shape)> {
         use candle::{backend::BackendStorage, DType};
         let device = storage.device();
-        let command_buffer = device.command_buffer();
+        let command_buffer = device.command_buffer()?;
         let kernels = device.kernels();
         let name = match storage.dtype() {
-            DType::F32 => "softmax_float",
-            DType::F16 => "softmax_half",
-            DType::BF16 => "softmax_bfloat",
+            DType::F32 => "softmax_f32",
+            DType::F16 => "softmax_f16",
+            DType::BF16 => "softmax_bf16",
             dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"),
         };

         let n = layout.stride().len();
-        if !(layout.stride()[n - 1] == 1 && layout.start_offset() == 0) {
+        if !(layout.is_contiguous() && layout.stride()[n - 1] == 1) {
             candle::bail!("Non contiguous softmax-last-dim is not implemented");
         }

         let last_dim = layout.dims()[layout.shape().rank() - 1];
         let elem_count = layout.shape().elem_count();
-        let mut output = device.new_buffer(elem_count, storage.dtype(), "softmax");
+        let output = device.new_buffer(elem_count, storage.dtype(), "softmax")?;
         candle_metal_kernels::call_last_softmax(
             device.metal_device(),
             &command_buffer,
-            &kernels,
+            kernels,
             name,
             elem_count,
             last_dim,
             storage.buffer(),
-            &mut output,
+            layout.start_offset() * storage.dtype().size_in_bytes(),
+            &output,
         )
         .unwrap();
-        command_buffer.commit();
-        output.did_modify_range(metal::NSRange::new(0, output.length()));
         let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype());
         Ok((newstorage, layout.shape().clone()))
     }
|
@ -142,10 +142,10 @@ impl RotaryEmbedding {
|
|||||||
.to_dtype(DType::F32)?
|
.to_dtype(DType::F32)?
|
||||||
.reshape((max_seq_len, 1))?;
|
.reshape((max_seq_len, 1))?;
|
||||||
let freqs = t.matmul(&inv_freq)?;
|
let freqs = t.matmul(&inv_freq)?;
|
||||||
let sin = freqs.sin()?;
|
Ok(Self {
|
||||||
let cos = freqs.cos()?;
|
sin: freqs.sin()?,
|
||||||
// todo!("{}", sin);
|
cos: freqs.cos()?,
|
||||||
Ok(Self { sin, cos })
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn apply_rotary_emb_qkv(
|
fn apply_rotary_emb_qkv(
|
||||||
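This is a pure cleanup; the values are unchanged. The debugging test deleted further down was checking exactly this construction: with dim = 8, inv_freq is [1, 0.1, 0.01, 0.001] (since 10000^(-2/8) = 0.1), the outer product with t gives freqs[1][1] = 1 * 0.1 = 0.1, and sin(0.1) is approximately 0.099833, the value the removed assertion encodes. The refactor only folds sin/cos construction into a single Ok(Self { .. }).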
@@ -273,10 +273,6 @@ impl MHA {
     }

     fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
-        // let view = xs.to_string();
-        // if view.contains("NaN") {
-        //     panic!("NaN");
-        // }
         let _enter = self.span.enter();
         let (b_size, seq_len, _n_embd) = xs.dims3()?;
         let qkv = self
@@ -412,38 +408,3 @@ impl MixFormerSequentialForCausalLM {
         self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    #[test]
-    fn test_rotary() {
-        let dev = Device::new_metal(0).unwrap();
-        for i in 0..10000 {
-            let dim = 8;
-            let max_seq_len = 12;
-            let inv_freq: Vec<_> = (0..dim)
-                .step_by(2)
-                .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
-                .collect();
-            let inv_freq_len = inv_freq.len();
-            let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), &dev).unwrap();
-            let t = Tensor::arange(0u32, max_seq_len as u32, &dev)
-                .unwrap()
-                .to_dtype(DType::F32)
-                .unwrap()
-                .reshape((max_seq_len, 1))
-                .unwrap();
-            let x: f32 = t.i((1, 0)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 1.0);
-            let x: f32 = inv_freq.i((0, 1)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 0.1);
-            let freqs = t.matmul(&inv_freq).unwrap();
-            let x: f32 = freqs.i((1, 1)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 0.1);
-            let sin = freqs.sin().unwrap().contiguous().unwrap();
-            let x: f32 = sin.i((1, 1)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 0.099833414);
-        }
-    }
-}