Compare commits

..

30 Commits

Author SHA1 Message Date
03641293ee Clippy pass. 2023-12-18 15:22:43 +01:00
064ba17bd7 Remove print. 2023-12-18 11:04:16 +01:00
e8ee253ee0 Missing cast. 2023-12-18 11:01:18 +01:00
8bd3d6b94b Index add. 2023-12-18 10:46:01 +01:00
6a3ca7da0c Scatter add. 2023-12-18 10:32:22 +01:00
586b6f6fff Adding gather op. 2023-12-17 23:34:12 +01:00
e4b0cc59f5 Adding CMP 2023-12-17 22:32:25 +01:00
0a6e0a8c9a Implement randn (CPU-> device) 2023-12-17 19:09:08 +01:00
972903021c Finish reduce kernels. 2023-12-17 19:07:00 +01:00
6bc92e63cb Addressing a lot of comments. 2023-12-15 13:06:04 +01:00
aa04015098 Remove unwrap(). 2023-12-15 12:23:28 +01:00
8b5059e951 Remove test file. 2023-12-15 11:55:30 +01:00
26540641c1 Renamed all kernel names. 2023-12-15 11:24:47 +01:00
34d83377f6 Better error message on older macos 2023-12-15 11:18:54 +01:00
77197379cc More cleanup. 2023-12-15 11:17:05 +01:00
916a8c5464 Revert candle-transformers. 2023-12-15 11:15:21 +01:00
243e83f2b9 Adding a bunch of docs !
Co-authored-by: Ivar Flakstad <69173633+ivarflakstad@users.noreply.github.com>
2023-12-15 11:03:05 +01:00
cf27868b57 More cleanup. 2023-12-15 01:44:22 +01:00
40c3e1bd5a cleanup. 2023-12-15 01:41:14 +01:00
ece4c69a68 Fixing softmax. 2023-12-15 01:35:08 +01:00
4eeaf205d6 Fix softmax for long sequences (missing barrier). 2023-12-14 19:37:03 +01:00
f419a38e1a Fix use resource. 2023-12-14 16:52:37 +01:00
361f2ad2af Working with merging encoders and using fences. 2023-12-14 16:05:33 +01:00
931432ed55 Fixing tests + matmul from MFA 2023-12-13 16:58:36 +01:00
0404a3eb5b Removed MPSMatrix entirely (buggy). 2023-12-13 16:21:48 +01:00
a9d0657432 Better version ? 2023-12-13 12:09:20 +01:00
87dc559817 Lots of updates including some stack of command buffers. 2023-12-12 17:41:56 +01:00
da0af3cb3e Merge pull request #1408 from jbochi/metal_gelu2
Fix NaN errors for Gelu in Metal
2023-12-09 19:46:36 +01:00
803ac8405b Put back affine strided tests
Co-Authored-By: Ivar Flakstad <69173633+ivarflakstad@users.noreply.github.com>
2023-12-06 17:04:15 +01:00
6e25822d4f Fix gelu for large x 2023-12-06 09:59:44 -05:00
17 changed files with 2213 additions and 1276 deletions

View File

@ -61,7 +61,7 @@ tracing-subscriber = "0.3.7"
wav = "1.0.0" wav = "1.0.0"
yoke = { version = "0.7.2", features = ["derive"] } yoke = { version = "0.7.2", features = ["derive"] }
zip = { version = "0.6.6", default-features = false } zip = { version = "0.6.6", default-features = false }
metal = { version = "0.27.1", features = ["mps"], package="candle-metal" } metal = { version = "0.27.0", features = ["mps"]}
[profile.release-with-debug] [profile.release-with-debug]
inherits = "release" inherits = "release"

View File

@ -201,10 +201,9 @@ impl Device {
Ok(Storage::Cuda(storage)) Ok(Storage::Cuda(storage))
} }
} }
Device::Metal(_device) => { Device::Metal(device) => {
// let storage = device.rand_uniform(shape, dtype, lo, up)?; let storage = device.rand_uniform(shape, dtype, lo, up)?;
// Ok(Storage::Metal(storage)) Ok(Storage::Metal(storage))
crate::bail!("Metal rand_uniform not implemented")
} }
} }
} }
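
The hunk above wires `rand_uniform` into the Metal backend instead of bailing out. A minimal sketch of what this enables through the public API, assuming a Metal-capable build and the usual `Device::new_metal` / `Tensor::rand` signatures (crate used here under its external name `candle_core`):

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Hypothetical usage sketch: draw uniform samples on the Metal device.
    let device = Device::new_metal(0)?;                  // first Metal device
    let t = Tensor::rand(0f32, 1f32, (2, 3), &device)?;  // U(0, 1) samples, f32
    println!("{t}");
    Ok(())
}
```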

File diff suppressed because it is too large.

View File

@ -1863,10 +1863,7 @@ impl Tensor {
Storage::Metal(metal.storage_from_cpu_storage(storage)?) Storage::Metal(metal.storage_from_cpu_storage(storage)?)
} }
(Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?), (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
(Storage::Metal(storage), Device::Cpu) => { (Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
Storage::Cpu(storage.to_cpu_storage()?)
}
(Storage::Cuda(storage), Device::Cuda(cuda)) => { (Storage::Cuda(storage), Device::Cuda(cuda)) => {
// TODO: Avoid passing through the cpu storage here, especially if the gpu ids // TODO: Avoid passing through the cpu storage here, especially if the gpu ids
// are the same. // are the same.
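
This hunk drops a leftover debug `println!` so the Metal-to-CPU transfer path mirrors the CUDA one. A hedged round-trip sketch, under the same assumptions as above, for checking device results on the host:

```rust
use candle_core::{Device, Tensor};

fn roundtrip() -> candle_core::Result<()> {
    let metal = Device::new_metal(0)?;
    let on_gpu = Tensor::new(&[1f32, 2., 3.], &metal)?;
    // Copy back to host memory; no debug output is printed along the way anymore.
    let on_cpu = on_gpu.to_device(&Device::Cpu)?;
    assert_eq!(on_cpu.to_vec1::<f32>()?, vec![1., 2., 3.]);
    Ok(())
}
```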

View File

@ -10,7 +10,7 @@ categories = ["science"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
[dependencies] [dependencies]
metal = { version = "0.27.1", features = ["mps"], package="candle-metal" } metal = { version = "0.27.0", features = ["mps"]}
once_cell = "1.18.0" once_cell = "1.18.0"
thiserror = "1" thiserror = "1"
tracing = "0.1.37" tracing = "0.1.37"

View File

@ -29,9 +29,7 @@ kernel void FN_NAME( \
if (id >= dim) { \ if (id >= dim) { \
return; \ return; \
} \ } \
const TYPENAME m = TYPENAME(mul); \ output[id] = TYPENAME(float(input[id]) * mul + add); \
const TYPENAME a = TYPENAME(add); \
output[id] = input[id] * m + a; \
} \ } \
kernel void FN_NAME##_strided( \ kernel void FN_NAME##_strided( \
constant size_t &dim, \ constant size_t &dim, \
@ -47,15 +45,80 @@ kernel void FN_NAME##_strided( \
if (id >= dim) { \ if (id >= dim) { \
return; \ return; \
} \ } \
const TYPENAME m = TYPENAME(mul); \ output[id] = TYPENAME(float(input[get_strided_index(id, num_dims, dims, strides)]) * mul + add); \
const TYPENAME a = TYPENAME(add); \ }
output[id] = input[get_strided_index(id, num_dims, dims, strides)] * m + a; \
#define POWF(FN_NAME, TYPENAME) \
kernel void FN_NAME( \
constant size_t &dim, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
output[id] = TYPENAME(pow(input[id], TYPENAME(mul))); \
} \
kernel void FN_NAME##_strided( \
constant size_t &dim, \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
output[id] = TYPENAME(pow(input[get_strided_index(id, num_dims, dims, strides)], TYPENAME(mul))); \
}
#define ELU(FN_NAME, TYPENAME) \
kernel void FN_NAME( \
constant size_t &dim, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
const TYPENAME x = input[id]; \
output[id] = TYPENAME((x > 0)?x: mul * exp(x - 1)); \
} \
kernel void FN_NAME##_strided( \
constant size_t &dim, \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
const TYPENAME x = input[get_strided_index(id, num_dims, dims, strides)]; \
output[id] = TYPENAME((x > 0)?x: mul * exp(x - 1)); \
} \ } \
AFFINE(affine_float, float)
AFFINE(affine_half, half) AFFINE(affine_f32, float)
AFFINE(affine_f16, half)
POWF(powf_f32, float)
POWF(powf_f16, half)
ELU(elu_f32, float)
ELU(elu_f16, half)
#if __METAL_VERSION__ >= 310 #if __METAL_VERSION__ >= 310
AFFINE(affine_bfloat, bfloat); AFFINE(affine_bf16, bfloat);
POWF(powf_bf16, bfloat);
ELU(elu_bf16, bfloat);
#endif #endif
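
The affine kernels are renamed to the `_f32`/`_f16`/`_bf16` suffix convention and joined by `powf` and `elu` entry points. At the tensor level these back the usual element-wise ops; a small sketch, assuming the standard `affine`, `powf`, and `elu` methods on `Tensor`:

```rust
use candle_core::{Device, Tensor};

fn elementwise() -> candle_core::Result<()> {
    let device = Device::new_metal(0)?;
    let x = Tensor::new(&[-1f32, 0., 2.], &device)?;
    let y = x.affine(3.0, 1.0)?; // 3 * x + 1, should map to affine_f32 on Metal
    let p = x.powf(2.0)?;        // x^2, powf_f32
    let e = x.elu(1.0)?;         // ELU with alpha = 1, elu_f32
    println!("{y} {p} {e}");
    Ok(())
}
```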

View File

@ -1,5 +1,8 @@
#include <metal_stdlib> #include <metal_stdlib>
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
METAL_FUNC uint get_strided_index( METAL_FUNC uint get_strided_index(
uint idx, uint idx,
constant size_t &num_dims, constant size_t &num_dims,
@ -22,15 +25,15 @@ kernel void FN_NAME( \
constant size_t &dim, \ constant size_t &dim, \
device const TYPENAME *left, \ device const TYPENAME *left, \
device const TYPENAME *right, \ device const TYPENAME *right, \
device TYPENAME *output, \ device OUT_TYPENAME *output, \
uint thread_position_in_grid [[ thread_position_in_grid ]] \ uint tid [[ thread_position_in_grid ]] \
) { \ ) { \
if (thread_position_in_grid >= dim) { \ if (tid >= dim) { \
return; \ return; \
} \ } \
TYPENAME x = left[thread_position_in_grid]; \ TYPENAME x = left[tid]; \
TYPENAME y = right[thread_position_in_grid]; \ TYPENAME y = right[tid]; \
output[thread_position_in_grid] = OUT_TYPENAME(FN); \ output[tid] = OUT_TYPENAME(FN); \
}\ }\
kernel void FN_NAME_STRIDED( \ kernel void FN_NAME_STRIDED( \
constant size_t &dim, \ constant size_t &dim, \
@ -40,33 +43,48 @@ kernel void FN_NAME_STRIDED( \
constant size_t *right_strides, \ constant size_t *right_strides, \
device const TYPENAME *left, \ device const TYPENAME *left, \
device const TYPENAME *right, \ device const TYPENAME *right, \
device TYPENAME *output, \ device OUT_TYPENAME *output, \
uint thread_position_in_grid [[ thread_position_in_grid ]] \ uint tid [[ thread_position_in_grid ]] \
) { \ ) { \
if (thread_position_in_grid >= dim) { \ if (tid >= dim) { \
return; \ return; \
} \ } \
TYPENAME x = left[get_strided_index(thread_position_in_grid, num_dims, dims, left_strides)]; \ TYPENAME x = left[get_strided_index(tid, num_dims, dims, left_strides)]; \
TYPENAME y = right[get_strided_index(thread_position_in_grid, num_dims, dims, right_strides)]; \ TYPENAME y = right[get_strided_index(tid, num_dims, dims, right_strides)]; \
output[thread_position_in_grid] = OUT_TYPENAME(FN); \ output[tid] = OUT_TYPENAME(FN); \
} }
#define BINARY_OP(FN, NAME) \ #define BINARY_OP(FN, NAME) \
BINARY(FN, float, float, NAME##_float, NAME##_float_strided); \ BINARY(FN, float, float, NAME##_f32, NAME##_f32_strided); \
BINARY(FN, half, half, NAME##_half, NAME##_half_strided); BINARY(FN, half, half, NAME##_f16, NAME##_f16_strided);
#define BFLOAT_BINARY_OP(FN, NAME) \ #define BFLOAT_BINARY_OP(FN, NAME) \
BINARY(FN, bfloat, bfloat, NAME##_bfloat, NAME##_bfloat_strided); BINARY(FN, bfloat, bfloat, NAME##_bf16, NAME##_bf16_strided);
#define BINARY_OP_OUT(NAME, FN) \
BINARY(FN, float, uint8_t, NAME##_f32, NAME##_f32_strided); \
BINARY(FN, half, uint8_t, NAME##_f16, NAME##_f16_strided);
BINARY_OP(x + y, add) BINARY_OP(x + y, add)
BINARY_OP(x - y, sub) BINARY_OP(x - y, sub)
BINARY_OP(x * y, mul) BINARY_OP(x * y, mul)
BINARY_OP(x / y, div) BINARY_OP(x / y, div)
BINARY_OP(MIN(x, y), min)
BINARY_OP(MAX(x, y), max)
BINARY_OP_OUT(eq, x == y)
BINARY_OP_OUT(ne, x != y)
BINARY_OP_OUT(le, x <= y)
BINARY_OP_OUT(lt, x < y)
BINARY_OP_OUT(ge, x >= y)
BINARY_OP_OUT(gt, x > y)
#if __METAL_VERSION__ >= 310 #if __METAL_VERSION__ >= 310
BFLOAT_BINARY_OP(x + y, add) BFLOAT_BINARY_OP(x + y, add)
BFLOAT_BINARY_OP(x - y, sub) BFLOAT_BINARY_OP(x - y, sub)
BFLOAT_BINARY_OP(x * y, mul) BFLOAT_BINARY_OP(x * y, mul)
BFLOAT_BINARY_OP(x / y, div) BFLOAT_BINARY_OP(x / y, div)
BFLOAT_BINARY_OP(MIN(x, y), min)
BFLOAT_BINARY_OP(MAX(x, y), max)
#endif #endif
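
Besides the `_f32`/`_f16` renames, the binary kernels gain `min`/`max` and a `BINARY_OP_OUT` variant whose comparison kernels (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) write `uint8_t` masks into an `OUT_TYPENAME` output buffer. A hedged sketch of the corresponding tensor-level ops, assuming the comparison methods return `U8` tensors:

```rust
use candle_core::{DType, Device, Tensor};

fn comparisons() -> candle_core::Result<()> {
    let device = Device::new_metal(0)?;
    let a = Tensor::new(&[1f32, 2., 3.], &device)?;
    let b = Tensor::new(&[3f32, 2., 1.], &device)?;
    let mask = a.lt(&b)?;                // element-wise x < y as a 0/1 mask
    assert_eq!(mask.dtype(), DType::U8); // matches the uint8_t kernel output
    let lo = a.minimum(&b)?;             // element-wise min(x, y)
    let hi = a.maximum(&b)?;             // element-wise max(x, y)
    println!("{mask} {lo} {hi}");
    Ok(())
}
```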

View File

@ -48,6 +48,7 @@ kernel void FN_NAME_STRIDED( \
CAST(cast_u32_f32, cast_u32_f32_strided, uint32_t, float) CAST(cast_u32_f32, cast_u32_f32_strided, uint32_t, float)
CAST(cast_u32_u8, cast_u32_u8_strided, uint32_t, uint8_t) CAST(cast_u32_u8, cast_u32_u8_strided, uint32_t, uint8_t)
CAST(cast_u8_u32, cast_u8_u32_strided, uint8_t, uint32_t) CAST(cast_u8_u32, cast_u8_u32_strided, uint8_t, uint32_t)
CAST(cast_u8_f32, cast_u8_f32_strided, uint8_t, float)
CAST(cast_f16_f32, cast_f16_f32_strided, half, float) CAST(cast_f16_f32, cast_f16_f32_strided, half, float)
CAST(cast_f32_f16, cast_f32_f16_strided, float, half) CAST(cast_f32_f16, cast_f32_f16_strided, float, half)

View File

@ -1,6 +1,34 @@
#include <metal_stdlib> #include <metal_stdlib>
using namespace metal; using namespace metal;
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void index(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &ids_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t id_i = (tid / right_size) % ids_size;
const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1));
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size / ids_size;
/*
// Force prevent out of bounds indexing
// since there doesn't seem to be a good way to force crash
// No need to check for zero we're only allowing unsized.
*/
const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i;
output[tid] = input[src_i];
}
# define INDEX_OP(NAME, INDEX_TYPENAME, TYPENAME) \ # define INDEX_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \ kernel void NAME( \
constant size_t &dst_size, \ constant size_t &dst_size, \
@ -11,93 +39,160 @@ kernel void NAME( \
const device TYPENAME *input, \ const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \ const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \ device TYPENAME *output, \
uint gid [[ thread_position_in_grid ]] \ uint tid [[ thread_position_in_grid ]] \
) { \ ) { \
if (gid >= dst_size) { \ index<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \
return; \
} \
const size_t id_i = (gid / right_size) % ids_size; \
const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1)); \
const size_t right_rank_i = gid % right_size; \
const size_t left_rank_i = gid / right_size / ids_size; \
/* \
// Force prevent out of bounds indexing \
// since there doesn't seem to be a good way to force crash \
// No need to check for zero we're only allowing unsized. \
*/ \
const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i; \
output[gid] = input[src_i]; \
} }
template<typename TYPENAME, typename INDEX_TYPENAME>
template <typename T, typename I> METAL_FUNC void gather(
void index_add( constant size_t &dst_size,
device I *ids [[buffer(0)]], constant size_t &left_size,
device T *inp [[buffer(1)]], constant size_t &src_dim_size,
device T *out [[buffer(2)]], constant size_t &right_size,
constant size_t &ids_size,
constant uint &ids_dim_size, const device TYPENAME *input,
constant uint &left_size, const device INDEX_TYPENAME *input_ids,
constant uint &dst_dim_size, device TYPENAME *output,
constant uint &right_size, uint tid [[ thread_position_in_grid ]]
uint gid [[ thread_position_in_grid ]] \
) { ) {
if (tid >= dst_size) {
if (gid >= left_size * right_size) {
return; return;
} }
const INDEX_TYPENAME input_i = input_ids[tid];
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size / ids_size;
const size_t src_i = (left_rank_i * src_dim_size + input_i) * right_size + right_rank_i;
output[tid] = input[src_i];
}
const uint i = gid; # define GATHER_OP(NAME, INDEX_TYPENAME, TYPENAME) \
const uint pre = i / right_size; kernel void NAME( \
const uint post = i % right_size; constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &ids_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
gather<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \
}
for (uint j = 0; j < ids_dim_size; j++) { template<typename TYPENAME, typename INDEX_TYPENAME>
const uint idx = ids[j]; METAL_FUNC void scatter_add(
const uint src_i = (pre * ids_dim_size + j) * right_size + post; constant size_t &dst_size,
const uint dst_i = (pre * dst_dim_size + idx) * right_size + post; constant size_t &left_size,
out[dst_i] += inp[src_i]; constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const INDEX_TYPENAME idx = input_ids[src_i];
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] += input[src_i];
} }
} }
#define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \ # define SCATTER_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void FN_NAME( \ kernel void NAME( \
device INDEX_TYPENAME *ids [[buffer(0)]], \ constant size_t &dst_size, \
device TYPENAME *inp [[buffer(1)]], \ constant size_t &left_size, \
device TYPENAME *out [[buffer(2)]], \ constant size_t &src_dim_size, \
constant uint &ids_dim_size, \ constant size_t &right_size, \
constant uint &left_size, \ constant size_t &dst_dim_size, \
constant uint &dst_dim_size, \ const device TYPENAME *input, \
constant uint &right_size, \ const device INDEX_TYPENAME *input_ids, \
uint gid [[ thread_position_in_grid ]] \ device TYPENAME *output, \
) { index_add<TYPENAME, INDEX_TYPENAME>(ids, inp, out, ids_dim_size, left_size, dst_dim_size, right_size, gid); } \ uint tid [[ thread_position_in_grid ]] \
) { \
scatter_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, input, input_ids, output, tid); \
}
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void index_add(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
constant size_t &ids_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const INDEX_TYPENAME idx = input_ids[j];
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] += input[src_i];
}
}
# define INDEX_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &dst_dim_size, \
constant size_t &ids_dim_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
index_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, ids_dim_size, input, input_ids, output, tid); \
}
INDEX_OP(is_u32_f32, uint, float) INDEX_OP(is_u32_f32, uint, float)
INDEX_OP(is_u32_f16, uint, half) INDEX_OP(is_u32_f16, uint, half)
GATHER_OP(gather_u32_f32, uint, float)
GATHER_OP(gather_u32_f16, uint, half)
SCATTER_ADD_OP(sa_u32_f32, uint, float)
SCATTER_ADD_OP(sa_u32_f16, uint, half)
#if __METAL_VERSION__ >= 310 #if __METAL_VERSION__ >= 310
IA_OP(bfloat, int64_t, ia_i64_bf16) INDEX_ADD_OP(ia_i64_bf16, int64_t, bfloat)
IA_OP(bfloat, uint32_t, ia_u32_bf16) INDEX_ADD_OP(ia_u32_bf16, uint32_t, bfloat)
IA_OP(bfloat, uint8_t, ia_u8_bf16) INDEX_ADD_OP(ia_u8_bf16, uint8_t, bfloat)
#endif #endif
IA_OP(half, uint32_t, ia_u32_f16) INDEX_ADD_OP(ia_u32_f16, uint32_t, half)
IA_OP(half, uint8_t, ia_u8_f16) INDEX_ADD_OP(ia_u8_f16, uint8_t, half)
IA_OP(float, int64_t, ia_i64_f32) INDEX_ADD_OP(ia_i64_f32, int64_t, float)
IA_OP(uint8_t, int64_t, ia_i64_u8) INDEX_ADD_OP(ia_i64_u8, int64_t, uint8_t)
IA_OP(int64_t, int64_t, ia_i64_i64) INDEX_ADD_OP(ia_i64_i64, int64_t, int64_t)
IA_OP(uint32_t, int64_t, ia_i64_u32) INDEX_ADD_OP(ia_i64_u32, int64_t, uint32_t)
IA_OP(float, uint32_t, ia_u32_f32) INDEX_ADD_OP(ia_u32_f32, uint32_t, float)
IA_OP(uint8_t, uint32_t, ia_u32_u8) INDEX_ADD_OP(ia_u32_u8, uint32_t, uint8_t)
IA_OP(int64_t, uint32_t, ia_u32_i64) INDEX_ADD_OP(ia_u32_i64, uint32_t, int64_t)
IA_OP(uint32_t, uint32_t, ia_u32_u32) INDEX_ADD_OP(ia_u32_u32, uint32_t, uint32_t)
IA_OP(float, uint8_t, ia_u8_f32) INDEX_ADD_OP(ia_u8_f32, uint8_t, float)
IA_OP(uint8_t, uint8_t, ia_u8_u8) INDEX_ADD_OP(ia_u8_u8, uint8_t, uint8_t)
IA_OP(uint32_t, uint8_t, ia_u8_u32) INDEX_ADD_OP(ia_u8_u32, uint8_t, uint32_t)
IA_OP(int64_t, uint8_t, ia_u8_i64) INDEX_ADD_OP(ia_u8_i64, uint8_t, int64_t)
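
This file now routes `index_select` plus the new `gather` and `scatter_add` kernels through shared `METAL_FUNC` templates, and the old `IA_OP` instantiations become `INDEX_ADD_OP` while keeping the `ia_*` kernel names. A small sketch of the tensor-level semantics these kernels implement, assuming the usual candle indexing API:

```rust
use candle_core::{Device, Tensor};

fn indexing() -> candle_core::Result<()> {
    let device = Device::new_metal(0)?;
    let src = Tensor::new(&[[1f32, 2.], [3., 4.], [5., 6.]], &device)?;
    let ids = Tensor::new(&[2u32, 0], &device)?;

    // index_select: pick whole rows 2 and 0 -> [[5, 6], [1, 2]]
    let picked = src.index_select(&ids, 0)?;

    // gather: per-position lookup along dim 1 -> [[2], [3], [6]]
    let cols = Tensor::new(&[[1u32], [0], [1]], &device)?;
    let gathered = src.gather(&cols, 1)?;

    // index_add: accumulate `picked`'s rows into rows 2 and 0 of a zero tensor
    let dst = Tensor::zeros((3, 2), src.dtype(), &device)?;
    let added = dst.index_add(&ids, &picked, 0)?;
    println!("{picked} {gathered} {added}");
    Ok(())
}
```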

File diff suppressed because it is too large.

View File

@ -2,6 +2,7 @@
using namespace metal; using namespace metal;
#define MAX(x, y) ((x) > (y) ? (x) : (y)) #define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
METAL_FUNC uint get_strided_index( METAL_FUNC uint get_strided_index(
uint idx, uint idx,
@ -18,11 +19,132 @@ METAL_FUNC uint get_strided_index(
return strided_i; return strided_i;
} }
constant int THREADGROUP_SIZE = 1024; constant int THREADGROUP_SIZE = 2048;
# define REDUCE(FN, NAME, T) \
#define ARGMIN(NAME, T, MAXVALUE) \
kernel void NAME( \ kernel void NAME( \
constant size_t &src_numel, \ constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device uint *dst, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
\
threadgroup T shared_memory[THREADGROUP_SIZE]; \
threadgroup uint shared_indices[THREADGROUP_SIZE]; \
\
shared_memory[tid] = MAXVALUE; \
shared_indices[tid] = 0xFFFFFFFF; \
bool notset = true; \
/* \
// Elements summed in this block range from dst_id * el_to_sum_per_block \
// to (dst_id + 1) * el_to_sum_per_block. \
*/ \
size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = start_idx + el_to_sum_per_block; \
size_t idx = start_idx + tid; \
while (idx < stop_idx) { \
/* \
// TODO: Fast version for the contiguous case. \
*/ \
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
if (notset || src[strided_i] < shared_memory[tid]) { \
shared_memory[tid] = src[strided_i]; \
/* Assume that the reduction takes place over the last dimension which is contiguous. */ \
shared_indices[tid] = idx % dims[num_dims - 1]; \
notset = false; \
} \
idx += block_dim; \
} \
\
threadgroup_barrier(mem_flags::mem_none); \
\
/* \
// reduction in shared memory \
*/ \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s && shared_memory[tid + s] < shared_memory[tid]) { \
shared_indices[tid] = shared_indices[tid + s]; \
shared_memory[tid] = shared_memory[tid + s]; \
} \
threadgroup_barrier(mem_flags::mem_none); \
} \
\
if (tid == 0){ \
dst[dst_id] = shared_indices[0]; \
} \
} \
#define ARGMAX(NAME, T, MINVALUE) \
kernel void NAME( \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device uint *dst, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
\
threadgroup T shared_memory[THREADGROUP_SIZE]; \
threadgroup uint shared_indices[THREADGROUP_SIZE]; \
\
shared_memory[tid] = MINVALUE; \
shared_indices[tid] = 0xFFFFFFFF; \
/* \
// Elements summed in this block range from dst_id * el_to_sum_per_block \
// to (dst_id + 1) * el_to_sum_per_block. \
*/ \
size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = start_idx + el_to_sum_per_block; \
size_t idx = start_idx + tid; \
bool notset = true; \
while (idx < stop_idx) { \
/* \
// TODO: Fast version for the contiguous case. \
*/ \
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
if (notset || shared_memory[tid] < src[strided_i]) { \
shared_memory[tid] = src[strided_i]; \
shared_indices[tid] = idx % dims[num_dims - 1]; \
notset = false; \
} \
idx += block_dim; \
} \
\
threadgroup_barrier(mem_flags::mem_none); \
\
/* \
// reduction in shared memory \
*/ \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s && shared_memory[tid + s] > shared_memory[tid]) { \
shared_indices[tid] = shared_indices[tid + s]; \
shared_memory[tid] = shared_memory[tid + s]; \
} \
threadgroup_barrier(mem_flags::mem_none); \
} \
\
if (tid == 0){ \
dst[dst_id] = shared_indices[0]; \
} \
} \
#define REDUCE(FN, NAME, T, START) \
kernel void NAME( \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant size_t &el_to_sum_per_block, \ constant size_t &el_to_sum_per_block, \
device const T *src, \ device const T *src, \
device T *dst, \ device T *dst, \
@ -32,23 +154,23 @@ kernel void NAME( \
uint block_dim [[ threads_per_threadgroup ]] \ uint block_dim [[ threads_per_threadgroup ]] \
) { \ ) { \
\ \
threadgroup float shared_memory[THREADGROUP_SIZE]; \ threadgroup T shared_memory[THREADGROUP_SIZE]; \
\ \
shared_memory[tid] = 0; \ shared_memory[tid] = START; \
/* \ /* \
// Elements summed in this block range from dst_id * el_to_sum_per_block \ // Elements summed in this block range from dst_id * el_to_sum_per_block \
// to (dst_id + 1) * el_to_sum_per_block. \ // to (dst_id + 1) * el_to_sum_per_block. \
*/ \ */ \
size_t start_idx = dst_id * el_to_sum_per_block; \ size_t start_idx = dst_id * el_to_sum_per_block; \
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \ size_t stop_idx = start_idx + el_to_sum_per_block; \
size_t idx = start_idx + tid; \ size_t idx = start_idx + tid; \
while (idx < stop_idx) { \ while (idx < stop_idx) { \
/* \ /* \
// TODO: Fast version for the contiguous case. \ // TODO: Fast version for the contiguous case. \
// size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
*/ \ */ \
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); \
T x = shared_memory[tid]; \ T x = shared_memory[tid]; \
T y = src[idx]; \ T y = src[strided_i]; \
shared_memory[tid] = FN; \ shared_memory[tid] = FN; \
idx += block_dim; \ idx += block_dim; \
} \ } \
@ -71,10 +193,6 @@ kernel void NAME( \
} \ } \
REDUCE(x + y, fast_sum_float, float)
REDUCE(x * y, fast_mul_float, float)
REDUCE(max(x, y), fast_max_float, float)
#define SOFTMAX(NAME, T) \ #define SOFTMAX(NAME, T) \
kernel void NAME( \ kernel void NAME( \
constant size_t &src_numel, \ constant size_t &src_numel, \
@ -93,12 +211,13 @@ kernel void NAME(
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \ size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \
size_t idx = start_idx + tid; \ size_t idx = start_idx + tid; \
\ \
threadgroup_barrier(mem_flags::mem_threadgroup); \
\ \
float tmp = -INFINITY; \
while (idx < stop_idx) { \ while (idx < stop_idx) { \
shared_memory[tid] = MAX(shared_memory[tid], src[idx]); \ tmp = MAX(tmp, float(src[idx])); \
idx += block_dim; \ idx += block_dim; \
} \ } \
shared_memory[tid] = tmp; \
\ \
threadgroup_barrier(mem_flags::mem_threadgroup); \ threadgroup_barrier(mem_flags::mem_threadgroup); \
\ \
@ -106,21 +225,26 @@ kernel void NAME(
if (tid < s) { \ if (tid < s) { \
shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]); \ shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]); \
} \ } \
threadgroup_barrier(mem_flags::mem_threadgroup); \
} \ } \
\ \
/* wait for shared_memory[0] to be filled */ \
threadgroup_barrier(mem_flags::mem_threadgroup); \ threadgroup_barrier(mem_flags::mem_threadgroup); \
\ \
float _max = shared_memory[0]; \ float _max = shared_memory[0]; \
\ \
/* prevent tid=0 from overwriting _max before other threads have written it */ \
threadgroup_barrier(mem_flags::mem_threadgroup); \
shared_memory[tid] = 0; \ shared_memory[tid] = 0; \
\ \
idx = start_idx + tid; \ idx = start_idx + tid; \
while (idx < stop_idx) { \ while (idx < stop_idx) { \
const T val = T(exp(src[idx] - _max)); \ const float val = exp(float(src[idx]) - _max); \
dst[idx] = val; \ dst[idx] = T(val); \
shared_memory[tid] += val; \ shared_memory[tid] += val; \
idx += block_dim; \ idx += block_dim; \
} \ } \
threadgroup_barrier(mem_flags::mem_threadgroup); \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \ for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s) { \ if (tid < s) { \
shared_memory[tid] += shared_memory[tid + s]; \ shared_memory[tid] += shared_memory[tid + s]; \
@ -128,7 +252,7 @@ kernel void NAME(
threadgroup_barrier(mem_flags::mem_threadgroup); \ threadgroup_barrier(mem_flags::mem_threadgroup); \
} \ } \
\ \
const T inv_acc = T(1/shared_memory[0]); \ const T inv_acc = T(1.0/shared_memory[0]); \
idx = start_idx + tid; \ idx = start_idx + tid; \
while (idx < stop_idx) { \ while (idx < stop_idx) { \
dst[idx] *= inv_acc; \ dst[idx] *= inv_acc; \
@ -136,8 +260,33 @@ kernel void NAME(
} \ } \
} \ } \
SOFTMAX(softmax_float, float) REDUCE(x + y, fast_sum_f32_strided, float, 0)
SOFTMAX(softmax_half, half) REDUCE(x + y, fast_sum_u32_strided, uint, 0)
REDUCE(x + y, fast_sum_f16_strided, half, 0)
REDUCE(x * y, fast_mul_f32_strided, float, 1)
REDUCE(x * y, fast_mul_u32_strided, uint, 1)
REDUCE(x * y, fast_mul_f16_strided, half, 1)
REDUCE(MAX(x, y), fast_max_f32_strided, float, -HUGE_VALF)
REDUCE(MAX(x, y), fast_max_u32_strided, uint, 0)
REDUCE(MAX(x, y), fast_max_f16_strided, half, -HUGE_VALH)
REDUCE(MIN(x, y), fast_min_f32_strided, float, HUGE_VALF)
REDUCE(MIN(x, y), fast_min_u32_strided, uint, 0xFFFFFFFF)
REDUCE(MIN(x, y), fast_min_f16_strided, half, HUGE_VALH)
ARGMIN(fast_argmin_f32_strided, float, HUGE_VALF)
ARGMIN(fast_argmin_f16_strided, half, HUGE_VALH)
ARGMIN(fast_argmin_u32_strided, uint, 0xFFFFFFFF)
ARGMAX(fast_argmax_f32_strided, float, -HUGE_VALF)
ARGMAX(fast_argmax_f16_strided, half, -HUGE_VALH)
ARGMAX(fast_argmax_u32_strided, uint, 0)
SOFTMAX(softmax_f32, float)
SOFTMAX(softmax_f16, half)
#if __METAL_VERSION__ >= 310 #if __METAL_VERSION__ >= 310
SOFTMAX(softmax_bfloat, bfloat) REDUCE(x + y, fast_sum_bf16, bfloat, 0)
REDUCE(x * y, fast_mul_bf16, bfloat, 1)
REDUCE(MAX(x, y), fast_max_bf16, bfloat, -HUGE_VALBF)
REDUCE(MIN(x, y), fast_min_bf16, bfloat, HUGE_VALBF)
ARGMIN(fast_argmin_bf16, bfloat, HUGE_VALBF)
ARGMAX(fast_argmax_bf16, bfloat, -HUGE_VALBF)
SOFTMAX(softmax_bf16, bfloat)
#endif #endif
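
The reduction kernels switch to strided inputs (`fast_*_strided`), take per-type neutral start values, and gain `ARGMIN`/`ARGMAX` variants that track indices in a second threadgroup array. The softmax kernel now accumulates the running maximum and sums in `float` and adds the barriers referenced by the "Fix softmax for long sequences (missing barrier)" commit, so thread 0 cannot overwrite the shared maximum before the other threads have read it. At the tensor level, a hedged sketch of the ops these kernels serve, assuming the usual reduction methods:

```rust
use candle_core::{Device, Tensor};

fn reductions() -> candle_core::Result<()> {
    let device = Device::new_metal(0)?;
    let t = Tensor::new(&[[1f32, 5., 3.], [4., 2., 6.]], &device)?;
    let sums = t.sum_keepdim(1)?;    // fast_sum_f32_strided -> [[9], [12]]
    let maxs = t.max_keepdim(1)?;    // fast_max_f32_strided -> [[5], [6]]
    let amin = t.argmin_keepdim(1)?; // fast_argmin_f32_strided -> [[0], [1]]
    println!("{sums} {maxs} {amin}");
    Ok(())
}
```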

View File

@ -1,211 +0,0 @@
import Metal
import MetalPerformanceShadersGraph
let type = MTLDataType.float;
let dataType = type;
var B = 2;
var M = 2;
var N = 4;
var K = 3;
var A_trans = false;
var B_trans = false;
var D_trans = false;
var alpha = Float(1.0);
var beta = Float(0.0);
var batched = B > 1;
var fused_activation = false;
var fused_bias = false;
let constants = MTLFunctionConstantValues()
constants.setConstantValue(&M, type: .uint, index: 0)
constants.setConstantValue(&N, type: .uint, index: 1)
constants.setConstantValue(&K, type: .uint, index: 2)
constants.setConstantValue(&A_trans, type: .bool, index: 10)
constants.setConstantValue(&B_trans, type: .bool, index: 11)
constants.setConstantValue(&D_trans, type: .bool, index: 13)
constants.setConstantValue(&alpha, type: .float, index: 20)
constants.setConstantValue(&beta, type: .float, index: 21)
constants.setConstantValue(&batched, type: .bool, index: 100)
constants.setConstantValue(&fused_activation, type: .bool, index: 101)
constants.setConstantValue(&fused_bias, type: .bool, index: 50001)
var M_simd = UInt16(16)
var N_simd = UInt16(16)
var K_simd = UInt16(32)
var M_splits = UInt16(2)
var N_splits = UInt16(2)
constants.setConstantValue(&M_simd, type: .ushort, index: 200)
constants.setConstantValue(&N_simd, type: .ushort, index: 201)
constants.setConstantValue(&K_simd, type: .ushort, index: 202)
constants.setConstantValue(&M_splits, type: .ushort, index: 210)
constants.setConstantValue(&N_splits, type: .ushort, index: 211)
let M_group = M_simd * M_splits
let N_group = N_simd * N_splits
// Satisfy Metal API validation.
#if DEBUG
do {
var garbage: SIMD4<UInt64> = .zero
constants.setConstantValue(&garbage, type: .bool, index: 102)
constants.setConstantValue(&garbage, type: .bool, index: 103)
constants.setConstantValue(&garbage, type: .bool, index: 113)
constants.setConstantValue(&garbage, type: .bool, index: 50000)
}
#endif
print(constants)
let device = MTLCopyAllDevices().first!
device.shouldMaximizeConcurrentCompilation = true
var libraryURL = URL.init(string: "/Users/nicolas/src/candle/candle-metal-kernels/")!;
libraryURL.append(component: "src")
libraryURL.append(component: "libMetalFlashAttention.metallib")
let library = try! device.makeLibrary(URL: libraryURL)
var name: String
switch dataType {
case .half: name = "hgemm"
case .float: name = "sgemm"
default: fatalError()
}
let function = try! library.makeFunction(
name: name, constantValues: constants)
let A_block_length = M_group * K_simd
let B_block_length = K_simd * N_group
var blockElements = A_block_length + B_block_length;
if (M % 8 != 0) && (N % 8 != 0) {
let C_block_length = M_group * N_group;
blockElements = max(C_block_length, blockElements)
}
if fused_bias {
if D_trans {
blockElements = max(blockElements, M_group)
} else {
blockElements = max(blockElements, N_group)
}
}
// let blockBytes = blockElements * UInt16(dataType.size)
let elementSize = 4
let blockBytes = blockElements * UInt16(elementSize)
func ceilDivide(target: Int, granularity: UInt16) -> Int {
(target + Int(granularity) - 1) / Int(granularity)
}
var gridSize = MTLSize(
width: ceilDivide(target: N, granularity: N_group),
height: ceilDivide(target: M, granularity: M_group),
depth: 1)
let groupSize = MTLSize(
width: Int(32 * M_splits * N_splits),
height: 1,
depth: 1)
let commandQueue = device.makeCommandQueue()!
let commandBuffer = commandQueue.makeCommandBuffer()!
let encoder = commandBuffer.makeComputeCommandEncoder(dispatchType: MTLDispatchType.serial)!
let pipeline = try device.makeComputePipelineState(function: function)
let threadgroupMemoryLength = blockBytes;
print(threadgroupMemoryLength)
encoder.setComputePipelineState(pipeline)
encoder.setThreadgroupMemoryLength(Int(threadgroupMemoryLength), index: 0)
let rowsA = M;
let columnsA = K;
let rowsB = K;
let columnsB = N;
let rowsC = M;
let columnsC = N;
var arrayA = [Float](repeating: 0, count: B * rowsA * columnsA)
var arrayB = [Float](repeating: 0, count: B * rowsB * columnsB)
var arrayC = [Float](repeating: 0, count: B * rowsC * columnsC)
for i in 0..<arrayA.count {
arrayA[i] = Float(i)
}
for i in 0..<arrayB.count {
arrayB[i] = Float(i)
}
let bufferA = device.makeBuffer(bytes: arrayA, length: B * rowsA * columnsA * MemoryLayout<Float>.stride, options: [])
let bufferB = device.makeBuffer(bytes: arrayB, length: B * rowsB * columnsB * MemoryLayout<Float>.stride, options: [])
let bufferC = device.makeBuffer(length: B * rowsC * columnsC * MemoryLayout<Float>.stride, options: [])
print(arrayA)
print(arrayB)
encoder.setBuffer(bufferA, offset: 0, index: 0)
encoder.setBuffer(bufferB, offset: 0, index: 1)
encoder.setBuffer(bufferC, offset: 0, index: 2)
var gridZ: Int = B
if batched{
func byteStride(shape: [Int]) -> Int {
let rank = shape.count
var output = elementSize * shape[rank - 2] * shape[rank - 1]
if shape.dropLast(2).reduce(1, *) == 1 {
output = 0
}
return output
}
let byteStrideA = M*K*elementSize
let byteStrideB = N*K*elementSize
let byteStrideC = M*N*elementSize
let byteStrideD = 0
// if let shapeD = tensors.d?.shape {
// let rank = shapeD.count
// byteStrideD = elementSize * shapeD[rank - 1]
// if shapeD.dropLast(1).reduce(1, *) == 1 {
// byteStrideD = 0
// }
// }
withUnsafeTemporaryAllocation(
of: SIMD4<UInt64>.self, capacity: gridZ
) { buffer in
for i in 0..<buffer.count {
buffer[i] = SIMD4(
UInt64(truncatingIfNeeded: i * byteStrideA),
UInt64(truncatingIfNeeded: i * byteStrideB),
UInt64(truncatingIfNeeded: i * byteStrideC),
UInt64(truncatingIfNeeded: i * byteStrideD))
}
let bufferLength = buffer.count * MemoryLayout<SIMD4<UInt64>>.stride
assert(MemoryLayout<SIMD4<UInt64>>.stride == 8 * 4)
encoder.setBytes(buffer.baseAddress!, length: bufferLength, index: 10)
print("BATCHED")
print(buffer)
}
}
gridSize.depth = gridZ
print(gridSize, groupSize)
encoder.dispatchThreadgroups(
gridSize, threadsPerThreadgroup: groupSize
)
encoder.endEncoding()
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
var contents = bufferC!.contents();
var count = B * rowsA * columnsB;
var typedPointer = contents.bindMemory(to: Float.self, capacity: count)
var bufferedPointer = UnsafeBufferPointer(start: typedPointer, count: count)
print(Array(bufferedPointer))

View File

@ -2,6 +2,13 @@ use super::*;
use half::{bf16, f16}; use half::{bf16, f16};
use metal::{CompileOptions, Device, MTLResourceOptions, MTLSize, NSUInteger}; use metal::{CompileOptions, Device, MTLResourceOptions, MTLSize, NSUInteger};
fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
let ptr = buffer.contents() as *const T;
assert!(!ptr.is_null());
let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
slice.to_vec()
}
fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer { fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer {
let options = MTLResourceOptions::StorageModeManaged; let options = MTLResourceOptions::StorageModeManaged;
let ptr = data.as_ptr() as *const core::ffi::c_void; let ptr = data.as_ptr() as *const core::ffi::c_void;
@ -30,7 +37,8 @@ fn approx_bf16(v: Vec<bf16>, digits: i32) -> Vec<f32> {
fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> { fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v); let input = new_buffer(&device, v);
@ -47,12 +55,13 @@ fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
.unwrap(); .unwrap();
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len()) read_to_vec(&output, v.len())
} }
fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> { fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged; let options = MTLResourceOptions::StorageModeManaged;
@ -72,7 +81,7 @@ fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> V
.unwrap(); .unwrap();
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(x.len()) read_to_vec(&output, x.len())
} }
fn run_strided<T: Clone>( fn run_strided<T: Clone>(
@ -87,7 +96,8 @@ fn run_strided<T: Clone>(
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v); let input = new_buffer(&device, v);
let output = new_buffer(&device, v); let output = new_buffer(&device, v);
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
call_unary_strided( call_unary_strided(
&device, &device,
command_buffer, command_buffer,
@ -103,7 +113,7 @@ fn run_strided<T: Clone>(
.unwrap(); .unwrap();
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len()) read_to_vec(&output, v.len())
} }
#[test] #[test]
@ -205,6 +215,25 @@ fn cos_strided_random() {
); );
} }
#[test]
fn gelu_f16() {
let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let expected: Vec<f32> = vec![-0.0, -0.16, 0.0, 0.84, 1.96, 3.0, 10.0, 20.0];
let results = run(&v, unary::contiguous::gelu::HALF);
assert_eq!(approx_f16(results, 2), expected);
}
#[test]
fn gelu_f32() {
let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0];
let expected: Vec<f32> = vec![-0.0, -0.159, 0.0, 0.841, 1.955, 2.996, 10.0, 20.0];
let results = run(&v, unary::contiguous::gelu::FLOAT);
assert_eq!(approx(results, 3), expected);
}
#[test] #[test]
fn binary_add_f32() { fn binary_add_f32() {
let left = vec![1.0f32, 2.0, 3.0]; let left = vec![1.0f32, 2.0, 3.0];
@ -221,7 +250,8 @@ fn binary_add_f32() {
fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> { fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v); let input = new_buffer(&device, v);
@ -242,7 +272,7 @@ fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
.unwrap(); .unwrap();
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<U>(v.len()) read_to_vec(&output, v.len())
} }
#[test] #[test]
@ -268,7 +298,8 @@ fn cast_u32_f32() {
fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> { fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
@ -281,7 +312,7 @@ fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
&device, &device,
command_buffer, command_buffer,
&kernels, &kernels,
"affine_float", "affine_f32",
size, size,
&input, &input,
&output, &output,
@ -292,7 +323,7 @@ fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len()) read_to_vec(&output, v.len())
} }
fn run_affine_strided<T: Clone>( fn run_affine_strided<T: Clone>(
@ -303,7 +334,8 @@ fn run_affine_strided<T: Clone>(
add: f64, add: f64,
) -> Vec<T> { ) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
@ -314,7 +346,7 @@ fn run_affine_strided<T: Clone>(
&device, &device,
command_buffer, command_buffer,
&kernels, &kernels,
"affine_float_strided", "affine_f32_strided",
shape, shape,
&input, &input,
strides, strides,
@ -328,7 +360,7 @@ fn run_affine_strided<T: Clone>(
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
let len: usize = shape.iter().product(); let len: usize = shape.iter().product();
output.read_to_vec::<T>(len) read_to_vec(&output, len)
} }
#[test] #[test]
@ -431,7 +463,8 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
_ => unimplemented!(), _ => unimplemented!(),
}; };
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
call_index_select( call_index_select(
&device, &device,
&command_buffer, &command_buffer,
@ -449,7 +482,7 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
dst_buffer.read_to_vec::<T>(dst_el) read_to_vec(&dst_buffer, dst_el)
} }
#[test] #[test]
@ -515,7 +548,7 @@ fn index_add() {
let expected = vec![ let expected = vec![
2.0, 3.0, 4.0, 1.0, 1.0, 1.0, 8.0, 9.0, 10.0, 1.0, 1.0, 1.0, 5.0, 6.0, 7.0, 2.0, 3.0, 4.0, 1.0, 1.0, 1.0, 8.0, 9.0, 10.0, 1.0, 1.0, 1.0, 5.0, 6.0, 7.0,
]; ];
let result = outputs_buffer.read_to_vec::<f32>(right.len()); let result: Vec<f32> = read_to_vec(&outputs_buffer, right.len());
assert_eq!(result, expected); assert_eq!(result, expected);
} }
@ -527,25 +560,29 @@ fn cos_f16() {
.collect(); .collect();
let results = run(&v, unary::contiguous::cos::HALF); let results = run(&v, unary::contiguous::cos::HALF);
let expected: Vec<f16> = v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect(); let expected: Vec<f16> = v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect();
assert_eq!(approx_f16(results, 4), vec![0.5405, -0.4163, -0.9902]); assert_eq!(approx_f16(results, 2), vec![0.54, -0.42, -0.99]);
assert_eq!(approx_f16(expected, 4), vec![0.5405, -0.4163, -0.9902]); assert_eq!(approx_f16(expected, 2), vec![0.54, -0.42, -0.99]);
} }
fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> { fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v); let input = new_buffer(&device, v);
let options = MTLResourceOptions::StorageModeManaged; let options = MTLResourceOptions::StorageModeManaged;
let output = device.new_buffer((out_length * core::mem::size_of::<T>()) as u64, options); let output = device.new_buffer((out_length * core::mem::size_of::<T>()) as u64, options);
call_reduce_contiguous( let dims = vec![v.len()];
let strides = vec![1];
call_reduce_strided(
&device, &device,
command_buffer, command_buffer,
&kernels, &kernels,
name, name,
v.len(), &dims,
&strides,
out_length, out_length,
&input, &input,
0, 0,
@ -555,12 +592,13 @@ fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(out_length) read_to_vec(&output, out_length)
} }
fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> { fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v); let input = new_buffer(&device, v);
@ -573,13 +611,14 @@ fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'sta
v.len(), v.len(),
last_dim, last_dim,
&input, &input,
0,
&output, &output,
) )
.unwrap(); .unwrap();
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len()) read_to_vec(&output, v.len())
} }
#[test] #[test]
@ -587,7 +626,7 @@ fn reduce_sum() {
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let out_length = 1; let out_length = 1;
let results = run_reduce(&v, out_length, "fast_sum_float"); let results = run_reduce(&v, out_length, "fast_sum_f32_strided");
assert_eq!(approx(results, 4), vec![21.0]); assert_eq!(approx(results, 4), vec![21.0]);
} }
@ -596,7 +635,7 @@ fn reduce_sum2() {
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let out_length = 2; let out_length = 2;
let results = run_reduce(&v, out_length, "fast_sum_float"); let results = run_reduce(&v, out_length, "fast_sum_f32_strided");
assert_eq!(approx(results, 4), vec![6.0, 15.0]); assert_eq!(approx(results, 4), vec![6.0, 15.0]);
} }
@ -604,15 +643,33 @@ fn reduce_sum2() {
fn softmax() { fn softmax() {
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let last_dim = 6; let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_float"); let results = run_softmax(&v, last_dim, "softmax_f32");
assert_eq!( assert_eq!(
approx(results, 4), approx(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337] vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
); );
let last_dim = 4096;
let n = 200;
let mut v = vec![0.0; n * last_dim];
for i in 0..n {
v[i * last_dim] = 20.0;
}
let results = run_softmax(&v, last_dim, "softmax_f32");
let results = approx(results, 4);
println!("{results:?}");
assert_eq!(
results.iter().map(|&s| s.round() as usize).sum::<usize>(),
n
);
assert_eq!(results[0], 1.0);
assert_eq!(results[1], 0.0);
assert_eq!(results[last_dim], 1.0);
assert_eq!(results[2 * last_dim], 1.0);
let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0]; let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0];
let last_dim = 6; let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_float"); let results = run_softmax(&v, last_dim, "softmax_f32");
assert_eq!( assert_eq!(
approx(results, 4), approx(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337] vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
@ -620,7 +677,7 @@ fn softmax() {
let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; let v = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let last_dim = 3; let last_dim = 3;
let results = run_softmax(&v, last_dim, "softmax_float"); let results = run_softmax(&v, last_dim, "softmax_f32");
assert_eq!( assert_eq!(
approx(results, 4), approx(results, 4),
vec![0.0900, 0.2447, 0.6652, 0.0900, 0.2447, 0.6652] vec![0.0900, 0.2447, 0.6652, 0.0900, 0.2447, 0.6652]
@ -631,7 +688,7 @@ fn softmax() {
.map(|v| f16::from_f32(*v)) .map(|v| f16::from_f32(*v))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let last_dim = 6; let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_half"); let results = run_softmax(&v, last_dim, "softmax_f16");
assert_eq!( assert_eq!(
approx_f16(results, 4), approx_f16(results, 4),
vec![0.0043, 0.0116, 0.0316, 0.0858, 0.2332, 0.6338] vec![0.0043, 0.0116, 0.0316, 0.0858, 0.2332, 0.6338]
@ -642,7 +699,7 @@ fn softmax() {
.map(|v| bf16::from_f32(*v)) .map(|v| bf16::from_f32(*v))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let last_dim = 6; let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_bfloat"); let results = run_softmax(&v, last_dim, "softmax_bf16");
assert_eq!( assert_eq!(
approx_bf16(results, 4), approx_bf16(results, 4),
vec![0.0043, 0.0116, 0.0315, 0.0859, 0.2324, 0.6328] vec![0.0043, 0.0116, 0.0315, 0.0859, 0.2324, 0.6328]
@ -660,7 +717,8 @@ fn run_where_cond<I: Clone, T: Clone>(
name: &'static str, name: &'static str,
) -> Vec<T> { ) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged; let options = MTLResourceOptions::StorageModeManaged;
@ -701,7 +759,7 @@ fn run_where_cond<I: Clone, T: Clone>(
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(length) read_to_vec(&output, length)
} }
#[test] #[test]
@ -730,11 +788,14 @@ fn run_gemm<T: Clone>(
(b, m, n, k): (usize, usize, usize, usize), (b, m, n, k): (usize, usize, usize, usize),
lhs: &[T], lhs: &[T],
lhs_stride: Vec<usize>, lhs_stride: Vec<usize>,
lhs_offset: usize,
rhs: &[T], rhs: &[T],
rhs_stride: Vec<usize>, rhs_stride: Vec<usize>,
rhs_offset: usize,
) -> Vec<T> { ) -> Vec<T> {
let device = device(); let device = device();
let kernels = Kernels::new(); let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue(); let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer(); let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged; let options = MTLResourceOptions::StorageModeManaged;
@ -758,10 +819,10 @@ fn run_gemm<T: Clone>(
"sgemm", "sgemm",
(b, m, n, k), (b, m, n, k),
&lhs_stride, &lhs_stride,
0, lhs_offset,
&lhs, &lhs,
&rhs_stride, &rhs_stride,
0, rhs_offset,
&rhs, &rhs,
&output, &output,
) )
@ -769,7 +830,7 @@ fn run_gemm<T: Clone>(
command_buffer.commit(); command_buffer.commit();
command_buffer.wait_until_completed(); command_buffer.wait_until_completed();
output.read_to_vec::<T>(length) read_to_vec(&output, length)
} }
#[test] #[test]
@ -779,17 +840,18 @@ fn gemm() {
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect(); let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs_stride = vec![n * k, n, 1]; let rhs_stride = vec![n * k, n, 1];
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect(); let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
let results = run_gemm((b, m, n, k), &lhs, lhs_stride, &rhs, rhs_stride); let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
assert_eq!( assert_eq!(
approx(results, 4), approx(results, 4),
vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0] vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
); );
let (b, m, n, k) = (2, 2, 4, 3); let (b, m, n, k) = (2, 2, 4, 3);
let lhs_stride = vec![m * k, k, 1]; let lhs_stride = vec![m * k, k, 1];
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect(); let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs_stride = vec![n * k, n, 1]; let rhs_stride = vec![n * k, n, 1];
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect(); let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
let results = run_gemm((b, m, n, k), &lhs, lhs_stride, &rhs, rhs_stride); let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
assert_eq!( assert_eq!(
approx(results, 4), approx(results, 4),
vec![ vec![
@ -797,4 +859,17 @@ fn gemm() {
518.0, 548.0, 578.0 518.0, 548.0, 578.0
] ]
); );
// OFFSET
let (b, m, n, k) = (2, 2, 4, 3);
let lhs_stride = vec![m * k, k, 1];
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs_stride = vec![n * k, n, 1];
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
// Manually set batch_size=1 and offset 12 elements * 4 the number of bytes for f32
let results = run_gemm((1, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 12 * 4);
assert_eq!(
approx(results, 4),
vec![56.0, 59.0, 62.0, 65.0, 200.0, 212.0, 224.0, 236.0]
);
} }
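
Across the test suite, `output.read_to_vec::<T>(n)` is replaced by a local `read_to_vec` helper over `Buffer::contents()`, and every `Kernels::new()` now takes a fence created with `device.new_fence()` (the "merging encoders and using fences" commit); the gemm tests also grow explicit lhs/rhs byte offsets. A minimal sketch of that setup and of reading a managed buffer back on the host, assuming the `metal` crate API used elsewhere in this diff:

```rust
use candle_metal_kernels::Kernels;
use metal::{Buffer, Device, MTLResourceOptions};

// Same helper the tests now use: view the buffer contents as a typed slice.
fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
    let ptr = buffer.contents() as *const T;
    assert!(!ptr.is_null());
    unsafe { std::slice::from_raw_parts(ptr, n) }.to_vec()
}

fn setup() {
    let device = Device::system_default().expect("no Metal device");
    let fence = device.new_fence();   // shared fence handed to every kernel call
    let _kernels = Kernels::new(fence);
    let data = [1f32, 2., 3.];
    let buffer = device.new_buffer_with_data(
        data.as_ptr() as *const core::ffi::c_void,
        std::mem::size_of_val(&data) as u64,
        MTLResourceOptions::StorageModeManaged,
    );
    assert_eq!(read_to_vec::<f32>(&buffer, data.len()), vec![1., 2., 3.]);
}
```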

View File

@ -42,9 +42,14 @@ template <typename T> METAL_FUNC T erf(T in){
return T(sign*y); return T(sign*y);
} }
template <typename T> METAL_FUNC T id(T in){ return in; } template <typename T> METAL_FUNC T id(T in) { return in; }
template <typename T> METAL_FUNC T gelu_erf(T x){ return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2); } template <typename T> METAL_FUNC T gelu_erf(T x) {
template <typename T> METAL_FUNC T gelu(T x){ return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2);
}
template <typename T> METAL_FUNC T gelu(T x) {
if (x > 5) {
return x;
}
T x_sq = x * x; T x_sq = x * x;
T x_cube = x_sq * x; T x_cube = x_sq * x;
T alpha = x + static_cast<T>(0.044715) * x_cube; T alpha = x + static_cast<T>(0.044715) * x_cube;
@ -64,7 +69,7 @@ kernel void FN_NAME( \
if (thread_position_in_grid >= dim) { \ if (thread_position_in_grid >= dim) { \
return; \ return; \
} \ } \
output[thread_position_in_grid] = TYPENAME(FN(input[thread_position_in_grid])); \ output[thread_position_in_grid] = TYPENAME(FN(float(input[thread_position_in_grid]))); \
}\ }\
kernel void FN_NAME_STRIDED( \ kernel void FN_NAME_STRIDED( \
constant size_t &dim, \ constant size_t &dim, \
@ -78,15 +83,15 @@ kernel void FN_NAME_STRIDED( \
if (thread_position_in_grid >= dim) { \ if (thread_position_in_grid >= dim) { \
return; \ return; \
} \ } \
output[thread_position_in_grid] = TYPENAME(FN(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)])); \ output[thread_position_in_grid] = TYPENAME(FN(float(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)]))); \
} }
#define UNARY_OP(NAME) \ #define UNARY_OP(NAME) \
UNARY(NAME, float, NAME##_float, NAME##_float_strided); \ UNARY(NAME, float, NAME##_f32, NAME##_f32_strided); \
UNARY(NAME, half, NAME##_half, NAME##_half_strided); UNARY(NAME, half, NAME##_f16, NAME##_f16_strided);
#define BFLOAT_UNARY_OP(NAME) \ #define BFLOAT_UNARY_OP(NAME) \
UNARY(NAME, bfloat, NAME##_bfloat, NAME##_bfloat_strided); UNARY(NAME, bfloat, NAME##_bf16, NAME##_bf16_strided);
UNARY_OP(cos) UNARY_OP(cos)
@ -102,8 +107,9 @@ UNARY_OP(floor)
UNARY_OP(round) UNARY_OP(round)
UNARY_OP(gelu_erf) UNARY_OP(gelu_erf)
UNARY_OP(erf) UNARY_OP(erf)
UNARY(id, float, copy_float, copy_float_strided) UNARY_OP(tanh)
UNARY(id, half, copy_half, copy_half_strided) UNARY(id, float, copy_f32, copy_f32_strided)
UNARY(id, half, copy_f16, copy_f16_strided)
UNARY(id, uint8_t, copy_u8, copy_u8_strided) UNARY(id, uint8_t, copy_u8, copy_u8_strided)
UNARY(id, uint32_t, copy_u32, copy_u32_strided) UNARY(id, uint32_t, copy_u32, copy_u32_strided)
@ -121,6 +127,7 @@ BFLOAT_UNARY_OP(floor)
BFLOAT_UNARY_OP(round) BFLOAT_UNARY_OP(round)
BFLOAT_UNARY_OP(gelu_erf) BFLOAT_UNARY_OP(gelu_erf)
BFLOAT_UNARY_OP(erf) BFLOAT_UNARY_OP(erf)
BFLOAT_UNARY_OP(tanh)
UNARY(id, bfloat, copy_bfloat, copy_bfloat_strided) UNARY(id, bfloat, copy_bf16, copy_bf16_strided)
#endif #endif
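
The unary kernels pick up the `_f32`/`_f16`/`_bf16` suffixes, a `tanh` op, and the gelu fix from #1408: the approximation is evaluated in `float` and short-circuits to `x` for `x > 5`, where the tanh-style formula would otherwise overflow in half precision and produce NaNs (the new `gelu_f16` test expects 10.0 and 20.0 back unchanged). A quick CPU-side reference check of that cutoff, using the same tanh approximation the kernel implements:

```rust
// Check that the tanh-based GELU approximation is numerically indistinguishable
// from the identity once x is a bit above 5, so the early `return x` is safe.
fn gelu_tanh_approx(x: f32) -> f32 {
    let alpha = x + 0.044715 * x * x * x;
    0.5 * x * (1.0 + ((2.0 / std::f32::consts::PI).sqrt() * alpha).tanh())
}

fn main() {
    for x in [1.0f32, 3.0, 5.0, 10.0, 20.0] {
        println!("{x:>5}: approx = {:.6}", gelu_tanh_approx(x));
    }
    assert!((gelu_tanh_approx(5.0) - 5.0).abs() < 1e-4);
}
```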

View File

@ -19,6 +19,7 @@ num-traits = { workspace = true }
rayon = { workspace = true } rayon = { workspace = true }
safetensors = { workspace = true } safetensors = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
metal = { workspace = true, optional = true }
candle-metal-kernels = { path = "../candle-metal-kernels", version = "0.3.0", optional = true } candle-metal-kernels = { path = "../candle-metal-kernels", version = "0.3.0", optional = true }
[dev-dependencies] [dev-dependencies]
@ -30,4 +31,4 @@ default = []
accelerate = ["dep:accelerate-src", "candle/accelerate"] accelerate = ["dep:accelerate-src", "candle/accelerate"]
cuda = ["candle/cuda"] cuda = ["candle/cuda"]
mkl = ["dep:intel-mkl-src", "candle/mkl"] mkl = ["dep:intel-mkl-src", "candle/mkl"]
metal = ["candle/metal", "dep:candle-metal-kernels"] metal = ["candle/metal", "dep:candle-metal-kernels", "dep:metal"]

View File

@ -210,32 +210,33 @@ impl candle::CustomOp1 for SoftmaxLastDim {
) -> Result<(candle::MetalStorage, Shape)> { ) -> Result<(candle::MetalStorage, Shape)> {
use candle::{backend::BackendStorage, DType}; use candle::{backend::BackendStorage, DType};
let device = storage.device(); let device = storage.device();
let command_buffer = device.command_buffer(); let command_buffer = device.command_buffer()?;
let kernels = device.kernels(); let kernels = device.kernels();
let name = match storage.dtype() { let name = match storage.dtype() {
DType::F32 => "softmax_float", DType::F32 => "softmax_f32",
DType::F16 => "softmax_half", DType::F16 => "softmax_f16",
DType::BF16 => "softmax_bfloat", DType::BF16 => "softmax_bf16",
dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"), dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"),
}; };
let n = layout.stride().len(); let n = layout.stride().len();
if !(layout.stride()[n - 1] == 1 && layout.start_offset() == 0) { if !(layout.is_contiguous() && layout.stride()[n - 1] == 1) {
candle::bail!("Non contiguous softmax-last-dim is not implemented"); candle::bail!("Non contiguous softmax-last-dim is not implemented");
} }
let last_dim = layout.dims()[layout.shape().rank() - 1]; let last_dim = layout.dims()[layout.shape().rank() - 1];
let elem_count = layout.shape().elem_count(); let elem_count = layout.shape().elem_count();
let mut output = device.new_buffer(elem_count, storage.dtype()); let output = device.new_buffer(elem_count, storage.dtype(), "softmax")?;
candle_metal_kernels::call_last_softmax( candle_metal_kernels::call_last_softmax(
device.metal_device(), device.metal_device(),
&command_buffer, &command_buffer,
&kernels, kernels,
name, name,
elem_count, elem_count,
last_dim, last_dim,
storage.buffer(), storage.buffer(),
&mut output, layout.start_offset() * storage.dtype().size_in_bytes(),
&output,
) )
.unwrap(); .unwrap();
let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype()); let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype());
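
The Metal path of `SoftmaxLastDim` now propagates command-buffer errors (`device.command_buffer()?`), uses the renamed `softmax_f32`/`softmax_f16`/`softmax_bf16` kernels, checks `layout.is_contiguous()` instead of requiring a zero start offset, and forwards the byte offset to `call_last_softmax`. From user code this is reached through the usual op; a hedged sketch:

```rust
use candle_core::{Device, Tensor};
use candle_nn::ops::softmax_last_dim;

fn softmax_on_metal() -> candle_core::Result<()> {
    let device = Device::new_metal(0)?;
    let logits = Tensor::new(&[[1f32, 2., 3.], [3., 2., 1.]], &device)?;
    // Runs the softmax_f32 Metal kernel over the last dimension; rows sum to 1.
    let probs = softmax_last_dim(&logits)?;
    println!("{probs}");
    Ok(())
}
```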

View File

@ -31,3 +31,4 @@ accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"]
cuda = ["candle/cuda", "candle-nn/cuda"] cuda = ["candle/cuda", "candle-nn/cuda"]
flash-attn = ["cuda", "dep:candle-flash-attn"] flash-attn = ["cuda", "dep:candle-flash-attn"]
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"] mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
metal = ["candle/metal", "candle-nn/metal"]