Compare commits


9 Commits

SHA1 Message Date
5edb07a5b1 mps matmul 2023-12-20 02:53:18 +01:00
cf27868b57 More cleanup. 2023-12-15 01:44:22 +01:00
40c3e1bd5a cleanup. 2023-12-15 01:41:14 +01:00
ece4c69a68 Fixing softmax. 2023-12-15 01:35:08 +01:00
4eeaf205d6 Fix softmax for long sequences (missing barrier). 2023-12-14 19:37:03 +01:00
f419a38e1a Fix use resource. 2023-12-14 16:52:37 +01:00
361f2ad2af Working with merging encoders and using fences. 2023-12-14 16:05:33 +01:00
931432ed55 Fixing tests + matmul from MFA 2023-12-13 16:58:36 +01:00
0404a3eb5b Removed MPSMatrix entirely (buggy). 2023-12-13 16:21:48 +01:00
11 changed files with 903 additions and 361 deletions

View File

@ -61,7 +61,7 @@ tracing-subscriber = "0.3.7"
wav = "1.0.0"
yoke = { version = "0.7.2", features = ["derive"] }
zip = { version = "0.6.6", default-features = false }
metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }
metal = { version = "0.27.0", features = ["mps"], package = "candle-metal" }
[profile.release-with-debug]
inherits = "release"

View File

@ -4,9 +4,7 @@ use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT};
use crate::{CpuStorage, DType, Layout, Result, Shape};
use candle_metal_kernels;
use candle_metal_kernels::Kernels;
use half::f16;
use metal;
use metal::mps::matrix::{Matrix, MatrixDescriptor, MatrixMultiplication};
use metal::{Buffer, CommandBuffer, CommandQueue, MTLResourceOptions, NSUInteger};
use std::collections::HashMap;
use std::path::Path;
@ -40,6 +38,7 @@ pub struct MetalDevice {
command_queue: metal::CommandQueue,
command_buffers: Arc<RwLock<Vec<metal::CommandBuffer>>>,
command_buffer_index: Arc<RwLock<usize>>,
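/// Shared fence: every encoder waits on it before dispatching and updates it when done, ordering encoders on the same command buffer.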
fence: metal::Fence,
kernels: Arc<candle_metal_kernels::Kernels>,
buffers: Arc<RwLock<HashMap<(NSUInteger, MTLResourceOptions), Vec<Arc<Buffer>>>>>,
}
@ -73,68 +72,32 @@ impl MetalDevice {
pub fn command_buffer(&self) -> CommandBuffer {
let mut command_buffers = self.command_buffers.try_write().unwrap();
let mut command_buffer = command_buffers[0].to_owned();
let mut index = self.command_buffer_index.try_write().unwrap();
let n = command_buffers.len();
if *index == n {
// todo!("Cycle buffers");
for i in 0..n {
let command_buffer = &command_buffers[i];
match command_buffer.status() {
metal::MTLCommandBufferStatus::Committed
| metal::MTLCommandBufferStatus::Scheduled => {
// println!("Wait during cycling {i}");
// println!("Command {i} / {n}: {:?}", command_buffer.status());
command_buffer.wait_until_completed();
}
metal::MTLCommandBufferStatus::Completed => {}
_ => {
panic!("Command buffer {i} not committed during cycling");
}
}
}
let new_buffers = (0..n)
.map(|i| {
// println!("Creating command buffer {i}");
let command_buffer = self.command_queue.new_command_buffer().to_owned();
command_buffer.set_label(&format!("num {i}"));
command_buffer.enqueue();
command_buffer
})
.collect();
*command_buffers = new_buffers;
if *index > 20 {
command_buffer.commit();
command_buffer = self.command_queue.new_command_buffer().to_owned();
*command_buffers = vec![command_buffer.clone()];
*index = 0;
// println!("Reset");
}
// println!("Giving buffer {} / {n}", *index);
let out = &command_buffers[*index];
assert_eq!(out.status(), metal::MTLCommandBufferStatus::Enqueued);
*index += 1;
out.to_owned()
command_buffer
}
pub fn wait_until_completed(&self) {
let command_buffers = self.command_buffers.try_write().unwrap();
let index = self.command_buffer_index.try_write().unwrap();
let n = command_buffers.len();
// for i in 0..*index {
// let command_buffer = &command_buffers[i];
// println!("Command {i} / {n}: {:?}", command_buffer.status());
// }
for i in 0..*index {
let command_buffer = &command_buffers[i];
let mut command_buffers = self.command_buffers.try_write().unwrap();
let command_buffer = &command_buffers[0];
match command_buffer.status() {
metal::MTLCommandBufferStatus::Committed
| metal::MTLCommandBufferStatus::Scheduled => {}
metal::MTLCommandBufferStatus::Completed => {}
_ => {
panic!("Command buffer not committed");
| metal::MTLCommandBufferStatus::Scheduled
| metal::MTLCommandBufferStatus::Completed => {
panic!("Already committed");
}
_ => {}
}
// println!("Wait {i}");
command_buffer.commit();
command_buffer.wait_until_completed();
// println!("Ok {i}");
// command_buffer.wait_until_completed();
}
*command_buffers = vec![self.command_queue.new_command_buffer().to_owned()];
}
pub fn kernels(&self) -> &Kernels {
@ -150,21 +113,23 @@ impl MetalDevice {
self._new_buffer(size, MTLResourceOptions::StorageModePrivate, name)
}
fn _new_buffer(&self, size: NSUInteger, option: MTLResourceOptions, name: &str) -> Arc<Buffer> {
// println!("Creating new buffer {name}");
fn _new_buffer(
&self,
size: NSUInteger,
option: MTLResourceOptions,
_name: &str,
) -> Arc<Buffer> {
let mut buffers = self.buffers.try_write().unwrap();
let subbuffers = buffers.entry((size, option)).or_insert(vec![]);
for sub in &mut *subbuffers {
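// A strong count of 1 means the pool holds the only reference, so no live tensor uses this buffer and it can be handed out again.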
if Arc::strong_count(sub) == 1 {
// println!("Reusing tensor {size} {name}");
return sub.clone();
}
}
let new_buffer = self.device.new_buffer(size as NSUInteger, option);
let new_buffer = Arc::new(new_buffer);
// subbuffers.push(new_buffer.clone());
// println!("Created tensor {size} {name}");
subbuffers.push(new_buffer.clone());
for subbuffers in buffers.values_mut() {
let newbuffers = subbuffers
.iter()
@ -178,7 +143,7 @@ impl MetalDevice {
}
pub fn new_buffer_managed(&self, size: NSUInteger) -> Arc<Buffer> {
self._new_buffer(size, MTLResourceOptions::StorageModeShared, "managed")
self._new_buffer(size, MTLResourceOptions::StorageModeManaged, "managed")
}
pub fn new_buffer_with_data<T>(&self, data: &[T]) -> Arc<Buffer> {
@ -186,7 +151,7 @@ impl MetalDevice {
let tmp = self.device.new_buffer_with_data(
data.as_ptr() as *const core::ffi::c_void,
size,
metal::MTLResourceOptions::StorageModeShared,
metal::MTLResourceOptions::StorageModeManaged,
);
let real = self._new_buffer(
size,
@ -196,15 +161,12 @@ impl MetalDevice {
let command_buffer = self.command_buffer();
command_buffer.set_label("with_data");
let blit = command_buffer.new_blit_command_encoder();
blit.wait_for_fence(&self.fence);
blit.set_label("with_data_blit");
blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
blit.update_fence(&self.fence);
blit.end_encoding();
command_buffer.commit();
drop(command_buffer);
// real.did_modify_range(metal::NSRange::new(0, real.length()));
// println!("Command {:?}", command.status());
// self.commit();
// This is necessary for mmapped safetensors
// because of the unsafe slice cast we're doing:
// the slice might not live long enough for Metal
@ -216,39 +178,6 @@ impl MetalDevice {
real
}
pub fn new_matrix(
&self,
(b, m, n): (NSUInteger, NSUInteger, NSUInteger),
size: NSUInteger,
type_id: u32,
dtype: DType,
) -> Result<(Matrix, Arc<Buffer>)> {
let elem_count = (b * m * n) as usize;
let buffer = self.new_buffer(elem_count, dtype, "matrix");
let command_buffer = self.command_buffer();
command_buffer.set_label("zeros_matmul");
let blit = command_buffer.new_blit_command_encoder();
blit.fill_buffer(
&buffer,
metal::NSRange {
location: 0,
length: buffer.length(),
},
0,
);
blit.end_encoding();
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
let result_descriptor =
MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id);
let result_matrix = Matrix::init_with_buffer_descriptor(&buffer, 0, &result_descriptor)
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
Ok((result_matrix, buffer))
}
pub fn capture<P: AsRef<Path>>(&self, path: P) -> Result<()> {
let capture = metal::CaptureManager::shared();
let descriptor = metal::CaptureDescriptor::new();
@ -266,22 +195,6 @@ impl MetalDevice {
#[derive(Debug, Clone)]
pub struct MetalStorage {
buffer: Arc<metal::Buffer>,
matrices: Arc<
RwLock<
HashMap<
(
NSUInteger,
NSUInteger,
NSUInteger,
bool,
NSUInteger,
NSUInteger,
u32,
),
Matrix,
>,
>,
>,
device: MetalDevice,
dtype: DType,
}
@ -310,34 +223,27 @@ impl BackendStorage for MetalStorage {
self.dtype
);
}
self.device.wait_until_completed();
self.buffer
.did_modify_range(metal::NSRange::new(0, self.buffer.length()));
let buffer = self.device.new_buffer_managed(self.buffer.length());
{
let command_buffer = self.device.command_buffer();
command_buffer.set_label("to_cpu");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("blit_to_cpu");
blit.wait_for_fence(&self.device.fence);
blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
blit.update_fence(&self.device.fence);
blit.end_encoding();
command_buffer.commit();
}
self.device.wait_until_completed();
match self.dtype {
DType::U8 => Ok(CpuStorage::U8(buffer.read_to_vec(length / size))),
DType::U32 => Ok(CpuStorage::U32(buffer.read_to_vec(length / size))),
DType::I64 => Ok(CpuStorage::I64(buffer.read_to_vec(length / size))),
DType::F16 => Ok(CpuStorage::F16(buffer.read_to_vec(length / size))),
DType::BF16 => Ok(CpuStorage::BF16(buffer.read_to_vec(length / size))),
DType::F32 => {
let vec = buffer.read_to_vec(length / size);
// println!("Got back {:?}", &vec[..1]);
Ok(CpuStorage::F32(vec))
}
DType::F64 => Ok(CpuStorage::F64(buffer.read_to_vec(length / size))),
DType::U8 => Ok(CpuStorage::U8(read_to_vec(&buffer, length / size))),
DType::U32 => Ok(CpuStorage::U32(read_to_vec(&buffer, length / size))),
DType::I64 => Ok(CpuStorage::I64(read_to_vec(&buffer, length / size))),
DType::F16 => Ok(CpuStorage::F16(read_to_vec(&buffer, length / size))),
DType::BF16 => Ok(CpuStorage::BF16(read_to_vec(&buffer, length / size))),
DType::F32 => Ok(CpuStorage::F32(read_to_vec(&buffer, length / size))),
DType::F64 => Ok(CpuStorage::F64(read_to_vec(&buffer, length / size))),
}
}
@ -389,8 +295,6 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -440,8 +344,6 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -491,8 +393,6 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -555,8 +455,6 @@ impl BackendStorage for MetalStorage {
&buffer,
)
.map_err(MetalError::from)?;
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device, dtype))
}
@ -570,7 +468,6 @@ impl BackendStorage for MetalStorage {
let shape = layout.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype, "todtype");
device.wait_until_completed();
let command_buffer = device.command_buffer();
if layout.is_contiguous() && layout.start_offset() == 0 {
let kernel_name = match (self.dtype, dtype) {
@ -615,10 +512,6 @@ impl BackendStorage for MetalStorage {
.map_err(MetalError::from)?;
}
command_buffer.set_label("to_dtype");
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
device.wait_until_completed();
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -719,8 +612,6 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -744,21 +635,13 @@ impl BackendStorage for MetalStorage {
let kernel_name = match (B::KERNEL, dtype) {
("add", DType::F32) => contiguous::add::FLOAT,
// ("badd", DType::F32) => contiguous::add::FLOAT,
("sub", DType::F32) => contiguous::sub::FLOAT,
//("bsub", DType::F32) => contiguous::sub::FLOAT,
("mul", DType::F32) => contiguous::mul::FLOAT,
// ("bmul", DType::F32) => contiguous::mul::FLOAT,
("div", DType::F32) => contiguous::div::FLOAT,
// ("bdiv", DType::F32) => contiguous::div::FLOAT,
("add", DType::F16) => contiguous::add::HALF,
// ("badd", DType::F16) => contiguous::add::HALF,
("sub", DType::F16) => contiguous::sub::HALF,
// ("bsub", DType::F16) => contiguous::sub::HALF,
("mul", DType::F16) => contiguous::mul::HALF,
// ("bmul", DType::F16) => contiguous::mul::HALF,
("div", DType::F16) => contiguous::div::HALF,
// ("bdiv", DType::F16) => contiguous::div::HALF,
(name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
};
candle_metal_kernels::call_binary_contiguous(
@ -803,8 +686,6 @@ impl BackendStorage for MetalStorage {
.map_err(MetalError::from)?;
}
command_buffer.set_label("binary");
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -849,8 +730,6 @@ impl BackendStorage for MetalStorage {
&buffer,
)
.map_err(MetalError::from)?;
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device, dtype))
}
@ -960,8 +839,6 @@ impl BackendStorage for MetalStorage {
&buffer,
)
.map_err(MetalError::from)?;
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@ -976,7 +853,6 @@ impl BackendStorage for MetalStorage {
) -> Result<Self> {
crate::bail!("index_add metal")
}
fn matmul(
&self,
rhs: &Self,
@ -984,110 +860,37 @@ impl BackendStorage for MetalStorage {
lhs_l: &Layout,
rhs_l: &Layout,
) -> Result<Self> {
// Create descriptors
let (type_id, size) = match self.dtype {
DType::F32 => (
metal::mps::MPS_FLOATBIT_ENCODING | 32,
core::mem::size_of::<f32>() as NSUInteger,
),
DType::F16 => (
metal::mps::MPS_FLOATBIT_ENCODING | 16,
core::mem::size_of::<f16>() as NSUInteger,
),
dtype => todo!("Dtype for matmul {dtype:?} is not supported"),
let buffer = self.device.new_buffer(b * m * n, self.dtype, "matmul");
let name = match self.dtype {
DType::F32 => "sgemm",
DType::F16 => "hgemm",
dtype => {
return Err(MetalError::Message(format!("matmul doesn't support {dtype:?}")).into())
}
};
let lhs_stride = lhs_l.stride();
let rhs_stride = rhs_l.stride();
let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
// The a tensor has dims batching, k, n (rhs)
let transpose_left = if lhs_m1 == 1 && lhs_m2 == k {
false
} else if lhs_m1 == m && lhs_m2 == 1 {
true
} else {
Err(MetalError::MatMulNonContiguous {
lhs_stride: lhs_stride.to_vec(),
rhs_stride: rhs_stride.to_vec(),
mnk: (m, n, k),
})?
};
let transpose_right = if rhs_m1 == 1 && rhs_m2 == n {
false
} else if rhs_m1 == k && rhs_m2 == 1 {
true
} else {
Err(MetalError::MatMulNonContiguous {
lhs_stride: lhs_stride.to_vec(),
rhs_stride: rhs_stride.to_vec(),
mnk: (m, n, k),
})?
};
let b = b as NSUInteger;
let m = m as NSUInteger;
let n = n as NSUInteger;
let k = k as NSUInteger;
let left_matrix = self.matrix(
(b, m, k),
transpose_left,
size,
lhs_l.start_offset() as NSUInteger * size,
type_id,
)?;
let right_matrix = rhs.matrix(
(b, k, n),
transpose_right,
size,
rhs_l.start_offset() as NSUInteger * size,
type_id,
)?;
let (result_matrix, out_buffer) =
self.device
.new_matrix((b, m, n), size, type_id, self.dtype)?;
let command_buffer = self.device.command_buffer();
command_buffer.set_label("matmul");
let alpha = 1.0f64;
// let beta = f64::MIN;
let beta = 1.0;
// Create kernel
let matrix_multiplication = MatrixMultiplication::init(
&self.device,
transpose_left,
transpose_right,
m,
n,
k,
alpha,
beta,
)
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
matrix_multiplication.set_batch_size(b);
matrix_multiplication.set_batch_start(0);
// Encode kernel to command buffer
matrix_multiplication.encode_to_command_buffer(
candle_metal_kernels::call_gemm(
&self.device.device,
&command_buffer,
&left_matrix,
&right_matrix,
&result_matrix,
);
command_buffer.commit();
out_buffer.did_modify_range(metal::NSRange::new(0, out_buffer.length()));
// println!("========= MATMUL {:?}", Arc::strong_count(&out_buffer));
Ok(Self::new(out_buffer, self.device.clone(), self.dtype()))
&self.device.kernels,
name,
(b, m, n, k),
&lhs_l.stride(),
lhs_l.start_offset() * self.dtype.size_in_bytes(),
&self.buffer,
&rhs_l.stride(),
rhs_l.start_offset() * rhs.dtype.size_in_bytes(),
&rhs.buffer,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, self.device.clone(), self.dtype()))
}
fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> {
let command_buffer = self.device.command_buffer();
// println!("Copy strided");
if src_l.is_contiguous() && self.dtype == dst.dtype() {
command_buffer.set_label("copy_contiguous");
let blit = command_buffer.new_blit_command_encoder();
@ -1126,63 +929,31 @@ impl BackendStorage for MetalStorage {
.map_err(MetalError::from)?;
command_buffer.set_label("copy_strided");
}
command_buffer.commit();
Ok(())
}
}
impl MetalStorage {
pub fn new(buffer: Arc<Buffer>, device: MetalDevice, dtype: DType) -> Self {
let matrices = Arc::new(RwLock::new(HashMap::new()));
Self {
buffer,
device,
dtype,
matrices,
}
}
pub fn buffer(&self) -> &Buffer {
&self.buffer
}
fn matrix(
&self,
(b, m, n): (NSUInteger, NSUInteger, NSUInteger),
transpose: bool,
size: NSUInteger,
offset: NSUInteger,
type_id: u32,
) -> Result<Matrix> {
let key = (b, m, n, transpose, size, offset, type_id);
// let mut matrices = self.matrices.try_write().unwrap();
// if let Some(matrix) = matrices.get(&key) {
// Ok(matrix.clone())
// } else {
let descriptor = if transpose {
MatrixDescriptor::init_multiple(n, m, b, m * size, m * n * size, type_id)
} else {
MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id)
};
let matrix = Matrix::init_with_buffer_descriptor(&self.buffer, offset, &descriptor)
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
// matrices.insert(key, matrix.clone());
Ok(matrix)
// }
}
}
impl BackendDevice for MetalDevice {
type Storage = MetalStorage;
fn new(ordinal: usize) -> Result<Self> {
// println!("CREATING DEVICE");
let device = metal::Device::all().swap_remove(ordinal);
let n = 64;
let n = 1;
let command_queue = device.new_command_queue();
let command_buffers = (0..n)
@ -1195,10 +966,12 @@ impl BackendDevice for MetalDevice {
.collect();
let command_buffers = Arc::new(RwLock::new(command_buffers));
let command_buffer_index = Arc::new(RwLock::new(0));
let kernels = Arc::new(Kernels::new());
let fence = device.new_fence();
let kernels = Arc::new(Kernels::new(fence.clone()));
let buffers = Arc::new(RwLock::new(HashMap::new()));
Ok(Self {
device,
fence,
command_queue,
command_buffers,
command_buffer_index,
@ -1226,6 +999,7 @@ impl BackendDevice for MetalDevice {
let command_buffer = self.command_buffer();
command_buffer.set_label("zeros");
let blit = command_buffer.new_blit_command_encoder();
blit.wait_for_fence(&self.fence);
blit.fill_buffer(
&buffer,
metal::NSRange {
@ -1234,9 +1008,8 @@ impl BackendDevice for MetalDevice {
},
0,
);
blit.update_fence(&self.fence);
blit.end_encoding();
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(MetalStorage::new(buffer, self.clone(), dtype))
}
@ -1287,3 +1060,10 @@ impl BackendDevice for MetalDevice {
self.storage_from_cpu_storage(&cpu_storage)
}
}
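/// Copies `n` elements of `T` out of a buffer's contents pointer.
/// Assumes the GPU work writing `buffer` has completed and that the buffer is CPU-visible (shared or managed storage).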
fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
let ptr = buffer.contents() as *const T;
assert!(!ptr.is_null());
let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
slice.to_vec()
}

View File

@ -900,7 +900,9 @@ fn matmul(device: &Device) -> Result<()> {
let b = Tensor::from_slice(&data, (2, 2), device)?;
let c = a.matmul(&b)?;
let d = a.matmul(&c)?;
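// With a = b = [[1, 2], [3, 4]]: c = a·b = [[7, 10], [15, 22]] and d = a·c = [[37, 54], [81, 118]].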
assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);
assert_eq!(d.to_vec2::<f32>()?, &[[37.0, 54.0], [81.0, 118.0]]);
let data = vec![1.0f32, 2.0];
let a = Tensor::from_slice(&data, (2, 1), device)?;

View File

@ -10,7 +10,7 @@ categories = ["science"]
license = "MIT OR Apache-2.0"
[dependencies]
metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }
metal = { version = "0.27.0", features = ["mps"], package="candle-metal" }
once_cell = "1.18.0"
thiserror = "1"
tracing = "0.1.37"

View File

@ -1,10 +1,11 @@
use metal::{
Buffer, CommandBufferRef, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState,
Device, Function, Library, MTLSize,
Device, Function, FunctionConstantValues, Library, MTLDataType, MTLSize, NSUInteger,
};
use std::collections::HashMap;
use std::ffi::c_void;
use std::sync::RwLock;
use metal::mps::matrix::{Matrix, MatrixDescriptor, MatrixMultiplication};
const AFFINE: &str = include_str!("affine.metal");
const INDEXING: &str = include_str!("indexing.metal");
@ -13,6 +14,7 @@ const BINARY: &str = include_str!("binary.metal");
const TERNARY: &str = include_str!("ternary.metal");
const CAST: &str = include_str!("cast.metal");
const REDUCE: &str = include_str!("reduce.metal");
const MFA: &[u8] = include_bytes!("libMetalFlashAttention.metallib");
fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) {
let size = length as u64;
@ -105,6 +107,7 @@ pub enum Source {
Ternary,
Cast,
Reduce,
Mfa,
}
macro_rules! ops{
@ -171,6 +174,12 @@ pub enum MetalKernelError {
FailedToCreateComputeFunction,
#[error("Failed to create pipeline")]
FailedToCreatePipeline(String),
#[error("Invalid matmul arguments {lhs_stride:?} {rhs_stride:?} {mnk:?}")]
MatMulNonContiguous {
lhs_stride: Vec<usize>,
rhs_stride: Vec<usize>,
mnk: (usize, usize, usize),
},
}
impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
@ -179,23 +188,24 @@ impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
}
}
type KernelMap<T> = HashMap<&'static str, T>;
type Libraries = HashMap<Source, Library>;
type Pipelines = KernelMap<ComputePipelineState>;
type Pipelines = HashMap<(&'static str, Option<ConstantValues>), ComputePipelineState>;
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct Kernels {
libraries: RwLock<Libraries>,
pipelines: RwLock<Pipelines>,
fence: metal::Fence,
}
impl Kernels {
pub fn new() -> Self {
pub fn new(fence: metal::Fence) -> Self {
let libraries = RwLock::new(Libraries::new());
let pipelines = RwLock::new(Pipelines::new());
Self {
libraries,
pipelines,
fence,
}
}
@ -208,9 +218,9 @@ impl Kernels {
Source::Indexing => INDEXING,
Source::Cast => CAST,
Source::Reduce => REDUCE,
Source::Mfa => panic!("Invalid lib"),
}
}
pub fn load_library(
&self,
device: &Device,
@ -220,10 +230,20 @@ impl Kernels {
if let Some(lib) = libraries.get(&source) {
Ok(lib.clone())
} else {
let lib = match source {
Source::Mfa => {
let source_data = MFA;
device
.new_library_with_data(source_data)
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
}
source => {
let source_content = self.get_library_source(source);
let lib = device
device
.new_library_with_source(source_content, &CompileOptions::new())
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?;
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
}
};
libraries.insert(source, lib.clone());
Ok(lib)
}
@ -234,19 +254,41 @@ impl Kernels {
device: &Device,
source: Source,
name: &'static str,
constants: Option<FunctionConstantValues>,
) -> Result<Function, MetalKernelError> {
let func = self
.load_library(device, source)?
.get_function(name, None)
.get_function(name, constants)
.map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?;
Ok(func)
// let mut funcs = self.funcs.write()?;
// if let Some(func) = funcs.get(name) {
// Ok(func.clone())
// } else {
// funcs.insert(name, func.clone());
// Ok(func)
// }
}
fn load_pipeline_with_constants(
&self,
device: &Device,
source: Source,
name: &'static str,
constants: Option<ConstantValues>,
) -> Result<ComputePipelineState, MetalKernelError> {
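// Pipelines are cached by (kernel name, specialization constants); the same function compiled with different constants yields a different pipeline.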
let mut pipelines = self.pipelines.write()?;
let key = (name, constants);
if let Some(pipeline) = pipelines.get(&key) {
Ok(pipeline.clone())
} else {
let (name, constants) = key;
let func = self.load_function(
device,
source,
name,
constants.as_ref().map(|c| c.function_constant_values()),
)?;
let pipeline = device
.new_compute_pipeline_state_with_function(&func)
.map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
pipelines.insert((name, constants), pipeline.clone());
Ok(pipeline)
}
}
pub fn load_pipeline(
@ -255,18 +297,7 @@ impl Kernels {
source: Source,
name: &'static str,
) -> Result<ComputePipelineState, MetalKernelError> {
let mut pipelines = self.pipelines.write()?;
if let Some(pipeline) = pipelines.get(name) {
Ok(pipeline.clone())
} else {
let func = self.load_function(device, source, name)?;
let pipeline = device
.new_compute_pipeline_state_with_function(&func)
.map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
pipelines.insert(name, pipeline.clone());
Ok(pipeline)
}
self.load_pipeline_with_constants(device, source, name, None)
}
}
@ -282,12 +313,16 @@ pub fn call_unary_contiguous(
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?;
let encoder = command_buffer.new_compute_command_encoder();
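// Each encoder waits on the shared fence before dispatching and updates it afterwards, so successive encoders on a command buffer run in encode order. The same pattern repeats in every call_* below.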
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -309,6 +344,7 @@ pub fn call_unary_strided(
let num_dims: usize = shape.len();
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
let length: usize = shape.iter().product();
@ -327,7 +363,10 @@ pub fn call_unary_strided(
let width: usize = shape.iter().product();
let (thread_group_count, thread_group_size) = linear_split(&pipeline, width);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -346,13 +385,18 @@ pub fn call_binary_contiguous(
let pipeline = kernels.load_pipeline(device, Source::Binary, kernel_name.0)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, left, right, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(left, metal::MTLResourceUsage::Read);
encoder.use_resource(right, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -377,6 +421,7 @@ pub fn call_binary_strided(
let num_dims: usize = shape.len();
let encoder = command_buffer.new_compute_command_encoder();
let width: usize = shape.iter().product();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
let length: usize = shape.iter().product();
@ -397,7 +442,11 @@ pub fn call_binary_strided(
let (thread_group_count, thread_group_size) = linear_split(&pipeline, width);
encoder.use_resource(left_input, metal::MTLResourceUsage::Read);
encoder.use_resource(right_input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -416,12 +465,16 @@ pub fn call_cast_contiguous(
let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, (input, input_offset), output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -441,6 +494,7 @@ pub fn call_cast_strided(
let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
let length: usize = shape.iter().product();
@ -459,7 +513,10 @@ pub fn call_cast_strided(
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -479,6 +536,7 @@ pub fn call_reduce_contiguous(
let elements_to_sum = length / out_length;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
@ -504,7 +562,10 @@ pub fn call_reduce_contiguous(
depth: 1,
};
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -522,6 +583,7 @@ pub fn call_last_softmax(
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, elements_to_sum, input, output));
@ -546,7 +608,10 @@ pub fn call_last_softmax(
depth: 1,
};
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -566,12 +631,16 @@ pub fn call_affine(
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, add, input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -594,6 +663,7 @@ pub fn call_affine_strided(
let size: usize = shape.iter().product();
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
@ -611,7 +681,10 @@ pub fn call_affine_strided(
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -630,12 +703,16 @@ pub fn call_powf(
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -657,6 +734,7 @@ pub fn call_powf_strided(
let size: usize = shape.iter().product();
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
@ -673,7 +751,10 @@ pub fn call_powf_strided(
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -692,12 +773,16 @@ pub fn call_elu(
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -719,6 +804,7 @@ pub fn call_elu_strided(
let size: usize = shape.iter().product();
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
@ -735,7 +821,10 @@ pub fn call_elu_strided(
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -757,6 +846,7 @@ pub fn call_where_cond_strided(
let pipeline = kernels.load_pipeline(device, Source::Ternary, name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
let size: usize = shape.iter().product();
@ -780,7 +870,12 @@ pub fn call_where_cond_strided(
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(cond, metal::MTLResourceUsage::Read);
encoder.use_resource(left, metal::MTLResourceUsage::Read);
encoder.use_resource(right, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
@ -807,6 +902,7 @@ pub fn call_index_select(
let encoder = command_buffer.new_compute_command_encoder();
encoder.wait_for_fence(&kernels.fence);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
@ -825,10 +921,341 @@ pub fn call_index_select(
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(ids, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.update_fence(&kernels.fence);
encoder.end_encoding();
Ok(())
}
#[derive(Debug, PartialEq)]
pub enum Value {
USize(usize),
Bool(bool),
F32(f32),
U16(u16),
}
impl std::hash::Hash for Value {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
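// f32 does not implement Hash; hashing the raw bit pattern is enough here.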
Value::F32(v) => v.to_bits().hash(state),
Value::USize(v) => v.hash(state),
Value::U16(v) => v.hash(state),
Value::Bool(v) => v.hash(state),
}
}
}
impl Value {
fn data_type(&self) -> MTLDataType {
match self {
Value::USize(_) => MTLDataType::UInt,
Value::F32(_) => MTLDataType::Float,
Value::U16(_) => MTLDataType::UShort,
Value::Bool(_) => MTLDataType::Bool,
}
}
}
/// Not strictly true: f32 is only PartialEq because of NaN, but good enough for our purposes.
impl Eq for Value {}
#[derive(Debug, Eq, PartialEq, Hash)]
struct ConstantValues(Vec<(usize, Value)>);
impl ConstantValues {
pub fn new(values: Vec<(usize, Value)>) -> Self {
Self(values)
}
fn function_constant_values(&self) -> FunctionConstantValues {
let f = FunctionConstantValues::new();
for (index, value) in &self.0 {
let ty = value.data_type();
match value {
Value::USize(v) => {
f.set_constant_value_at_index(
v as *const usize as *const c_void,
ty,
*index as u64,
);
}
Value::F32(v) => {
f.set_constant_value_at_index(
v as *const f32 as *const c_void,
ty,
*index as u64,
);
}
Value::U16(v) => {
f.set_constant_value_at_index(
v as *const u16 as *const c_void,
ty,
*index as u64,
);
}
Value::Bool(v) => {
f.set_constant_value_at_index(
v as *const bool as *const c_void,
ty,
*index as u64,
);
}
}
}
f
}
}
#[allow(clippy::too_many_arguments)]
pub fn call_gemm(
device: &Device,
command_buffer: &CommandBufferRef,
kernels: &Kernels,
name: &'static str,
(b, m, n, k): (usize, usize, usize, usize),
lhs_stride: &[usize],
lhs_offset: usize,
lhs_buffer: &Buffer,
rhs_stride: &[usize],
rhs_offset: usize,
rhs_buffer: &Buffer,
output: &Buffer,
) -> Result<(), MetalKernelError> {
assert!(rhs_stride.len() >= 2);
assert!(lhs_stride.len() >= 2);
let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
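// An operand with innermost strides [k, 1] (lhs) or [n, 1] (rhs) is plain row-major; strides [1, m] / [1, k] mean it is stored transposed. Anything else is rejected below.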
let a_trans = if lhs_m1 == 1 && lhs_m2 == k {
false
} else if lhs_m1 == m && lhs_m2 == 1 {
true
} else {
return Err(MetalKernelError::MatMulNonContiguous {
lhs_stride: lhs_stride.to_vec(),
rhs_stride: rhs_stride.to_vec(),
mnk: (m, n, k),
});
};
let b_trans = if rhs_m1 == 1 && rhs_m2 == n {
false
} else if rhs_m1 == k && rhs_m2 == 1 {
true
} else {
return Err(MetalKernelError::MatMulNonContiguous {
lhs_stride: lhs_stride.to_vec(),
rhs_stride: rhs_stride.to_vec(),
mnk: (m, n, k),
});
};
// let d_trans = false;
// let alpha = 1.0f32;
// let beta = 0.0f32;
// let batched = b > 1;
// let fused_activation = false;
// let fused_bias = false;
// let m_simd = 16;
// let n_simd = 16;
// let k_simd = 16;
// let m_splits = 2;
// let n_splits = 2;
// let constants = Some(ConstantValues::new(vec![
// (0, Value::USize(m)),
// (1, Value::USize(n)),
// (2, Value::USize(k)),
// (10, Value::Bool(a_trans)),
// (11, Value::Bool(b_trans)),
// (13, Value::Bool(d_trans)),
// (20, Value::F32(alpha)),
// (21, Value::F32(beta)),
// (100, Value::Bool(batched)),
// (101, Value::Bool(fused_activation)),
// // Garbage
// (102, Value::Bool(false)),
// (103, Value::Bool(false)),
// (113, Value::Bool(false)),
// (50_000, Value::Bool(false)),
// // End garbage
// (200, Value::U16(m_simd)),
// (201, Value::U16(n_simd)),
// (202, Value::U16(k_simd)),
// (210, Value::U16(m_splits)),
// (211, Value::U16(n_splits)),
// (50_001, Value::Bool(fused_bias)),
// ]));
// let pipeline = kernels.load_pipeline_with_constants(device, Source::Mfa, name, constants)?;
// let m_group = m_simd * m_splits;
// let n_group = n_simd * n_splits;
//
// let a_block_length = m_group * k_simd;
// let b_block_length = k_simd * n_group;
//
// let mut block_elements = a_block_length + b_block_length;
// if (m % 8 != 0) && (n % 8 != 0) {
// let c_block_length = m_group * n_group;
// block_elements = std::cmp::max(c_block_length, block_elements)
// }
// if fused_bias {
// if d_trans {
// block_elements = std::cmp::max(block_elements, m_group);
// } else {
// block_elements = std::cmp::max(block_elements, n_group);
// }
// }
// let bytes = match name {
// "sgemm" => 4,
// "hgemm" => 2,
// other => {
// return Err(MetalKernelError::LoadLibraryError(format!(
// "{other} is not a valid kernel for gemm"
// )));
// }
// };
// let block_bytes = block_elements * bytes;
//
// let encoder = command_buffer.new_compute_command_encoder();
// encoder.wait_for_fence(&kernels.fence);
// encoder.set_compute_pipeline_state(&pipeline);
// encoder.set_threadgroup_memory_length(0, block_bytes.into());
// encoder.set_buffer(0, Some(lhs_buffer), lhs_offset as NSUInteger);
// encoder.set_buffer(1, Some(rhs_buffer), rhs_offset as NSUInteger);
// encoder.set_buffer(2, Some(output), 0);
// // TODO Tensor D
//
// let grid_z = b;
// if batched {
// let byte_stride_a: usize = lhs_stride[lhs_stride.len() - 3] * bytes as usize;
// let byte_stride_b: usize = rhs_stride[rhs_stride.len() - 3] * bytes as usize;
// let byte_stride_c = m * n * bytes as usize;
// // TODO byte_stride_d
// let byte_stride_d = 0;
//
// let mut buffer: Vec<u64> = Vec::with_capacity(b * 4);
// for i in 0..b {
// buffer.push((i * byte_stride_a) as u64);
// buffer.push((i * byte_stride_b) as u64);
// buffer.push((i * byte_stride_c) as u64);
// buffer.push((i * byte_stride_d) as u64);
// }
// encoder.set_bytes(
// 10,
// (buffer.len() * core::mem::size_of::<u64>()) as NSUInteger,
// buffer.as_ptr() as *const NSUInteger as *const c_void,
// );
// }
//
// let grid_size = MTLSize {
// width: divide(n, n_group.into()),
// height: divide(m, m_group.into()),
// depth: grid_z as NSUInteger,
// };
// let group_size = MTLSize {
// width: 32 * (m_splits as u64) * (n_splits as u64),
// height: 1,
// depth: 1,
// };
// // println!("grid size {grid_size:?} group size {group_size:?}");
// encoder.use_resource(lhs_buffer, metal::MTLResourceUsage::Read);
// encoder.use_resource(rhs_buffer, metal::MTLResourceUsage::Read);
// encoder.use_resource(output, metal::MTLResourceUsage::Write);
// encoder.dispatch_thread_groups(grid_size, group_size);
// encoder.update_fence(&kernels.fence);
// encoder.end_encoding();
let (b, m, n, k) = (
b as NSUInteger,
m as NSUInteger,
n as NSUInteger,
k as NSUInteger,
);
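// MPS data types are a float flag plus a bit width: 0x10000000 | 32 is MPSDataTypeFloat32 and 0x10000000 | 16 is MPSDataTypeFloat16; `size` is the element size in bytes.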
let (size, data_type) = if name == "sgemm" {
(4, 0x10000000 | 32)
} else {
(2, 0x10000000 | 16)
};
let left_matrix = create_matrix(
lhs_buffer,
(b, m, k),
a_trans,
size,
lhs_offset as NSUInteger,
data_type,
).unwrap();
let right_matrix = create_matrix(
rhs_buffer,
(b, k, n),
b_trans,
size,
rhs_offset as NSUInteger,
data_type,
).unwrap();
let result_matrix = create_matrix(
output,
(b, m, n),
false,
size,
0,
data_type,
).unwrap();
// Create kernel
let matrix_multiplication = MatrixMultiplication::init(
&device,
a_trans,
b_trans,
m,
n,
k,
1.0,
0.0,
).unwrap();
matrix_multiplication.encode_to_command_buffer(
command_buffer,
&left_matrix,
&right_matrix,
&result_matrix,
);
Ok(())
}
fn create_matrix(
buffer: &Buffer,
(b, rows, columns): (NSUInteger, NSUInteger, NSUInteger),
transpose: bool,
size: NSUInteger,
offset: NSUInteger,
data_type: u32,
) -> Option<Matrix> {
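// MPS matrix descriptors describe the physical layout, so a transposed operand swaps its row and column counts; `offset` is a byte offset into `buffer`.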
let (rows, columns) = if transpose {
(columns, rows)
} else {
(rows, columns)
};
let descriptor = if b == 1 {
MatrixDescriptor::init_single(rows, columns, columns * size, data_type)
} else {
MatrixDescriptor::init_multiple(
rows,
columns,
b,
columns * size,
rows * columns * size,
data_type,
)
};
// `offset` is already in bytes (callers multiply by the element size).
Matrix::init_with_buffer_descriptor(buffer, offset, &descriptor)
}
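/// Ceiling division: the number of groups of size `b` needed to cover `m`.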
fn divide(m: usize, b: usize) -> NSUInteger {
((m + b - 1) / b) as NSUInteger
}
#[cfg(test)]
mod tests;

View File

@ -32,7 +32,7 @@ kernel void NAME( \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
\
threadgroup float shared_memory[THREADGROUP_SIZE]; \
threadgroup T shared_memory[THREADGROUP_SIZE]; \
\
shared_memory[tid] = 0; \
/* \
@ -93,12 +93,13 @@ kernel void NAME(
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \
size_t idx = start_idx + tid; \
\
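/* synchronize before the max reduction; this barrier was missing and broke long sequences */ \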
threadgroup_barrier(mem_flags::mem_threadgroup); \
\
float tmp = -INFINITY; \
while (idx < stop_idx) { \
shared_memory[tid] = MAX(shared_memory[tid], src[idx]); \
tmp = MAX(tmp, float(src[idx])); \
idx += block_dim; \
} \
shared_memory[tid] = tmp; \
\
threadgroup_barrier(mem_flags::mem_threadgroup); \
\
@ -106,21 +107,26 @@ kernel void NAME(
if (tid < s) { \
shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]); \
} \
threadgroup_barrier(mem_flags::mem_threadgroup); \
} \
\
/* wait for shared_memory[0] to be filled */ \
threadgroup_barrier(mem_flags::mem_threadgroup); \
\
float _max = shared_memory[0]; \
\
/* prevent tid=0 from overwriting _max before other threads have read it */ \
threadgroup_barrier(mem_flags::mem_threadgroup); \
shared_memory[tid] = 0; \
\
idx = start_idx + tid; \
while (idx < stop_idx) { \
const T val = T(exp(src[idx] - _max)); \
dst[idx] = val; \
const float val = exp(float(src[idx]) - _max); \
dst[idx] = T(val); \
shared_memory[tid] += val; \
idx += block_dim; \
} \
threadgroup_barrier(mem_flags::mem_threadgroup); \
for (uint s = block_dim / 2; s > 0; s >>= 1) { \
if (tid < s) { \
shared_memory[tid] += shared_memory[tid + s]; \
@ -128,7 +134,7 @@ kernel void NAME(
threadgroup_barrier(mem_flags::mem_threadgroup); \
} \
\
const T inv_acc = T(1/shared_memory[0]); \
const T inv_acc = T(1.0/shared_memory[0]); \
idx = start_idx + tid; \
while (idx < stop_idx) { \
dst[idx] *= inv_acc; \

View File

@ -0,0 +1,209 @@
import Metal
import MetalPerformanceShadersGraph
let dataType = MTLDataType.float;
var B = 2;
var M = 2;
var N = 2;
var K = 2;
var A_trans = false;
var B_trans = false;
var D_trans = false;
var alpha = Float(1.0);
var beta = Float(0.0);
var batched = B > 1;
var fused_activation = false;
var fused_bias = false;
let constants = MTLFunctionConstantValues()
constants.setConstantValue(&M, type: .uint, index: 0)
constants.setConstantValue(&N, type: .uint, index: 1)
constants.setConstantValue(&K, type: .uint, index: 2)
constants.setConstantValue(&A_trans, type: .bool, index: 10)
constants.setConstantValue(&B_trans, type: .bool, index: 11)
constants.setConstantValue(&D_trans, type: .bool, index: 13)
constants.setConstantValue(&alpha, type: .float, index: 20)
constants.setConstantValue(&beta, type: .float, index: 21)
constants.setConstantValue(&batched, type: .bool, index: 100)
constants.setConstantValue(&fused_activation, type: .bool, index: 101)
constants.setConstantValue(&fused_bias, type: .bool, index: 50001)
var M_simd = UInt16(16)
var N_simd = UInt16(16)
var K_simd = UInt16(32)
var M_splits = UInt16(2)
var N_splits = UInt16(2)
constants.setConstantValue(&M_simd, type: .ushort, index: 200)
constants.setConstantValue(&N_simd, type: .ushort, index: 201)
constants.setConstantValue(&K_simd, type: .ushort, index: 202)
constants.setConstantValue(&M_splits, type: .ushort, index: 210)
constants.setConstantValue(&N_splits, type: .ushort, index: 211)
let M_group = M_simd * M_splits
let N_group = N_simd * N_splits
// Satisfy Metal API validation.
#if DEBUG
do {
var garbage: SIMD4<UInt64> = .zero
constants.setConstantValue(&garbage, type: .bool, index: 102)
constants.setConstantValue(&garbage, type: .bool, index: 103)
constants.setConstantValue(&garbage, type: .bool, index: 113)
constants.setConstantValue(&garbage, type: .bool, index: 50000)
}
#endif
let device = MTLCopyAllDevices().first!
device.shouldMaximizeConcurrentCompilation = true
var libraryURL = URL.init(string: "/Users/nicolas/src/candle/candle-metal-kernels/")!;
libraryURL.append(component: "src")
libraryURL.append(component: "libMetalFlashAttention.metallib")
let library = try! device.makeLibrary(URL: libraryURL)
var name: String
switch dataType {
case .half: name = "hgemm"
case .float: name = "sgemm"
default: fatalError()
}
let function = try! library.makeFunction(
name: name, constantValues: constants)
let A_block_length = M_group * K_simd
let B_block_length = K_simd * N_group
var blockElements = A_block_length + B_block_length;
if (M % 8 != 0) && (N % 8 != 0) {
let C_block_length = M_group * N_group;
blockElements = max(C_block_length, blockElements)
}
if fused_bias {
if D_trans {
blockElements = max(blockElements, M_group)
} else {
blockElements = max(blockElements, N_group)
}
}
// let blockBytes = blockElements * UInt16(dataType.size)
let elementSize = 4
let blockBytes = blockElements * UInt16(elementSize)
func ceilDivide(target: Int, granularity: UInt16) -> Int {
(target + Int(granularity) - 1) / Int(granularity)
}
var gridSize = MTLSize(
width: ceilDivide(target: N, granularity: N_group),
height: ceilDivide(target: M, granularity: M_group),
depth: 1)
let groupSize = MTLSize(
width: Int(32 * M_splits * N_splits),
height: 1,
depth: 1)
let commandQueue = device.makeCommandQueue()!
let threadgroupMemoryLength = blockBytes;
let rowsA = M;
let columnsA = K;
let rowsB = K;
let columnsB = N;
let rowsC = M;
let columnsC = N;
var arrayA = [Float](repeating: 0, count: B * rowsA * columnsA)
var arrayB = [Float](repeating: 0, count: B * rowsB * columnsB)
var arrayC = [Float](repeating: 0, count: B * rowsC * columnsC)
var arrayD = [Float](repeating: 0, count: B * rowsC * columnsC)
for i in 0..<arrayA.count {
arrayA[i] = Float(i)
}
for i in 0..<arrayB.count {
arrayB[i] = Float(i)
}
let bufferA = device.makeBuffer(bytes: arrayA, length: B * rowsA * columnsA * MemoryLayout<Float>.stride, options: [])!
let bufferB = device.makeBuffer(bytes: arrayB, length: B * rowsB * columnsB * MemoryLayout<Float>.stride, options: [])!
let bufferC = device.makeBuffer(length: B * rowsC * columnsC * MemoryLayout<Float>.stride, options: [])!
let bufferD = device.makeBuffer(length: B * rowsC * columnsC * MemoryLayout<Float>.stride, options: [])!
let pipeline = try device.makeComputePipelineState(function: function)
func call(bufferA: MTLBuffer, bufferB: MTLBuffer, bufferC: MTLBuffer){
let encoder = commandBuffer.makeComputeCommandEncoder(dispatchType: MTLDispatchType.serial)!
encoder.setComputePipelineState(pipeline)
encoder.setThreadgroupMemoryLength(Int(threadgroupMemoryLength), index: 0)
encoder.setBuffer(bufferA, offset: 0, index: 0)
encoder.setBuffer(bufferB, offset: 0, index: 1)
encoder.setBuffer(bufferC, offset: 0, index: 2)
let gridZ: Int = B
if batched{
func byteStride(shape: [Int]) -> Int {
let rank = shape.count
var output = elementSize * shape[rank - 2] * shape[rank - 1]
if shape.dropLast(2).reduce(1, *) == 1 {
output = 0
}
return output
}
let byteStrideA = M*K*elementSize
let byteStrideB = N*K*elementSize
let byteStrideC = M*N*elementSize
let byteStrideD = 0
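// Per-batch byte offsets for operands A, B, C and D, packed as SIMD4<UInt64> and bound at buffer index 10 (this mirrors the batched path in call_gemm).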
withUnsafeTemporaryAllocation(
of: SIMD4<UInt64>.self, capacity: gridZ
) { buffer in
for i in 0..<buffer.count {
buffer[i] = SIMD4(
UInt64(truncatingIfNeeded: i * byteStrideA),
UInt64(truncatingIfNeeded: i * byteStrideB),
UInt64(truncatingIfNeeded: i * byteStrideC),
UInt64(truncatingIfNeeded: i * byteStrideD))
}
let bufferLength = buffer.count * MemoryLayout<SIMD4<UInt64>>.stride
assert(MemoryLayout<SIMD4<UInt64>>.stride == 8 * 4)
encoder.setBytes(buffer.baseAddress!, length: bufferLength, index: 10)
}
}
gridSize.depth = gridZ
encoder.dispatchThreadgroups(
gridSize, threadsPerThreadgroup: groupSize
)
encoder.endEncoding()
}
var commandBuffer = commandQueue.makeCommandBuffer()!
call(bufferA:bufferA, bufferB:bufferB, bufferC:bufferC)
commandBuffer.commit()
commandBuffer = commandQueue.makeCommandBuffer()!
// Command buffers on the same queue execute in submission order, so no cross-buffer event wait is needed before the second matmul.
call(bufferA:bufferA, bufferB:bufferC, bufferC:bufferD)
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
var contents = bufferC.contents();
var count = B * rowsA * columnsB;
var typedPointer = contents.bindMemory(to: Float.self, capacity: count)
var bufferedPointer = UnsafeBufferPointer(start: typedPointer, count: count)
print("First matmul is OK", Array(bufferedPointer))
contents = bufferD.contents();
count = B * rowsA * columnsB;
typedPointer = contents.bindMemory(to: Float.self, capacity: count)
bufferedPointer = UnsafeBufferPointer(start: typedPointer, count: count)
print("This should be filled", Array(bufferedPointer))

View File

@ -2,6 +2,13 @@ use super::*;
use half::{bf16, f16};
use metal::{CompileOptions, Device, MTLResourceOptions, MTLSize, NSUInteger};
fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
let ptr = buffer.contents() as *const T;
assert!(!ptr.is_null());
let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
slice.to_vec()
}
fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer {
let options = MTLResourceOptions::StorageModeManaged;
let ptr = data.as_ptr() as *const core::ffi::c_void;
@ -30,7 +37,8 @@ fn approx_bf16(v: Vec<bf16>, digits: i32) -> Vec<f32> {
fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
@ -47,12 +55,13 @@ fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len())
read_to_vec(&output, v.len())
}
fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
@ -72,7 +81,7 @@ fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> V
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(x.len())
read_to_vec(&output, x.len())
}
fn run_strided<T: Clone>(
@ -87,7 +96,8 @@ fn run_strided<T: Clone>(
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
let output = new_buffer(&device, v);
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
call_unary_strided(
&device,
command_buffer,
@ -103,7 +113,7 @@ fn run_strided<T: Clone>(
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len())
read_to_vec(&output, v.len())
}
#[test]
@ -240,7 +250,8 @@ fn binary_add_f32() {
fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
@ -261,7 +272,7 @@ fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<U>(v.len())
read_to_vec(&output, v.len())
}
#[test]
@ -287,7 +298,8 @@ fn cast_u32_f32() {
fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
@ -311,7 +323,7 @@ fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len())
read_to_vec(&output, v.len())
}
fn run_affine_strided<T: Clone>(
@ -322,7 +334,8 @@ fn run_affine_strided<T: Clone>(
add: f64,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
@ -347,7 +360,7 @@ fn run_affine_strided<T: Clone>(
command_buffer.wait_until_completed();
let len: usize = shape.iter().product();
output.read_to_vec::<T>(len)
read_to_vec(&output, len)
}
#[test]
@ -450,7 +463,8 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
_ => unimplemented!(),
};
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
call_index_select(
&device,
&command_buffer,
@ -468,7 +482,7 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
command_buffer.commit();
command_buffer.wait_until_completed();
dst_buffer.read_to_vec::<T>(dst_el)
read_to_vec(&dst_buffer, dst_el)
}
#[test]
@ -534,7 +548,7 @@ fn index_add() {
let expected = vec![
2.0, 3.0, 4.0, 1.0, 1.0, 1.0, 8.0, 9.0, 10.0, 1.0, 1.0, 1.0, 5.0, 6.0, 7.0,
];
let result = outputs_buffer.read_to_vec::<f32>(right.len());
let result: Vec<f32> = read_to_vec(&outputs_buffer, right.len());
assert_eq!(result, expected);
}
@ -552,7 +566,8 @@ fn cos_f16() {
fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
@ -574,12 +589,13 @@ fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(out_length)
read_to_vec(&output, out_length)
}
fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let input = new_buffer(&device, v);
@ -598,7 +614,7 @@ fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'sta
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(v.len())
read_to_vec(&output, v.len())
}
#[test]
@ -629,6 +645,24 @@ fn softmax() {
vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
);
let last_dim = 4096;
let n = 200;
let mut v = vec![0.0; n * last_dim];
for i in 0..n {
v[i * last_dim] = 20.0;
}
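// One large logit (20.0) per row makes each row's softmax effectively one-hot: every row sums to ~1.0 with all of the mass at its first element.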
let results = run_softmax(&v, last_dim, "softmax_float");
let results = approx(results, 4);
println!("{results:?}");
assert_eq!(
results.iter().map(|&s| s.round() as usize).sum::<usize>(),
n
);
assert_eq!(results[0], 1.0);
assert_eq!(results[1], 0.0);
assert_eq!(results[last_dim], 1.0);
assert_eq!(results[2 * last_dim], 1.0);
let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0];
let last_dim = 6;
let results = run_softmax(&v, last_dim, "softmax_float");
@ -679,7 +713,8 @@ fn run_where_cond<I: Clone, T: Clone>(
name: &'static str,
) -> Vec<T> {
let device = device();
let kernels = Kernels::new();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
@ -720,7 +755,7 @@ fn run_where_cond<I: Clone, T: Clone>(
command_buffer.commit();
command_buffer.wait_until_completed();
output.read_to_vec::<T>(length)
read_to_vec(&output, length)
}
#[test]
@ -744,3 +779,93 @@ fn where_cond() {
);
assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]);
}
fn run_gemm<T: Clone>(
(b, m, n, k): (usize, usize, usize, usize),
lhs: &[T],
lhs_stride: Vec<usize>,
lhs_offset: usize,
rhs: &[T],
rhs_stride: Vec<usize>,
rhs_offset: usize,
) -> Vec<T> {
let device = device();
let fence = device.new_fence();
let kernels = Kernels::new(fence);
let command_queue = device.new_command_queue();
let command_buffer = command_queue.new_command_buffer();
let options = MTLResourceOptions::StorageModeManaged;
let lhs = device.new_buffer_with_data(
lhs.as_ptr() as *const core::ffi::c_void,
std::mem::size_of_val(lhs) as u64,
options,
);
let rhs = device.new_buffer_with_data(
rhs.as_ptr() as *const core::ffi::c_void,
std::mem::size_of_val(rhs) as u64,
options,
);
let length = b * m * n;
let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options);
call_gemm(
&device,
command_buffer,
&kernels,
"sgemm",
(b, m, n, k),
&lhs_stride,
lhs_offset,
&lhs,
&rhs_stride,
rhs_offset,
&rhs,
&output,
)
.unwrap();
command_buffer.commit();
command_buffer.wait_until_completed();
read_to_vec(&output, length)
}
#[test]
fn gemm() {
let (b, m, n, k) = (1, 2, 4, 3);
let lhs_stride = vec![m * k, k, 1];
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs_stride = vec![n * k, n, 1];
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
assert_eq!(
approx(results, 4),
vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
);
let (b, m, n, k) = (2, 2, 4, 3);
let lhs_stride = vec![m * k, k, 1];
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs_stride = vec![n * k, n, 1];
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
assert_eq!(
approx(results, 4),
vec![
20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0, 344.0, 365.0, 386.0, 407.0, 488.0,
518.0, 548.0, 578.0
]
);
// OFFSET
let (b, m, n, k) = (2, 2, 4, 3);
let lhs_stride = vec![m * k, k, 1];
let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
let rhs_stride = vec![n * k, n, 1];
let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
// Manually set batch_size=1 and offset by 12 elements * 4 bytes (the size of an f32)
let results = run_gemm((1, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 12 * 4);
assert_eq!(
approx(results, 4),
vec![56.0, 59.0, 62.0, 65.0, 200.0, 212.0, 224.0, 236.0]
);
}

View File

@ -220,7 +220,7 @@ impl candle::CustomOp1 for SoftmaxLastDim {
};
let n = layout.stride().len();
if !(layout.stride()[n - 1] == 1 && layout.start_offset() == 0) {
if !(layout.is_contiguous() && layout.stride()[n - 1] == 1 && layout.start_offset() == 0) {
candle::bail!("Non contiguous softmax-last-dim is not implemented");
}
@ -238,8 +238,6 @@ impl candle::CustomOp1 for SoftmaxLastDim {
&mut output,
)
.unwrap();
command_buffer.commit();
output.did_modify_range(metal::NSRange::new(0, output.length()));
let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype());
Ok((newstorage, layout.shape().clone()))
}

View File

@ -144,7 +144,6 @@ impl RotaryEmbedding {
let freqs = t.matmul(&inv_freq)?;
let sin = freqs.sin()?;
let cos = freqs.cos()?;
// todo!("{}", sin);
Ok(Self { sin, cos })
}
@ -273,10 +272,6 @@ impl MHA {
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
// let view = xs.to_string();
// if view.contains("NaN") {
// panic!("NaN");
// }
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self