Compare commits

..

5 Commits

SHA1 Message Date
a9d0657432 Better version ? 2023-12-13 12:09:20 +01:00
87dc559817 Lots of updates including some stack of command buffers. 2023-12-12 17:41:56 +01:00
da0af3cb3e Merge pull request #1408 from jbochi/metal_gelu2
Fix NaN errors for Gelu in Metal
2023-12-09 19:46:36 +01:00
803ac8405b Put back affine strided tests
Co-Authored-By: Ivar Flakstad <69173633+ivarflakstad@users.noreply.github.com>
2023-12-06 17:04:15 +01:00
6e25822d4f Fix gelu for large x 2023-12-06 09:59:44 -05:00
13 changed files with 671 additions and 578 deletions

View File

@@ -38,7 +38,8 @@ impl From<String> for MetalError {
pub struct MetalDevice {
device: metal::Device,
command_queue: metal::CommandQueue,
command_buffer: Arc<RwLock<metal::CommandBuffer>>,
command_buffers: Arc<RwLock<Vec<metal::CommandBuffer>>>,
command_buffer_index: Arc<RwLock<usize>>,
kernels: Arc<candle_metal_kernels::Kernels>,
buffers: Arc<RwLock<HashMap<(NSUInteger, MTLResourceOptions), Vec<Arc<Buffer>>>>>,
}
@@ -70,38 +71,70 @@ impl MetalDevice {
&self.command_queue
}
pub fn command_buffer(&self) -> std::sync::RwLockReadGuard<CommandBuffer> {
self.command_buffer.try_read().unwrap()
}
pub fn commit(&self) {
let mut old = self.command_buffer.try_write().unwrap();
match old.status() {
metal::MTLCommandBufferStatus::NotEnqueued
| metal::MTLCommandBufferStatus::Enqueued => {
old.commit();
let command_buffer = self.command_queue.new_command_buffer().to_owned();
*old = command_buffer;
pub fn command_buffer(&self) -> CommandBuffer {
let mut command_buffers = self.command_buffers.try_write().unwrap();
let mut index = self.command_buffer_index.try_write().unwrap();
let n = command_buffers.len();
if *index == n {
// todo!("Cycle buffers");
for i in 0..n {
let command_buffer = &command_buffers[i];
match command_buffer.status() {
metal::MTLCommandBufferStatus::Committed
| metal::MTLCommandBufferStatus::Scheduled => {
// println!("Wait during cycling {i}");
// println!("Command {i} / {n}: {:?}", command_buffer.status());
command_buffer.wait_until_completed();
}
metal::MTLCommandBufferStatus::Completed => {}
_ => {
panic!("Command buffer {i} not committed during cycling");
}
}
}
_ => {}
let new_buffers = (0..n)
.map(|i| {
// println!("Creating command buffer {i}");
let command_buffer = self.command_queue.new_command_buffer().to_owned();
command_buffer.set_label(&format!("num {i}"));
command_buffer.enqueue();
command_buffer
})
.collect();
*command_buffers = new_buffers;
*index = 0;
// println!("Reset");
}
// println!("Giving buffer {} / {n}", *index);
let out = &command_buffers[*index];
assert_eq!(out.status(), metal::MTLCommandBufferStatus::Enqueued);
*index += 1;
out.to_owned()
}
pub fn wait_until_completed(&self) {
let mut old = self.command_buffer.try_write().unwrap();
match old.status() {
metal::MTLCommandBufferStatus::NotEnqueued
| metal::MTLCommandBufferStatus::Enqueued => {
old.commit();
old.wait_until_completed();
let command_buffers = self.command_buffers.try_write().unwrap();
let index = self.command_buffer_index.try_write().unwrap();
let n = command_buffers.len();
// for i in 0..*index {
// let command_buffer = &command_buffers[i];
// println!("Command {i} / {n}: {:?}", command_buffer.status());
// }
for i in 0..*index {
let command_buffer = &command_buffers[i];
match command_buffer.status() {
metal::MTLCommandBufferStatus::Committed
| metal::MTLCommandBufferStatus::Scheduled => {}
metal::MTLCommandBufferStatus::Completed => {}
_ => {
panic!("Command buffer not committed");
}
}
metal::MTLCommandBufferStatus::Committed | metal::MTLCommandBufferStatus::Scheduled => {
old.wait_until_completed();
}
_ => {}
// println!("Wait {i}");
command_buffer.wait_until_completed();
// println!("Ok {i}");
// command_buffer.wait_until_completed();
}
let command_buffer = self.command_queue.new_command_buffer().to_owned();
*old = command_buffer;
}
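The scheme above replaces the single rotating command buffer with a pool of pre-enqueued buffers handed out round-robin: command_buffer() returns the next enqueued buffer, and once all of them have been handed out it waits for the in-flight ones and rebuilds the pool. A minimal standalone sketch of the same idea using plain metal-rs types (the CommandBufferPool name and next() method are illustrative, not part of this change):

use metal::{CommandBuffer, CommandQueue, MTLCommandBufferStatus};

struct CommandBufferPool {
    buffers: Vec<CommandBuffer>,
    index: usize,
}

impl CommandBufferPool {
    fn new(queue: &CommandQueue, n: usize) -> Self {
        let buffers = (0..n)
            .map(|i| {
                let cb = queue.new_command_buffer().to_owned();
                cb.enqueue(); // fix the GPU execution order at creation time
                cb.set_label(&format!("num {i}"));
                cb
            })
            .collect();
        Self { buffers, index: 0 }
    }

    // Hand out the next pre-enqueued buffer; once every buffer has been
    // handed out, wait for the in-flight ones and rebuild the pool.
    fn next(&mut self, queue: &CommandQueue) -> CommandBuffer {
        let n = self.buffers.len();
        if self.index == n {
            for cb in &self.buffers {
                match cb.status() {
                    MTLCommandBufferStatus::Committed
                    | MTLCommandBufferStatus::Scheduled => cb.wait_until_completed(),
                    MTLCommandBufferStatus::Completed => {}
                    _ => panic!("buffer was handed out but never committed"),
                }
            }
            *self = Self::new(queue, n);
        }
        let out = self.buffers[self.index].to_owned();
        self.index += 1;
        out
    }
}

Every caller is expected to commit the buffer it receives, which is what the per-op command_buffer.commit() calls further down enforce.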
pub fn kernels(&self) -> &Kernels {
@@ -112,28 +145,40 @@ impl MetalDevice {
&self.device
}
pub fn new_buffer(&self, element_count: usize, dtype: DType) -> Arc<Buffer> {
pub fn new_buffer(&self, element_count: usize, dtype: DType, name: &str) -> Arc<Buffer> {
let size = (element_count * dtype.size_in_bytes()) as NSUInteger;
self._new_buffer(size, MTLResourceOptions::StorageModePrivate)
self._new_buffer(size, MTLResourceOptions::StorageModePrivate, name)
}
fn _new_buffer(&self, size: NSUInteger, option: MTLResourceOptions) -> Arc<Buffer> {
fn _new_buffer(&self, size: NSUInteger, option: MTLResourceOptions, name: &str) -> Arc<Buffer> {
// println!("Creating new buffer {name}");
let mut buffers = self.buffers.try_write().unwrap();
let subbuffers = buffers.entry((size, option)).or_insert(vec![]);
for sub in &mut *subbuffers {
if Arc::strong_count(sub) == 1 {
// println!("Reusing tensor {size} {name}");
return sub.clone();
}
}
let new_buffer = self.device.new_buffer(size as NSUInteger, option);
let new_buffer = Arc::new(new_buffer);
subbuffers.push(new_buffer.clone());
// subbuffers.push(new_buffer.clone());
// println!("Created tensor {size} {name}");
for subbuffers in buffers.values_mut() {
let newbuffers = subbuffers
.iter()
.filter(|s| Arc::strong_count(s) > 1)
.map(|s| Arc::clone(s))
.collect();
*subbuffers = newbuffers;
}
new_buffer
}
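_new_buffer keeps a free-list per (size, resource options) key: an Arc<Buffer> whose strong count is 1 is referenced only by the cache, so no live tensor can still be using it and it may be handed out again. Note that in this revision the push into the cache is commented out and each allocation prunes entries whose strong count has dropped to 1, so the recycling path is effectively disabled. A tiny self-contained sketch of the strong-count trick with the push kept, using Vec<u8> in place of metal::Buffer (names are illustrative):

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type Cache = RwLock<HashMap<usize, Vec<Arc<Vec<u8>>>>>;

fn get_or_alloc(cache: &Cache, size: usize) -> Arc<Vec<u8>> {
    let mut map = cache.write().unwrap();
    let entries = map.entry(size).or_default();
    // strong_count == 1 means the cache holds the only reference,
    // so the buffer is free and can be reused.
    if let Some(free) = entries.iter().find(|b| Arc::strong_count(b) == 1) {
        return free.clone();
    }
    let fresh = Arc::new(vec![0u8; size]);
    entries.push(fresh.clone());
    fresh
}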
pub fn new_buffer_managed(&self, size: NSUInteger) -> Arc<Buffer> {
self._new_buffer(size, MTLResourceOptions::StorageModeManaged)
self._new_buffer(size, MTLResourceOptions::StorageModeShared, "managed")
}
pub fn new_buffer_with_data<T>(&self, data: &[T]) -> Arc<Buffer> {
@@ -141,15 +186,25 @@ impl MetalDevice {
let tmp = self.device.new_buffer_with_data(
data.as_ptr() as *const core::ffi::c_void,
size,
metal::MTLResourceOptions::StorageModeManaged,
metal::MTLResourceOptions::StorageModeShared,
);
let real = self._new_buffer(size, metal::MTLResourceOptions::StorageModePrivate);
{
let command = self.command_buffer();
let blit = command.new_blit_command_encoder();
blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
blit.end_encoding();
}
let real = self._new_buffer(
size,
metal::MTLResourceOptions::StorageModePrivate,
"with_data",
);
let command_buffer = self.command_buffer();
command_buffer.set_label("with_data");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("with_data_blit");
blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
blit.end_encoding();
command_buffer.commit();
drop(command_buffer);
// real.did_modify_range(metal::NSRange::new(0, real.length()));
// println!("Command {:?}", command.status());
// self.commit();
// This is necessary, for mmaped safetensors
// Because of the unsafe slice cast we're doing.
// The slice might not live long enough for metal
@@ -169,15 +224,29 @@ impl MetalDevice {
dtype: DType,
) -> Result<(Matrix, Arc<Buffer>)> {
let elem_count = (b * m * n) as usize;
let out_buffer = self.new_buffer(elem_count, dtype);
let buffer = self.new_buffer(elem_count, dtype, "matrix");
let command_buffer = self.command_buffer();
command_buffer.set_label("zeros_matmul");
let blit = command_buffer.new_blit_command_encoder();
blit.fill_buffer(
&buffer,
metal::NSRange {
location: 0,
length: buffer.length(),
},
0,
);
blit.end_encoding();
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
let result_descriptor =
MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id);
let result_matrix = Matrix::init_with_buffer_descriptor(&out_buffer, 0, &result_descriptor)
let result_matrix = Matrix::init_with_buffer_descriptor(&buffer, 0, &result_descriptor)
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
Ok((result_matrix, out_buffer))
Ok((result_matrix, buffer))
}
pub fn capture<P: AsRef<Path>>(&self, path: P) -> Result<()> {
@@ -241,13 +310,20 @@ impl BackendStorage for MetalStorage {
self.dtype
);
}
self.device.wait_until_completed();
self.buffer
.did_modify_range(metal::NSRange::new(0, self.buffer.length()));
let buffer = self.device.new_buffer_managed(self.buffer.length());
let command_buffer = self.device.command_buffer();
let blit = command_buffer.new_blit_command_encoder();
blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
blit.end_encoding();
drop(command_buffer);
{
let command_buffer = self.device.command_buffer();
command_buffer.set_label("to_cpu");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("blit_to_cpu");
blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
blit.end_encoding();
command_buffer.commit();
}
self.device.wait_until_completed();
match self.dtype {
@@ -256,7 +332,11 @@ impl BackendStorage for MetalStorage {
DType::I64 => Ok(CpuStorage::I64(buffer.read_to_vec(length / size))),
DType::F16 => Ok(CpuStorage::F16(buffer.read_to_vec(length / size))),
DType::BF16 => Ok(CpuStorage::BF16(buffer.read_to_vec(length / size))),
DType::F32 => Ok(CpuStorage::F32(buffer.read_to_vec(length / size))),
DType::F32 => {
let vec = buffer.read_to_vec(length / size);
// println!("Got back {:?}", &vec[..1]);
Ok(CpuStorage::F32(vec))
}
DType::F64 => Ok(CpuStorage::F64(buffer.read_to_vec(length / size))),
}
}
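to_cpu_storage now stages the readback explicitly: the private GPU buffer is blit-copied into a CPU-visible shared buffer, the copy is committed, and the device waits for all outstanding work before reading. A condensed sketch of that path with plain metal-rs calls (the read_back_f32 helper is illustrative):

use metal::MTLResourceOptions;

fn read_back_f32(
    device: &metal::Device,
    queue: &metal::CommandQueue,
    src: &metal::BufferRef, // StorageModePrivate buffer holding f32 data
) -> Vec<f32> {
    let shared = device.new_buffer(src.length(), MTLResourceOptions::StorageModeShared);
    let cb = queue.new_command_buffer();
    let blit = cb.new_blit_command_encoder();
    blit.copy_from_buffer(src, 0, &shared, 0, src.length());
    blit.end_encoding();
    cb.commit();
    cb.wait_until_completed(); // the shared buffer is only valid after this
    let count = shared.length() as usize / std::mem::size_of::<f32>();
    unsafe { std::slice::from_raw_parts(shared.contents() as *const f32, count).to_vec() }
}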
@@ -268,7 +348,7 @@ impl BackendStorage for MetalStorage {
let el = shape.elem_count();
let dtype = self.dtype;
let buffer = device.new_buffer(el, self.dtype);
let buffer = device.new_buffer(el, self.dtype, "affine");
let command_buffer = self.device.command_buffer();
if layout.is_contiguous() && layout.start_offset() == 0 {
let name = match self.dtype {
@@ -309,15 +389,111 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
fn powf(&self, _: &Layout, _: f64) -> Result<Self> {
crate::bail!("powf metal")
fn powf(&self, layout: &Layout, pow: f64) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let el = shape.elem_count();
let dtype = self.dtype;
let buffer = device.new_buffer(el, self.dtype, "powf");
let command_buffer = self.device.command_buffer();
if layout.is_contiguous() && layout.start_offset() == 0 {
let name = match self.dtype {
DType::F32 => "powf_float",
DType::F16 => "powf_half",
dtype => crate::bail!("Powf {dtype:?}"),
};
candle_metal_kernels::call_powf(
&device.device,
&command_buffer,
&device.kernels,
name,
el,
&self.buffer,
&buffer,
pow as f32,
)
.map_err(MetalError::from)?;
} else {
let name = match self.dtype {
DType::F32 => "powf_float_strided",
DType::F16 => "powf_half_strided",
dtype => crate::bail!("Powf {dtype:?}"),
};
candle_metal_kernels::call_powf_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.dims(),
&self.buffer,
layout.stride(),
layout.start_offset() * dtype.size_in_bytes(),
&buffer,
pow as f32,
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
fn elu(&self, _: &Layout, _: f64) -> Result<Self> {
crate::bail!("elu metal")
fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let el = shape.elem_count();
let dtype = self.dtype;
let buffer = device.new_buffer(el, self.dtype, "elu");
let command_buffer = self.device.command_buffer();
if layout.is_contiguous() && layout.start_offset() == 0 {
let name = match self.dtype {
DType::F32 => "elu_float",
DType::F16 => "elu_half",
dtype => crate::bail!("Powf {dtype:?}"),
};
candle_metal_kernels::call_elu(
&device.device,
&command_buffer,
&device.kernels,
name,
el,
&self.buffer,
&buffer,
alpha as f32,
)
.map_err(MetalError::from)?;
} else {
let name = match self.dtype {
DType::F32 => "elu_float_strided",
DType::F16 => "elu_half_strided",
dtype => crate::bail!("Powf {dtype:?}"),
};
candle_metal_kernels::call_elu_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.dims(),
&self.buffer,
layout.stride(),
layout.start_offset() * dtype.size_in_bytes(),
&buffer,
alpha as f32,
)
.map_err(MetalError::from)?;
}
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> {
@@ -365,7 +541,7 @@ impl BackendStorage for MetalStorage {
if dtype == DType::U32 {
crate::bail!("Implement return index reduce op");
}
let buffer = device.new_buffer(dst_el, dtype);
let buffer = device.new_buffer(dst_el, dtype, "reduce");
let command_buffer = self.device.command_buffer();
candle_metal_kernels::call_reduce_contiguous(
&device.device,
@@ -379,6 +555,8 @@ impl BackendStorage for MetalStorage {
&buffer,
)
.map_err(MetalError::from)?;
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device, dtype))
}
@@ -391,9 +569,10 @@ impl BackendStorage for MetalStorage {
let device = self.device();
let shape = layout.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype);
let buffer = device.new_buffer(el_count, dtype, "todtype");
device.wait_until_completed();
let command_buffer = device.command_buffer();
if layout.is_contiguous() {
if layout.is_contiguous() && layout.start_offset() == 0 {
let kernel_name = match (self.dtype, dtype) {
(DType::U32, DType::F32) => "cast_u32_f32",
(DType::U32, DType::U8) => "cast_u32_u8",
@@ -435,6 +614,10 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.set_label("to_dtype");
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
device.wait_until_completed();
Ok(Self::new(buffer, device.clone(), dtype))
}
@@ -444,8 +627,9 @@ impl BackendStorage for MetalStorage {
let dtype = self.dtype;
let shape = layout.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype);
let buffer = device.new_buffer(el_count, dtype, B::KERNEL);
let command_buffer = device.command_buffer();
command_buffer.set_label(B::KERNEL);
if layout.is_contiguous() && layout.start_offset() == 0 {
use candle_metal_kernels::unary::contiguous;
@@ -463,6 +647,7 @@ impl BackendStorage for MetalStorage {
("uceil", DType::F32) => contiguous::ceil::FLOAT,
("ufloor", DType::F32) => contiguous::floor::FLOAT,
("uround", DType::F32) => contiguous::round::FLOAT,
("utanh", DType::F32) => contiguous::tanh::FLOAT,
("ucos", DType::F16) => contiguous::cos::HALF,
("usin", DType::F16) => contiguous::sin::HALF,
("usqr", DType::F16) => contiguous::sqr::HALF,
@@ -476,6 +661,7 @@ impl BackendStorage for MetalStorage {
("uceil", DType::F16) => contiguous::ceil::HALF,
("ufloor", DType::F16) => contiguous::floor::HALF,
("uround", DType::F16) => contiguous::round::HALF,
("utanh", DType::F16) => contiguous::tanh::HALF,
(name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
};
candle_metal_kernels::call_unary_contiguous(
@@ -533,9 +719,8 @@ impl BackendStorage for MetalStorage {
)
.map_err(MetalError::from)?;
}
command_buffer.set_label("unary");
drop(command_buffer);
self.device.commit();
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
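This tail is the pattern applied to every op in the file: the old code dropped its handle and called self.device.commit() to rotate the single shared command buffer, while the new code commits the per-op buffer it was handed from the pool. With pre-enqueued buffers there is no device-wide buffer left to rotate.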
@@ -549,30 +734,31 @@ impl BackendStorage for MetalStorage {
let dtype = self.dtype;
let shape = lhs_l.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype);
let buffer = device.new_buffer(el_count, dtype, B::KERNEL);
let command_buffer = device.command_buffer();
if (lhs_l.is_contiguous() && lhs_l.start_offset() == 0)
&& (rhs_l.is_contiguous() && rhs_l.start_offset() == 0)
&& &B::KERNEL[..1] != "b"
{
use candle_metal_kernels::binary::contiguous;
let kernel_name = match (B::KERNEL, dtype) {
("add", DType::F32) => contiguous::add::FLOAT,
("badd", DType::F32) => contiguous::add::FLOAT,
// ("badd", DType::F32) => contiguous::add::FLOAT,
("sub", DType::F32) => contiguous::sub::FLOAT,
("bsub", DType::F32) => contiguous::sub::FLOAT,
//("bsub", DType::F32) => contiguous::sub::FLOAT,
("mul", DType::F32) => contiguous::mul::FLOAT,
("bmul", DType::F32) => contiguous::mul::FLOAT,
// ("bmul", DType::F32) => contiguous::mul::FLOAT,
("div", DType::F32) => contiguous::div::FLOAT,
("bdiv", DType::F32) => contiguous::div::FLOAT,
// ("bdiv", DType::F32) => contiguous::div::FLOAT,
("add", DType::F16) => contiguous::add::HALF,
("badd", DType::F16) => contiguous::add::HALF,
// ("badd", DType::F16) => contiguous::add::HALF,
("sub", DType::F16) => contiguous::sub::HALF,
("bsub", DType::F16) => contiguous::sub::HALF,
// ("bsub", DType::F16) => contiguous::sub::HALF,
("mul", DType::F16) => contiguous::mul::HALF,
("bmul", DType::F16) => contiguous::mul::HALF,
// ("bmul", DType::F16) => contiguous::mul::HALF,
("div", DType::F16) => contiguous::div::HALF,
("bdiv", DType::F16) => contiguous::div::HALF,
// ("bdiv", DType::F16) => contiguous::div::HALF,
(name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
};
candle_metal_kernels::call_binary_contiguous(
@@ -617,8 +803,8 @@ impl BackendStorage for MetalStorage {
.map_err(MetalError::from)?;
}
command_buffer.set_label("binary");
drop(command_buffer);
self.device.commit();
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@@ -635,7 +821,7 @@ impl BackendStorage for MetalStorage {
let dims = shape.dims();
let el = shape.elem_count();
let dtype = t.dtype;
let buffer = self.device.new_buffer(el, dtype);
let buffer = self.device.new_buffer(el, dtype, "where");
let command_buffer = self.device.command_buffer();
if t.dtype() != f.dtype() {
crate::bail!("Invalid ternary different dtypes for values");
@@ -663,6 +849,8 @@ impl BackendStorage for MetalStorage {
&buffer,
)
.map_err(MetalError::from)?;
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device, dtype))
}
@@ -752,7 +940,7 @@ impl BackendStorage for MetalStorage {
let dst_el = ids_el * left_size * right_size;
let dtype = self.dtype;
let device = self.device();
let buffer = device.new_buffer(dst_el, dtype);
let buffer = device.new_buffer(dst_el, dtype, "index_select");
let name = match (ids.dtype, self.dtype) {
(DType::U32, DType::F32) => "is_u32_f32",
(DType::U32, DType::F16) => "is_u32_f16",
@ -772,6 +960,8 @@ impl BackendStorage for MetalStorage {
&buffer,
)
.map_err(MetalError::from)?;
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(Self::new(buffer, device.clone(), dtype))
}
@@ -795,16 +985,14 @@ impl BackendStorage for MetalStorage {
rhs_l: &Layout,
) -> Result<Self> {
// Create descriptors
let (type_id, size, name) = match self.dtype {
let (type_id, size) = match self.dtype {
DType::F32 => (
metal::mps::MPS_FLOATBIT_ENCODING | 32,
core::mem::size_of::<f32>() as NSUInteger,
"sgemm",
),
DType::F16 => (
metal::mps::MPS_FLOATBIT_ENCODING | 16,
core::mem::size_of::<f16>() as NSUInteger,
"hgemm",
),
dtype => todo!("Dtype for matmul {dtype:?} is not supported"),
};
@@ -838,53 +1026,76 @@ impl BackendStorage for MetalStorage {
mnk: (m, n, k),
})?
};
let b = b as NSUInteger;
let m = m as NSUInteger;
let n = n as NSUInteger;
let k = k as NSUInteger;
let result_buffer = self.device.new_buffer(b * m * n, self.dtype);
let left_matrix = self.matrix(
(b, m, k),
transpose_left,
size,
lhs_l.start_offset() as NSUInteger * size,
type_id,
)?;
let right_matrix = rhs.matrix(
(b, k, n),
transpose_right,
size,
rhs_l.start_offset() as NSUInteger * size,
type_id,
)?;
let (result_matrix, out_buffer) =
self.device
.new_matrix((b, m, n), size, type_id, self.dtype)?;
let command_buffer = self.device.command_buffer();
command_buffer.set_label("matmul");
command_buffer.set_label("mfa gemm");
candle_metal_kernels::call_mfa_gemm(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
&self.buffer,
lhs_l.shape().dims(),
&rhs.buffer,
rhs_l.shape().dims(),
&result_buffer,
(b, m, n, k),
let alpha = 1.0f64;
// let beta = f64::MIN;
let beta = 1.0;
// Create kernel
let matrix_multiplication = MatrixMultiplication::init(
&self.device,
transpose_left,
transpose_right,
m,
n,
k,
alpha,
beta,
)
.map_err(MetalError::from)?;
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
matrix_multiplication.set_batch_size(b);
matrix_multiplication.set_batch_start(0);
drop(command_buffer);
self.device.commit();
Ok(Self::new(
self.buffer.clone(),
self.device.clone(),
self.dtype(),
))
// Encode kernel to command buffer
matrix_multiplication.encode_to_command_buffer(
&command_buffer,
&left_matrix,
&right_matrix,
&result_matrix,
);
command_buffer.commit();
out_buffer.did_modify_range(metal::NSRange::new(0, out_buffer.length()));
// println!("========= MATMUL {:?}", Arc::strong_count(&out_buffer));
Ok(Self::new(out_buffer, self.device.clone(), self.dtype()))
}
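Worth spelling out for the MPS path: MatrixMultiplication computes the usual GEMM update C = alpha * op(A) * op(B) + beta * C. The code passes alpha = 1.0 and beta = 1.0, which yields the plain product only because new_matrix above zero-fills the (possibly recycled) output buffer with a blit before the kernel runs; with beta = 0.0 that fill would be unnecessary.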
fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> {
let command_buffer = self.device.command_buffer();
// println!("Copy strided");
if src_l.is_contiguous() && self.dtype == dst.dtype() {
command_buffer.set_label("copy_contiguous");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("copy_contiguous");
let src_offset = (src_l.start_offset() * self.dtype.size_in_bytes()) as NSUInteger;
let length = (src_l.shape().elem_count() * self.dtype.size_in_bytes()) as NSUInteger;
let dst_offset = (dst_offset * dst.dtype().size_in_bytes()) as NSUInteger;
blit.copy_from_buffer(
&self.buffer,
src_offset,
dst.buffer(),
dst_offset,
self.buffer.length() - src_offset,
);
blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
blit.end_encoding();
} else {
let src_shape = src_l.shape();
@@ -915,8 +1126,7 @@ impl BackendStorage for MetalStorage {
.map_err(MetalError::from)?;
command_buffer.set_label("copy_strided");
}
drop(command_buffer);
self.device.commit();
command_buffer.commit();
Ok(())
}
}
@@ -946,22 +1156,22 @@ impl MetalStorage {
) -> Result<Matrix> {
let key = (b, m, n, transpose, size, offset, type_id);
let mut matrices = self.matrices.try_write().unwrap();
if let Some(matrix) = matrices.get(&key) {
Ok(matrix.clone())
// let mut matrices = self.matrices.try_write().unwrap();
// if let Some(matrix) = matrices.get(&key) {
// Ok(matrix.clone())
// } else {
let descriptor = if transpose {
MatrixDescriptor::init_multiple(n, m, b, m * size, m * n * size, type_id)
} else {
let descriptor = if transpose {
MatrixDescriptor::init_multiple(n, m, b, m * size, m * n * size, type_id)
} else {
MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id)
};
let matrix = Matrix::init_with_buffer_descriptor(&self.buffer, offset, &descriptor)
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
matrices.insert(key, matrix.clone());
Ok(matrix)
}
MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id)
};
let matrix = Matrix::init_with_buffer_descriptor(&self.buffer, offset, &descriptor)
.ok_or_else(|| {
MetalError::from("Failed to create matrix multiplication kernel".to_string())
})?;
// matrices.insert(key, matrix.clone());
Ok(matrix)
// }
}
}
@@ -969,16 +1179,29 @@ impl BackendDevice for MetalDevice {
type Storage = MetalStorage;
fn new(ordinal: usize) -> Result<Self> {
// println!("CREATING DEVICE");
let device = metal::Device::all().swap_remove(ordinal);
let n = 64;
let command_queue = device.new_command_queue();
let command_buffer = Arc::new(RwLock::new(command_queue.new_command_buffer().to_owned()));
let command_buffers = (0..n)
.map(|i| {
let command_buffer = command_queue.new_command_buffer().to_owned();
command_buffer.enqueue();
command_buffer.set_label(&format!("num {i}"));
command_buffer
})
.collect();
let command_buffers = Arc::new(RwLock::new(command_buffers));
let command_buffer_index = Arc::new(RwLock::new(0));
let kernels = Arc::new(Kernels::new());
let buffers = Arc::new(RwLock::new(HashMap::new()));
Ok(Self {
device,
command_queue,
command_buffer,
command_buffers,
command_buffer_index,
buffers,
kernels,
})
@@ -999,7 +1222,21 @@ impl BackendDevice for MetalDevice {
}
fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> {
let buffer = self.new_buffer(shape.elem_count(), dtype);
let buffer = self.new_buffer(shape.elem_count(), dtype, "zeros");
let command_buffer = self.command_buffer();
command_buffer.set_label("zeros");
let blit = command_buffer.new_blit_command_encoder();
blit.fill_buffer(
&buffer,
metal::NSRange {
location: 0,
length: buffer.length(),
},
0,
);
blit.end_encoding();
command_buffer.commit();
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
Ok(MetalStorage::new(buffer, self.clone(), dtype))
}
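zeros_impl writes zeros explicitly with a blit fill rather than assuming a fresh allocation is zeroed, which matters once buffers can be recycled with stale contents. The core of the pattern in isolation (a sketch assuming queue and buffer are in scope):

let cb = queue.new_command_buffer();
let blit = cb.new_blit_command_encoder();
// fill_buffer writes the byte value 0 over the whole allocation
blit.fill_buffer(&buffer, metal::NSRange::new(0, buffer.length()), 0);
blit.end_encoding();
cb.commit();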

View File

@@ -1864,7 +1864,7 @@ impl Tensor {
}
(Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
(Storage::Metal(storage), Device::Cpu) => {
println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
// println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
Storage::Cpu(storage.to_cpu_storage()?)
}
(Storage::Cuda(storage), Device::Cuda(cuda)) => {

View File

@@ -11,7 +11,6 @@ license = "MIT OR Apache-2.0"
[dependencies]
metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }
metal-flash-attention = { path = "../../../metal-flash-attention" }
once_cell = "1.18.0"
thiserror = "1"
tracing = "0.1.37"

View File

@@ -29,9 +29,7 @@ kernel void FN_NAME( \
if (id >= dim) { \
return; \
} \
const TYPENAME m = TYPENAME(mul); \
const TYPENAME a = TYPENAME(add); \
output[id] = input[id] * m + a; \
output[id] = TYPENAME(float(input[id]) * mul + add); \
} \
kernel void FN_NAME##_strided( \
constant size_t &dim, \
@@ -47,15 +45,80 @@ kernel void FN_NAME##_strided( \
if (id >= dim) { \
return; \
} \
const TYPENAME m = TYPENAME(mul); \
const TYPENAME a = TYPENAME(add); \
output[id] = input[get_strided_index(id, num_dims, dims, strides)] * m + a; \
output[id] = TYPENAME(float(input[get_strided_index(id, num_dims, dims, strides)]) * mul + add); \
}
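Both affine variants now promote the input to float, apply mul and add at full precision, and only cast back to TYPENAME at the end; for half inputs this avoids rounding the scale and offset down to f16 before the arithmetic.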
#define POWF(FN_NAME, TYPENAME) \
kernel void FN_NAME( \
constant size_t &dim, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
output[id] = TYPENAME(pow(input[id], TYPENAME(mul))); \
} \
kernel void FN_NAME##_strided( \
constant size_t &dim, \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
output[id] = TYPENAME(pow(input[get_strided_index(id, num_dims, dims, strides)], TYPENAME(mul))); \
}
#define ELU(FN_NAME, TYPENAME) \
kernel void FN_NAME( \
constant size_t &dim, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
const TYPENAME x = input[id]; \
output[id] = TYPENAME((x > 0) ? x : mul * (exp(x) - 1)); \
} \
kernel void FN_NAME##_strided( \
constant size_t &dim, \
constant size_t &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant float &mul, \
device const TYPENAME *input, \
device TYPENAME *output, \
uint id [[ thread_position_in_grid ]] \
) { \
if (id >= dim) { \
return; \
} \
const TYPENAME x = input[get_strided_index(id, num_dims, dims, strides)]; \
output[id] = TYPENAME((x > 0) ? x : mul * (exp(x) - 1)); \
} \
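For reference, the function these kernels implement is elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, with the mul parameter carrying alpha.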
AFFINE(affine_float, float)
AFFINE(affine_half, half)
POWF(powf_float, float)
POWF(powf_half, half)
ELU(elu_float, float)
ELU(elu_half, half)
#if __METAL_VERSION__ >= 310
AFFINE(affine_bfloat, bfloat);
POWF(powf_bfloat, bfloat);
ELU(elu_bfloat, bfloat);
#endif

View File

@@ -1,12 +1,9 @@
use metal::{
Buffer, CommandBufferRef, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState,
Device, Function, FunctionConstantValues, Library, MTLDataType, MTLResourceUsage, MTLSize,
NSUInteger,
Device, Function, Library, MTLSize,
};
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;
use std::ffi::c_void;
use std::hash::Hash;
use std::io::{stdout, Write};
use std::sync::RwLock;
const AFFINE: &str = include_str!("affine.metal");
@@ -16,7 +13,6 @@ const BINARY: &str = include_str!("binary.metal");
const TERNARY: &str = include_str!("ternary.metal");
const CAST: &str = include_str!("cast.metal");
const REDUCE: &str = include_str!("reduce.metal");
const MFA_LIB: &[u8] = include_bytes!("mfa.metallib");
fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) {
let size = length as u64;
@@ -109,7 +105,6 @@ pub enum Source {
Ternary,
Cast,
Reduce,
MetalFlashAttention,
}
macro_rules! ops{
@@ -158,7 +153,7 @@ macro_rules! ops{
}
pub mod unary {
ops!(cos, sin, exp, sqr, sqrt, neg, log, gelu, ceil, floor, round, erf, gelu_erf);
ops!(cos, sin, exp, sqr, sqrt, neg, log, gelu, ceil, floor, round, erf, gelu_erf, tanh);
}
pub mod binary {
ops!(add, sub, mul, div);
@@ -184,7 +179,7 @@ impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
}
}
type KernelMap<T> = HashMap<KernelKey, T>;
type KernelMap<T> = HashMap<&'static str, T>;
type Libraries = HashMap<Source, Library>;
type Pipelines = KernelMap<ComputePipelineState>;
@@ -194,22 +189,6 @@ pub struct Kernels {
pipelines: RwLock<Pipelines>,
}
enum LibraryDefinition {
Source(&'static str),
Data(&'static [u8]),
}
impl From<&'static str> for LibraryDefinition {
fn from(s: &'static str) -> Self {
Self::Source(s)
}
}
impl From<&'static [u8]> for LibraryDefinition {
fn from(s: &'static [u8]) -> Self {
Self::Data(s)
}
}
impl Kernels {
pub fn new() -> Self {
let libraries = RwLock::new(Libraries::new());
@@ -220,16 +199,15 @@ impl Kernels {
}
}
fn get_library_source(&self, source: Source) -> LibraryDefinition {
fn get_library_source(&self, source: Source) -> &'static str {
match source {
Source::Affine => AFFINE.into(),
Source::Unary => UNARY.into(),
Source::Binary => BINARY.into(),
Source::Ternary => TERNARY.into(),
Source::Indexing => INDEXING.into(),
Source::Cast => CAST.into(),
Source::Reduce => REDUCE.into(),
Source::MetalFlashAttention => MFA_LIB.into(),
Source::Affine => AFFINE,
Source::Unary => UNARY,
Source::Binary => BINARY,
Source::Ternary => TERNARY,
Source::Indexing => INDEXING,
Source::Cast => CAST,
Source::Reduce => REDUCE,
}
}
@@ -242,15 +220,10 @@ impl Kernels {
if let Some(lib) = libraries.get(&source) {
Ok(lib.clone())
} else {
let lib = match self.get_library_source(source) {
LibraryDefinition::Source(source_content) => device
.new_library_with_source(source_content, &CompileOptions::new())
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?,
LibraryDefinition::Data(data) => device
.new_library_with_data(data)
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?,
};
let source_content = self.get_library_source(source);
let lib = device
.new_library_with_source(source_content, &CompileOptions::new())
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?;
libraries.insert(source, lib.clone());
Ok(lib)
}
@@ -260,190 +233,43 @@
&self,
device: &Device,
source: Source,
key: KernelKey,
name: &'static str,
) -> Result<Function, MetalKernelError> {
let func = self
.load_library(device, source)?
.get_function(
key.name,
key.constants.map(|c| c.create_function_constant_values()),
)
.get_function(name, None)
.map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?;
Ok(func)
// let mut funcs = self.funcs.write()?;
// if let Some(func) = funcs.get(name) {
// Ok(func.clone())
// } else {
// funcs.insert(name, func.clone());
// Ok(func)
// }
}
pub fn load_pipeline<T: Into<KernelKey>>(
pub fn load_pipeline(
&self,
device: &Device,
source: Source,
key: T,
name: &'static str,
) -> Result<ComputePipelineState, MetalKernelError> {
let key: KernelKey = key.into();
let mut pipelines = self.pipelines.write()?;
if let Some(pipeline) = pipelines.get(&key) {
if let Some(pipeline) = pipelines.get(name) {
Ok(pipeline.clone())
} else {
let func = self.load_function(device, source, key.clone())?;
let func = self.load_function(device, source, name)?;
let pipeline = device
.new_compute_pipeline_state_with_function(&func)
.map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
pipelines.insert(key, pipeline.clone());
pipelines.insert(name, pipeline.clone());
Ok(pipeline)
}
}
}
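With the function-constant machinery gone, a pipeline is identified by its kernel name alone and the cache is a plain HashMap<&'static str, ComputePipelineState>. Callers elsewhere in the diff resolve pipelines like this (a sketch, assuming a metal::Device in scope inside a function returning Result<(), MetalKernelError>):

let kernels = Kernels::new();
let pipeline = kernels.load_pipeline(&device, Source::Affine, "powf_float")?;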
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct KernelKey {
name: &'static str,
constants: Option<ConstantValues>,
}
impl KernelKey {
fn new(name: &'static str) -> Self {
Self {
name,
constants: None,
}
}
fn with_constants(mut self, constants: ConstantValues) -> Self {
self.constants = Some(constants);
self
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum ConstantValueId {
Index(NSUInteger),
Name(&'static str),
}
trait MetalDType {
const MTL_DATA_TYPE: MTLDataType;
}
macro_rules! metal_dtype {
($ty:ty, $mtl_data_type:ident) => {
impl MetalDType for $ty {
const MTL_DATA_TYPE: MTLDataType = MTLDataType::$mtl_data_type;
}
};
}
metal_dtype!(f32, Float);
metal_dtype!(u32, UInt);
metal_dtype!(u16, UShort);
metal_dtype!(bool, Bool);
#[derive(Debug, Clone, PartialEq)]
enum ConstantValueType {
Float(f32),
Uint(u32),
UShort(u16),
Bool(bool),
}
impl Hash for ConstantValueType {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
use ConstantValueType::*;
match self {
Float(v) => v.to_bits().hash(state),
Uint(v) => v.hash(state),
UShort(v) => v.hash(state),
Bool(v) => v.hash(state),
}
}
}
impl Eq for ConstantValueType {}
#[derive(Debug, Clone, PartialEq, Eq)]
struct ConstantValues(BTreeMap<ConstantValueId, ConstantValueType>);
macro_rules! add_indexed_constant {
($fcv:expr, $value:expr, $ty:ty, $idx:expr) => {
$fcv.set_constant_value_at_index(
$value as *const $ty as *const c_void,
<$ty>::MTL_DATA_TYPE,
$idx,
)
};
}
macro_rules! add_named_constant {
($fcv:expr, $value:expr, $ty:ty, $name:expr) => {
$fcv.set_constant_value_with_name(
$value as *const $ty as *const c_void,
<$ty>::MTL_DATA_TYPE,
$name,
)
};
}
impl Hash for ConstantValues {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
for (id, value) in &self.0 {
id.hash(state);
value.hash(state);
}
}
}
impl ConstantValues {
fn new() -> Self {
Self(BTreeMap::new())
}
fn set(mut self, id: impl Into<ConstantValueId>, value: impl Into<ConstantValueType>) -> Self {
self.0.insert(id.into(), value.into());
self
}
fn create_function_constant_values(&self) -> FunctionConstantValues {
use ConstantValueId::*;
use ConstantValueType::*;
let mut function_values = FunctionConstantValues::new();
for (id, value) in &self.0 {
match (&id, &value) {
(Index(index), Float(value)) => {
add_indexed_constant!(function_values, value, f32, *index);
}
(Index(index), Uint(value)) => {
add_indexed_constant!(function_values, value, u32, *index);
}
(Index(index), UShort(value)) => {
add_indexed_constant!(function_values, value, u16, *index);
}
(Index(index), Bool(value)) => {
add_indexed_constant!(function_values, value, bool, *index);
}
(Name(name), Float(value)) => {
add_named_constant!(function_values, value, f32, name);
}
(Name(name), Uint(value)) => {
add_named_constant!(function_values, value, u32, name);
}
(Name(name), UShort(value)) => {
add_named_constant!(function_values, value, u16, name);
}
(Name(name), Bool(value)) => {
add_named_constant!(function_values, value, bool, name);
}
}
}
function_values
}
}
impl From<&'static str> for KernelKey {
fn from(name: &'static str) -> Self {
Self {
name,
constants: None,
}
}
}
#[allow(clippy::too_many_arguments)]
pub fn call_unary_contiguous(
device: &Device,
@@ -790,6 +616,130 @@ pub fn call_affine_strided(
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_powf(
device: &Device,
command_buffer: &CommandBufferRef,
kernels: &Kernels,
name: &'static str,
size: usize,
input: &Buffer,
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.end_encoding();
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_powf_strided(
device: &Device,
command_buffer: &CommandBufferRef,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
input: &Buffer,
input_stride: &[usize],
input_offset: usize,
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let size: usize = shape.iter().product();
let encoder = command_buffer.new_compute_command_encoder();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
size,
shape.len(),
shape,
input_stride,
mul,
(input, input_offset),
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.end_encoding();
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_elu(
device: &Device,
command_buffer: &CommandBufferRef,
kernels: &Kernels,
name: &'static str,
size: usize,
input: &Buffer,
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = command_buffer.new_compute_command_encoder();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.end_encoding();
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_elu_strided(
device: &Device,
command_buffer: &CommandBufferRef,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
input: &Buffer,
input_stride: &[usize],
input_offset: usize,
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let size: usize = shape.iter().product();
let encoder = command_buffer.new_compute_command_encoder();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
size,
shape.len(),
shape,
input_stride,
mul,
(input, input_offset),
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
encoder.end_encoding();
Ok(())
}
pub fn call_where_cond_strided(
device: &Device,
command_buffer: &CommandBufferRef,
@@ -880,230 +830,5 @@ pub fn call_index_select(
Ok(())
}
impl From<NSUInteger> for ConstantValueId {
fn from(idx: NSUInteger) -> Self {
Self::Index(idx)
}
}
impl From<usize> for ConstantValueId {
fn from(idx: usize) -> Self {
ConstantValueId::from(idx as NSUInteger)
}
}
impl From<i32> for ConstantValueId {
fn from(idx: i32) -> Self {
ConstantValueId::from(idx as NSUInteger)
}
}
impl From<&'static str> for ConstantValueId {
fn from(name: &'static str) -> Self {
Self::Name(name)
}
}
macro_rules! to_constant_value {
($ty:ty, $constant_value_type:ident) => {
to_constant_value!($ty, $ty, $constant_value_type);
};
($ty:ty, $via:ty, $constant_value_type:ident) => {
impl From<$ty> for ConstantValueType {
fn from(v: $ty) -> Self {
Self::$constant_value_type(v as $via)
}
}
};
}
to_constant_value!(f32, Float);
to_constant_value!(u32, Uint);
to_constant_value!(usize, u32, Uint);
to_constant_value!(u16, UShort);
to_constant_value!(bool, Bool);
struct MFAGemmConfig {
m: usize,
k: usize,
n: usize,
transpose_left: bool,
transpose_right: bool,
batched: bool,
m_simd: u16,
n_simd: u16,
k_simd: u16,
m_splits: u16,
n_splits: u16,
m_group: u16,
n_group: u16,
}
impl From<MFAGemmConfig> for ConstantValues {
fn from(conf: MFAGemmConfig) -> Self {
ConstantValues::new()
.set(0, conf.m)
.set(1, conf.k)
.set(2, conf.n)
.set(10, conf.transpose_left)
.set(11, conf.transpose_right)
.set(12, false)
.set(20, 1.0)
.set(21, 0.0)
.set(100, conf.batched)
.set(101, false)
.set(50001, false)
.set(200, conf.m_simd)
.set(201, conf.n_simd)
.set(202, conf.k_simd)
.set(210, conf.m_splits)
.set(211, conf.n_splits)
// garbage
.set(102, false)
.set(103, false)
.set(113, false)
.set(50000, false)
}
}
#[allow(clippy::too_many_arguments)]
pub fn call_mfa_gemm(
device: &Device,
command_buffer: &CommandBufferRef,
kernels: &Kernels,
name: &'static str,
lhs: &Buffer,
lhs_dims: &[usize],
rhs: &Buffer,
rhs_dims: &[usize],
output: &Buffer,
(b, m, n, k): (usize, usize, usize, usize),
transpose_left: bool,
transpose_right: bool,
) -> Result<(), MetalKernelError> {
let batched = b > 1;
let mut c_elements = m * n;
if batched {
c_elements *= 2;
}
let is_half = name == "hgemm";
let is_float = name == "sgemm";
let mut m_group = 32;
let mut n_group = 32;
let mut k_simd = 32;
if c_elements > 10 ^ 6 {
m_group = 48;
n_group = 48;
}
// If K_simd is perfectly equal to matrix K, the compiler can elide a large
// amount of logic in the kernel.
if k >= 33 && k <= 40 {
k_simd = 40;
} else if is_half && k >= 73 && k >= 80 {
k_simd = 80;
} else if c_elements > 10 ^ 6 {
if k <= 16 {
k_simd = 16;
} else if k <= 24 {
k_simd = 24;
} else if k <= 32 {
k_simd = 32;
} else if k <= 48 {
k_simd = 24;
} else if k <= 64 {
k_simd = 32;
} else if is_float {
k_simd = 24;
}
}
let m_splits = 2;
let n_splits = 2;
let m_simd = m_group / m_splits;
let n_simd = n_group / n_splits;
let config = MFAGemmConfig {
m,
k,
n,
transpose_left,
transpose_right,
batched,
m_simd,
n_simd,
k_simd,
m_splits,
n_splits,
m_group,
n_group,
};
let pipeline = kernels.load_pipeline(
device,
Source::MetalFlashAttention,
KernelKey::new(name).with_constants(config.into()),
)?;
let block_type_size = if is_half { 2 } else { 4 };
let a_block_bytes = m_group * k_simd * block_type_size;
let b_block_bytes = k_simd * n_group * block_type_size;
let c_block_bytes = m_group * n_group * block_type_size;
let mut thread_group_memory_length = a_block_bytes + b_block_bytes;
if m % 8 > 0 && n % 8 > 0 {
thread_group_memory_length = core::cmp::max(thread_group_memory_length, c_block_bytes);
}
let encoder = command_buffer.new_compute_command_encoder();
encoder.set_compute_pipeline_state(&pipeline);
encoder.set_threadgroup_memory_length(0, thread_group_memory_length as NSUInteger);
encoder.use_resources(&[&lhs, &rhs], MTLResourceUsage::Read);
encoder.use_resource(&output, MTLResourceUsage::Write);
encoder.set_buffers(0, &[Some(lhs), Some(rhs), Some(output)], &[0; 3]);
let ceil_divide = |a, b| (a + b - 1) / b;
let mut grid_z = 1;
if batched {
grid_z = lhs_dims[..lhs_dims.len() - 2].iter().product();
let byte_stride = |shape: &[usize]| -> u64 {
let rank = shape.len();
let mut output = core::mem::size_of::<f32>() * shape[rank - 2] * shape[rank - 1];
if shape[..shape.len() - 2].iter().product::<usize>() == 1 {
output = 0;
}
output as u64
};
let byte_stride_a = byte_stride(lhs_dims);
let byte_stride_b = byte_stride(rhs_dims);
let byte_stride_c = byte_stride(&[m, n]);
type BatchConfig = (u64, u64, u64, u64);
let mut batching_conf: Vec<BatchConfig> = vec![];
for i in 0..grid_z {
batching_conf.push((
i as u64 * byte_stride_a,
i as u64 * byte_stride_b,
i as u64 * byte_stride_c,
0u64,
));
}
set_param(encoder, 10, batching_conf.as_slice());
}
let grid_size = MTLSize::new(
ceil_divide(n as NSUInteger, n_group as NSUInteger),
ceil_divide(m as NSUInteger, m_group as NSUInteger),
grid_z as NSUInteger,
);
let group_size = MTLSize::new((32 * m_splits * n_splits) as NSUInteger, 1, 1);
encoder.dispatch_thread_groups(grid_size, group_size);
encoder.end_encoding();
Ok(())
}
#[cfg(test)]
mod tests;

View File

@@ -18,7 +18,7 @@ METAL_FUNC uint get_strided_index(
return strided_i;
}
constant int THREADGROUP_SIZE = 1024;
constant int THREADGROUP_SIZE = 2048;
# define REDUCE(FN, NAME, T) \
kernel void NAME( \

View File

@@ -205,6 +205,25 @@ fn cos_strided_random() {
);
}
#[test]
fn gelu_f16() {
let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]
.iter()
.map(|v| f16::from_f32(*v))
.collect();
let expected: Vec<f32> = vec![-0.0, -0.16, 0.0, 0.84, 1.96, 3.0, 10.0, 20.0];
let results = run(&v, unary::contiguous::gelu::HALF);
assert_eq!(approx_f16(results, 2), expected);
}
#[test]
fn gelu_f32() {
let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0];
let expected: Vec<f32> = vec![-0.0, -0.159, 0.0, 0.841, 1.955, 2.996, 10.0, 20.0];
let results = run(&v, unary::contiguous::gelu::FLOAT);
assert_eq!(approx(results, 3), expected);
}
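These tests pin the tanh-approximated gelu, gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))). As a spot check for x = 3: sqrt(2/pi) * (3 + 0.044715 * 27) ≈ 3.357, tanh(3.357) ≈ 0.9976, and 0.5 * 3 * 1.9976 ≈ 2.996, matching the expected f32 value above. The x > 5 early return added in unary.metal keeps the half variant from overflowing x^3 for large inputs, which is presumably where the original NaNs came from; gelu(x) ≈ x there anyway.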
#[test]
fn binary_add_f32() {
let left = vec![1.0f32, 2.0, 3.0];
@@ -527,8 +546,8 @@ fn cos_f16() {
.collect();
let results = run(&v, unary::contiguous::cos::HALF);
let expected: Vec<f16> = v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect();
assert_eq!(approx_f16(results, 4), vec![0.5405, -0.4163, -0.9902]);
assert_eq!(approx_f16(expected, 4), vec![0.5405, -0.4163, -0.9902]);
assert_eq!(approx_f16(results, 2), vec![0.54, -0.42, -0.99]);
assert_eq!(approx_f16(expected, 2), vec![0.54, -0.42, -0.99]);
}
fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {

View File

@@ -42,9 +42,14 @@ template <typename T> METAL_FUNC T erf(T in){
return T(sign*y);
}
template <typename T> METAL_FUNC T id(T in){ return in; }
template <typename T> METAL_FUNC T gelu_erf(T x){ return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2); }
template <typename T> METAL_FUNC T gelu(T x){
template <typename T> METAL_FUNC T id(T in) { return in; }
template <typename T> METAL_FUNC T gelu_erf(T x) {
return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2);
}
template <typename T> METAL_FUNC T gelu(T x) {
if (x > 5) {
return x;
}
T x_sq = x * x;
T x_cube = x_sq * x;
T alpha = x + static_cast<T>(0.044715) * x_cube;
@@ -64,7 +69,7 @@ kernel void FN_NAME( \
if (thread_position_in_grid >= dim) { \
return; \
} \
output[thread_position_in_grid] = TYPENAME(FN(input[thread_position_in_grid])); \
output[thread_position_in_grid] = TYPENAME(FN(float(input[thread_position_in_grid]))); \
}\
kernel void FN_NAME_STRIDED( \
constant size_t &dim, \
@@ -78,7 +83,7 @@ kernel void FN_NAME_STRIDED( \
if (thread_position_in_grid >= dim) { \
return; \
} \
output[thread_position_in_grid] = TYPENAME(FN(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)])); \
output[thread_position_in_grid] = TYPENAME(FN(float(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)]))); \
}
#define UNARY_OP(NAME) \
@@ -102,6 +107,7 @@ UNARY_OP(floor)
UNARY_OP(round)
UNARY_OP(gelu_erf)
UNARY_OP(erf)
UNARY_OP(tanh)
UNARY(id, float, copy_float, copy_float_strided)
UNARY(id, half, copy_half, copy_half_strided)
UNARY(id, uint8_t, copy_u8, copy_u8_strided)
@@ -121,6 +127,7 @@ BFLOAT_UNARY_OP(floor)
BFLOAT_UNARY_OP(round)
BFLOAT_UNARY_OP(gelu_erf)
BFLOAT_UNARY_OP(erf)
BFLOAT_UNARY_OP(tanh)
UNARY(id, bfloat, copy_bfloat, copy_bfloat_strided)
#endif

View File

@@ -19,6 +19,7 @@ num-traits = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
serde = { workspace = true }
metal = { workspace = true, optional = true }
candle-metal-kernels = { path = "../candle-metal-kernels", version = "0.3.0", optional = true }
[dev-dependencies]
@@ -30,4 +31,4 @@ default = []
accelerate = ["dep:accelerate-src", "candle/accelerate"]
cuda = ["candle/cuda"]
mkl = ["dep:intel-mkl-src", "candle/mkl"]
metal = ["candle/metal", "dep:candle-metal-kernels"]
metal = ["candle/metal", "dep:candle-metal-kernels", "dep:metal"]

View File

@@ -226,7 +226,7 @@ impl candle::CustomOp1 for SoftmaxLastDim {
let last_dim = layout.dims()[layout.shape().rank() - 1];
let elem_count = layout.shape().elem_count();
let mut output = device.new_buffer(elem_count, storage.dtype());
let mut output = device.new_buffer(elem_count, storage.dtype(), "softmax");
candle_metal_kernels::call_last_softmax(
device.metal_device(),
&command_buffer,
@@ -238,6 +238,8 @@ impl candle::CustomOp1 for SoftmaxLastDim {
&mut output,
)
.unwrap();
command_buffer.commit();
output.did_modify_range(metal::NSRange::new(0, output.length()));
let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype());
Ok((newstorage, layout.shape().clone()))
}

View File

@@ -31,3 +31,4 @@ accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"]
cuda = ["candle/cuda", "candle-nn/cuda"]
flash-attn = ["cuda", "dep:candle-flash-attn"]
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
metal = ["candle/metal", "candle-nn/metal"]

View File

@@ -142,10 +142,10 @@ impl RotaryEmbedding {
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
let sin = freqs.sin()?;
let cos = freqs.cos()?;
// todo!("{}", sin);
Ok(Self { sin, cos })
}
fn apply_rotary_emb_qkv(
@@ -273,6 +273,10 @@ impl MHA {
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
// let view = xs.to_string();
// if view.contains("NaN") {
// panic!("NaN");
// }
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self
@@ -408,3 +412,38 @@ impl MixFormerSequentialForCausalLM {
self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_rotary() {
let dev = Device::new_metal(0).unwrap();
for i in 0..10000 {
let dim = 8;
let max_seq_len = 12;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), &dev).unwrap();
let t = Tensor::arange(0u32, max_seq_len as u32, &dev)
.unwrap()
.to_dtype(DType::F32)
.unwrap()
.reshape((max_seq_len, 1))
.unwrap();
let x: f32 = t.i((1, 0)).unwrap().to_scalar().unwrap();
assert_eq!(x, 1.0);
let x: f32 = inv_freq.i((0, 1)).unwrap().to_scalar().unwrap();
assert_eq!(x, 0.1);
let freqs = t.matmul(&inv_freq).unwrap();
let x: f32 = freqs.i((1, 1)).unwrap().to_scalar().unwrap();
assert_eq!(x, 0.1);
let sin = freqs.sin().unwrap().contiguous().unwrap();
let x: f32 = sin.i((1, 1)).unwrap().to_scalar().unwrap();
assert_eq!(x, 0.099833414);
}
}
}
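The new test_rotary runs the same small rotary-embedding computation 10000 times on the Metal device and asserts exact intermediate values (inv_freq[1] = 0.1, freqs[1][1] = 0.1, sin(0.1) ≈ 0.099833414), presumably to shake out the nondeterminism that a mis-synchronized stack of command buffers would introduce.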