Compare commits


2 Commits

SHA1 Message Date
ce0783d9ff Stash for debugging 2023-12-10 13:11:53 +01:00
35352e441a Begin adding mfa support 2023-12-08 21:51:49 +01:00
16 changed files with 672 additions and 1307 deletions

View File

@@ -61,7 +61,7 @@ tracing-subscriber = "0.3.7"
 wav = "1.0.0"
 yoke = { version = "0.7.2", features = ["derive"] }
 zip = { version = "0.6.6", default-features = false }
-metal = { version = "0.27.0", features = ["mps"], package = "candle-metal" }
+metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }

 [profile.release-with-debug]
 inherits = "release"

View File

@@ -4,7 +4,9 @@ use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT};
 use crate::{CpuStorage, DType, Layout, Result, Shape};
 use candle_metal_kernels;
 use candle_metal_kernels::Kernels;
+use half::f16;
 use metal;
+use metal::mps::matrix::{Matrix, MatrixDescriptor, MatrixMultiplication};
 use metal::{Buffer, CommandBuffer, CommandQueue, MTLResourceOptions, NSUInteger};
 use std::collections::HashMap;
 use std::path::Path;
@@ -36,9 +38,7 @@ impl From<String> for MetalError {
 pub struct MetalDevice {
     device: metal::Device,
     command_queue: metal::CommandQueue,
-    command_buffers: Arc<RwLock<Vec<metal::CommandBuffer>>>,
-    command_buffer_index: Arc<RwLock<usize>>,
-    fence: metal::Fence,
+    command_buffer: Arc<RwLock<metal::CommandBuffer>>,
     kernels: Arc<candle_metal_kernels::Kernels>,
     buffers: Arc<RwLock<HashMap<(NSUInteger, MTLResourceOptions), Vec<Arc<Buffer>>>>>,
 }
@@ -70,34 +70,38 @@ impl MetalDevice {
         &self.command_queue
     }

-    pub fn command_buffer(&self) -> CommandBuffer {
-        let mut command_buffers = self.command_buffers.try_write().unwrap();
-        let mut command_buffer = command_buffers[0].to_owned();
-        let mut index = self.command_buffer_index.try_write().unwrap();
-        if *index > 20 {
-            command_buffer.commit();
-            command_buffer = self.command_queue.new_command_buffer().to_owned();
-            *command_buffers = vec![command_buffer.clone()];
-            *index = 0;
-        }
-        *index += 1;
-        command_buffer
+    pub fn command_buffer(&self) -> std::sync::RwLockReadGuard<CommandBuffer> {
+        self.command_buffer.try_read().unwrap()
     }

-    pub fn wait_until_completed(&self) {
-        let mut command_buffers = self.command_buffers.try_write().unwrap();
-        let command_buffer = &command_buffers[0];
-        match command_buffer.status() {
-            metal::MTLCommandBufferStatus::Committed
-            | metal::MTLCommandBufferStatus::Scheduled
-            | metal::MTLCommandBufferStatus::Completed => {
-                panic!("Already committed");
+    pub fn commit(&self) {
+        let mut old = self.command_buffer.try_write().unwrap();
+        match old.status() {
+            metal::MTLCommandBufferStatus::NotEnqueued
+            | metal::MTLCommandBufferStatus::Enqueued => {
+                old.commit();
+                let command_buffer = self.command_queue.new_command_buffer().to_owned();
+                *old = command_buffer;
             }
             _ => {}
         }
-        command_buffer.commit();
-        command_buffer.wait_until_completed();
-        *command_buffers = vec![self.command_queue.new_command_buffer().to_owned()];
+    }
+
+    pub fn wait_until_completed(&self) {
+        let mut old = self.command_buffer.try_write().unwrap();
+        match old.status() {
+            metal::MTLCommandBufferStatus::NotEnqueued
+            | metal::MTLCommandBufferStatus::Enqueued => {
+                old.commit();
+                old.wait_until_completed();
+            }
+            metal::MTLCommandBufferStatus::Committed | metal::MTLCommandBufferStatus::Scheduled => {
+                old.wait_until_completed();
+            }
+            _ => {}
+        }
+        let command_buffer = self.command_queue.new_command_buffer().to_owned();
+        *old = command_buffer;
     }

     pub fn kernels(&self) -> &Kernels {
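With this change every operation shares one command buffer behind an RwLock: callers borrow it read-only, release the guard, and let the device rotate it on commit. A minimal sketch of that calling pattern, assuming only the MetalDevice API above (the op body is hypothetical):

    fn run_op(dev: &MetalDevice) {
        {
            let command_buffer = dev.command_buffer(); // RwLockReadGuard<CommandBuffer>
            // ... encode compute or blit work against &*command_buffer ...
        } // guard dropped here
        dev.commit();               // commit the shared buffer, install a fresh one
        dev.wait_until_completed(); // only needed before the CPU reads results
    }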
@@ -108,17 +112,12 @@ impl MetalDevice {
         &self.device
     }

-    pub fn new_buffer(&self, element_count: usize, dtype: DType, name: &str) -> Arc<Buffer> {
+    pub fn new_buffer(&self, element_count: usize, dtype: DType) -> Arc<Buffer> {
         let size = (element_count * dtype.size_in_bytes()) as NSUInteger;
-        self._new_buffer(size, MTLResourceOptions::StorageModePrivate, name)
+        self._new_buffer(size, MTLResourceOptions::StorageModePrivate)
     }

-    fn _new_buffer(
-        &self,
-        size: NSUInteger,
-        option: MTLResourceOptions,
-        _name: &str,
-    ) -> Arc<Buffer> {
+    fn _new_buffer(&self, size: NSUInteger, option: MTLResourceOptions) -> Arc<Buffer> {
         let mut buffers = self.buffers.try_write().unwrap();
         let subbuffers = buffers.entry((size, option)).or_insert(vec![]);
@@ -130,20 +129,11 @@ impl MetalDevice {
         let new_buffer = self.device.new_buffer(size as NSUInteger, option);
         let new_buffer = Arc::new(new_buffer);
         subbuffers.push(new_buffer.clone());
-        for subbuffers in buffers.values_mut() {
-            let newbuffers = subbuffers
-                .iter()
-                .filter(|s| Arc::strong_count(s) > 1)
-                .map(|s| Arc::clone(s))
-                .collect();
-            *subbuffers = newbuffers;
-        }
         new_buffer
     }

     pub fn new_buffer_managed(&self, size: NSUInteger) -> Arc<Buffer> {
-        self._new_buffer(size, MTLResourceOptions::StorageModeManaged, "managed")
+        self._new_buffer(size, MTLResourceOptions::StorageModeManaged)
    }
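Dropping the pruning loop changes the pool's policy: an entry whose Arc::strong_count is 1 is owned only by the cache, so it now stays around as a free buffer instead of being collected. The reuse lookup itself sits outside the lines shown here, so the following is a toy reconstruction of the whole policy under that assumption, with Vec<u8> standing in for metal::Buffer:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};

    struct BufferPool {
        // Keyed like the real cache: (size, resource options) -> allocations.
        buffers: RwLock<HashMap<u64, Vec<Arc<Vec<u8>>>>>,
    }

    impl BufferPool {
        fn get(&self, size: u64) -> Arc<Vec<u8>> {
            let mut buffers = self.buffers.try_write().unwrap();
            let subbuffers = buffers.entry(size).or_insert_with(Vec::new);
            // strong_count == 1 means only the pool holds it: free to recycle.
            if let Some(b) = subbuffers.iter().find(|b| Arc::strong_count(b) == 1) {
                return b.clone();
            }
            let new_buffer = Arc::new(vec![0u8; size as usize]);
            subbuffers.push(new_buffer.clone());
            new_buffer
        }
    }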
     pub fn new_buffer_with_data<T>(&self, data: &[T]) -> Arc<Buffer> {
@@ -153,20 +143,13 @@ impl MetalDevice {
             size,
             metal::MTLResourceOptions::StorageModeManaged,
         );
-        let real = self._new_buffer(
-            size,
-            metal::MTLResourceOptions::StorageModePrivate,
-            "with_data",
-        );
-        let command_buffer = self.command_buffer();
-        command_buffer.set_label("with_data");
-        let blit = command_buffer.new_blit_command_encoder();
-        blit.wait_for_fence(&self.fence);
-        blit.set_label("with_data_blit");
-        blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
-        blit.update_fence(&self.fence);
-        blit.end_encoding();
+        let real = self._new_buffer(size, metal::MTLResourceOptions::StorageModePrivate);
+        {
+            let command = self.command_buffer();
+            let blit = command.new_blit_command_encoder();
+            blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
+            blit.end_encoding();
+        }
         // This is necessary, for mmaped safetensors
         // Because of the unsafe slice cast we're doing.
         // The slice might not live long enough for metal
@@ -178,6 +161,25 @@ impl MetalDevice {
         real
     }

+    pub fn new_matrix(
+        &self,
+        (b, m, n): (NSUInteger, NSUInteger, NSUInteger),
+        size: NSUInteger,
+        type_id: u32,
+        dtype: DType,
+    ) -> Result<(Matrix, Arc<Buffer>)> {
+        let elem_count = (b * m * n) as usize;
+        let out_buffer = self.new_buffer(elem_count, dtype);
+
+        let result_descriptor =
+            MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id);
+        let result_matrix = Matrix::init_with_buffer_descriptor(&out_buffer, 0, &result_descriptor)
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
+        Ok((result_matrix, out_buffer))
+    }
+
     pub fn capture<P: AsRef<Path>>(&self, path: P) -> Result<()> {
         let capture = metal::CaptureManager::shared();
         let descriptor = metal::CaptureDescriptor::new();
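new_matrix follows MPSMatrixDescriptor's packed row-major convention: the fourth argument is the byte stride between rows and the fifth the byte stride between matrices in the batch. The arithmetic, worked through for illustrative sizes:

    // b = 2 matrices of shape (m, n) = (3, 5) stored packed in f32:
    let (b, m, n) = (2u64, 3u64, 5u64);
    let size = core::mem::size_of::<f32>() as u64; // 4 bytes per element
    let row_bytes = n * size;        // 5 * 4  = 20 bytes from one row to the next
    let matrix_bytes = m * n * size; // 15 * 4 = 60 bytes from one matrix to the next
    assert_eq!((row_bytes, matrix_bytes), (20, 60));
    let _ = b; // the batch count itself does not affect either stride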
@@ -195,6 +197,22 @@ impl MetalDevice {
 #[derive(Debug, Clone)]
 pub struct MetalStorage {
     buffer: Arc<metal::Buffer>,
+    matrices: Arc<
+        RwLock<
+            HashMap<
+                (
+                    NSUInteger,
+                    NSUInteger,
+                    NSUInteger,
+                    bool,
+                    NSUInteger,
+                    NSUInteger,
+                    u32,
+                ),
+                Matrix,
+            >,
+        >,
+    >,
     device: MetalDevice,
     dtype: DType,
 }
@@ -223,27 +241,23 @@ impl BackendStorage for MetalStorage {
                 self.dtype
             );
         }
         let buffer = self.device.new_buffer_managed(self.buffer.length());
-        {
-            let command_buffer = self.device.command_buffer();
-            command_buffer.set_label("to_cpu");
-            let blit = command_buffer.new_blit_command_encoder();
-            blit.set_label("blit_to_cpu");
-            blit.wait_for_fence(&self.device.fence);
-            blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
-            blit.update_fence(&self.device.fence);
-            blit.end_encoding();
-        }
+        let command_buffer = self.device.command_buffer();
+        let blit = command_buffer.new_blit_command_encoder();
+        blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
+        blit.end_encoding();
+        drop(command_buffer);
         self.device.wait_until_completed();

         match self.dtype {
-            DType::U8 => Ok(CpuStorage::U8(read_to_vec(&buffer, length / size))),
-            DType::U32 => Ok(CpuStorage::U32(read_to_vec(&buffer, length / size))),
-            DType::I64 => Ok(CpuStorage::I64(read_to_vec(&buffer, length / size))),
-            DType::F16 => Ok(CpuStorage::F16(read_to_vec(&buffer, length / size))),
-            DType::BF16 => Ok(CpuStorage::BF16(read_to_vec(&buffer, length / size))),
-            DType::F32 => Ok(CpuStorage::F32(read_to_vec(&buffer, length / size))),
-            DType::F64 => Ok(CpuStorage::F64(read_to_vec(&buffer, length / size))),
+            DType::U8 => Ok(CpuStorage::U8(buffer.read_to_vec(length / size))),
+            DType::U32 => Ok(CpuStorage::U32(buffer.read_to_vec(length / size))),
+            DType::I64 => Ok(CpuStorage::I64(buffer.read_to_vec(length / size))),
+            DType::F16 => Ok(CpuStorage::F16(buffer.read_to_vec(length / size))),
+            DType::BF16 => Ok(CpuStorage::BF16(buffer.read_to_vec(length / size))),
+            DType::F32 => Ok(CpuStorage::F32(buffer.read_to_vec(length / size))),
+            DType::F64 => Ok(CpuStorage::F64(buffer.read_to_vec(length / size))),
         }
     }
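Buffer::read_to_vec is not defined anywhere in this compare; it presumably lives in the patched candle-metal fork of the metal crate (note the 0.27.0 -> 0.27.1 bump in the first file). An extension-trait sketch with the same shape as the free function this file deletes further down:

    trait ReadToVec {
        fn read_to_vec<T: Clone>(&self, n: usize) -> Vec<T>;
    }

    impl ReadToVec for metal::Buffer {
        fn read_to_vec<T: Clone>(&self, n: usize) -> Vec<T> {
            let ptr = self.contents() as *const T;
            assert!(!ptr.is_null());
            // Safety: the buffer must be CPU-visible and hold at least n values of T.
            let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
            slice.to_vec()
        }
    }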
@@ -254,7 +268,7 @@ impl BackendStorage for MetalStorage {
         let el = shape.elem_count();
         let dtype = self.dtype;

-        let buffer = device.new_buffer(el, self.dtype, "affine");
+        let buffer = device.new_buffer(el, self.dtype);
         let command_buffer = self.device.command_buffer();
         if layout.is_contiguous() && layout.start_offset() == 0 {
             let name = match self.dtype {
@@ -298,102 +312,12 @@ impl BackendStorage for MetalStorage {
         Ok(Self::new(buffer, device.clone(), dtype))
     }

-    fn powf(&self, layout: &Layout, pow: f64) -> Result<Self> {
-        let device = self.device().clone();
-        let shape = layout.shape();
-        let el = shape.elem_count();
-        let dtype = self.dtype;
-        let buffer = device.new_buffer(el, self.dtype, "powf");
-        let command_buffer = self.device.command_buffer();
-        if layout.is_contiguous() && layout.start_offset() == 0 {
-            let name = match self.dtype {
-                DType::F32 => "powf_float",
-                DType::F16 => "powf_half",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_powf(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                el,
-                &self.buffer,
-                &buffer,
-                pow as f32,
-            )
-            .map_err(MetalError::from)?;
-        } else {
-            let name = match self.dtype {
-                DType::F32 => "powf_float_strided",
-                DType::F16 => "powf_half_strided",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_powf_strided(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                layout.dims(),
-                &self.buffer,
-                layout.stride(),
-                layout.start_offset() * dtype.size_in_bytes(),
-                &buffer,
-                pow as f32,
-            )
-            .map_err(MetalError::from)?;
-        }
-        Ok(Self::new(buffer, device.clone(), dtype))
+    fn powf(&self, _: &Layout, _: f64) -> Result<Self> {
+        crate::bail!("powf metal")
     }

-    fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> {
-        let device = self.device().clone();
-        let shape = layout.shape();
-        let el = shape.elem_count();
-        let dtype = self.dtype;
-        let buffer = device.new_buffer(el, self.dtype, "elu");
-        let command_buffer = self.device.command_buffer();
-        if layout.is_contiguous() && layout.start_offset() == 0 {
-            let name = match self.dtype {
-                DType::F32 => "elu_float",
-                DType::F16 => "elu_half",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_elu(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                el,
-                &self.buffer,
-                &buffer,
-                alpha as f32,
-            )
-            .map_err(MetalError::from)?;
-        } else {
-            let name = match self.dtype {
-                DType::F32 => "elu_float_strided",
-                DType::F16 => "elu_half_strided",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_elu_strided(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                layout.dims(),
-                &self.buffer,
-                layout.stride(),
-                layout.start_offset() * dtype.size_in_bytes(),
-                &buffer,
-                alpha as f32,
-            )
-            .map_err(MetalError::from)?;
-        }
-        Ok(Self::new(buffer, device.clone(), dtype))
+    fn elu(&self, _: &Layout, _: f64) -> Result<Self> {
+        crate::bail!("elu metal")
     }

     fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> {
@@ -441,7 +365,7 @@ impl BackendStorage for MetalStorage {
         if dtype == DType::U32 {
             crate::bail!("Implement return index reduce op");
         }
-        let buffer = device.new_buffer(dst_el, dtype, "reduce");
+        let buffer = device.new_buffer(dst_el, dtype);
         let command_buffer = self.device.command_buffer();
         candle_metal_kernels::call_reduce_contiguous(
             &device.device,
@@ -467,9 +391,9 @@ impl BackendStorage for MetalStorage {
         let device = self.device();
         let shape = layout.shape();
         let el_count = shape.elem_count();
-        let buffer = device.new_buffer(el_count, dtype, "todtype");
+        let buffer = device.new_buffer(el_count, dtype);
         let command_buffer = device.command_buffer();
-        if layout.is_contiguous() && layout.start_offset() == 0 {
+        if layout.is_contiguous() {
             let kernel_name = match (self.dtype, dtype) {
                 (DType::U32, DType::F32) => "cast_u32_f32",
                 (DType::U32, DType::U8) => "cast_u32_u8",
@@ -511,7 +435,7 @@ impl BackendStorage for MetalStorage {
             )
             .map_err(MetalError::from)?;
         }
-        command_buffer.set_label("to_dtype");

         Ok(Self::new(buffer, device.clone(), dtype))
     }
@@ -520,9 +444,8 @@ impl BackendStorage for MetalStorage {
         let dtype = self.dtype;
         let shape = layout.shape();
         let el_count = shape.elem_count();
-        let buffer = device.new_buffer(el_count, dtype, B::KERNEL);
+        let buffer = device.new_buffer(el_count, dtype);
         let command_buffer = device.command_buffer();
-        command_buffer.set_label(B::KERNEL);
         if layout.is_contiguous() && layout.start_offset() == 0 {
             use candle_metal_kernels::unary::contiguous;
@@ -540,7 +463,6 @@ impl BackendStorage for MetalStorage {
                 ("uceil", DType::F32) => contiguous::ceil::FLOAT,
                 ("ufloor", DType::F32) => contiguous::floor::FLOAT,
                 ("uround", DType::F32) => contiguous::round::FLOAT,
-                ("utanh", DType::F32) => contiguous::tanh::FLOAT,
                 ("ucos", DType::F16) => contiguous::cos::HALF,
                 ("usin", DType::F16) => contiguous::sin::HALF,
                 ("usqr", DType::F16) => contiguous::sqr::HALF,
@@ -554,7 +476,6 @@ impl BackendStorage for MetalStorage {
                 ("uceil", DType::F16) => contiguous::ceil::HALF,
                 ("ufloor", DType::F16) => contiguous::floor::HALF,
                 ("uround", DType::F16) => contiguous::round::HALF,
-                ("utanh", DType::F16) => contiguous::tanh::HALF,
                 (name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
             };
             candle_metal_kernels::call_unary_contiguous(
@@ -612,6 +533,9 @@ impl BackendStorage for MetalStorage {
             )
             .map_err(MetalError::from)?;
         }
+        command_buffer.set_label("unary");
+        drop(command_buffer);
+        self.device.commit();
         Ok(Self::new(buffer, device.clone(), dtype))
     }
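The drop before self.device.commit() matters: command_buffer() returns a read guard on the same RwLock that commit() locks for writing, and both use the try_ variants, so committing while a guard is alive would panic rather than block. A self-contained illustration of that locking rule, with a String standing in for metal::CommandBuffer:

    use std::sync::{RwLock, RwLockReadGuard};

    struct Dev {
        command_buffer: RwLock<String>, // stand-in for metal::CommandBuffer
    }

    impl Dev {
        fn command_buffer(&self) -> RwLockReadGuard<'_, String> {
            self.command_buffer.try_read().unwrap()
        }
        fn commit(&self) {
            // Panics if any guard from command_buffer() is still alive.
            let mut cb = self.command_buffer.try_write().unwrap();
            *cb = String::from("fresh");
        }
    }

    fn main() {
        let dev = Dev { command_buffer: RwLock::new(String::from("cb0")) };
        let guard = dev.command_buffer();
        drop(guard); // without this drop, dev.commit() would panic on try_write()
        dev.commit();
    }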
@@ -625,23 +549,30 @@ impl BackendStorage for MetalStorage {
         let dtype = self.dtype;
         let shape = lhs_l.shape();
         let el_count = shape.elem_count();
-        let buffer = device.new_buffer(el_count, dtype, B::KERNEL);
+        let buffer = device.new_buffer(el_count, dtype);
         let command_buffer = device.command_buffer();
         if (lhs_l.is_contiguous() && lhs_l.start_offset() == 0)
             && (rhs_l.is_contiguous() && rhs_l.start_offset() == 0)
-            && &B::KERNEL[..1] != "b"
         {
             use candle_metal_kernels::binary::contiguous;

             let kernel_name = match (B::KERNEL, dtype) {
                 ("add", DType::F32) => contiguous::add::FLOAT,
+                ("badd", DType::F32) => contiguous::add::FLOAT,
                 ("sub", DType::F32) => contiguous::sub::FLOAT,
+                ("bsub", DType::F32) => contiguous::sub::FLOAT,
                 ("mul", DType::F32) => contiguous::mul::FLOAT,
+                ("bmul", DType::F32) => contiguous::mul::FLOAT,
                 ("div", DType::F32) => contiguous::div::FLOAT,
+                ("bdiv", DType::F32) => contiguous::div::FLOAT,
                 ("add", DType::F16) => contiguous::add::HALF,
+                ("badd", DType::F16) => contiguous::add::HALF,
                 ("sub", DType::F16) => contiguous::sub::HALF,
+                ("bsub", DType::F16) => contiguous::sub::HALF,
                 ("mul", DType::F16) => contiguous::mul::HALF,
+                ("bmul", DType::F16) => contiguous::mul::HALF,
                 ("div", DType::F16) => contiguous::div::HALF,
+                ("bdiv", DType::F16) => contiguous::div::HALF,
                 (name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
             };
             candle_metal_kernels::call_binary_contiguous(
@@ -686,6 +617,8 @@ impl BackendStorage for MetalStorage {
             .map_err(MetalError::from)?;
         }
         command_buffer.set_label("binary");
+        drop(command_buffer);
+        self.device.commit();
         Ok(Self::new(buffer, device.clone(), dtype))
     }
@@ -702,7 +635,7 @@ impl BackendStorage for MetalStorage {
         let dims = shape.dims();
         let el = shape.elem_count();
         let dtype = t.dtype;
-        let buffer = self.device.new_buffer(el, dtype, "where");
+        let buffer = self.device.new_buffer(el, dtype);
         let command_buffer = self.device.command_buffer();
         if t.dtype() != f.dtype() {
             crate::bail!("Invalid ternary different dtypes for values");
@@ -819,7 +752,7 @@ impl BackendStorage for MetalStorage {
         let dst_el = ids_el * left_size * right_size;
         let dtype = self.dtype;
         let device = self.device();
-        let buffer = device.new_buffer(dst_el, dtype, "index_select");
+        let buffer = device.new_buffer(dst_el, dtype);
         let name = match (ids.dtype, self.dtype) {
             (DType::U32, DType::F32) => "is_u32_f32",
             (DType::U32, DType::F16) => "is_u32_f16",
@@ -853,6 +786,7 @@ impl BackendStorage for MetalStorage {
     ) -> Result<Self> {
         crate::bail!("index_add metal")
     }
+
     fn matmul(
         &self,
         rhs: &Self,
@@ -860,33 +794,81 @@ impl BackendStorage for MetalStorage {
         lhs_l: &Layout,
         rhs_l: &Layout,
     ) -> Result<Self> {
-        let buffer = self.device.new_buffer(b * m * n, self.dtype, "matmul");
-        let name = match self.dtype {
-            DType::F32 => "sgemm",
-            DType::F16 => "hgemm",
-            dtype => {
-                return Err(MetalError::Message(format!("matmul doesn't support {dtype:?}")).into())
-            }
+        // Create descriptors
+        let (type_id, size, name) = match self.dtype {
+            DType::F32 => (
+                metal::mps::MPS_FLOATBIT_ENCODING | 32,
+                core::mem::size_of::<f32>() as NSUInteger,
+                "sgemm",
+            ),
+            DType::F16 => (
+                metal::mps::MPS_FLOATBIT_ENCODING | 16,
+                core::mem::size_of::<f16>() as NSUInteger,
+                "hgemm",
+            ),
+            dtype => todo!("Dtype for matmul {dtype:?} is not supported"),
         };
+
+        let lhs_stride = lhs_l.stride();
+        let rhs_stride = rhs_l.stride();
+        let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
+        let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
+        let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
+        let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
+        // The a tensor has dims batching, k, n (rhs)
+        let transpose_left = if lhs_m1 == 1 && lhs_m2 == k {
+            false
+        } else if lhs_m1 == m && lhs_m2 == 1 {
+            true
+        } else {
+            Err(MetalError::MatMulNonContiguous {
+                lhs_stride: lhs_stride.to_vec(),
+                rhs_stride: rhs_stride.to_vec(),
+                mnk: (m, n, k),
+            })?
+        };
+        let transpose_right = if rhs_m1 == 1 && rhs_m2 == n {
+            false
+        } else if rhs_m1 == k && rhs_m2 == 1 {
+            true
+        } else {
+            Err(MetalError::MatMulNonContiguous {
+                lhs_stride: lhs_stride.to_vec(),
+                rhs_stride: rhs_stride.to_vec(),
+                mnk: (m, n, k),
+            })?
+        };
+
+        let result_buffer = self.device.new_buffer(b * m * n, self.dtype);
         let command_buffer = self.device.command_buffer();
-        command_buffer.set_label("matmul");
-        candle_metal_kernels::call_gemm(
+        command_buffer.set_label("mfa gemm");
+        candle_metal_kernels::call_mfa_gemm(
             &self.device.device,
             &command_buffer,
             &self.device.kernels,
             name,
-            (b, m, n, k),
-            &lhs_l.stride(),
-            lhs_l.start_offset() * self.dtype.size_in_bytes(),
             &self.buffer,
-            &rhs_l.stride(),
-            rhs_l.start_offset() * rhs.dtype.size_in_bytes(),
+            lhs_l.shape().dims(),
             &rhs.buffer,
-            &buffer,
+            rhs_l.shape().dims(),
+            &result_buffer,
+            (b, m, n, k),
+            transpose_left,
+            transpose_right,
         )
         .map_err(MetalError::from)?;
-        Ok(Self::new(buffer, self.device.clone(), self.dtype()))
+
+        drop(command_buffer);
+        self.device.commit();
+        Ok(Self::new(
+            self.buffer.clone(),
+            self.device.clone(),
+            self.dtype(),
+        ))
     }

     fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> {
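The transpose flags fall out of the last two strides: a packed row-major operand has innermost stride 1 and an outer stride equal to its column count, while a packed transposed view swaps the two. The same rule as a standalone helper (hypothetical name, same logic as the hunk above):

    /// For an operand viewed as (rows, cols): Some(false) if packed row-major,
    /// Some(true) if it is a packed transposed view, None if a copy is required.
    fn mfa_transpose_flag(stride: &[usize], rows: usize, cols: usize) -> Option<bool> {
        let m1 = stride[stride.len() - 1]; // innermost stride
        let m2 = stride[stride.len() - 2];
        if m1 == 1 && m2 == cols {
            Some(false)
        } else if m1 == rows && m2 == 1 {
            Some(true)
        } else {
            None
        }
    }

    // A (2, 3) row-major lhs has strides [3, 1]                  -> Some(false).
    // A (3, 2) tensor transposed and viewed as (2, 3) has [1, 2] -> Some(true).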
@@ -894,11 +876,15 @@ impl BackendStorage for MetalStorage {
         if src_l.is_contiguous() && self.dtype == dst.dtype() {
             command_buffer.set_label("copy_contiguous");
             let blit = command_buffer.new_blit_command_encoder();
-            blit.set_label("copy_contiguous");
             let src_offset = (src_l.start_offset() * self.dtype.size_in_bytes()) as NSUInteger;
-            let length = (src_l.shape().elem_count() * self.dtype.size_in_bytes()) as NSUInteger;
             let dst_offset = (dst_offset * dst.dtype().size_in_bytes()) as NSUInteger;
-            blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
+            blit.copy_from_buffer(
+                &self.buffer,
+                src_offset,
+                dst.buffer(),
+                dst_offset,
+                self.buffer.length() - src_offset,
+            );
             blit.end_encoding();
         } else {
             let src_shape = src_l.shape();
@@ -929,22 +915,54 @@ impl BackendStorage for MetalStorage {
             .map_err(MetalError::from)?;
             command_buffer.set_label("copy_strided");
         }
+        drop(command_buffer);
+        self.device.commit();
         Ok(())
     }
 }

 impl MetalStorage {
     pub fn new(buffer: Arc<Buffer>, device: MetalDevice, dtype: DType) -> Self {
+        let matrices = Arc::new(RwLock::new(HashMap::new()));
         Self {
             buffer,
             device,
             dtype,
+            matrices,
         }
     }

     pub fn buffer(&self) -> &Buffer {
         &self.buffer
     }
+
+    fn matrix(
+        &self,
+        (b, m, n): (NSUInteger, NSUInteger, NSUInteger),
+        transpose: bool,
+        size: NSUInteger,
+        offset: NSUInteger,
+        type_id: u32,
+    ) -> Result<Matrix> {
+        let key = (b, m, n, transpose, size, offset, type_id);
+
+        let mut matrices = self.matrices.try_write().unwrap();
+        if let Some(matrix) = matrices.get(&key) {
+            Ok(matrix.clone())
+        } else {
+            let descriptor = if transpose {
+                MatrixDescriptor::init_multiple(n, m, b, m * size, m * n * size, type_id)
+            } else {
+                MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id)
+            };
+            let matrix = Matrix::init_with_buffer_descriptor(&self.buffer, offset, &descriptor)
+                .ok_or_else(|| {
+                    MetalError::from("Failed to create matrix multiplication kernel".to_string())
+                })?;
+            matrices.insert(key, matrix.clone());
+            Ok(matrix)
+        }
+    }
 }
 impl BackendDevice for MetalDevice {
@@ -953,28 +971,14 @@ impl BackendDevice for MetalDevice {
     fn new(ordinal: usize) -> Result<Self> {
         let device = metal::Device::all().swap_remove(ordinal);
-        let n = 1;
         let command_queue = device.new_command_queue();
-        let command_buffers = (0..n)
-            .map(|i| {
-                let command_buffer = command_queue.new_command_buffer().to_owned();
-                command_buffer.enqueue();
-                command_buffer.set_label(&format!("num {i}"));
-                command_buffer
-            })
-            .collect();
-        let command_buffers = Arc::new(RwLock::new(command_buffers));
-        let command_buffer_index = Arc::new(RwLock::new(0));
-        let fence = device.new_fence();
-        let kernels = Arc::new(Kernels::new(fence.clone()));
+        let command_buffer = Arc::new(RwLock::new(command_queue.new_command_buffer().to_owned()));
+        let kernels = Arc::new(Kernels::new());
         let buffers = Arc::new(RwLock::new(HashMap::new()));
         Ok(Self {
             device,
-            fence,
             command_queue,
-            command_buffers,
-            command_buffer_index,
+            command_buffer,
             buffers,
             kernels,
         })
@@ -995,21 +999,7 @@ impl BackendDevice for MetalDevice {
     }

     fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> {
-        let buffer = self.new_buffer(shape.elem_count(), dtype, "zeros");
-        let command_buffer = self.command_buffer();
-        command_buffer.set_label("zeros");
-        let blit = command_buffer.new_blit_command_encoder();
-        blit.wait_for_fence(&self.fence);
-        blit.fill_buffer(
-            &buffer,
-            metal::NSRange {
-                location: 0,
-                length: buffer.length(),
-            },
-            0,
-        );
-        blit.update_fence(&self.fence);
-        blit.end_encoding();
+        let buffer = self.new_buffer(shape.elem_count(), dtype);
         Ok(MetalStorage::new(buffer, self.clone(), dtype))
     }
@@ -1060,10 +1050,3 @@ impl BackendDevice for MetalDevice {
         self.storage_from_cpu_storage(&cpu_storage)
     }
 }
-
-fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
-    let ptr = buffer.contents() as *const T;
-    assert!(!ptr.is_null());
-    let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
-    slice.to_vec()
-}

View File

@@ -1864,7 +1864,7 @@ impl Tensor {
             }
             (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
             (Storage::Metal(storage), Device::Cpu) => {
-                // println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
+                println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
                 Storage::Cpu(storage.to_cpu_storage()?)
             }
             (Storage::Cuda(storage), Device::Cuda(cuda)) => {

View File

@@ -900,9 +900,7 @@ fn matmul(device: &Device) -> Result<()> {
     let b = Tensor::from_slice(&data, (2, 2), device)?;

     let c = a.matmul(&b)?;
-    let d = a.matmul(&c)?;
     assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);
-    assert_eq!(d.to_vec2::<f32>()?, &[[37.0, 54.0], [81.0, 118.0]]);

     let data = vec![1.0f32, 2.0];
     let a = Tensor::from_slice(&data, (2, 1), device)?;

View File

@@ -10,7 +10,8 @@ categories = ["science"]
 license = "MIT OR Apache-2.0"

 [dependencies]
-metal = { version = "0.27.0", features = ["mps"], package="candle-metal" }
+metal = { version = "0.27.1", features = ["mps"], package="candle-metal" }
+metal-flash-attention = { path = "../../../metal-flash-attention" }
 once_cell = "1.18.0"
 thiserror = "1"
 tracing = "0.1.37"

View File

@@ -29,7 +29,9 @@ kernel void FN_NAME( \
     if (id >= dim) { \
         return; \
     } \
-    output[id] = TYPENAME(float(input[id]) * mul + add); \
+    const TYPENAME m = TYPENAME(mul); \
+    const TYPENAME a = TYPENAME(add); \
+    output[id] = input[id] * m + a; \
 } \
 kernel void FN_NAME##_strided( \
     constant size_t &dim, \
@@ -45,80 +47,15 @@ kernel void FN_NAME##_strided( \
     if (id >= dim) { \
         return; \
     } \
-    output[id] = TYPENAME(float(input[get_strided_index(id, num_dims, dims, strides)]) * mul + add); \
-}
+    const TYPENAME m = TYPENAME(mul); \
+    const TYPENAME a = TYPENAME(add); \
+    output[id] = input[get_strided_index(id, num_dims, dims, strides)] * m + a; \
+} \

-#define POWF(FN_NAME, TYPENAME) \
-kernel void FN_NAME( \
-    constant size_t &dim, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    output[id] = TYPENAME(pow(input[id], TYPENAME(mul))); \
-} \
-kernel void FN_NAME##_strided( \
-    constant size_t &dim, \
-    constant size_t &num_dims, \
-    constant size_t *dims, \
-    constant size_t *strides, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    output[id] = TYPENAME(pow(input[get_strided_index(id, num_dims, dims, strides)], TYPENAME(mul))); \
-}

-#define ELU(FN_NAME, TYPENAME) \
-kernel void FN_NAME( \
-    constant size_t &dim, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    const TYPENAME x = input[id]; \
-    output[id] = TYPENAME((x > 0)?x: mul * exp(x - 1)); \
-} \
-kernel void FN_NAME##_strided( \
-    constant size_t &dim, \
-    constant size_t &num_dims, \
-    constant size_t *dims, \
-    constant size_t *strides, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    const TYPENAME x = input[get_strided_index(id, num_dims, dims, strides)]; \
-    output[id] = TYPENAME((x > 0)?x: mul * exp(x - 1)); \
-} \

 AFFINE(affine_float, float)
 AFFINE(affine_half, half)
-POWF(powf_float, float)
-POWF(powf_half, half)
-ELU(elu_float, float)
-ELU(elu_half, half)

 #if __METAL_VERSION__ >= 310
 AFFINE(affine_bfloat, bfloat);
-POWF(powf_bfloat, bfloat);
-ELU(elu_bfloat, bfloat);
 #endif
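The rewritten AFFINE macro evaluates x * m + a in TYPENAME instead of promoting through float, trading one rounding at the end for a rounding per operation; for half inputs the two can differ in the final ulp. The same effect reproduced in Rust, with f32 as the narrow type and f64 playing the role of float:

    fn main() {
        let x = 0.1f32;
        let (mul, add) = (3.0f64, 1.0f64);
        let wide = ((x as f64) * mul + add) as f32; // old macro: round once at the end
        let narrow = x * (mul as f32) + (add as f32); // new macro: round after each op
        println!("wide   = {wide:.9} ({:#010x})", wide.to_bits());
        println!("narrow = {narrow:.9} ({:#010x})", narrow.to_bits());
    }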

File diff suppressed because it is too large

View File

@@ -18,7 +18,7 @@ METAL_FUNC uint get_strided_index(
     return strided_i;
 }

-constant int THREADGROUP_SIZE = 2048;
+constant int THREADGROUP_SIZE = 1024;

 # define REDUCE(FN, NAME, T) \
 kernel void NAME( \
@@ -32,7 +32,7 @@ kernel void NAME( \
     uint block_dim [[ threads_per_threadgroup ]] \
 ) { \
     \
-    threadgroup T shared_memory[THREADGROUP_SIZE]; \
+    threadgroup float shared_memory[THREADGROUP_SIZE]; \
     \
     shared_memory[tid] = 0; \
     /* \
@@ -93,13 +93,12 @@ kernel void NAME(
     size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); \
     size_t idx = start_idx + tid; \
     \
-    threadgroup_barrier(mem_flags::mem_threadgroup); \
     \
-    float tmp = -INFINITY; \
     while (idx < stop_idx) { \
-        tmp = MAX(tmp, float(src[idx])); \
+        shared_memory[tid] = MAX(shared_memory[tid], src[idx]); \
         idx += block_dim; \
     } \
-    shared_memory[tid] = tmp; \
     \
     threadgroup_barrier(mem_flags::mem_threadgroup); \
     \
@@ -107,34 +106,29 @@ kernel void NAME(
         if (tid < s) { \
             shared_memory[tid] = MAX(shared_memory[tid], shared_memory[tid + s]); \
         } \
-        threadgroup_barrier(mem_flags::mem_threadgroup); \
     } \
     \
-    /* wait for shared_memory[0] to be filled */ \
     threadgroup_barrier(mem_flags::mem_threadgroup); \
     \
     float _max = shared_memory[0]; \
     \
-    /* prevent tid=0 from overwriting _max before other threads have written it */ \
-    threadgroup_barrier(mem_flags::mem_threadgroup); \
     shared_memory[tid] = 0; \
     \
     idx = start_idx + tid; \
     while (idx < stop_idx) { \
-        const float val = exp(float(src[idx]) - _max); \
-        dst[idx] = T(val); \
+        const T val = T(exp(src[idx] - _max)); \
+        dst[idx] = val; \
         shared_memory[tid] += val; \
         idx += block_dim; \
     } \
-    threadgroup_barrier(mem_flags::mem_threadgroup); \
     for (uint s = block_dim / 2; s > 0; s >>= 1) { \
         if (tid < s) { \
             shared_memory[tid] += shared_memory[tid + s]; \
         } \
         threadgroup_barrier(mem_flags::mem_threadgroup); \
     } \
     \
-    const T inv_acc = T(1.0/shared_memory[0]); \
+    const T inv_acc = T(1/shared_memory[0]); \
     idx = start_idx + tid; \
     while (idx < stop_idx) { \
         dst[idx] *= inv_acc; \
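For reference, the SOFTMAX macro being edited here computes the numerically stable form of the function: the row maximum is reduced first so that every exponent is non-positive, which is what keeps the half-precision variants from overflowing. In LaTeX:

    \operatorname{softmax}(x)_i = \frac{e^{x_i - m}}{\sum_k e^{x_k - m}}, \qquad m = \max_j x_j

Subtracting m does not change the result (the factor e^{-m} cancels between numerator and denominator); it only reins in the range of the intermediate exponentials.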

View File

@@ -1,209 +0,0 @@
import Metal
import MetalPerformanceShadersGraph
let type = MTLDataType.float;
let dataType = type;
var B = 2;
var M = 2;
var N = 2;
var K = 2;
var A_trans = false;
var B_trans = false;
var D_trans = false;
var alpha = Float(1.0);
var beta = Float(0.0);
var batched = B > 1;
var fused_activation = false;
var fused_bias = false;
let constants = MTLFunctionConstantValues()
constants.setConstantValue(&M, type: .uint, index: 0)
constants.setConstantValue(&N, type: .uint, index: 1)
constants.setConstantValue(&K, type: .uint, index: 2)
constants.setConstantValue(&A_trans, type: .bool, index: 10)
constants.setConstantValue(&B_trans, type: .bool, index: 11)
constants.setConstantValue(&D_trans, type: .bool, index: 13)
constants.setConstantValue(&alpha, type: .float, index: 20)
constants.setConstantValue(&beta, type: .float, index: 21)
constants.setConstantValue(&batched, type: .bool, index: 100)
constants.setConstantValue(&fused_activation, type: .bool, index: 101)
constants.setConstantValue(&fused_bias, type: .bool, index: 50001)
var M_simd = UInt16(16)
var N_simd = UInt16(16)
var K_simd = UInt16(32)
var M_splits = UInt16(2)
var N_splits = UInt16(2)
constants.setConstantValue(&M_simd, type: .ushort, index: 200)
constants.setConstantValue(&N_simd, type: .ushort, index: 201)
constants.setConstantValue(&K_simd, type: .ushort, index: 202)
constants.setConstantValue(&M_splits, type: .ushort, index: 210)
constants.setConstantValue(&N_splits, type: .ushort, index: 211)
let M_group = M_simd * M_splits
let N_group = N_simd * N_splits
// Satisfy Metal API validation.
#if DEBUG
do {
var garbage: SIMD4<UInt64> = .zero
constants.setConstantValue(&garbage, type: .bool, index: 102)
constants.setConstantValue(&garbage, type: .bool, index: 103)
constants.setConstantValue(&garbage, type: .bool, index: 113)
constants.setConstantValue(&garbage, type: .bool, index: 50000)
}
#endif
let device = MTLCopyAllDevices().first!
device.shouldMaximizeConcurrentCompilation = true
var libraryURL = URL.init(string: "/Users/nicolas/src/candle/candle-metal-kernels/")!;
libraryURL.append(component: "src")
libraryURL.append(component: "libMetalFlashAttention.metallib")
let library = try! device.makeLibrary(URL: libraryURL)
var name: String
switch dataType {
case .half: name = "hgemm"
case .float: name = "sgemm"
default: fatalError()
}
let function = try! library.makeFunction(
name: name, constantValues: constants)
let A_block_length = M_group * K_simd
let B_block_length = K_simd * N_group
var blockElements = A_block_length + B_block_length;
if (M % 8 != 0) && (N % 8 != 0) {
let C_block_length = M_group * N_group;
blockElements = max(C_block_length, blockElements)
}
if fused_bias {
if D_trans {
blockElements = max(blockElements, M_group)
} else {
blockElements = max(blockElements, N_group)
}
}
// let blockBytes = blockElements * UInt16(dataType.size)
let elementSize = 4
let blockBytes = blockElements * UInt16(elementSize)
func ceilDivide(target: Int, granularity: UInt16) -> Int {
(target + Int(granularity) - 1) / Int(granularity)
}
var gridSize = MTLSize(
width: ceilDivide(target: N, granularity: N_group),
height: ceilDivide(target: M, granularity: M_group),
depth: 1)
let groupSize = MTLSize(
width: Int(32 * M_splits * N_splits),
height: 1,
depth: 1)
let commandQueue = device.makeCommandQueue()!
let threadgroupMemoryLength = blockBytes;
let rowsA = M;
let columnsA = K;
let rowsB = K;
let columnsB = N;
let rowsC = M;
let columnsC = N;
var arrayA = [Float](repeating: 0, count: B * rowsA * columnsA)
var arrayB = [Float](repeating: 0, count: B * rowsB * columnsB)
var arrayC = [Float](repeating: 0, count: B * rowsC * columnsC)
var arrayD = [Float](repeating: 0, count: B * rowsC * columnsC)
for i in 0..<arrayA.count {
arrayA[i] = Float(i)
}
for i in 0..<arrayB.count {
arrayB[i] = Float(i)
}
let bufferA = device.makeBuffer(bytes: arrayA, length: B * rowsA * columnsA * MemoryLayout<Float>.stride, options: [])!
let bufferB = device.makeBuffer(bytes: arrayB, length: B * rowsB * columnsB * MemoryLayout<Float>.stride, options: [])!
let bufferC = device.makeBuffer(length: B * rowsC * columnsC * MemoryLayout<Float>.stride, options: [])!
let bufferD = device.makeBuffer(length: B * rowsC * columnsC * MemoryLayout<Float>.stride, options: [])!
let pipeline = try device.makeComputePipelineState(function: function)
func call(bufferA: MTLBuffer, bufferB: MTLBuffer, bufferC: MTLBuffer){
let encoder = commandBuffer.makeComputeCommandEncoder(dispatchType: MTLDispatchType.serial)!
encoder.setComputePipelineState(pipeline)
encoder.setThreadgroupMemoryLength(Int(threadgroupMemoryLength), index: 0)
encoder.setBuffer(bufferA, offset: 0, index: 0)
encoder.setBuffer(bufferB, offset: 0, index: 1)
encoder.setBuffer(bufferC, offset: 0, index: 2)
let gridZ: Int = B
if batched{
func byteStride(shape: [Int]) -> Int {
let rank = shape.count
var output = elementSize * shape[rank - 2] * shape[rank - 1]
if shape.dropLast(2).reduce(1, *) == 1 {
output = 0
}
return output
}
let byteStrideA = M*K*elementSize
let byteStrideB = N*K*elementSize
let byteStrideC = M*N*elementSize
let byteStrideD = 0
withUnsafeTemporaryAllocation(
of: SIMD4<UInt64>.self, capacity: gridZ
) { buffer in
for i in 0..<buffer.count {
buffer[i] = SIMD4(
UInt64(truncatingIfNeeded: i * byteStrideA),
UInt64(truncatingIfNeeded: i * byteStrideB),
UInt64(truncatingIfNeeded: i * byteStrideC),
UInt64(truncatingIfNeeded: i * byteStrideD))
}
let bufferLength = buffer.count * MemoryLayout<SIMD4<UInt64>>.stride
assert(MemoryLayout<SIMD4<UInt64>>.stride == 8 * 4)
encoder.setBytes(buffer.baseAddress!, length: bufferLength, index: 10)
}
}
gridSize.depth = gridZ
encoder.dispatchThreadgroups(
gridSize, threadsPerThreadgroup: groupSize
)
encoder.endEncoding()
}
var commandBuffer = commandQueue.makeCommandBuffer()!
call(bufferA:bufferA, bufferB:bufferB, bufferC:bufferC)
commandBuffer.commit()
commandBuffer = commandQueue.makeCommandBuffer()!
commandBuffer.encodeWaitForEvent(event, value: 2)
call(bufferA:bufferA, bufferB:bufferC, bufferC:bufferD)
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
var contents = bufferC.contents();
var count = B * rowsA * columnsB;
var typedPointer = contents.bindMemory(to: Float.self, capacity: count)
var bufferedPointer = UnsafeBufferPointer(start: typedPointer, count: count)
print("First matmul is OK", Array(bufferedPointer))
contents = bufferD.contents();
count = B * rowsA * columnsB;
typedPointer = contents.bindMemory(to: Float.self, capacity: count)
bufferedPointer = UnsafeBufferPointer(start: typedPointer, count: count)
print("This should be filled", Array(bufferedPointer))

View File

@@ -2,13 +2,6 @@ use super::*;
 use half::{bf16, f16};
 use metal::{CompileOptions, Device, MTLResourceOptions, MTLSize, NSUInteger};

-fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
-    let ptr = buffer.contents() as *const T;
-    assert!(!ptr.is_null());
-    let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
-    slice.to_vec()
-}
-
 fn new_buffer<T>(device: &Device, data: &[T]) -> Buffer {
     let options = MTLResourceOptions::StorageModeManaged;
     let ptr = data.as_ptr() as *const core::ffi::c_void;
@@ -37,8 +30,7 @@ fn approx_bf16(v: Vec<bf16>, digits: i32) -> Vec<f32> {

 fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -55,13 +47,12 @@ fn run<T: Clone>(v: &[T], name: unary::contiguous::Kernel) -> Vec<T> {
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    read_to_vec(&output, v.len())
+    output.read_to_vec::<T>(v.len())
 }

 fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let options = MTLResourceOptions::StorageModeManaged;
@@ -81,7 +72,7 @@ fn run_binary<T: Clone>(x: &[T], y: &[T], name: binary::contiguous::Kernel) -> Vec<T> {
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    read_to_vec(&output, x.len())
+    output.read_to_vec::<T>(x.len())
 }

 fn run_strided<T: Clone>(
@@ -96,8 +87,7 @@ fn run_strided<T: Clone>(
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
     let output = new_buffer(&device, v);
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     call_unary_strided(
         &device,
         command_buffer,
@@ -113,7 +103,7 @@ fn run_strided<T: Clone>(
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    read_to_vec(&output, v.len())
+    output.read_to_vec::<T>(v.len())
 }

 #[test]
@@ -215,25 +205,6 @@ fn cos_strided_random() {
     );
 }

-#[test]
-fn gelu_f16() {
-    let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]
-        .iter()
-        .map(|v| f16::from_f32(*v))
-        .collect();
-    let expected: Vec<f32> = vec![-0.0, -0.16, 0.0, 0.84, 1.96, 3.0, 10.0, 20.0];
-    let results = run(&v, unary::contiguous::gelu::HALF);
-    assert_eq!(approx_f16(results, 2), expected);
-}
-
-#[test]
-fn gelu_f32() {
-    let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0];
-    let expected: Vec<f32> = vec![-0.0, -0.159, 0.0, 0.841, 1.955, 2.996, 10.0, 20.0];
-    let results = run(&v, unary::contiguous::gelu::FLOAT);
-    assert_eq!(approx(results, 3), expected);
-}
-
 #[test]
 fn binary_add_f32() {
     let left = vec![1.0f32, 2.0, 3.0];
@@ -250,8 +221,7 @@ fn binary_add_f32() {

 fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -272,7 +242,7 @@ fn cast<T: Clone, U: Clone>(v: &[T], name: &'static str) -> Vec<U> {
     .unwrap();
     command_buffer.commit();
     command_buffer.wait_until_completed();
-    read_to_vec(&output, v.len())
+    output.read_to_vec::<U>(v.len())
 }

 #[test]
@@ -298,8 +268,7 @@ fn cast_u32_f32() {

 fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
@@ -323,7 +292,7 @@ fn run_affine<T: Clone>(v: &[T], mul: f64, add: f64) -> Vec<T> {
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    read_to_vec(&output, v.len())
+    output.read_to_vec::<T>(v.len())
 }
 fn run_affine_strided<T: Clone>(
@@ -334,8 +303,7 @@ fn run_affine_strided<T: Clone>(
     add: f64,
 ) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
@@ -360,7 +328,7 @@ fn run_affine_strided<T: Clone>(
     command_buffer.wait_until_completed();

     let len: usize = shape.iter().product();
-    read_to_vec(&output, len)
+    output.read_to_vec::<T>(len)
 }

 #[test]
@@ -463,8 +431,7 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
         _ => unimplemented!(),
     };

-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     call_index_select(
         &device,
         &command_buffer,
@@ -482,7 +449,7 @@ fn run_index_select<T: Clone, I: Clone + std::fmt::Debug>(
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    read_to_vec(&dst_buffer, dst_el)
+    dst_buffer.read_to_vec::<T>(dst_el)
 }

 #[test]
@@ -548,7 +515,7 @@ fn index_add() {
     let expected = vec![
         2.0, 3.0, 4.0, 1.0, 1.0, 1.0, 8.0, 9.0, 10.0, 1.0, 1.0, 1.0, 5.0, 6.0, 7.0,
     ];
-    let result: Vec<f32> = read_to_vec(&outputs_buffer, right.len());
+    let result = outputs_buffer.read_to_vec::<f32>(right.len());
     assert_eq!(result, expected);
 }
@@ -560,14 +527,13 @@ fn cos_f16() {
         .collect();
     let results = run(&v, unary::contiguous::cos::HALF);
     let expected: Vec<f16> = v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect();
-    assert_eq!(approx_f16(results, 2), vec![0.54, -0.42, -0.99]);
-    assert_eq!(approx_f16(expected, 2), vec![0.54, -0.42, -0.99]);
+    assert_eq!(approx_f16(results, 4), vec![0.5405, -0.4163, -0.9902]);
+    assert_eq!(approx_f16(expected, 4), vec![0.5405, -0.4163, -0.9902]);
 }

 fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -589,13 +555,12 @@ fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    read_to_vec(&output, out_length)
+    output.read_to_vec::<T>(out_length)
 }

 fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let input = new_buffer(&device, v);
@@ -614,7 +579,7 @@ fn run_softmax<T: Clone + std::fmt::Debug>(v: &[T], last_dim: usize, name: &'static str) -> Vec<T> {
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    read_to_vec(&output, v.len())
+    output.read_to_vec::<T>(v.len())
 }

 #[test]
@@ -645,24 +610,6 @@ fn softmax() {
         vec![0.0043, 0.0116, 0.0315, 0.0858, 0.2331, 0.6337]
     );

-    let last_dim = 4096;
-    let n = 200;
-    let mut v = vec![0.0; n * last_dim];
-    for i in 0..n {
-        v[i * last_dim] = 20.0;
-    }
-    let results = run_softmax(&v, last_dim, "softmax_float");
-    let results = approx(results, 4);
-    println!("{results:?}");
-    assert_eq!(
-        results.iter().map(|&s| s.round() as usize).sum::<usize>(),
-        n
-    );
-    assert_eq!(results[0], 1.0);
-    assert_eq!(results[1], 0.0);
-    assert_eq!(results[last_dim], 1.0);
-    assert_eq!(results[2 * last_dim], 1.0);
-
     let v = vec![0.0f32, 1.0, 2.0, 3.0, 4.0, 5.0];
     let last_dim = 6;
     let results = run_softmax(&v, last_dim, "softmax_float");
@@ -713,8 +660,7 @@ fn run_where_cond<I: Clone, T: Clone>(
     name: &'static str,
 ) -> Vec<T> {
     let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
+    let kernels = Kernels::new();
     let command_queue = device.new_command_queue();
     let command_buffer = command_queue.new_command_buffer();
     let options = MTLResourceOptions::StorageModeManaged;
@@ -755,7 +701,7 @@ fn run_where_cond<I: Clone, T: Clone>(
     command_buffer.commit();
     command_buffer.wait_until_completed();

-    read_to_vec(&output, length)
+    output.read_to_vec::<T>(length)
 }

 #[test]
@@ -779,93 +725,3 @@ fn where_cond() {
     );
     assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]);
 }
-
-fn run_gemm<T: Clone>(
-    (b, m, n, k): (usize, usize, usize, usize),
-    lhs: &[T],
-    lhs_stride: Vec<usize>,
-    lhs_offset: usize,
-    rhs: &[T],
-    rhs_stride: Vec<usize>,
-    rhs_offset: usize,
-) -> Vec<T> {
-    let device = device();
-    let fence = device.new_fence();
-    let kernels = Kernels::new(fence);
-    let command_queue = device.new_command_queue();
-    let command_buffer = command_queue.new_command_buffer();
-    let options = MTLResourceOptions::StorageModeManaged;
-
-    let lhs = device.new_buffer_with_data(
-        lhs.as_ptr() as *const core::ffi::c_void,
-        std::mem::size_of_val(lhs) as u64,
-        options,
-    );
-    let rhs = device.new_buffer_with_data(
-        rhs.as_ptr() as *const core::ffi::c_void,
-        std::mem::size_of_val(rhs) as u64,
-        options,
-    );
-    let length = b * m * n;
-    let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options);
-    call_gemm(
-        &device,
-        command_buffer,
-        &kernels,
-        "sgemm",
-        (b, m, n, k),
-        &lhs_stride,
-        lhs_offset,
-        &lhs,
-        &rhs_stride,
-        rhs_offset,
-        &rhs,
-        &output,
-    )
-    .unwrap();
-    command_buffer.commit();
-    command_buffer.wait_until_completed();
-
-    read_to_vec(&output, length)
-}
-
-#[test]
-fn gemm() {
-    let (b, m, n, k) = (1, 2, 4, 3);
-    let lhs_stride = vec![m * k, k, 1];
-    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
-    let rhs_stride = vec![n * k, n, 1];
-    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
-    let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
-    assert_eq!(
-        approx(results, 4),
-        vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
-    );
-
-    let (b, m, n, k) = (2, 2, 4, 3);
-    let lhs_stride = vec![m * k, k, 1];
-    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
-    let rhs_stride = vec![n * k, n, 1];
-    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
-    let results = run_gemm((b, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 0);
-    assert_eq!(
-        approx(results, 4),
-        vec![
-            20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0, 344.0, 365.0, 386.0, 407.0, 488.0,
-            518.0, 548.0, 578.0
-        ]
-    );
-
-    // OFFSET
-    let (b, m, n, k) = (2, 2, 4, 3);
-    let lhs_stride = vec![m * k, k, 1];
-    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
-    let rhs_stride = vec![n * k, n, 1];
-    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
-    // Manually set batch_size=1 and offset 12 elements * 4 the number of bytes for f32
-    let results = run_gemm((1, m, n, k), &lhs, lhs_stride, 0, &rhs, rhs_stride, 12 * 4);
-    assert_eq!(
-        approx(results, 4),
-        vec![56.0, 59.0, 62.0, 65.0, 200.0, 212.0, 224.0, 236.0]
-    );
-}

View File

@@ -42,14 +42,9 @@ template <typename T> METAL_FUNC T erf(T in){
     return T(sign*y);
 }

-template <typename T> METAL_FUNC T id(T in) { return in; }
-template <typename T> METAL_FUNC T gelu_erf(T x) {
-    return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2);
-}
-template <typename T> METAL_FUNC T gelu(T x) {
-    if (x > 5) {
-        return x;
-    }
+template <typename T> METAL_FUNC T id(T in){ return in; }
+template <typename T> METAL_FUNC T gelu_erf(T x){ return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2); }
+template <typename T> METAL_FUNC T gelu(T x){
     T x_sq = x * x;
     T x_cube = x_sq * x;
     T alpha = x + static_cast<T>(0.044715) * x_cube;
@@ -69,7 +64,7 @@ kernel void FN_NAME( \
     if (thread_position_in_grid >= dim) { \
         return; \
     } \
-    output[thread_position_in_grid] = TYPENAME(FN(float(input[thread_position_in_grid]))); \
+    output[thread_position_in_grid] = TYPENAME(FN(input[thread_position_in_grid])); \
 }\
 kernel void FN_NAME_STRIDED( \
     constant size_t &dim, \
@@ -83,7 +78,7 @@ kernel void FN_NAME_STRIDED( \
     if (thread_position_in_grid >= dim) { \
         return; \
     } \
-    output[thread_position_in_grid] = TYPENAME(FN(float(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)]))); \
+    output[thread_position_in_grid] = TYPENAME(FN(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)])); \
 }

 #define UNARY_OP(NAME) \
@@ -107,7 +102,6 @@ UNARY_OP(floor)
 UNARY_OP(round)
 UNARY_OP(gelu_erf)
 UNARY_OP(erf)
-UNARY_OP(tanh)
 UNARY(id, float, copy_float, copy_float_strided)
 UNARY(id, half, copy_half, copy_half_strided)
 UNARY(id, uint8_t, copy_u8, copy_u8_strided)
@@ -127,7 +121,6 @@ BFLOAT_UNARY_OP(floor)
 BFLOAT_UNARY_OP(round)
 BFLOAT_UNARY_OP(gelu_erf)
 BFLOAT_UNARY_OP(erf)
-BFLOAT_UNARY_OP(tanh)
 UNARY(id, bfloat, copy_bfloat, copy_bfloat_strided)
 #endif

View File

@@ -19,7 +19,6 @@ num-traits = { workspace = true }
 rayon = { workspace = true }
 safetensors = { workspace = true }
 serde = { workspace = true }
-metal = { workspace = true, optional = true }
 candle-metal-kernels = { path = "../candle-metal-kernels", version = "0.3.0", optional = true }

 [dev-dependencies]
@@ -31,4 +30,4 @@ default = []
 accelerate = ["dep:accelerate-src", "candle/accelerate"]
 cuda = ["candle/cuda"]
 mkl = ["dep:intel-mkl-src", "candle/mkl"]
-metal = ["candle/metal", "dep:candle-metal-kernels", "dep:metal"]
+metal = ["candle/metal", "dep:candle-metal-kernels"]

View File

@@ -220,13 +220,13 @@ impl candle::CustomOp1 for SoftmaxLastDim {
         };

         let n = layout.stride().len();
-        if !(layout.is_contiguous() && layout.stride()[n - 1] == 1 && layout.start_offset() == 0) {
+        if !(layout.stride()[n - 1] == 1 && layout.start_offset() == 0) {
             candle::bail!("Non contiguous softmax-last-dim is not implemented");
         }

         let last_dim = layout.dims()[layout.shape().rank() - 1];
         let elem_count = layout.shape().elem_count();
-        let mut output = device.new_buffer(elem_count, storage.dtype(), "softmax");
+        let mut output = device.new_buffer(elem_count, storage.dtype());
         candle_metal_kernels::call_last_softmax(
             device.metal_device(),
             &command_buffer,

View File

@@ -31,4 +31,3 @@ accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"]
 cuda = ["candle/cuda", "candle-nn/cuda"]
 flash-attn = ["cuda", "dep:candle-flash-attn"]
 mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
-metal = ["candle/metal", "candle-nn/metal"]

View File

@@ -142,9 +142,10 @@ impl RotaryEmbedding {
             .to_dtype(DType::F32)?
             .reshape((max_seq_len, 1))?;
         let freqs = t.matmul(&inv_freq)?;
-        let sin = freqs.sin()?;
-        let cos = freqs.cos()?;
-        Ok(Self { sin, cos })
+        Ok(Self {
+            sin: freqs.sin()?,
+            cos: freqs.cos()?,
+        })
     }

     fn apply_rotary_emb_qkv(
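The table built here is the standard rotary-embedding (RoPE) schedule; the test removed below spot-checks individual entries of it. In LaTeX, with d the rotary dimension and t the position index:

    \theta_j = 10000^{-2j/d}, \qquad \text{freqs}_{t,j} = t\,\theta_j, \qquad j = 0, \dots, d/2 - 1

and sin/cos are elementwise tables over freqs. For d = 8 this gives theta_1 = 10000^{-1/4} = 0.1, matching the removed assertions that inv_freq[(0, 1)] == 0.1 and freqs[(1, 1)] == 0.1.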
@@ -407,38 +408,3 @@ impl MixFormerSequentialForCausalLM {
         self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_rotary() {
-        let dev = Device::new_metal(0).unwrap();
-        for i in 0..10000 {
-            let dim = 8;
-            let max_seq_len = 12;
-            let inv_freq: Vec<_> = (0..dim)
-                .step_by(2)
-                .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
-                .collect();
-            let inv_freq_len = inv_freq.len();
-            let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), &dev).unwrap();
-            let t = Tensor::arange(0u32, max_seq_len as u32, &dev)
-                .unwrap()
-                .to_dtype(DType::F32)
-                .unwrap()
-                .reshape((max_seq_len, 1))
-                .unwrap();
-            let x: f32 = t.i((1, 0)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 1.0);
-            let x: f32 = inv_freq.i((0, 1)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 0.1);
-            let freqs = t.matmul(&inv_freq).unwrap();
-            let x: f32 = freqs.i((1, 1)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 0.1);
-            let sin = freqs.sin().unwrap().contiguous().unwrap();
-            let x: f32 = sin.i((1, 1)).unwrap().to_scalar().unwrap();
-            assert_eq!(x, 0.099833414);
-        }
-    }
-}