mirror of https://github.com/huggingface/candle.git
synced 2025-06-19 19:58:35 +00:00

Compare commits
3 commits: 1f23cea90c, ce33d6ad2a, 3d0ade406a
@@ -38,8 +38,7 @@ impl From<String> for MetalError {
 pub struct MetalDevice {
     device: metal::Device,
     command_queue: metal::CommandQueue,
-    command_buffers: Arc<RwLock<Vec<metal::CommandBuffer>>>,
-    command_buffer_index: Arc<RwLock<usize>>,
+    command_buffer: Arc<RwLock<metal::CommandBuffer>>,
     kernels: Arc<candle_metal_kernels::Kernels>,
     buffers: Arc<RwLock<HashMap<(NSUInteger, MTLResourceOptions), Vec<Arc<Buffer>>>>>,
 }
@@ -71,70 +70,38 @@ impl MetalDevice {
         &self.command_queue
     }

-    pub fn command_buffer(&self) -> CommandBuffer {
-        let mut command_buffers = self.command_buffers.try_write().unwrap();
-        let mut index = self.command_buffer_index.try_write().unwrap();
-        let n = command_buffers.len();
-        if *index == n {
-            // todo!("Cycle buffers");
-            for i in 0..n {
-                let command_buffer = &command_buffers[i];
-                match command_buffer.status() {
-                    metal::MTLCommandBufferStatus::Committed
-                    | metal::MTLCommandBufferStatus::Scheduled => {
-                        // println!("Wait during cycling {i}");
-                        // println!("Command {i} / {n}: {:?}", command_buffer.status());
-                        command_buffer.wait_until_completed();
-                    }
-                    metal::MTLCommandBufferStatus::Completed => {}
-                    _ => {
-                        panic!("Command buffer {i} not committed during cycling");
-                    }
-                }
-            }
-            let new_buffers = (0..n)
-                .map(|i| {
-                    // println!("Creating command buffer {i}");
-                    let command_buffer = self.command_queue.new_command_buffer().to_owned();
-                    command_buffer.set_label(&format!("num {i}"));
-                    command_buffer.enqueue();
-                    command_buffer
-                })
-                .collect();
-            *command_buffers = new_buffers;
-            *index = 0;
-            // println!("Reset");
-        }
-        // println!("Giving buffer {} / {n}", *index);
-        let out = &command_buffers[*index];
-        assert_eq!(out.status(), metal::MTLCommandBufferStatus::Enqueued);
-        *index += 1;
-        out.to_owned()
+    pub fn command_buffer(&self) -> std::sync::RwLockReadGuard<CommandBuffer> {
+        self.command_buffer.try_read().unwrap()
+    }
+
+    pub fn commit(&self) {
+        let mut old = self.command_buffer.try_write().unwrap();
+        match old.status() {
+            metal::MTLCommandBufferStatus::NotEnqueued
+            | metal::MTLCommandBufferStatus::Enqueued => {
+                old.commit();
+                let command_buffer = self.command_queue.new_command_buffer().to_owned();
+                *old = command_buffer;
+            }
+            _ => {}
+        }
     }

     pub fn wait_until_completed(&self) {
-        let command_buffers = self.command_buffers.try_write().unwrap();
-        let index = self.command_buffer_index.try_write().unwrap();
-        let n = command_buffers.len();
-        // for i in 0..*index {
-        //     let command_buffer = &command_buffers[i];
-        //     println!("Command {i} / {n}: {:?}", command_buffer.status());
-        // }
-        for i in 0..*index {
-            let command_buffer = &command_buffers[i];
-            match command_buffer.status() {
-                metal::MTLCommandBufferStatus::Committed
-                | metal::MTLCommandBufferStatus::Scheduled => {}
-                metal::MTLCommandBufferStatus::Completed => {}
-                _ => {
-                    panic!("Command buffer not committed");
-                }
-            }
-            // println!("Wait {i}");
-            command_buffer.wait_until_completed();
-            // println!("Ok {i}");
-            // command_buffer.wait_until_completed();
-        }
+        let mut old = self.command_buffer.try_write().unwrap();
+        match old.status() {
+            metal::MTLCommandBufferStatus::NotEnqueued
+            | metal::MTLCommandBufferStatus::Enqueued => {
+                old.commit();
+                old.wait_until_completed();
+            }
+            metal::MTLCommandBufferStatus::Committed | metal::MTLCommandBufferStatus::Scheduled => {
+                old.wait_until_completed();
+            }
+            _ => {}
+        }
+        let command_buffer = self.command_queue.new_command_buffer().to_owned();
+        *old = command_buffer;
     }

     pub fn kernels(&self) -> &Kernels {
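The hunk above replaces the pool of 64 pre-enqueued command buffers (plus a cycling index) with a single shared command buffer behind an `RwLock`: `command_buffer()` hands out a read guard, and the new `commit()` submits whatever has been encoded and swaps in a fresh buffer. A minimal sketch of that lifecycle, with `FakeCommandBuffer` standing in for `metal::CommandBuffer` so it runs without a GPU:

```rust
use std::sync::{Arc, RwLock};

// Stand-in for a Metal command buffer: it can be committed once,
// after which a fresh one must take its place.
#[derive(Debug)]
struct FakeCommandBuffer {
    committed: bool,
}

struct Device {
    command_buffer: Arc<RwLock<FakeCommandBuffer>>,
}

impl Device {
    // Readers share the current buffer; `try_read` mirrors the diff's
    // non-blocking lock accessors.
    fn command_buffer(&self) -> std::sync::RwLockReadGuard<'_, FakeCommandBuffer> {
        self.command_buffer.try_read().unwrap()
    }

    // Submit the in-flight work and install a fresh buffer so later
    // encoding never touches an already-committed one.
    fn commit(&self) {
        let mut old = self.command_buffer.try_write().unwrap();
        if !old.committed {
            old.committed = true; // submit the encoded work
        }
        *old = FakeCommandBuffer { committed: false }; // fresh buffer for the next batch
    }
}

fn main() {
    let dev = Device {
        command_buffer: Arc::new(RwLock::new(FakeCommandBuffer { committed: false })),
    };
    {
        let cb = dev.command_buffer();
        println!("encoding into {cb:?}"); // read guard held while encoding
    } // guard dropped here; otherwise try_write inside commit would fail
    dev.commit();
}
```

Because callers hold a read guard while encoding, they must release it before `commit()` can take the write lock, which is why later hunks pair `drop(command_buffer);` with `self.device.commit();`.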
@@ -145,40 +112,28 @@ impl MetalDevice {
         &self.device
     }

-    pub fn new_buffer(&self, element_count: usize, dtype: DType, name: &str) -> Arc<Buffer> {
+    pub fn new_buffer(&self, element_count: usize, dtype: DType) -> Arc<Buffer> {
         let size = (element_count * dtype.size_in_bytes()) as NSUInteger;
-        self._new_buffer(size, MTLResourceOptions::StorageModePrivate, name)
+        self._new_buffer(size, MTLResourceOptions::StorageModePrivate)
     }

-    fn _new_buffer(&self, size: NSUInteger, option: MTLResourceOptions, name: &str) -> Arc<Buffer> {
-        // println!("Creating new buffer {name}");
+    fn _new_buffer(&self, size: NSUInteger, option: MTLResourceOptions) -> Arc<Buffer> {
         let mut buffers = self.buffers.try_write().unwrap();
         let subbuffers = buffers.entry((size, option)).or_insert(vec![]);

         for sub in &mut *subbuffers {
             if Arc::strong_count(sub) == 1 {
-                // println!("Reusing tensor {size} {name}");
                 return sub.clone();
             }
         }
         let new_buffer = self.device.new_buffer(size as NSUInteger, option);
         let new_buffer = Arc::new(new_buffer);
-        // subbuffers.push(new_buffer.clone());
-        // println!("Created tensor {size} {name}");
-        for subbuffers in buffers.values_mut() {
-            let newbuffers = subbuffers
-                .iter()
-                .filter(|s| Arc::strong_count(s) > 1)
-                .map(|s| Arc::clone(s))
-                .collect();
-            *subbuffers = newbuffers;
-        }
+        subbuffers.push(new_buffer.clone());

         new_buffer
     }

     pub fn new_buffer_managed(&self, size: NSUInteger) -> Arc<Buffer> {
-        self._new_buffer(size, MTLResourceOptions::StorageModeShared, "managed")
+        self._new_buffer(size, MTLResourceOptions::StorageModeManaged)
     }

     pub fn new_buffer_with_data<T>(&self, data: &[T]) -> Arc<Buffer> {
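`_new_buffer` now keeps each allocation in a free list keyed by `(size, options)` and treats `Arc::strong_count(sub) == 1` as "only the pool still references this buffer, so it may be handed out again". A self-contained sketch of that reuse rule (`BufferPool` and the `Vec<u8>` payload are illustrative stand-ins, not candle types):

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Keyed the same way as the diff: (size, storage-mode options).
struct BufferPool {
    buffers: HashMap<(usize, u64), Vec<Arc<Vec<u8>>>>,
}

impl BufferPool {
    fn get(&mut self, size: usize, options: u64) -> Arc<Vec<u8>> {
        let subbuffers = self.buffers.entry((size, options)).or_insert_with(Vec::new);
        // strong_count == 1 means only the pool still holds the
        // allocation, so no live tensor can observe the reuse.
        for sub in subbuffers.iter() {
            if Arc::strong_count(sub) == 1 {
                return sub.clone();
            }
        }
        let new_buffer = Arc::new(vec![0u8; size]);
        subbuffers.push(new_buffer.clone());
        new_buffer
    }
}

fn main() {
    let mut pool = BufferPool { buffers: HashMap::new() };
    let a = pool.get(1024, 0);
    drop(a); // count drops back to 1: the next same-key request reuses it
    let _b = pool.get(1024, 0);
    assert_eq!(pool.buffers[&(1024, 0)].len(), 1); // reused, not reallocated
}
```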
@@ -186,25 +141,15 @@ impl MetalDevice {
         let tmp = self.device.new_buffer_with_data(
             data.as_ptr() as *const core::ffi::c_void,
             size,
-            metal::MTLResourceOptions::StorageModeShared,
+            metal::MTLResourceOptions::StorageModeManaged,
         );
-        let real = self._new_buffer(
-            size,
-            metal::MTLResourceOptions::StorageModePrivate,
-            "with_data",
-        );
-        let command_buffer = self.command_buffer();
-        command_buffer.set_label("with_data");
-        let blit = command_buffer.new_blit_command_encoder();
-        blit.set_label("with_data_blit");
-        blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
-        blit.end_encoding();
-        command_buffer.commit();
-        drop(command_buffer);
-        // real.did_modify_range(metal::NSRange::new(0, real.length()));
-        // println!("Command {:?}", command.status());
-
-        // self.commit();
+        let real = self._new_buffer(size, metal::MTLResourceOptions::StorageModePrivate);
+        {
+            let command = self.command_buffer();
+            let blit = command.new_blit_command_encoder();
+            blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
+            blit.end_encoding();
+        }
+
         // This is necessary, for mmaped safetensors
         // Because of the unsafe slice cast we're doing.
         // The slice might not live long enough for metal
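`new_buffer_with_data` now stages the bytes in a CPU-visible `StorageModeManaged` buffer and blits them into a `StorageModePrivate` destination, since the CPU cannot write private memory directly. A hedged sketch of the same flow against the `metal` crate (macOS only; creating a device and queue inline is illustrative — candle reuses its cached ones):

```rust
use metal::{Device, MTLResourceOptions};

// Upload a slice into GPU-private memory via a managed staging buffer.
fn upload(data: &[f32]) -> metal::Buffer {
    let device = Device::system_default().expect("no Metal device");
    let queue = device.new_command_queue();
    let size = (data.len() * std::mem::size_of::<f32>()) as u64;

    // CPU-visible staging buffer initialized with the data.
    let tmp = device.new_buffer_with_data(
        data.as_ptr() as *const core::ffi::c_void,
        size,
        MTLResourceOptions::StorageModeManaged,
    );
    // GPU-private destination; only a blit can fill it.
    let real = device.new_buffer(size, MTLResourceOptions::StorageModePrivate);

    let command_buffer = queue.new_command_buffer();
    let blit = command_buffer.new_blit_command_encoder();
    blit.copy_from_buffer(&tmp, 0, &real, 0, tmp.length());
    blit.end_encoding();
    command_buffer.commit();
    command_buffer.wait_until_completed(); // candle defers this; shown here for completeness
    real
}
```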
@@ -224,29 +169,15 @@ impl MetalDevice {
         dtype: DType,
     ) -> Result<(Matrix, Arc<Buffer>)> {
         let elem_count = (b * m * n) as usize;
-        let buffer = self.new_buffer(elem_count, dtype, "matrix");
-        let command_buffer = self.command_buffer();
-        command_buffer.set_label("zeros_matmul");
-        let blit = command_buffer.new_blit_command_encoder();
-        blit.fill_buffer(
-            &buffer,
-            metal::NSRange {
-                location: 0,
-                length: buffer.length(),
-            },
-            0,
-        );
-        blit.end_encoding();
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
+        let out_buffer = self.new_buffer(elem_count, dtype);

         let result_descriptor =
             MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id);
-        let result_matrix = Matrix::init_with_buffer_descriptor(&buffer, 0, &result_descriptor)
+        let result_matrix = Matrix::init_with_buffer_descriptor(&out_buffer, 0, &result_descriptor)
             .ok_or_else(|| {
                 MetalError::from("Failed to create matrix multiplication kernel".to_string())
             })?;
-        Ok((result_matrix, buffer))
+        Ok((result_matrix, out_buffer))
     }

     pub fn capture<P: AsRef<Path>>(&self, path: P) -> Result<()> {
@@ -310,20 +241,13 @@ impl BackendStorage for MetalStorage {
                 self.dtype
             );
         }
-        self.device.wait_until_completed();
-        self.buffer
-            .did_modify_range(metal::NSRange::new(0, self.buffer.length()));
-        let buffer = self.device.new_buffer_managed(self.buffer.length());
-        {
-            let command_buffer = self.device.command_buffer();
-            command_buffer.set_label("to_cpu");
-            let blit = command_buffer.new_blit_command_encoder();
-            blit.set_label("blit_to_cpu");
-            blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
-            blit.end_encoding();
-
-            command_buffer.commit();
-        }
+        let buffer = self.device.new_buffer_managed(self.buffer.length());
+        let command_buffer = self.device.command_buffer();
+        let blit = command_buffer.new_blit_command_encoder();
+        blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, self.buffer.length());
+        blit.end_encoding();
+        drop(command_buffer);
         self.device.wait_until_completed();

         match self.dtype {
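The new `to_cpu_storage` path is the mirror image of the upload: blit the private buffer into a managed one, wait for the GPU, then read on the CPU. A sketch under the same metal-crate assumptions as above (on discrete GPUs a synchronize-resource blit may also be needed before the CPU read; Apple-silicon unified memory does not require it):

```rust
use metal::{Buffer, CommandQueue, Device, MTLResourceOptions};

// Copy a GPU-private buffer back to CPU-visible memory and read it as f32s.
fn read_back(device: &Device, queue: &CommandQueue, src: &Buffer) -> Vec<f32> {
    let managed = device.new_buffer(src.length(), MTLResourceOptions::StorageModeManaged);

    let command_buffer = queue.new_command_buffer();
    let blit = command_buffer.new_blit_command_encoder();
    blit.copy_from_buffer(src, 0, &managed, 0, src.length());
    blit.end_encoding();
    command_buffer.commit();
    command_buffer.wait_until_completed(); // the GPU must finish before the CPU reads

    // Safety: the whole buffer was just written by the blit above.
    let ptr = managed.contents() as *const f32;
    let len = (managed.length() as usize) / std::mem::size_of::<f32>();
    unsafe { std::slice::from_raw_parts(ptr, len) }.to_vec()
}
```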
@@ -332,11 +256,7 @@ impl BackendStorage for MetalStorage {
             DType::I64 => Ok(CpuStorage::I64(buffer.read_to_vec(length / size))),
             DType::F16 => Ok(CpuStorage::F16(buffer.read_to_vec(length / size))),
             DType::BF16 => Ok(CpuStorage::BF16(buffer.read_to_vec(length / size))),
-            DType::F32 => {
-                let vec = buffer.read_to_vec(length / size);
-                // println!("Got back {:?}", &vec[..1]);
-                Ok(CpuStorage::F32(vec))
-            }
+            DType::F32 => Ok(CpuStorage::F32(buffer.read_to_vec(length / size))),
             DType::F64 => Ok(CpuStorage::F64(buffer.read_to_vec(length / size))),
         }
     }
@@ -348,7 +268,7 @@ impl BackendStorage for MetalStorage {
         let el = shape.elem_count();
         let dtype = self.dtype;

-        let buffer = device.new_buffer(el, self.dtype, "affine");
+        let buffer = device.new_buffer(el, self.dtype);
         let command_buffer = self.device.command_buffer();
         if layout.is_contiguous() && layout.start_offset() == 0 {
             let name = match self.dtype {
@@ -389,111 +309,15 @@ impl BackendStorage for MetalStorage {
             )
             .map_err(MetalError::from)?;
         }
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
         Ok(Self::new(buffer, device.clone(), dtype))
     }

-    fn powf(&self, layout: &Layout, pow: f64) -> Result<Self> {
-        let device = self.device().clone();
-        let shape = layout.shape();
-        let el = shape.elem_count();
-        let dtype = self.dtype;
-
-        let buffer = device.new_buffer(el, self.dtype, "powf");
-        let command_buffer = self.device.command_buffer();
-        if layout.is_contiguous() && layout.start_offset() == 0 {
-            let name = match self.dtype {
-                DType::F32 => "powf_float",
-                DType::F16 => "powf_half",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_powf(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                el,
-                &self.buffer,
-                &buffer,
-                pow as f32,
-            )
-            .map_err(MetalError::from)?;
-        } else {
-            let name = match self.dtype {
-                DType::F32 => "powf_float_strided",
-                DType::F16 => "powf_half_strided",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_powf_strided(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                layout.dims(),
-                &self.buffer,
-                layout.stride(),
-                layout.start_offset() * dtype.size_in_bytes(),
-                &buffer,
-                pow as f32,
-            )
-            .map_err(MetalError::from)?;
-        }
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
-        Ok(Self::new(buffer, device.clone(), dtype))
+    fn powf(&self, _: &Layout, _: f64) -> Result<Self> {
+        crate::bail!("powf metal")
     }

-    fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> {
-        let device = self.device().clone();
-        let shape = layout.shape();
-        let el = shape.elem_count();
-        let dtype = self.dtype;
-
-        let buffer = device.new_buffer(el, self.dtype, "elu");
-        let command_buffer = self.device.command_buffer();
-        if layout.is_contiguous() && layout.start_offset() == 0 {
-            let name = match self.dtype {
-                DType::F32 => "elu_float",
-                DType::F16 => "elu_half",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_elu(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                el,
-                &self.buffer,
-                &buffer,
-                alpha as f32,
-            )
-            .map_err(MetalError::from)?;
-        } else {
-            let name = match self.dtype {
-                DType::F32 => "elu_float_strided",
-                DType::F16 => "elu_half_strided",
-                dtype => crate::bail!("Powf {dtype:?}"),
-            };
-            candle_metal_kernels::call_elu_strided(
-                &device.device,
-                &command_buffer,
-                &device.kernels,
-                name,
-                layout.dims(),
-                &self.buffer,
-                layout.stride(),
-                layout.start_offset() * dtype.size_in_bytes(),
-                &buffer,
-                alpha as f32,
-            )
-            .map_err(MetalError::from)?;
-        }
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
-        Ok(Self::new(buffer, device.clone(), dtype))
+    fn elu(&self, _: &Layout, _: f64) -> Result<Self> {
+        crate::bail!("elu metal")
     }

     fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> {
@@ -541,7 +365,7 @@ impl BackendStorage for MetalStorage {
         if dtype == DType::U32 {
             crate::bail!("Implement return index reduce op");
         }
-        let buffer = device.new_buffer(dst_el, dtype, "reduce");
+        let buffer = device.new_buffer(dst_el, dtype);
         let command_buffer = self.device.command_buffer();
         candle_metal_kernels::call_reduce_contiguous(
             &device.device,
@@ -555,8 +379,6 @@ impl BackendStorage for MetalStorage {
             &buffer,
         )
         .map_err(MetalError::from)?;
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));

         Ok(Self::new(buffer, device, dtype))
     }
@@ -569,10 +391,9 @@ impl BackendStorage for MetalStorage {
         let device = self.device();
         let shape = layout.shape();
         let el_count = shape.elem_count();
-        let buffer = device.new_buffer(el_count, dtype, "todtype");
-        device.wait_until_completed();
+        let buffer = device.new_buffer(el_count, dtype);
         let command_buffer = device.command_buffer();
-        if layout.is_contiguous() && layout.start_offset() == 0 {
+        if layout.is_contiguous() {
             let kernel_name = match (self.dtype, dtype) {
                 (DType::U32, DType::F32) => "cast_u32_f32",
                 (DType::U32, DType::U8) => "cast_u32_u8",
|
|||||||
)
|
)
|
||||||
.map_err(MetalError::from)?;
|
.map_err(MetalError::from)?;
|
||||||
}
|
}
|
||||||
command_buffer.set_label("to_dtype");
|
|
||||||
command_buffer.commit();
|
|
||||||
buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
|
|
||||||
device.wait_until_completed();
|
|
||||||
|
|
||||||
Ok(Self::new(buffer, device.clone(), dtype))
|
Ok(Self::new(buffer, device.clone(), dtype))
|
||||||
}
|
}
|
||||||
@@ -627,9 +444,8 @@ impl BackendStorage for MetalStorage {
         let dtype = self.dtype;
         let shape = layout.shape();
         let el_count = shape.elem_count();
-        let buffer = device.new_buffer(el_count, dtype, B::KERNEL);
+        let buffer = device.new_buffer(el_count, dtype);
         let command_buffer = device.command_buffer();
-        command_buffer.set_label(B::KERNEL);
         if layout.is_contiguous() && layout.start_offset() == 0 {
             use candle_metal_kernels::unary::contiguous;

@@ -647,7 +463,6 @@ impl BackendStorage for MetalStorage {
                 ("uceil", DType::F32) => contiguous::ceil::FLOAT,
                 ("ufloor", DType::F32) => contiguous::floor::FLOAT,
                 ("uround", DType::F32) => contiguous::round::FLOAT,
-                ("utanh", DType::F32) => contiguous::tanh::FLOAT,
                 ("ucos", DType::F16) => contiguous::cos::HALF,
                 ("usin", DType::F16) => contiguous::sin::HALF,
                 ("usqr", DType::F16) => contiguous::sqr::HALF,
@@ -661,7 +476,6 @@ impl BackendStorage for MetalStorage {
                 ("uceil", DType::F16) => contiguous::ceil::HALF,
                 ("ufloor", DType::F16) => contiguous::floor::HALF,
                 ("uround", DType::F16) => contiguous::round::HALF,
-                ("utanh", DType::F16) => contiguous::tanh::HALF,
                 (name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
             };
             candle_metal_kernels::call_unary_contiguous(
@@ -719,8 +533,9 @@ impl BackendStorage for MetalStorage {
             )
             .map_err(MetalError::from)?;
         }
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
+        command_buffer.set_label("unary");
+        drop(command_buffer);
+        self.device.commit();
         Ok(Self::new(buffer, device.clone(), dtype))
     }

@@ -734,31 +549,30 @@ impl BackendStorage for MetalStorage {
         let dtype = self.dtype;
         let shape = lhs_l.shape();
         let el_count = shape.elem_count();
-        let buffer = device.new_buffer(el_count, dtype, B::KERNEL);
+        let buffer = device.new_buffer(el_count, dtype);
         let command_buffer = device.command_buffer();
         if (lhs_l.is_contiguous() && lhs_l.start_offset() == 0)
             && (rhs_l.is_contiguous() && rhs_l.start_offset() == 0)
-            && &B::KERNEL[..1] != "b"
         {
             use candle_metal_kernels::binary::contiguous;

             let kernel_name = match (B::KERNEL, dtype) {
                 ("add", DType::F32) => contiguous::add::FLOAT,
-                // ("badd", DType::F32) => contiguous::add::FLOAT,
+                ("badd", DType::F32) => contiguous::add::FLOAT,
                 ("sub", DType::F32) => contiguous::sub::FLOAT,
-                //("bsub", DType::F32) => contiguous::sub::FLOAT,
+                ("bsub", DType::F32) => contiguous::sub::FLOAT,
                 ("mul", DType::F32) => contiguous::mul::FLOAT,
-                // ("bmul", DType::F32) => contiguous::mul::FLOAT,
+                ("bmul", DType::F32) => contiguous::mul::FLOAT,
                 ("div", DType::F32) => contiguous::div::FLOAT,
-                // ("bdiv", DType::F32) => contiguous::div::FLOAT,
+                ("bdiv", DType::F32) => contiguous::div::FLOAT,
                 ("add", DType::F16) => contiguous::add::HALF,
-                // ("badd", DType::F16) => contiguous::add::HALF,
+                ("badd", DType::F16) => contiguous::add::HALF,
                 ("sub", DType::F16) => contiguous::sub::HALF,
-                // ("bsub", DType::F16) => contiguous::sub::HALF,
+                ("bsub", DType::F16) => contiguous::sub::HALF,
                 ("mul", DType::F16) => contiguous::mul::HALF,
-                // ("bmul", DType::F16) => contiguous::mul::HALF,
+                ("bmul", DType::F16) => contiguous::mul::HALF,
                 ("div", DType::F16) => contiguous::div::HALF,
-                // ("bdiv", DType::F16) => contiguous::div::HALF,
+                ("bdiv", DType::F16) => contiguous::div::HALF,
                 (name, dtype) => crate::bail!("Match {name} - {dtype:?}"),
             };
             candle_metal_kernels::call_binary_contiguous(
@@ -803,8 +617,8 @@ impl BackendStorage for MetalStorage {
             .map_err(MetalError::from)?;
         }
         command_buffer.set_label("binary");
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
+        drop(command_buffer);
+        self.device.commit();
         Ok(Self::new(buffer, device.clone(), dtype))
     }

@@ -821,7 +635,7 @@ impl BackendStorage for MetalStorage {
         let dims = shape.dims();
         let el = shape.elem_count();
         let dtype = t.dtype;
-        let buffer = self.device.new_buffer(el, dtype, "where");
+        let buffer = self.device.new_buffer(el, dtype);
         let command_buffer = self.device.command_buffer();
         if t.dtype() != f.dtype() {
             crate::bail!("Invalid ternary different dtypes for values");
@@ -849,8 +663,6 @@ impl BackendStorage for MetalStorage {
             &buffer,
         )
         .map_err(MetalError::from)?;
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
         Ok(Self::new(buffer, device, dtype))
     }

@@ -940,7 +752,7 @@ impl BackendStorage for MetalStorage {
         let dst_el = ids_el * left_size * right_size;
         let dtype = self.dtype;
         let device = self.device();
-        let buffer = device.new_buffer(dst_el, dtype, "index_select");
+        let buffer = device.new_buffer(dst_el, dtype);
         let name = match (ids.dtype, self.dtype) {
             (DType::U32, DType::F32) => "is_u32_f32",
             (DType::U32, DType::F16) => "is_u32_f16",
@@ -960,8 +772,6 @@ impl BackendStorage for MetalStorage {
             &buffer,
         )
         .map_err(MetalError::from)?;
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
         Ok(Self::new(buffer, device.clone(), dtype))
     }

@@ -985,117 +795,54 @@ impl BackendStorage for MetalStorage {
         rhs_l: &Layout,
     ) -> Result<Self> {
         // Create descriptors
-        let (type_id, size) = match self.dtype {
-            DType::F32 => (
-                metal::mps::MPS_FLOATBIT_ENCODING | 32,
-                core::mem::size_of::<f32>() as NSUInteger,
-            ),
-            DType::F16 => (
-                metal::mps::MPS_FLOATBIT_ENCODING | 16,
-                core::mem::size_of::<f16>() as NSUInteger,
-            ),
-            dtype => todo!("Dtype for matmul {dtype:?} is not supported"),
-        };
-
-        let lhs_stride = lhs_l.stride();
-        let rhs_stride = rhs_l.stride();
-        let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
-        let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
-        let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
-        let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
-        // The a tensor has dims batching, k, n (rhs)
-        let transpose_left = if lhs_m1 == 1 && lhs_m2 == k {
-            false
-        } else if lhs_m1 == m && lhs_m2 == 1 {
-            true
-        } else {
-            Err(MetalError::MatMulNonContiguous {
-                lhs_stride: lhs_stride.to_vec(),
-                rhs_stride: rhs_stride.to_vec(),
-                mnk: (m, n, k),
-            })?
-        };
-        let transpose_right = if rhs_m1 == 1 && rhs_m2 == n {
-            false
-        } else if rhs_m1 == k && rhs_m2 == 1 {
-            true
-        } else {
-            Err(MetalError::MatMulNonContiguous {
-                lhs_stride: lhs_stride.to_vec(),
-                rhs_stride: rhs_stride.to_vec(),
-                mnk: (m, n, k),
-            })?
-        };
-        let b = b as NSUInteger;
-        let m = m as NSUInteger;
-        let n = n as NSUInteger;
-        let k = k as NSUInteger;
-
-        let left_matrix = self.matrix(
-            (b, m, k),
-            transpose_left,
-            size,
-            lhs_l.start_offset() as NSUInteger * size,
-            type_id,
-        )?;
-        let right_matrix = rhs.matrix(
-            (b, k, n),
-            transpose_right,
-            size,
-            rhs_l.start_offset() as NSUInteger * size,
-            type_id,
-        )?;
-        let (result_matrix, out_buffer) =
-            self.device
-                .new_matrix((b, m, n), size, type_id, self.dtype)?;
+        let buffer = self.device.new_buffer(b * m * n, self.dtype);
+        let name = match self.dtype {
+            DType::F32 => "sgemm",
+            DType::F16 => "hgemm",
+            dtype => {
+                return Err(MetalError::Message(format!("matmul doesn't support {dtype:?}")).into())
+            }
+        };

         let command_buffer = self.device.command_buffer();
         command_buffer.set_label("matmul");
-
-        let alpha = 1.0f64;
-        // let beta = f64::MIN;
-        let beta = 1.0;
-        // Create kernel
-        let matrix_multiplication = MatrixMultiplication::init(
-            &self.device,
-            transpose_left,
-            transpose_right,
-            m,
-            n,
-            k,
-            alpha,
-            beta,
-        )
-        .ok_or_else(|| {
-            MetalError::from("Failed to create matrix multiplication kernel".to_string())
-        })?;
-        matrix_multiplication.set_batch_size(b);
-        matrix_multiplication.set_batch_start(0);
-
-        // Encode kernel to command buffer
-        matrix_multiplication.encode_to_command_buffer(
-            &command_buffer,
-            &left_matrix,
-            &right_matrix,
-            &result_matrix,
-        );
-        command_buffer.commit();
-        out_buffer.did_modify_range(metal::NSRange::new(0, out_buffer.length()));
-        // println!("========= MATMUL {:?}", Arc::strong_count(&out_buffer));
-        Ok(Self::new(out_buffer, self.device.clone(), self.dtype()))
+        candle_metal_kernels::call_gemm(
+            &self.device.device,
+            &command_buffer,
+            &self.device.kernels,
+            name,
+            (b, m, n, k),
+            &lhs_l.stride(),
+            lhs_l.start_offset(),
+            &self.buffer,
+            &rhs_l.stride(),
+            rhs_l.start_offset(),
+            &rhs.buffer,
+            &buffer,
+        )
+        .map_err(MetalError::from)?;
+        // Create kernel
+        drop(command_buffer);
+        self.device.commit();
+
+        Ok(Self::new(buffer, self.device.clone(), self.dtype()))
     }

     fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> {
         let command_buffer = self.device.command_buffer();
-        // println!("Copy strided");
         if src_l.is_contiguous() && self.dtype == dst.dtype() {
             command_buffer.set_label("copy_contiguous");
             let blit = command_buffer.new_blit_command_encoder();
-            blit.set_label("copy_contiguous");
             let src_offset = (src_l.start_offset() * self.dtype.size_in_bytes()) as NSUInteger;
-            let length = (src_l.shape().elem_count() * self.dtype.size_in_bytes()) as NSUInteger;
             let dst_offset = (dst_offset * dst.dtype().size_in_bytes()) as NSUInteger;
-            blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
+            blit.copy_from_buffer(
+                &self.buffer,
+                src_offset,
+                dst.buffer(),
+                dst_offset,
+                self.buffer.length() - src_offset,
+            );
             blit.end_encoding();
         } else {
             let src_shape = src_l.shape();
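`matmul` now passes raw strides and offsets to `candle_metal_kernels::call_gemm` instead of building MPS matrix descriptors; whether an operand is transposed is inferred from its last two strides (the inference itself appears in the `call_gemm` hunk near the end of this compare). A worked example of that rule:

```rust
// For a contiguous lhs of shape (b, m, k), the row-major strides are
// [m * k, k, 1]: the innermost stride is 1 and the row stride equals the
// column count, which call_gemm reads as "not transposed".
fn is_transposed(stride: &[usize], rows: usize, cols: usize) -> Option<bool> {
    let m1 = stride[stride.len() - 1]; // innermost stride
    let m2 = stride[stride.len() - 2]; // row stride
    if m1 == 1 && m2 == cols {
        Some(false) // plain row-major
    } else if m1 == rows && m2 == 1 {
        Some(true) // column-major view, i.e. a transposed matrix
    } else {
        None // not expressible as a (possibly transposed) dense matrix
    }
}

fn main() {
    let (m, k) = (4usize, 3usize);
    let lhs_stride = [m * k, k, 1]; // contiguous (b, m, k)
    assert_eq!(is_transposed(&lhs_stride, m, k), Some(false));
    let lhs_t_stride = [m * k, 1, m]; // same storage viewed as a transpose
    assert_eq!(is_transposed(&lhs_t_stride, m, k), Some(true));
}
```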
@@ -1126,7 +873,8 @@ impl BackendStorage for MetalStorage {
             .map_err(MetalError::from)?;
             command_buffer.set_label("copy_strided");
         }
-        command_buffer.commit();
+        drop(command_buffer);
+        self.device.commit();
         Ok(())
     }
 }
@@ -1156,22 +904,22 @@ impl MetalStorage {
     ) -> Result<Matrix> {
         let key = (b, m, n, transpose, size, offset, type_id);

-        // let mut matrices = self.matrices.try_write().unwrap();
-        // if let Some(matrix) = matrices.get(&key) {
-        //     Ok(matrix.clone())
-        // } else {
-        let descriptor = if transpose {
-            MatrixDescriptor::init_multiple(n, m, b, m * size, m * n * size, type_id)
-        } else {
-            MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id)
-        };
-        let matrix = Matrix::init_with_buffer_descriptor(&self.buffer, offset, &descriptor)
-            .ok_or_else(|| {
-                MetalError::from("Failed to create matrix multiplication kernel".to_string())
-            })?;
-        // matrices.insert(key, matrix.clone());
-        Ok(matrix)
-        // }
+        let mut matrices = self.matrices.try_write().unwrap();
+        if let Some(matrix) = matrices.get(&key) {
+            Ok(matrix.clone())
+        } else {
+            let descriptor = if transpose {
+                MatrixDescriptor::init_multiple(n, m, b, m * size, m * n * size, type_id)
+            } else {
+                MatrixDescriptor::init_multiple(m, n, b, n * size, m * n * size, type_id)
+            };
+            let matrix = Matrix::init_with_buffer_descriptor(&self.buffer, offset, &descriptor)
+                .ok_or_else(|| {
+                    MetalError::from("Failed to create matrix multiplication kernel".to_string())
+                })?;
+            matrices.insert(key, matrix.clone());
+            Ok(matrix)
+        }
     }
 }
@@ -1179,29 +927,16 @@ impl BackendDevice for MetalDevice {
     type Storage = MetalStorage;

     fn new(ordinal: usize) -> Result<Self> {
-        // println!("CREATING DEVICE");
         let device = metal::Device::all().swap_remove(ordinal);
-
-        let n = 64;
         let command_queue = device.new_command_queue();
-        let command_buffers = (0..n)
-            .map(|i| {
-                let command_buffer = command_queue.new_command_buffer().to_owned();
-                command_buffer.enqueue();
-                command_buffer.set_label(&format!("num {i}"));
-                command_buffer
-            })
-            .collect();
-        let command_buffers = Arc::new(RwLock::new(command_buffers));
-        let command_buffer_index = Arc::new(RwLock::new(0));
+        let command_buffer = Arc::new(RwLock::new(command_queue.new_command_buffer().to_owned()));
         let kernels = Arc::new(Kernels::new());
         let buffers = Arc::new(RwLock::new(HashMap::new()));
         Ok(Self {
             device,
             command_queue,
-            command_buffers,
-            command_buffer_index,
+            command_buffer,
             buffers,
             kernels,
         })
@@ -1222,21 +957,7 @@ impl BackendDevice for MetalDevice {
     }

     fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> {
-        let buffer = self.new_buffer(shape.elem_count(), dtype, "zeros");
-        let command_buffer = self.command_buffer();
-        command_buffer.set_label("zeros");
-        let blit = command_buffer.new_blit_command_encoder();
-        blit.fill_buffer(
-            &buffer,
-            metal::NSRange {
-                location: 0,
-                length: buffer.length(),
-            },
-            0,
-        );
-        blit.end_encoding();
-        command_buffer.commit();
-        buffer.did_modify_range(metal::NSRange::new(0, buffer.length()));
+        let buffer = self.new_buffer(shape.elem_count(), dtype);
         Ok(MetalStorage::new(buffer, self.clone(), dtype))
     }

@@ -1864,7 +1864,7 @@ impl Tensor {
             }
             (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
             (Storage::Metal(storage), Device::Cpu) => {
-                // println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
+                println!("{storage:?} - {:?}", storage.to_cpu_storage()?);
                 Storage::Cpu(storage.to_cpu_storage()?)
             }
             (Storage::Cuda(storage), Device::Cuda(cuda)) => {
@@ -29,7 +29,9 @@ kernel void FN_NAME( \
     if (id >= dim) { \
         return; \
     } \
-    output[id] = TYPENAME(float(input[id]) * mul + add); \
+    const TYPENAME m = TYPENAME(mul); \
+    const TYPENAME a = TYPENAME(add); \
+    output[id] = input[id] * m + a; \
 } \
 kernel void FN_NAME##_strided( \
     constant size_t &dim, \
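The affine kernel now converts `mul` and `add` to `TYPENAME` once and keeps the arithmetic in the tensor's own type instead of round-tripping every element through `float`. For `half` data the two paths can round differently in the last bit; a small Rust illustration of that difference using the `half` crate (an assumption for this example, not part of the diff):

```rust
use half::f16;

fn main() {
    let (mul, add) = (0.1f32, 0.5f32);
    let x = f16::from_f32(3.0);

    // Old kernel: upcast the element to f32, compute, downcast once.
    let via_f32 = f16::from_f32(x.to_f32() * mul + add);

    // New kernel: convert the scalars to f16 once, compute in f16.
    let (m, a) = (f16::from_f32(mul), f16::from_f32(add));
    let in_f16 = x * m + a;

    // Results may differ in the last bit because the f16 path also
    // rounds the intermediate product.
    println!("{via_f32} vs {in_f16}");
}
```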
@@ -45,80 +47,15 @@ kernel void FN_NAME##_strided( \
     if (id >= dim) { \
         return; \
     } \
-    output[id] = TYPENAME(float(input[get_strided_index(id, num_dims, dims, strides)]) * mul + add); \
-}
-
-#define POWF(FN_NAME, TYPENAME) \
-kernel void FN_NAME( \
-    constant size_t &dim, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    output[id] = TYPENAME(pow(input[id], TYPENAME(mul))); \
-} \
-kernel void FN_NAME##_strided( \
-    constant size_t &dim, \
-    constant size_t &num_dims, \
-    constant size_t *dims, \
-    constant size_t *strides, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    output[id] = TYPENAME(pow(input[get_strided_index(id, num_dims, dims, strides)], TYPENAME(mul))); \
-}
-
-#define ELU(FN_NAME, TYPENAME) \
-kernel void FN_NAME( \
-    constant size_t &dim, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    const TYPENAME x = input[id]; \
-    output[id] = TYPENAME((x > 0)?x: mul * exp(x - 1)); \
-} \
-kernel void FN_NAME##_strided( \
-    constant size_t &dim, \
-    constant size_t &num_dims, \
-    constant size_t *dims, \
-    constant size_t *strides, \
-    constant float &mul, \
-    device const TYPENAME *input, \
-    device TYPENAME *output, \
-    uint id [[ thread_position_in_grid ]] \
-) { \
-    if (id >= dim) { \
-        return; \
-    } \
-    const TYPENAME x = input[get_strided_index(id, num_dims, dims, strides)]; \
-    output[id] = TYPENAME((x > 0)?x: mul * exp(x - 1)); \
-} \
+    const TYPENAME m = TYPENAME(mul); \
+    const TYPENAME a = TYPENAME(add); \
+    output[id] = input[get_strided_index(id, num_dims, dims, strides)] * m + a; \
+} \

 AFFINE(affine_float, float)
 AFFINE(affine_half, half)
-POWF(powf_float, float)
-POWF(powf_half, half)
-ELU(elu_float, float)
-ELU(elu_half, half)

 #if __METAL_VERSION__ >= 310
 AFFINE(affine_bfloat, bfloat);
-POWF(powf_bfloat, bfloat);
-ELU(elu_bfloat, bfloat);
 #endif
@@ -1,6 +1,6 @@
 use metal::{
     Buffer, CommandBufferRef, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState,
-    Device, Function, Library, MTLSize,
+    Device, Function, FunctionConstantValues, Library, MTLDataType, MTLSize, NSUInteger,
 };
 use std::collections::HashMap;
 use std::ffi::c_void;
@@ -13,6 +13,7 @@ const BINARY: &str = include_str!("binary.metal");
 const TERNARY: &str = include_str!("ternary.metal");
 const CAST: &str = include_str!("cast.metal");
 const REDUCE: &str = include_str!("reduce.metal");
+const MFA: &[u8] = include_bytes!("libMetalFlashAttention.metallib");

 fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) {
     let size = length as u64;
@@ -105,6 +106,7 @@ pub enum Source {
     Ternary,
     Cast,
     Reduce,
+    Mfa,
 }

 macro_rules! ops{
@@ -153,7 +155,7 @@ macro_rules! ops{
 }

 pub mod unary {
-    ops!(cos, sin, exp, sqr, sqrt, neg, log, gelu, ceil, floor, round, erf, gelu_erf, tanh);
+    ops!(cos, sin, exp, sqr, sqrt, neg, log, gelu, ceil, floor, round, erf, gelu_erf);
 }
 pub mod binary {
     ops!(add, sub, mul, div);
@@ -179,9 +181,88 @@ impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
     }
 }

-type KernelMap<T> = HashMap<&'static str, T>;
+#[derive(Debug, PartialEq)]
+pub enum Value {
+    USize(usize),
+    Bool(bool),
+    F32(f32),
+    U16(u16),
+}
+
+impl std::hash::Hash for Value {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        match self {
+            Value::F32(v) => v.to_bits().hash(state),
+            Value::USize(v) => v.hash(state),
+            Value::U16(v) => v.hash(state),
+            Value::Bool(v) => v.hash(state),
+        }
+    }
+}
+
+impl Value {
+    fn data_type(&self) -> MTLDataType {
+        match self {
+            Value::USize(_) => MTLDataType::UInt,
+            Value::F32(_) => MTLDataType::Float,
+            Value::U16(_) => MTLDataType::UShort,
+            Value::Bool(_) => MTLDataType::Bool,
+        }
+    }
+}
+
+/// Not true, good enough for our purposes.
+impl Eq for Value {}
+
+#[derive(Debug, Eq, PartialEq, Hash)]
+struct ConstantValues(Vec<(usize, Value)>);
+
+impl ConstantValues {
+    pub fn new(values: Vec<(usize, Value)>) -> Self {
+        Self(values)
+    }
+
+    fn function_constant_values(&self) -> FunctionConstantValues {
+        let f = FunctionConstantValues::new();
+        for (index, value) in &self.0 {
+            let ty = value.data_type();
+            match value {
+                Value::USize(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const usize as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+                Value::F32(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const f32 as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+                Value::U16(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const u16 as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+                Value::Bool(v) => {
+                    f.set_constant_value_at_index(
+                        v as *const bool as *const c_void,
+                        ty,
+                        *index as u64,
+                    );
+                }
+            }
+        }
+        f
+    }
+}
+
 type Libraries = HashMap<Source, Library>;
-type Pipelines = KernelMap<ComputePipelineState>;
+type Pipelines = HashMap<(&'static str, Option<ConstantValues>), ComputePipelineState>;

 #[derive(Debug, Default)]
 pub struct Kernels {
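`f32` implements neither `Hash` nor `Eq` (NaN is not equal to itself), so the new `Value` type hashes the bit pattern and opts into a deliberately loose `Eq`; that is what allows `ConstantValues` to take part in a `HashMap` key. The same trick in isolation:

```rust
use std::collections::HashMap;

// f32 has no Hash impl; hashing the raw bits sidesteps NaN semantics.
#[derive(Debug, PartialEq)]
struct BitsF32(f32);

impl Eq for BitsF32 {} // sound in practice as long as NaN keys never occur

impl std::hash::Hash for BitsF32 {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.0.to_bits().hash(state)
    }
}

fn main() {
    let mut cache: HashMap<(&'static str, BitsF32), u32> = HashMap::new();
    cache.insert(("sgemm", BitsF32(1.0)), 42);
    assert_eq!(cache.get(&("sgemm", BitsF32(1.0))), Some(&42));
}
```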
@@ -208,6 +289,7 @@ impl Kernels {
             Source::Indexing => INDEXING,
             Source::Cast => CAST,
             Source::Reduce => REDUCE,
+            Source::Mfa => unimplemented!("Mfa is not a source"),
         }
     }
@@ -220,10 +302,20 @@ impl Kernels {
         if let Some(lib) = libraries.get(&source) {
             Ok(lib.clone())
         } else {
-            let source_content = self.get_library_source(source);
-            let lib = device
-                .new_library_with_source(source_content, &CompileOptions::new())
-                .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?;
+            let lib = match source {
+                Source::Mfa => {
+                    let source_data = MFA;
+                    device
+                        .new_library_with_data(source_data)
+                        .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
+                }
+                source => {
+                    let source_content = self.get_library_source(source);
+                    device
+                        .new_library_with_source(source_content, &CompileOptions::new())
+                        .map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
+                }
+            };
             libraries.insert(source, lib.clone());
             Ok(lib)
         }
@@ -234,19 +326,41 @@ impl Kernels {
         device: &Device,
         source: Source,
         name: &'static str,
+        constants: Option<FunctionConstantValues>,
     ) -> Result<Function, MetalKernelError> {
         let func = self
             .load_library(device, source)?
-            .get_function(name, None)
+            .get_function(name, constants)
             .map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?;
         Ok(func)
-        // let mut funcs = self.funcs.write()?;
-        // if let Some(func) = funcs.get(name) {
-        //     Ok(func.clone())
-        // } else {
-        //     funcs.insert(name, func.clone());
-        //     Ok(func)
-        // }
+    }
+
+    fn load_pipeline_with_constants(
+        &self,
+        device: &Device,
+        source: Source,
+        name: &'static str,
+        constants: Option<ConstantValues>,
+    ) -> Result<ComputePipelineState, MetalKernelError> {
+        let mut pipelines = self.pipelines.write()?;
+        let key = (name, constants);
+        if let Some(pipeline) = pipelines.get(&key) {
+            Ok(pipeline.clone())
+        } else {
+            let (name, constants) = key;
+            let func = self.load_function(
+                device,
+                source,
+                name,
+                constants.as_ref().map(|c| c.function_constant_values()),
+            )?;
+            let pipeline = device
+                .new_compute_pipeline_state_with_function(&func)
+                .map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
+            pipelines.insert((name, constants), pipeline.clone());
+
+            Ok(pipeline)
+        }
     }

     pub fn load_pipeline(
@@ -255,18 +369,7 @@ impl Kernels {
         source: Source,
         name: &'static str,
     ) -> Result<ComputePipelineState, MetalKernelError> {
-        let mut pipelines = self.pipelines.write()?;
-        if let Some(pipeline) = pipelines.get(name) {
-            Ok(pipeline.clone())
-        } else {
-            let func = self.load_function(device, source, name)?;
-            let pipeline = device
-                .new_compute_pipeline_state_with_function(&func)
-                .map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
-            pipelines.insert(name, pipeline.clone());
-
-            Ok(pipeline)
-        }
+        self.load_pipeline_with_constants(device, source, name, None)
     }
 }

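The pipeline cache is now keyed on `(name, Option<ConstantValues>)`, so each distinct specialization compiles and caches its own `ComputePipelineState`, and `load_pipeline` reduces to the `None` case. A generic sketch of this cache-or-build pattern (`u64` stands in for `ConstantValues` and a `String` for the pipeline):

```rust
use std::collections::HashMap;

// One cache entry per (kernel name, specialization) pair.
#[derive(Clone, Debug, PartialEq)]
struct Pipeline(String);

fn load(
    cache: &mut HashMap<(&'static str, Option<u64>), Pipeline>,
    name: &'static str,
    constants: Option<u64>, // stand-in for Option<ConstantValues>
) -> Pipeline {
    if let Some(p) = cache.get(&(name, constants)) {
        return p.clone(); // cache hit: skip the expensive compile
    }
    let built = Pipeline(format!("{name}:{constants:?}")); // expensive compile here
    cache.insert((name, constants), built.clone());
    built
}

fn main() {
    let mut cache = HashMap::new();
    let a = load(&mut cache, "sgemm", Some(16));
    let b = load(&mut cache, "sgemm", Some(16)); // reuses the first build
    let c = load(&mut cache, "sgemm", Some(32)); // new specialization
    assert_eq!(a, b);
    assert_ne!(a, c);
    assert_eq!(cache.len(), 2);
}
```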
@ -616,130 +719,6 @@ pub fn call_affine_strided(
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
|
||||||
pub fn call_powf(
|
|
||||||
device: &Device,
|
|
||||||
command_buffer: &CommandBufferRef,
|
|
||||||
kernels: &Kernels,
|
|
||||||
name: &'static str,
|
|
||||||
size: usize,
|
|
||||||
input: &Buffer,
|
|
||||||
output: &Buffer,
|
|
||||||
mul: f32,
|
|
||||||
) -> Result<(), MetalKernelError> {
|
|
||||||
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
|
|
||||||
|
|
||||||
let encoder = command_buffer.new_compute_command_encoder();
|
|
||||||
-    encoder.set_compute_pipeline_state(&pipeline);
-
-    set_params!(encoder, (size, mul, input, output));
-
-    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
-    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
-    encoder.end_encoding();
-    Ok(())
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn call_powf_strided(
-    device: &Device,
-    command_buffer: &CommandBufferRef,
-    kernels: &Kernels,
-    name: &'static str,
-    shape: &[usize],
-    input: &Buffer,
-    input_stride: &[usize],
-    input_offset: usize,
-    output: &Buffer,
-    mul: f32,
-) -> Result<(), MetalKernelError> {
-    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
-    let size: usize = shape.iter().product();
-
-    let encoder = command_buffer.new_compute_command_encoder();
-    encoder.set_compute_pipeline_state(&pipeline);
-
-    set_params!(
-        encoder,
-        (
-            size,
-            shape.len(),
-            shape,
-            input_stride,
-            mul,
-            (input, input_offset),
-            output
-        )
-    );
-
-    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
-    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
-    encoder.end_encoding();
-    Ok(())
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn call_elu(
-    device: &Device,
-    command_buffer: &CommandBufferRef,
-    kernels: &Kernels,
-    name: &'static str,
-    size: usize,
-    input: &Buffer,
-    output: &Buffer,
-    mul: f32,
-) -> Result<(), MetalKernelError> {
-    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
-
-    let encoder = command_buffer.new_compute_command_encoder();
-    encoder.set_compute_pipeline_state(&pipeline);
-
-    set_params!(encoder, (size, mul, input, output));
-
-    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
-    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
-    encoder.end_encoding();
-    Ok(())
-}
-
-#[allow(clippy::too_many_arguments)]
-pub fn call_elu_strided(
-    device: &Device,
-    command_buffer: &CommandBufferRef,
-    kernels: &Kernels,
-    name: &'static str,
-    shape: &[usize],
-    input: &Buffer,
-    input_stride: &[usize],
-    input_offset: usize,
-    output: &Buffer,
-    mul: f32,
-) -> Result<(), MetalKernelError> {
-    let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
-    let size: usize = shape.iter().product();
-
-    let encoder = command_buffer.new_compute_command_encoder();
-    encoder.set_compute_pipeline_state(&pipeline);
-
-    set_params!(
-        encoder,
-        (
-            size,
-            shape.len(),
-            shape,
-            input_stride,
-            mul,
-            (input, input_offset),
-            output
-        )
-    );
-
-    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
-    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
-    encoder.end_encoding();
-    Ok(())
-}
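For orientation, every helper in this family follows the same encode/set_params/dispatch pattern. A minimal sketch of driving one of them end to end, modeled on the `run_gemm` test scaffolding further down in this diff; the `device()` and `read_to_vec` helpers and the `"elu_f32"` kernel name are illustrative assumptions, not part of this change:

// Hedged sketch (test-module context): run an elu kernel over a slice.
// `device()`, `read_to_vec` and the kernel name "elu_f32" are assumed here.
fn run_elu(v: &[f32], mul: f32) -> Vec<f32> {
    let device = device();
    let kernels = Kernels::new();
    let command_queue = device.new_command_queue();
    let command_buffer = command_queue.new_command_buffer();
    let options = MTLResourceOptions::StorageModeManaged;
    let input = device.new_buffer_with_data(
        v.as_ptr() as *const core::ffi::c_void,
        std::mem::size_of_val(v) as u64,
        options,
    );
    let output = device.new_buffer(std::mem::size_of_val(v) as u64, options);
    call_elu(
        &device,
        command_buffer,
        &kernels,
        "elu_f32",
        v.len(),
        &input,
        &output,
        mul,
    )
    .unwrap();
    command_buffer.commit();
    command_buffer.wait_until_completed();
    output.read_to_vec::<f32>(v.len())
}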

 pub fn call_where_cond_strided(
     device: &Device,
     command_buffer: &CommandBufferRef,
@ -830,5 +809,169 @@ pub fn call_index_select(
     Ok(())
 }
+
+#[allow(clippy::too_many_arguments)]
+pub fn call_gemm(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    name: &'static str,
+    (b, m, n, k): (usize, usize, usize, usize),
+    lhs_stride: &[usize],
+    lhs_offset: usize,
+    lhs_buffer: &Buffer,
+    rhs_stride: &[usize],
+    rhs_offset: usize,
+    rhs_buffer: &Buffer,
+    output: &Buffer,
+) -> Result<(), MetalKernelError> {
+    assert!(rhs_stride.len() >= 2);
+    assert!(lhs_stride.len() >= 2);
+    let rhs_m1 = rhs_stride[rhs_stride.len() - 1];
+    let rhs_m2 = rhs_stride[rhs_stride.len() - 2];
+    let lhs_m1 = lhs_stride[lhs_stride.len() - 1];
+    let lhs_m2 = lhs_stride[lhs_stride.len() - 2];
+    let a_trans = if lhs_m1 == 1 && lhs_m2 == k {
+        false
+    } else if lhs_m1 == m && lhs_m2 == 1 {
+        true
+    } else {
+        todo!();
+        // Err(MetalError::MatMulNonContiguous {
+        //     lhs_stride: lhs_stride.to_vec(),
+        //     rhs_stride: rhs_stride.to_vec(),
+        //     mnk: (m, n, k),
+        // })?
+    };
+    let b_trans = if rhs_m1 == 1 && rhs_m2 == n {
+        false
+    } else if rhs_m1 == k && rhs_m2 == 1 {
+        true
+    } else {
+        todo!();
+        // Err(MetalError::MatMulNonContiguous {
+        //     lhs_stride: lhs_stride.to_vec(),
+        //     rhs_stride: rhs_stride.to_vec(),
+        //     mnk: (m, n, k),
+        // })?
+    };
+    let d_trans = false;
+    let alpha = 1.0f32;
+    let beta = 0.0f32;
+    let batched = b > 1;
+    let fused_activation = false;
+    let fused_bias = false;
+    let m_simd = 16;
+    let n_simd = 16;
+    let k_simd = 16;
+    let m_splits = 2;
+    let n_splits = 2;
+    let constants = Some(ConstantValues::new(vec![
+        (0, Value::USize(m)),
+        (1, Value::USize(n)),
+        (2, Value::USize(k)),
+        (10, Value::Bool(a_trans)),
+        (11, Value::Bool(b_trans)),
+        (13, Value::Bool(d_trans)),
+        (20, Value::F32(alpha)),
+        (21, Value::F32(beta)),
+        (100, Value::Bool(batched)),
+        (101, Value::Bool(fused_activation)),
+        // Garbage
+        (102, Value::Bool(false)),
+        (103, Value::Bool(false)),
+        (113, Value::Bool(false)),
+        (50_000, Value::Bool(false)),
+        // End garbage
+        (200, Value::U16(m_simd)),
+        (201, Value::U16(n_simd)),
+        (202, Value::U16(k_simd)),
+        (210, Value::U16(m_splits)),
+        (211, Value::U16(n_splits)),
+        (50_001, Value::Bool(fused_bias)),
+    ]));
+    // println!("Constants {constants:?}");
+    let pipeline = kernels.load_pipeline_with_constants(device, Source::Mfa, name, constants)?;
+    let m_group = m_simd * m_splits;
+    let n_group = n_simd * n_splits;
+
+    let a_block_length = m_group * k_simd;
+    let b_block_length = k_simd * n_group;
+
+    let mut block_elements = a_block_length + b_block_length;
+    if (m % 8 != 0) && (n % 8 != 0) {
+        let c_block_length = m_group * n_group;
+        block_elements = std::cmp::max(c_block_length, block_elements)
+    }
+    if fused_bias {
+        if d_trans {
+            block_elements = std::cmp::max(block_elements, m_group);
+        } else {
+            block_elements = std::cmp::max(block_elements, n_group);
+        }
+    }
+    // TODO adapt for f16
+    let bytes = match name {
+        "sgemm" => 4,
+        "hgemm" => 2,
+        other => {
+            return Err(MetalKernelError::LoadLibraryError(format!(
+                "{other} is not a valid kernel for gemm"
+            )));
+        }
+    };
+    let block_bytes = block_elements * bytes;
+
+    let encoder = command_buffer.new_compute_command_encoder();
+    encoder.set_compute_pipeline_state(&pipeline);
+    // println!("Threadgroup {block_bytes}");
+    encoder.set_threadgroup_memory_length(0, block_bytes.into());
+    encoder.set_buffer(0, Some(lhs_buffer), lhs_offset as NSUInteger);
+    encoder.set_buffer(1, Some(rhs_buffer), rhs_offset as NSUInteger);
+    encoder.set_buffer(2, Some(output), 0);
+    // TODO Tensor D
+
+    let grid_z = b;
+    if batched {
+        let byte_stride_a: usize = lhs_stride[lhs_stride.len() - 3] * bytes as usize;
+        let byte_stride_b: usize = rhs_stride[rhs_stride.len() - 3] * bytes as usize;
+        let byte_stride_c = m * n * bytes as usize;
+        // TODO byte_stride_d
+        let byte_stride_d = 0;
+
+        let mut buffer: Vec<u64> = Vec::with_capacity(b * 4);
+        for i in 0..b {
+            buffer.push((i * byte_stride_a) as u64);
+            buffer.push((i * byte_stride_b) as u64);
+            buffer.push((i * byte_stride_c) as u64);
+            buffer.push((i * byte_stride_d) as u64);
+        }
+        encoder.set_bytes(
+            10,
+            buffer.len() as NSUInteger * core::mem::size_of::<u64>(),
+            buffer.as_ptr() as *const NSUInteger as *const c_void,
+        );
+    }
+
+    let grid_size = MTLSize {
+        width: divide(n, n_group.into()),
+        height: divide(m, m_group.into()),
+        depth: grid_z as NSUInteger,
+    };
+    let group_size = MTLSize {
+        width: 32 * (m_splits as u64) * (n_splits as u64),
+        height: 1,
+        depth: 1,
+    };
+    // println!("grid size {grid_size:?} group size {group_size:?}");
+    encoder.dispatch_thread_groups(grid_size, group_size);
+    encoder.end_encoding();
+
+    Ok(())
+}
+
+fn divide(m: usize, b: usize) -> NSUInteger {
+    ((m + b - 1) / b) as NSUInteger
+}

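With the defaults above, m_group = m_simd * m_splits = 32 and n_group = 32, so each threadgroup of 32 * m_splits * n_splits = 128 threads produces one 32x32 tile of the output, and `divide` is just a ceiling division that rounds the grid up at the matrix edges. A quick, self-contained sketch of that arithmetic:

// Ceiling division as used for the dispatch grid: one threadgroup per
// m_group x n_group output tile, rounded up at the matrix edges.
fn divide(m: usize, b: usize) -> usize {
    (m + b - 1) / b
}

fn main() {
    let (m, n) = (100, 70);
    let (m_group, n_group) = (32, 32); // m_simd * m_splits, n_simd * n_splits
    assert_eq!(divide(m, m_group), 4); // 4 tile rows cover 100 output rows
    assert_eq!(divide(n, n_group), 3); // 3 tile columns cover 70 output columns
}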
 #[cfg(test)]
 mod tests;
BIN candle-metal-kernels/src/libMetalFlashAttention.metallib (new file)
Binary file not shown.
@ -18,7 +18,7 @@ METAL_FUNC uint get_strided_index(
     return strided_i;
 }

-constant int THREADGROUP_SIZE = 2048;
+constant int THREADGROUP_SIZE = 1024;

 # define REDUCE(FN, NAME, T) \
 kernel void NAME( \
211 candle-metal-kernels/src/test.swift (new file)
@ -0,0 +1,211 @@
+import Metal
+import MetalPerformanceShadersGraph
+
+let type = MTLDataType.float;
+let dataType = type;
+var B = 2;
+var M = 2;
+var N = 4;
+var K = 3;
+var A_trans = false;
+var B_trans = false;
+var D_trans = false;
+var alpha = Float(1.0);
+var beta = Float(0.0);
+var batched = B > 1;
+var fused_activation = false;
+var fused_bias = false;
+let constants = MTLFunctionConstantValues()
+constants.setConstantValue(&M, type: .uint, index: 0)
+constants.setConstantValue(&N, type: .uint, index: 1)
+constants.setConstantValue(&K, type: .uint, index: 2)
+constants.setConstantValue(&A_trans, type: .bool, index: 10)
+constants.setConstantValue(&B_trans, type: .bool, index: 11)
+constants.setConstantValue(&D_trans, type: .bool, index: 13)
+constants.setConstantValue(&alpha, type: .float, index: 20)
+constants.setConstantValue(&beta, type: .float, index: 21)
+constants.setConstantValue(&batched, type: .bool, index: 100)
+constants.setConstantValue(&fused_activation, type: .bool, index: 101)
+constants.setConstantValue(&fused_bias, type: .bool, index: 50001)
+
+var M_simd = UInt16(16)
+var N_simd = UInt16(16)
+var K_simd = UInt16(32)
+var M_splits = UInt16(2)
+var N_splits = UInt16(2)
+constants.setConstantValue(&M_simd, type: .ushort, index: 200)
+constants.setConstantValue(&N_simd, type: .ushort, index: 201)
+constants.setConstantValue(&K_simd, type: .ushort, index: 202)
+constants.setConstantValue(&M_splits, type: .ushort, index: 210)
+constants.setConstantValue(&N_splits, type: .ushort, index: 211)
+
+let M_group = M_simd * M_splits
+let N_group = N_simd * N_splits
+
+// Satisfy Metal API validation.
+#if DEBUG
+do {
+    var garbage: SIMD4<UInt64> = .zero
+    constants.setConstantValue(&garbage, type: .bool, index: 102)
+    constants.setConstantValue(&garbage, type: .bool, index: 103)
+    constants.setConstantValue(&garbage, type: .bool, index: 113)
+    constants.setConstantValue(&garbage, type: .bool, index: 50000)
+}
+#endif
+print(constants)
+
+let device = MTLCopyAllDevices().first!
+device.shouldMaximizeConcurrentCompilation = true
+
+var libraryURL = URL.init(string: "/Users/nicolas/src/candle/candle-metal-kernels/")!;
+libraryURL.append(component: "src")
+libraryURL.append(component: "libMetalFlashAttention.metallib")
+let library = try! device.makeLibrary(URL: libraryURL)
+
+var name: String
+switch dataType {
+case .half: name = "hgemm"
+case .float: name = "sgemm"
+default: fatalError()
+}
+let function = try! library.makeFunction(
+    name: name, constantValues: constants)
+
+let A_block_length = M_group * K_simd
+let B_block_length = K_simd * N_group
+
+var blockElements = A_block_length + B_block_length;
+if (M % 8 != 0) && (N % 8 != 0) {
+    let C_block_length = M_group * N_group;
+    blockElements = max(C_block_length, blockElements)
+}
+if fused_bias {
+    if D_trans {
+        blockElements = max(blockElements, M_group)
+    } else {
+        blockElements = max(blockElements, N_group)
+    }
+}
+// let blockBytes = blockElements * UInt16(dataType.size)
+let elementSize = 4
+let blockBytes = blockElements * UInt16(elementSize)
+
+func ceilDivide(target: Int, granularity: UInt16) -> Int {
+    (target + Int(granularity) - 1) / Int(granularity)
+}
+var gridSize = MTLSize(
+    width: ceilDivide(target: N, granularity: N_group),
+    height: ceilDivide(target: M, granularity: M_group),
+    depth: 1)
+let groupSize = MTLSize(
+    width: Int(32 * M_splits * N_splits),
+    height: 1,
+    depth: 1)
+
+let commandQueue = device.makeCommandQueue()!
+let commandBuffer = commandQueue.makeCommandBuffer()!
+let encoder = commandBuffer.makeComputeCommandEncoder(dispatchType: MTLDispatchType.serial)!
+let pipeline = try device.makeComputePipelineState(function: function)
+
+let threadgroupMemoryLength = blockBytes;
+print(threadgroupMemoryLength)
+encoder.setComputePipelineState(pipeline)
+encoder.setThreadgroupMemoryLength(Int(threadgroupMemoryLength), index: 0)
+
+let rowsA = M;
+let columnsA = K;
+let rowsB = K;
+let columnsB = N;
+let rowsC = M;
+let columnsC = N;
+var arrayA = [Float](repeating: 0, count: B * rowsA * columnsA)
+var arrayB = [Float](repeating: 0, count: B * rowsB * columnsB)
+var arrayC = [Float](repeating: 0, count: B * rowsC * columnsC)
+for i in 0..<arrayA.count {
+    arrayA[i] = Float(i)
+}
+for i in 0..<arrayB.count {
+    arrayB[i] = Float(i)
+}
+
+let bufferA = device.makeBuffer(bytes: arrayA, length: B * rowsA * columnsA * MemoryLayout<Float>.stride, options: [])
+let bufferB = device.makeBuffer(bytes: arrayB, length: B * rowsB * columnsB * MemoryLayout<Float>.stride, options: [])
+let bufferC = device.makeBuffer(length: B * rowsC * columnsC * MemoryLayout<Float>.stride, options: [])
+
+print(arrayA)
+print(arrayB)
+
+encoder.setBuffer(bufferA, offset: 0, index: 0)
+encoder.setBuffer(bufferB, offset: 0, index: 1)
+encoder.setBuffer(bufferC, offset: 0, index: 2)
+var gridZ: Int = B
+if batched {
+    func byteStride(shape: [Int]) -> Int {
+        let rank = shape.count
+        var output = elementSize * shape[rank - 2] * shape[rank - 1]
+        if shape.dropLast(2).reduce(1, *) == 1 {
+            output = 0
+        }
+        return output
+    }
+    let byteStrideA = M * K * elementSize
+    let byteStrideB = N * K * elementSize
+    let byteStrideC = M * N * elementSize
+
+    let byteStrideD = 0
+    // if let shapeD = tensors.d?.shape {
+    //     let rank = shapeD.count
+    //     byteStrideD = elementSize * shapeD[rank - 1]
+    //     if shapeD.dropLast(1).reduce(1, *) == 1 {
+    //         byteStrideD = 0
+    //     }
+    // }
+    withUnsafeTemporaryAllocation(
+        of: SIMD4<UInt64>.self, capacity: gridZ
+    ) { buffer in
+        for i in 0..<buffer.count {
+            buffer[i] = SIMD4(
+                UInt64(truncatingIfNeeded: i * byteStrideA),
+                UInt64(truncatingIfNeeded: i * byteStrideB),
+                UInt64(truncatingIfNeeded: i * byteStrideC),
+                UInt64(truncatingIfNeeded: i * byteStrideD))
+        }
+
+        let bufferLength = buffer.count * MemoryLayout<SIMD4<UInt64>>.stride
+        assert(MemoryLayout<SIMD4<UInt64>>.stride == 8 * 4)
+        encoder.setBytes(buffer.baseAddress!, length: bufferLength, index: 10)
+        print("BATCHED")
+        print(buffer)
+    }
+}
+gridSize.depth = gridZ
+
+print(gridSize, groupSize)
+encoder.dispatchThreadgroups(
+    gridSize, threadsPerThreadgroup: groupSize
+)
+encoder.endEncoding()
+commandBuffer.commit()
+
+commandBuffer.waitUntilCompleted()
+var contents = bufferC!.contents();
+
+var count = B * rowsA * columnsB;
+
+var typedPointer = contents.bindMemory(to: Float.self, capacity: count)
+
+var bufferedPointer = UnsafeBufferPointer(start: typedPointer, count: count)
+
+print(Array(bufferedPointer))
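Note: this standalone script mirrors the function-constant table and dispatch geometry of `call_gemm` above against the checked-in libMetalFlashAttention.metallib, fills A and B with integer ramps, and prints the raw contents of buffer C. With the hard-coded library path adjusted, it should run directly as a Swift script on macOS (for example, `swift test.swift`).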
@ -205,25 +205,6 @@ fn cos_strided_random() {
     );
 }

-#[test]
-fn gelu_f16() {
-    let v: Vec<f16> = [-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0]
-        .iter()
-        .map(|v| f16::from_f32(*v))
-        .collect();
-    let expected: Vec<f32> = vec![-0.0, -0.16, 0.0, 0.84, 1.96, 3.0, 10.0, 20.0];
-    let results = run(&v, unary::contiguous::gelu::HALF);
-    assert_eq!(approx_f16(results, 2), expected);
-}
-
-#[test]
-fn gelu_f32() {
-    let v: Vec<f32> = vec![-10f32, -1.0, 0., 1., 2., 3., 10.0, 20.0];
-    let expected: Vec<f32> = vec![-0.0, -0.159, 0.0, 0.841, 1.955, 2.996, 10.0, 20.0];
-    let results = run(&v, unary::contiguous::gelu::FLOAT);
-    assert_eq!(approx(results, 3), expected);
-}
-
 #[test]
 fn binary_add_f32() {
     let left = vec![1.0f32, 2.0, 3.0];
@ -546,8 +527,8 @@ fn cos_f16() {
         .collect();
     let results = run(&v, unary::contiguous::cos::HALF);
     let expected: Vec<f16> = v.iter().map(|v| f16::from_f32(v.to_f32().cos())).collect();
-    assert_eq!(approx_f16(results, 2), vec![0.54, -0.42, -0.99]);
-    assert_eq!(approx_f16(expected, 2), vec![0.54, -0.42, -0.99]);
+    assert_eq!(approx_f16(results, 4), vec![0.5405, -0.4163, -0.9902]);
+    assert_eq!(approx_f16(expected, 4), vec![0.5405, -0.4163, -0.9902]);
 }

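The tightened assertions above compare at four decimal places instead of two, and the specific digits fall straight out of f16 rounding (an f16 mantissa has roughly three decimal digits of precision). A small sketch against the `half` crate, which candle already depends on, reproduces them:

// Sketch: reproduce the f16-rounded cosines asserted above.
// cos(1) = 0.540302... rounds to 0.540527 in f16, i.e. 0.5405 at 4 decimals.
use half::f16;

fn main() {
    for x in [1f32, 2.0, 3.0] {
        let exact = x.cos();
        let rounded = f16::from_f32(exact).to_f32();
        println!("cos({x}) = {exact:.6} -> {rounded:.6} as f16");
    }
}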
 fn run_reduce<T: Clone>(v: &[T], out_length: usize, name: &'static str) -> Vec<T> {
@ -744,3 +725,76 @@ fn where_cond() {
     );
     assert_eq!(approx(results, 4), vec![-1.0f32, 2.0, -3.0, -4.0, 5.0, 6.0]);
 }
+
+fn run_gemm<T: Clone>(
+    (b, m, n, k): (usize, usize, usize, usize),
+    lhs: &[T],
+    lhs_stride: Vec<usize>,
+    rhs: &[T],
+    rhs_stride: Vec<usize>,
+) -> Vec<T> {
+    let device = device();
+    let kernels = Kernels::new();
+    let command_queue = device.new_command_queue();
+    let command_buffer = command_queue.new_command_buffer();
+    let options = MTLResourceOptions::StorageModeManaged;
+
+    let lhs = device.new_buffer_with_data(
+        lhs.as_ptr() as *const core::ffi::c_void,
+        std::mem::size_of_val(lhs) as u64,
+        options,
+    );
+    let rhs = device.new_buffer_with_data(
+        rhs.as_ptr() as *const core::ffi::c_void,
+        std::mem::size_of_val(rhs) as u64,
+        options,
+    );
+    let length = b * m * n;
+    let output = device.new_buffer((length * core::mem::size_of::<T>()) as u64, options);
+    call_gemm(
+        &device,
+        command_buffer,
+        &kernels,
+        "sgemm",
+        (b, m, n, k),
+        &lhs_stride,
+        0,
+        &lhs,
+        &rhs_stride,
+        0,
+        &rhs,
+        &output,
+    )
+    .unwrap();
+    command_buffer.commit();
+    command_buffer.wait_until_completed();
+
+    output.read_to_vec::<T>(length)
+}
+
+#[test]
+fn gemm() {
+    let (b, m, n, k) = (1, 2, 4, 3);
+    let lhs_stride = vec![m * k, k, 1];
+    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
+    let rhs_stride = vec![n * k, n, 1];
+    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
+    let results = run_gemm((b, m, n, k), &lhs, lhs_stride, &rhs, rhs_stride);
+    assert_eq!(
+        approx(results, 4),
+        vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]
+    );
+    let (b, m, n, k) = (2, 2, 4, 3);
+    let lhs_stride = vec![m * k, k, 1];
+    let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect();
+    let rhs_stride = vec![n * k, n, 1];
+    let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect();
+    let results = run_gemm((b, m, n, k), &lhs, lhs_stride, &rhs, rhs_stride);
+    assert_eq!(
+        approx(results, 4),
+        vec![
+            20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0, 344.0, 365.0, 386.0, 407.0, 488.0,
+            518.0, 548.0, 578.0
+        ]
+    );
+}
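As a sanity check on those expected values, the first case is a plain row-major 2x3 by 3x4 product of two integer ramps. A naive CPU reference, independent of the Metal path, gives the same numbers:

// Naive reference matmul for the b = 1, m = 2, n = 4, k = 3 case above:
// lhs = [0, 1, 2, 3, 4, 5] (2x3), rhs = [0..12) (3x4), both row-major.
fn main() {
    let (m, n, k) = (2, 4, 3);
    let lhs: Vec<f32> = (0..m * k).map(|f| f as f32).collect();
    let rhs: Vec<f32> = (0..k * n).map(|f| f as f32).collect();
    let mut out = vec![0f32; m * n];
    for i in 0..m {
        for j in 0..n {
            for l in 0..k {
                out[i * n + j] += lhs[i * k + l] * rhs[l * n + j];
            }
        }
    }
    assert_eq!(out, vec![20.0, 23.0, 26.0, 29.0, 56.0, 68.0, 80.0, 92.0]);
}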
@ -42,14 +42,9 @@ template <typename T> METAL_FUNC T erf(T in){

     return T(sign*y);
 }
-template <typename T> METAL_FUNC T id(T in) { return in; }
-template <typename T> METAL_FUNC T gelu_erf(T x) {
-    return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2);
-}
-template <typename T> METAL_FUNC T gelu(T x) {
-    if (x > 5) {
-        return x;
-    }
+template <typename T> METAL_FUNC T id(T in){ return in; }
+template <typename T> METAL_FUNC T gelu_erf(T x){ return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2); }
+template <typename T> METAL_FUNC T gelu(T x){
     T x_sq = x * x;
     T x_cube = x_sq * x;
     T alpha = x + static_cast<T>(0.044715) * x_cube;
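The tail of `gelu` is cut off by this hunk, but the `0.044715` cubic term in the context is the usual tanh-based gelu approximation. Assuming that formula (a sketch, not a quote of the kernel), it can be sanity-checked on the CPU:

// CPU sketch of the tanh gelu approximation, assuming
// gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
fn gelu(x: f32) -> f32 {
    let beta = (2.0f32 / std::f32::consts::PI).sqrt();
    let alpha = x + 0.044715 * x * x * x;
    0.5 * x * (1.0 + (beta * alpha).tanh())
}

fn main() {
    // Matches the expectations from the (removed) gelu_f32 test above:
    // gelu(-1) ~ -0.159, gelu(1) ~ 0.841, gelu(2) ~ 1.955.
    for x in [-1.0f32, 1.0, 2.0] {
        println!("gelu({x}) = {:.3}", gelu(x));
    }
}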
@ -69,7 +64,7 @@ kernel void FN_NAME( \
     if (thread_position_in_grid >= dim) { \
         return; \
     } \
-    output[thread_position_in_grid] = TYPENAME(FN(float(input[thread_position_in_grid]))); \
+    output[thread_position_in_grid] = TYPENAME(FN(input[thread_position_in_grid])); \
 }\
 kernel void FN_NAME_STRIDED( \
     constant size_t &dim, \
@ -83,7 +78,7 @@ kernel void FN_NAME_STRIDED( \
     if (thread_position_in_grid >= dim) { \
         return; \
     } \
-    output[thread_position_in_grid] = TYPENAME(FN(float(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)]))); \
+    output[thread_position_in_grid] = TYPENAME(FN(input[get_strided_index(thread_position_in_grid, num_dims, dims, strides)])); \
 }

 #define UNARY_OP(NAME) \
@ -107,7 +102,6 @@ UNARY_OP(floor)
 UNARY_OP(round)
 UNARY_OP(gelu_erf)
 UNARY_OP(erf)
-UNARY_OP(tanh)
 UNARY(id, float, copy_float, copy_float_strided)
 UNARY(id, half, copy_half, copy_half_strided)
 UNARY(id, uint8_t, copy_u8, copy_u8_strided)
@ -127,7 +121,6 @@ BFLOAT_UNARY_OP(floor)
 BFLOAT_UNARY_OP(round)
 BFLOAT_UNARY_OP(gelu_erf)
 BFLOAT_UNARY_OP(erf)
-BFLOAT_UNARY_OP(tanh)

 UNARY(id, bfloat, copy_bfloat, copy_bfloat_strided)
 #endif
@ -19,7 +19,6 @@ num-traits = { workspace = true }
 rayon = { workspace = true }
 safetensors = { workspace = true }
 serde = { workspace = true }
-metal = { workspace = true, optional = true }
 candle-metal-kernels = { path = "../candle-metal-kernels", version = "0.3.0", optional = true }

 [dev-dependencies]
@ -31,4 +30,4 @@ default = []
 accelerate = ["dep:accelerate-src", "candle/accelerate"]
 cuda = ["candle/cuda"]
 mkl = ["dep:intel-mkl-src", "candle/mkl"]
-metal = ["candle/metal", "dep:candle-metal-kernels", "dep:metal"]
+metal = ["candle/metal", "dep:candle-metal-kernels"]
@ -226,7 +226,7 @@ impl candle::CustomOp1 for SoftmaxLastDim {

         let last_dim = layout.dims()[layout.shape().rank() - 1];
         let elem_count = layout.shape().elem_count();
-        let mut output = device.new_buffer(elem_count, storage.dtype(), "softmax");
+        let mut output = device.new_buffer(elem_count, storage.dtype());
         candle_metal_kernels::call_last_softmax(
             device.metal_device(),
             &command_buffer,
@ -238,8 +238,6 @@ impl candle::CustomOp1 for SoftmaxLastDim {
             &mut output,
         )
         .unwrap();
-        command_buffer.commit();
-        output.did_modify_range(metal::NSRange::new(0, output.length()));
         let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype());
         Ok((newstorage, layout.shape().clone()))
     }
|
@ -31,4 +31,3 @@ accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"]
|
|||||||
cuda = ["candle/cuda", "candle-nn/cuda"]
|
cuda = ["candle/cuda", "candle-nn/cuda"]
|
||||||
flash-attn = ["cuda", "dep:candle-flash-attn"]
|
flash-attn = ["cuda", "dep:candle-flash-attn"]
|
||||||
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
|
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
|
||||||
metal = ["candle/metal", "candle-nn/metal"]
|
|
||||||
|
@ -142,10 +142,10 @@ impl RotaryEmbedding {
             .to_dtype(DType::F32)?
             .reshape((max_seq_len, 1))?;
         let freqs = t.matmul(&inv_freq)?;
-        let sin = freqs.sin()?;
-        let cos = freqs.cos()?;
-        // todo!("{}", sin);
-        Ok(Self { sin, cos })
+        Ok(Self {
+            sin: freqs.sin()?,
+            cos: freqs.cos()?,
+        })
     }

     fn apply_rotary_emb_qkv(
@ -273,10 +273,6 @@ impl MHA {
     }

     fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
-        // let view = xs.to_string();
-        // if view.contains("NaN") {
-        //     panic!("NaN");
-        // }
         let _enter = self.span.enter();
         let (b_size, seq_len, _n_embd) = xs.dims3()?;
         let qkv = self
|
|||||||
self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
|
self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
#[test]
|
|
||||||
fn test_rotary() {
|
|
||||||
let dev = Device::new_metal(0).unwrap();
|
|
||||||
for i in 0..10000 {
|
|
||||||
let dim = 8;
|
|
||||||
let max_seq_len = 12;
|
|
||||||
let inv_freq: Vec<_> = (0..dim)
|
|
||||||
.step_by(2)
|
|
||||||
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
|
|
||||||
.collect();
|
|
||||||
let inv_freq_len = inv_freq.len();
|
|
||||||
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), &dev).unwrap();
|
|
||||||
let t = Tensor::arange(0u32, max_seq_len as u32, &dev)
|
|
||||||
.unwrap()
|
|
||||||
.to_dtype(DType::F32)
|
|
||||||
.unwrap()
|
|
||||||
.reshape((max_seq_len, 1))
|
|
||||||
.unwrap();
|
|
||||||
let x: f32 = t.i((1, 0)).unwrap().to_scalar().unwrap();
|
|
||||||
assert_eq!(x, 1.0);
|
|
||||||
let x: f32 = inv_freq.i((0, 1)).unwrap().to_scalar().unwrap();
|
|
||||||
assert_eq!(x, 0.1);
|
|
||||||
let freqs = t.matmul(&inv_freq).unwrap();
|
|
||||||
let x: f32 = freqs.i((1, 1)).unwrap().to_scalar().unwrap();
|
|
||||||
assert_eq!(x, 0.1);
|
|
||||||
let sin = freqs.sin().unwrap().contiguous().unwrap();
|
|
||||||
let x: f32 = sin.i((1, 1)).unwrap().to_scalar().unwrap();
|
|
||||||
assert_eq!(x, 0.099833414);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||