From 7adfb70dff1c8820081761e06329ba4d23c6260c Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Sat, 11 Nov 2023 02:06:48 +0100
Subject: [PATCH] Few fixes.

---
 Cargo.toml                            |   3 +-
 candle-core/src/metal_backend.rs      | 136 +++++++++++++++-----------
 candle-metal-kernels/Cargo.toml       |   3 +-
 candle-metal-kernels/src/affine.metal |  18 ++++
 candle-metal-kernels/src/lib.rs       |  94 ++++++++++++++++++
 5 files changed, 195 insertions(+), 59 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 1a8145ba..6cf99174 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -61,7 +61,8 @@ tracing-subscriber = "0.3.7"
 wav = "1.0.0"
 yoke = { version = "0.7.2", features = ["derive"] }
 zip = { version = "0.6.6", default-features = false }
-metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+# metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+metal = { path = "../metal-rs", features = ["mps"] }
 
 [profile.release-with-debug]
 inherits = "release"
diff --git a/candle-core/src/metal_backend.rs b/candle-core/src/metal_backend.rs
index 72b15006..c48ad5c7 100644
--- a/candle-core/src/metal_backend.rs
+++ b/candle-core/src/metal_backend.rs
@@ -137,23 +137,37 @@ impl BackendStorage for MetalStorage {
         let el = shape.elem_count();
         let dtype = self.dtype;
 
-        assert!(layout.is_contiguous());
-        assert!(layout.start_offset() == 0);
-        assert_eq!(dtype, DType::F32);
-
         let mut buffer = device.new_buffer(el, self.dtype);
         let command_buffer = self.device.command_queue.new_command_buffer();
-        candle_metal_kernels::call_affine(
-            &device.device,
-            &command_buffer,
-            &device.kernels,
-            el,
-            &self.buffer,
-            &mut buffer,
-            mul as f32,
-            add as f32,
-        )
-        .unwrap();
+        if layout.is_contiguous() && layout.start_offset() == 0 {
+            assert_eq!(dtype, DType::F32);
+            candle_metal_kernels::call_affine(
+                &device.device,
+                &command_buffer,
+                &device.kernels,
+                el,
+                &self.buffer,
+                &mut buffer,
+                mul as f32,
+                add as f32,
+            )
+            .unwrap();
+        } else {
+            assert_eq!(dtype, DType::F32);
+            candle_metal_kernels::call_affine_strided(
+                &device.device,
+                &command_buffer,
+                &device.kernels,
+                layout.dims(),
+                &self.buffer,
+                layout.stride(),
+                layout.start_offset() * dtype.size_in_bytes(),
+                &mut buffer,
+                mul as f32,
+                add as f32,
+            )
+            .unwrap();
+        }
         command_buffer.commit();
         command_buffer.wait_until_completed();
         return Ok(Self {
@@ -295,7 +309,8 @@ impl BackendStorage for MetalStorage {
                 ("ulog", DType::F32) => contiguous::log::FLOAT,
                 ("ugelu", DType::F32) => contiguous::gelu::FLOAT,
                 // TODO erf does not exist in metal
-                ("ugelu_erf", DType::F32) => contiguous::gelu::FLOAT,
+                ("ugelu_erf", DType::F32) => crate::bail!("erf is not implemented in metal"),
+                ("uerf", DType::F32) => crate::bail!("erf is not implemented in metal"),
                 ("uceil", DType::F32) => contiguous::ceil::FLOAT,
                 ("ufloor", DType::F32) => contiguous::floor::FLOAT,
                 ("uround", DType::F32) => contiguous::round::FLOAT,
@@ -625,57 +640,64 @@ impl BackendStorage for MetalStorage {
         };
         let result_descriptor = MatrixDescriptor::init_single(m, n, n * size, type_id);
 
-        // Create matrix objects
-        let left_matrix = Matrix::init_with_buffer_descriptor(&self.buffer, &left_descriptor)
-            .ok_or_else(|| {
-                MetalError::from("Failed to create matrix multiplication kernel".to_string())
-            })?;
-        let right_matrix = Matrix::init_with_buffer_descriptor(&rhs.buffer, &right_descriptor)
-            .ok_or_else(|| {
-                MetalError::from("Failed to create matrix multiplication kernel".to_string())
-            })?;
-
         let out_buffer = self.device.new_buffer(elem_count, self.dtype);
-        let result_matrix = Matrix::init_with_buffer_descriptor(&out_buffer, &result_descriptor)
+        let command_buffer = self.device.command_queue.new_command_buffer();
+        for bi in 0..b {
+            // Create matrix objects
+            let left_matrix = Matrix::init_with_buffer_descriptor(
+                &self.buffer,
+                bi * m * k * size,
+                &left_descriptor,
+            )
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
+            let right_matrix = Matrix::init_with_buffer_descriptor(
+                &rhs.buffer,
+                bi * n * k * size,
+                &right_descriptor,
+            )
             .ok_or_else(|| {
                 MetalError::from("Failed to create matrix multiplication kernel".to_string())
             })?;
-        let alpha = 1.0f64;
-        let beta = 0.0f64;
-        // Create kernel
-        let matrix_multiplication = MatrixMultiplication::init(
-            &self.device,
-            transpose_left,
-            transpose_right,
-            m,
-            n,
-            k,
-            alpha,
-            beta,
-        )
-        .ok_or_else(|| {
-            MetalError::from("Failed to create matrix multiplication kernel".to_string())
-        })?;
+            let result_matrix = Matrix::init_with_buffer_descriptor(
+                &out_buffer,
+                bi * m * n * size,
+                &result_descriptor,
+            )
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
 
-        matrix_multiplication.set_batch_size(b);
+            let alpha = 1.0f64;
+            let beta = 0.0f64;
+            // Create kernel
+            let matrix_multiplication = MatrixMultiplication::init(
+                &self.device,
+                transpose_left,
+                transpose_right,
+                m,
+                n,
+                k,
+                alpha,
+                beta,
+            )
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
 
-        // Encode kernel to command buffer
-        let command_buffer = self.device.command_queue.new_command_buffer();
-        matrix_multiplication.encode_to_command_buffer(
-            command_buffer,
-            &left_matrix,
-            &right_matrix,
-            &result_matrix,
-        );
+            // Encode kernel to command buffer
+            matrix_multiplication.encode_to_command_buffer(
+                command_buffer,
+                &left_matrix,
+                &right_matrix,
+                &result_matrix,
+            );
+        }
         command_buffer.commit();
         command_buffer.wait_until_completed();
 
-        // let left = self.buffer.read_to_vec::<f32>(10);
-        // let right = rhs.buffer.read_to_vec::<f32>(10);
-        // let out = out_buffer.read_to_vec::<f32>(40);
-        // todo!("Out {left:?} {right:?} {out:?}");
         Ok(Self {
             buffer: out_buffer,
             device: self.device.clone(),
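The batched matmul hunk above encodes one MPS matrix multiplication per batch element, locating each matrix by a byte offset into the contiguous lhs, rhs and output buffers. A minimal sketch of that offset arithmetic, assuming row-major (b, m, k) x (b, k, n) -> (b, m, n) buffers and an element size in bytes; the helper below is illustrative and not part of the patch:

```rust
/// Byte offsets of the `bi`-th lhs, rhs and output matrices inside
/// contiguous (b, m, k) x (b, k, n) -> (b, m, n) buffers.
fn batch_offsets(bi: usize, m: usize, n: usize, k: usize, elem_size: usize) -> (usize, usize, usize) {
    let lhs = bi * m * k * elem_size; // matches `bi * m * k * size` above
    let rhs = bi * n * k * elem_size; // matches `bi * n * k * size`
    let out = bi * m * n * elem_size; // matches `bi * m * n * size`
    (lhs, rhs, out)
}

fn main() {
    // Second batch (bi = 1) of f32 matrices with m = 2, k = 3, n = 4.
    assert_eq!(batch_offsets(1, 2, 4, 3, 4), (24, 48, 32));
}
```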
diff --git a/candle-metal-kernels/Cargo.toml b/candle-metal-kernels/Cargo.toml
index 2585ca62..2d2742ab 100644
--- a/candle-metal-kernels/Cargo.toml
+++ b/candle-metal-kernels/Cargo.toml
@@ -10,7 +10,8 @@ categories = ["science"]
 license = "MIT OR Apache-2.0"
 
 [dependencies]
-metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+# metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+metal = { path = "../../metal-rs", features = ["mps"] }
 once_cell = "1.18.0"
 thiserror = "1"
 tracing = "0.1.37"
diff --git a/candle-metal-kernels/src/affine.metal b/candle-metal-kernels/src/affine.metal
index e5f0a841..a08bfbc0 100644
--- a/candle-metal-kernels/src/affine.metal
+++ b/candle-metal-kernels/src/affine.metal
@@ -33,6 +33,24 @@ kernel void FN_NAME( \
     const TYPENAME a = TYPENAME(add); \
     output[id] = input[id] * m + a; \
 } \
+kernel void FN_NAME##_strided( \
+    constant size_t &dim, \
+    constant size_t &num_dims, \
+    constant size_t *dims, \
+    constant size_t *strides, \
+    constant float &mul, \
+    constant float &add, \
+    device const TYPENAME *input, \
+    device TYPENAME *output, \
+    uint id [[ thread_position_in_grid ]] \
+) { \
+    if (id >= dim) { \
+        return; \
+    } \
+    const TYPENAME m = TYPENAME(mul); \
+    const TYPENAME a = TYPENAME(add); \
+    output[id] = input[get_strided_index(id, num_dims, dims, strides)] * m + a; \
+} \
 
 AFFINE(affine_float, float)
 AFFINE(affine_half, half)
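The affine_float_strided kernel added above indexes its input through get_strided_index, which lives in the shared Metal kernel sources rather than in this diff; it walks the dims/strides pair from the innermost dimension outwards to turn a linear output index into an input offset. A Rust sketch of the same walk, for reference only and assumed to mirror the Metal helper:

```rust
/// Map a linear index over row-major `dims` to a storage offset under `strides`,
/// consuming dimensions from the innermost (last) one outwards.
fn get_strided_index(mut idx: usize, dims: &[usize], strides: &[usize]) -> usize {
    let mut offset = 0;
    for (&dim, &stride) in dims.iter().zip(strides.iter()).rev() {
        offset += (idx % dim) * stride;
        idx /= dim;
    }
    offset
}

fn main() {
    // A transposed 2x3 view: logical dims (3, 2), storage strides (1, 3).
    let offsets: Vec<usize> = (0..6).map(|i| get_strided_index(i, &[3, 2], &[1, 3])).collect();
    assert_eq!(offsets, vec![0, 3, 1, 4, 2, 5]);
}
```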
diff --git a/candle-metal-kernels/src/lib.rs b/candle-metal-kernels/src/lib.rs
index 5a50d46f..6cdb313d 100644
--- a/candle-metal-kernels/src/lib.rs
+++ b/candle-metal-kernels/src/lib.rs
@@ -592,6 +592,53 @@ pub fn call_affine(
     Ok(())
 }
 
+pub fn call_affine_strided(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    shape: &[usize],
+    input: &Buffer,
+    input_stride: &[usize],
+    input_offset: usize,
+    output: &mut Buffer,
+    mul: f32,
+    add: f32,
+) -> Result<(), MetalKernelError> {
+    let func = kernels.load_function(device, Source::Affine, "affine_float_strided")?;
+    let pipeline_state_descriptor = ComputePipelineDescriptor::new();
+    pipeline_state_descriptor.set_compute_function(Some(&func));
+
+    let size: usize = shape.iter().product();
+
+    let pipeline = device
+        .new_compute_pipeline_state_with_function(
+            pipeline_state_descriptor.compute_function().unwrap(),
+        )
+        .unwrap();
+
+    let encoder = command_buffer.new_compute_command_encoder();
+    encoder.set_compute_pipeline_state(&pipeline);
+
+    set_params!(
+        encoder,
+        (
+            size,
+            shape.len(),
+            shape,
+            input_stride,
+            mul,
+            add,
+            (input, input_offset),
+            output
+        )
+    );
+
+    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.end_encoding();
+    Ok(())
+}
+
 pub fn call_where_cond_strided(
     device: &Device,
     command_buffer: &CommandBufferRef,
@@ -977,6 +1024,43 @@ mod tests {
         output.read_to_vec::<T>(v.len())
     }
 
+    fn run_affine_strided<T: Clone>(
+        v: &[T],
+        shape: &[usize],
+        strides: &[usize],
+        mul: f64,
+        add: f64,
+    ) -> Vec<T> {
+        let device = device();
+        let kernels = Kernels::new();
+        let command_queue = device.new_command_queue();
+        let command_buffer = command_queue.new_command_buffer();
+
+        let input = new_buffer(&device, v);
+        let mut output = new_buffer(&device, v);
+
+        let size = v.len();
+
+        call_affine_strided(
+            &device,
+            command_buffer,
+            &kernels,
+            shape,
+            &input,
+            strides,
+            0,
+            &mut output,
+            mul as f32,
+            add as f32,
+        )
+        .unwrap();
+        command_buffer.commit();
+        command_buffer.wait_until_completed();
+
+        output.read_to_vec::<T>(v.len())
+    }
+
     #[test]
     fn affine() {
         let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
@@ -992,6 +1076,16 @@ mod tests {
         assert_eq!(result, vec![2.6; 40_000]);
     }
 
+    // #[test]
+    // fn affine_strided() {
+    //     let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
+    //     let mul = 1.5;
+    //     let add = 1.1;
+    //     let result = run_affine_(&input, mul, add);
+    //     assert_eq!(result, vec![2.6, 4.1, 5.6, 7.1, 8.6, 10.1, 11.6, 13.1]);
+
+    // }
+
     #[test]
     fn index_select() {
         let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
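At the tensor level, the strided affine path is what allows affine to run directly on a non-contiguous view instead of hitting the old contiguity asserts. A rough usage sketch against the candle API, assuming a build with the metal feature on a Metal-capable machine; this is illustrative and not a test from the patch:

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    let device = Device::new_metal(0)?;
    let t = Tensor::arange(0f32, 6f32, &device)?.reshape((2, 3))?;
    // The transpose yields a strided (non-contiguous) layout, so the affine
    // call below is expected to go through the affine_float_strided kernel.
    let strided = t.t()?;
    let out = strided.affine(1.5, 1.1)?;
    println!("{out}");
    Ok(())
}
```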