Mirror of https://github.com/huggingface/candle.git (synced 2025-06-19 03:54:56 +00:00)

Few fixes.
@@ -61,7 +61,8 @@ tracing-subscriber = "0.3.7"
 wav = "1.0.0"
 yoke = { version = "0.7.2", features = ["derive"] }
 zip = { version = "0.6.6", default-features = false }
-metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+# metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+metal = { path = "../metal-rs", features = ["mps"] }
 
 [profile.release-with-debug]
 inherits = "release"
@@ -137,23 +137,37 @@ impl BackendStorage for MetalStorage {
         let el = shape.elem_count();
         let dtype = self.dtype;
 
-        assert!(layout.is_contiguous());
-        assert!(layout.start_offset() == 0);
-        assert_eq!(dtype, DType::F32);
-
         let mut buffer = device.new_buffer(el, self.dtype);
         let command_buffer = self.device.command_queue.new_command_buffer();
-        candle_metal_kernels::call_affine(
-            &device.device,
-            &command_buffer,
-            &device.kernels,
-            el,
-            &self.buffer,
-            &mut buffer,
-            mul as f32,
-            add as f32,
-        )
-        .unwrap();
+        if layout.is_contiguous() && layout.start_offset() == 0 {
+            assert_eq!(dtype, DType::F32);
+            candle_metal_kernels::call_affine(
+                &device.device,
+                &command_buffer,
+                &device.kernels,
+                el,
+                &self.buffer,
+                &mut buffer,
+                mul as f32,
+                add as f32,
+            )
+            .unwrap();
+        } else {
+            assert_eq!(dtype, DType::F32);
+            candle_metal_kernels::call_affine_strided(
+                &device.device,
+                &command_buffer,
+                &device.kernels,
+                layout.dims(),
+                &self.buffer,
+                layout.stride(),
+                layout.start_offset() * dtype.size_in_bytes(),
+                &mut buffer,
+                mul as f32,
+                add as f32,
+            )
+            .unwrap();
+        }
         command_buffer.commit();
         command_buffer.wait_until_completed();
         return Ok(Self {
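The hunk above splits affine into two paths: the existing contiguous kernel when the layout is row-major with no leading offset, and the new strided kernel otherwise. As a stand-alone illustration of what that condition means (this is not candle's Layout API, just the idea behind layout.is_contiguous()), the check boils down to comparing the strides against the row-major ones:

    /// Minimal sketch of the "contiguous" test used in the dispatch above.
    /// `dims` and `strides` are in elements; this is NOT candle's Layout type.
    fn is_contiguous(dims: &[usize], strides: &[usize]) -> bool {
        // Row-major contiguous strides are [.., d_last2 * d_last, d_last, 1].
        let mut expected = 1usize;
        for (d, s) in dims.iter().zip(strides.iter()).rev() {
            if *d > 1 && *s != expected {
                return false;
            }
            expected *= d;
        }
        true
    }

    fn main() {
        // A plain 2x3 row-major tensor: strides [3, 1] -> contiguous fast path.
        assert!(is_contiguous(&[2, 3], &[3, 1]));
        // The same buffer viewed transposed as 3x2: strides [1, 3] -> strided path.
        assert!(!is_contiguous(&[3, 2], &[1, 3]));
    }

A transposed or otherwise strided view fails the check (or carries a non-zero start offset) and so takes the call_affine_strided branch.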
@@ -295,7 +309,8 @@ impl BackendStorage for MetalStorage {
                 ("ulog", DType::F32) => contiguous::log::FLOAT,
                 ("ugelu", DType::F32) => contiguous::gelu::FLOAT,
                 // TODO erf does not exist in metal
-                ("ugelu_erf", DType::F32) => contiguous::gelu::FLOAT,
+                ("ugelu_erf", DType::F32) => crate::bail!("erf is not implemented in metal"),
+                ("uerf", DType::F32) => crate::bail!("erf is not implemented in metal"),
                 ("uceil", DType::F32) => contiguous::ceil::FLOAT,
                 ("ufloor", DType::F32) => contiguous::floor::FLOAT,
                 ("uround", DType::F32) => contiguous::round::FLOAT,
@@ -625,57 +640,64 @@ impl BackendStorage for MetalStorage {
         };
         let result_descriptor = MatrixDescriptor::init_single(m, n, n * size, type_id);
 
-        // Create matrix objects
-        let left_matrix = Matrix::init_with_buffer_descriptor(&self.buffer, &left_descriptor)
-            .ok_or_else(|| {
-                MetalError::from("Failed to create matrix multiplication kernel".to_string())
-            })?;
-        let right_matrix = Matrix::init_with_buffer_descriptor(&rhs.buffer, &right_descriptor)
-            .ok_or_else(|| {
-                MetalError::from("Failed to create matrix multiplication kernel".to_string())
-            })?;
-
         let out_buffer = self.device.new_buffer(elem_count, self.dtype);
-        let result_matrix = Matrix::init_with_buffer_descriptor(&out_buffer, &result_descriptor)
+        let command_buffer = self.device.command_queue.new_command_buffer();
+        for bi in 0..b {
+            // Create matrix objects
+            let left_matrix = Matrix::init_with_buffer_descriptor(
+                &self.buffer,
+                bi * m * k * size,
+                &left_descriptor,
+            )
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
+            let right_matrix = Matrix::init_with_buffer_descriptor(
+                &rhs.buffer,
+                bi * n * k * size,
+                &right_descriptor,
+            )
             .ok_or_else(|| {
                 MetalError::from("Failed to create matrix multiplication kernel".to_string())
             })?;
 
-        let alpha = 1.0f64;
-        let beta = 0.0f64;
-        // Create kernel
-        let matrix_multiplication = MatrixMultiplication::init(
-            &self.device,
-            transpose_left,
-            transpose_right,
-            m,
-            n,
-            k,
-            alpha,
-            beta,
-        )
-        .ok_or_else(|| {
-            MetalError::from("Failed to create matrix multiplication kernel".to_string())
-        })?;
-
-        matrix_multiplication.set_batch_size(b);
-
-        // Encode kernel to command buffer
-        let command_buffer = self.device.command_queue.new_command_buffer();
-        matrix_multiplication.encode_to_command_buffer(
-            command_buffer,
-            &left_matrix,
-            &right_matrix,
-            &result_matrix,
-        );
+            let result_matrix = Matrix::init_with_buffer_descriptor(
+                &out_buffer,
+                bi * m * n * size,
+                &result_descriptor,
+            )
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
+
+            let alpha = 1.0f64;
+            let beta = 0.0f64;
+            // Create kernel
+            let matrix_multiplication = MatrixMultiplication::init(
+                &self.device,
+                transpose_left,
+                transpose_right,
+                m,
+                n,
+                k,
+                alpha,
+                beta,
+            )
+            .ok_or_else(|| {
+                MetalError::from("Failed to create matrix multiplication kernel".to_string())
+            })?;
+
+            // Encode kernel to command buffer
+            matrix_multiplication.encode_to_command_buffer(
+                command_buffer,
+                &left_matrix,
+                &right_matrix,
+                &result_matrix,
+            );
+        }
         command_buffer.commit();
         command_buffer.wait_until_completed();
 
-        // let left = self.buffer.read_to_vec::<f32>(10);
-        // let right = rhs.buffer.read_to_vec::<f32>(10);
-        // let out = out_buffer.read_to_vec::<f32>(40);
-        // todo!("Out {left:?} {right:?} {out:?}");
-
         Ok(Self {
             buffer: out_buffer,
             device: self.device.clone(),
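The rewritten matmul drops set_batch_size and instead loops over the batch index, rebuilding the MPS matrix objects with an explicit byte offset into each shared buffer. A minimal sketch of that offset arithmetic (batch_offsets is an illustrative name, not part of the diff; size is the element size in bytes, as in the code above):

    /// Byte offsets of the bi-th (m x k) * (k x n) -> (m x n) matrices inside
    /// the packed batch buffers, mirroring the `bi * m * k * size` expressions.
    fn batch_offsets(bi: usize, m: usize, n: usize, k: usize, size: usize) -> (usize, usize, usize) {
        let left = bi * m * k * size;   // lhs: m x k elements per batch entry
        let right = bi * n * k * size;  // rhs: k x n elements per batch entry
        let result = bi * m * n * size; // output: m x n elements per batch entry
        (left, right, result)
    }

    fn main() {
        // Second batch entry (bi = 1) of an f32 (2x3) x (3x4) matmul.
        let (l, r, o) = batch_offsets(1, 2, 4, 3, 4);
        assert_eq!((l, r, o), (24, 48, 32)); // 2*3*4, 4*3*4, 2*4*4 bytes
    }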
@@ -10,7 +10,8 @@ categories = ["science"]
 license = "MIT OR Apache-2.0"
 
 [dependencies]
-metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+# metal = { git = "https://github.com/ivarflakstad/metal-rs.git", features = ["mps"] }
+metal = { path = "../../metal-rs", features = ["mps"] }
 once_cell = "1.18.0"
 thiserror = "1"
 tracing = "0.1.37"
@@ -33,6 +33,24 @@ kernel void FN_NAME( \
     const TYPENAME a = TYPENAME(add); \
     output[id] = input[id] * m + a; \
 } \
+kernel void FN_NAME##_strided( \
+    constant size_t &dim, \
+    constant size_t &num_dims, \
+    constant size_t *dims, \
+    constant size_t *strides, \
+    constant float &mul, \
+    constant float &add, \
+    device const TYPENAME *input, \
+    device TYPENAME *output, \
+    uint id [[ thread_position_in_grid ]] \
+) { \
+    if (id >= dim) { \
+        return; \
+    } \
+    const TYPENAME m = TYPENAME(mul); \
+    const TYPENAME a = TYPENAME(add); \
+    output[id] = input[get_strided_index(id, num_dims, dims, strides)] * m + a; \
+} \
 
 AFFINE(affine_float, float)
 AFFINE(affine_half, half)
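The _strided kernel gathers its input through get_strided_index, which is presumably the usual row-major decomposition of the linear thread id over dims followed by a dot product with strides (an assumption; the helper itself is not shown in this diff). A Rust sketch of that mapping, applied to a transposed view with the same affine formula:

    /// Assumed behaviour of the Metal helper `get_strided_index`: decompose the
    /// linear id over `dims` (row-major) and re-linearise it with `strides`.
    fn strided_index(mut id: usize, dims: &[usize], strides: &[usize]) -> usize {
        let mut offset = 0;
        for (d, s) in dims.iter().zip(strides.iter()).rev() {
            offset += (id % d) * s;
            id /= d;
        }
        offset
    }

    fn main() {
        // A 2x4 row-major buffer viewed transposed as dims [4, 2], strides [1, 4].
        let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let (dims, strides) = ([4usize, 2], [1usize, 4]);
        let (mul, add) = (1.5f32, 1.1f32);
        let out: Vec<f32> = (0..input.len())
            .map(|id| input[strided_index(id, &dims, &strides)] * mul + add)
            .collect();
        // Gathers 1, 5, 2, 6, 3, 7, 4, 8 before applying x * mul + add.
        println!("{out:?}");
    }

Under that assumption, id walks the logical tensor in row-major order while the load follows the view's strides, which is exactly what the plain contiguous kernel cannot do.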
@@ -592,6 +592,53 @@ pub fn call_affine(
     Ok(())
 }
 
+pub fn call_affine_strided(
+    device: &Device,
+    command_buffer: &CommandBufferRef,
+    kernels: &Kernels,
+    shape: &[usize],
+    input: &Buffer,
+    input_stride: &[usize],
+    input_offset: usize,
+    output: &mut Buffer,
+    mul: f32,
+    add: f32,
+) -> Result<(), MetalKernelError> {
+    let func = kernels.load_function(device, Source::Affine, "affine_float_strided")?;
+    let pipeline_state_descriptor = ComputePipelineDescriptor::new();
+    pipeline_state_descriptor.set_compute_function(Some(&func));
+
+    let size: usize = shape.iter().product();
+
+    let pipeline = device
+        .new_compute_pipeline_state_with_function(
+            pipeline_state_descriptor.compute_function().unwrap(),
+        )
+        .unwrap();
+
+    let encoder = command_buffer.new_compute_command_encoder();
+    encoder.set_compute_pipeline_state(&pipeline);
+
+    set_params!(
+        encoder,
+        (
+            size,
+            shape.len(),
+            shape,
+            input_stride,
+            mul,
+            add,
+            (input, input_offset),
+            output
+        )
+    );
+
+    let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
+    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
+    encoder.end_encoding();
+    Ok(())
+}
+
 pub fn call_where_cond_strided(
     device: &Device,
     command_buffer: &CommandBufferRef,
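call_affine_strided dispatches one thread per output element, and the kernel's early return (if (id >= dim) return;) implies the grid can be rounded up to whole threadgroups. A hypothetical stand-in for the sizing that linear_split might perform (an assumption; the real helper is not part of this diff):

    /// Hypothetical grid sizing: round the element count up to whole
    /// threadgroups; the kernel's `id >= dim` guard drops the excess threads.
    fn split(length: usize, max_threads_per_group: usize) -> (usize, usize) {
        let group_size = max_threads_per_group.min(length.max(1));
        let group_count = (length + group_size - 1) / group_size; // ceiling division
        (group_count, group_size)
    }

    fn main() {
        // 1000 elements with 256-wide threadgroups -> 4 groups, 24 idle threads.
        assert_eq!(split(1000, 256), (4, 256));
    }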
@@ -977,6 +1024,43 @@ mod tests {
         output.read_to_vec::<T>(v.len())
     }
 
+    fn run_affine_strided<T: Clone>(
+        v: &[T],
+        shape: &[usize],
+        strides: &[usize],
+        mul: f64,
+        add: f64,
+    ) -> Vec<T> {
+        let device = device();
+        let kernels = Kernels::new();
+        let command_queue = device.new_command_queue();
+        let command_buffer = command_queue.new_command_buffer();
+
+        let input = new_buffer(&device, v);
+        let mut output = new_buffer(&device, v);
+
+        let size = v.len();
+
+        call_affine_strided(
+            &device,
+            command_buffer,
+            &kernels,
+            size,
+            shape,
+            &input,
+            strides,
+            0,
+            &mut output,
+            mul as f32,
+            add as f32,
+        )
+        .unwrap();
+        command_buffer.commit();
+        command_buffer.wait_until_completed();
+
+        output.read_to_vec::<T>(v.len())
+    }
+
     #[test]
     fn affine() {
         let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
@@ -992,6 +1076,16 @@ mod tests {
         assert_eq!(result, vec![2.6; 40_000]);
     }
 
+    // #[test]
+    // fn affine_strided() {
+    //     let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
+    //     let mul = 1.5;
+    //     let add = 1.1;
+    //     let result = run_affine_(&input, mul, add);
+    //     assert_eq!(result, vec![2.6, 4.1, 5.6, 7.1, 8.6, 10.1, 11.6, 13.1]);
+
+    // }
+
     #[test]
     fn index_select() {
         let embedding = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];