Improve arg reduce and add contiguous impl

This commit is contained in:
Ivar Flakstad
2024-01-21 18:12:49 +01:00
parent d5902840e0
commit 1f4c54493e
4 changed files with 358 additions and 234 deletions

View File

@ -1,4 +1,4 @@
mod benchmarks;
use criterion::criterion_main;
criterion_main!(benchmarks::reduce::benches);
criterion_main!(benchmarks::reduce::benches);

View File

@ -1,19 +1,25 @@
use candle_core::{DType, Tensor};
use crate::benchmarks::{bench_name, device, BenchDevice};
use candle_core::{DType, Device, Tensor};
use criterion::{black_box, criterion_group, Criterion, Throughput};
use std::time::Instant;
use crate::benchmarks::{bench_name, device, BenchDevice};
fn run(a: &Tensor) {
fn run_sum(a: &Tensor) {
a.sum(2).unwrap();
}
fn run_arg_min(a: &Tensor) {
a.argmin(2).unwrap();
}
/// Criterion entry point: resolves the bench device once, then runs the
/// plain reduce benchmark followed by the arg-reduce benchmark on it.
fn criterion_benchmark(c: &mut Criterion) {
    let dev = device().unwrap();
    run_reduce(c, &dev);
    run_arg_reduce(c, &dev);
}
fn run_reduce(c: &mut Criterion, device: &Device) {
let b = 1;
let m = 2048;
let k = 2048;
let device = device().unwrap();
let a = Tensor::rand(-1000.0f32, 1000.0f32, (b, m, k), &device).unwrap();
let flops = b * m * k * DType::F32.size_in_bytes();
@ -24,7 +30,31 @@ fn criterion_benchmark(c: &mut Criterion) {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&a));
run_sum(black_box(&a));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn run_arg_reduce(c: &mut Criterion, device: &Device) {
let b = 1;
let m = 2048;
let k = 2048;
let a = Tensor::rand(-1000.0f32, 1000.0f32, (b, m, k), &device).unwrap();
let flops = b * m * k * DType::F32.size_in_bytes();
let mut group = c.benchmark_group(bench_name("arg_reduce"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run_arg_min(black_box(&a));
}
device.sync().unwrap();
start.elapsed()

View File

@ -511,59 +511,56 @@ impl BackendStorage for MetalStorage {
(ReduceOp::Sum, DType::F32) => ("fast_sum_f32", false, false),
(ReduceOp::Min, DType::F32) => ("fast_min_f32", true, false),
(ReduceOp::Max, DType::F32) => ("fast_max_f32", true, false),
//(ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32", true, true),
//(ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32", true, true),
(ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32", true, true),
(ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32", true, true),
(ReduceOp::Sum, DType::U32) => ("fast_sum_u32", false, false),
(ReduceOp::Min, DType::U32) => ("fast_min_u32", true, false),
(ReduceOp::Max, DType::U32) => ("fast_max_u32", true, false),
//(ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32", true, true),
//(ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32", true, true),
(ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32", true, true),
(ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32", true, true),
(ReduceOp::Sum, DType::F16) => ("fast_sum_f16", false, false),
(ReduceOp::Min, DType::F16) => ("fast_min_f16", true, false),
(ReduceOp::Max, DType::F16) => ("fast_max_f16", true, false),
//(ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16", true, true),
//(ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16", true, true),
(ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16", true, true),
(ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16", true, true),
(ReduceOp::Sum, DType::BF16) => ("fast_sum_bf16", false, false),
(ReduceOp::Min, DType::BF16) => ("fast_min_bf16", true, false),
(ReduceOp::Max, DType::BF16) => ("fast_max_bf16", true, false),
//(ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16", true, true),
//(ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16", true, true),
(ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16", true, true),
(ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16", true, true),
(ReduceOp::Sum, DType::I64) => ("fast_sum_i64", false, false),
(ReduceOp::Min, DType::I64) => ("fast_min_i64", true, false),
(ReduceOp::Max, DType::I64) => ("fast_max_i64", true, false),
//(ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64", true, true),
//(ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64", true, true),
(ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64", true, true),
(ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64", true, true),
(ReduceOp::Sum, DType::U8) => ("fast_sum_u8", false, false),
(ReduceOp::Min, DType::U8) => ("fast_min_u8", true, false),
(ReduceOp::Max, DType::U8) => ("fast_max_u8", true, false),
//(ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8", true, true),
//(ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8", true, true),
//(k, dtype) => crate::bail!("Metal reduce op {k:?} {dtype:?} not implemented"),
_ => ("fall back to strided impl", false, false)
};
if name != "fall back to strided impl" {
if check_empty && layout.shape().elem_count() == 0 {
Err(crate::Error::EmptyTensor { op: "reduce" }.bt())?
(ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8", true, true),
(ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8", true, true),
(k, dtype) => {
crate::bail!("Metal contiguous reduce op {k:?} {dtype:?} not implemented")
}
let buffer = device.new_buffer(1, self.dtype, "reduce")?;
let command_buffer = self.device.command_buffer()?;
candle_metal_kernels::call_reduce_contiguous(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.shape().elem_count(),
dst_el,
&self.buffer,
layout.start_offset() * self.dtype.size_in_bytes(),
&buffer,
)
.map_err(MetalError::from)?;
return Ok(Self::new(buffer, device, self.dtype));
};
if check_empty && layout.shape().elem_count() == 0 {
Err(crate::Error::EmptyTensor { op: "reduce" }.bt())?
}
let buffer = device.new_buffer(1, self.dtype, "reduce")?;
let command_buffer = self.device.command_buffer()?;
candle_metal_kernels::call_reduce_contiguous(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.shape().elem_count(),
dst_el,
&self.buffer,
layout.start_offset() * self.dtype.size_in_bytes(),
&buffer,
)
.map_err(MetalError::from)?;
return Ok(Self::new(buffer, device, self.dtype));
}
for &dim_idx in sum_dims.iter() {
@ -602,7 +599,7 @@ impl BackendStorage for MetalStorage {
(ReduceOp::Max, DType::U8) => ("fast_max_u8_strided", true, false),
(ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8_strided", true, true),
(ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8_strided", true, true),
(k, dtype) => crate::bail!("Metal reduce op {k:?} {dtype:?} not implemented"),
(k, dtype) => crate::bail!("Metal strided reduce op {k:?} {dtype:?} not implemented"),
};
if check_empty && layout.shape().elem_count() == 0 {
Err(crate::Error::EmptyTensor { op: "reduce" }.bt())?