Faster matmul when we can fall back to gemv.

This commit is contained in:
laurent
2023-08-04 22:44:30 +01:00
parent f7b2a0391d
commit 3fa3623135
3 changed files with 14 additions and 5 deletions

View File

@@ -5,6 +5,8 @@ use anyhow::Result;
use candle_core::{Device, Tensor};
fn main() -> Result<()> {
let mut file = std::fs::File::open("ggml.bin")?;
let data = candle_core::ggml::Content::read(&mut file, &Device::Cpu)?;
let a = Tensor::randn(0f32, 1., (2, 3), &Device::Cpu)?;
let b = Tensor::randn(0f32, 1., (3, 4), &Device::Cpu)?;
let c = a.matmul(&b)?;

View File

@@ -1010,12 +1010,18 @@ impl Map2 for MatMul {
};
let c_skip: usize = m * n;
let mut dst = vec![T::zero(); b * m * n];
let (dst_rs, dst_cs) = if m == 1 {
(1, 1)
} else if n == 1 {
(1, 1)
} else {
let dst_shape: Shape = (m, n).into();
let dst_strides = dst_shape.stride_contiguous();
let dst_rs = dst_strides[0];
let dst_cs = dst_strides[1];
(dst_strides[0], dst_strides[1])
};
let mut dst = vec![T::zero(); b * m * n];
let num_threads = crate::utils::get_num_threads();
let parallelism = if num_threads > 1 {
Parallelism::Rayon(num_threads)

View File

@@ -111,6 +111,7 @@ impl TransformerWeights {
// matrix column major rather than row major. This ends up speeding up text generation from
// 120 token/s to 220 token/s on a Ryzen 2600X.
let tr = device.is_cpu() && !candle::utils::has_mkl();
let tr = false;
let tr = |x: Tensor| if tr { x.t()?.contiguous()?.t() } else { Ok(x) };
let mut ws = std::collections::HashMap::new();
let mut insert = |name: &str, t: Tensor| {