Add mkl support for matrix multiply. (#86)

* Fix some rebase issues.

* Use mkl instead.

* Use mkl in bert.

* Add the optional mkl feature.

* Conditional compilation based on the mkl feature.

* Add more mkl support.
This commit is contained in:
Laurent Mazare
2023-07-06 11:05:05 +01:00
committed by GitHub
parent cd230d26fe
commit c297a50960
9 changed files with 118 additions and 3 deletions

View File

@@ -9,6 +9,9 @@
// In order to convert the llama weights to a .npz file, run:
// python examples/llama/convert_checkpoint.py ..../LLaMA/7B/consolidated.00.pth
+#[cfg(feature = "mkl")]
+extern crate intel_mkl_src;
// TODO: This does not use a batch dimension. If adding it back, be cautious about the
// transposition operations.
use anyhow::{Error as E, Result};
@@ -24,7 +27,7 @@ mod var_store;
mod weights;
const MAX_SEQ_LEN: usize = 4096;
-const DTYPE: DType = DType::F16;
+const DTYPE: DType = DType::F32;
const DEFAULT_PROMPT: &str = r"
EDWARD:
I wonder how our princely father 'scaped,