mirror of https://github.com/huggingface/candle.git
synced 2025-06-16 02:38:10 +00:00
Add mkl support for matrix multiply. (#86)
* Fix some rebase issues.
* Use mkl instead.
* Use mkl in bert.
* Add the optional mkl feature.
* Conditional compilation based on the mkl feature.
* Add more mkl support.
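For context on the "optional mkl feature" and "conditional compilation" bullets above, here is a minimal sketch of how a Cargo feature can gate an MKL-backed build at compile time. It is illustrative only: the `mkl` feature name and the `intel_mkl_src` crate appear in the diff below, but the Cargo.toml excerpt, the version number, and the `backend_name` helper are assumptions, not candle's actual configuration.

// Hypothetical Cargo.toml excerpt (an assumption, shown here as a comment):
//
//   [features]
//   mkl = ["dep:intel-mkl-src"]
//
//   [dependencies]
//   intel-mkl-src = { version = "0.8", optional = true }

// When the `mkl` feature is enabled, linking intel-mkl-src pulls in Intel MKL
// as the BLAS backend; without the feature, this item is compiled out entirely.
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

fn backend_name() -> &'static str {
    // cfg! is resolved at compile time from the enabled Cargo features.
    if cfg!(feature = "mkl") {
        "mkl"
    } else {
        "default"
    }
}

fn main() {
    // Built with `cargo run --features mkl`, this would print "mkl".
    println!("matmul backend: {}", backend_name());
}

Enabling the backend is then purely a build-time choice (e.g. `cargo build --features mkl`), with no runtime cost when the feature is off.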
@@ -9,6 +9,9 @@
 // In order to convert the llama weights to a .npz file, run:
 // python examples/llama/convert_checkpoint.py ..../LLaMA/7B/consolidated.00.pth
 
+#[cfg(feature = "mkl")]
+extern crate intel_mkl_src;
+
 // TODO: This does not use a batch dimension. If adding it back, be cautious about the
 // transposition operations.
 use anyhow::{Error as E, Result};
@@ -24,7 +27,7 @@ mod var_store;
 mod weights;
 
 const MAX_SEQ_LEN: usize = 4096;
-const DTYPE: DType = DType::F16;
+const DTYPE: DType = DType::F32;
 const DEFAULT_PROMPT: &str = r"
 EDWARD:
 I wonder how our princely father 'scaped,