Add support for Llama 3.1 (#2359)

* Add Llama 3.1 rope (frequency scaling sketched below)

* Clippy

* Format

* Clippy

* Add support for multiple eos tokens (see the untagged-enum sketch below)

* Untagged either

* Remove either dep and fix settings.json

* Make the max positional embeddings configurable
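
Llama 3.1 extends the context window by rescaling the RoPE inverse frequencies against the model's original (pre-extension) maximum position embeddings, which is also why that value becomes configurable in the last bullet. Below is a minimal sketch of the published Llama 3.1 scaling rule, not the code from this commit; the struct and function names, and the example values in `main`, are illustrative.

```rust
use std::f32::consts::PI;

/// Hypothetical mirror of the `rope_scaling` block in a Llama 3.1 config.json.
struct Llama3RopeConfig {
    factor: f32,                             // e.g. 8.0
    low_freq_factor: f32,                    // e.g. 1.0
    high_freq_factor: f32,                   // e.g. 4.0
    original_max_position_embeddings: usize, // e.g. 8192
}

/// Rescale per-dimension RoPE inverse frequencies following the Llama 3.1 rule:
/// short wavelengths are kept, long wavelengths are divided by `factor`, and the
/// band in between is linearly interpolated between the two.
fn scale_inv_freqs(inv_freqs: &[f32], cfg: &Llama3RopeConfig) -> Vec<f32> {
    let orig = cfg.original_max_position_embeddings as f32;
    let low_freq_wavelen = orig / cfg.low_freq_factor;
    let high_freq_wavelen = orig / cfg.high_freq_factor;
    inv_freqs
        .iter()
        .map(|&f| {
            let wavelen = 2.0 * PI / f;
            if wavelen < high_freq_wavelen {
                f
            } else if wavelen > low_freq_wavelen {
                f / cfg.factor
            } else {
                let smooth = (orig / wavelen - cfg.low_freq_factor)
                    / (cfg.high_freq_factor - cfg.low_freq_factor);
                (1.0 - smooth) * f / cfg.factor + smooth * f
            }
        })
        .collect()
}

fn main() {
    let cfg = Llama3RopeConfig {
        factor: 8.0,
        low_freq_factor: 1.0,
        high_freq_factor: 4.0,
        original_max_position_embeddings: 8192,
    };
    // Illustrative inverse frequencies for a 128-dim rotary head with base 500_000.
    let inv_freqs: Vec<f32> = (0..64)
        .map(|i| 1.0 / 500_000f32.powf(2.0 * i as f32 / 128.0))
        .collect();
    let scaled = scale_inv_freqs(&inv_freqs, &cfg);
    println!("lowest inv_freq scaled from {} to {}", inv_freqs[63], scaled[63]);
}
```

High-frequency (short-wavelength) components are left untouched, very low-frequency ones are slowed down by `factor`, and the band in between is blended so the transition stays smooth.
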
Eric Buehler
2024-07-26 15:32:26 -04:00
committed by GitHub
parent ddafc61055
commit 0f5cbb08b3
24 changed files with 165 additions and 71 deletions
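
The "multiple eos tokens" and "untagged either" bullets indicate that the model config now accepts `eos_token_id` as either a single id or a list of ids, parsed with an untagged serde enum rather than the `either` crate. A minimal sketch of that pattern, assuming serde and serde_json are available; the type and field names here are illustrative, not necessarily those used in the commit.

```rust
use serde::Deserialize;

/// Hypothetical config fragment: `eos_token_id` may be a single id (Llama 3)
/// or a list of ids (Llama 3.1), so an untagged enum lets serde accept either
/// JSON shape without pulling in the `either` crate.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
enum EosTokenId {
    Single(u32),
    Multiple(Vec<u32>),
}

#[derive(Debug, Deserialize)]
struct Config {
    eos_token_id: Option<EosTokenId>,
}

fn main() -> serde_json::Result<()> {
    // Both JSON shapes deserialize into the same field.
    let single: Config = serde_json::from_str(r#"{ "eos_token_id": 128001 }"#)?;
    let multi: Config = serde_json::from_str(r#"{ "eos_token_id": [128001, 128008, 128009] }"#)?;
    println!("{single:?}\n{multi:?}");
    Ok(())
}
```

With `#[serde(untagged)]`, serde tries each variant in order until one matches the JSON shape, so both the single-integer and list forms parse into the same field.
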


@@ -12,7 +12,7 @@ fn run_unary_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &
     let m = 1024;
     let k = 1024;
-    let tensor = Tensor::arange(0.0f32, (b * m * k) as f32, &device)
+    let tensor = Tensor::arange(0.0f32, (b * m * k) as f32, device)
         .unwrap()
         .to_dtype(dtype)
         .unwrap()