Use the tokenizer-output-stream in the llama example. (#1715)

* Use the tokenizer-output-stream in the llama example.

* Also use tokenizer-output-stream for llama2-c.
This commit is contained in:
Laurent Mazare
2024-02-15 16:47:33 +01:00
committed by GitHub
parent 058a910d0e
commit 7c7400fb63
4 changed files with 17 additions and 20 deletions

View File

@@ -143,7 +143,7 @@ struct Args {
     seed: u64,
     /// The length of the sample to generate (in tokens).
-    #[arg(long, short = 'n', default_value_t = 100)]
+    #[arg(long, short = 'n', default_value_t = 10000)]
     sample_len: usize,
     #[arg(long, default_value = "mistralai/Mixtral-8x7B-v0.1")]