From 098dd0d1e9cc2b1ca902e4e0d77a9abe3de72a9c Mon Sep 17 00:00:00 2001
From: Mahmoud
Date: Wed, 20 Sep 2023 00:54:56 -0700
Subject: [PATCH] fix: add missing `top_p` in llama_multiprocess (#905)

---
 candle-examples/examples/llama_multiprocess/main.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/candle-examples/examples/llama_multiprocess/main.rs b/candle-examples/examples/llama_multiprocess/main.rs
index 17dc90e2..8a13ce6c 100644
--- a/candle-examples/examples/llama_multiprocess/main.rs
+++ b/candle-examples/examples/llama_multiprocess/main.rs
@@ -89,6 +89,10 @@ struct Args {
     #[arg(long)]
     temperature: Option<f64>,
 
+    /// Nucleus sampling probability cutoff.
+    #[arg(long)]
+    top_p: Option<f64>,
+
     /// The seed to use when generating random samples.
     #[arg(long, default_value_t = 299792458)]
     seed: u64,
@@ -222,7 +226,7 @@ fn main() -> Result<()> {
         .to_vec();
 
     println!("starting the inference loop");
-    let mut logits_processor = LogitsProcessor::new(args.seed, args.temperature);
+    let mut logits_processor = LogitsProcessor::new(args.seed, args.temperature, args.top_p);
     let mut new_tokens = vec![];
     let start_gen = std::time::Instant::now();
     let mut index_pos = 0;