From 588ad4835ae9a64f95c096feab8b96ff9238321d Mon Sep 17 00:00:00 2001
From: Laurent Mazare
Date: Sun, 15 Oct 2023 10:53:25 +0100
Subject: [PATCH] Fix the verbose prompt for phi. (#1097)

---
 candle-examples/examples/phi/main.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/candle-examples/examples/phi/main.rs b/candle-examples/examples/phi/main.rs
index 605819ac..7ee99ef8 100644
--- a/candle-examples/examples/phi/main.rs
+++ b/candle-examples/examples/phi/main.rs
@@ -59,9 +59,10 @@ impl TextGeneration {
     fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
         use std::io::Write;
         println!("starting the inference loop");
-        print!("{prompt}");
-        std::io::stdout().flush()?;
         let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
+        if tokens.is_empty() {
+            anyhow::bail!("Empty prompts are not supported in the phi model.")
+        }
         if self.verbose_prompt {
             for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
                 let token = token.replace('▁', " ").replace("<0x0A>", "\n");
@@ -74,6 +75,8 @@ impl TextGeneration {
             Some(token) => *token,
             None => anyhow::bail!("cannot find the endoftext token"),
         };
+        print!("{prompt}");
+        std::io::stdout().flush()?;
         let start_gen = std::time::Instant::now();
         for index in 0..sample_len {
             let context_size = if index > 0 { 1 } else { tokens.len() };
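
--
Note, not part of the commit: after this patch, run() tokenizes the prompt
first, fails fast on an empty prompt, optionally dumps the (token, id) pairs
when verbose_prompt is set, and only then echoes the prompt, so the raw
prompt no longer appears ahead of the verbose token dump. A minimal sketch
of the resulting flow, pieced together from the two hunks above (the
<|endoftext|> lookup that sits between them is elided):

    let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
    if tokens.is_empty() {
        anyhow::bail!("Empty prompts are not supported in the phi model.")
    }
    if self.verbose_prompt {
        for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
            // print each token alongside its id, as in the surrounding context
        }
    }
    // ... <|endoftext|> token lookup elided ...
    print!("{prompt}");
    std::io::stdout().flush()?;
    let start_gen = std::time::Instant::now();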