Skip to content

Commit

Permalink
Fix the verbose prompt for phi. (#1097)
Browse files (browse the repository at this point in the history)
  • Loading branch information
LaurentMazare authored Oct 15, 2023
1 parent b73c35c commit 588ad48
Showing 1 changed file with 5 additions and 2 deletions.
7 changes: 5 additions & 2 deletions candle-examples/examples/phi/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,10 @@ impl TextGeneration {
fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
use std::io::Write;
println!("starting the inference loop");
print!("{prompt}");
std::io::stdout().flush()?;
let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
if tokens.is_empty() {
anyhow::bail!("Empty prompts are not supported in the phi model.")
}
if self.verbose_prompt {
for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
let token = token.replace('▁', " ").replace("<0x0A>", "\n");
Expand All @@ -74,6 +75,8 @@ impl TextGeneration {
Some(token) => *token,
None => anyhow::bail!("cannot find the endoftext token"),
};
print!("{prompt}");
std::io::stdout().flush()?;
let start_gen = std::time::Instant::now();
for index in 0..sample_len {
let context_size = if index > 0 { 1 } else { tokens.len() };
Expand Down

0 comments on commit 588ad48

Please sign in to comment.