From de48e6fd59ea23eeb1e9393bc2dc91e56ae1be09 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Thu, 29 Jun 2023 12:08:35 +0000
Subject: [PATCH] Putting back main.

---
 candle-core/examples/llama/main.rs | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/candle-core/examples/llama/main.rs b/candle-core/examples/llama/main.rs
index c91537d8..cc8a8ae9 100644
--- a/candle-core/examples/llama/main.rs
+++ b/candle-core/examples/llama/main.rs
@@ -1,7 +1,7 @@
-// An implementation of LLaMA https://github.com/facebookresearch/llama");");");
+// An implementation of LLaMA https://github.com/facebookresearch/llama.py
 //
 // This is based on nanoGPT in a similar way to:
-// https://github.com/Lightning-AI/lit-llama/blob/main/lit_llama/model.py");
+// https://github.com/Lightning-AI/lit-llama/blob/main/lit_llama/model.py
 //
 // The tokenizer config can be retrieved from:
 // https://huggingface.co/hf-internal-testing/llama-tokenizer/raw/main/tokenizer.json
@@ -417,7 +417,6 @@ struct Args {
 #[tokio::main]
 async fn main() -> Result<()> {
-    //use rand::prelude::*;
     use tokenizers::Tokenizer;
 
     let args = Args::parse();