Switch to using llama-v2 by default. (#251)

Author: Laurent Mazare
Date: 2023-07-26 17:18:27 +01:00
Committed by: GitHub
Parent: 89fd988836
Commit: 84ad558e50


@@ -115,7 +115,7 @@ struct Args {
     model_id: Option<String>,
     #[arg(long)]
-    v2: bool,
+    v1: bool,
     #[arg(long)]
     use_flash_attn: bool,
@@ -139,10 +139,10 @@ fn main() -> Result<()> {
         None => {
             let api = Api::new()?;
             let model_id = args.model_id.unwrap_or_else(|| {
-                if args.v2 {
-                    "meta-llama/Llama-2-7b-hf".to_string()
-                } else {
+                if args.v1 {
                     "Narsil/amall-7b".to_string()
+                } else {
+                    "meta-llama/Llama-2-7b-hf".to_string()
                 }
             });
             println!("loading the model weights from {model_id}");