Add support for Llama 3.1 (#2359)

* Add Llama 3.1 rope

* Clippy

* Format

* Clippy

* Add support for multiple eos tokens

* Untagged either

* Remove either dep and fix settings.json

* Make the max positional embeddings configurable
This commit is contained in:
Eric Buehler
2024-07-26 15:32:26 -04:00
committed by GitHub
parent ddafc61055
commit 0f5cbb08b3
24 changed files with 165 additions and 71 deletions

View File

@@ -2,7 +2,7 @@ use std::collections::HashMap;
use crate::models::{
clip::{text_model::Activation, vision_model::ClipVisionConfig},
llama::Config,
llama::{Config, LlamaEosToks},
};
use serde::{Deserialize, Serialize};
@@ -73,8 +73,10 @@ impl LLaVAConfig {
rms_norm_eps: self.rms_norm_eps as f64,
rope_theta: self.rope_theta,
bos_token_id: Some(self.bos_token_id as u32),
eos_token_id: Some(self.eos_token_id as u32),
eos_token_id: Some(LlamaEosToks::Single(self.eos_token_id as u32)),
use_flash_attn: false,
rope_scaling: None, // Assume we don't have LLaVA for Llama 3.1
max_position_embeddings: self.max_position_embeddings,
}
}
}