mirror of https://github.com/huggingface/candle.git
Use the newly minted gguf file.
@@ -167,8 +167,10 @@ fn run(args: Args) -> Result<()> {
     println!("{timesteps:?}");
     if quantized {
         let model_file = match model {
-            Model::Schnell => bf_repo.get("flux1-schnell.safetensors")?,
-            Model::Dev => bf_repo.get("flux1-dev.safetensors")?,
+            Model::Schnell => api
+                .repo(hf_hub::Repo::model("lmz/candle-flux".to_string()))
+                .get("flux1-schnell.gguf")?,
+            Model::Dev => todo!(),
         };
         let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
             model_file, &device,
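
For context, a minimal sketch of what the new quantized branch does, assuming hf_hub's sync API and candle-transformers' quantized VarBuilder as referenced in the diff, and assuming `candle-core` is imported under the `candle` alias as in the candle examples. The `load_quantized_flux` wrapper itself is hypothetical and not part of this commit.

use anyhow::Result;
use candle::Device;
use candle_transformers::quantized_var_builder::VarBuilder;

// Hypothetical helper mirroring the new quantized path: fetch the
// pre-quantized gguf weights from the lmz/candle-flux hub repo and
// expose them through a quantized VarBuilder.
fn load_quantized_flux(device: &Device) -> Result<VarBuilder> {
    let api = hf_hub::api::sync::Api::new()?;
    let repo = api.repo(hf_hub::Repo::model("lmz/candle-flux".to_string()));
    // Downloads the file on first use, then serves it from the local hub cache.
    let model_file = repo.get("flux1-schnell.gguf")?;
    // Builds a VarBuilder backed by the quantized tensors stored in the gguf.
    let vb = VarBuilder::from_gguf(model_file, device)?;
    Ok(vb)
}

The resulting VarBuilder then stands in for the safetensors-backed one when the quantized model is constructed, which is what the diff switches over to.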