Mirror of https://github.com/huggingface/candle.git, synced 2025-06-17 02:58:50 +00:00
Use the newly minted gguf file.
@@ -167,8 +167,10 @@ fn run(args: Args) -> Result<()> {
     println!("{timesteps:?}");
     if quantized {
         let model_file = match model {
-            Model::Schnell => bf_repo.get("flux1-schnell.safetensors")?,
-            Model::Dev => bf_repo.get("flux1-dev.safetensors")?,
+            Model::Schnell => api
+                .repo(hf_hub::Repo::model("lmz/candle-flux".to_string()))
+                .get("flux1-schnell.gguf")?,
+            Model::Dev => todo!(),
         };
         let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
             model_file, &device,
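
For reference, a minimal standalone sketch of the code path this commit switches to: download the pre-quantized gguf from the lmz/candle-flux Hub repo with hf-hub and open it with candle's quantized VarBuilder. The function name, the way the Api and Device are created, and the anyhow error handling are illustrative assumptions, not part of the commit.

// Sketch only (assumptions noted above); mirrors the new quantized loading path.
use candle_core::Device;
use candle_transformers::quantized_var_builder::VarBuilder;

fn load_quantized_flux(device: &Device) -> anyhow::Result<VarBuilder> {
    // Fetch (or reuse from the local cache) the gguf file from the Hub.
    let api = hf_hub::api::sync::Api::new()?;
    let model_file = api
        .repo(hf_hub::Repo::model("lmz/candle-flux".to_string()))
        .get("flux1-schnell.gguf")?;
    // from_gguf reads the gguf file and exposes its quantized tensors by name.
    let vb = VarBuilder::from_gguf(model_file, device)?;
    Ok(vb)
}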