Use the hub weights for efficientnet. (#573)
@@ -87,7 +87,7 @@ And then head over to
 - LLMs: LLaMA v1 and v2, Falcon, StarCoder.
 - Whisper (multi-lingual support).
 - Stable Diffusion.
-- Computer Vision: DINOv2.
+- Computer Vision: DINOv2, EfficientNet, yolo-v3, yolo-v8.
 - File formats: load models from safetensors, npz, ggml, or PyTorch files.
 - Serverless (on CPU), small and fast deployments.
 - Quantization support using the llama.cpp quantized types.
@@ -382,8 +382,18 @@ pub fn main() -> anyhow::Result<()> {
     let model_file = match args.model {
         None => {
             let api = hf_hub::api::sync::Api::new()?;
-            let api = api.model("lmz/candle-dino-v2".into());
-            api.get("dinov2_vits14.safetensors")?
+            let api = api.model("lmz/candle-efficientnet".into());
+            let filename = match args.which {
+                Which::B0 => "efficientnet-b0.safetensors",
+                Which::B1 => "efficientnet-b1.safetensors",
+                Which::B2 => "efficientnet-b2.safetensors",
+                Which::B3 => "efficientnet-b3.safetensors",
+                Which::B4 => "efficientnet-b4.safetensors",
+                Which::B5 => "efficientnet-b5.safetensors",
+                Which::B6 => "efficientnet-b6.safetensors",
+                Which::B7 => "efficientnet-b7.safetensors",
+            };
+            api.get(filename)?
         }
         Some(model) => model.into(),
     };
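For readers who want to reuse the second hunk's weight-loading pattern outside the example, the sketch below fetches an EfficientNet checkpoint from the lmz/candle-efficientnet repo with the same hf_hub sync API the diff uses. The fetch_weights helper and the two-variant Which enum are illustrative assumptions, not the example's actual structure: the real example covers B0 through B7 and reads the variant from its CLI arguments.

use anyhow::Result;

// Illustrative subset of the example's `Which` variant enum; the real
// example exposes B0 through B7.
enum Which {
    B0,
    B1,
}

// Resolve the checkpoint filename for a variant and fetch it from the hub
// (downloaded on first use, then served from the local hf_hub cache).
fn fetch_weights(which: Which) -> Result<std::path::PathBuf> {
    let api = hf_hub::api::sync::Api::new()?;
    let repo = api.model("lmz/candle-efficientnet".into());
    let filename = match which {
        Which::B0 => "efficientnet-b0.safetensors",
        Which::B1 => "efficientnet-b1.safetensors",
    };
    Ok(repo.get(filename)?)
}

fn main() -> Result<()> {
    let weights = fetch_weights(Which::B0)?;
    println!("weights cached at {}", weights.display());
    Ok(())
}

The returned path points into the local hub cache, so repeated runs skip the download; the example then loads that safetensors file to build the model.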