diff --git a/README.md b/README.md
index 7650d7c6..7a1154a4 100644
--- a/README.md
+++ b/README.md
@@ -87,7 +87,7 @@ And then head over to
  - LLMs: LLaMA v1 and v2, Falcon, StarCoder.
  - Whisper (multi-lingual support).
  - Stable Diffusion.
- - Computer Vision: DINOv2.
+ - Computer Vision: DINOv2, EfficientNet, yolo-v3, yolo-v8.
  - File formats: load models from safetensors, npz, ggml, or PyTorch files.
  - Serverless (on CPU), small and fast deployments.
  - Quantization support using the llama.cpp quantized types.
diff --git a/candle-examples/examples/efficientnet/main.rs b/candle-examples/examples/efficientnet/main.rs
index fb6a5806..cbe2c90a 100644
--- a/candle-examples/examples/efficientnet/main.rs
+++ b/candle-examples/examples/efficientnet/main.rs
@@ -382,8 +382,18 @@ pub fn main() -> anyhow::Result<()> {
     let model_file = match args.model {
         None => {
             let api = hf_hub::api::sync::Api::new()?;
-            let api = api.model("lmz/candle-dino-v2".into());
-            api.get("dinov2_vits14.safetensors")?
+            let api = api.model("lmz/candle-efficientnet".into());
+            let filename = match args.which {
+                Which::B0 => "efficientnet-b0.safetensors",
+                Which::B1 => "efficientnet-b1.safetensors",
+                Which::B2 => "efficientnet-b2.safetensors",
+                Which::B3 => "efficientnet-b3.safetensors",
+                Which::B4 => "efficientnet-b4.safetensors",
+                Which::B5 => "efficientnet-b5.safetensors",
+                Which::B6 => "efficientnet-b6.safetensors",
+                Which::B7 => "efficientnet-b7.safetensors",
+            };
+            api.get(filename)?
         }
         Some(model) => model.into(),
     };