Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 02:38:10 +00:00

- Always take weights from the hub.
- Optional `model_id` + `revision` to potentially use the safetensors version.
- Optional loading for `bert-base-uncased` (`weight` vs `gamma`).
- Take the config from the hub.
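The points above describe how the example fetches everything (config, tokenizer, weights) from the Hugging Face Hub. Below is a minimal Rust sketch of that flow, assuming a hub client API along the lines of `Api::new()` plus `api.get(&repo, filename)` in `candle-hub` and a `Repo::with_revision` constructor; these names, as well as the file names `config.json`, `model.safetensors` and `pytorch_model.bin`, are assumptions for illustration rather than a verbatim copy of the example code.

use anyhow::Result;
use candle_hub::{api::Api, Repo, RepoType};

#[tokio::main]
async fn main() -> Result<()> {
    // `model_id` and `revision` stand in for the optional CLI overrides
    // mentioned above; the defaults mirror the commit description.
    let model_id = "bert-base-uncased".to_string();
    let revision = "main".to_string();
    let repo = Repo::with_revision(model_id, RepoType::Model, revision);

    // Everything is taken from the hub: config, tokenizer and weights.
    let api = Api::new()?;
    let config_filename = api.get(&repo, "config.json").await?;
    let tokenizer_filename = api.get(&repo, "tokenizer.json").await?;
    // Prefer the safetensors file when the repo has one, otherwise fall
    // back to the PyTorch checkpoint (assumed file names).
    let weights_filename = match api.get(&repo, "model.safetensors").await {
        Ok(path) => path,
        Err(_) => api.get(&repo, "pytorch_model.bin").await?,
    };

    println!("config:    {config_filename:?}");
    println!("tokenizer: {tokenizer_filename:?}");
    println!("weights:   {weights_filename:?}");
    Ok(())
}

The `tokio` dev-dependency below, with its `macros` and `rt-multi-thread` features, is what makes an async `main` like this possible in the examples.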
29 lines · 843 B · TOML
[package]
name = "candle-examples"
version = "0.1.0"
edition = "2021"

description = "Examples for the candle ML framework."
repository = "https://github.com/LaurentMazare/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT/Apache-2.0"
readme = "README.md"

[dependencies]
candle = { path = "../candle-core", default-features=false }
serde = { version = "1.0.166", features = ["derive"] }
serde_json = "1.0.99"

[dev-dependencies]
anyhow = { version = "1", features = ["backtrace"] }
candle-hub = { path = "../candle-hub" }
clap = { version = "4.2.4", features = ["derive"] }
rand = "0.8.5"
tokenizers = { version = "0.13.3", default-features=false, features=["onig"] }
tokio = { version = "1.28.2", features = ["macros", "rt-multi-thread"] }

[features]
default = ["cuda"]
cuda = ["candle/cuda"]
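Note that with `default = ["cuda"]`, the examples build against the CUDA backend unless features are overridden; on a CPU-only machine the build would need `--no-default-features` so that `candle/cuda` is not pulled in.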