Files
candle/candle-core/Cargo.toml
Laurent Mazare 9ce0f1c010 Sketch the candle-nn crate. (#115)
* Sketch the candle-nn crate.

* Tweak the cuda dependencies.

* More cuda tweaks.
2023-07-10 08:50:09 +01:00

39 lines
1.4 KiB
TOML

[package]
name = "candle"
version = "0.1.0"
categories = ["science"]
edition = "2021"
keywords = ["blas", "tensor", "machine-learning"]
# Cargo deprecated the `/` separator; SPDX license expressions use `OR`.
license = "MIT OR Apache-2.0"
readme = "README.md"
repository = "https://github.com/LaurentMazare/candle"
description = "Minimalist ML framework."

[dependencies]
blas = { version = "0.22.0", optional = true }
byteorder = "1.4.3"
candle-kernels = { path = "../candle-kernels", optional = true }
# Re-enable this once 0.9.13 has been released, as it would include the cublas-f16 changes.
# cudarc = { version = "0.9.13", optional = true, features = ["f16"] }
cudarc = { git = "https://github.com/LaurentMazare/cudarc.git", branch = "cublas-bf16", optional = true, features = ["f16"] }
# TODO: Switch back to the official gemm implementation once something similar to
# https://github.com/sarah-ek/gemm/pull/8 is available.
gemm = { git = "https://github.com/LaurentMazare/gemm.git", branch = "f16-vectorize-pack" }
half = { version = "2.3.1", features = ["num-traits"] }
intel-mkl-src = { version = "0.8.1", optional = true, features = ["mkl-dynamic-lp64-iomp"] }
memmap2 = "0.7.1"
num-traits = "0.2.15"
num_cpus = "1.15.0"
safetensors = "0.3.1"
thiserror = "1"
zip = { version = "0.6.6", default-features = false }

[dev-dependencies]
# The `backtrace` feature makes `anyhow::Error` capture backtraces in tests.
anyhow = { version = "1", features = ["backtrace"] }

[features]
# NOTE(review): `cuda` in the default set means downstream consumers need the
# CUDA toolchain unless they build with `default-features = false` — confirm
# this is intentional for a library crate.
default = ["cuda"]
# Optional dependencies (`dep:` syntax) are pulled in only by their feature.
cuda = ["dep:cudarc", "dep:candle-kernels"]
mkl = ["dep:blas", "dep:intel-mkl-src"]