mirror of
https://github.com/huggingface/candle.git
synced 2025-06-18 19:47:12 +00:00
Expanding a bit the README
This commit is contained in:
@ -1,3 +1,38 @@
|
||||
//! ML framework for Rust
|
||||
//!
|
||||
//! ```rust
|
||||
//! use candle::{Tensor, DType, Device};
|
||||
//! # use candle::Error;
|
||||
//! # fn main() -> Result<(), Error>{
|
||||
//!
|
||||
//! let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
|
||||
//! let b = Tensor::zeros((3, 4), DType::F32, &Device::Cpu)?;
|
||||
//!
|
||||
//! let c = a.matmul(&b)?;
|
||||
//! # Ok(())}
|
||||
//! ```
|
||||
//!
|
||||
//! ## Features
|
||||
//!
|
||||
//! - Simple syntax (looks and feels like PyTorch)
|
||||
//! - CPU and CUDA backends (and M1 support)
|
||||
//! - Enable serverless (CPU) small and fast deployments
|
||||
//! - Model training
|
||||
//! - Distributed computing (NCCL).
|
||||
//! - Models out of the box (Llama, Whisper, Falcon, ...)
|
||||
//!
|
||||
//! ## FAQ
|
||||
//!
|
||||
//! - Why Candle?
|
||||
//!
|
||||
//! Candle stems from the need to reduce binary size in order to make *serverless deployments*
|
||||
//! possible by making the whole engine smaller than PyTorch's very large library volume.
|
||||
//!
|
||||
//! And simply *removing Python* from production workloads.
|
||||
//! Python can really add overhead in more complex workflows and the [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches.
|
||||
//!
|
||||
//! Rust is cool, and a lot of the HF ecosystem already has Rust crates, such as [safetensors](https://github.com/huggingface/safetensors) and [tokenizers](https://github.com/huggingface/tokenizers).
|
||||
|
||||
mod backprop;
|
||||
mod conv;
|
||||
mod cpu_backend;
|
||||
|
Reference in New Issue
Block a user