From 6242a1470e4ccd43c0cc3c7facae5759c9592dc1 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 27 Jul 2023 12:41:15 +0200 Subject: [PATCH] Starting the book. --- .github/workflows/book.yml | 2 +- README.md | 7 ++++ candle-book/src/README.md | 5 +++ candle-book/src/SUMMARY.md | 5 ++- candle-book/src/advanced/mkl.md | 1 + candle-book/src/guide/cheatsheet.md | 3 ++ candle-book/src/guide/hello_world.md | 54 ++++++++++++++++++++++++++- candle-book/src/guide/installation.md | 23 ++++++++++++ 8 files changed, 96 insertions(+), 4 deletions(-) create mode 100644 candle-book/src/advanced/mkl.md create mode 100644 candle-book/src/guide/cheatsheet.md diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 895a68db..bb4d0494 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -24,6 +24,6 @@ jobs: curl -sSL $url | tar -xz --directory=bin echo "$(pwd)/bin" >> $GITHUB_PATH - name: Run tests - run: cd candle-book && mdbook test + run: cd candle-book && cargo build && mdbook test -L ../target/debug/deps/ diff --git a/README.md b/README.md index 5f39d1fc..b6a30c17 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,8 @@ trunk serve --release --public-url /candle-llama2/ --port 8081 And then browse to [http://localhost:8081/candle-llama2](http://localhost:8081/candle-llama2). + + ## Features - Simple syntax, looks and like PyTorch. @@ -60,8 +62,11 @@ And then browse to - Embed user-defined ops/kernels, such as [flash-attention v2](https://github.com/LaurentMazare/candle/blob/89ba005962495f2bfbda286e185e9c3c7f5300a3/candle-flash-attn/src/lib.rs#L152). + + ## How to use ? 
+ Cheatsheet: | | Using PyTorch | Using Candle | @@ -76,6 +81,8 @@ Cheatsheet: | Saving | `torch.save({"A": A}, "model.bin")` | `tensor.save_safetensors("A", "model.safetensors")?` | | Loading | `weights = torch.load("model.bin")` | TODO (see the examples for now) | + + ## Structure diff --git a/candle-book/src/README.md b/candle-book/src/README.md index e10b99d0..be352dc1 100644 --- a/candle-book/src/README.md +++ b/candle-book/src/README.md @@ -1 +1,6 @@ # Introduction + +{{#include ../../README.md:features}} + + +This book will introduce step by step how to use `candle`. diff --git a/candle-book/src/SUMMARY.md b/candle-book/src/SUMMARY.md index 24e2b25a..ddd6e916 100644 --- a/candle-book/src/SUMMARY.md +++ b/candle-book/src/SUMMARY.md @@ -6,13 +6,13 @@ - [Installation](guide/installation.md) - [Hello World - MNIST](guide/hello_world.md) -- [PyTorch cheatsheet](guide/hello_world.md) +- [PyTorch cheatsheet](guide/cheatsheet.md) # Reference Guide - [Running a model](inference/README.md) - - [Serialization](inference/serialization.md) - [Using the hub](inference/hub.md) + - [Serialization](inference/serialization.md) - [Advanced Cuda usage](inference/cuda/README.md) - [Writing a custom kernel](inference/cuda/writing.md) - [Porting a custom kernel](inference/cuda/porting.md) @@ -24,3 +24,4 @@ - [Training](training/README.md) - [MNIST](training/mnist.md) - [Fine-tuning](training/finetuning.md) +- [Using MKL](advanced/mkl.md) diff --git a/candle-book/src/advanced/mkl.md b/candle-book/src/advanced/mkl.md new file mode 100644 index 00000000..f4dfa8ae --- /dev/null +++ b/candle-book/src/advanced/mkl.md @@ -0,0 +1 @@ +# Using MKL diff --git a/candle-book/src/guide/cheatsheet.md b/candle-book/src/guide/cheatsheet.md new file mode 100644 index 00000000..d0893ee0 --- /dev/null +++ b/candle-book/src/guide/cheatsheet.md @@ -0,0 +1,3 @@ +# Pytorch cheatsheet + +{{#include ../../../README.md:cheatsheet}} diff --git a/candle-book/src/guide/hello_world.md 
b/candle-book/src/guide/hello_world.md index c370cdd3..393576ad 100644 --- a/candle-book/src/guide/hello_world.md +++ b/candle-book/src/guide/hello_world.md @@ -1 +1,53 @@ -# PyTorch cheatsheet +# Hello world! + +We will now create the hello world of the ML world, building a model capable of solving the MNIST dataset. + +Open `src/main.rs` and fill it in with these contents: + +```rust +# extern crate candle; +use candle::{DType, Device, Result, Tensor}; + +struct Model { + first: Tensor, + second: Tensor, +} + +impl Model { + fn forward(&self, image: &Tensor) -> Result<Tensor> { + let x = image.matmul(&self.first)?; + let x = x.relu()?; + x.matmul(&self.second) + } +} + +fn main() -> Result<()> { + // Use Device::new_cuda(0)?; to use the GPU. + let device = Device::Cpu; + + let first = Tensor::zeros((784, 100), DType::F32, &device)?; + let second = Tensor::zeros((100, 10), DType::F32, &device)?; + let model = Model { first, second }; + + let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?; + + let digit = model.forward(&dummy_image)?; + println!("Digit {digit:?} digit"); + Ok(()) +} +``` + +Everything should now run with: + +```bash +cargo run --release +``` + +Now that we have the running dummy code, we can get to more advanced topics: + + +- [For PyTorch users](./cheatsheet.md) +- [Running existing models](../inference/README.md) +- [Training models](../training/README.md) + + diff --git a/candle-book/src/guide/installation.md b/candle-book/src/guide/installation.md index 25267fe2..6ed9f6c3 100644 --- a/candle-book/src/guide/installation.md +++ b/candle-book/src/guide/installation.md @@ -1 +1,24 @@ # Installation + +Start by creating a new app: + +```bash +cargo new myapp +cd myapp +cargo add --git https://github.com/LaurentMazare/candle.git candle +``` + +At this point, candle will be built **without** CUDA support.
+To get CUDA support, use the `cuda` feature: ```bash cargo add --git https://github.com/LaurentMazare/candle.git candle --features cuda ``` + +You can check that everything works properly: + +```bash +cargo build +``` + + +You can also look at the `mkl` feature, which can provide faster inference on CPU. [Using mkl](../advanced/mkl.md)