Little docs changes (#791)

* Little doc fixes

* change imports in lib

* rename candle_core to candle

* revert "rename candle_core to candle"
Author: Ssslakter
Date: 2023-09-10 18:02:52 +07:00
Committed by: GitHub
Parent: 35f72514f5
Commit: 6c58fc59fd
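
For context, the snippets touched in the diff below rely on imports along these lines (a sketch only; the exact `use` statements are not part of this diff, and the `candle_core` → `candle` rename mentioned in the message was reverted):

```rust
// The types referenced in the hunks below come from candle_core
// (plus a Linear layer type in the later hunks).
use candle_core::{DType, Device, Result, Tensor};
```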


@@ -25,8 +25,8 @@ fn main() -> Result<()> {
     // Use Device::new_cuda(0)?; to use the GPU.
     let device = Device::Cpu;
-    let first = Tensor::zeros((784, 100), DType::F32, &device)?;
-    let second = Tensor::zeros((100, 10), DType::F32, &device)?;
+    let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
+    let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
     let model = Model { first, second };
     let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
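
The change above (repeated in the hunks below) swaps the all-zero dummy weights for normally distributed ones: with `Tensor::zeros`, every matmul in the example yields an all-zero result, so the printed digit carries no information. A minimal standalone sketch of the two constructors, using the shapes from this hunk:

```rust
use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Old: all-zero weights; any input multiplied by these gives zeros.
    let zeros = Tensor::zeros((784, 100), DType::F32, &device)?;
    // New: weights sampled from a normal distribution (mean 0, std 1),
    // so the example's forward pass produces a non-degenerate output.
    let randn = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
    println!("{:?} {:?}", zeros.shape(), randn.shape());
    Ok(())
}
```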
@@ -110,15 +110,15 @@ fn main() -> Result<()> {
     let device = Device::cuda_if_available(0)?;
     // Creating a dummy model
-    let weight = Tensor::zeros((784, 100), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
     let bias = Tensor::zeros((100, ), DType::F32, &device)?;
     let first = Linear{weight, bias};
-    let weight = Tensor::zeros((100, 10), DType::F32, &device)?;
-    let bias = Tensor::zeros((10, ), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
+    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
     let second = Linear{weight, bias};
     let model = Model { first, second };
-    let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
     // Inference on the model
     let digit = model.forward(&dummy_image)?;
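
This hunk targets the guide's second version of the example, which defines its own `Linear`. A sketch of the definitions it assumes (they live earlier on the page and are not part of this diff):

```rust
use candle_core::{Result, Tensor};

// Hand-rolled fully connected layer: y = x @ weight + bias.
struct Linear {
    weight: Tensor,
    bias: Tensor,
}

impl Linear {
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let x = x.matmul(&self.weight)?;
        x.broadcast_add(&self.bias)
    }
}

// Two-layer MLP over flattened 28x28 images.
struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}
```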
@@ -167,15 +167,15 @@ fn main() -> Result<()> {
     let device = Device::Cpu;
     // This has changed (784, 100) -> (100, 784) !
-    let weight = Tensor::zeros((100, 784), DType::F32, &device)?;
-    let bias = Tensor::zeros((100, ), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
+    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
     let first = Linear::new(weight, Some(bias));
-    let weight = Tensor::zeros((10, 100), DType::F32, &device)?;
-    let bias = Tensor::zeros((10, ), DType::F32, &device)?;
+    let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
+    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
     let second = Linear::new(weight, Some(bias));
     let model = Model { first, second };
-    let dummy_image = Tensor::zeros((1, 784), DType::F32, &device)?;
+    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
     let digit = model.forward(&dummy_image)?;
     println!("Digit {digit:?} digit");
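
The last code hunk is the version built on `candle_nn::Linear`, which stores its weight as (out_features, in_features); that is what the `(784, 100) -> (100, 784)` comment refers to, and why the shapes are transposed relative to the previous hunks. A sketch of the surrounding `Model` (assumed from the guide, not shown in this diff):

```rust
use candle_core::{Result, Tensor};
use candle_nn::{Linear, Module};

struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        // Module::forward on candle_nn::Linear computes x @ weight^T + bias.
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}
```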
@@ -188,8 +188,8 @@ Feel free to modify this example to use `Conv2d` to create a classical convnet instead.
 Now that we have the running dummy code we can get to more advanced topics:
-- [For PyTorch users](./guide/cheatsheet.md)
-- [Running existing models](./inference/README.md)
-- [Training models](./training/README.md)
+- [For PyTorch users](../guide/cheatsheet.md)
+- [Running existing models](../inference/README.md)
+- [Training models](../training/README.md)