Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 18:48:51 +00:00
s/candle/candle_core/g
@@ -10,17 +10,17 @@ Then let's start by downloading the [model file](https://huggingface.co/bert-bas
 
 
 ```rust
-# extern crate candle;
+# extern crate candle_core;
 # extern crate hf_hub;
 use hf_hub::api::sync::Api;
-use candle::Device;
+use candle_core::Device;
 
 let api = Api::new().unwrap();
 let repo = api.model("bert-base-uncased".to_string());
 
 let weights = repo.get("model.safetensors").unwrap();
 
-let weights = candle::safetensors::load(weights, &Device::Cpu);
+let weights = candle_core::safetensors::load(weights, &Device::Cpu);
 ```
 
 We now have access to all the [tensors](https://huggingface.co/bert-base-uncased?show_tensors=true) within the file.
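For context, after this rename the downloading snippet reads roughly as below. This is a minimal standalone sketch, not part of the commit: the final loop is added here only to show that `candle_core::safetensors::load` returns a map from tensor names to tensors that can be inspected directly.

```rust
use candle_core::Device;
use hf_hub::api::sync::Api;

fn main() {
    // Download model.safetensors for bert-base-uncased from the Hugging Face Hub.
    let api = Api::new().unwrap();
    let repo = api.model("bert-base-uncased".to_string());
    let weights_path = repo.get("model.safetensors").unwrap();

    // `candle_core::safetensors::load` returns a HashMap<String, Tensor>,
    // so every tensor in the file can be listed by name and shape.
    let weights = candle_core::safetensors::load(weights_path, &Device::Cpu).unwrap();
    for (name, tensor) in weights.iter() {
        println!("{name}: {:?}", tensor.shape());
    }
}
```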
@@ -48,7 +48,7 @@ cargo add hf-hub --features tokio
 Now that we have our weights, we can use them in our bert architecture:
 
 ```rust
-# extern crate candle;
+# extern crate candle_core;
 # extern crate candle_nn;
 # extern crate hf_hub;
 # use hf_hub::api::sync::Api;
@@ -57,10 +57,10 @@ Now that we have our weights, we can use them in our bert architecture:
 # let repo = api.model("bert-base-uncased".to_string());
 #
 # let weights = repo.get("model.safetensors").unwrap();
-use candle::{Device, Tensor, DType};
+use candle_core::{Device, Tensor, DType};
 use candle_nn::Linear;
 
-let weights = candle::safetensors::load(weights, &Device::Cpu).unwrap();
+let weights = candle_core::safetensors::load(weights, &Device::Cpu).unwrap();
 
 let weight = weights.get("bert.encoder.layer.0.attention.self.query.weight").unwrap();
 let bias = weights.get("bert.encoder.layer.0.attention.self.query.bias").unwrap();
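For reference, a minimal sketch (not part of this commit) of where the snippet above is heading: the extracted query weight and bias can be wrapped in a `candle_nn::Linear` and applied to activations. The dummy input and the 768 hidden size of bert-base-uncased are assumptions made here purely for illustration.

```rust
use candle_core::{DType, Device, Tensor};
use candle_nn::{Linear, Module};
use hf_hub::api::sync::Api;

fn main() {
    // Same download/load steps as above, using the renamed candle_core crate.
    let api = Api::new().unwrap();
    let repo = api.model("bert-base-uncased".to_string());
    let weights_path = repo.get("model.safetensors").unwrap();
    let weights = candle_core::safetensors::load(weights_path, &Device::Cpu).unwrap();

    // Query projection of the first self-attention layer.
    let weight = weights
        .get("bert.encoder.layer.0.attention.self.query.weight")
        .unwrap();
    let bias = weights
        .get("bert.encoder.layer.0.attention.self.query.bias")
        .unwrap();

    // Wrap the raw tensors in a Linear layer and run a forward pass on a
    // dummy batch holding one 768-dimensional hidden state (illustrative only).
    let linear = Linear::new(weight.clone(), Some(bias.clone()));
    let input = Tensor::zeros((1, 768), DType::F32, &Device::Cpu).unwrap();
    let output = linear.forward(&input).unwrap();
    println!("output shape: {:?}", output.shape());
}
```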