mirror of https://github.com/huggingface/candle.git
synced 2025-06-16 02:38:10 +00:00
Add Beit model (https://arxiv.org/abs/2106.08254) (#2305)
Co-authored-by: v-espitalier <>
20 candle-examples/examples/beit/README.md Normal file
@@ -0,0 +1,20 @@
# candle-beit

[Beit](https://arxiv.org/abs/2106.08254) is a computer vision model.
In this example, it is used as an ImageNet classifier: the model returns the
probability that the image belongs to each of the 1000 ImageNet categories.
## Running an example

```bash
cargo run --example beit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg

> mountain bike, all-terrain bike, off-roader: 56.16%
> bicycle-built-for-two, tandem bicycle, tandem: 3.08%
> maillot                 : 2.23%
> alp                     : 0.88%
> crash helmet            : 0.85%
```
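
Programmatic use follows the same pattern as `main.rs` below; a minimal, hypothetical sketch (the checkpoint path is a placeholder, and a zero tensor stands in for a normalized image batch):

```rust
use candle::{DType, Device, Tensor, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::beit;

fn main() -> candle::Result<()> {
    let device = Device::Cpu;
    // Placeholder path; main.rs below downloads the checkpoint from the hub.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["beit.safetensors"], DType::F32, &device)?
    };
    let model = beit::vit_base(vb)?;
    // Stand-in for a normalized (batch, channels, height, width) = (1, 3, 384, 384) image.
    let img = Tensor::zeros((1, 3, 384, 384), DType::F32, &device)?;
    // (1, 1000) per-class probabilities.
    let probs = candle_nn::ops::softmax(&model.forward(&img)?, D::Minus1)?;
    println!("{probs}");
    Ok(())
}
```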
79 candle-examples/examples/beit/main.rs Normal file
@@ -0,0 +1,79 @@
//! BEiT: BERT Pre-Training of Image Transformers
//! https://github.com/microsoft/unilm/tree/master/beit

#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

use clap::Parser;

use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::beit;

/// Loads an image from disk using the image crate; this returns a tensor with shape
/// (3, 384, 384). The BEiT-specific normalization is applied.
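/// With mean = std = 0.5 per channel, pixels in [0, 1] are mapped to
/// (x - 0.5) / 0.5 = 2x - 1, i.e. the range [-1, 1].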
pub fn load_image384_beit_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
    let img = image::io::Reader::open(p)?
        .decode()
        .map_err(candle::Error::wrap)?
        .resize_to_fill(384, 384, image::imageops::FilterType::Triangle);
    let img = img.to_rgb8();
    let data = img.into_raw();
    // (384, 384, 3) HWC layout -> (3, 384, 384) CHW layout.
    let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?;
    let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
    let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
    (data.to_dtype(candle::DType::F32)? / 255.)?
        .broadcast_sub(&mean)?
        .broadcast_div(&std)
}

#[derive(Parser)]
struct Args {
    #[arg(long)]
    model: Option<String>,

    #[arg(long)]
    image: String,

    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
}

pub fn main() -> anyhow::Result<()> {
    let args = Args::parse();

    let device = candle_examples::device(args.cpu)?;

    let image = load_image384_beit_norm(args.image)?.to_device(&device)?;
    println!("loaded image {image:?}");

    // Use the checkpoint passed via --model, or download it from the hub.
    let model_file = match args.model {
        None => {
            let api = hf_hub::api::sync::Api::new()?;
            let api = api.model("vincent-espitalier/candle-beit".into());
            api.get("beit_base_patch16_384.in22k_ft_in22k_in1k_adapted.safetensors")?
        }
        Some(model) => model.into(),
    };
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
    let model = beit::vit_base(vb)?;
    println!("model built");
    let logits = model.forward(&image.unsqueeze(0)?)?;
    let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
        .i(0)?
        .to_vec1::<f32>()?;
    // Sort the classes by decreasing probability and print the top 5.
    let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
    prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
    for &(category_idx, pr) in prs.iter().take(5) {
        println!(
            "{:24}: {:.2}%",
            candle_examples::imagenet::CLASSES[category_idx],
            100. * pr
        );
    }
    Ok(())
}

367 candle-transformers/src/models/beit.rs Normal file
@@ -0,0 +1,367 @@
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};

const IMG_SIZE: usize = 384;
const PATCH_SIZE: usize = 16;
const NUM_CLASSES: usize = 1000;
const WINDOW_SIZE: usize = IMG_SIZE / PATCH_SIZE; // 384 / 16 = 24
const NB_TOKENS: usize = WINDOW_SIZE * WINDOW_SIZE + 1; // 24 * 24 + 1 = 577
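// (The extra token in NB_TOKENS is the CLS token prepended in
// prepare_tokens_with_mask below.)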

fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
    if bias {
        candle_nn::linear(in_dim, out_dim, vb)
    } else {
        candle_nn::linear_no_bias(in_dim, out_dim, vb)
    }
}

#[derive(Debug)]
struct Attention {
    qkv: Linear,
    proj: Linear,
    relative_position_bias_table: Tensor,
    relative_position_index: Tensor,
    num_heads: usize,
    scale: f64,
}

impl Attention {
    fn new(
        vb: VarBuilder,
        dim: usize,
        num_heads: usize,
        qkv_bias: bool,
        proj_bias: bool,
        relative_position_index: &Tensor,
    ) -> Result<Self> {
        let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?;
        let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?;
        // num_relative_distance = token-token(47x47) + token-CLS(1) + CLS-token(1) + CLS-CLS(1) = 2212
        let num_relative_distance = (2 * WINDOW_SIZE - 1) * (2 * WINDOW_SIZE - 1) + 3;
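        // Each axis has 2 * WINDOW_SIZE - 1 = 47 possible relative offsets,
        // hence the 47 * 47 token-token entries plus the 3 CLS-related ones.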
        let relative_position_bias_table = vb.get(
            (num_relative_distance, num_heads),
            "relative_position_bias_table",
        )?;
        let relative_position_index = relative_position_index.clone();
        let scale = 1. / ((dim / num_heads) as f64).sqrt();
        Ok(Self {
            qkv,
            proj,
            relative_position_bias_table,
            relative_position_index,
            num_heads,
            scale,
        })
    }
}

impl Attention {
    fn _get_rel_pos_bias(&self) -> Result<Tensor> {
        self.relative_position_bias_table
            .index_select(
                &self
                    .relative_position_index
                    .flatten_all()?
                    .to_dtype(DType::U32)?,
                0,
            )?
            .reshape((NB_TOKENS, NB_TOKENS, ()))?
            .transpose(0, 1)? // 102
            .transpose(0, 2)? // 201
            .contiguous()?
            .unsqueeze(0)
    }
}

impl Module for Attention {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (b, n, c) = xs.dims3()?;
        let qkv = self
            .qkv
            .forward(xs)?
            .reshape((b, n, 3, self.num_heads, c / self.num_heads))?
            .transpose(1, 2)? // 02134
            .transpose(0, 1)? // 20134
            .transpose(2, 3)?; // 20314
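        // qkv now has shape (3, b, num_heads, n, head_dim); indexing the first
        // axis splits it into q, k and v.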
        let q = (qkv.i(0)? * self.scale)?;
        let k = qkv.i(1)?.contiguous()?;
        let v = qkv.i(2)?.contiguous()?;
        let attn = (&q.matmul(&k.t()?)? + self._get_rel_pos_bias())?;
        let attn = candle_nn::ops::softmax(&attn, D::Minus1)?;
        let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?;
        self.proj.forward(&attn)
    }
}

#[derive(Debug)]
struct LayerScale {
    gamma: Tensor,
}

impl LayerScale {
    fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
        let gamma = vb.get(dim, "gamma")?;
        Ok(Self { gamma })
    }
}

impl Module for LayerScale {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.broadcast_mul(&self.gamma)
    }
}

#[derive(Debug)]
struct Mlp {
    fc1: Linear,
    fc2: Linear,
}

impl Mlp {
    fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> {
        let out_features = in_features;
        let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?;
        let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?;
        Ok(Self { fc1, fc2 })
    }
}

impl Module for Mlp {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.fc1.forward(xs)?.gelu()?;
        self.fc2.forward(&xs)
    }
}

#[derive(Debug)]
struct Block {
    norm1: LayerNorm,
    attn: Attention,
    ls1: LayerScale,
    norm2: LayerNorm,
    mlp: Mlp,
    ls2: LayerScale,
}

impl Block {
    fn new(
        vb: VarBuilder,
        dim: usize,
        num_heads: usize,
        relative_position_index: &Tensor,
    ) -> Result<Self> {
        let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?;
        let attn = Attention::new(
            vb.pp("attn"),
            dim,
            num_heads,
            true,
            true,
            relative_position_index,
        )?;
        let ls1 = LayerScale::new(vb.pp("ls1"), dim)?;
        let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?;
        let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?;
        let ls2 = LayerScale::new(vb.pp("ls2"), dim)?;
        Ok(Self {
            norm1,
            attn,
            ls1,
            norm2,
            mlp,
            ls2,
        })
    }
}

impl Module for Block {
    // Pre-norm transformer block: xs + ls1(attn(norm1(xs))), then the same
    // residual pattern with the MLP.
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let residual = xs;
        let xs = self
            .ls1
            .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?;
        let xs = (xs + residual)?;
        let residual = &xs;
        let xs = self
            .ls2
            .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?;
        xs + residual
    }
}

#[derive(Debug)]
struct PatchEmbed {
    proj: candle_nn::Conv2d,
    patch_size: (usize, usize),
}

impl PatchEmbed {
    fn new(vb: VarBuilder, patch_size: usize, in_chans: usize, embed_dim: usize) -> Result<Self> {
        let config = candle_nn::Conv2dConfig {
            stride: patch_size,
            ..Default::default()
        };
        let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?;
        Ok(Self {
            proj,
            patch_size: (patch_size, patch_size),
        })
    }
}

impl Module for PatchEmbed {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let (_b, _c, h, w) = xs.dims4()?;
        let (patch_h, patch_w) = self.patch_size;
        if (h % patch_h) != 0 {
            candle::bail!("image height {h} is not a multiple of patch height {patch_h}")
        }
        if (w % patch_w) != 0 {
            candle::bail!("image width {w} is not a multiple of patch width {patch_w}")
        }
        let xs = self.proj.forward(xs)?;
        let (b, c, h, w) = xs.dims4()?;
        // flatten embeddings: (b, c, h, w) -> (b, h * w, c).
        xs.reshape((b, c, h * w))?.transpose(1, 2)
    }
}

#[derive(Debug)]
pub struct BeitVisionTransformer {
    patch_embed: PatchEmbed,
    cls_token: Tensor,
    blocks: Vec<Block>,
    norm: LayerNorm,
    head: Linear,
}

impl BeitVisionTransformer {
    pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> {
        let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), PATCH_SIZE, 3, embed_dim)?;
        let cls_token = vb.get((1, 1, embed_dim), "cls_token")?;
        let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?;
        let relative_position_index = vb.get((NB_TOKENS, NB_TOKENS), "relative_position_index")?;
        let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?;
        let vb_b = vb.pp("blocks");
        let blocks = (0..depth)
            .map(|i| {
                Block::new(
                    vb_b.pp(&i.to_string()),
                    embed_dim,
                    num_heads,
                    &relative_position_index,
                )
            })
            .collect::<Result<Vec<_>>>()?;
        Ok(Self {
            patch_embed,
            cls_token,
            blocks,
            norm,
            head,
        })
    }

    fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.patch_embed.forward(xs)?;
        // Prepend the CLS token to the patch tokens.
        Tensor::cat(&[&self.cls_token, &xs], 1)
    }

    fn get_intermediate_layers_not_chunked(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
    ) -> Result<Vec<Tensor>> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        let mut output = Vec::new();
        for (i, blk) in self.blocks.iter().enumerate() {
            xs = blk.forward(&xs)?;
            if blocks_to_take.contains(&i) {
                output.push(xs.clone());
            }
        }
        if output.len() != blocks_to_take.len() {
            candle::bail!(
                "only {} / {} blocks found",
                output.len(),
                blocks_to_take.len()
            );
        }
        Ok(output)
    }

    pub fn get_intermediate_layers(
        &self,
        xs: &Tensor,
        blocks_to_take: &[usize],
        reshape: bool,
        return_class_token: bool,
        norm: bool,
    ) -> Result<Tensor> {
        let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?;
        let outputs = if norm {
            outputs
                .iter()
                .map(|out| self.norm.forward(out))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };
        let class_tokens = outputs
            .iter()
            .map(|out| out.i((.., 0)))
            .collect::<Result<Vec<_>>>()?;
        let outputs = outputs
            .iter()
            .map(|out| out.i((.., 1..)))
            .collect::<Result<Vec<_>>>()?;

        let outputs = if reshape {
            let (b, _c, w, h) = xs.dims4()?;
            let patch_size = self.patch_embed.patch_size.0;
            let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size));
            outputs
                .iter()
                .map(|out| {
                    out.reshape((b, w / patch_size, h / patch_size, num_channels))?
                        .transpose(2, 3)?
                        .transpose(1, 2)
                })
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };

        let outputs = if return_class_token {
            outputs
                .iter()
                .zip(class_tokens.iter())
                .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1))
                .collect::<Result<Vec<_>>>()?
        } else {
            outputs
        };

        Tensor::stack(&outputs[..], 0)
    }
}

impl Module for BeitVisionTransformer {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = self.prepare_tokens_with_mask(xs)?;
        for blk in self.blocks.iter() {
            xs = blk.forward(&xs)?
        }
        // Average the patch tokens (the CLS token is dropped), then normalize
        // and apply the classification head.
        let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?;
        let xs_norm = self.norm.forward(&xs_moy_local_tokens)?;
        self.head.forward(&xs_norm)
    }
}
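
// Model size presets: base = 12 blocks, width 768, 12 heads; large = 24 blocks,
// width 1024, 16 heads.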
pub fn vit_base(vb: VarBuilder) -> Result<BeitVisionTransformer> {
    BeitVisionTransformer::new(vb, 12, 768, 12)
}

pub fn vit_large(vb: VarBuilder) -> Result<BeitVisionTransformer> {
    BeitVisionTransformer::new(vb, 24, 1024, 16)
}

candle-transformers/src/models/mod.rs
@@ -1,3 +1,4 @@
pub mod beit;
pub mod bert;
pub mod bigcode;
pub mod blip;