More Model Module Docs (#2623)

* dinov2

* add another example

* add dinov2reg4

* eva2

* efficientvit

* moondream

* update t5

* update t5

* rwkv

* stable diffusion docs

* add wasm link

* add segment_anything

* adjust for clippy

* ignore bertdoc

* dinov2 ignore

* update doc block to be text (see the fence-annotation sketch after this list)

* remove the rust blocks for the moment

* bump python to 3.11

* add a setup-python step

* add py311 to test as well
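
Two of the bullets above ("update doc block to be text", "remove the rust blocks for the moment") refer to rustdoc fence annotations: a doc-comment fence tagged `text` is rendered verbatim and never compiled, a default `rust` fence is compiled and run as a doctest, `no_run` is compiled but not executed, and `ignore` (cf. "ignore bertdoc", "dinov2 ignore") is skipped entirely. A minimal illustrative doc comment, with placeholder code, might look like:

//! Module docs with a non-doctest example block.
//!
//! ```text
//! // Rendered as plain text: rustdoc neither compiles nor runs this block,
//! // so pseudo-code like the line below cannot break CI.
//! let embeddings = model.forward(prompt)?;
//! ```

Switching a failing example from `rust`/`no_run` to `text` or `ignore` therefore unblocks doc builds without deleting the prose, which is what the diff below does for bert.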
zachcp
2024-11-17 14:27:24 -05:00
committed by GitHub
parent a3f200e369
commit 12d7e7b145
12 changed files with 291 additions and 72 deletions

candle-transformers/src/models/bert.rs
@@ -7,56 +7,6 @@
 //! - Upstream [Github repo](https://github.com/google-research/bert).
 //! - See bert in [candle-examples](https://github.com/huggingface/candle/tree/main/candle-examples/) for runnable code
 //!
-//! ```no_run
-//! // for sentence embeddings
-//! # use candle_core::Tensor;
-//! # use candle_nn::{VarBuilder, Module};
-//! # fn main() -> candle_core::Result<()> {
-//! # let model = todo!();
-//! # let prompt = "Here is a test sentence";
-//! let embeddings = model.forward(prompt)?;
-//! // Returns tensor of shape [1, 7, 384]
-//! println!("{embeddings}");
-//! # Ok(())
-//! # }
-//!
-//! // Different models can be loaded using the model ID
-//! # use candle_core::Tensor;
-//! # use candle_nn::{VarBuilder, Module};
-//! # fn main() -> candle_core::Result<()> {
-//! # let vb = todo!();
-//! # let config = todo!();
-//! let model = BertModel::load(vb, &config )?;
-//! # Ok(())
-//! # }
-//!
-//! // Gelu approximation
-//! // You can get a speedup by configuring the model
-//! // to use an approximation of the gelu activation:
-//! # use candle_core::Tensor;
-//! # use candle_nn::{VarBuilder, Module};
-//! # fn main() -> candle_core::Result<()> {
-//! # let mut config = todo!();
-//! config.hidden_act = HiddenAct::GeluApproximate;
-//! # Ok(())
-//! # }
-//!
-//! // Similarities
-//! // Bert can compute sentence embeddings which can then be used to calculate
-//! // semantic similarities between sentences through cosine similarity scoring.
-//! // The sentence embeddings are computed using average pooling across all tokens.
-//! # use candle_core::Tensor;
-//! # use candle_nn::{VarBuilder, Module};
-//! # fn main() -> candle_core::Result<()> {
-//! # let model = todo!();
-//! let sentence1 = "The new movie is awesome";
-//! let sentence2 = "The new movie is so great";
-//! let emb1 = model.forward(sentence1)?;
-//! let emb2 = model.forward(sentence2)?;
-//! # Ok(())
-//! # }
-//! ```
-//!
 use super::with_tracing::{layer_norm, linear, LayerNorm, Linear};
 use candle::{DType, Device, Result, Tensor};
 use candle_nn::{embedding, Embedding, Module, VarBuilder};
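
The hunk above deletes the module's doctest block outright: its examples were stubbed with `todo!()` and, for instance, passed string prompts to `forward`, while `BertModel::forward` takes tensors, so the block could not compile as-is. Below is a hedged sketch of what the removed examples described (loading from safetensors, the approximate-gelu switch, mean pooling, cosine similarity), roughly following the candle-examples flow. It assumes locally downloaded `config.json` / `model.safetensors` files, the `anyhow` and `serde_json` crates, hard-coded token ids standing in for real tokenizer output, and the three-argument `forward` of recent candle versions; `mean_pool` and `cosine_similarity` are illustrative helpers, not candle API:

use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config, HiddenAct};

// Cosine similarity between two pooled embeddings, as in candle-examples.
fn cosine_similarity(a: &Tensor, b: &Tensor) -> anyhow::Result<f32> {
    let sum_ab = (a * b)?.sum_all()?.to_scalar::<f32>()?;
    let sum_a2 = (a * a)?.sum_all()?.to_scalar::<f32>()?;
    let sum_b2 = (b * b)?.sum_all()?.to_scalar::<f32>()?;
    Ok(sum_ab / (sum_a2 * sum_b2).sqrt())
}

// Mean-pool token embeddings: [1, n_tokens, hidden] -> [1, hidden].
fn mean_pool(embeddings: &Tensor) -> anyhow::Result<Tensor> {
    let (_batch, n_tokens, _hidden) = embeddings.dims3()?;
    Ok((embeddings.sum(1)? / (n_tokens as f64))?)
}

fn main() -> anyhow::Result<()> {
    let device = Device::Cpu;
    // Assumed: config.json and model.safetensors already fetched from the Hub.
    let mut config: Config =
        serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
    // Optional speedup mentioned in the removed docs: approximate gelu.
    config.hidden_act = HiddenAct::GeluApproximate;
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
    };
    let model = BertModel::load(vb, &config)?;

    // Dummy token ids standing in for real tokenizer output (illustration only).
    let ids1 = Tensor::new(&[[101u32, 1996, 2047, 3185, 2003, 12476, 102]], &device)?;
    let ids2 = Tensor::new(&[[101u32, 1996, 2047, 3185, 2003, 2061, 2307, 102]], &device)?;
    let emb1 = mean_pool(&model.forward(&ids1, &ids1.zeros_like()?, None)?)?;
    let emb2 = mean_pool(&model.forward(&ids2, &ids2.zeros_like()?, None)?)?;
    println!("cosine similarity: {}", cosine_similarity(&emb1, &emb2)?);
    Ok(())
}

Runnable end-to-end versions, including tokenization and Hub downloads, live in the candle-examples link kept in the surviving doc lines.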