Fix the padding used in stable diffusion. (#362)
@@ -31,7 +31,7 @@ pub struct Config {
     intermediate_size: usize,
     pub max_position_embeddings: usize,
     // The character to use for padding, use EOS when not set.
-    pad_with: Option<String>,
+    pub pad_with: Option<String>,
     num_hidden_layers: usize,
     num_attention_heads: usize,
     #[allow(dead_code)]
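The hunk above makes `pad_with` public so the example binary can read it from the CLIP config. As a minimal sketch of the rule the field's comment describes ("use EOS when not set"), the helper below is hypothetical and not part of this commit:

// Hypothetical helper illustrating the fallback rule: use the configured
// padding token when one is set, otherwise CLIP's end-of-text token.
fn resolve_pad_token(pad_with: &Option<String>) -> &str {
    match pad_with {
        Some(token) => token.as_str(),
        None => "<|endoftext|>",
    }
}

A config variant that pads with a different token would then set `pad_with: Some("!".to_string())` or similar (illustrative value, not taken from this diff).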
@@ -257,7 +257,7 @@ impl ClipEncoder {
     fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> {
         let mut xs = xs.clone();
         for layer in self.layers.iter() {
-            xs = layer.forward(&xs, causal_attention_mask)?
+            xs = layer.forward(&xs, causal_attention_mask)?;
         }
         Ok(xs)
     }
@@ -181,8 +181,8 @@ fn run(args: Args) -> Result<()> {
     let device = candle_examples::device(cpu)?;
 
     let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?;
-    let pad_id = match tokenizer.get_padding() {
-        Some(padding) => padding.pad_id,
+    let pad_id = match &sd_config.clip.pad_with {
+        Some(padding) => *tokenizer.get_vocab(true).get(padding.as_str()).unwrap(),
         None => *tokenizer.get_vocab(true).get("<|endoftext|>").unwrap(),
     };
     println!("Running with prompt \"{prompt}\".");
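This is the substance of the fix: the pad id now comes from the model config's `pad_with` rather than from whatever padding the tokenizer file happens to declare. For context on how the resolved `pad_id` is applied, here is a hedged sketch using the identifiers from the hunk above; the padding loop itself is an assumption about the surrounding example code, not part of this diff:

// Sketch: encode the prompt, then right-pad the ids with `pad_id` up to
// the CLIP context length so the token tensor has a fixed shape.
let mut tokens = tokenizer
    .encode(prompt, true)
    .map_err(E::msg)?
    .get_ids()
    .to_vec();
while tokens.len() < sd_config.clip.max_position_embeddings {
    tokens.push(pad_id)
}
let tokens = Tensor::new(tokens.as_slice(), &device)?.unsqueeze(0)?;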
@@ -212,7 +212,6 @@ fn run(args: Args) -> Result<()> {
     let uncond_embeddings = text_model.forward(&uncond_tokens)?;
     let text_embeddings = Tensor::cat(&[uncond_embeddings, text_embeddings], 0)?;
 
-    println!("text-embeddings: {text_embeddings:?}");
     println!("Building the autoencoder.");
     let vae = sd_config.build_vae(&vae_weights, &device)?;
     println!("Building the unet.");
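The deleted `println!` was a leftover debug trace. As for why the unconditional and conditional embeddings are concatenated above: the doubled batch lets a single UNet call produce both noise predictions for classifier-free guidance. A hedged sketch of that later step follows; the exact signatures and variable names are assumptions, not code from this commit:

// Sketch of the denoising step (assumed, not part of this diff): run the
// UNet once on the doubled batch, split the result along the batch dim,
// and blend the two predictions with the guidance scale.
let noise_pred = unet.forward(&latent_model_input, timestep, &text_embeddings)?;
let noise_pred = noise_pred.chunk(2, 0)?;
let (noise_pred_uncond, noise_pred_text) = (&noise_pred[0], &noise_pred[1]);
let noise_pred =
    (noise_pred_uncond + ((noise_pred_text - noise_pred_uncond)? * guidance_scale)?)?;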