diff --git a/candle-examples/examples/whisper/main.rs b/candle-examples/examples/whisper/main.rs
index 1b6f4bfe..71b03e72 100644
--- a/candle-examples/examples/whisper/main.rs
+++ b/candle-examples/examples/whisper/main.rs
@@ -435,10 +435,10 @@ impl AudioEncoder {
         };
         let conv1 = Conv1D::load(cfg.n_mels, n_state, 3, cfg1, &format!("{p}.conv1"), vb)?;
         let conv2 = Conv1D::load(n_state, n_state, 3, cfg2, &format!("{p}.conv2"), vb)?;
-        /* The positional embeddings could be regenerated via the following. */
         let positional_embedding = if true {
             vb.get((n_ctx, n_state), &format!("{p}.positional_embedding"))?
         } else {
+            /* The positional embeddings could be regenerated via the following. */
             sinusoids(n_ctx, n_state)?.to_device(&vb.device)?
         };
         let blocks = (0..cfg.n_audio_layer)
@@ -474,7 +474,6 @@ struct TextDecoder {
     positional_embedding: Tensor,
     blocks: Vec<ResidualAttentionBlock>,
     ln: LayerNorm,
-    mask: Tensor,
 }
 
 impl TextDecoder {
@@ -492,13 +491,11 @@ impl TextDecoder {
             })
             .collect::<Result<Vec<_>>>()?;
         let ln = LayerNorm::load(n_state, &format!("{p}.ln"), vb)?;
-        let mask = Tensor::new(&[0u32], &vb.device)?; // TODO
         Ok(Self {
             token_embedding,
             positional_embedding,
             blocks,
             ln,
-            mask,
         })
     }
 
     fn forward(&self, x: &Tensor, xa: &Tensor) -> Result<Tensor> {