MPT alibi fixes. (#1120)

* MPT alibi fixes.

* Some more fixes.

* Finally get the model to return some sensible outputs.

* Add a readme.
Author: Laurent Mazare
Date: 2023-10-18 10:58:05 +01:00 (committed by GitHub)
Parent: 662c186fd5
Commit: 767a6578f1
3 changed files with 64 additions and 13 deletions


@@ -103,23 +103,25 @@ impl GroupedQueryAttention {
                 (k, v)
             }
         };
-        let key = repeat_kv(key, self.n_heads / self.kv_n_heads)?;
-        let value = repeat_kv(value, self.n_heads / self.kv_n_heads)?;
         self.kv_cache = Some((key.clone(), value.clone()));
+        let query = query.contiguous()?;
+        let key = repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?;
+        let value = repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?;
         let attn_weights = (query.matmul(&key)? * self.softmax_scale)?;
         let attn_bias = {
             let s_q = query.dim(D::Minus2)?;
             let s_k = key.dim(D::Minus1)?;
             let (_, _, a_q, a_k) = self.attn_bias.dims4()?;
-            self.attn_bias
-                .narrow(2, a_q - s_q, s_q)?
-                .narrow(3, a_k - s_k, s_k)?
+            let start_q = a_q.saturating_sub(s_q);
+            let start_k = a_k.saturating_sub(s_k);
+            self.attn_bias.i((.., .., start_q.., start_k..))?
         };
-        let attn_weights = (attn_weights + attn_bias)?;
+        let attn_weights = attn_weights.broadcast_add(&attn_bias)?;
         let attn_weights = match mask {
             None => attn_weights,
             Some(mask) => masked_fill(
                 &attn_weights,
-                &mask.broadcast_left(b_size * self.n_heads)?,
+                &mask.broadcast_as(attn_weights.shape())?,
                 f32::NEG_INFINITY,
             )?,
         };
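
The narrow-based slicing above is replaced by index-based slicing driven by saturating_sub: during incremental decoding with the KV cache, s_q is typically 1 while s_k covers every cached position, and the code always wants the bottom-right (s_q, s_k) window of the precomputed per-head ALiBi bias. saturating_sub clamps the start offsets to 0 instead of underflowing when a requested length exceeds the precomputed size. A minimal plain-Rust sketch of that windowing idea (the nested-Vec bias and the single toy slope are illustrative stand-ins, not the model's tensor code):

    // Sketch: select the bottom-right (s_q, s_k) window of a precomputed
    // attention-bias matrix, mirroring the saturating_sub logic in the hunk above.
    fn bias_window(bias: &[Vec<f32>], s_q: usize, s_k: usize) -> Vec<Vec<f32>> {
        let (a_q, a_k) = (bias.len(), bias[0].len());
        // saturating_sub clamps to 0, so an over-long request cannot underflow
        // and panic the way `a_q - s_q` could.
        let (start_q, start_k) = (a_q.saturating_sub(s_q), a_k.saturating_sub(s_k));
        bias[start_q..]
            .iter()
            .map(|row| row[start_k..].to_vec())
            .collect()
    }

    fn main() {
        // Toy ALiBi-style bias for one head: b[i][j] = -slope * |i - j|.
        let (max_len, slope) = (6usize, 0.5f32);
        let bias: Vec<Vec<f32>> = (0..max_len)
            .map(|i| {
                (0..max_len)
                    .map(|j| -slope * (i as f32 - j as f32).abs())
                    .collect()
            })
            .collect();
        // During incremental decoding with a KV cache, s_q is typically 1 while
        // s_k covers all cached positions, so the window is the last row and the
        // last s_k columns of the precomputed bias.
        println!("{:?}", bias_window(&bias, 1, 4));
    }
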
@@ -128,7 +130,8 @@ impl GroupedQueryAttention {
             .matmul(&value)?
             .transpose(1, 2)?
             .flatten_from(D::Minus2)?;
-        attn_output.apply(&self.out_proj)
+        let out = attn_output.apply(&self.out_proj)?;
+        Ok(out)
     }
 }
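
For context on the repeat_kv calls in the first hunk above: grouped-query attention stores only kv_n_heads key/value heads and duplicates each one n_heads / kv_n_heads times so they line up with the query heads (the .contiguous() calls ensure a dense memory layout before the matmul). A rough stand-in using nested Vecs in place of (kv_heads, seq, head_dim) tensors, illustrating the shape change only, not the real tensor code:

    // Duplicate each key/value head n_rep = n_heads / kv_n_heads times.
    fn repeat_kv(kv: Vec<Vec<Vec<f32>>>, n_rep: usize) -> Vec<Vec<Vec<f32>>> {
        if n_rep == 1 {
            return kv;
        }
        kv.into_iter()
            .flat_map(|head| std::iter::repeat(head).take(n_rep))
            .collect()
    }

    fn main() {
        // 2 kv heads, 3 positions, head_dim 1, repeated up to 4 query heads.
        let kv = vec![vec![vec![0.1f32]; 3], vec![vec![0.2f32]; 3]];
        let repeated = repeat_kv(kv, 2);
        assert_eq!(repeated.len(), 4);
    }
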
@@ -199,7 +202,7 @@ impl MPTBlock {
         let xs = self.attn.forward(&xs, mask)?;
         let xs = (xs + residual)?;
         let residual = &xs;
-        let xs = xs.apply(&self.norm2)?.apply(&self.ffn);
+        let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?;
         xs + residual
     }
 }
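
MPTBlock::forward above follows the usual pre-norm residual pattern: normalize, run attention, add the input back, normalize again, run the feed-forward network, add the residual again. A schematic of that control flow with toy stand-in layers over plain f32 slices (not the model code):

    // Toy "layers": a centering norm and two scaling layers.
    fn norm(v: &[f32]) -> Vec<f32> {
        let mean = v.iter().sum::<f32>() / v.len() as f32;
        v.iter().map(|x| x - mean).collect()
    }
    fn attn(v: &[f32]) -> Vec<f32> {
        v.iter().map(|x| x * 0.5).collect()
    }
    fn ffn(v: &[f32]) -> Vec<f32> {
        v.iter().map(|x| x * 2.0).collect()
    }

    fn add(a: &[f32], b: &[f32]) -> Vec<f32> {
        a.iter().zip(b).map(|(x, y)| x + y).collect()
    }

    // Pre-norm block: x -> norm -> attn -> +x -> norm -> ffn -> +residual.
    fn block_forward(xs: &[f32]) -> Vec<f32> {
        let residual = xs;
        let xs = add(&attn(&norm(xs)), residual); // first residual connection
        let residual = &xs;
        add(&ffn(&norm(&xs)), residual) // second residual connection
    }

    fn main() {
        println!("{:?}", block_forward(&[1.0, 2.0, 3.0]));
    }
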
@@ -275,12 +278,15 @@ impl Model {
             Some(get_mask(seq_len, xs.device())?)
         };
         for block in self.blocks.iter_mut() {
-            xs = block.forward(&xs, mask.as_ref())?
+            xs = block.forward(&xs, mask.as_ref())?;
         }
-        xs.narrow(1, seq_len - 1, 1)?
+        let xs = xs.apply(&self.norm_f)?;
+        let logits = xs
+            .narrow(1, seq_len - 1, 1)?
             .squeeze(1)?
             .matmul(&self.wte.embeddings().t()?)?
-            .squeeze(1)
+            .squeeze(1)?;
+        Ok(logits)
     }
 }
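
On the mask handling touched above: the mask is optional (None is passed when no masking is needed, e.g. a single-token decoding step), get_mask builds the causal mask, and masked_fill sets every masked score to f32::NEG_INFINITY before the softmax; the mask produced by get_mask is now broadcast to the full attention shape with broadcast_as(attn_weights.shape()) rather than broadcast_left(b_size * self.n_heads). A simplified stand-in for the two helpers over plain 2-D Vecs (not the actual helpers in the file), showing the masking rule: query position i may only attend to key positions j <= i.

    // Causal mask: entry (i, j) is true when position j lies in the future of i.
    fn get_mask(size: usize) -> Vec<Vec<bool>> {
        (0..size)
            .map(|i| (0..size).map(|j| j > i).collect())
            .collect()
    }

    // Replace every masked score with `value` (typically f32::NEG_INFINITY).
    fn masked_fill(scores: &[Vec<f32>], mask: &[Vec<bool>], value: f32) -> Vec<Vec<f32>> {
        scores
            .iter()
            .zip(mask)
            .map(|(row, mrow)| {
                row.iter()
                    .zip(mrow)
                    .map(|(&s, &m)| if m { value } else { s })
                    .collect()
            })
            .collect()
    }

    fn main() {
        let scores = vec![vec![0.0f32; 3]; 3];
        let masked = masked_fill(&scores, &get_mask(3), f32::NEG_INFINITY);
        // Row 0 can only see position 0, row 1 sees 0..=1, row 2 sees everything.
        println!("{masked:?}");
    }
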