Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 02:38:10 +00:00.
Add the gather op. (#219)
* Start adding gather.
* Gather cpu implementation + use in simple training.
* Add scatter_add for the gradient of gather.
* Simple cpu implementation of scatter_add.
* Use gather in the simple-training backprop.
This commit is contained in:
@@ -17,10 +17,11 @@ fn log_softmax<D: candle::shape::Dim>(xs: &Tensor, d: D) -> candle::Result<Tenso
|
||||
Ok(log_sm)
|
||||
}
|
||||
|
||||
// TODO: Once the index_select backprop is efficient enough, switch to using this.
|
||||
fn _nll_loss(inp: &Tensor, target: &Tensor) -> candle::Result<Tensor> {
|
||||
let b_sz = target.shape().r1()?;
|
||||
inp.index_select(target, 0)?.sum_all()? / b_sz as f64
|
||||
/// Negative log-likelihood loss, averaged over the batch.
///
/// `inp` holds per-class log-probabilities laid out as (batch, ...) — the
/// target class for each row is picked out with `gather` along dim 1, so
/// `target` is expected to carry one class index per batch row (shape
/// (batch, 1) here; presumably produced by the `unsqueeze(1)` at the call
/// site — confirm against the caller). Returns `sum(picked) * (-1 / batch)`.
fn nll_loss(inp: &Tensor, target: &Tensor) -> candle::Result<Tensor> {
    // Batch size comes from the leading dimension of the targets.
    let batch_size = target.dim(0)?;
    // Select the log-probability of the target class for every row,
    // then reduce to a scalar.
    let picked = inp.gather(target, 1)?;
    let total = picked.sum_all()?;
    // Negate and divide by the batch size in a single affine op.
    total.affine(-1.0 / batch_size as f64, 0.)
}
|
||||
|
||||
pub fn main() -> Result<()> {
|
||||
@@ -32,12 +33,7 @@ pub fn main() -> Result<()> {
|
||||
println!("test-labels: {:?}", m.test_labels.shape());
|
||||
let train_labels = m.train_labels;
|
||||
let train_images = m.train_images;
|
||||
let train_labels = train_labels.to_vec1::<u8>()?;
|
||||
let train_label_mask = train_labels
|
||||
.iter()
|
||||
.flat_map(|l| (0..LABELS).map(|i| f32::from(i == *l as usize)))
|
||||
.collect::<Vec<_>>();
|
||||
let train_label_mask = Tensor::from_vec(train_label_mask, (train_labels.len(), LABELS), &dev)?;
|
||||
let train_labels = train_labels.to_dtype(DType::U32)?.unsqueeze(1)?;
|
||||
let ws = Var::zeros((IMAGE_DIM, LABELS), DType::F32, &dev)?;
|
||||
let bs = Var::zeros(LABELS, DType::F32, &dev)?;
|
||||
let sgd = candle_nn::SGD::new(&[&ws, &bs], 1.0);
|
||||
@@ -46,9 +42,7 @@ pub fn main() -> Result<()> {
|
||||
for epoch in 1..200 {
|
||||
let logits = train_images.matmul(&ws)?.broadcast_add(&bs)?;
|
||||
let log_sm = log_softmax(&logits, D::Minus1)?;
|
||||
let loss = (&log_sm * &train_label_mask)?
|
||||
.sum_all()?
|
||||
.affine(-1f64 / train_images.dim(0)? as f64, 0f64)?;
|
||||
let loss = nll_loss(&log_sm, &train_labels)?;
|
||||
sgd.backward_step(&loss)?;
|
||||
|
||||
let test_logits = test_images.matmul(&ws)?.broadcast_add(&bs)?;
|
||||
@@ -63,7 +57,7 @@ pub fn main() -> Result<()> {
|
||||
"{epoch:4} train loss: {:8.5} test acc: {:5.2}%",
|
||||
loss.to_scalar::<f32>()?,
|
||||
100. * test_accuracy
|
||||
)
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
Reference in New Issue
Block a user