Skeleton implementation for softmax.

This commit is contained in:
laurent
2023-06-23 22:00:13 +01:00
parent 5d44e76e3f
commit 8443963d4f
5 changed files with 32 additions and 0 deletions

View File

@ -147,6 +147,10 @@ impl CpuStorage {
}
}
/// Divides each element in place by the sum of elements taken over dimension
/// `dim` of `shape` — the normalization step of softmax.
/// Skeleton only: panics with `todo!()` until the CPU kernel is written.
pub(crate) fn divide_by_sum_over_dim(&mut self, _shape: &Shape, _dim: usize) {
    // TODO: implement the CPU normalization kernel.
    todo!()
}
pub(crate) fn affine_impl(
&self,
shape: &Shape,

View File

@ -291,6 +291,10 @@ impl CudaStorage {
Ok(Self { slice, device })
}
/// CUDA counterpart of `CpuStorage::divide_by_sum_over_dim`: in-place
/// division by the per-`dim` sum (softmax normalization step).
/// Skeleton only: panics with `todo!()` until the CUDA kernel exists.
pub(crate) fn divide_by_sum_over_dim(&mut self, _: &Shape, _: usize) {
    // TODO: implement via a CUDA kernel launch.
    todo!()
}
pub(crate) fn unary_impl<U: crate::op::UnaryOp>(
&self,
shape: &Shape,

View File

@ -62,6 +62,8 @@ impl CudaStorage {
Err(Error::NotCompiledWithCudaSupport)
}
pub(crate) fn divide_by_sum_over_dim(&mut self, _: &Shape, _: usize) {}
pub(crate) fn to_dtype(&self, _: &Shape, _: &[usize], _: DType) -> Result<Self> {
Err(Error::NotCompiledWithCudaSupport)
}

View File

@ -72,6 +72,13 @@ impl Storage {
}
}
/// Dispatches the in-place softmax normalization (divide each element by the
/// sum over dimension `dim`) to the active backend's storage implementation.
///
/// Fix: the original mixed `Storage::Cpu` and `Self::Cuda` in the same match;
/// both arms now use the `Storage::` path, matching the style of the
/// neighboring `to_dtype` dispatch.
pub(crate) fn divide_by_sum_over_dim(&mut self, shape: &Shape, dim: usize) {
    match self {
        Storage::Cpu(storage) => storage.divide_by_sum_over_dim(shape, dim),
        Storage::Cuda(storage) => storage.divide_by_sum_over_dim(shape, dim),
    }
}
pub(crate) fn to_dtype(&self, shape: &Shape, stride: &[usize], dtype: DType) -> Result<Self> {
match self {
Storage::Cpu(storage) => {

View File

@ -295,6 +295,21 @@ impl Tensor {
Ok(from_storage(storage, shape.clone(), op, false))
}
/// Computes softmax along dimension `dim`: exponentiates every element, then
/// normalizes in place by the sum of the exponentials over `dim`.
///
/// When op tracking is enabled for this tensor, the result records
/// `Op::Softmax` so gradients can flow back through it.
pub fn softmax(&self, dim: usize) -> Result<Self> {
    let shape = self.shape();
    // Element-wise exp; the storage produced here is contiguous.
    let mut exp_storage = self
        .storage
        .unary_impl::<crate::op::Exp>(shape, self.stride())?;
    // Normalize in place by the per-`dim` sums.
    exp_storage.divide_by_sum_over_dim(shape, dim);
    // Record the backward op only when this tensor participates in autograd.
    let op = self.track_op().then(|| Op::Softmax(self.clone(), dim));
    Ok(from_storage(exp_storage, shape.clone(), op, false))
}
pub fn matmul(&self, rhs: &Self) -> Result<Self> {
let a_dims = self.shape().dims();
let b_dims = rhs.shape().dims();