Mirror of https://github.com/huggingface/candle.git, synced 2025-06-17 19:18:50 +00:00

Commit: Skeleton implementation for softmax.
@@ -147,6 +147,10 @@ impl CpuStorage {
         }
     }
 
+    pub(crate) fn divide_by_sum_over_dim(&mut self, _shape: &Shape, _dim: usize) {
+        todo!()
+    }
+
     pub(crate) fn affine_impl(
         &self,
         shape: &Shape,
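The CPU kernel is left as a `todo!()` for now. As an illustration only (not the code this commit ships), here is a minimal standalone sketch of what "divide by the sum over one dimension" means for contiguous row-major f32 data; all names here are hypothetical:

```rust
/// Hypothetical sketch of the normalization the CPU `todo!()` will need:
/// for each slice along `dim`, divide every element by that slice's sum.
/// Assumes contiguous row-major f32 data; not candle's actual implementation.
fn divide_by_sum_over_dim(data: &mut [f32], dims: &[usize], dim: usize) {
    let dim_size = dims[dim];
    // Elements to the right of `dim` form the inner stride,
    // elements to the left the outer repetition count.
    let inner: usize = dims[dim + 1..].iter().product();
    let outer: usize = dims[..dim].iter().product();
    for o in 0..outer {
        for i in 0..inner {
            let base = o * dim_size * inner + i;
            let sum: f32 = (0..dim_size).map(|d| data[base + d * inner]).sum();
            for d in 0..dim_size {
                data[base + d * inner] /= sum;
            }
        }
    }
}

fn main() {
    // A 2x3 tensor normalized over dim 1: each row then sums to 1.
    let mut data = vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
    divide_by_sum_over_dim(&mut data, &[2, 3], 1);
    println!("{data:?}");
}
```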
@@ -291,6 +291,10 @@ impl CudaStorage {
         Ok(Self { slice, device })
     }
 
+    pub(crate) fn divide_by_sum_over_dim(&mut self, _: &Shape, _: usize) {
+        todo!()
+    }
+
     pub(crate) fn unary_impl<U: crate::op::UnaryOp>(
         &self,
         shape: &Shape,
@@ -62,6 +62,8 @@ impl CudaStorage {
         Err(Error::NotCompiledWithCudaSupport)
     }
 
+    pub(crate) fn divide_by_sum_over_dim(&mut self, _: &Shape, _: usize) {}
+
     pub(crate) fn to_dtype(&self, _: &Shape, _: &[usize], _: DType) -> Result<Self> {
         Err(Error::NotCompiledWithCudaSupport)
     }
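This hunk touches the dummy CUDA backend that stands in when candle is built without CUDA support. Fallible methods there report `Error::NotCompiledWithCudaSupport`; since `divide_by_sum_over_dim` returns nothing, its stub is just an empty body. A condensed, self-contained sketch of that pattern (types simplified, not candle's real definitions):

```rust
// Simplified stand-ins for candle's types; illustration only.
#[derive(Debug)]
enum Error {
    NotCompiledWithCudaSupport,
}
type Result<T> = std::result::Result<T, Error>;

#[derive(Debug)]
struct CudaStorage;

impl CudaStorage {
    // Fallible operations can surface the missing feature as an error...
    fn to_dtype(&self) -> Result<Self> {
        Err(Error::NotCompiledWithCudaSupport)
    }
    // ...but this in-place method has no Result to return, so the stub
    // silently does nothing.
    fn divide_by_sum_over_dim(&mut self) {}
}

fn main() {
    let mut s = CudaStorage;
    s.divide_by_sum_over_dim(); // no-op
    println!("{:?}", s.to_dtype().unwrap_err());
}
```

Presumably `unary_impl` on this dummy backend errors out the same way `to_dtype` does, so `softmax` would fail before ever reaching the silent no-op.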
@@ -72,6 +72,13 @@ impl Storage {
         }
     }
 
+    pub(crate) fn divide_by_sum_over_dim(&mut self, shape: &Shape, dim: usize) {
+        match self {
+            Storage::Cpu(storage) => storage.divide_by_sum_over_dim(shape, dim),
+            Self::Cuda(storage) => storage.divide_by_sum_over_dim(shape, dim),
+        }
+    }
+
     pub(crate) fn to_dtype(&self, shape: &Shape, stride: &[usize], dtype: DType) -> Result<Self> {
         match self {
             Storage::Cpu(storage) => {
@@ -295,6 +295,21 @@ impl Tensor {
         Ok(from_storage(storage, shape.clone(), op, false))
     }
 
+    pub fn softmax(&self, dim: usize) -> Result<Self> {
+        let shape = self.shape();
+        let mut storage = self
+            .storage
+            .unary_impl::<crate::op::Exp>(shape, self.stride())?;
+        // The resulting storage is contiguous.
+        storage.divide_by_sum_over_dim(shape, dim);
+        let op = if self.track_op() {
+            Some(Op::Softmax(self.clone(), dim))
+        } else {
+            None
+        };
+        Ok(from_storage(storage, shape.clone(), op, false))
+    }
+
     pub fn matmul(&self, rhs: &Self) -> Result<Self> {
         let a_dims = self.shape().dims();
         let b_dims = rhs.shape().dims();
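Taken together, the new `Tensor::softmax` composes two primitives: an element-wise exponential (`unary_impl::<crate::op::Exp>`) followed by normalization over the target dimension, with `Op::Softmax(self.clone(), dim)` recorded for backprop when `track_op()` is set. For intuition only, a standalone sketch of the same two-step computation on a plain vector (names are illustrative, not candle's API):

```rust
// softmax(x)_i = exp(x_i) / sum_j exp(x_j), sketched on a flat slice.
fn softmax(xs: &[f32]) -> Vec<f32> {
    // Step 1: element-wise exp, mirroring `unary_impl::<crate::op::Exp>`.
    // (A production kernel would subtract the max first for numerical
    // stability; the skeleton in this commit, like this sketch, does not yet.)
    let exps: Vec<f32> = xs.iter().map(|x| x.exp()).collect();
    // Step 2: divide by the sum, mirroring `divide_by_sum_over_dim`.
    let sum: f32 = exps.iter().sum();
    exps.iter().map(|e| e / sum).collect()
}

fn main() {
    let probs = softmax(&[1.0, 2.0, 3.0]);
    println!("{probs:?}"); // ~[0.090, 0.245, 0.665]; entries sum to 1
}
```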