Add the prelu layer. (#1402)

This commit is contained in:
Laurent Mazare
2023-12-03 17:06:09 +01:00
committed by GitHub
parent 8418154ee0
commit b5c283e86f
3 changed files with 51 additions and 4 deletions


@@ -1,4 +1,4 @@
-use candle::Tensor;
+use candle::{Result, Tensor};
 use serde::Deserialize;
 
 #[derive(Debug, Clone, Copy, PartialEq, Deserialize, Default)]
@@ -21,7 +21,7 @@ pub enum Activation {
 }
 
 impl super::Module for Activation {
-    fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
+    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
         match self {
             Self::Gelu => xs.gelu_erf(),
             // https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78
@@ -40,3 +40,49 @@ impl super::Module for Activation {
         }
     }
 }
+
+#[derive(Clone, Debug)]
+pub struct PReLU {
+    weight: Tensor,
+    is_scalar: bool,
+}
+
+impl PReLU {
+    pub fn new(weight: Tensor, is_scalar: bool) -> Self {
+        Self { weight, is_scalar }
+    }
+
+    pub fn weight(&self) -> &Tensor {
+        &self.weight
+    }
+
+    pub fn is_scalar(&self) -> bool {
+        self.is_scalar
+    }
+}
+
+impl candle::Module for PReLU {
+    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
+        let weight = if self.is_scalar {
+            self.weight.reshape(())?
+        } else {
+            self.weight.clone()
+        };
+        let zeros = xs.zeros_like()?;
+        xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)?
+    }
+}
+
+/// Create or initialize a new PReLU layer.
+///
+/// This uses a default name for the weight, namely `"weight"`.
+/// # Arguments
+///
+/// * `num_parameters` - The number of parameters. Use `None` to have a single trainable value
+///   and `Some` for a 1D vector with the appropriate number of features.
+pub fn prelu(num_parameters: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> {
+    let init_ws = crate::init::Init::Const(0.25);
+    // When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1.
+    let ws = vs.get_with_hints((num_parameters.unwrap_or(1),), "weight", init_ws)?;
+    Ok(PReLU::new(ws, num_parameters.is_none()))
+}
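
A minimal usage sketch for the new layer (illustrative only, not taken from the commit): PReLU computes max(0, x) + weight * min(0, x), with the weight either a single scalar or a per-feature vector. The snippet assumes the workspace crate names `candle` and `candle_nn` and the existing `VarMap`/`VarBuilder` helpers, so the `Init::Const(0.25)` hint actually materializes the weight.

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{VarBuilder, VarMap};

fn scalar_prelu_demo() -> Result<()> {
    let dev = Device::Cpu;
    // A VarMap-backed builder so `get_with_hints` creates the 0.25-initialized weight.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);

    // `None` -> a single trainable slope shared by all elements.
    let layer = candle_nn::prelu(None, vb)?;

    let xs = Tensor::new(&[-2f32, -0.5, 0.0, 1.5], &dev)?;
    // Positive values pass through, negative values are scaled by the weight (0.25).
    let ys = layer.forward(&xs)?;
    println!("{ys}");
    Ok(())
}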


@@ -15,7 +15,7 @@ pub mod sequential;
 pub mod var_builder;
 pub mod var_map;
 
-pub use activation::Activation;
+pub use activation::{prelu, Activation, PReLU};
 pub use batch_norm::{batch_norm, BatchNorm, BatchNormConfig};
 pub use conv::{
     conv1d, conv2d, conv2d_no_bias, conv_transpose2d, conv_transpose2d_no_bias, Conv1d,
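
With `prelu` and `PReLU` re-exported from `candle_nn`, the per-feature variant could look like the sketch below (again an illustration under the same assumptions): the weight holds one slope per trailing feature and is applied through `broadcast_mul`.

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{VarBuilder, VarMap};

fn per_feature_prelu_demo() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);

    // `Some(4)` -> one learnable slope per feature, stored as a weight of shape (4,).
    let layer = candle_nn::prelu(Some(4), vb)?;

    // (2, 4) input: the (4,) weight broadcasts over the trailing dimension.
    let xs = Tensor::new(&[[-1f32, 2., -3., 4.], [5., -6., 7., -8.]], &dev)?;
    let ys = layer.forward(&xs)?;
    println!("{ys}");
    Ok(())
}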


@@ -56,7 +56,7 @@ impl super::Module for Linear {
 
 /// Create or initialize a new linear layer.
 ///
-/// This uses some default names for weight and biases, namely `"weight"` and `"bias"`.
+/// This uses some default names for weights and biases, namely `"weight"` and `"bias"`.
 pub fn linear(in_dim: usize, out_dim: usize, vs: crate::VarBuilder) -> Result<Linear> {
     let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
     let ws = vs.get_with_hints((out_dim, in_dim), "weight", init_ws)?;
@@ -69,6 +69,7 @@ pub fn linear(in_dim: usize, out_dim: usize, vs: crate::VarBuilder) -> Result<Linear> {
     Ok(Linear::new(ws, Some(bs)))
 }
 
+/// Create or initialize a new linear layer without biases.
 pub fn linear_no_bias(in_dim: usize, out_dim: usize, vs: crate::VarBuilder) -> Result<Linear> {
     let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
     let ws = vs.get_with_hints((out_dim, in_dim), "weight", init_ws)?;
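
For completeness, a small sketch of the newly documented `linear_no_bias` constructor (illustrative only, same crate-name assumptions as above):

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{VarBuilder, VarMap};

fn linear_no_bias_demo() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);

    // A 3 -> 2 projection without a bias term; the weight is Kaiming-initialized.
    let proj = candle_nn::linear_no_bias(3, 2, vb)?;

    let xs = Tensor::new(&[[1f32, 2., 3.]], &dev)?; // shape (1, 3)
    let ys = proj.forward(&xs)?; // shape (1, 2)
    println!("{ys}");
    Ok(())
}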