diff --git a/candle-transformers/src/models/quantized_stable_lm.rs b/candle-transformers/src/models/quantized_stable_lm.rs
index c79877b6..7d4385a7 100644
--- a/candle-transformers/src/models/quantized_stable_lm.rs
+++ b/candle-transformers/src/models/quantized_stable_lm.rs
@@ -186,7 +186,11 @@ impl DecoderLayer {
     fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
         let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
         let mlp = MLP::new(cfg, vb.pp("mlp"))?;
-        let input_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("input_layernorm"))?;
+        let input_layernorm = layer_norm(
+            cfg.hidden_size,
+            cfg.layer_norm_eps,
+            vb.pp("input_layernorm"),
+        )?;
         let post_attention_layernorm = layer_norm(
             cfg.hidden_size,
             cfg.layer_norm_eps,
diff --git a/candle-transformers/src/models/stable_lm.rs b/candle-transformers/src/models/stable_lm.rs
index f46d3a2c..a1d58936 100644
--- a/candle-transformers/src/models/stable_lm.rs
+++ b/candle-transformers/src/models/stable_lm.rs
@@ -316,8 +316,11 @@ impl DecoderLayer {
     fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
         let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
         let mlp = MLP::new(cfg, vb.pp("mlp"))?;
-        let input_layernorm =
-            candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("input_layernorm"))?;
+        let input_layernorm = candle_nn::layer_norm(
+            cfg.hidden_size,
+            cfg.layer_norm_eps,
+            vb.pp("input_layernorm"),
+        )?;
         let post_attention_layernorm = candle_nn::layer_norm(
             cfg.hidden_size,
             cfg.layer_norm_eps,