Fix for clippy 1.86. (#2864)

* Fix for clippy 1.86.

* More clippy fixes.

* More fixes.
This commit is contained in:
Laurent Mazare
2025-04-03 19:38:27 +02:00
committed by GitHub
parent 648596c073
commit 9d31361c4f
9 changed files with 16 additions and 16 deletions

View File

@@ -816,7 +816,7 @@ impl PthTensors {
    /// # Arguments
    /// * `path` - Path to the pth file.
    /// * `key` - Optional key to retrieve `state_dict` from the pth file. Sometimes the pth file
    ///   contains multiple objects and the state_dict is the one we are interested in.
    pub fn read_all_with_key<P: AsRef<std::path::Path>>(
        path: P,
        key: Option<&str>,

View File

@@ -21,7 +21,7 @@ impl Config {
     }
     fn dt_rank(&self) -> usize {
-        (self.d_model + 15) / 16
+        self.d_model.div_ceil(16)
     }
     fn d_conv(&self) -> usize {

View File

@@ -7,7 +7,7 @@ use candle::{Result, Tensor};
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
///   of categories. This is expected to contain log probabilities.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
@@ -34,7 +34,7 @@ pub fn nll(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
///   of categories. This is expected to raw logits.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
@@ -56,9 +56,9 @@ pub fn mse(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
///   of categories. This is expected to raw logits.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N, C` where `N` is the batch size and `C` the number
///   of categories.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn binary_cross_entropy_with_logit(inp: &Tensor, target: &Tensor) -> Result<Tensor> {

View File

@@ -104,7 +104,7 @@ impl EncoderBlock {
         let snake1 = Snake1d::new(dim / 2, vb.pp(3))?;
         let cfg1 = Conv1dConfig {
             stride,
-            padding: (stride + 1) / 2,
+            padding: stride.div_ceil(2),
             ..Default::default()
         };
         let conv1 = encodec::conv1d_weight_norm(dim / 2, dim, 2 * stride, cfg1, vb.pp(4))?;
@@ -196,7 +196,7 @@ impl DecoderBlock {
         let snake1 = Snake1d::new(in_dim, vb.pp(0))?;
         let cfg = ConvTranspose1dConfig {
             stride,
-            padding: (stride + 1) / 2,
+            padding: stride.div_ceil(2),
             ..Default::default()
         };
         let conv_tr1 = encodec::conv_transpose1d_weight_norm(

View File

@@ -6,8 +6,8 @@ pub fn get_noise(
     width: usize,
     device: &Device,
 ) -> Result<Tensor> {
-    let height = (height + 15) / 16 * 2;
-    let width = (width + 15) / 16 * 2;
+    let height = height.div_ceil(16) * 2;
+    let width = width.div_ceil(16) * 2;
     Tensor::randn(0f32, 1., (num_samples, 16, height, width), device)
 }
@@ -84,8 +84,8 @@ pub fn get_schedule(num_steps: usize, shift: Option<(usize, f64, f64)>) -> Vec<f
 pub fn unpack(xs: &Tensor, height: usize, width: usize) -> Result<Tensor> {
     let (b, _h_w, c_ph_pw) = xs.dims3()?;
-    let height = (height + 15) / 16;
-    let width = (width + 15) / 16;
+    let height = height.div_ceil(16);
+    let width = width.div_ceil(16);
     xs.reshape((b, height, width, c_ph_pw / 4, 2, 2))? // (b, h, w, c, ph, pw)
         .permute((0, 3, 1, 4, 2, 5))? // (b, c, h, ph, w, pw)
         .reshape((b, c_ph_pw / 4, height * 2, width * 2))

View File

@@ -27,7 +27,7 @@ impl Config {
     }
     fn dt_rank(&self) -> usize {
-        (self.d_model + 15) / 16
+        self.d_model.div_ceil(16)
     }
     fn d_inner(&self) -> usize {

View File

@@ -716,7 +716,7 @@ pub mod transformer {
            None => {
                let hidden_dim = self.dim * 4;
                let n_hidden = ((2 * hidden_dim) as f64 / 3.) as usize;
-                (n_hidden + 255) / 256 * 256
+                n_hidden.div_ceil(256) * 256
            }
        }
    }

View File

@@ -198,7 +198,7 @@ pub fn log_mel_spectrogram_<T: Float>(
     let samples = {
         let mut samples_padded = samples.to_vec();
         let to_add = n_len * fft_step - samples.len();
-        samples_padded.extend(std::iter::repeat(zero).take(to_add));
+        samples_padded.extend(std::iter::repeat_n(zero, to_add));
         samples_padded
     };

View File

@@ -177,7 +177,7 @@ fn log_mel_spectrogram_<T: Float + std::fmt::Display>(
     let samples = {
         let mut samples_padded = samples.to_vec();
         let to_add = n_len * fft_step - samples.len();
-        samples_padded.extend(std::iter::repeat(zero).take(to_add));
+        samples_padded.extend(std::iter::repeat_n(zero, to_add));
         samples_padded
     };