Mirror of https://github.com/huggingface/candle.git
Reduce the number of threads.
@@ -1050,11 +1050,11 @@ impl<'a> Map2 for ConvTranspose2D<'a> {
         k_l: &Layout,
         dev: &CudaDevice,
     ) -> Result<CudaSlice<T>> {
-        // Kernel shape: (c_in_k, c_out, h_k, w_k)
+        // Kernel shape: (c_in_k, c_out / groups, h_k, w_k)
         // Input shape: (b_size, c_in, h_in, w_in)
         let p = &self.0;
         let (out_w, out_h) = (p.out_w(), p.out_h());
-        let dst_el = p.c_out * out_w * out_h * p.b_size;
+        let dst_el = p.c_out_per_group * p.groups * out_w * out_h * p.b_size;
         let inp = &inp.slice(inp_l.start_offset()..);
         let k = &k.slice(k_l.start_offset()..);
         let shape = inp_l.shape();
@@ -1063,7 +1063,13 @@ impl<'a> Map2 for ConvTranspose2D<'a> {

         // SAFETY: Set later by running the kernel.
         let out = unsafe { dev.alloc::<T>(dst_el) }.w()?;
-        let cfg = LaunchConfig::for_num_elems(dst_el as u32);
+        const NUM_THREADS: u32 = 512;
+        let num_blocks = (dst_el as u32 + NUM_THREADS - 1) / NUM_THREADS;
+        let mut cfg = LaunchConfig {
+            grid_dim: (num_blocks, 1, 1),
+            block_dim: (NUM_THREADS, 1, 1),
+            shared_mem_bytes: 0,
+        };
         let func = dev.get_or_load_func(&kernel_name::<T>("conv_transpose2d"), kernels::CONV)?;
         let ds = if dims.len() == 4 {
             [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat()
@@ -1072,13 +1078,13 @@ impl<'a> Map2 for ConvTranspose2D<'a> {
         };
         let ds = dev.htod_copy(ds).w()?;
         let params = (
             el,
             out_w,
             out_h,
             p.stride,
             p.padding,
             p.output_padding,
             p.dilation,
+            p.groups,
             &ds,
             inp,
             k,
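For readers skimming the patch, the functional change is small: instead of letting cudarc's LaunchConfig::for_num_elems pick the block size, the kernel launch now uses a fixed 512 threads per block and sizes a 1-D grid by ceiling division over the number of output elements, one thread per element. The sketch below reproduces that arithmetic as a minimal standalone program under stated assumptions: LaunchConfig is redefined locally with the same field names used in the diff so it compiles without the cudarc crate, launch_cfg_for is a hypothetical helper name, and the tensor sizes in main are made-up example values.

// Minimal stand-in for cudarc's LaunchConfig; only the fields set in the diff.
#[derive(Debug)]
struct LaunchConfig {
    grid_dim: (u32, u32, u32),
    block_dim: (u32, u32, u32),
    shared_mem_bytes: u32,
}

// One thread per output element, `num_threads` threads per block, and a 1-D grid
// sized by ceiling division; this mirrors the arithmetic introduced by the patch.
fn launch_cfg_for(dst_el: usize, num_threads: u32) -> LaunchConfig {
    let num_blocks = (dst_el as u32 + num_threads - 1) / num_threads;
    LaunchConfig {
        grid_dim: (num_blocks, 1, 1),
        block_dim: (num_threads, 1, 1),
        shared_mem_bytes: 0,
    }
}

fn main() {
    // Hypothetical conv-transpose output: batch 2, 2 groups of 8 channels, 64x64 spatial.
    let (b_size, c_out_per_group, groups, out_h, out_w) = (2usize, 8, 2, 64, 64);
    let dst_el = c_out_per_group * groups * out_w * out_h * b_size;
    let cfg = launch_cfg_for(dst_el, 512);
    // 131072 elements / 512 threads per block -> 256 blocks of (512, 1, 1).
    println!("dst_el = {dst_el}, cfg = {cfg:?}");
}

With these example sizes, dst_el = 8 * 2 * 64 * 64 * 2 = 131072, so the grid comes out to 256 blocks of 512 threads. For element counts that are not a multiple of 512 the division rounds up, and the kernel is expected to bounds-check the surplus threads in the last block; the precise reason for preferring 512 over the helper's default block size is not stated in the diff.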