diff --git a/libavutil/riscv/float_dsp_rvv.S b/libavutil/riscv/float_dsp_rvv.S
index a2f9488249..e6ec182a7a 100644
--- a/libavutil/riscv/float_dsp_rvv.S
+++ b/libavutil/riscv/float_dsp_rvv.S
@@ -75,18 +75,19 @@ endfunc
 func ff_vector_fmul_window_rvv, zve32f
         // a0: dst, a1: src0, a2: src1, a3: window, a4: length
-        vsetvli      t0, zero, e16, m2, ta, ma
+        // e16/m2 and e32/m4 are possible but slower due to gather.
+        vsetvli      t0, zero, e16, m1, ta, ma
         sh2add       a2, a4, a2
         vid.v        v0
         sh3add       t3, a4, a3
         vadd.vi      v0, v0, 1
         sh3add       t0, a4, a0
 1:
-        vsetvli      t2, a4, e16, m2, ta, ma
+        vsetvli      t2, a4, e16, m1, ta, ma
         slli         t4, t2, 2
         vrsub.vx     v2, v0, t2
         sub          t3, t3, t4
-        vsetvli      zero, zero, e32, m4, ta, ma
+        vsetvli      zero, zero, e32, m2, ta, ma
         sub          a2, a2, t4
         vle32.v      v8, (t3)
         sub          t0, t0, t4
@@ -133,16 +134,17 @@ endfunc
 // TODO factor vrsub, separate last iteration?
 // (a0) = (a1) * reverse(a2) [0..a3-1]
 func ff_vector_fmul_reverse_rvv, zve32f
-        vsetvli      t0, zero, e16, m4, ta, ma
+        // e16/m4 and e32/m8 are possible but slower due to gather.
+        vsetvli      t0, zero, e16, m1, ta, ma
         sh2add       a2, a3, a2
         vid.v        v0
         vadd.vi      v0, v0, 1
 1:
-        vsetvli      t0, a3, e16, m4, ta, ma
+        vsetvli      t0, a3, e16, m1, ta, ma
         slli         t1, t0, 2
         vrsub.vx     v4, v0, t0 // v4[i] = [VL-1, VL-2... 1, 0]
         sub          a2, a2, t1
-        vsetvli      zero, zero, e32, m8, ta, ma
+        vsetvli      zero, zero, e32, m2, ta, ma
         vle32.v      v8, (a2)
         sub          a3, a3, t0
         vle32.v      v16, (a1)
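
Background on the trade-off: vrgather.vv/vrgatherei16.vv allow every destination
element to source any element across the whole register group, so on typical
RVV implementations gather cost grows superlinearly with LMUL. Dropping from
e16/m2+e32/m4 (window) and e16/m4+e32/m8 (reverse) to e16/m1+e32/m2 processes
fewer elements per stripmined iteration, but makes each gather much cheaper,
which is what the new in-line comments allude to.

For reference, here is a C sketch of the scalar semantics both kernels
implement, modeled on the reference functions in libavutil/float_dsp.c (treat
it as illustrative, not necessarily the exact contents of the targeted tree).
The backward read of src1 is what the vid.v/vrsub.vx index vector feeds into
the gather:

/* dst[i] = src0[i] * src1[len-1-i]: the reversed operand is realized in the
 * assembly by a descending index vector plus a register gather. */
static void vector_fmul_reverse_c(float *dst, const float *src0,
                                  const float *src1, int len)
{
    src1 += len - 1;
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[-i];
}

/* Overlap-add windowing: src0 and the window are read from both ends while
 * src1 is read backward, so this kernel also needs a reversed vector operand. */
static void vector_fmul_window_c(float *dst, const float *src0,
                                 const float *src1, const float *win, int len)
{
    dst  += len;
    win  += len;
    src0 += len;

    for (int i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0 * wj - s1 * wi;
        dst[j] = s0 * wi + s1 * wj;
    }
}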