/external/libvpx/vpx_dsp/loongarch/
  variance_lsx.h
    18    __m128i res0_m;                             \
    20    res0_m = __lsx_vhaddw_d_w(in0, in0);        \
    21    res0_m = __lsx_vhaddw_q_d(res0_m, res0_m);  \
    22    in1 = __lsx_vpickve2gr_w(res0_m, 0);        \
  sad_lsx.c
    in hadd_uw_u32():
      32    __m128i res0_m;  (local)
      35    res0_m = __lsx_vhaddw_du_wu(in, in);
      36    res0_m = __lsx_vhaddw_qu_du(res0_m, res0_m);
      37    sum_m = __lsx_vpickve2gr_w(res0_m, 0);
    in hadd_sw_s32():
      53    __m128i res0_m;  (local)
      56    res0_m = __lsx_vhaddw_d_w(in, in);
      57    res0_m = __lsx_vhaddw_q_d(res0_m, res0_m);
      58    sum_m = __lsx_vpickve2gr_w(res0_m, 0);
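For readers unfamiliar with the LSX reduction idiom, here is a minimal scalar sketch of what hadd_sw_s32() above (and the variance_lsx.h lines before it) computes; the function name and the int32_t[4] layout are illustrative, not part of the source. __lsx_vhaddw_d_w pairs adjacent 32-bit lanes into widened 64-bit sums, __lsx_vhaddw_q_d folds the two halves together, and __lsx_vpickve2gr_w extracts the low 32 bits. The MSA HADD_SW_S32/HADD_UW_U32 macros later in this listing reach the same total via a splat-and-add instead of a second pairwise add.

    #include <stdint.h>

    /* Scalar model of the three-step LSX reduction (illustrative names). */
    static int32_t hadd_sw_s32_scalar(const int32_t in[4]) {
      int64_t lo = (int64_t)in[0] + in[1];  /* __lsx_vhaddw_d_w, lane 0   */
      int64_t hi = (int64_t)in[2] + in[3];  /* __lsx_vhaddw_d_w, lane 1   */
      int64_t sum = lo + hi;                /* __lsx_vhaddw_q_d           */
      return (int32_t)sum;                  /* __lsx_vpickve2gr_w(..., 0) */
    }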
  fwd_txfm_lsx.h
    198   __m128i res0_m, res1_m, res2_m, res3_m;                                 \
    204             res0_m, res1_m, res2_m, res3_m);                              \
    205   DUP4_ARG2(__lsx_vadd_h, res0_m, in0, res1_m, in1, res2_m, in2, res3_m,  \
    206             in3, res0_m, res1_m, res2_m, res3_m);                         \
    207   DUP2_ARG3(__lsx_vssrarni_bu_h, res1_m, res0_m, 0, res3_m, res2_m, 0,    \
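A minimal scalar sketch of the add-and-narrow step in fwd_txfm_lsx.h, assuming the documented __lsx_vssrarni_bu_h behavior (rounding right shift, here by 0, then saturation to unsigned byte); the helper names are illustrative.

    #include <stdint.h>

    /* Scalar model of __lsx_vadd_h followed by __lsx_vssrarni_bu_h with a
     * shift of 0: add, then clamp each 16-bit lane to [0, 255]. */
    static void add_and_narrow_scalar(const int16_t res[8], const int16_t in[8],
                                      uint8_t out[8]) {
      for (int i = 0; i < 8; ++i) {
        int16_t sum = (int16_t)(res[i] + in[i]);  /* __lsx_vadd_h (wrapping) */
        out[i] = sum < 0 ? 0 : sum > 255 ? 255 : (uint8_t)sum;
      }
    }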
/external/libvpx/vpx_dsp/mips/
  vpx_convolve_msa.h
    55    v8i16 res0_m, res1_m, res2_m, res3_m;                                  \
    58    DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m);             \
    60    DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m);            \
    65    ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1);               \
    73    v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m;  \
    78                res0_m, res1_m, res2_m, res3_m);                           \
    86                res0_m, res1_m, res2_m, res3_m);                           \
    91    ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m,    \
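The convolve macros follow the usual MSA dot-product pattern: DOTP_SB2_SH seeds an accumulator with one pair of filter taps, and DPADD_SB2_SH folds in the next pair. Below is a minimal scalar sketch of one output sample, assuming a 4-tap filter; the function name and width are illustrative.

    #include <stdint.h>

    /* Scalar model of one lane: DOTP covers taps 0-1, DPADD adds taps 2-3. */
    static int16_t filter_4tap_scalar(const int8_t src[4], const int8_t filt[4]) {
      int16_t acc = (int16_t)(src[0] * filt[0] + src[1] * filt[1]); /* DOTP_SB2_SH  */
      acc = (int16_t)(acc + src[2] * filt[2] + src[3] * filt[3]);   /* DPADD_SB2_SH */
      return acc;
    }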
  inv_txfm_msa.h
    99    v8i16 res0_m, res1_m, res2_m, res3_m;                                    \
    103               res0_m, res1_m, res2_m, res3_m);                             \
    104   ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, res0_m, res1_m, \
    106   CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m);                          \
    107   PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m);             \
    136   v8i16 res0_m, res1_m, c0_m, c1_m;                                        \
    146   ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                          \
    147   DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m);                 \
    152   DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);                 \
    157   ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                          \
    [all …]
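Lines 146-147 of inv_txfm_msa.h show the interleave-plus-dot-product butterfly used throughout the inverse transforms: ILVR_H2_SH pairs two 16-bit inputs lane by lane, and DOTP_SH2_SW multiplies each pair by a coefficient pair into a 32-bit sum. A minimal scalar sketch of one lane follows; that c0_m/c1_m hold cosine-table constants is an assumption about the callers, not something visible in these lines.

    #include <stdint.h>

    /* Scalar model of one 32-bit lane of DOTP_SH2_SW on ILVR-paired inputs. */
    static int32_t butterfly_scalar(int16_t a, int16_t b, int16_t c0, int16_t c1) {
      return (int32_t)a * c0 + (int32_t)b * c1;  /* dot of (a, b) with (c0, c1) */
    }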
  macros_msa.h
    779   v2i64 res0_m, res1_m;                              \
    782   res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);     \
    783   res1_m = __msa_splati_d(res0_m, 1);                \
    784   res0_m = res0_m + res1_m;                          \
    785   sum_m = __msa_copy_s_w((v4i32)res0_m, 0);          \
    798   v2u64 res0_m, res1_m;                              \
    801   res0_m = __msa_hadd_u_d((v4u32)in, (v4u32)in);     \
    802   res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);  \
    803   res0_m += res1_m;                                  \
    804   sum_m = __msa_copy_u_w((v4i32)res0_m, 0);          \
    [all …]
/external/webp/src/dsp/
  msa_macro.h
    in func_hadd_sw_s32():
      593   const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);  (local)
      594   const v2i64 res1_m = __msa_splati_d(res0_m, 1);
      595   const v2i64 out = res0_m + res1_m;
    in func_hadd_uh_u32():
      628   v2u64 res0_m = __msa_hadd_u_d(res_m, res_m);  (local)
      629   v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);
      630   res0_m = res0_m + res1_m;
      631   sum_m = __msa_copy_s_w((v4i32)res0_m, 0);
    1356  v8i16 inp0_m, inp1_m, res0_m, res1_m;                          \
    1364  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);    \
    1365  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);          \
    [all …]
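Lines 1364-1365 of msa_macro.h show the byte-domain reconstruction prologue: ILVR_B2_SH widens packed destination bytes to 16 bits by interleaving with a zero vector, then ADD2 adds the transform output. A minimal scalar sketch follows; the roles of dst/inp are inferred from the variable names.

    #include <stdint.h>

    /* Scalar model: zero-extend a destination byte, then add the input. */
    static void widen_and_add_scalar(const uint8_t dst[8], const int16_t inp[8],
                                     int16_t res[8]) {
      for (int i = 0; i < 8; ++i) {
        res[i] = (int16_t)((int16_t)dst[i] + inp[i]);  /* ILVR_B2_SH + ADD2 */
      }
    }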
/external/libvpx/vp8/common/mips/msa/
  vp8_macros_msa.h
    791   v2i64 res0_m, res1_m;                              \
    794   res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);     \
    795   res1_m = __msa_splati_d(res0_m, 1);                \
    796   res0_m = res0_m + res1_m;                          \
    797   sum_m = __msa_copy_s_w((v4i32)res0_m, 0);          \
    811   v2u64 res0_m, res1_m;                              \
    815   res0_m = __msa_hadd_u_d(res_m, res_m);             \
    816   res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);  \
    817   res0_m = res0_m + res1_m;                          \
    818   sum_m = __msa_copy_u_w((v4i32)res0_m, 0);          \