/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonMapAsm2IntrinV65.gen.td |
  10 …sat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntReg…
  11 …28B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntReg…
  12 …sat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntReg…
  13 …28B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntReg…
  14 …at HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntR…
  15 …8B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntR…
  20 …_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  21 …h_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  22 …_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  23 …h_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  [all …]
D | HexagonDepMapAsm2Intrin.td |
  14 def: Pat<(int_hexagon_S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
  15 (S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  20 def: Pat<(int_hexagon_M2_mpyud_acc_ll_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  21 (M2_mpyud_acc_ll_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  22 def: Pat<(int_hexagon_M2_mpyud_acc_ll_s1 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  23 (M2_mpyud_acc_ll_s1 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  46 def: Pat<(int_hexagon_M2_mpy_nac_sat_hl_s1 IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  47 (M2_mpy_nac_sat_hl_s1 IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  48 def: Pat<(int_hexagon_M4_vpmpyh_acc DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  49 (M4_vpmpyh_acc DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  [all …]
D | HexagonMapAsm2IntrinV62.gen.td |
  17 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
  18 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
  20 IntRegsLow8:$src3),
  21 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
  39 def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3),
  40 (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
  42 HvxVR:$src3),
  43 (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
  54 def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
  55 (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
  [all …]
D | HexagonIntrinsics.td |
  145 : Pat <(IntID I32:$src1, I32:$src2, u4_0ImmPred_timm:$src3, u5_0ImmPred_timm:$src4),
  146 (OutputInst I32:$src1, I32:$src2, u4_0ImmPred:$src3,
  207 def : Pat<(IntID HvxQR:$src1, IntRegs:$src2, HvxVR:$src3),
  208 (MI HvxQR:$src1, IntRegs:$src2, #0, HvxVR:$src3)>,
  212 HvxVR:$src3),
  213 (MI HvxQR:$src1, IntRegs:$src2, #0, HvxVR:$src3)>,
  378 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, u3_0ImmPred:$src3),
  379 (MI HvxVR:$src1, HvxVR:$src2, u3_0ImmPred:$src3)>,
  383 u3_0ImmPred:$src3),
  385 u3_0ImmPred:$src3)>,
  [all …]
/external/libvpx/vpx_dsp/mips/ |
D | sum_squares_msa.c |
  22 uint64_t src0, src1, src2, src3;  in vpx_sum_squares_2d_i16_msa() local
  26 LD4(src, src_stride, src0, src1, src2, src3);  in vpx_sum_squares_2d_i16_msa()
  28 INSERT_D2_SH(src2, src3, diff1);  in vpx_sum_squares_2d_i16_msa()
  35 v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_sum_squares_2d_i16_msa() local
  37 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
  39 DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
  47 v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_sum_squares_2d_i16_msa() local
  49 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
  51 DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
  54 LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
  [all …]
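The MSA matches above accumulate squared 16-bit samples with vector dot products (DPADD_SH2_SW). For orientation, here is a minimal scalar sketch of the same sum-of-squares over a size x size block; the function name and prototype are illustrative, not the libvpx API.

#include <stdint.h>

/* Illustrative scalar reference for the MSA kernels above: sum of squared
 * int16 samples over a size x size block (name and signature hypothetical). */
static uint64_t sum_squares_2d_i16_ref(const int16_t *src, int stride, int size) {
  uint64_t sum = 0;
  int r, c;
  for (r = 0; r < size; ++r) {
    for (c = 0; c < size; ++c) {
      const int16_t v = src[r * stride + c];
      sum += (int64_t)v * v;  /* widen before multiply to be safe */
    }
  }
  return sum;
}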
D | vpx_convolve_copy_msa.c |
  19 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_width8_msa() local
  23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width8_msa()
  29 out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
  40 LD_UB4(src, src_stride, src0, src1, src2, src3);  in copy_width8_msa()
  46 out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
  52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width8_msa()
  58 out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
  71 LD_UB4(src, src_stride, src0, src1, src2, src3);  in copy_width8_msa()
  76 out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
  102 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_16multx8mult_msa() local
  [all …]
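The copy_width8_msa matches bundle each 8-byte row into wide vector loads and 64-bit extract/stores (LD_UB8 / __msa_copy_u_d). A scalar sketch of the operation being accelerated, with hypothetical names:

#include <stdint.h>
#include <string.h>

/* Illustrative scalar equivalent of copy_width8_msa: copy `height` rows of
 * 8 bytes each from src to dst (strides in bytes). */
static void copy_width8_ref(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride, int height) {
  int row;
  for (row = 0; row < height; ++row) {
    memcpy(dst, src, 8);
    src += src_stride;
    dst += dst_stride;
  }
}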
D | vpx_convolve8_horiz_msa.c |
  19 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;  in common_hz_8t_4x4_msa() local
  33 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x4_msa()
  34 XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_4x4_msa()
  35 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_4x4_msa()
  47 v16i8 src0, src1, src2, src3;  in common_hz_8t_4x8_msa() local
  62 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
  63 XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
  65 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_4x8_msa()
  67 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
  68 XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
  [all …]
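These kernels vectorize an 8-tap horizontal FIR filter: load four rows, flip the sign bit (XORI_B4_128) so signed dot products can be used, then filter through the HORIZ_8TAP_* macro. A scalar sketch of the per-pixel computation follows; the 7-bit filter precision and every name here are assumptions based on the usual vpx_dsp convolution convention, not taken from the file.

#include <stdint.h>

#define FILTER_BITS 7  /* assumed fixed-point precision of the taps */

static uint8_t clip_u8(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

/* Illustrative scalar 8-tap horizontal filter with round-to-nearest and
 * clamping, the operation the MSA kernels above compute four pixels at a time. */
static void convolve8_horiz_ref(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const int16_t filter[8], int w, int h) {
  int x, y, k;
  src -= 3;  /* taps are centred between input pixels 3 and 4 */
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int sum = 0;
      for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k];
      dst[x] = clip_u8((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
    }
    src += src_stride;
    dst += dst_stride;
  }
}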
D | vpx_convolve8_avg_horiz_msa.c |
  20 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;  in common_hz_8t_and_aver_dst_4x4_msa() local
  36 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x4_msa()
  37 XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x4_msa()
  38 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_and_aver_dst_4x4_msa()
  54 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;  in common_hz_8t_and_aver_dst_4x8_msa() local
  70 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
  71 XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
  77 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_and_aver_dst_4x8_msa()
  79 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
  80 XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
  [all …]
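The _and_aver_dst_ variants run the same 8-tap horizontal filter as above but average the result into the existing destination pixels instead of overwriting them. A self-contained sketch of just that final averaging store (names illustrative):

#include <stdint.h>

/* The distinguishing step of the "_and_aver_dst_" kernels: each filtered
 * pixel is averaged, with rounding, into what is already in dst. */
static void store_with_average(uint8_t *dst, const uint8_t *filtered, int n) {
  int i;
  for (i = 0; i < n; ++i) {
    dst[i] = (uint8_t)((dst[i] + filtered[i] + 1) >> 1);  /* rounding average */
  }
}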
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/Hexagon/ |
D | HexagonDepMapAsm2Intrin.td |
  290 def: Pat<(int_hexagon_A2_vraddub_acc DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
  291 (A2_vraddub_acc DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3)>, Requires<[HasV5]>;
  294 def: Pat<(int_hexagon_A2_vrsadub_acc DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
  295 (A2_vrsadub_acc DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3)>, Requires<[HasV5]>;
  408 def: Pat<(int_hexagon_A4_vrmaxh DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
  409 (A4_vrmaxh DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  410 def: Pat<(int_hexagon_A4_vrmaxuh DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
  411 (A4_vrmaxuh DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  412 def: Pat<(int_hexagon_A4_vrmaxuw DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
  413 (A4_vrmaxuw DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  [all …]
D | HexagonMapAsm2IntrinV62.gen.td |
  17 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
  18 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
  20 IntRegsLow8:$src3),
  21 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
  39 def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3),
  40 (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
  42 HvxVR:$src3),
  43 (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
  54 def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
  55 (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
  [all …]
/external/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
  150 (ins VR128:$src1, VR128:$src2, VR128:$src3),
  152 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  154 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V, VEX_I8IMM;
  156 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
  158 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  161 VR128:$src3))]>, XOP_4V, VEX_I8IMM;
  201 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
  203 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  207 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
  209 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  [all …]
D | X86InstrFMA.td |
  44 (ins VR128:$src1, VR128:$src2, VR128:$src3),
  46 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  48 VR128:$src1, VR128:$src3)))]>;
  52 (ins VR128:$src1, VR128:$src2, f128mem:$src3),
  54 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  56 (MemFrag128 addr:$src3))))]>;
  60 (ins VR256:$src1, VR256:$src2, VR256:$src3),
  62 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  64 VR256:$src3)))]>, VEX_L;
  68 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
  [all …]
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
  172 (ins VR128:$src1, VR128:$src2, VR128:$src3),
  174 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  176 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V,
  179 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
  181 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  184 VR128:$src3))]>, XOP_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
  218 (v8i16 VR128:$src3))),
  219 (VPMACSWWrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  221 (v4i32 VR128:$src3))),
  222 (VPMACSDDrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  [all …]
D | X86InstrFMA.td |
  40 (ins RC:$src1, RC:$src2, RC:$src3),
  42 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  43 [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
  48 (ins RC:$src1, RC:$src2, x86memop:$src3),
  50 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  52 (MemFrag addr:$src3))))]>,
  61 (ins RC:$src1, RC:$src2, RC:$src3),
  63 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  68 (ins RC:$src1, RC:$src2, x86memop:$src3),
  70 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  [all …]
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
  172 (ins VR128:$src1, VR128:$src2, VR128:$src3),
  174 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  176 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V,
  179 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
  181 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  184 VR128:$src3))]>, XOP_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
  218 (v8i16 VR128:$src3))),
  219 (VPMACSWWrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  221 (v4i32 VR128:$src3))),
  222 (VPMACSDDrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  [all …]
D | X86InstrAMX.td |
  57 opaquemem:$src3), []>;
  61 opaquemem:$src3), []>;
  64 GR16:$src2, opaquemem:$src3,
  92 (ins TILE:$src1, TILE:$src2, TILE:$src3),
  93 "tdpbssd\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
  96 (ins TILE:$src1, TILE:$src2, TILE:$src3),
  97 "tdpbsud\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
  100 (ins TILE:$src1, TILE:$src2, TILE:$src3),
  101 "tdpbusd\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
  104 (ins TILE:$src1, TILE:$src2, TILE:$src3),
  [all …]
D | X86InstrFMA.td |
  40 (ins RC:$src1, RC:$src2, RC:$src3),
  42 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  43 [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
  48 (ins RC:$src1, RC:$src2, x86memop:$src3),
  50 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  52 (MemFrag addr:$src3))))]>,
  61 (ins RC:$src1, RC:$src2, RC:$src3),
  63 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  68 (ins RC:$src1, RC:$src2, x86memop:$src3),
  70 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  [all …]
/external/libvpx/vpx_dsp/loongarch/ |
D | vpx_convolve8_horiz_lsx.c |
  27 __m128i src0, src1, src2, src3;  in common_hz_8t_4x4_lsx() local
  39 LSX_LD_4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x4_lsx()
  40 DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,  in common_hz_8t_4x4_lsx()
  41 src1, src2, src3);  in common_hz_8t_4x4_lsx()
  42 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_4x4_lsx()
  61 __m128i src0, src1, src2, src3;  in common_hz_8t_4x8_lsx() local
  75 src3 = __lsx_vldx(_src, src_stride3);  in common_hz_8t_4x8_lsx()
  77 DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,  in common_hz_8t_4x8_lsx()
  78 src1, src2, src3);  in common_hz_8t_4x8_lsx()
  79 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_4x8_lsx()
  [all …]
D | vpx_convolve8_avg_horiz_lsx.c |
  29 __m128i src0, src1, src2, src3;  in common_hz_8t_and_aver_dst_4x4_lsx() local
  41 LSX_LD_4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x4_lsx()
  42 DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,  in common_hz_8t_and_aver_dst_4x4_lsx()
  43 src1, src2, src3);  in common_hz_8t_and_aver_dst_4x4_lsx()
  44 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_and_aver_dst_4x4_lsx()
  73 __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;  in common_hz_8t_and_aver_dst_4x8_lsx() local
  84 LSX_LD_4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_lsx()
  86 DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,  in common_hz_8t_and_aver_dst_4x8_lsx()
  87 src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_lsx()
  111 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_and_aver_dst_4x8_lsx()
  [all …]
/external/ComputeLibrary/src/gpu/cl/kernels/ |
D | ClWidthConcatenate4TensorsKernel.cpp |
  46 Status validate_arguments(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *src3…  in validate_arguments() argument
  48 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, src3, src4, dst);  in validate_arguments()
  51 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2, src3, src4, dst);  in validate_arguments()
  52 …ARM_COMPUTE_RETURN_ERROR_ON(src1->dimension(0) + src2->dimension(0) + src3->dimension(0) + src4->d…  in validate_arguments()
  58 ARM_COMPUTE_RETURN_ERROR_ON(src3->dimension(i) != dst->dimension(i));  in validate_arguments()
  72 …date(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *src3, const ITensorInfo …  in validate() argument
  74 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src1, src2, src3, src4, dst));  in validate()
  80 ITensorInfo *src3, ITensorInfo *src4,  in configure() argument
  83 ARM_COMPUTE_ERROR_ON_NULLPTR(src1, src2, src3, src4, dst);  in configure()
  84 ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src1, src2, src3, src4, dst));  in configure()
  [all …]
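The truncated validate_arguments() lines check that the four inputs and the output agree: non-null pointers, matching data types, widths (dimension 0) that fit within the destination width, and identical sizes in every other dimension. Below is a hedged sketch of that shape rule in plain C; the struct layout and the exact width comparison are assumptions, since the original line is cut off.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical minimal shape descriptor, just for this sketch. */
typedef struct {
  size_t dim[6];    /* dim[0] is the width */
  size_t num_dims;
} tensor_shape;

/* Shape rule for concatenating four tensors along the width (dimension 0). */
static bool width_concat4_shapes_ok(const tensor_shape *s1, const tensor_shape *s2,
                                    const tensor_shape *s3, const tensor_shape *s4,
                                    const tensor_shape *dst) {
  size_t i;
  /* the combined width must fit in the destination width */
  if (s1->dim[0] + s2->dim[0] + s3->dim[0] + s4->dim[0] > dst->dim[0]) return false;
  /* every other dimension must match the destination exactly */
  for (i = 1; i < dst->num_dims; ++i) {
    if (s1->dim[i] != dst->dim[i] || s2->dim[i] != dst->dim[i] ||
        s3->dim[i] != dst->dim[i] || s4->dim[i] != dst->dim[i]) return false;
  }
  return true;
}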
/external/libyuv/source/ |
D | scale_lsx.cc |
  82 __m128i src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3, dst0;  in ScaleARGBRowDown2Box_LSX() local
  88 DUP2_ARG2(__lsx_vld, t, 0, t, 16, src2, src3);  in ScaleARGBRowDown2Box_LSX()
  90 shuff, src3, src3, shuff, tmp0, tmp1, tmp2, tmp3);  in ScaleARGBRowDown2Box_LSX()
  139 __m128i src0, src1, src2, src3;  in ScaleARGBRowDownEvenBox_LSX() local
  161 src0, src1, src2, src3);  in ScaleARGBRowDownEvenBox_LSX()
  162 DUP2_ARG2(__lsx_vaddwev_h_bu, src0, src2, src1, src3, tmp0, tmp2);  in ScaleARGBRowDownEvenBox_LSX()
  163 DUP2_ARG2(__lsx_vaddwod_h_bu, src0, src2, src1, src3, tmp1, tmp3);  in ScaleARGBRowDownEvenBox_LSX()
  180 __m128i src0, src1, src2, src3, dst0, dst1;  in ScaleRowDown2_LSX() local
  185 src0, src1, src2, src3);  in ScaleRowDown2_LSX()
  186 DUP2_ARG2(__lsx_vpickod_b, src1, src0, src3, src2, dst0, dst1);  in ScaleRowDown2_LSX()
  [all …]
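ScaleARGBRowDown2Box_LSX averages 2x2 blocks of ARGB pixels per channel (the even/odd widening adds followed by a shift), while ScaleRowDown2_LSX simply keeps every other pixel (__lsx_vpickod_b). A scalar sketch of the box variant, with hypothetical names and assuming the usual rounded average:

#include <stdint.h>

/* Illustrative scalar 2x2 "box" downscale of one ARGB row: each output byte
 * is the rounded average of the same channel in a 2x2 input neighbourhood. */
static void scale_argb_row_down2_box_ref(const uint8_t *src, int src_stride,
                                         uint8_t *dst, int dst_width) {
  int x, c;
  for (x = 0; x < dst_width; ++x) {
    for (c = 0; c < 4; ++c) {           /* B, G, R, A channels */
      const int a = src[c];
      const int b = src[c + 4];              /* pixel to the right */
      const int d = src[src_stride + c];     /* pixel below */
      const int e = src[src_stride + c + 4]; /* diagonal pixel */
      dst[c] = (uint8_t)((a + b + d + e + 2) >> 2);
    }
    src += 8;  /* advance two source pixels */
    dst += 4;  /* advance one destination pixel */
  }
}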
/external/libvpx/vp8/common/mips/msa/ |
D | copymem_msa.c |
  16 uint64_t src0, src1, src2, src3;  in copy_8x4_msa() local
  18 LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x4_msa()
  19 SD4(src0, src1, src2, src3, dst, dst_stride);  in copy_8x4_msa()
  24 uint64_t src0, src1, src2, src3;  in copy_8x8_msa() local
  26 LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x8_msa()
  28 SD4(src0, src1, src2, src3, dst, dst_stride);  in copy_8x8_msa()
  31 LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x8_msa()
  32 SD4(src0, src1, src2, src3, dst, dst_stride);  in copy_8x8_msa()
  37 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_16x16_msa() local
  40 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_16x16_msa()
  [all …]
D | bilinear_filter_msa.c |
  33 v16i8 src0, src1, src2, src3, mask;  in common_hz_2t_4x4_msa() local
  42 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_2t_4x4_msa()
  43 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);  in common_hz_2t_4x4_msa()
  54 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_4x8_msa() local
  63 LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in common_hz_2t_4x8_msa()
  64 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);  in common_hz_2t_4x8_msa()
  90 v16i8 src0, src1, src2, src3, mask;  in common_hz_2t_8x4_msa() local
  98 LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_2t_8x4_msa()
  100 VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);  in common_hz_2t_8x4_msa()
  112 v16i8 src0, src1, src2, src3, mask, out0, out1;  in common_hz_2t_8x8mult_msa() local
  [all …]
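The common_hz_2t_* kernels are a 2-tap (bilinear) horizontal filter: each output pixel is a weighted blend of two neighbouring input pixels, gathered pairwise with byte shuffles. A scalar sketch, assuming VP8's 7-bit filter weights (the two taps sum to 128); names are illustrative:

#include <stdint.h>

/* Illustrative scalar 2-tap horizontal bilinear filter. With non-negative
 * taps summing to 128, the rounded result already fits in 8 bits. */
static void bilinear_hz_2t_ref(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const int16_t filter[2], int w, int h) {
  int x, y;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      const int sum = src[x] * filter[0] + src[x + 1] * filter[1];
      dst[x] = (uint8_t)((sum + 64) >> 7);  /* round to nearest */
    }
    src += src_stride;
    dst += dst_stride;
  }
}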
D | sixtap_filter_msa.c |
  52 #define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \  argument
  58 VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, _6tap_4wid_vec0_m, \
  62 VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, _6tap_4wid_vec2_m, \
  66 VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, _6tap_4wid_vec4_m, \
  72 #define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \  argument
  82 VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, _6tap_8wid_vec2_m, \
  89 VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, _6tap_8wid_vec2_m, \
  93 VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, _6tap_8wid_vec6_m, \
  130 #define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \  argument
  136 VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, _4tap_4wid_vec0_m, \
  [all …]
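The HORIZ_6TAP_* macros gather the six input pixels for each output position with byte shuffles (VSHF_B2_SB) and reduce them with dot products. In scalar form, the per-pixel arithmetic they implement is roughly the following; the tap layout and 7-bit rounding follow the usual VP8 convention and are assumptions here, as are all names:

#include <stdint.h>

static uint8_t clamp_u8(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

/* Illustrative scalar 6-tap horizontal filter: taps cover offsets -2..+3
 * around each pixel, may be negative, and sum to 128; the result is rounded
 * and clamped to 8 bits. */
static void sixtap_hz_ref(const uint8_t *src, int src_stride,
                          uint8_t *dst, int dst_stride,
                          const int16_t filter[6], int w, int h) {
  int x, y, k;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int sum = 0;
      for (k = 0; k < 6; ++k) sum += src[x + k - 2] * filter[k];
      dst[x] = clamp_u8((sum + 64) >> 7);  /* round, then clamp */
    }
    src += src_stride;
    dst += dst_stride;
  }
}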
/external/libvpx/vp8/common/loongarch/ |
D | sixtap_filter_lsx.c |
  88 #define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \  argument
  93 DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src3, src2, mask0, vec0_m, \
  96 DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask1, src3, src2, mask1, vec2_m, \
  100 DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask2, src3, src2, mask2, vec4_m, \
  106 #define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \  argument
  114 DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0, vec2_m, \
  120 DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1, vec2_m, \
  124 DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2, vec6_m, \
  134 #define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \  argument
  139 DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src3, src2, mask0, vec0_m, \
  [all …]