/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonMapAsm2IntrinV65.gen.td |
  10   …sat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntReg…
  11   …28B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntReg…
  12   …sat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntReg…
  13   …28B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntReg…
  14   …at HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntR…
  15   …8B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntR…
  20   …_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  21   …h_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  22   …_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  23   …h_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
  [all …]
|
D | HexagonDepMapAsm2Intrin.td |
  14   def: Pat<(int_hexagon_S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
  15   (S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  20   def: Pat<(int_hexagon_M2_mpyud_acc_ll_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  21   (M2_mpyud_acc_ll_s0 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  22   def: Pat<(int_hexagon_M2_mpyud_acc_ll_s1 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  23   (M2_mpyud_acc_ll_s1 DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  46   def: Pat<(int_hexagon_M2_mpy_nac_sat_hl_s1 IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  47   (M2_mpy_nac_sat_hl_s1 IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  48   def: Pat<(int_hexagon_M4_vpmpyh_acc DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
  49   (M4_vpmpyh_acc DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3)>, Requires<[HasV5]>;
  [all …]
|
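Note: the HexagonDepMapAsm2Intrin.td hits above are LLVM TableGen patterns that select a Hexagon intrinsic into its machine instruction with the register operands passed through unchanged. One of the matched patterns (file lines 14-15 above), reassembled here only for readability; nothing beyond the quoted lines is assumed:

    // Select the int_hexagon_S2_asr_r_p_or intrinsic into the S2_asr_r_p_or
    // instruction, keeping the same three register-class operands.
    def: Pat<(int_hexagon_S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
             (S2_asr_r_p_or DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3)>,
         Requires<[HasV5]>;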
D | HexagonMapAsm2IntrinV62.gen.td |
  17   def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
  18   (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
  20   IntRegsLow8:$src3),
  21   (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
  39   def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3),
  40   (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
  42   HvxVR:$src3),
  43   (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
  54   def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
  55   (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
  [all …]
|
D | HexagonIntrinsics.td |
  145   : Pat <(IntID I32:$src1, I32:$src2, u4_0ImmPred_timm:$src3, u5_0ImmPred_timm:$src4),
  146   (OutputInst I32:$src1, I32:$src2, u4_0ImmPred:$src3,
  207   def : Pat<(IntID HvxQR:$src1, IntRegs:$src2, HvxVR:$src3),
  208   (MI HvxQR:$src1, IntRegs:$src2, #0, HvxVR:$src3)>,
  212   HvxVR:$src3),
  213   (MI HvxQR:$src1, IntRegs:$src2, #0, HvxVR:$src3)>,
  378   def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, u3_0ImmPred:$src3),
  379   (MI HvxVR:$src1, HvxVR:$src2, u3_0ImmPred:$src3)>,
  383   u3_0ImmPred:$src3),
  385   u3_0ImmPred:$src3)>,
  [all …]
|
D | HexagonIntrinsicsV60.td |
  171   def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
  172   (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
  175   IntRegs:$src3),
  176   (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
  180   def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegs:$src3),
  181   (MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
  184   IntRegs:$src3),
  185   (MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
  189   def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, IntRegs:$src3),
  190   (MI HvxWR:$src1, HvxVR:$src2, IntRegs:$src3)>;
  [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InstrXOP.td |
  172   (ins VR128:$src1, VR128:$src2, VR128:$src3),
  174   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  176   (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V,
  179   (ins VR128:$src1, i128mem:$src2, VR128:$src3),
  181   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
  184   VR128:$src3))]>, XOP_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
  218   (v8i16 VR128:$src3))),
  219   (VPMACSWWrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  221   (v4i32 VR128:$src3))),
  222   (VPMACSDDrr VR128:$src1, VR128:$src2, VR128:$src3)>;
  [all …]
|
D | X86InstrFMA.td |
  40   (ins RC:$src1, RC:$src2, RC:$src3),
  42   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  43   [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
  48   (ins RC:$src1, RC:$src2, x86memop:$src3),
  50   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  52   (MemFrag addr:$src3))))]>,
  61   (ins RC:$src1, RC:$src2, RC:$src3),
  63   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  68   (ins RC:$src1, RC:$src2, x86memop:$src3),
  70   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
  [all …]
|
D | X86InstrFragmentsSIMD.td |
  198   def X86any_cmpp : PatFrags<(ops node:$src1, node:$src2, node:$src3),
  199   [(X86strict_cmpp node:$src1, node:$src2, node:$src3),
  200   (X86cmpp node:$src1, node:$src2, node:$src3)]>;
  212   def X86any_cmpm : PatFrags<(ops node:$src1, node:$src2, node:$src3),
  213   [(X86strict_cmpm node:$src1, node:$src2, node:$src3),
  214   (X86cmpm node:$src1, node:$src2, node:$src3)]>;
  534   def X86any_Fmadd : PatFrags<(ops node:$src1, node:$src2, node:$src3),
  535   [(X86strict_Fmadd node:$src1, node:$src2, node:$src3),
  536   (X86Fmadd node:$src1, node:$src2, node:$src3)]>;
  913   def mgatherv4i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
  [all …]
|
/third_party/ffmpeg/libavcodec/mips/ |
D | vp8_mc_msa.c |
  71   #define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  78   VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
  80   VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
  82   VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
  86   #define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  94   VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
  98   VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \
  100   VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \
  131   #define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  137   VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
  [all …]
|
D | pixblockdsp_msa.c |
  48   v16u8 src0, src1, src2, src3; in copy_8bit_to_16bit_width8_msa() local
  54   LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_8bit_to_16bit_width8_msa()
  57   ILVR_B4_UB(zero, src0, zero, src1, zero, src2, zero, src3, in copy_8bit_to_16bit_width8_msa()
  58   src0, src1, src2, src3); in copy_8bit_to_16bit_width8_msa()
  60   ST_UB4(src0, src1, src2, src3, dst_ptr, (dst_stride * 2)); in copy_8bit_to_16bit_width8_msa()
  72   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_16multx8mult_msa() local
  80   src0, src1, src2, src3, src4, src5, src6, src7); in copy_16multx8mult_msa()
  83   ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, in copy_16multx8mult_msa()
  98   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_width16_msa() local
  103   src0, src1, src2, src3, src4, src5, src6, src7); in copy_width16_msa()
  [all …]
|
D | hpeldsp_msa.c |
  85   v16i8 src0, src1, src2, src3, src0_sld1, src1_sld1, src2_sld1, src3_sld1; in common_hz_bil_8w_msa() local
  89   LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_bil_8w_msa()
  92   SLDI_B4_SB(zeros, src0, zeros, src1, zeros, src2, zeros, src3, 1, in common_hz_bil_8w_msa()
  95   src2, src2_sld1, src3, src3_sld1, dst, dst_stride); in common_hz_bil_8w_msa()
  105   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in common_hz_bil_16w_msa() local
  109   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in common_hz_bil_16w_msa()
  114   AVER_ST16x4_UB(src0, src8, src1, src9, src2, src10, src3, src11, in common_hz_bil_16w_msa()
  127   v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in common_hz_bil_no_rnd_8x8_msa() local
  132   LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in common_hz_bil_no_rnd_8x8_msa()
  135   SLDI_B4_SB(zeros, src0, zeros, src1, zeros, src2, zeros, src3, 1, in common_hz_bil_no_rnd_8x8_msa()
  [all …]
|
D | hevc_mc_uni_msa.c |
  34   #define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  41   VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
  43   VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
  45   VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
  47   VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \
  51   #define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  59   VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
  63   VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \
  67   VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \
  71   VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \
  [all …]
|
D | vp9_mc_msa.c |
  83   #define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  91   VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
  93   VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
  95   VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
  97   VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \
  102   #define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ argument
  111   VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
  115   VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \
  119   VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \
  123   VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \
  [all …]
|
D | hevcdsp_msa.c |
  49   v16i8 src0, src1, src2, src3; in hevc_copy_4w_msa() local
  52   LD_SB4(src, src_stride, src0, src1, src2, src3); in hevc_copy_4w_msa()
  54   ILVR_W2_SB(src1, src0, src3, src2, src0, src1); in hevc_copy_4w_msa()
  60   v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in hevc_copy_4w_msa() local
  66   src0, src1, src2, src3, src4, src5, src6, src7); in hevc_copy_4w_msa()
  69   ILVR_W4_SB(src1, src0, src3, src2, src5, src4, src7, src6, in hevc_copy_4w_msa()
  70   src0, src1, src2, src3); in hevc_copy_4w_msa()
  71   ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in hevc_copy_4w_msa()
  86   v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in hevc_copy_6w_msa() local
  90   LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in hevc_copy_6w_msa()
  [all …]
|
D | h264chroma_msa.c |
  66   v16u8 src0, src1, src2, src3; in avc_chroma_hz_2x4_msa() local
  76   LD_UB4(src, stride, src0, src1, src2, src3); in avc_chroma_hz_2x4_msa()
  78   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2); in avc_chroma_hz_2x4_msa()
  130   v16u8 src0, src1, src2, src3, out; in avc_chroma_hz_4x4_msa() local
  139   LD_UB4(src, stride, src0, src1, src2, src3); in avc_chroma_hz_4x4_msa()
  140   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2); in avc_chroma_hz_4x4_msa()
  153   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, out0, out1; in avc_chroma_hz_4x8_msa() local
  162   LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7); in avc_chroma_hz_4x8_msa()
  163   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2); in avc_chroma_hz_4x8_msa()
  190   v16u8 src0, src1, src2, src3, out0, out1; in avc_chroma_hz_8x4_msa() local
  [all …]
|
D | h264qpel_msa.c |
  647   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in ff_put_h264_qpel16_mc00_msa() local
  650   LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7); in ff_put_h264_qpel16_mc00_msa()
  654   ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, stride); in ff_put_h264_qpel16_mc00_msa()
  662   uint64_t src0, src1, src2, src3, src4, src5, src6, src7; in ff_put_h264_qpel8_mc00_msa() local
  664   LD4(src, stride, src0, src1, src2, src3); in ff_put_h264_qpel8_mc00_msa()
  667   SD4(src0, src1, src2, src3, dst, stride); in ff_put_h264_qpel8_mc00_msa()
  675   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in ff_avg_h264_qpel16_mc00_msa() local
  678   LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7); in ff_avg_h264_qpel16_mc00_msa()
  682   AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1, in ff_avg_h264_qpel16_mc00_msa()
  689   LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7); in ff_avg_h264_qpel16_mc00_msa()
  [all …]
|
D | hevc_mc_bi_msa.c |
  141   v16i8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 }; in hevc_bi_copy_6w_msa() local
  153   INSERT_D2_SB(tp2, tp3, src3); in hevc_bi_copy_6w_msa()
  159   ILVRL_B2_SH(zero, src3, dst6, dst7); in hevc_bi_copy_6w_msa()
  191   v16i8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 }; in hevc_bi_copy_8w_msa() local
  247   INSERT_D2_SB(tp2, tp3, src3); in hevc_bi_copy_8w_msa()
  251   ILVRL_B2_SH(zero, src3, dst6, dst7); in hevc_bi_copy_8w_msa()
  280   v16i8 src0, src1, src2, src3; in hevc_bi_copy_12w_msa() local
  285   LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3); in hevc_bi_copy_12w_msa()
  292   ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, dst0, dst1, in hevc_bi_copy_12w_msa()
  295   ILVL_W2_SB(src1, src0, src3, src2, src0, src1); in hevc_bi_copy_12w_msa()
  [all …]
|
D | me_cmp_msa.c |
  29   v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3; in sad_8width_msa() local
  33   LD_UB4(src, src_stride, src0, src1, src2, src3); in sad_8width_msa()
  38   PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, in sad_8width_msa()
  78   v16u8 src0, src1, src2, src3, comp0, comp1; in sad_horiz_bilinear_filter_8width_msa() local
  83   LD_UB4(src, src_stride, src0, src1, src2, src3); in sad_horiz_bilinear_filter_8width_msa()
  88   PCKEV_D2_UB(src1, src0, src3, src2, src0, src1); in sad_horiz_bilinear_filter_8width_msa()
  96   LD_UB4(src, src_stride, src0, src1, src2, src3); in sad_horiz_bilinear_filter_8width_msa()
  101   PCKEV_D2_UB(src1, src0, src3, src2, src0, src1); in sad_horiz_bilinear_filter_8width_msa()
  120   v16u8 src0, src1, src2, src3, comp0, comp1; in sad_horiz_bilinear_filter_16width_msa() local
  125   LD_UB4(src, src_stride, src0, src1, src2, src3); in sad_horiz_bilinear_filter_16width_msa()
  [all …]
|
D | hevc_mc_uniw_msa.c |
  137   v16i8 src0, src1, src2, src3; in hevc_uniwgt_copy_6w_msa() local
  154   INSERT_D2_SB(tp2, tp3, src3); in hevc_uniwgt_copy_6w_msa()
  159   ILVRL_B2_SH(zero, src3, dst6, dst7); in hevc_uniwgt_copy_6w_msa()
  197   v16i8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 }; in hevc_uniwgt_copy_8w_msa() local
  258   INSERT_D2_SB(tp2, tp3, src3); in hevc_uniwgt_copy_8w_msa()
  263   ILVRL_B2_SH(zero, src3, dst6, dst7); in hevc_uniwgt_copy_8w_msa()
  292   v16i8 src0, src1, src2, src3; in hevc_uniwgt_copy_12w_msa() local
  304   LD_SB4(src, src_stride, src0, src1, src2, src3); in hevc_uniwgt_copy_12w_msa()
  306   ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in hevc_uniwgt_copy_12w_msa()
  309   ILVL_W2_SB(src1, src0, src3, src2, src0, src1); in hevc_uniwgt_copy_12w_msa()
  [all …]
|
D | hevc_mc_biw_msa.c |
  330   v16i8 src0, src1, src2, src3; in hevc_biwgt_copy_12w_msa() local
  344   LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3); in hevc_biwgt_copy_12w_msa()
  351   ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in hevc_biwgt_copy_12w_msa()
  355   ILVL_W2_SB(src1, src0, src3, src2, src0, src1); in hevc_biwgt_copy_12w_msa()
  389   v16i8 src0, src1, src2, src3; in hevc_biwgt_copy_16w_msa() local
  403   LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3); in hevc_biwgt_copy_16w_msa()
  408   ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, tmp0, tmp1, in hevc_biwgt_copy_16w_msa()
  410   ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, tmp4, tmp5, in hevc_biwgt_copy_16w_msa()
  443   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, zero = { 0 }; in hevc_biwgt_copy_24w_msa() local
  458   LD_SB4(src0_ptr + 16, src_stride, src2, src3, src6, src7); in hevc_biwgt_copy_24w_msa()
  [all …]
|
/third_party/ffmpeg/libavcodec/ |
D | qpel_template.c |
  72   const uint8_t *src3, \
  88   c = AV_RN32(&src3[i * src_stride3]); \
  103   c = AV_RN32(&src3[i * src_stride3 + 4]); \
  122   const uint8_t *src3, \
  138   c = AV_RN32(&src3[i * src_stride3]); \
  153   c = AV_RN32(&src3[i * src_stride3 + 4]); \
  172   const uint8_t *src3, \
  181   OPNAME ## _pixels8_l4_8(dst, src1, src2, src3, src4, dst_stride, \
  186   src3 + 8, src4 + 8, \
  194   const uint8_t *src3, \
  [all …]
|
/third_party/flutter/skia/third_party/externals/libpng/mips/ |
D | filter_msa_intrinsics.c |
  374   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in png_read_filter_row_up_msa() local
  378   LD_UB4(rp, 16, src0, src1, src2, src3); in png_read_filter_row_up_msa()
  382   ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
  383   src0, src1, src2, src3); in png_read_filter_row_up_msa()
  385   ST_UB4(src0, src1, src2, src3, rp, 16); in png_read_filter_row_up_msa()
  399   LD_UB4(rp, 16, src0, src1, src2, src3); in png_read_filter_row_up_msa()
  402   ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
  403   src0, src1, src2, src3); in png_read_filter_row_up_msa()
  405   ST_UB4(src0, src1, src2, src3, rp, 16); in png_read_filter_row_up_msa()
  466   v16u8 src0, src1, src2, src3, src4; in png_read_filter_row_sub4_msa() local
  [all …]
|
/third_party/skia/third_party/externals/libpng/mips/ |
D | filter_msa_intrinsics.c |
  374   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in png_read_filter_row_up_msa() local
  378   LD_UB4(rp, 16, src0, src1, src2, src3); in png_read_filter_row_up_msa()
  382   ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
  383   src0, src1, src2, src3); in png_read_filter_row_up_msa()
  385   ST_UB4(src0, src1, src2, src3, rp, 16); in png_read_filter_row_up_msa()
  399   LD_UB4(rp, 16, src0, src1, src2, src3); in png_read_filter_row_up_msa()
  402   ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
  403   src0, src1, src2, src3); in png_read_filter_row_up_msa()
  405   ST_UB4(src0, src1, src2, src3, rp, 16); in png_read_filter_row_up_msa()
  466   v16u8 src0, src1, src2, src3, src4; in png_read_filter_row_sub4_msa() local
  [all …]
|
/third_party/libpng/mips/ |
D | filter_msa_intrinsics.c |
  374   v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in png_read_filter_row_up_msa() local
  378   LD_UB4(rp, 16, src0, src1, src2, src3); in png_read_filter_row_up_msa()
  382   ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
  383   src0, src1, src2, src3); in png_read_filter_row_up_msa()
  385   ST_UB4(src0, src1, src2, src3, rp, 16); in png_read_filter_row_up_msa()
  399   LD_UB4(rp, 16, src0, src1, src2, src3); in png_read_filter_row_up_msa()
  402   ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
  403   src0, src1, src2, src3); in png_read_filter_row_up_msa()
  405   ST_UB4(src0, src1, src2, src3, rp, 16); in png_read_filter_row_up_msa()
  466   v16u8 src0, src1, src2, src3, src4; in png_read_filter_row_sub4_msa() local
  [all …]
|
/third_party/ffmpeg/libavcodec/ppc/ |
D | vc1dsp_altivec.c |
  140   vector signed short src0, src1, src2, src3, src4, src5, src6, src7; in vc1_inv_trans_8x8_altivec() local
  156   src3 = vec_ld( 48, block); in vc1_inv_trans_8x8_altivec()
  165   s3 = vec_unpackl(src3); in vc1_inv_trans_8x8_altivec()
  173   sB = vec_unpackh(src3); in vc1_inv_trans_8x8_altivec()
  185   src3 = vec_pack(sB, s3); in vc1_inv_trans_8x8_altivec()
  190   TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); in vc1_inv_trans_8x8_altivec()
  195   s3 = vec_unpackl(src3); in vc1_inv_trans_8x8_altivec()
  203   sB = vec_unpackh(src3); in vc1_inv_trans_8x8_altivec()
  215   src3 = vec_pack(sB, s3); in vc1_inv_trans_8x8_altivec()
  224   vec_st(src3, 48, block); in vc1_inv_trans_8x8_altivec()
  [all …]
|