/third_party/ffmpeg/libavcodec/mips/ |
D | h264chroma_msa.c |
    1058 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_chroma_hz_and_aver_dst_8x4_msa()
    1090 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_chroma_hz_and_aver_dst_8x8_msa()
    1093 LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3); in avc_chroma_hz_and_aver_dst_8x8_msa()
    1343 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_chroma_vt_and_aver_dst_8x4_msa()
    1374 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_chroma_vt_and_aver_dst_8x8_msa()
    1377 LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3); in avc_chroma_vt_and_aver_dst_8x8_msa()
    1688 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_chroma_hv_and_aver_dst_8x4_msa()
    1752 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_chroma_hv_and_aver_dst_8x8_msa()
    1755 LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3); in avc_chroma_hv_and_aver_dst_8x8_msa()
    1823 LD4(src, stride, src0, src1, src2, src3); in copy_width8_msa()
    [all …]
|
D | h264dsp_msa.c |
    126 LD4(data, stride, tp0, tp1, tp2, tp3); in avc_wgt_8x4_msa()
    158 LD4(data, stride, tp0, tp1, tp2, tp3); in avc_wgt_8x8_msa()
    161 LD4(data + 4 * stride, stride, tp0, tp1, tp2, tp3); in avc_wgt_8x8_msa()
    202 LD4(data, stride, tp0, tp1, tp2, tp3); in avc_wgt_8x16_msa()
    205 LD4(data + 4 * stride, stride, tp0, tp1, tp2, tp3); in avc_wgt_8x16_msa()
    356 LD4(src, stride, tp0, tp1, tp2, tp3); in avc_biwgt_8x4_msa()
    359 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_biwgt_8x4_msa()
    393 LD4(src, stride, tp0, tp1, tp2, tp3); in avc_biwgt_8x8_msa()
    396 LD4(src + 4 * stride, stride, tp0, tp1, tp2, tp3); in avc_biwgt_8x8_msa()
    399 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_biwgt_8x8_msa()
    [all …]
|
D | vp9_mc_msa.c |
    1111 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hz_8t_and_aver_dst_8w_msa()
    1391 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_vt_8t_and_aver_dst_8w_msa()
    1691 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    2918 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hz_2t_and_aver_dst_8x4_msa()
    2949 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hz_2t_and_aver_dst_8x8mult_msa()
    2962 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hz_2t_and_aver_dst_8x8mult_msa()
    2977 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hz_2t_and_aver_dst_8x8mult_msa()
    2989 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_hz_2t_and_aver_dst_8x8mult_msa()
    3278 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_vt_2t_and_aver_dst_8x4_msa()
    3316 LD4(dst, dst_stride, tp0, tp1, tp2, tp3); in common_vt_2t_and_aver_dst_8x8mult_msa()
    [all …]
|
D | h264qpel_msa.c |
    492 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_luma_hv_qrt_and_aver_dst_8x8_msa()
    533 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_luma_hv_qrt_and_aver_dst_8x8_msa()
    620 LD4(dst, stride, tp0, tp1, tp2, tp3); in avc_luma_hv_qrt_and_aver_dst_16x16_msa()
    664 LD4(src, stride, src0, src1, src2, src3); in ff_put_h264_qpel8_mc00_msa()
    666 LD4(src, stride, src4, src5, src6, src7); in ff_put_h264_qpel8_mc00_msa()
    706 LD4(src, stride, tp0, tp1, tp2, tp3); in ff_avg_h264_qpel8_mc00_msa()
    708 LD4(src, stride, tp4, tp5, tp6, tp7); in ff_avg_h264_qpel8_mc00_msa()
    714 LD4(dst, stride, tp0, tp1, tp2, tp3); in ff_avg_h264_qpel8_mc00_msa()
    715 LD4(dst + 4 * stride, stride, tp4, tp5, tp6, tp7); in ff_avg_h264_qpel8_mc00_msa()
    3343 LD4(dst, stride, tp0, tp1, tp2, tp3); in ff_avg_h264_qpel8_mc10_msa()
    [all …]
|
D | hevc_idct_msa.c |
    744 LD4(temp_dst, stride, dst0, dst1, dst2, dst3); in hevc_addblk_8x8_msa()
    757 LD4(temp_dst, stride, dst0, dst1, dst2, dst3); in hevc_addblk_8x8_msa()
|
D | hevc_mc_bi_msa.c |
    93 LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3); in hevc_bi_copy_4w_msa()
    109 LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3); in hevc_bi_copy_4w_msa()
    113 LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3); in hevc_bi_copy_4w_msa()
    146 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_bi_copy_6w_msa()
    150 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_bi_copy_6w_msa()
    206 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_bi_copy_8w_msa()
    218 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_bi_copy_8w_msa()
    240 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_bi_copy_8w_msa()
    244 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_bi_copy_8w_msa()
|
D | hevc_mc_uni_msa.c |
    117 LD4(src, src_stride, out0, out1, out2, out3); in copy_width8_msa()
    127 LD4(src, src_stride, out0, out1, out2, out3); in copy_width8_msa()
    129 LD4(src, src_stride, out4, out5, out6, out7); in copy_width8_msa()
    138 LD4(src, src_stride, out0, out1, out2, out3); in copy_width8_msa()
    206 LD4(src + 16, src_stride, out0, out1, out2, out3); in copy_width24_msa()
    208 LD4(src + 16, src_stride, out4, out5, out6, out7); in copy_width24_msa()
|
D | hevc_mc_uniw_msa.c |
    147 LD4(src, src_stride, tp0, tp1, tp2, tp3); in hevc_uniwgt_copy_6w_msa()
    151 LD4(src, src_stride, tp0, tp1, tp2, tp3); in hevc_uniwgt_copy_6w_msa()
    218 LD4(src, src_stride, tp0, tp1, tp2, tp3); in hevc_uniwgt_copy_8w_msa()
    230 LD4(src, src_stride, tp0, tp1, tp2, tp3); in hevc_uniwgt_copy_8w_msa()
    251 LD4(src, src_stride, tp0, tp1, tp2, tp3); in hevc_uniwgt_copy_8w_msa()
    255 LD4(src, src_stride, tp0, tp1, tp2, tp3); in hevc_uniwgt_copy_8w_msa()
|
D | hevc_mc_biw_msa.c |
    133 LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3); in hevc_biwgt_copy_4w_msa()
    150 LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3); in hevc_biwgt_copy_4w_msa()
    154 LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3); in hevc_biwgt_copy_4w_msa()
    203 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_biwgt_copy_6w_msa()
    269 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_biwgt_copy_8w_msa()
    293 LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3); in hevc_biwgt_copy_8w_msa()
|
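Most of the FFmpeg MSA call sites listed above follow one pattern for 8-pixel-wide blocks: LD4 pulls four rows that lie `stride` bytes apart into 64-bit temporaries (tp0..tp3), and the `*_and_aver_dst_*` variants then average those destination rows with the freshly filtered rows before storing them back. The scalar sketch below models only that averaging step; the `aver_dst_8x4` helper and the rounding byte average are assumptions for illustration, whereas the real functions do the same work on MSA vectors.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical scalar model of the "*_and_aver_dst_8x4" call sites above:
 * fetch four 8-byte destination rows that lie `stride` bytes apart (what
 * LD4 does with MSA loads), average each byte with the newly filtered row
 * using a rounding average, and store the result back. */
static void aver_dst_8x4(uint8_t *dst, ptrdiff_t stride,
                         const uint8_t pred[4][8])
{
    for (int row = 0; row < 4; ++row) {
        uint8_t tmp[8];
        memcpy(tmp, dst + row * stride, sizeof(tmp));   /* one strided row */
        for (int x = 0; x < 8; ++x)
            tmp[x] = (uint8_t)((tmp[x] + pred[row][x] + 1) >> 1);
        memcpy(dst + row * stride, tmp, sizeof(tmp));
    }
}
```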
/third_party/skia/third_party/externals/libwebp/src/dsp/ |
D | dec_mips_dsp_r2.c |
    731 static void LD4(uint8_t* dst) {  // Down-Left   in LD4() function
    980 VP8PredLuma4[6] = LD4;   in VP8DspInitMIPSdspR2()
|
D | enc.c |
    402 static void LD4(uint8_t* dst, const uint8_t* top) {   in LD4() function
    527 LD4(I4LD4 + dst, top);   in Intra4Preds_C()
|
D | enc_mips_dsp_r2.c |
    833 static void LD4(uint8_t* dst, const uint8_t* top) {   in LD4() function
    1053 LD4(I4LD4 + dst, top);   in Intra4Preds_MIPSdspR2()
|
D | enc_msa.c |
    316 static WEBP_INLINE void LD4(uint8_t* dst, const uint8_t* top) {   in LD4() function
    441 LD4(I4LD4 + dst, top);   in Intra4Preds_MSA()
|
D | dec_msa.c |
    752 static void LD4(uint8_t* dst) {  // Down-Left   in LD4() function
    999 VP8PredLuma4[6] = LD4;   in VP8DspInitMSA()
|
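In libwebp, LD4 names the 4x4 "down-left" intra predictor: the decoder registers it as VP8PredLuma4[6], and the encoder variants take an explicit pointer to the 8 reconstructed pixels above the block. The sketch below shows the standard down-left rule; the name `ld4_down_left`, the explicit `stride` parameter, and the stand-alone AVG3 macro are illustrative assumptions, not libwebp's exact code.

```c
#include <stdint.h>

/* Rounding 3-tap average used by VP8/H.264-style intra prediction. */
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

/* Sketch of the 4x4 down-left predictor: every pixel on anti-diagonal
 * x + y takes the same smoothed value computed from the 8 pixels above
 * the block (top[0..7]); the last diagonal repeats the rightmost pixel. */
static void ld4_down_left(uint8_t *dst, int stride, const uint8_t *top)
{
    const int A = top[0], B = top[1], C = top[2], D = top[3];
    const int E = top[4], F = top[5], G = top[6], H = top[7];
    const uint8_t diag[7] = {
        AVG3(A, B, C), AVG3(B, C, D), AVG3(C, D, E), AVG3(D, E, F),
        AVG3(E, F, G), AVG3(F, G, H), AVG3(G, H, H),
    };
    for (int y = 0; y < 4; ++y)
        for (int x = 0; x < 4; ++x)
            dst[y * stride + x] = diag[x + y];
}
```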
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64SchedCyclone.td | 582 // FCVT Rd, S/D = V6+LD4: 10 cycles
|
D | AArch64SchedKryoDetails.td |
    1303 (instregex "LD4(i8|i16|i32)$")>;
    1316 (instregex "LD4(i8|i16|i32)_POST$")>;
|
D | AArch64InstrInfo.td |
    6069 defm LD4 : SIMDLd4Multiple<"ld4">;
    6122 defm LD4 : SIMDLdSingleBTied<1, 0b001, "ld4", VecListFourb, GPR64pi4>;
    6123 defm LD4 : SIMDLdSingleHTied<1, 0b011, 0, "ld4", VecListFourh, GPR64pi8>;
    6124 defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours, GPR64pi16>;
    6125 defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd, GPR64pi32>;
    6191 defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
|
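On AArch64, the LD4 records above define the ASIMD structure loads: the multiple-structure form loads interleaved 4-element structures and de-interleaves them across four vector registers, while the single-structure (Tied) forms load one element into a chosen lane of each register. As a hedged illustration, the NEON intrinsic `vld4q_u8` below is normally lowered to one such `ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]`; the RGBA-splitting function itself is hypothetical and compiles only for NEON-capable targets.

```c
#include <stdint.h>
#include <arm_neon.h>   /* AArch64/ARM NEON targets only */

/* Hypothetical example: split 16 interleaved RGBA pixels into planes.
 * vld4q_u8 reads 64 bytes and de-interleaves them into four 16-byte
 * vectors, which typically maps to a single LD4 instruction. */
void split_rgba16(const uint8_t *rgba,
                  uint8_t *r, uint8_t *g, uint8_t *b, uint8_t *a)
{
    const uint8x16x4_t px = vld4q_u8(rgba);
    vst1q_u8(r, px.val[0]);
    vst1q_u8(g, px.val[1]);
    vst1q_u8(b, px.val[2]);
    vst1q_u8(a, px.val[3]);
}
```

The GPR64piN operands in the listed records correspond to the post-indexed (register write-back) addressing forms of the same instruction.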
/third_party/ffmpeg/libavutil/mips/ |
D | generic_macros_msa.h | 232 #define LD4(psrc, stride, out0, out1, out2, out3) \ macro
|
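The definition above shows the contract of FFmpeg's MSA LD4 macro: given a source pointer, a byte stride, and four output variables, it loads four 64-bit values from consecutive rows. A scalar sketch of that contract follows; the `ld4_rows` name and the out[] array are assumptions for illustration, and the real macro expands to unaligned MIPS/MSA loads rather than memcpy.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Scalar sketch of LD4(psrc, stride, out0, out1, out2, out3): load four
 * 64-bit values from rows that start `stride` bytes apart. */
static inline void ld4_rows(const uint8_t *psrc, ptrdiff_t stride,
                            uint64_t out[4])
{
    memcpy(&out[0], psrc + 0 * stride, sizeof(uint64_t));
    memcpy(&out[1], psrc + 1 * stride, sizeof(uint64_t));
    memcpy(&out[2], psrc + 2 * stride, sizeof(uint64_t));
    memcpy(&out[3], psrc + 3 * stride, sizeof(uint64_t));
}
```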
/third_party/skia/third_party/externals/libwebp/ |
D | ChangeLog |
    1696 73ba2915 MIPS: dspr2: added optimization for functions RD4 and LD4
    1944 fe395f0e dec_neon: add LD4 intra predictor
|
/third_party/vixl/doc/aarch64/ |
D | supported-instructions-aarch64.md |
    4466 ### LD4 ###   subsection
    4478 ### LD4 ###   subsection
|