/external/libvpx/libvpx/vpx_dsp/mips/

D | vpx_convolve_msa.h
      100   tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \
      109   tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \

D | sub_pixel_variance_msa.c
      64    src = __msa_aver_u_b(src, pred);    in avg_sse_diff_4width_msa()
      126   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      135   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      144   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      153   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      983   out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_4width_h_msa()
      1028  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_8width_h_msa()
      1033  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_8width_h_msa()
      1185  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_4width_v_msa()
      1399  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_4width_hv_msa()

D | deblock_msa.c
      56    temp1 = __msa_aver_u_b(above2_in, above1_in); \
      57    temp0 = __msa_aver_u_b(below2_in, below1_in); \
      58    temp1 = __msa_aver_u_b(temp1, temp0); \
      59    out = __msa_aver_u_b(src_in, temp1); \
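The deblock_msa.c macro above is worth noting: it synthesizes a roughly (4*src + above1 + above2 + below1 + below2) / 8 smoothing out of nothing but pairwise rounded averages, avoiding any widening arithmetic. A scalar model of one lane (hypothetical helper names; the real macro operates on whole v16u8 vectors):

    /* One lane of the cascaded-average smoother in deblock_msa.c.
     * aver_u() models __msa_aver_u_b on a single byte: (a + b + 1) >> 1. */
    static unsigned char aver_u(unsigned char a, unsigned char b) {
      return (unsigned char)((a + b + 1u) >> 1);
    }

    /* out is approximately (4*src + above1 + above2 + below1 + below2) / 8,
     * built from three pairwise rounded averages plus a final one with src. */
    static unsigned char deblock_smooth(unsigned char above2, unsigned char above1,
                                        unsigned char src, unsigned char below1,
                                        unsigned char below2) {
      unsigned char t1 = aver_u(above2, above1);   /* line 56 */
      unsigned char t0 = aver_u(below2, below1);   /* line 57 */
      t1 = aver_u(t1, t0);                         /* line 58 */
      return aver_u(src, t1);                      /* line 59 */
    }
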
D | vpx_convolve8_avg_msa.c
      82    res = (v16u8)__msa_aver_u_b(res, dst0);    in common_hv_8ht_8vt_and_aver_dst_4w_msa()
      260   out = __msa_aver_u_b(out, dst0);           in common_hv_2ht_2vt_and_aver_dst_4x4_msa()

D | vpx_convolve8_avg_vert_msa.c
      60    out = __msa_aver_u_b(out, dst0);           in common_vt_8t_and_aver_dst_4w_msa()
      276   out = __msa_aver_u_b(out, dst0);           in common_vt_2t_and_aver_dst_4x4_msa()

D | vpx_convolve8_avg_horiz_msa.c
      45    res = (v16u8)__msa_aver_u_b(res, dst0);    in common_hz_8t_and_aver_dst_4x4_msa()
      334   res = (v16u8)__msa_aver_u_b(res, dst0);    in common_hz_2t_and_aver_dst_4x4_msa()
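All three convolve8_avg files share the same closing step: the filtered result is rounded-averaged into the pixels already stored at dst, which is what the _avg convolution variants do. A scalar sketch of that store (hypothetical signature, not the libvpx API):

    /* Scalar model of the "_avg" store: blend the filtered result into
     * the existing destination with a per-byte rounded average. */
    static void store_avg_row(unsigned char *dst, const unsigned char *filtered,
                              int width) {
      for (int i = 0; i < width; ++i) {
        dst[i] = (unsigned char)((dst[i] + filtered[i] + 1u) >> 1);
      }
    }
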
D | macros_msa.h
      483   out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \
      484   out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
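Lines 483-484 of macros_msa.h are the body of a two-output averaging helper (AVER_UB2 in libvpx). Inferring the parameter list from the body, the surrounding macro plausibly looks like this (a sketch, not a copy of the header):

    /* Plausible shape of the AVER_UB2 helper: average two vector pairs
     * in one macro invocation, casting through v16u8 for the intrinsic. */
    #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)      \
      {                                                          \
        out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1);    \
        out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3);    \
      }
    #define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
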
D | sad_msa.c
      843   comp = __msa_aver_u_b(pred, ref);    in avgsad_4width_msa()
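avgsad_4width_msa compares the source block against the rounded average of a prediction and the reference, the metric used when evaluating averaged (compound) predictions. A one-byte-at-a-time model (hypothetical function; the MSA version processes 16 lanes per instruction):

    /* SAD between src and aver(pred, ref), mirroring
     * comp = __msa_aver_u_b(pred, ref) followed by SAD accumulation. */
    static unsigned avg_sad(const unsigned char *src, const unsigned char *pred,
                            const unsigned char *ref, int n) {
      unsigned sad = 0;
      for (int i = 0; i < n; ++i) {
        int comp = (pred[i] + ref[i] + 1) >> 1;
        int diff = (int)src[i] - comp;
        sad += (unsigned)(diff < 0 ? -diff : diff);
      }
      return sad;
    }
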
/external/libaom/libaom/aom_dsp/mips/

D | sub_pixel_variance_msa.c
      61    src = __msa_aver_u_b(src, pred);    in avg_sse_diff_4width_msa()
      123   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      132   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      141   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      150   src = __msa_aver_u_b(src, pred);    in avg_sse_diff_16width_msa()
      980   out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_4width_h_msa()
      1025  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_8width_h_msa()
      1030  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_8width_h_msa()
      1182  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_4width_v_msa()
      1396  out = __msa_aver_u_b(out, pred);    in sub_pixel_avg_sse_diff_4width_hv_msa()

D | sad_msa.c
      463   comp = __msa_aver_u_b(pred, ref);    in avgsad_4width_msa()

D | macros_msa.h
      611   out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \
      612   out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
/external/libvpx/libvpx/third_party/libyuv/source/

D | row_msa.cc
      672   vec0 = __msa_aver_u_b(src0, src2);    in YUY2ToUVRow_MSA()
      673   vec1 = __msa_aver_u_b(src1, src3);    in YUY2ToUVRow_MSA()
      737   vec0 = __msa_aver_u_b(src0, src2);    in UYVYToUVRow_MSA()
      738   vec1 = __msa_aver_u_b(src1, src3);    in UYVYToUVRow_MSA()
      2529  src0 = __msa_aver_u_b(src0, src4);    in ARGBToUVJRow_MSA()
      2530  src1 = __msa_aver_u_b(src1, src5);    in ARGBToUVJRow_MSA()
      2531  src2 = __msa_aver_u_b(src2, src6);    in ARGBToUVJRow_MSA()
      2532  src3 = __msa_aver_u_b(src3, src7);    in ARGBToUVJRow_MSA()
      2537  vec0 = __msa_aver_u_b(src4, src6);    in ARGBToUVJRow_MSA()
      2538  vec1 = __msa_aver_u_b(src5, src7);    in ARGBToUVJRow_MSA()
      [all …]

D | scale_msa.cc
      63    dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);    in ScaleARGBRowDown2Linear_MSA()
      223   dst0 = __msa_aver_u_b(vec1, vec0);    in ScaleRowDown2Linear_MSA()
      224   dst1 = __msa_aver_u_b(vec3, vec2);    in ScaleRowDown2Linear_MSA()
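In ScaleRowDown2Linear_MSA the two operands of the average are the even and odd pixels of the source row (gathered into vec0/vec1 beforehand), so each output pixel is the rounded mean of one horizontal pair. A scalar equivalent (hypothetical signature):

    /* 2:1 horizontal downscale with a linear (box) filter: every output
     * byte is the rounded average of one even/odd source pair. */
    static void scale_row_down2_linear(const unsigned char *src,
                                       unsigned char *dst, int dst_width) {
      for (int i = 0; i < dst_width; ++i) {
        dst[i] = (unsigned char)((src[2 * i] + src[2 * i + 1] + 1u) >> 1);
      }
    }
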
/external/libyuv/files/source/

D | row_msa.cc
      672   vec0 = __msa_aver_u_b(src0, src2);    in YUY2ToUVRow_MSA()
      673   vec1 = __msa_aver_u_b(src1, src3);    in YUY2ToUVRow_MSA()
      737   vec0 = __msa_aver_u_b(src0, src2);    in UYVYToUVRow_MSA()
      738   vec1 = __msa_aver_u_b(src1, src3);    in UYVYToUVRow_MSA()
      2529  src0 = __msa_aver_u_b(src0, src4);    in ARGBToUVJRow_MSA()
      2530  src1 = __msa_aver_u_b(src1, src5);    in ARGBToUVJRow_MSA()
      2531  src2 = __msa_aver_u_b(src2, src6);    in ARGBToUVJRow_MSA()
      2532  src3 = __msa_aver_u_b(src3, src7);    in ARGBToUVJRow_MSA()
      2537  vec0 = __msa_aver_u_b(src4, src6);    in ARGBToUVJRow_MSA()
      2538  vec1 = __msa_aver_u_b(src5, src7);    in ARGBToUVJRow_MSA()
      [all …]

D | scale_msa.cc
      63    dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);    in ScaleARGBRowDown2Linear_MSA()
      223   dst0 = __msa_aver_u_b(vec1, vec0);    in ScaleRowDown2Linear_MSA()
      224   dst1 = __msa_aver_u_b(vec3, vec2);    in ScaleRowDown2Linear_MSA()
/external/webp/src/dsp/

D | upsampling_msa.c
      537   v16u8 s = __msa_aver_u_b(a, d); \
      538   v16u8 t = __msa_aver_u_b(b, c); \
      545   v16u8 t3 = __msa_aver_u_b(s, t); \
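The upsampling_msa.c excerpt averages the two diagonals of a 2x2 chroma neighborhood and then averages those two results, giving an inexpensive approximation of the four-sample mean that WebP's fancy upsampler needs. A scalar model of one lane (a, b, c, d named as in the excerpt):

    /* Approximate (a + b + c + d + 2) / 4 with three rounded pairwise
     * averages; the cascade can differ from the exact mean by rounding. */
    static unsigned char avg4(unsigned char a, unsigned char b,
                              unsigned char c, unsigned char d) {
      unsigned char s = (unsigned char)((a + d + 1u) >> 1);  /* line 537 */
      unsigned char t = (unsigned char)((b + c + 1u) >> 1);  /* line 538 */
      return (unsigned char)((s + t + 1u) >> 1);             /* line 545 */
    }
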
D | enc_msa.c
      270   const v16u8 R = __msa_aver_u_b(AC, B2);     in VE4()
      305   const v16u8 R0 = __msa_aver_u_b(AC, B2);    in RD4()
      325   const v16u8 R0 = __msa_aver_u_b(AC, B2);    in LD4()

D | dec_msa.c
      719   R = __msa_aver_u_b(AC, B2);    in VE4()
      741   R = __msa_aver_u_b(AC, B2);    in RD4()
      765   R = __msa_aver_u_b(AC, B2);    in LD4()

D | msa_macro.h
      1387  out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \
      1388  out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
/external/llvm-project/clang/lib/Headers/

D | msa.h
      259   #define __msa_aver_u_b __builtin_msa_aver_u_b    macro
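msa.h simply maps the public intrinsic name onto the compiler builtin. The operation behind it, AVER_U.B, is a lane-wise unsigned average with rounding; the reference behavior for a single byte lane is shown below (the vector form repeats this across all 16 lanes of a v16u8):

    /* Reference semantics of one __msa_aver_u_b lane: the +1 before the
     * shift rounds up, distinguishing AVER_U.B from the truncating AVE_U.B. */
    static unsigned char aver_u_b_lane(unsigned char a, unsigned char b) {
      return (unsigned char)(((unsigned)a + (unsigned)b + 1u) >> 1);
    }
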
/external/llvm-project/clang/test/CodeGen/

D | builtins-mips-msa.c
      131   v16u8_r = __msa_aver_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.aver.u.b(    in test()
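The CodeGen test pins down the lowering: the builtin becomes a call to the llvm.mips.aver.u.b intrinsic. For completeness, a minimal standalone use looks like the sketch below; the target triple and flags are one plausible invocation, not the test's actual RUN line:

    /* Assumed build command (not taken from the test suite):
     *   clang --target=mips-linux-gnu -mmsa -O2 -S aver_demo.c */
    #include <msa.h>

    v16u8 rounded_avg(v16u8 a, v16u8 b) {
      return __msa_aver_u_b(a, b);  /* lowers to llvm.mips.aver.u.b / AVER_U.B */
    }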