/external/libmpeg2/common/x86/

D | impeg2_inter_pred_sse42_intr.c
    247  src1_r0 = _mm_avg_epu8 (src1_r0, src2_r0);   in impeg2_interpolate_sse42()
    248  src1_r1 = _mm_avg_epu8 (src1_r1, src2_r1);   in impeg2_interpolate_sse42()
    249  src1_r2 = _mm_avg_epu8 (src1_r2, src2_r2);   in impeg2_interpolate_sse42()
    250  src1_r3 = _mm_avg_epu8 (src1_r3, src2_r3);   in impeg2_interpolate_sse42()
    271  src1_r0 = _mm_avg_epu8 (src1_r0, src2_r0);   in impeg2_interpolate_sse42()
    272  src1_r1 = _mm_avg_epu8 (src1_r1, src2_r1);   in impeg2_interpolate_sse42()
    273  src1_r2 = _mm_avg_epu8 (src1_r2, src2_r2);   in impeg2_interpolate_sse42()
    274  src1_r3 = _mm_avg_epu8 (src1_r3, src2_r3);   in impeg2_interpolate_sse42()
    295  src1_r0 = _mm_avg_epu8 (src1_r0, src2_r0);   in impeg2_interpolate_sse42()
    296  src1_r1 = _mm_avg_epu8 (src1_r1, src2_r1);   in impeg2_interpolate_sse42()
    [all …]
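Note: every hit in this index reduces to the same primitive: _mm_avg_epu8(a, b) returns the per-byte rounding average (a + b + 1) >> 1, computed in 9-bit precision so it cannot overflow. That is exactly the average MPEG-2 prescribes for half-pel and bidirectional prediction, which is why the interpolation loop above can merge two predictions in one instruction per 16 pixels. A minimal standalone sketch of the pattern (buffer names are illustrative, not taken from the file above):

    #include <emmintrin.h>  /* _mm_avg_epu8 is SSE2, despite the _sse42 file name */
    #include <stdint.h>
    #include <stdio.h>

    /* dst[i] = (pred0[i] + pred1[i] + 1) >> 1 for one 16-pixel row. */
    static void average_row16(uint8_t *dst, const uint8_t *pred0, const uint8_t *pred1) {
      const __m128i a = _mm_loadu_si128((const __m128i *)pred0);
      const __m128i b = _mm_loadu_si128((const __m128i *)pred1);
      _mm_storeu_si128((__m128i *)dst, _mm_avg_epu8(a, b));
    }

    int main(void) {
      uint8_t pred0[16], pred1[16], dst[16];
      for (int i = 0; i < 16; ++i) { pred0[i] = (uint8_t)(16 * i); pred1[i] = (uint8_t)(255 - i); }
      average_row16(dst, pred0, pred1);
      for (int i = 0; i < 16; ++i) printf("%d ", dst[i]);  /* equals (pred0[i] + pred1[i] + 1) >> 1 */
      putchar('\n');
      return 0;
    }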
D | ideint_cac_ssse3.c
    173  avg1 = _mm_avg_epu8(top[0], top[1]);   in ideint_cac_8x8_ssse3()
    174  avg2 = _mm_avg_epu8(top[2], top[3]);   in ideint_cac_8x8_ssse3()
    175  top_avg = _mm_avg_epu8(avg1, avg2);   in ideint_cac_8x8_ssse3()
    177  avg1 = _mm_avg_epu8(bot[0], bot[1]);   in ideint_cac_8x8_ssse3()
    178  avg2 = _mm_avg_epu8(bot[2], bot[3]);   in ideint_cac_8x8_ssse3()
    179  bot_avg = _mm_avg_epu8(avg1, avg2);   in ideint_cac_8x8_ssse3()
    210  avg1 = _mm_avg_epu8(top[0], bot[0]);   in ideint_cac_8x8_ssse3()
    211  avg2 = _mm_avg_epu8(top[2], bot[2]);   in ideint_cac_8x8_ssse3()
    212  even_avg = _mm_avg_epu8(avg1, avg2);   in ideint_cac_8x8_ssse3()
    214  avg1 = _mm_avg_epu8(top[1], bot[1]);   in ideint_cac_8x8_ssse3()
    [all …]
D | ideint_spatial_filter_ssse3.c
    231  dst = _mm_avg_epu8(row1, row2);   in ideint_spatial_filter_ssse3()
/external/libavc/common/x86/

D | ih264_weighted_pred_sse42.c
    113  y0_0_16x8b = _mm_avg_epu8(y0_0_16x8b, y1_0_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    114  y0_1_16x8b = _mm_avg_epu8(y0_1_16x8b, y1_1_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    115  y0_2_16x8b = _mm_avg_epu8(y0_2_16x8b, y1_2_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    116  y0_3_16x8b = _mm_avg_epu8(y0_3_16x8b, y1_3_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    146  y0_0_16x8b = _mm_avg_epu8(y0_0_16x8b, y1_0_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    147  y0_1_16x8b = _mm_avg_epu8(y0_1_16x8b, y1_1_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    148  y0_2_16x8b = _mm_avg_epu8(y0_2_16x8b, y1_2_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    149  y0_3_16x8b = _mm_avg_epu8(y0_3_16x8b, y1_3_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    192  y0_0_16x8b = _mm_avg_epu8(y0_0_16x8b, y1_0_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    193  y0_1_16x8b = _mm_avg_epu8(y0_1_16x8b, y1_1_16x8b);   in ih264_default_weighted_pred_luma_sse42()
    [all …]
D | ih264_inter_pred_filters_ssse3.c
    1578  … res_r0r1_16x8b = _mm_avg_epu8(src_r0r1_16x8b, res_r0r1_16x8b); //computing q-pel   in ih264_inter_pred_luma_horz_qpel_ssse3()
    1661  res_r0_16x8b = _mm_avg_epu8(src_r0_16x8b, res_r0_16x8b);   in ih264_inter_pred_luma_horz_qpel_ssse3()
    1662  … res_r1_16x8b = _mm_avg_epu8(src_r1_16x8b, res_r1_16x8b); //computing q-pel   in ih264_inter_pred_luma_horz_qpel_ssse3()
    1742  … res_16x8b = _mm_avg_epu8(src_r0_16x8b, res_16x8b); //computing q-pel   in ih264_inter_pred_luma_horz_qpel_ssse3()
    1869  res_16x8b = _mm_avg_epu8(src_r0r1_16x8b, res_16x8b); //computing q-pel   in ih264_inter_pred_luma_vert_qpel_ssse3()
    1934  res_16x8b = _mm_avg_epu8(src_r0r1_16x8b, res_16x8b); //computing q-pel   in ih264_inter_pred_luma_vert_qpel_ssse3()
    1955  res_16x8b = _mm_avg_epu8(src_r0r1_16x8b, res_16x8b); //computing q-pel   in ih264_inter_pred_luma_vert_qpel_ssse3()
    2025  res_16x8b = _mm_avg_epu8(src_r0r1_16x8b, res_16x8b); //computing q-pel   in ih264_inter_pred_luma_vert_qpel_ssse3()
    2060  res_16x8b = _mm_avg_epu8(src_r0r1_16x8b, res_16x8b); //computing q-pel   in ih264_inter_pred_luma_vert_qpel_ssse3()
    2273  res_r0r1_16x8b = _mm_avg_epu8(res_r0r1_16x8b,src_r0r1_vpel_16x8b);   in ih264_inter_pred_luma_horz_qpel_vert_qpel_ssse3()
    [all …]
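Note: the "//computing q-pel" hits are the last step of H.264 luma interpolation, where a quarter-pel sample is the upward-rounding average of the 6-tap half-pel result and the nearest full- or half-pel sample. A sketch of that final step under assumed inputs (full and half are hypothetical names for rows that have already been filtered and clipped to 0..255):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Quarter-pel = (full + half + 1) >> 1, applied to one 16-pixel row. */
    static void qpel_row16(uint8_t *dst, const uint8_t *full, const uint8_t *half) {
      const __m128i f = _mm_loadu_si128((const __m128i *)full);
      const __m128i h = _mm_loadu_si128((const __m128i *)half);
      _mm_storeu_si128((__m128i *)dst, _mm_avg_epu8(f, h));
    }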
D | ih264_luma_intra_pred_filters_ssse3.c
    520  row1_16x8b = _mm_avg_epu8(w11_a1_16x8b, w11_a2_16x8b);   in ih264_intra_pred_luma_4x4_mode_vert_r_ssse3()
    608  w11_16x8b = _mm_avg_epu8(val_16x8b, val_sh_16x8b);   in ih264_intra_pred_luma_4x4_mode_horz_d_ssse3()
    702  row1_16x8b = _mm_avg_epu8(val_16x8b, val_sh_16x8b);   in ih264_intra_pred_luma_4x4_mode_vert_l_ssse3()
    796  w11_16x8b = _mm_avg_epu8(val_16x8b, val_sh_16x8b);   in ih264_intra_pred_luma_4x4_mode_horz_u_ssse3()
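Note: in these intra hits, the name val_sh_16x8b suggests the vector is averaged with a byte-shifted copy of itself, which yields every adjacent-neighbor average (p[i] + p[i+1] + 1) >> 1 in two instructions — the two-tap half of the diagonal 4x4 prediction modes. A sketch of that idiom (the shift direction is an assumption; the real code may shuffle instead):

    #include <emmintrin.h>

    /* out[i] = (p[i] + p[i+1] + 1) >> 1 for i = 0..14; lane 15 averages
     * with the zero byte shifted in and is discarded by the caller. */
    static __m128i adjacent_pair_avg(__m128i p) {
      return _mm_avg_epu8(p, _mm_srli_si128(p, 1));
    }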
/external/webp/src/dsp/

D | upsampling_sse41.c
    44  const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \
    54  const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
    55  const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
    70  const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \
    71  const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \
    80  const __m128i t4 = _mm_avg_epu8(s, t); \
D | upsampling_sse2.c
    42  const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \
    52  const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
    53  const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
    68  const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \
    69  const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \
    78  const __m128i t4 = _mm_avg_epu8(s, t); \
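Note: the comments in both upsampling files document the arithmetic: WebP's fancy upsampler builds its (9, 3, 3, 1)/16 bilinear weights out of nested rounding averages — s and t average the two diagonals of a 2x2 block, a further average approximates the 4-tap mean, and one more average against the nearest source pixel gives the 9-3-3-1 blend. Because every _mm_avg_epu8 rounds up, naive nesting can overshoot the exactly-rounded value by up to 1; the xor/and lines surrounding these hits (elided from the listing) subtract that bias. A sketch of the biased core only:

    #include <emmintrin.h>

    /* Approximate (a + b + c + d + 2) >> 2 via two levels of rounding
     * averages. Each level rounds up, so the result may exceed the exact
     * value by 1; the real UPSAMPLE macros cancel this with low-bit
     * corrections, per the comments quoted above. */
    static __m128i mean4_biased(__m128i a, __m128i b, __m128i c, __m128i d) {
      const __m128i s = _mm_avg_epu8(a, d);  /* (a + d + 1) >> 1 */
      const __m128i t = _mm_avg_epu8(b, c);  /* (b + c + 1) >> 1 */
      return _mm_avg_epu8(s, t);
    }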
D | dec_sse2.c
    414  t3 = _mm_avg_epu8(t2, zero);   in DoFilter4_SSE2()
    907  const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00);   in VE4_SSE2()
    910  const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);   in VE4_SSE2()
    924  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0);   in LD4_SSE2()
    927  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);   in LD4_SSE2()
    942  const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0);   in VR4_SSE2()
    945  const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0);   in VR4_SSE2()
    948  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);   in VR4_SSE2()
    964  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_);   in VL4_SSE2()
    965  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);   in VL4_SSE2()
    [all …]
D | enc_sse2.c
    721  const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00);   in VE4_SSE2()
    724  const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);   in VE4_SSE2()
    759  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0);   in LD4_SSE2()
    762  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);   in LD4_SSE2()
    778  const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0);   in VR4_SSE2()
    781  const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0);   in VR4_SSE2()
    784  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);   in VR4_SSE2()
    801  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_);   in VL4_SSE2()
    802  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);   in VL4_SSE2()
    803  const __m128i avg3 = _mm_avg_epu8(avg1, avg2);   in VL4_SSE2()
    [all …]
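Note: the VE4/LD4/VR4/VL4 hits in both the decoder and the encoder are VP8's 4x4 intra smoothing filter (A + 2B + C + 2) >> 2, built exactly from two averages: the first _mm_avg_epu8 rounds up, the lsb lines between the hits (elided above) subtract the low bit of A ^ C to recover floor((A + C) / 2), and the second average folds in B. A reconstruction of the identity (the arithmetic checks out, but this is not a verbatim copy of either file):

    #include <emmintrin.h>

    /* Exact per-byte (a + 2*b + c + 2) >> 2 without widening to 16 bits. */
    static __m128i smooth3(__m128i a, __m128i b, __m128i c) {
      const __m128i one = _mm_set1_epi8(1);
      const __m128i avg = _mm_avg_epu8(a, c);                      /* (a + c + 1) >> 1 */
      const __m128i lsb = _mm_and_si128(_mm_xor_si128(a, c), one); /* carry of a + c   */
      const __m128i flr = _mm_sub_epi8(avg, lsb);                  /* (a + c) >> 1     */
      return _mm_avg_epu8(flr, b);                 /* ((a + c)/2 + b + 1) >> 1         */
    }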
D | yuv_sse41.c
    528  U0 = _mm_avg_epu8(U0, prev_u);   in ConvertARGBToUV_SSE41()
    529  V0 = _mm_avg_epu8(V0, prev_v);   in ConvertARGBToUV_SSE41()
D | lossless_sse2.c
    88   const __m128i avg1 = _mm_avg_epu8(*a0, *a1);   in Average2_m128i()
    100  const __m128i avg1 = _mm_avg_epu8(A0, A1);   in Average2_uint32_SSE2()
/external/libvpx/libvpx/vpx_dsp/x86/

D | avg_pred_sse2.c
    29  const __m128i avg = _mm_avg_epu8(p, r);   in vpx_comp_avg_pred_sse2()
    62  avg = _mm_avg_epu8(p, r);   in vpx_comp_avg_pred_sse2()
D | vpx_subpixel_8t_intrin_avx2.c
    104  outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));   in vpx_filter_block1d16_h8_x_avx2()
    105  outReg2 = _mm_avg_epu8(   in vpx_filter_block1d16_h8_x_avx2()
    158  outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));   in vpx_filter_block1d16_h8_x_avx2()
    263  outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));   in vpx_filter_block1d16_v8_x_avx2()
    264  outReg2 = _mm_avg_epu8(   in vpx_filter_block1d16_v8_x_avx2()
    308  outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));   in vpx_filter_block1d16_v8_x_avx2()
/external/swiftshader/src/Renderer/

D | Surface.cpp
    3829  c0 = _mm_avg_epu8(c0, c1);   in resolve()
    3849  c0 = _mm_avg_epu8(c0, c1);   in resolve()
    3850  c2 = _mm_avg_epu8(c2, c3);   in resolve()
    3851  c0 = _mm_avg_epu8(c0, c2);   in resolve()
    3877  c0 = _mm_avg_epu8(c0, c1);   in resolve()
    3878  c2 = _mm_avg_epu8(c2, c3);   in resolve()
    3879  c4 = _mm_avg_epu8(c4, c5);   in resolve()
    3880  c6 = _mm_avg_epu8(c6, c7);   in resolve()
    3881  c0 = _mm_avg_epu8(c0, c2);   in resolve()
    3882  c4 = _mm_avg_epu8(c4, c6);   in resolve()
    [all …]
/external/pdfium/third_party/libpng16/intel/

D | filter_sse2_intrinsics.c
    134  avg = _mm_avg_epu8(a,b);   in png_read_filter_row_avg3_sse2()
    151  avg = _mm_avg_epu8(a,b);   in png_read_filter_row_avg3_sse2()
    187  avg = _mm_avg_epu8(a,b);   in png_read_filter_row_avg4_sse2()
/external/libpng/intel/

D | filter_sse2_intrinsics.c
    134  avg = _mm_avg_epu8(a,b);   in png_read_filter_row_avg3_sse2()
    151  avg = _mm_avg_epu8(a,b);   in png_read_filter_row_avg3_sse2()
    187  avg = _mm_avg_epu8(a,b);   in png_read_filter_row_avg4_sse2()
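Note: both copies of this file (pdfium bundles its own libpng16) implement PNG's Average filter, whose predictor is the truncating floor((left + up) / 2). _mm_avg_epu8 rounds up instead, so each hit is followed (elided above) by a correction that subtracts 1 wherever left + up was odd. The standard form of that correction, as a standalone sketch rather than a verbatim quote of libpng:

    #include <emmintrin.h>

    /* PNG Average predictor: floor((left + up) / 2) per byte. */
    static __m128i png_avg_predict(__m128i left, __m128i up) {
      const __m128i one = _mm_set1_epi8(1);
      const __m128i avg = _mm_avg_epu8(left, up);   /* rounds up       */
      const __m128i odd = _mm_and_si128(_mm_xor_si128(left, up), one);
      return _mm_sub_epi8(avg, odd);                /* now rounds down */
    }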
/external/libgav1/libgav1/src/dsp/x86/

D | convolve_sse4.cc
    1348  StoreUnaligned16(dst, _mm_avg_epu8(left, right));   in HalfAddHorizontal()
    1419  StoreLo8(dest, _mm_avg_epu8(left, right));   in ConvolveIntraBlockCopyHorizontal_SSE4_1()
    1434  const __m128i result = _mm_avg_epu8(left, right);   in ConvolveIntraBlockCopyHorizontal_SSE4_1()
    1455  const __m128i result = _mm_avg_epu8(left, right);   in ConvolveIntraBlockCopyHorizontal_SSE4_1()
    1522  StoreUnaligned16(dst, _mm_avg_epu8(row[0], below[0]));   in IntraBlockCopyVertical()
    1526  StoreUnaligned16(dst, _mm_avg_epu8(row[1], below[1]));   in IntraBlockCopyVertical()
    1530  StoreUnaligned16(dst, _mm_avg_epu8(row[2], below[2]));   in IntraBlockCopyVertical()
    1533  StoreUnaligned16(dst, _mm_avg_epu8(row[3], below[3]));   in IntraBlockCopyVertical()
    1537  StoreUnaligned16(dst, _mm_avg_epu8(row[4], below[4]));   in IntraBlockCopyVertical()
    1540  StoreUnaligned16(dst, _mm_avg_epu8(row[5], below[5]));   in IntraBlockCopyVertical()
    [all …]
/external/libaom/libaom/aom_dsp/x86/

D | blend_a64_mask_sse4.c
    116  const __m128i v_m0_b = _mm_avg_epu8(v_r_lo_b, v_r_hi_b);   in blend_a64_mask_sx_w4_sse4_1()
    143  const __m128i v_m0_b = _mm_avg_epu8(v_r_lo_b, v_r_hi_b);   in blend_a64_mask_sx_w8_sse4_1()
    174  const __m128i v_m0_b = _mm_avg_epu8(v_r_lo_b, v_r_hi_b);   in blend_a64_mask_sx_w16n_sse4_1()
    205  const __m128i v_m0_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_sy_w4_sse4_1()
    230  const __m128i v_m0_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_sy_w8_sse4_1()
    254  const __m128i v_m0_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_sy_w16n_sse4_1()
    528  const __m128i v_a_b = _mm_avg_epu8(v_r_b, _mm_srli_si128(v_r_b, 1));   in blend_a64_mask_bn_sx_w4_sse4_1()
    577  const __m128i v_a_b = _mm_avg_epu8(v_r_b, _mm_srli_si128(v_r_b, 1));   in blend_a64_mask_bn_sx_w8n_sse4_1()
    624  const __m128i v_a_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_bn_sy_w4_sse4_1()
    672  const __m128i v_a_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_bn_sy_w8n_sse4_1()
    [all …]
D | blend_mask_sse4.h
    205  const __m128i m = _mm_cvtepu8_epi16(_mm_avg_epu8(m_ac, zeros));   in aom_lowbd_blend_a64_d16_mask_subw0_subh1_w4_sse4_1()
    227  const __m128i m = _mm_cvtepu8_epi16(_mm_avg_epu8(m_ac, zeros));   in aom_lowbd_blend_a64_d16_mask_subw0_subh1_w8_sse4_1()
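Note: here _mm_avg_epu8(m_ac, zeros) is used as a rounding halve, since averaging with zero computes (x + 1) >> 1. For a vertically 2x-subsampled blend mask, two mask rows are first summed and then halved with rounding, as the m_i00/m_i10 hit in blend_a64_mask_avx2.c below spells out. A sketch under those assumptions (AV1 blend mask values are at most 64, so the saturating byte add never actually saturates):

    #include <emmintrin.h>

    /* m = (m_row0 + m_row1 + 1) >> 1 for AV1 blend masks (values <= 64). */
    static __m128i halve_mask_rows(__m128i m_row0, __m128i m_row1) {
      const __m128i zeros = _mm_setzero_si128();
      const __m128i sum   = _mm_adds_epu8(m_row0, m_row1);  /* <= 128, no saturation */
      return _mm_avg_epu8(sum, zeros);                      /* (sum + 1) >> 1        */
    }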
D | masked_variance_intrin_ssse3.c
    170  _mm_storeu_si128((__m128i *)&b[j], _mm_avg_epu8(x, z));   in bilinear_filter()
    201  _mm_storeu_si128((__m128i *)&dst[j], _mm_avg_epu8(x, y));   in bilinear_filter()
    252  _mm_storel_epi64((__m128i *)b, _mm_avg_epu8(x, z));   in bilinear_filter8xh()
    289  _mm_storel_epi64((__m128i *)dst, _mm_avg_epu8(x, y));   in bilinear_filter8xh()
    324  xx_storel_32((__m128i *)b, _mm_avg_epu8(x, z));   in bilinear_filter4xh()
    370  xx_storel_32((__m128i *)dst, _mm_avg_epu8(x, y));   in bilinear_filter4xh()
D | blend_a64_mask_avx2.c
    243  const __m128i m_ac = _mm_avg_epu8(_mm_adds_epu8(m_i00, m_i10), zeros);   in lowbd_blend_a64_d16_mask_subw0_subh1_w16_avx2()
    654  const __m128i v_m0_b = _mm_avg_epu8(v_r_lo_b, v_r_hi_b);   in blend_a64_mask_sx_avx2()
    673  const __m128i v_m0_b = _mm_avg_epu8(v_r_lo_b, v_r_hi_b);   in blend_a64_mask_sx_avx2()
    706  const __m128i v_m0_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_sy_w16_avx2()
    754  const __m128i v_m0_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_sy_avx2()
    770  const __m128i v_m0_b = _mm_avg_epu8(v_ra_b, v_rb_b);   in blend_a64_mask_sy_avx2()
D | variance_impl_avx2.c
    444  src_reg_0 = _mm_avg_epu8(src_reg_0, src_reg_1);   in aom_sub_pixel_variance16xh_avx2()
    478  src_reg_0 = _mm_avg_epu8(src_reg_0, src_reg_1);   in aom_sub_pixel_variance16xh_avx2()
/external/fec/

D | viterbi27_sse2.c
    144  metric = _mm_avg_epu8(_mm_xor_si128(Branchtab27_sse2[0].v[i],sym0v),_mm_xor_si128(Branchtab27_sse2…
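Note: this hit computes a soft-decision branch metric for the rate-1/2, K=7 convolutional code. XOR against a branch-table byte (0x00 or 0xFF per expected bit) turns each received soft symbol into a per-state disagreement in 0..255, and _mm_avg_epu8 folds the two symbols' disagreements into a single byte metric without overflow. A sketch of the metric step (names hypothetical; the real loop indexes the branch table per state group):

    #include <emmintrin.h>

    /* metric = ((exp0 ^ rx0) + (exp1 ^ rx1) + 1) >> 1, per byte (16 states).
     * exp0/exp1 are branch-table bytes (0x00 or 0xFF); rx0/rx1 are the two
     * received soft symbols broadcast to all lanes. */
    static __m128i branch_metric(__m128i exp0, __m128i exp1,
                                 __m128i rx0,  __m128i rx1) {
      return _mm_avg_epu8(_mm_xor_si128(exp0, rx0),
                          _mm_xor_si128(exp1, rx1));
    }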
/external/libaom/libaom/aom_dsp/simd/

D | v64_intrinsics_x86.h
    383  SIMD_INLINE v64 v64_avg_u8(v64 a, v64 b) { return _mm_avg_epu8(a, b); }   in v64_avg_u8()
    386  return _mm_sub_epi8(_mm_avg_epu8(a, b),   in v64_rdavg_u8()
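Note: v64_avg_u8 is a direct wrapper over the rounding-up average, while v64_rdavg_u8 is the round-down variant. Line 386 is truncated above; the standard completion of _mm_sub_epi8(_mm_avg_epu8(a, b), …) masks the low bit of a ^ b — the same correction the libpng Average filter uses. A hedged reconstruction (an assumption, not verified against the header):

    #include <emmintrin.h>

    /* Round-down average (a + b) >> 1: undo _mm_avg_epu8's upward rounding
     * by subtracting 1 wherever a + b was odd. */
    static __m128i rdavg_u8(__m128i a, __m128i b) {
      return _mm_sub_epi8(_mm_avg_epu8(a, b),
                          _mm_and_si128(_mm_xor_si128(a, b), _mm_set1_epi8(1)));
    }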