/external/libvpx/libvpx/vpx_dsp/x86/ |
D | highbd_intrapred_intrin_ssse3.c |
      32   const __m128i a = _mm_avg_epu16(*x, *z);   in avg3_epu16()
      35   return _mm_avg_epu16(b, *y);   in avg3_epu16()
     192   const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, XABCDEFG);   in vpx_highbd_d117_predictor_8x8_ssse3()
     219   const __m128i avg2_0 = _mm_avg_epu16(A0, B0);   in vpx_highbd_d117_predictor_16x16_ssse3()
     220   const __m128i avg2_1 = _mm_avg_epu16(A1, B1);   in vpx_highbd_d117_predictor_16x16_ssse3()
     269   const __m128i avg2_0 = _mm_avg_epu16(A0, B0);   in vpx_highbd_d117_predictor_32x32_ssse3()
     270   const __m128i avg2_1 = _mm_avg_epu16(A1, B1);   in vpx_highbd_d117_predictor_32x32_ssse3()
     271   const __m128i avg2_2 = _mm_avg_epu16(A2, B2);   in vpx_highbd_d117_predictor_32x32_ssse3()
     272   const __m128i avg2_3 = _mm_avg_epu16(A3, B3);   in vpx_highbd_d117_predictor_32x32_ssse3()
     466   const __m128i avg2_left = _mm_avg_epu16(IJKLMNOP, XIJKLMNO);   in vpx_highbd_d153_predictor_8x8_ssse3()
     [all …]
|
D | highbd_intrapred_intrin_sse2.c |
     390   const __m128i a = _mm_avg_epu16(*x, *z);   in avg3_epu16()
     393   return _mm_avg_epu16(b, *y);   in avg3_epu16()
     408   const __m128i avg2 = _mm_avg_epu16(KJIXABCD, JIXABCD0);   in vpx_highbd_d117_predictor_4x4_sse2()
     472   const __m128i avg2 = _mm_avg_epu16(LKJIXABC, KJIXABC0);   in vpx_highbd_d153_predictor_4x4_sse2()
     497   const __m128i avg2 = _mm_avg_epu16(IJKLLLLL, JKLLLLL0);   in vpx_highbd_d207_predictor_4x4_sse2()
     520   const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGH0);   in vpx_highbd_d63_predictor_4x4_sse2()
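Both intrapred files above build their 3-tap smoothing filter out of two rounded averages. A minimal sketch of that avg3_epu16 idiom (a standalone helper written here for illustration, not the exact library source) computes (x + 2*y + z + 2) >> 2 per unsigned 16-bit lane:

    #include <emmintrin.h>

    /* _mm_avg_epu16(x, z) gives (x + z + 1) >> 1; subtracting the carry bit
     * ((x ^ z) & 1) turns it into the truncating (x + z) >> 1, and a second
     * rounded average with y yields (x + 2*y + z + 2) >> 2. */
    static __m128i avg3_epu16_sketch(__m128i x, __m128i y, __m128i z) {
      const __m128i one = _mm_set1_epi16(1);
      const __m128i a = _mm_avg_epu16(x, z);
      const __m128i b = _mm_subs_epu16(a, _mm_and_si128(_mm_xor_si128(x, z), one));
      return _mm_avg_epu16(b, y);
    }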
|
D | highbd_convolve_avx2.c |
     170   _mm_storeu_si128((__m128i *)dst, _mm_avg_epu16(p0, u0));   in vpx_highbd_convolve_avg_avx2()
     171   _mm_storeu_si128((__m128i *)(dst + dst_stride), _mm_avg_epu16(p1, u1));   in vpx_highbd_convolve_avg_avx2()
     184   _mm_storel_epi64((__m128i *)dst, _mm_avg_epu16(u0, p0));   in vpx_highbd_convolve_avg_avx2()
     185   _mm_storel_epi64((__m128i *)(dst + dst_stride), _mm_avg_epu16(u1, p1));   in vpx_highbd_convolve_avg_avx2()
     827   res = _mm_avg_epu16(res, pix);   in store_8x1_avg_pixels()
     875   res = _mm_avg_epu16(res, pix);   in store_8x1_2t_avg_pixels_ver()
|
/external/swiftshader/src/Device/ |
D | Surface.cpp |
    3672   c0 = _mm_avg_epu16(c0, c1);   in resolve()
    3692   c0 = _mm_avg_epu16(c0, c1);   in resolve()
    3693   c2 = _mm_avg_epu16(c2, c3);   in resolve()
    3694   c0 = _mm_avg_epu16(c0, c2);   in resolve()
    3720   c0 = _mm_avg_epu16(c0, c1);   in resolve()
    3721   c2 = _mm_avg_epu16(c2, c3);   in resolve()
    3722   c4 = _mm_avg_epu16(c4, c5);   in resolve()
    3723   c6 = _mm_avg_epu16(c6, c7);   in resolve()
    3724   c0 = _mm_avg_epu16(c0, c2);   in resolve()
    3725   c4 = _mm_avg_epu16(c4, c6);   in resolve()
    [all …]
|
/external/swiftshader/src/Renderer/ |
D | Surface.cpp |
    4344   c0 = _mm_avg_epu16(c0, c1);   in resolve()
    4364   c0 = _mm_avg_epu16(c0, c1);   in resolve()
    4365   c2 = _mm_avg_epu16(c2, c3);   in resolve()
    4366   c0 = _mm_avg_epu16(c0, c2);   in resolve()
    4392   c0 = _mm_avg_epu16(c0, c1);   in resolve()
    4393   c2 = _mm_avg_epu16(c2, c3);   in resolve()
    4394   c4 = _mm_avg_epu16(c4, c5);   in resolve()
    4395   c6 = _mm_avg_epu16(c6, c7);   in resolve()
    4396   c0 = _mm_avg_epu16(c0, c2);   in resolve()
    4397   c4 = _mm_avg_epu16(c4, c6);   in resolve()
    [all …]
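Both SwiftShader Surface.cpp copies resolve multisampled 16-bit color by averaging samples pairwise in a tree. A hedged sketch of the 4-sample case shown above (helper name invented here):

    #include <emmintrin.h>

    /* 4-sample resolve by pairwise rounded averages, as in the resolve()
     * lines above.  Each _mm_avg_epu16 rounds up, so the result can sit
     * slightly above the exact (c0 + c1 + c2 + c3 + 2) >> 2. */
    static __m128i resolve4_sketch(__m128i c0, __m128i c1, __m128i c2, __m128i c3) {
      c0 = _mm_avg_epu16(c0, c1);
      c2 = _mm_avg_epu16(c2, c3);
      return _mm_avg_epu16(c0, c2);
    }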
|
/external/libaom/libaom/aom_dsp/x86/ |
D | synonyms.h |
      89   return _mm_avg_epu16(v_val_w, _mm_setzero_si128());   in xx_round_epu16()
      94   return _mm_avg_epu16(v_s_w, _mm_setzero_si128());   in xx_roundn_epu16()
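The synonyms.h helpers lean on the fact that averaging against zero is a rounding halve: _mm_avg_epu16(v, 0) = (v + 1) >> 1. A minimal illustration (hypothetical helper name):

    #include <emmintrin.h>

    /* (v + 1) >> 1 per unsigned 16-bit lane, i.e. divide by two rounding up,
     * done with a single instruction instead of add + shift. */
    static __m128i halve_round_epu16(__m128i v) {
      return _mm_avg_epu16(v, _mm_setzero_si128());
    }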
|
D | blend_mask_sse4.h |
     162   const __m128i m = _mm_avg_epu16(m_ac, zeros);   in aom_lowbd_blend_a64_d16_mask_subw1_subh0_w4_sse4_1()
     184   const __m128i m = _mm_avg_epu16(m_ac, zeros);   in aom_lowbd_blend_a64_d16_mask_subw1_subh0_w8_sse4_1()
|
D | masked_variance_intrin_ssse3.c |
     749   _mm_storeu_si128((__m128i *)&b[j], _mm_avg_epu16(x, z));   in highbd_bilinear_filter()
     780   _mm_storeu_si128((__m128i *)&dst[j], _mm_avg_epu16(x, y));   in highbd_bilinear_filter()
     834   _mm_storel_epi64((__m128i *)b, _mm_avg_epu16(x, z));   in highbd_bilinear_filter4xh()
     872   _mm_storel_epi64((__m128i *)dst, _mm_avg_epu16(x, y));   in highbd_bilinear_filter4xh()
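In the bilinear filter routines above, _mm_avg_epu16 appears to cover the half-pel case of the 2-tap filter: when both taps are equal, the weighted sum, rounded and shifted, reduces to the rounded average (x + z + 1) >> 1. A sketch under that assumption:

    #include <emmintrin.h>

    /* Half-pel 2-tap bilinear: equal taps collapse the multiply/add/shift
     * sequence into one rounded average. */
    static __m128i bilinear_half_pel_sketch(__m128i x, __m128i z) {
      return _mm_avg_epu16(x, z);  /* (x + z + 1) >> 1 */
    }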
|
D | blend_a64_mask_sse4.c |
     971   const __m128i m0 = _mm_avg_epu16(m0_ac, zeros);   in lowbd_blend_a64_d16_mask_subw1_subh0_w16_sse4_1()
     972   const __m128i m1 = _mm_avg_epu16(m1_ac, zeros);   in lowbd_blend_a64_d16_mask_subw1_subh0_w16_sse4_1()
|
D | highbd_variance_sse2.c | 785 _mm_storeu_si128((__m128i *)comp_pred16, _mm_avg_epu16(s0, p0)); in aom_highbd_comp_avg_upsampled_pred_sse2()
|
/external/libavc/common/x86/ |
D | ih264_luma_intra_pred_filters_ssse3.c |
    1327   res3_8x16 = _mm_avg_epu16(a0_8x16, a1_8x16);   in ih264_intra_pred_luma_8x8_mode_vert_r_ssse3()
    1429   res1_8x16 = _mm_avg_epu16(a0_8x16, a1_8x16);   in ih264_intra_pred_luma_8x8_mode_horz_d_ssse3()
    1530   res1_8x16 = _mm_avg_epu16(a0_8x16, a1_8x16);   in ih264_intra_pred_luma_8x8_mode_vert_l_ssse3()
    1542   res3_8x16 = _mm_avg_epu16(a0_8x16, a1_8x16);   in ih264_intra_pred_luma_8x8_mode_vert_l_ssse3()
    1633   res1_8x16 = _mm_avg_epu16(a0_8x16, a1_8x16);   in ih264_intra_pred_luma_8x8_mode_horz_u_ssse3()
|
D | ih264_deblk_luma_ssse3.c |
    1340   temp1 = _mm_avg_epu16(_mm_unpacklo_epi8(q0_16x8, zero),   in ih264_deblk_luma_horz_bslt4_ssse3()
    1348   temp1 = _mm_avg_epu16(_mm_unpackhi_epi8(q0_16x8, zero),   in ih264_deblk_luma_horz_bslt4_ssse3()
    1375   temp1 = _mm_avg_epu16(_mm_unpacklo_epi8(q0_16x8, zero),   in ih264_deblk_luma_horz_bslt4_ssse3()
    1383   temp1 = _mm_avg_epu16(_mm_unpackhi_epi8(q0_16x8, zero),   in ih264_deblk_luma_horz_bslt4_ssse3()
    1939   temp1 = _mm_avg_epu16(_mm_unpacklo_epi8(q0_16x8, zero),   in ih264_deblk_luma_vert_bslt4_mbaff_ssse3()
    1962   temp1 = _mm_avg_epu16(_mm_unpacklo_epi8(q0_16x8, zero),   in ih264_deblk_luma_vert_bslt4_mbaff_ssse3()
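The deblocking-filter lines above widen 8-bit pixels to 16 bits before averaging so that later arithmetic keeps full precision. Roughly (helper name and operand pairing invented for illustration):

    #include <emmintrin.h>

    /* Widen the low eight u8 lanes of two pixel vectors to u16 and take the
     * rounded average (p + q + 1) >> 1, mirroring the unpacklo/avg pairs above. */
    static __m128i avg_u8lo_as_u16(__m128i p_16x8, __m128i q_16x8) {
      const __m128i zero = _mm_setzero_si128();
      return _mm_avg_epu16(_mm_unpacklo_epi8(p_16x8, zero),
                           _mm_unpacklo_epi8(q_16x8, zero));
    }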
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v64_intrinsics_x86.h |
     391   return _mm_sub_epi16(_mm_avg_epu16(a, b),   in v64_rdavg_u16()
     395   SIMD_INLINE v64 v64_avg_u16(v64 a, v64 b) { return _mm_avg_epu16(a, b); }   in v64_avg_u16()
|
D | v128_intrinsics_x86.h |
     448   return _mm_sub_epi16(_mm_avg_epu16(a, b),   in v128_rdavg_u16()
     452   SIMD_INLINE v128 v128_avg_u16(v128 a, v128 b) { return _mm_avg_epu16(a, b); }   in v128_avg_u16()
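The v64/v128 rdavg helpers derive a round-down (truncating) average from the rounding one: subtracting the bit lost to rounding, ((a ^ b) & 1), turns (a + b + 1) >> 1 into (a + b) >> 1. A self-contained sketch of that correction:

    #include <emmintrin.h>

    /* Truncating unsigned 16-bit average built on top of the rounding PAVGW. */
    static __m128i avg_trunc_epu16(__m128i a, __m128i b) {
      const __m128i round_bit = _mm_and_si128(_mm_xor_si128(a, b), _mm_set1_epi16(1));
      return _mm_sub_epi16(_mm_avg_epu16(a, b), round_bit);
    }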
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_sse.h | 311 __m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned);
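SSE2 has no signed 16-bit average, so the gemmlowp line above averages in the unsigned domain after biasing the operands. A sketch of that idea (the XOR with 0x8000 used here is equivalent to the add/subtract bias; the helper name is made up):

    #include <emmintrin.h>

    /* Signed rounding halving add (a + b + 1) >> 1: flip the sign bit to map
     * int16 onto uint16, average with _mm_avg_epu16, then flip it back. */
    static __m128i rounding_half_sum_s16_sketch(__m128i a, __m128i b) {
      const __m128i sign = _mm_set1_epi16((short)0x8000);
      const __m128i avg_u = _mm_avg_epu16(_mm_xor_si128(a, sign),
                                          _mm_xor_si128(b, sign));
      return _mm_xor_si128(avg_u, sign);
    }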
|
/external/clang/test/CodeGen/ |
D | sse2-builtins.c | 109 return _mm_avg_epu16(A, B); in test_mm_avg_epu16()
|
/external/clang/lib/Headers/ |
D | emmintrin.h | 713 _mm_avg_epu16(__m128i __a, __m128i __b) in _mm_avg_epu16() function
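The header entry above is the intrinsic's definition (it lowers to PAVGW). A quick usage example confirming the per-lane semantics (a + b + 1) >> 1:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      const __m128i a = _mm_set1_epi16(3);
      const __m128i b = _mm_set1_epi16(4);
      unsigned short out[8];
      _mm_storeu_si128((__m128i *)out, _mm_avg_epu16(a, b));
      printf("%u\n", (unsigned)out[0]);  /* prints 4: (3 + 4 + 1) >> 1 */
      return 0;
    }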
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    3080   sum = _mm_avg_epu16(a, b); //result is rounded, need to compensate it   in vhaddq_u16()
    3136   return64(_mm_avg_epu16(_pM128i(a),_pM128i(b))); //SSE, result rounding!!!   in vrhadd_u16()
    3168   sum = _mm_avg_epu16(au, bu);   in vrhaddq_s16()
    3190   #define vrhaddq_u16 _mm_avg_epu16 //SSE2, results rounded
    4825   avg = _mm_avg_epu16 (a, b);   in vhsubq_u16()
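The NEON_2_SSE wrappers have to reconcile rounding conventions: NEON's vrhadd rounds exactly like _mm_avg_epu16, while vhadd truncates, which is why the comment on line 3080 talks about compensation. A hedged sketch of the rounding case (the truncating vhadd variant needs the same lost-bit correction shown for the rdavg helpers above):

    #include <emmintrin.h>

    /* vrhaddq_u16-style rounding halving add maps one-to-one onto PAVGW. */
    static __m128i vrhaddq_u16_sketch(__m128i a, __m128i b) {
      return _mm_avg_epu16(a, b);  /* (a + b + 1) >> 1 per u16 lane */
    }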
|