/external/libaom/libaom/av1/encoder/x86/

encodetxb_avx2.c
   69  xx_storel_64(ls, res0);                                    in av1_txb_init_levels_avx2()
   71  xx_storel_64(ls + stride, _mm_srli_si128(res0, 8));        in av1_txb_init_levels_avx2()
   73  xx_storel_64(ls + stride * 2, res1);                       in av1_txb_init_levels_avx2()
   75  xx_storel_64(ls + stride * 3, _mm_srli_si128(res1, 8));    in av1_txb_init_levels_avx2()

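In the call sites above, each 128-bit register carries two 8-byte rows of the level buffer, and the shift-by-8 exposes the upper row. A minimal sketch of that store pattern follows (ls/stride/res0/res1 named as in the snippet; producing the registers is out of scope, and xx_storel_64 is assumed to be a plain low-half 64-bit store):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Spread two 128-bit registers over four 8-byte rows, as in
     * av1_txb_init_levels_avx2() above. */
    static inline void store_levels_4x8(uint8_t *ls, int stride,
                                        __m128i res0, __m128i res1) {
      _mm_storel_epi64((__m128i *)ls, res0);                                   /* row 0 */
      _mm_storel_epi64((__m128i *)(ls + stride), _mm_srli_si128(res0, 8));     /* row 1 */
      _mm_storel_epi64((__m128i *)(ls + stride * 2), res1);                    /* row 2 */
      _mm_storel_epi64((__m128i *)(ls + stride * 3), _mm_srli_si128(res1, 8)); /* row 3 */
    }
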
wedge_utils_avx2.c
   78  xx_storel_64(&csse, v_acc_q_0);    in av1_wedge_sse_from_residuals_avx2()
  147  xx_storel_64(&acc, v_acc_q_0);     in av1_wedge_sign_from_residuals_avx2()

wedge_utils_sse2.c
   91  xx_storel_64(&csse, v_acc0_q);     in av1_wedge_sse_from_residuals_sse2()
  180  xx_storel_64(&acc, v_acc_q);       in av1_wedge_sign_from_residuals_sse2()

/external/libaom/libaom/aom_dsp/x86/

sse_sse4.c
   27  xx_storel_64(&sum, sum_1x64);                                     in summary_all_sse4()
  242  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));   in aom_highbd_sse_sse4_1()
  260  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));   in aom_highbd_sse_sse4_1()
  282  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));   in aom_highbd_sse_sse4_1()
  312  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));   in aom_highbd_sse_sse4_1()
  348  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));   in aom_highbd_sse_sse4_1()

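The recurring pattern here folds the two 64-bit lanes of an accumulator together and writes the low lane into a scalar. A minimal sketch, assuming xx_storel_64 behaves like _mm_storel_epi64:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Reduce a register holding two 64-bit partial sums to one uint64_t,
     * mirroring the _mm_add_epi64/_mm_srli_si128 call sites above. */
    static inline uint64_t reduce_sum_2x64(__m128i sum) {
      uint64_t sse;
      const __m128i folded = _mm_add_epi64(sum, _mm_srli_si128(sum, 8));
      _mm_storel_epi64((__m128i *)&sse, folded);
      return sse;
    }
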
blend_a64_mask_sse4.c
    64  xx_storel_64(dst, v_res_b);                     in blend_a64_mask_w8_sse4_1()
   148  xx_storel_64(dst, v_res_b);                     in blend_a64_mask_sx_w8_sse4_1()
   234  xx_storel_64(dst, v_res_b);                     in blend_a64_mask_sy_w8_sse4_1()
   329  xx_storel_64(dst, v_res_b);                     in blend_a64_mask_sx_sy_w8_sse4_1()
   444  xx_storel_64(dst, v_res_w);                     in blend_a64_mask_bn_w4_sse4_1()
   535  xx_storel_64(dst, v_res_w);                     in blend_a64_mask_bn_sx_w4_sse4_1()
   631  xx_storel_64(dst, v_res_w);                     in blend_a64_mask_bn_sy_w4_sse4_1()
   732  xx_storel_64(dst, v_res_w);                     in blend_a64_mask_bn_sx_sy_w4_sse4_1()
  1177  xx_storel_64(dst, _mm_srli_si128(clipa, 8));    in highbd_blend_a64_d16_mask_w4_sse4_1()
  1178  xx_storel_64(dst + dst_stride, clipa);          in highbd_blend_a64_d16_mask_w4_sse4_1()
  [all …]

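The w8 variants above all end with an 8-byte store of the packed blend result. A hypothetical sketch of that tail, using the usual a64 blend convention ((a*s0 + (64 - a)*s1 + 32) >> 6); the names src0/src1/mask and the rounding constants are assumptions, not the file's exact code:

    #include <smmintrin.h>  /* SSE4.1 */
    #include <stdint.h>

    /* Blend 8 pixels of src0 and src1 with an 8-entry alpha mask in the
     * 0..64 range, pack to bytes, and store the 8 result bytes with a
     * single low-half store (the xx_storel_64(dst, v_res_b) call above). */
    static inline void blend_a64_w8_sketch(uint8_t *dst, const uint8_t *src0,
                                           const uint8_t *src1,
                                           const uint8_t *mask) {
      const __m128i v_m0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)mask));
      const __m128i v_m1 = _mm_sub_epi16(_mm_set1_epi16(64), v_m0);
      const __m128i v_s0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)src0));
      const __m128i v_s1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)src1));
      const __m128i v_sum = _mm_add_epi16(_mm_mullo_epi16(v_s0, v_m0),
                                          _mm_mullo_epi16(v_s1, v_m1));
      const __m128i v_res_w =
          _mm_srli_epi16(_mm_add_epi16(v_sum, _mm_set1_epi16(32)), 6);
      const __m128i v_res_b = _mm_packus_epi16(v_res_w, v_res_w);
      _mm_storel_epi64((__m128i *)dst, v_res_b);
    }
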
blend_a64_vmask_sse4.c
   71  xx_storel_64(dst, v_res_b);    in blend_a64_vmask_w8_sse4_1()
  163  xx_storel_64(dst, v_res_w);    in blend_a64_vmask_bn_w4_sse4_1()

synonyms.h
   54  static INLINE void xx_storel_64(void *const a, const __m128i v) {    in xx_storel_64() (function definition)

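The listing truncates the definition after the opening brace. Assuming the usual libaom helper, the body is a single store of the low 64 bits of v to an unaligned address; a minimal sketch (INLINE is libaom's inline macro, spelled plainly here):

    #include <emmintrin.h>

    /* Sketch of the helper whose signature appears above: write the low
     * 64 bits of v to a. The destination only needs 8 writable bytes and
     * no particular alignment. */
    static inline void xx_storel_64_sketch(void *const a, const __m128i v) {
      _mm_storel_epi64((__m128i *)a, v);
    }
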
variance_impl_ssse3.c
   77  xx_storel_64(b, res);    in aom_var_filter_block2d_bil_first_pass_ssse3()

blend_a64_mask_avx2.c
  561  xx_storel_64(dst, v_res_b);                                      in blend_a64_mask_sx_sy_avx2()
  678  xx_storel_64(dst, v_res_b);                                      in blend_a64_mask_sx_avx2()
  774  xx_storel_64(dst, v_res_b);                                      in blend_a64_mask_sy_avx2()
  842  xx_storel_64(dst, v_res_b);                                      in blend_a64_mask_avx2()
  952  xx_storel_64(dst + 3 * dst_stride, _mm_srli_si128(cliph, 8));    in highbd_blend_a64_d16_mask_w4_avx2()
  953  xx_storel_64(dst + 2 * dst_stride, cliph);                       in highbd_blend_a64_d16_mask_w4_avx2()
  955  xx_storel_64(dst + 1 * dst_stride, _mm_srli_si128(clipl, 8));    in highbd_blend_a64_d16_mask_w4_avx2()
  956  xx_storel_64(dst + 0 * dst_stride, clipl);                       in highbd_blend_a64_d16_mask_w4_avx2()

sse_avx2.c
   44  xx_storel_64(&sum, sum_1x64);    in summary_all_avx2()
   64  xx_storel_64(&sum, sum_1x64);    in summary_4x64_avx2()

sum_squares_avx2.c
   61  xx_storel_64(&result, result_64_2_int);    in aom_sum_squares_2d_i16_nxn_avx2()

/external/libaom/libaom/av1/common/x86/

warp_plane_sse2.c
   84  xx_storel_64(&sum_error_d_0, col_error);                       in av1_calc_frame_error_sse2()
   85  xx_storel_64(&sum_error_d_1, _mm_srli_si128(col_error, 8));    in av1_calc_frame_error_sse2()

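Unlike the folded reductions earlier, av1_calc_frame_error_sse2() (and its AVX2 counterpart below) stores each 64-bit lane into its own scalar and adds them afterwards. A sketch of that split, with int64_t assumed as the accumulator type:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Split a register holding two 64-bit partial error sums into two
     * scalars, then combine them, as in the call sites above. */
    static inline int64_t sum_col_error(__m128i col_error) {
      int64_t sum_error_d_0, sum_error_d_1;
      _mm_storel_epi64((__m128i *)&sum_error_d_0, col_error);
      _mm_storel_epi64((__m128i *)&sum_error_d_1, _mm_srli_si128(col_error, 8));
      return sum_error_d_0 + sum_error_d_1;
    }
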
av1_convolve_horiz_rs_sse4.c
  225  xx_storel_64(&dst_y[x], clipped_16);    in av1_highbd_convolve_horiz_rs_sse4_1()

selfguided_sse4.c
  658  xx_storel_64(dst8 + m, res);    in av1_apply_selfguided_restoration_sse4_1()

warp_plane_avx2.c
  1120  xx_storel_64(&sum_error_d_0, sum_error_q_0);                       in av1_calc_frame_error_avx2()
  1121  xx_storel_64(&sum_error_d_1, _mm_srli_si128(sum_error_q_0, 8));    in av1_calc_frame_error_avx2()