Searched refs:xx_storel_64 (Results 1–13 of 13), sorted by relevance
/external/libaom/libaom/av1/encoder/x86/
encodetxb_avx2.c
    69  xx_storel_64(ls, res0);  in av1_txb_init_levels_avx2()
    71  xx_storel_64(ls + stride, _mm_srli_si128(res0, 8));  in av1_txb_init_levels_avx2()
    73  xx_storel_64(ls + stride * 2, res1);  in av1_txb_init_levels_avx2()
    75  xx_storel_64(ls + stride * 3, _mm_srli_si128(res1, 8));  in av1_txb_init_levels_avx2()
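The encodetxb hits show a recurring two-rows-per-register idiom: one 128-bit register carries two 8-byte rows, the low half is stored directly, and the high half is brought down with _mm_srli_si128 before the second store. A minimal, self-contained sketch of that idiom (not libaom's code; the helper name store_two_rows is illustrative, and xx_storel_64 is assumed to wrap _mm_storel_epi64, consistent with the synonyms.h hit further down):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>
#include <stdio.h>

/* Assumed behavior of xx_storel_64: store the low 64 bits of v to an
 * unaligned address (see the synonyms.h result below). */
static inline void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v);
}

/* Store the 16 bytes of res as two consecutive 8-byte rows of dst. */
static void store_two_rows(uint8_t *dst, int stride, __m128i res) {
  xx_storel_64(dst, res);                             /* bytes 0..7  */
  xx_storel_64(dst + stride, _mm_srli_si128(res, 8)); /* bytes 8..15 */
}

int main(void) {
  uint8_t buf[2][8];
  const __m128i v = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                 7, 6, 5, 4, 3, 2, 1, 0);
  store_two_rows(&buf[0][0], 8, v);
  printf("%d %d\n", buf[0][0], buf[1][0]); /* prints: 0 8 */
  return 0;
}

SSE2 is baseline on x86-64, so this builds with no extra compiler flags.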
wedge_utils_avx2.c
    78  xx_storel_64(&csse, v_acc_q_0);  in av1_wedge_sse_from_residuals_avx2()
   147  xx_storel_64(&acc, v_acc_q_0);  in av1_wedge_sign_from_residuals_avx2()
wedge_utils_sse2.c
    91  xx_storel_64(&csse, v_acc0_q);  in av1_wedge_sse_from_residuals_sse2()
   180  xx_storel_64(&acc, v_acc_q);  in av1_wedge_sign_from_residuals_sse2()
/external/libaom/libaom/aom_dsp/x86/
sse_sse4.c
    27  xx_storel_64(&sum, sum_1x64);  in summary_all_sse4()
   240  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
   258  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
   280  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
   310  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
   346  xx_storel_64(&sse, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));  in aom_highbd_sse_sse4_1()
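The repeated aom_highbd_sse_sse4_1 hits all end with the same reduction idiom: fold the high 64-bit lane onto the low lane with _mm_srli_si128 plus _mm_add_epi64, then write the low 64 bits straight into a uint64_t. A sketch under the same xx_storel_64 assumption as above (reduce_add_epi64 is an illustrative name):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>
#include <stdio.h>

static inline void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v); /* assumed body, as above */
}

/* Horizontal add of the two 64-bit lanes, then a single 8-byte store. */
static uint64_t reduce_add_epi64(__m128i sum) {
  uint64_t out;
  xx_storel_64(&out, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));
  return out;
}

int main(void) {
  const __m128i v = _mm_set_epi64x(40, 2); /* lanes {2, 40} */
  printf("%llu\n", (unsigned long long)reduce_add_epi64(v)); /* 42 */
  return 0;
}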
blend_a64_mask_sse4.c
    64  xx_storel_64(dst, v_res_b);  in blend_a64_mask_w8_sse4_1()
   148  xx_storel_64(dst, v_res_b);  in blend_a64_mask_sx_w8_sse4_1()
   234  xx_storel_64(dst, v_res_b);  in blend_a64_mask_sy_w8_sse4_1()
   329  xx_storel_64(dst, v_res_b);  in blend_a64_mask_sx_sy_w8_sse4_1()
   443  xx_storel_64(dst, v_res_w);  in blend_a64_mask_bn_w4_sse4_1()
   534  xx_storel_64(dst, v_res_w);  in blend_a64_mask_bn_sx_w4_sse4_1()
   630  xx_storel_64(dst, v_res_w);  in blend_a64_mask_bn_sy_w4_sse4_1()
   731  xx_storel_64(dst, v_res_w);  in blend_a64_mask_bn_sx_sy_w4_sse4_1()
  1176  xx_storel_64(dst, _mm_srli_si128(clipa, 8));  in highbd_blend_a64_d16_mask_w4_sse4_1()
  1177  xx_storel_64(dst + dst_stride, clipa);  in highbd_blend_a64_d16_mask_w4_sse4_1()
  [all …]
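In the w8 blend paths, xx_storel_64 writes one 8-pixel output row per call. A self-contained sketch of such a row (not the library's implementation; it assumes AV1's 64-alpha blend, dst = (m*s0 + (64 - m)*s1 + 32) >> 6 with the mask in [0, 64], and the helper name blend_a64_w8 is illustrative):

#include <smmintrin.h>  /* SSE4.1, for _mm_cvtepu8_epi16 */
#include <stdint.h>
#include <stdio.h>

static inline void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v); /* assumed body, as above */
}

/* One 8-pixel row: dst = (m*s0 + (64 - m)*s1 + 32) >> 6, m in [0, 64]. */
static void blend_a64_w8(uint8_t *dst, const uint8_t *s0, const uint8_t *s1,
                         const uint8_t *m) {
  const __m128i v_m0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)m));
  const __m128i v_m1 = _mm_sub_epi16(_mm_set1_epi16(64), v_m0);
  const __m128i v_s0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)s0));
  const __m128i v_s1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)s1));
  const __m128i v_sum = _mm_add_epi16(_mm_mullo_epi16(v_s0, v_m0),
                                      _mm_mullo_epi16(v_s1, v_m1));
  /* Round, shift back to 8-bit range, saturate-pack, store 8 bytes. */
  const __m128i v_res_w =
      _mm_srli_epi16(_mm_add_epi16(v_sum, _mm_set1_epi16(32)), 6);
  xx_storel_64(dst, _mm_packus_epi16(v_res_w, v_res_w));
}

int main(void) {
  const uint8_t s0[8] = { 100, 100, 100, 100, 100, 100, 100, 100 };
  const uint8_t s1[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
  const uint8_t m[8] = { 64, 32, 16, 0, 64, 32, 16, 0 };
  uint8_t dst[8];
  blend_a64_w8(dst, s0, s1, m);
  printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]); /* 100 50 25 0 */
  return 0;
}

Products stay below 64 * 255 = 16320, so the 16-bit arithmetic cannot overflow; build with -msse4.1 or equivalent.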
blend_a64_vmask_sse4.c
    71  xx_storel_64(dst, v_res_b);  in blend_a64_vmask_w8_sse4_1()
   162  xx_storel_64(dst, v_res_w);  in blend_a64_vmask_bn_w4_sse4_1()
sum_squares_avx2.c
    61  xx_storel_64(&result, result_64_2_int);  in aom_sum_squares_2d_i16_nxn_avx2()
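The AVX2 callers (here and in sse_avx2.c below) first have to narrow a 256-bit accumulator before the 64-bit store applies. A sketch of that extra step (illustrative helper name; same xx_storel_64 assumption; compile with -mavx2):

#include <immintrin.h>  /* AVX2 */
#include <stdint.h>
#include <stdio.h>

static inline void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v); /* assumed body, as above */
}

/* Narrow 256 -> 128 bits, then reuse the SSE fold-and-store idiom. */
static uint64_t reduce_add_epi64_avx2(__m256i v) {
  const __m128i lo = _mm256_castsi256_si128(v);
  const __m128i hi = _mm256_extracti128_si256(v, 1);
  const __m128i sum = _mm_add_epi64(lo, hi);
  uint64_t out;
  xx_storel_64(&out, _mm_add_epi64(sum, _mm_srli_si128(sum, 8)));
  return out;
}

int main(void) {
  const __m256i v = _mm256_set_epi64x(4, 3, 2, 1);
  printf("%llu\n", (unsigned long long)reduce_add_epi64_avx2(v)); /* 10 */
  return 0;
}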
synonyms.h
    54  static INLINE void xx_storel_64(void *const a, const __m128i v) {  (function definition)
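This is the single definition site; every other result is a call. The excerpt shows only the signature, and the body is presumably the usual one-instruction wrapper around _mm_storel_epi64 (MOVQ), i.e. an unaligned store of the low 64 bits of v. A sketch consistent with that signature (INLINE is libaom's portability macro, stubbed here so the snippet stands alone):

#include <emmintrin.h>  /* SSE2 */

#define INLINE inline /* stub; libaom gets INLINE from its config headers */

static INLINE void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v); /* MOVQ: low 64 bits of v -> *a */
}

Taking void * rather than __m128i * is the point of the wrapper: MOVQ tolerates any alignment, and the bare intrinsic's pointer type would force casts at every call site.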
variance_impl_ssse3.c
    77  xx_storel_64(b, res);  in aom_var_filter_block2d_bil_first_pass_ssse3()
blend_a64_mask_avx2.c
   561  xx_storel_64(dst, v_res_b);  in blend_a64_mask_sx_sy_avx2()
   678  xx_storel_64(dst, v_res_b);  in blend_a64_mask_sx_avx2()
   774  xx_storel_64(dst, v_res_b);  in blend_a64_mask_sy_avx2()
   842  xx_storel_64(dst, v_res_b);  in blend_a64_mask_avx2()
   951  xx_storel_64(dst + 3 * dst_stride, _mm_srli_si128(cliph, 8));  in highbd_blend_a64_d16_mask_w4_avx2()
   952  xx_storel_64(dst + 2 * dst_stride, cliph);  in highbd_blend_a64_d16_mask_w4_avx2()
   954  xx_storel_64(dst + 1 * dst_stride, _mm_srli_si128(clipl, 8));  in highbd_blend_a64_d16_mask_w4_avx2()
   955  xx_storel_64(dst + 0 * dst_stride, clipl);  in highbd_blend_a64_d16_mask_w4_avx2()
sse_avx2.c
    44  xx_storel_64(&sum, sum_1x64);  in summary_all_avx2()
    63  xx_storel_64(&sum, sum_1x64);  in summary_4x64_avx2()
/external/libaom/libaom/av1/common/x86/
av1_convolve_horiz_rs_sse4.c
   225  xx_storel_64(&dst_y[x], clipped_16);  in av1_highbd_convolve_horiz_rs_sse4_1()
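Here the 8-byte store covers four 16-bit high-bitdepth pixels that were clipped to the valid range first. A sketch of that clip-then-store step (not libaom's code; clip_store_4_pixels and the operand values are illustrative):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>
#include <stdio.h>

static inline void xx_storel_64(void *const a, const __m128i v) {
  _mm_storel_epi64((__m128i *)a, v); /* assumed body, as above */
}

/* Clamp four 16-bit pixels to [0, (1 << bd) - 1], then one 8-byte store. */
static void clip_store_4_pixels(uint16_t *dst, __m128i v, int bd) {
  const __m128i max = _mm_set1_epi16((short)((1 << bd) - 1));
  const __m128i clipped =
      _mm_min_epi16(_mm_max_epi16(v, _mm_setzero_si128()), max);
  xx_storel_64(dst, clipped);
}

int main(void) {
  uint16_t dst[4];
  /* bd = 10: valid range is [0, 1023]. */
  clip_store_4_pixels(dst, _mm_set_epi16(0, 0, 0, 0, 1024, -5, 300, 5000), 10);
  printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]); /* 1023 300 0 1023 */
  return 0;
}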
selfguided_sse4.c
   656  xx_storel_64(dst8 + m, res);  in apply_selfguided_restoration_sse4_1()