References to xx_loadl_64, grouped by directory:

/external/libaom/libaom/aom_dsp/x86/

blend_sse4.h
  43: const __m128i v_s0_b = xx_loadl_64(src0);  in blend_8()
  44: const __m128i v_s1_b = xx_loadl_64(src1);  in blend_8()
  75: const __m128i v_s0_b = xx_loadl_64(src0);  in blend_8_u8()
  76: const __m128i v_s1_b = xx_loadl_64(src1);  in blend_8_u8()
  108: const __m128i v_s0_w = xx_loadl_64(src0);  in blend_4_b10()
  109: const __m128i v_s1_w = xx_loadl_64(src1);  in blend_4_b10()
  138: const __m128i v_s0_w = xx_loadl_64(src0);  in blend_4_b12()
  139: const __m128i v_s1_w = xx_loadl_64(src1);  in blend_4_b12()

variance_impl_ssse3.c
  39: __m128i source_low = xx_loadl_64(a);  in aom_var_filter_block2d_bil_first_pass_ssse3()
  40: __m128i source_hi = xx_loadl_64(a + 1);  in aom_var_filter_block2d_bil_first_pass_ssse3()
  67: __m128i source = xx_loadl_64(a);  in aom_var_filter_block2d_bil_first_pass_ssse3()
  104: __m128i source1 = xx_loadl_64(a);  in aom_var_filter_block2d_bil_second_pass_ssse3()
  105: __m128i source2 = xx_loadl_64(a + pixel_step);  in aom_var_filter_block2d_bil_second_pass_ssse3()

blend_mask_sse4.h
  32: const __m128i s0 = xx_loadl_64(src0);  in blend_a64_d16_mask_w4_sse41()
  33: const __m128i s1 = xx_loadl_64(src1);  in blend_a64_d16_mask_w4_sse41()
  90: const __m128i m0 = xx_loadl_64(mask);  in aom_lowbd_blend_a64_d16_mask_subw0_subh0_w8_sse4_1()
  110: const __m128i m_i0 = xx_loadl_64(mask);  in aom_lowbd_blend_a64_d16_mask_subw1_subh1_w4_sse4_1()
  111: const __m128i m_i1 = xx_loadl_64(mask + mask_stride);  in aom_lowbd_blend_a64_d16_mask_subw1_subh1_w4_sse4_1()
  160: const __m128i m_i0 = xx_loadl_64(mask);  in aom_lowbd_blend_a64_d16_mask_subw1_subh0_w4_sse4_1()
  202: const __m128i m_i0 = xx_loadl_64(mask);  in aom_lowbd_blend_a64_d16_mask_subw0_subh1_w4_sse4_1()
  203: const __m128i m_i1 = xx_loadl_64(mask + mask_stride);  in aom_lowbd_blend_a64_d16_mask_subw0_subh1_w4_sse4_1()
  224: const __m128i m_i0 = xx_loadl_64(mask);  in aom_lowbd_blend_a64_d16_mask_subw0_subh1_w8_sse4_1()
  225: const __m128i m_i1 = xx_loadl_64(mask + mask_stride);  in aom_lowbd_blend_a64_d16_mask_subw0_subh1_w8_sse4_1()

sse_avx2.c
  90: const __m128i v_a0 = xx_loadl_64(a);  in sse_w8x2_avx2()
  91: const __m128i v_a1 = xx_loadl_64(a + a_stride);  in sse_w8x2_avx2()
  92: const __m128i v_b0 = xx_loadl_64(b);  in sse_w8x2_avx2()
  93: const __m128i v_b1 = xx_loadl_64(b + b_stride);  in sse_w8x2_avx2()
  228: const __m128i v_a0 = xx_loadl_64(a);  in highbd_sse_w4x4_avx2()
  229: const __m128i v_a1 = xx_loadl_64(a + a_stride);  in highbd_sse_w4x4_avx2()
  230: const __m128i v_a2 = xx_loadl_64(a + a_stride * 2);  in highbd_sse_w4x4_avx2()
  231: const __m128i v_a3 = xx_loadl_64(a + a_stride * 3);  in highbd_sse_w4x4_avx2()
  232: const __m128i v_b0 = xx_loadl_64(b);  in highbd_sse_w4x4_avx2()
  233: const __m128i v_b1 = xx_loadl_64(b + b_stride);  in highbd_sse_w4x4_avx2()
  [all …]

sse_sse4.c
  67: const __m128i v_a0 = xx_loadl_64(a);  in sse8_sse4_1()
  68: const __m128i v_b0 = xx_loadl_64(b);  in sse8_sse4_1()
  183: const __m128i v_a0 = xx_loadl_64(a);  in highbd_sse_w4x2_sse4_1()
  184: const __m128i v_a1 = xx_loadl_64(a + a_stride);  in highbd_sse_w4x2_sse4_1()
  185: const __m128i v_b0 = xx_loadl_64(b);  in highbd_sse_w4x2_sse4_1()
  186: const __m128i v_b1 = xx_loadl_64(b + b_stride);  in highbd_sse_w4x2_sse4_1()

jnt_sad_ssse3.c
  70: __m128i x0 = xx_loadl_64(a + 0 * a_stride);  in aom_sad8xh_sse2()
  71: __m128i x1 = xx_loadl_64(a + 1 * a_stride);  in aom_sad8xh_sse2()
  75: x0 = xx_loadl_64(b + 0 * b_stride);  in aom_sad8xh_sse2()
  76: x1 = xx_loadl_64(b + 1 * b_stride);  in aom_sad8xh_sse2()

obmc_sad_sse4.c
  161: const __m128i v_p_w = xx_loadl_64(pre + n);  in hbd_obmc_sad_w4()
  200: const __m128i v_p1_w = xx_loadl_64(pre + n + 4);  in hbd_obmc_sad_w8n()
  203: const __m128i v_p0_w = xx_loadl_64(pre + n);  in hbd_obmc_sad_w8n()

obmc_sad_avx2.c
  81: const __m128i v_p0_b = xx_loadl_64(pre + n);  in obmc_sad_w8n_avx2()
  160: const __m128i v_p_w_0 = xx_loadl_64(pre);  in hbd_obmc_sad_w4_avx2()
  161: const __m128i v_p_w_1 = xx_loadl_64(pre + pre_stride);  in hbd_obmc_sad_w4_avx2()

blend_a64_mask_sse4.c
  61: const __m128i v_m0_b = xx_loadl_64(mask);  in blend_a64_mask_w8_sse4_1()
  112: const __m128i v_r_b = xx_loadl_64(mask);  in blend_a64_mask_sx_w4_sse4_1()
  228: const __m128i v_ra_b = xx_loadl_64(mask);  in blend_a64_mask_sy_w8_sse4_1()
  229: const __m128i v_rb_b = xx_loadl_64(mask + mask_stride);  in blend_a64_mask_sy_w8_sse4_1()
  283: const __m128i v_ra_b = xx_loadl_64(mask);  in blend_a64_mask_sx_sy_w4_sse4_1()
  284: const __m128i v_rb_b = xx_loadl_64(mask + mask_stride);  in blend_a64_mask_sx_sy_w4_sse4_1()
  481: const __m128i v_m0_b = xx_loadl_64(mask + c);  in blend_a64_mask_bn_w8n_sse4_1()
  527: const __m128i v_r_b = xx_loadl_64(mask);  in blend_a64_mask_bn_sx_w4_sse4_1()
  670: const __m128i v_ra_b = xx_loadl_64(mask + c);  in blend_a64_mask_bn_sy_w8n_sse4_1()
  671: const __m128i v_rb_b = xx_loadl_64(mask + c + mask_stride);  in blend_a64_mask_bn_sy_w8n_sse4_1()
  [all …]

jnt_variance_ssse3.c
  83: __m128i p0_0 = xx_loadl_64(ref + 0 * ref_stride);  in aom_dist_wtd_comp_avg_pred_ssse3()
  84: __m128i p0_1 = xx_loadl_64(ref + 1 * ref_stride);  in aom_dist_wtd_comp_avg_pred_ssse3()

obmc_variance_sse4.c
  182: const __m128i v_p_w = xx_loadl_64(pre + n);  in hbd_obmc_variance_w4()
  224: const __m128i v_p1_w = xx_loadl_64(pre + n + 4);  in hbd_obmc_variance_w8n()
  227: const __m128i v_p0_w = xx_loadl_64(pre + n);  in hbd_obmc_variance_w8n()

variance_sse2.c
  558: __m128i s0 = xx_loadl_64(ref + 0 * ref_stride);  in aom_upsampled_pred_sse2()
  559: __m128i s1 = xx_loadl_64(ref + 1 * ref_stride);  in aom_upsampled_pred_sse2()
  570: const __m128i row0 = xx_loadl_64(ref + 0 * ref_stride);  in aom_upsampled_pred_sse2()
  571: const __m128i row1 = xx_loadl_64(ref + 1 * ref_stride);  in aom_upsampled_pred_sse2()
  572: const __m128i row2 = xx_loadl_64(ref + 2 * ref_stride);  in aom_upsampled_pred_sse2()
  573: const __m128i row3 = xx_loadl_64(ref + 3 * ref_stride);  in aom_upsampled_pred_sse2()

synonyms.h
  37: static INLINE __m128i xx_loadl_64(const void *a) {  in xx_loadl_64()  (definition)
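
The synonyms.h hit above is the definition site. As a minimal self-contained sketch, assuming the helper is the usual thin wrapper over the SSE2 low-half load (libaom's INLINE macro is replaced by plain inline here, and the exact body in the tree may differ):

    #include <emmintrin.h>  /* SSE2 */

    /* Copy 8 bytes from a (no alignment required) into the low 64 bits
     * of an XMM register; _mm_loadl_epi64 zeroes the upper 64 bits. */
    static inline __m128i xx_loadl_64(const void *a) {
      return _mm_loadl_epi64((const __m128i *)a);
    }

Every call site in this listing fits that contract: one 8-pixel low-bitdepth row, or four 16-bit high-bitdepth samples, fetched in a single 8-byte transfer and usually widened immediately (see the _mm_cvtepu8_epi16 / _mm_cvtepu16_epi32 wrappers in selfguided_sse4.c and pickrst_sse4.c below).
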
sum_squares_sse2.c
  38: const __m128i v_val_0_w = xx_loadl_64(src + 0 * stride);  in sum_squares_i16_4x4_sse2()
  39: const __m128i v_val_2_w = xx_loadl_64(src + 2 * stride);  in sum_squares_i16_4x4_sse2()

blend_a64_mask_avx2.c
  525: const __m128i v_ra_b = xx_loadl_64(mask);  in blend_a64_mask_sx_sy_avx2()
  526: const __m128i v_rb_b = xx_loadl_64(mask + mask_stride);  in blend_a64_mask_sx_sy_avx2()
  650: const __m128i v_r_b = xx_loadl_64(mask);  in blend_a64_mask_sx_avx2()
  768: const __m128i v_ra_b = xx_loadl_64(mask);  in blend_a64_mask_sy_avx2()
  769: const __m128i v_rb_b = xx_loadl_64(mask + mask_stride);  in blend_a64_mask_sy_avx2()
  838: const __m128i v_m0_b = xx_loadl_64(mask);  in blend_a64_mask_avx2()

highbd_variance_sse2.c
  795: __m128i p0_0 = xx_loadl_64(ref + 0 * ref_stride);  in aom_highbd_dist_wtd_comp_avg_pred_sse2()
  796: __m128i p0_1 = xx_loadl_64(ref + 1 * ref_stride);  in aom_highbd_dist_wtd_comp_avg_pred_sse2()

/external/libaom/libaom/av1/common/x86/

reconinter_avx2.c
  68: const __m128i s0A = xx_loadl_64(src0);  in av1_build_compound_diffwtd_mask_avx2()
  69: const __m128i s0B = xx_loadl_64(src0 + src0_stride);  in av1_build_compound_diffwtd_mask_avx2()
  70: const __m128i s0C = xx_loadl_64(src0 + src0_stride * 2);  in av1_build_compound_diffwtd_mask_avx2()
  71: const __m128i s0D = xx_loadl_64(src0 + src0_stride * 3);  in av1_build_compound_diffwtd_mask_avx2()
  74: const __m128i s1A = xx_loadl_64(src1);  in av1_build_compound_diffwtd_mask_avx2()
  75: const __m128i s1B = xx_loadl_64(src1 + src1_stride);  in av1_build_compound_diffwtd_mask_avx2()
  76: const __m128i s1C = xx_loadl_64(src1 + src1_stride * 2);  in av1_build_compound_diffwtd_mask_avx2()
  77: const __m128i s1D = xx_loadl_64(src1 + src1_stride * 3);  in av1_build_compound_diffwtd_mask_avx2()
  182: const __m128i s0A = xx_loadl_64(src0);  in build_compound_diffwtd_mask_d16_avx2()
  183: const __m128i s0B = xx_loadl_64(src0 + src0_stride);  in build_compound_diffwtd_mask_d16_avx2()
  [all …]

av1_convolve_horiz_rs_sse4.c
  82: const __m128i src0_8 = xx_loadl_64(src_x0);  in av1_convolve_horiz_rs_sse4_1()
  83: const __m128i src1_8 = xx_loadl_64(src_x1);  in av1_convolve_horiz_rs_sse4_1()
  84: const __m128i src2_8 = xx_loadl_64(src_x2);  in av1_convolve_horiz_rs_sse4_1()
  85: const __m128i src3_8 = xx_loadl_64(src_x3);  in av1_convolve_horiz_rs_sse4_1()

selfguided_sse4.c
  29: return _mm_cvtepu16_epi32(xx_loadl_64(p));  in xx_load_extend_16_32()
  294: xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));  in final_filter()
  475: xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));  in final_filter_fast()
  490: xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));  in final_filter_fast()
  615: src = _mm_cvtepu8_epi16(xx_loadl_64(dat8ij));  in av1_apply_selfguided_restoration_sse4_1()

filterintra_sse4.c
  51: const __m128i p_b = xx_loadl_64(p);  in av1_filter_intra_predictor_sse4_1()

selfguided_avx2.c
  24: return _mm256_cvtepu8_epi32(xx_loadl_64(p));  in yy256_load_extend_8_32()
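
A recurring idiom in the selfguided and pickrst hits is load-then-widen. The AVX2 variant from the selfguided_avx2.c line above, made self-contained (the xx_loadl_64 body is the sketch from synonyms.h above, an assumption rather than the verbatim tree source):

    #include <immintrin.h>  /* AVX2 */

    static inline __m128i xx_loadl_64(const void *a) {
      return _mm_loadl_epi64((const __m128i *)a);
    }

    /* Load 8 bytes and zero-extend each byte to 32 bits, filling all
     * eight lanes of a 256-bit register. */
    static inline __m256i yy256_load_extend_8_32(const void *p) {
      return _mm256_cvtepu8_epi32(xx_loadl_64(p));
    }
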
/external/libaom/libaom/av1/encoder/x86/

pickrst_sse4.c
  520: const __m128i d0 = _mm_cvtepu8_epi16(xx_loadl_64(dat + j));  in av1_lowbd_pixel_proj_error_sse4_1()
  521: const __m128i s0 = _mm_cvtepu8_epi16(xx_loadl_64(src + j));  in av1_lowbd_pixel_proj_error_sse4_1()
  564: const __m128i d0 = _mm_cvtepu8_epi16(xx_loadl_64(dat + j));  in av1_lowbd_pixel_proj_error_sse4_1()
  565: const __m128i s0 = _mm_cvtepu8_epi16(xx_loadl_64(src + j));  in av1_lowbd_pixel_proj_error_sse4_1()
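
Taken together, the call sites follow one pattern: xx_loadl_64 brings in a single 8-byte row, rows are packed or widened, and a SIMD reduction (SAD, SSE, blend) runs on the pair. A hedged sketch of an 8-wide, two-row SAD step in the style of the jnt_sad_ssse3.c hits above; sad8x2_rows and its parameter names are illustrative, not identifiers from the tree, and the real aom_sad8xh loops over more rows:

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    static inline __m128i xx_loadl_64(const void *a) {
      return _mm_loadl_epi64((const __m128i *)a);
    }

    /* Load two 8-pixel rows from each buffer, pack the two rows of each
     * buffer into one register, and let _mm_sad_epu8 accumulate the
     * absolute differences (one 16-bit sum per 64-bit half). */
    static uint32_t sad8x2_rows(const uint8_t *a, int a_stride,
                                const uint8_t *b, int b_stride) {
      const __m128i va = _mm_unpacklo_epi64(xx_loadl_64(a + 0 * a_stride),
                                            xx_loadl_64(a + 1 * a_stride));
      const __m128i vb = _mm_unpacklo_epi64(xx_loadl_64(b + 0 * b_stride),
                                            xx_loadl_64(b + 1 * b_stride));
      const __m128i sad = _mm_sad_epu8(va, vb);
      return (uint32_t)(_mm_cvtsi128_si32(sad) + _mm_extract_epi16(sad, 4));
    }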