Searched refs:xx_loadl_32 (Results 1 – 15 of 15) sorted by relevance

/external/libaom/libaom/aom_dsp/x86/
jnt_sad_ssse3.c:30 __m128i x0 = xx_loadl_32(a + 0 * a_stride); in aom_sad4xh_sse2()
31 __m128i x1 = xx_loadl_32(a + 1 * a_stride); in aom_sad4xh_sse2()
32 __m128i x2 = xx_loadl_32(a + 2 * a_stride); in aom_sad4xh_sse2()
33 __m128i x3 = xx_loadl_32(a + 3 * a_stride); in aom_sad4xh_sse2()
39 x0 = xx_loadl_32(b + 0 * b_stride); in aom_sad4xh_sse2()
40 x1 = xx_loadl_32(b + 1 * b_stride); in aom_sad4xh_sse2()
41 x2 = xx_loadl_32(b + 2 * b_stride); in aom_sad4xh_sse2()
42 x3 = xx_loadl_32(b + 3 * b_stride); in aom_sad4xh_sse2()
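
The jnt_sad_ssse3.c hits above show the canonical 4xH SAD load pattern: four 4-byte rows are fetched with xx_loadl_32 and packed into a single 128-bit register before the SAD reduction. Below is a minimal sketch of that pattern; the helper body matches the synonyms.h hit further down, while the packing and reduction steps are illustrative and not the exact libaom code.

#include <emmintrin.h> /* SSE2 */
#include <string.h>

static inline __m128i xx_loadl_32(const void *a) {
  int val;
  memcpy(&val, a, sizeof(val)); /* alignment- and aliasing-safe 32-bit load */
  return _mm_cvtsi32_si128(val);
}

/* Sketch: SAD of one 4x4 block. Four 32-bit row loads are interleaved into
 * one vector per block, then _mm_sad_epu8 reduces |a-b| over 8-byte halves. */
static unsigned sad4x4_sketch(const unsigned char *a, int a_stride,
                              const unsigned char *b, int b_stride) {
  const __m128i va = _mm_unpacklo_epi64(
      _mm_unpacklo_epi32(xx_loadl_32(a + 0 * a_stride),
                         xx_loadl_32(a + 1 * a_stride)),
      _mm_unpacklo_epi32(xx_loadl_32(a + 2 * a_stride),
                         xx_loadl_32(a + 3 * a_stride)));
  const __m128i vb = _mm_unpacklo_epi64(
      _mm_unpacklo_epi32(xx_loadl_32(b + 0 * b_stride),
                         xx_loadl_32(b + 1 * b_stride)),
      _mm_unpacklo_epi32(xx_loadl_32(b + 2 * b_stride),
                         xx_loadl_32(b + 3 * b_stride)));
  const __m128i sad = _mm_sad_epu8(va, vb); /* two 64-bit partial sums */
  return (unsigned)(_mm_cvtsi128_si32(sad) +
                    _mm_cvtsi128_si32(_mm_srli_si128(sad, 8)));
}
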
sse_avx2.c:69 const __m128i v_a0 = xx_loadl_32(a); in sse_w4x4_avx2()
70 const __m128i v_a1 = xx_loadl_32(a + a_stride); in sse_w4x4_avx2()
71 const __m128i v_a2 = xx_loadl_32(a + a_stride * 2); in sse_w4x4_avx2()
72 const __m128i v_a3 = xx_loadl_32(a + a_stride * 3); in sse_w4x4_avx2()
73 const __m128i v_b0 = xx_loadl_32(b); in sse_w4x4_avx2()
74 const __m128i v_b1 = xx_loadl_32(b + b_stride); in sse_w4x4_avx2()
75 const __m128i v_b2 = xx_loadl_32(b + b_stride * 2); in sse_w4x4_avx2()
76 const __m128i v_b3 = xx_loadl_32(b + b_stride * 3); in sse_w4x4_avx2()
blend_sse4.h:28 const __m128i v_s0_b = xx_loadl_32(src0); in blend_4()
29 const __m128i v_s1_b = xx_loadl_32(src1); in blend_4()
61 const __m128i v_s0_b = xx_loadl_32(src0); in blend_4_u8()
62 const __m128i v_s1_b = xx_loadl_32(src1); in blend_4_u8()
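
The blend_4() and blend_4_u8() hits load two 4-pixel rows that are then mixed under mask weights in [0, 64]; AV1's a64 blends compute (m*s0 + (64-m)*s1 + 32) >> 6 per pixel. The sketch below shows that arithmetic with SSE4.1 widening, reusing the xx_loadl_32 helper sketched above. The names and exact widening steps are illustrative, not the libaom code.

#include <smmintrin.h> /* SSE4.1: _mm_cvtepu8_epi16 */

/* Sketch: blend one 4-pixel row. v_m0_w holds four mask weights in [0, 64]
 * as 16-bit lanes; the low 4 bytes of the result are the blended pixels. */
static __m128i blend_4_sketch(const unsigned char *src0,
                              const unsigned char *src1, __m128i v_m0_w) {
  const __m128i v_m1_w = _mm_sub_epi16(_mm_set1_epi16(64), v_m0_w);
  const __m128i v_s0_w = _mm_cvtepu8_epi16(xx_loadl_32(src0)); /* u8 -> u16 */
  const __m128i v_s1_w = _mm_cvtepu8_epi16(xx_loadl_32(src1));
  const __m128i v_sum_w = _mm_add_epi16(_mm_mullo_epi16(v_s0_w, v_m0_w),
                                        _mm_mullo_epi16(v_s1_w, v_m1_w));
  /* rounded shift: (sum + 32) >> 6 */
  const __m128i v_res_w =
      _mm_srli_epi16(_mm_add_epi16(v_sum_w, _mm_set1_epi16(32)), 6);
  return _mm_packus_epi16(v_res_w, v_res_w);
}
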
loopfilter_sse2.c:341 p1 = xx_loadl_32(s - 2 * p); in aom_lpf_horizontal_4_sse2()
342 p0 = xx_loadl_32(s - 1 * p); in aom_lpf_horizontal_4_sse2()
343 q0 = xx_loadl_32(s - 0 * p); in aom_lpf_horizontal_4_sse2()
344 q1 = xx_loadl_32(s + 1 * p); in aom_lpf_horizontal_4_sse2()
966 q4p4 = _mm_unpacklo_epi32(xx_loadl_32(s - 5 * p), xx_loadl_32(s + 4 * p)); in aom_lpf_horizontal_14_sse2()
967 q3p3 = _mm_unpacklo_epi32(xx_loadl_32(s - 4 * p), xx_loadl_32(s + 3 * p)); in aom_lpf_horizontal_14_sse2()
968 q2p2 = _mm_unpacklo_epi32(xx_loadl_32(s - 3 * p), xx_loadl_32(s + 2 * p)); in aom_lpf_horizontal_14_sse2()
969 q1p1 = _mm_unpacklo_epi32(xx_loadl_32(s - 2 * p), xx_loadl_32(s + 1 * p)); in aom_lpf_horizontal_14_sse2()
971 q0p0 = _mm_unpacklo_epi32(xx_loadl_32(s - 1 * p), xx_loadl_32(s - 0 * p)); in aom_lpf_horizontal_14_sse2()
973 q5p5 = _mm_unpacklo_epi32(xx_loadl_32(s - 6 * p), xx_loadl_32(s + 5 * p)); in aom_lpf_horizontal_14_sse2()
[all …]
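
The loopfilter hits use the same load differently: _mm_unpacklo_epi32 pairs a row above the edge (p side) with its mirror below the edge (q side), so one register such as q1p1 carries both rows and a single filter evaluation covers the two sides. A small sketch of that pairing, again with the xx_loadl_32 helper from above, where s points at the first row below the edge and p is the row stride; illustrative only.

/* Sketch: build the mirrored q<i>p<i> pairs seen in
 * aom_lpf_horizontal_14_sse2(). Bytes 0-3 hold the p-side row, bytes 4-7
 * the matching q-side row. */
static void load_qp_pairs_sketch(const unsigned char *s, int p,
                                 __m128i *q1p1, __m128i *q0p0) {
  *q1p1 = _mm_unpacklo_epi32(xx_loadl_32(s - 2 * p),  /* p1 */
                             xx_loadl_32(s + 1 * p)); /* q1 */
  *q0p0 = _mm_unpacklo_epi32(xx_loadl_32(s - 1 * p),  /* p0 */
                             xx_loadl_32(s + 0 * p)); /* q0 */
}
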
obmc_sad_sse4.c:38 const __m128i v_p_b = xx_loadl_32(pre + n); in obmc_sad_w4()
76 const __m128i v_p1_b = xx_loadl_32(pre + n + 4); in obmc_sad_w8n()
79 const __m128i v_p0_b = xx_loadl_32(pre + n); in obmc_sad_w8n()
sse_sse4.c:55 const __m128i v_a0 = xx_loadl_32(a); in aom_sse4x2_sse4_1()
56 const __m128i v_a1 = xx_loadl_32(a + a_stride); in aom_sse4x2_sse4_1()
57 const __m128i v_b0 = xx_loadl_32(b); in aom_sse4x2_sse4_1()
58 const __m128i v_b1 = xx_loadl_32(b + b_stride); in aom_sse4x2_sse4_1()
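
The sse_sse4.c (and sse_avx2.c) hits feed the same 32-bit loads into a sum-of-squared-error reduction: two rows are widened to 16 bits, subtracted, and squared-and-summed with _mm_madd_epi16. A sketch of that step, with the xx_loadl_32 helper from above; the names are illustrative.

/* Sketch: squared-error partial sums for two 4-pixel rows, as in the
 * aom_sse4x2_sse4_1() hits. Returns four 32-bit partial sums for the
 * caller to accumulate and reduce. */
static __m128i sse4x2_sketch(const unsigned char *a, int a_stride,
                             const unsigned char *b, int b_stride) {
  const __m128i v_a = _mm_cvtepu8_epi16(
      _mm_unpacklo_epi32(xx_loadl_32(a), xx_loadl_32(a + a_stride)));
  const __m128i v_b = _mm_cvtepu8_epi16(
      _mm_unpacklo_epi32(xx_loadl_32(b), xx_loadl_32(b + b_stride)));
  const __m128i v_d = _mm_sub_epi16(v_a, v_b); /* per-pixel differences */
  return _mm_madd_epi16(v_d, v_d); /* pairwise d0*d0 + d1*d1, 4 x i32 */
}
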
obmc_sad_avx2.c:38 const __m128i v_p_b_0 = xx_loadl_32(pre); in obmc_sad_w4_avx2()
39 const __m128i v_p_b_1 = xx_loadl_32(pre + pre_stride); in obmc_sad_w4_avx2()
masked_variance_intrin_ssse3.c:314 __m128i x = xx_loadl_32((__m128i *)src); in bilinear_filter4xh()
368 __m128i x = xx_loadl_32((__m128i *)dst); in bilinear_filter4xh()
369 __m128i y = xx_loadl_32((__m128i *)&dst[4]); in bilinear_filter4xh()
377 const __m128i a = xx_loadl_32((__m128i *)dst); in bilinear_filter4xh()
378 const __m128i b = xx_loadl_32((__m128i *)&dst[4]); in bilinear_filter4xh()
379 const __m128i c = xx_loadl_32((__m128i *)&dst[8]); in bilinear_filter4xh()
380 const __m128i d = xx_loadl_32((__m128i *)&dst[12]); in bilinear_filter4xh()
381 const __m128i e = xx_loadl_32((__m128i *)&dst[16]); in bilinear_filter4xh()
synonyms.h:31 static INLINE __m128i xx_loadl_32(const void *a) { in xx_loadl_32() function
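
The synonyms.h hit is the definition site, truncated by the search tool at the opening brace. The body (as of recent libaom, so subject to version drift) is a memcpy into an int followed by _mm_cvtsi32_si128, which keeps the 32-bit load safe for unaligned pointers and strict aliasing; INLINE is libaom's portability macro and memcpy comes from <string.h>.

static INLINE __m128i xx_loadl_32(const void *a) {
  int val;
  memcpy(&val, a, sizeof(val)); /* tolerate unaligned `a`, avoid aliasing UB */
  return _mm_cvtsi32_si128(val); /* value in lane 0, upper 96 bits zeroed */
}
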
obmc_variance_sse4.c:53 const __m128i v_p1_b = xx_loadl_32(pre + n + 4); in obmc_variance_w8n()
56 const __m128i v_p0_b = xx_loadl_32(pre + n); in obmc_variance_w8n()
blend_a64_mask_sse4.c:40 const __m128i v_m0_b = xx_loadl_32(mask); in blend_a64_mask_w4_sse4_1()
203 const __m128i v_ra_b = xx_loadl_32(mask); in blend_a64_mask_sy_w4_sse4_1()
204 const __m128i v_rb_b = xx_loadl_32(mask + mask_stride); in blend_a64_mask_sy_w4_sse4_1()
437 const __m128i v_m0_b = xx_loadl_32(mask); in blend_a64_mask_bn_w4_sse4_1()
621 const __m128i v_ra_b = xx_loadl_32(mask); in blend_a64_mask_bn_sy_w4_sse4_1()
622 const __m128i v_rb_b = xx_loadl_32(mask + mask_stride); in blend_a64_mask_bn_sy_w4_sse4_1()
blend_mask_sse4.h:71 const __m128i m0 = xx_loadl_32(mask); in aom_lowbd_blend_a64_d16_mask_subw0_subh0_w4_sse4_1()
blend_a64_mask_avx2.c:752 const __m128i v_ra_b = xx_loadl_32(mask); in blend_a64_mask_sy_avx2()
753 const __m128i v_rb_b = xx_loadl_32(mask + mask_stride); in blend_a64_mask_sy_avx2()
824 const __m128i v_m0_b = xx_loadl_32(mask); in blend_a64_mask_avx2()
/external/libaom/libaom/av1/common/x86/
reconinter_avx2.c:39 const __m128i s0A = xx_loadl_32(src0); in av1_build_compound_diffwtd_mask_avx2()
40 const __m128i s0B = xx_loadl_32(src0 + stride0); in av1_build_compound_diffwtd_mask_avx2()
41 const __m128i s0C = xx_loadl_32(src0 + stride0 * 2); in av1_build_compound_diffwtd_mask_avx2()
42 const __m128i s0D = xx_loadl_32(src0 + stride0 * 3); in av1_build_compound_diffwtd_mask_avx2()
48 const __m128i s1A = xx_loadl_32(src1); in av1_build_compound_diffwtd_mask_avx2()
49 const __m128i s1B = xx_loadl_32(src1 + stride1); in av1_build_compound_diffwtd_mask_avx2()
50 const __m128i s1C = xx_loadl_32(src1 + stride1 * 2); in av1_build_compound_diffwtd_mask_avx2()
51 const __m128i s1D = xx_loadl_32(src1 + stride1 * 3); in av1_build_compound_diffwtd_mask_avx2()
selfguided_sse4.c:23 return _mm_cvtepu8_epi32(xx_loadl_32(p)); in xx_load_extend_8_32()
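
Finally, the selfguided_sse4.c hit composes xx_loadl_32 with a widening conversion: xx_load_extend_8_32() loads four bytes and zero-extends each to a 32-bit lane via _mm_cvtepu8_epi32 (SSE4.1), the natural way to pull 8-bit pixels into 32-bit accumulator math. Its shape, per the hit above and the xx_loadl_32 helper sketched earlier:

/* Per the hit above: load 4 bytes, zero-extend each byte to a 32-bit lane. */
static INLINE __m128i xx_load_extend_8_32(const void *p) {
  return _mm_cvtepu8_epi32(xx_loadl_32(p));
}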