Searched refs:xx_loadl_32 (Results 1 – 15 of 15) sorted by relevance
30  __m128i x0 = xx_loadl_32(a + 0 * a_stride);  in aom_sad4xh_sse2()
31  __m128i x1 = xx_loadl_32(a + 1 * a_stride);  in aom_sad4xh_sse2()
32  __m128i x2 = xx_loadl_32(a + 2 * a_stride);  in aom_sad4xh_sse2()
33  __m128i x3 = xx_loadl_32(a + 3 * a_stride);  in aom_sad4xh_sse2()
39  x0 = xx_loadl_32(b + 0 * b_stride);  in aom_sad4xh_sse2()
40  x1 = xx_loadl_32(b + 1 * b_stride);  in aom_sad4xh_sse2()
41  x2 = xx_loadl_32(b + 2 * b_stride);  in aom_sad4xh_sse2()
42  x3 = xx_loadl_32(b + 3 * b_stride);  in aom_sad4xh_sse2()
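These hits are the load stage of a 4-wide SAD kernel: each xx_loadl_32 pulls one 4-pixel row of the source and reference blocks. A minimal sketch of how such rows could be packed and reduced, assuming _mm_unpacklo_epi32/_mm_unpacklo_epi64 packing and an _mm_sad_epu8 reduction (those steps are not shown in the hits and are an assumption, not the library's exact code):

#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

static __m128i load_32(const void *a) {  /* stand-in for xx_loadl_32 */
  int v;
  memcpy(&v, a, sizeof(v));
  return _mm_cvtsi32_si128(v);
}

/* SAD over one 4x4 block, SSE2 only (illustrative, hypothetical helper). */
static unsigned sad4x4_sketch(const uint8_t *a, int a_stride,
                              const uint8_t *b, int b_stride) {
  const __m128i a01 = _mm_unpacklo_epi32(load_32(a + 0 * a_stride),
                                         load_32(a + 1 * a_stride));
  const __m128i a23 = _mm_unpacklo_epi32(load_32(a + 2 * a_stride),
                                         load_32(a + 3 * a_stride));
  const __m128i b01 = _mm_unpacklo_epi32(load_32(b + 0 * b_stride),
                                         load_32(b + 1 * b_stride));
  const __m128i b23 = _mm_unpacklo_epi32(load_32(b + 2 * b_stride),
                                         load_32(b + 3 * b_stride));
  /* Pack all 16 source/reference bytes into one register each, then take
     the packed sum of absolute differences and add the two 64-bit halves. */
  const __m128i va = _mm_unpacklo_epi64(a01, a23);
  const __m128i vb = _mm_unpacklo_epi64(b01, b23);
  const __m128i sad = _mm_sad_epu8(va, vb);
  return (unsigned)(_mm_cvtsi128_si32(sad) + _mm_extract_epi16(sad, 4));
}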
69  const __m128i v_a0 = xx_loadl_32(a);  in sse_w4x4_avx2()
70  const __m128i v_a1 = xx_loadl_32(a + a_stride);  in sse_w4x4_avx2()
71  const __m128i v_a2 = xx_loadl_32(a + a_stride * 2);  in sse_w4x4_avx2()
72  const __m128i v_a3 = xx_loadl_32(a + a_stride * 3);  in sse_w4x4_avx2()
73  const __m128i v_b0 = xx_loadl_32(b);  in sse_w4x4_avx2()
74  const __m128i v_b1 = xx_loadl_32(b + b_stride);  in sse_w4x4_avx2()
75  const __m128i v_b2 = xx_loadl_32(b + b_stride * 2);  in sse_w4x4_avx2()
76  const __m128i v_b3 = xx_loadl_32(b + b_stride * 3);  in sse_w4x4_avx2()
28  const __m128i v_s0_b = xx_loadl_32(src0);  in blend_4()
29  const __m128i v_s1_b = xx_loadl_32(src1);  in blend_4()
61  const __m128i v_s0_b = xx_loadl_32(src0);  in blend_4_u8()
62  const __m128i v_s1_b = xx_loadl_32(src1);  in blend_4_u8()
341  p1 = xx_loadl_32(s - 2 * p);  in aom_lpf_horizontal_4_sse2()
342  p0 = xx_loadl_32(s - 1 * p);  in aom_lpf_horizontal_4_sse2()
343  q0 = xx_loadl_32(s - 0 * p);  in aom_lpf_horizontal_4_sse2()
344  q1 = xx_loadl_32(s + 1 * p);  in aom_lpf_horizontal_4_sse2()
966  q4p4 = _mm_unpacklo_epi32(xx_loadl_32(s - 5 * p), xx_loadl_32(s + 4 * p));  in aom_lpf_horizontal_14_sse2()
967  q3p3 = _mm_unpacklo_epi32(xx_loadl_32(s - 4 * p), xx_loadl_32(s + 3 * p));  in aom_lpf_horizontal_14_sse2()
968  q2p2 = _mm_unpacklo_epi32(xx_loadl_32(s - 3 * p), xx_loadl_32(s + 2 * p));  in aom_lpf_horizontal_14_sse2()
969  q1p1 = _mm_unpacklo_epi32(xx_loadl_32(s - 2 * p), xx_loadl_32(s + 1 * p));  in aom_lpf_horizontal_14_sse2()
971  q0p0 = _mm_unpacklo_epi32(xx_loadl_32(s - 1 * p), xx_loadl_32(s - 0 * p));  in aom_lpf_horizontal_14_sse2()
973  q5p5 = _mm_unpacklo_epi32(xx_loadl_32(s - 6 * p), xx_loadl_32(s + 5 * p));  in aom_lpf_horizontal_14_sse2()
[all …]
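In the 14-tap loop-filter hits, each _mm_unpacklo_epi32 pairs the 4-pixel row k lines above the horizontal edge with the row k-1 lines at or below it, so the symmetric taps share one register. A self-contained sketch of that pairing, with xx_loadl_32 stubbed locally and an illustrative helper name:

#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

static __m128i loadl_32(const void *a) {  /* stand-in for xx_loadl_32 */
  int v;
  memcpy(&v, a, sizeof(v));
  return _mm_cvtsi32_si128(v);
}

/* s points at the first pixel at/below the edge, p is the row stride.
   Returns p_{k-1} (k-th row above the edge) in the low 32-bit lane and
   q_{k-1} (k-th row at/below it) in the next lane, mirroring the
   q{k-1}p{k-1} pairs in the hits above. */
static __m128i pair_rows_around_edge(const uint8_t *s, int p, int k) {
  return _mm_unpacklo_epi32(loadl_32(s - k * p), loadl_32(s + (k - 1) * p));
}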
38  const __m128i v_p_b = xx_loadl_32(pre + n);  in obmc_sad_w4()
76  const __m128i v_p1_b = xx_loadl_32(pre + n + 4);  in obmc_sad_w8n()
79  const __m128i v_p0_b = xx_loadl_32(pre + n);  in obmc_sad_w8n()
55  const __m128i v_a0 = xx_loadl_32(a);  in aom_sse4x2_sse4_1()
56  const __m128i v_a1 = xx_loadl_32(a + a_stride);  in aom_sse4x2_sse4_1()
57  const __m128i v_b0 = xx_loadl_32(b);  in aom_sse4x2_sse4_1()
58  const __m128i v_b1 = xx_loadl_32(b + b_stride);  in aom_sse4x2_sse4_1()
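Here the same 4-byte loads feed a sum-of-squared-error over a 4x2 block. The widen/subtract/multiply-accumulate steps below are an assumption (only the loads appear in the hit), and this sketch sticks to SSE2 rather than the SSE4.1 the library version uses:

#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

static __m128i load_32(const void *a) {  /* stand-in for xx_loadl_32 */
  int v;
  memcpy(&v, a, sizeof(v));
  return _mm_cvtsi32_si128(v);
}

/* Sum of squared differences over one 4x2 block (illustrative helper). */
static int sse_4x2_sketch(const uint8_t *a, int a_stride,
                          const uint8_t *b, int b_stride) {
  const __m128i zero = _mm_setzero_si128();
  /* Two 4-byte rows of each buffer packed into one 8-byte lane. */
  const __m128i va = _mm_unpacklo_epi32(load_32(a), load_32(a + a_stride));
  const __m128i vb = _mm_unpacklo_epi32(load_32(b), load_32(b + b_stride));
  /* Zero-extend to 16 bits, take the difference, then square and pair-sum. */
  const __m128i d = _mm_sub_epi16(_mm_unpacklo_epi8(va, zero),
                                  _mm_unpacklo_epi8(vb, zero));
  const __m128i sq = _mm_madd_epi16(d, d);  /* four 32-bit partial sums */
  /* Horizontal reduction of the four 32-bit lanes. */
  __m128i sum = _mm_add_epi32(sq, _mm_srli_si128(sq, 8));
  sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 4));
  return _mm_cvtsi128_si32(sum);
}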
38  const __m128i v_p_b_0 = xx_loadl_32(pre);  in obmc_sad_w4_avx2()
39  const __m128i v_p_b_1 = xx_loadl_32(pre + pre_stride);  in obmc_sad_w4_avx2()
314  __m128i x = xx_loadl_32((__m128i *)src);  in bilinear_filter4xh()
368  __m128i x = xx_loadl_32((__m128i *)dst);  in bilinear_filter4xh()
369  __m128i y = xx_loadl_32((__m128i *)&dst[4]);  in bilinear_filter4xh()
377  const __m128i a = xx_loadl_32((__m128i *)dst);  in bilinear_filter4xh()
378  const __m128i b = xx_loadl_32((__m128i *)&dst[4]);  in bilinear_filter4xh()
379  const __m128i c = xx_loadl_32((__m128i *)&dst[8]);  in bilinear_filter4xh()
380  const __m128i d = xx_loadl_32((__m128i *)&dst[12]);  in bilinear_filter4xh()
381  const __m128i e = xx_loadl_32((__m128i *)&dst[16]);  in bilinear_filter4xh()
31  static INLINE __m128i xx_loadl_32(const void *a) {  in xx_loadl_32() function
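This is the definition site of the helper; the body is not included in the hit. A plausible reconstruction (an assumption, based on the usual pattern for such load wrappers: a memcpy into a scalar to avoid unaligned or aliasing loads, then a move into the low 32 bits of an __m128i):

#include <emmintrin.h>
#include <string.h>

/* Plain "inline" used here; the library wraps this in its own INLINE macro. */
static inline __m128i xx_loadl_32(const void *a) {
  int val;
  memcpy(&val, a, sizeof(val));   /* read exactly 4 bytes, any alignment */
  return _mm_cvtsi32_si128(val);  /* place them in the low 32-bit lane */
}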
53  const __m128i v_p1_b = xx_loadl_32(pre + n + 4);  in obmc_variance_w8n()
56  const __m128i v_p0_b = xx_loadl_32(pre + n);  in obmc_variance_w8n()
40   const __m128i v_m0_b = xx_loadl_32(mask);  in blend_a64_mask_w4_sse4_1()
203  const __m128i v_ra_b = xx_loadl_32(mask);  in blend_a64_mask_sy_w4_sse4_1()
204  const __m128i v_rb_b = xx_loadl_32(mask + mask_stride);  in blend_a64_mask_sy_w4_sse4_1()
437  const __m128i v_m0_b = xx_loadl_32(mask);  in blend_a64_mask_bn_w4_sse4_1()
621  const __m128i v_ra_b = xx_loadl_32(mask);  in blend_a64_mask_bn_sy_w4_sse4_1()
622  const __m128i v_rb_b = xx_loadl_32(mask + mask_stride);  in blend_a64_mask_bn_sy_w4_sse4_1()
71 const __m128i m0 = xx_loadl_32(mask); in aom_lowbd_blend_a64_d16_mask_subw0_subh0_w4_sse4_1()
752  const __m128i v_ra_b = xx_loadl_32(mask);  in blend_a64_mask_sy_avx2()
753  const __m128i v_rb_b = xx_loadl_32(mask + mask_stride);  in blend_a64_mask_sy_avx2()
824  const __m128i v_m0_b = xx_loadl_32(mask);  in blend_a64_mask_avx2()
39  const __m128i s0A = xx_loadl_32(src0);  in av1_build_compound_diffwtd_mask_avx2()
40  const __m128i s0B = xx_loadl_32(src0 + stride0);  in av1_build_compound_diffwtd_mask_avx2()
41  const __m128i s0C = xx_loadl_32(src0 + stride0 * 2);  in av1_build_compound_diffwtd_mask_avx2()
42  const __m128i s0D = xx_loadl_32(src0 + stride0 * 3);  in av1_build_compound_diffwtd_mask_avx2()
48  const __m128i s1A = xx_loadl_32(src1);  in av1_build_compound_diffwtd_mask_avx2()
49  const __m128i s1B = xx_loadl_32(src1 + stride1);  in av1_build_compound_diffwtd_mask_avx2()
50  const __m128i s1C = xx_loadl_32(src1 + stride1 * 2);  in av1_build_compound_diffwtd_mask_avx2()
51  const __m128i s1D = xx_loadl_32(src1 + stride1 * 3);  in av1_build_compound_diffwtd_mask_avx2()
23 return _mm_cvtepu8_epi32(xx_loadl_32(p)); in xx_load_extend_8_32()
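The last hit is the companion helper xx_load_extend_8_32: the four bytes loaded by xx_loadl_32 are zero-extended to four 32-bit lanes with the SSE4.1 intrinsic _mm_cvtepu8_epi32. An equivalent standalone version (illustrative names, not the library's code):

#include <smmintrin.h>
#include <stdint.h>
#include <string.h>

static inline __m128i load4_extend_u8_to_u32(const uint8_t *p) {
  int v;
  memcpy(&v, p, sizeof(v));                          /* 4 bytes -> low 32 bits */
  return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(v));    /* each byte -> one 32-bit lane */
}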