/external/libvpx/libvpx/vpx_dsp/x86/
D | variance_avx2.c
      47  const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);  in variance_kernel_avx2()
     190  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \
     239  exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);  in spv32_x0_y0()
     243  exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);  in spv32_x0_y0()
     270  exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);  in spv32_half_zero()
     274  exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);  in spv32_half_zero()
     324  exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);  in spv32_x4_y4()
     328  exp_src_lo = _mm256_unpacklo_epi8(current_avg, zero_reg);  in spv32_x4_y4()
     355  exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);  in spv32_bilin_zero()
     364  exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);  in spv32_bilin_zero()
     [all …]
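These libvpx variance call sites come in two flavors: line 47 pairs src with ref, while the spv32_* lines pair a register with zero (plain widening). The first flavor feeds _mm256_maddubs_epi16 with (+1, -1) byte pairs so that one instruction computes src - ref in 16 bits. A minimal sketch of that idiom (variance_kernel_sketch is a hypothetical name, not the library's exact code):

#include <immintrin.h>

// Accumulate sum and sse of (src - ref) over 32 byte pairs.
static void variance_kernel_sketch(__m256i src, __m256i ref,
                                   __m256i *sum, __m256i *sse) {
  // Each 16-bit lane holds the signed byte pair (+1, -1).
  const __m256i adj_sub = _mm256_set1_epi16((short)0xff01);
  const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);
  const __m256i src_ref1 = _mm256_unpackhi_epi8(src, ref);
  // maddubs: unsigned bytes times signed bytes -> src*1 + ref*(-1).
  const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub);
  const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub);
  *sum = _mm256_add_epi16(*sum, _mm256_add_epi16(diff0, diff1));
  // madd squares each diff and pairwise-adds into 32-bit lanes.
  *sse = _mm256_add_epi32(
      *sse, _mm256_add_epi32(_mm256_madd_epi16(diff0, diff0),
                             _mm256_madd_epi16(diff1, diff1)));
}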
D | vpx_subpixel_8t_intrin_avx2.c
     222  s1[0] = _mm256_unpacklo_epi8(s32b[0], s32b[1]);  in vpx_filter_block1d16_v8_x_avx2()
     224  s1[1] = _mm256_unpacklo_epi8(s32b[2], s32b[3]);  in vpx_filter_block1d16_v8_x_avx2()
     226  s1[2] = _mm256_unpacklo_epi8(s32b[4], s32b[5]);  in vpx_filter_block1d16_v8_x_avx2()
     247  s1[3] = _mm256_unpacklo_epi8(srcRegHead1, srcRegHead2);  in vpx_filter_block1d16_v8_x_avx2()
     487  src_reg_m1001_lo = _mm256_unpacklo_epi8(src_reg_m10, src_reg_01);  in vpx_filter_block1d16_v4_avx2()
     504  src_reg_1223_lo = _mm256_unpacklo_epi8(src_reg_12, src_reg_23);  in vpx_filter_block1d16_v4_avx2()
     682  src_reg_m1001 = _mm256_unpacklo_epi8(src_reg_m10, src_reg_01);  in vpx_filter_block1d8_v4_avx2()
     698  src_reg_1223 = _mm256_unpacklo_epi8(src_reg_12, src_reg_23);  in vpx_filter_block1d8_v4_avx2()
     851  src_reg_m1001 = _mm256_unpacklo_epi8(src_reg_m10, src_reg_01);  in vpx_filter_block1d4_v4_avx2()
     867  src_reg_1223 = _mm256_unpacklo_epi8(src_reg_12, src_reg_23);  in vpx_filter_block1d4_v4_avx2()
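These vertical filters interleave two vertically adjacent rows so that _mm256_maddubs_epi16 applies two filter taps per pixel in one instruction. A minimal 4-tap sketch (the real kernels are 8-tap; vfilter4_sketch, its tap layout, and FILTER_BITS == 7 are assumptions for illustration):

#include <immintrin.h>
#include <stdint.h>

// 4-tap vertical filter slice over four rows of pixels.
static __m256i vfilter4_sketch(__m256i row0, __m256i row1, __m256i row2,
                               __m256i row3, const int8_t taps[4]) {
  // Replicate tap pairs (f0,f1) and (f2,f3) across every 16-bit lane.
  const __m256i f01 = _mm256_set1_epi16(
      (short)(((uint16_t)(uint8_t)taps[1] << 8) | (uint8_t)taps[0]));
  const __m256i f23 = _mm256_set1_epi16(
      (short)(((uint16_t)(uint8_t)taps[3] << 8) | (uint8_t)taps[2]));
  // Interleave vertical neighbours: bytes become r0,r1,r0,r1,...
  const __m256i s01 = _mm256_unpacklo_epi8(row0, row1);
  const __m256i s23 = _mm256_unpacklo_epi8(row2, row3);
  // Per pixel: r0*f0 + r1*f1 plus r2*f2 + r3*f3, each in 16 bits.
  __m256i sum = _mm256_adds_epi16(_mm256_maddubs_epi16(s01, f01),
                                  _mm256_maddubs_epi16(s23, f23));
  sum = _mm256_adds_epi16(sum, _mm256_set1_epi16(1 << 6));  // round
  sum = _mm256_srai_epi16(sum, 7);                          // FILTER_BITS
  return _mm256_packus_epi16(sum, sum);  // saturate back to bytes
}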
/external/libaom/libaom/av1/common/x86/
D | convolve_avx2.c
      75  s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);  in av1_convolve_y_sr_avx2()
      76  s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);  in av1_convolve_y_sr_avx2()
      91  s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);  in av1_convolve_y_sr_avx2()
     180  s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);  in av1_convolve_y_sr_avx2()
     181  s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);  in av1_convolve_y_sr_avx2()
     182  s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);  in av1_convolve_y_sr_avx2()
     203  s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);  in av1_convolve_y_sr_avx2()
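Note the s[] indices: each loop iteration of av1_convolve_y_sr_avx2 unpacks only the newest row pair and rotates the older results forward, so every unpack is amortized over several output rows (jnt_convolve_avx2.c below follows the same pattern). A sketch of that sliding window, with emit() a hypothetical stand-in for the maddubs/round/pack stage:

#include <immintrin.h>
#include <stddef.h>

// Slide a window of unpacked row pairs down the source block.
static void convolve_y_rotate_sketch(const unsigned char *src,
                                     ptrdiff_t stride, int h, __m256i s[4],
                                     void (*emit)(const __m256i *)) {
  for (int y = 0; y < h; y += 2) {
    const __m128i row_a = _mm_loadu_si128((const __m128i *)src);
    const __m128i row_b = _mm_loadu_si128((const __m128i *)(src + stride));
    // Unpack only the newest vertical pair; s[0..2] carry over.
    s[3] = _mm256_unpacklo_epi8(_mm256_castsi128_si256(row_a),
                                _mm256_castsi128_si256(row_b));
    emit(s);      // filter, round, and pack the current rows
    s[0] = s[1];  // rotate so each unpack also serves later rows
    s[1] = s[2];
    s[2] = s[3];
    src += 2 * stride;
  }
}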
D | jnt_convolve_avx2.c
     252  s[0] = _mm256_unpacklo_epi8(src_ab[0], src_ab[1]);  in av1_dist_wtd_convolve_y_avx2()
     253  s[1] = _mm256_unpacklo_epi8(src_ab[2], src_ab[3]);  in av1_dist_wtd_convolve_y_avx2()
     269  s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);  in av1_dist_wtd_convolve_y_avx2()
     430  s[0] = _mm256_unpacklo_epi8(src_ab[0], src_ab[1]);  in av1_dist_wtd_convolve_y_avx2()
     431  s[1] = _mm256_unpacklo_epi8(src_ab[2], src_ab[3]);  in av1_dist_wtd_convolve_y_avx2()
     432  s[2] = _mm256_unpacklo_epi8(src_ab[4], src_ab[5]);  in av1_dist_wtd_convolve_y_avx2()
     448  s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);  in av1_dist_wtd_convolve_y_avx2()
     876  const __m256i src_16bit = _mm256_unpacklo_epi8(src_10, zero);  in av1_dist_wtd_convolve_2d_copy_avx2()
D | cfl_avx2.c
     136  __m256i row_lo = _mm256_unpacklo_epi8(top, zeros);  in cfl_luma_subsampling_444_lbd_avx2()
/external/libaom/libaom/aom_dsp/x86/
D | aom_subpixel_8t_intrin_avx2.c
     840  srcReg23_34_lo = _mm256_unpacklo_epi8(srcReg23, srcReg34);  in aom_filter_block1d8_v4_avx2()
     856  srcReg45_56_lo = _mm256_unpacklo_epi8(srcReg45, srcReg56);  in aom_filter_block1d8_v4_avx2()
     938  srcReg32b10 = _mm256_unpacklo_epi8(srcReg32b1, srcReg32b2);  in aom_filter_block1d8_v8_avx2()
     939  srcReg32b11 = _mm256_unpacklo_epi8(srcReg32b3, srcReg32b4);  in aom_filter_block1d8_v8_avx2()
     940  srcReg32b2 = _mm256_unpacklo_epi8(srcReg32b5, srcReg32b6);  in aom_filter_block1d8_v8_avx2()
     956  srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8);  in aom_filter_block1d8_v8_avx2()
    1078  srcReg23_34_lo = _mm256_unpacklo_epi8(srcReg23, srcReg34);  in aom_filter_block1d16_v4_avx2()
    1095  srcReg45_56_lo = _mm256_unpacklo_epi8(srcReg45, srcReg56);  in aom_filter_block1d16_v4_avx2()
    1188  srcReg32b10 = _mm256_unpacklo_epi8(srcReg32b1, srcReg32b2);  in aom_filter_block1d16_v8_avx2()
    1192  srcReg32b11 = _mm256_unpacklo_epi8(srcReg32b3, srcReg32b4);  in aom_filter_block1d16_v8_avx2()
    [all …]
D | sse_avx2.c
      25  const __m256i v_a00_w = _mm256_unpacklo_epi8(v_a0, zero);  in sse_w32_avx2()
      27  const __m256i v_b00_w = _mm256_unpacklo_epi8(v_b0, zero);  in sse_w32_avx2()
     134  const __m256i v_al = _mm256_unpacklo_epi8(v_a, zero);  in aom_sse_avx2()
     136  const __m256i v_bl = _mm256_unpacklo_epi8(v_b, zero);  in aom_sse_avx2()
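Here the second unpack operand is a zero register, so the unpack is plain zero-extension from u8 to u16 (the same widening appears in cfl_avx2.c above and in sum_squares_avx2.c and variance_impl_avx2.c below); the squared error then falls out of _mm256_madd_epi16. A minimal sketch:

#include <immintrin.h>

// Sum of squared differences over 32 bytes, into 32-bit accumulators.
static __m256i sse_w32_sketch(__m256i a, __m256i b, __m256i acc) {
  const __m256i zero = _mm256_setzero_si256();
  const __m256i a_lo = _mm256_unpacklo_epi8(a, zero);  // u8 -> u16
  const __m256i a_hi = _mm256_unpackhi_epi8(a, zero);
  const __m256i b_lo = _mm256_unpacklo_epi8(b, zero);
  const __m256i b_hi = _mm256_unpackhi_epi8(b, zero);
  const __m256i d_lo = _mm256_sub_epi16(a_lo, b_lo);
  const __m256i d_hi = _mm256_sub_epi16(a_hi, b_hi);
  // madd squares each 16-bit diff and pairwise-adds into 32 bits.
  acc = _mm256_add_epi32(acc, _mm256_madd_epi16(d_lo, d_lo));
  return _mm256_add_epi32(acc, _mm256_madd_epi16(d_hi, d_hi));
}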
D | masked_sad_intrin_avx2.c
      42  const __m256i data_l = _mm256_unpacklo_epi8(a, b);  in masked_sad32xh_avx2()
      43  const __m256i mask_l = _mm256_unpacklo_epi8(m, m_inv);  in masked_sad32xh_avx2()
      96  const __m256i data_l = _mm256_unpacklo_epi8(a, b);  in masked_sad16xh_avx2()
      97  const __m256i mask_l = _mm256_unpacklo_epi8(m, m_inv);  in masked_sad16xh_avx2()
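masked_sad32xh_avx2 pairs the two predictors against the mask pair (m, 64 - m), so one _mm256_maddubs_epi16 yields the blend a*m + b*(64 - m) per pixel; comp_mask_pred_line_avx2 and the blend_a64_mask_avx2.c entries below use the same pairing. A sketch of one 32-pixel row, assuming AV1's 6-bit mask range:

#include <immintrin.h>

// Blend a and b by m/64, then accumulate SAD against src.
static __m256i masked_sad_row_sketch(__m256i src, __m256i a, __m256i b,
                                     __m256i m, __m256i res) {
  const __m256i m_inv = _mm256_sub_epi8(_mm256_set1_epi8(64), m);
  const __m256i round = _mm256_set1_epi16(1 << 5);
  // Pair (a,b) bytes with (m, 64-m) weights, low then high halves.
  const __m256i data_l = _mm256_unpacklo_epi8(a, b);
  const __m256i mask_l = _mm256_unpacklo_epi8(m, m_inv);
  const __m256i data_r = _mm256_unpackhi_epi8(a, b);
  const __m256i mask_r = _mm256_unpackhi_epi8(m, m_inv);
  __m256i pred_l = _mm256_maddubs_epi16(data_l, mask_l);  // a*m + b*(64-m)
  __m256i pred_r = _mm256_maddubs_epi16(data_r, mask_r);
  pred_l = _mm256_srai_epi16(_mm256_add_epi16(pred_l, round), 6);
  pred_r = _mm256_srai_epi16(_mm256_add_epi16(pred_r, round), 6);
  const __m256i pred = _mm256_packus_epi16(pred_l, pred_r);
  return _mm256_add_epi32(res, _mm256_sad_epu8(pred, src));
}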
D | variance_avx2.c
      34  const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);  in variance_kernel_avx2()
     351  const __m256i ssAL = _mm256_unpacklo_epi8(s0, s1);  in comp_mask_pred_line_avx2()
     352  const __m256i aaAL = _mm256_unpacklo_epi8(a, ma);  in comp_mask_pred_line_avx2()
     469  const __m256i m_16 = _mm256_unpacklo_epi8(m, zero);  in aom_highbd_comp_mask_pred_avx2()
D | sum_squares_avx2.c
     127  __m256i vsrc0 = _mm256_unpacklo_epi8(vsrc[k], vzero);  in aom_var_2d_u8_avx2()
     148  __m256i vsrc0 = _mm256_unpacklo_epi8(vsrc, vzero);  in aom_var_2d_u8_avx2()
D | variance_impl_avx2.c
      53  exp_src_lo = _mm256_unpacklo_epi8(src_reg, reg); \
      72  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \
D | blend_a64_mask_avx2.c
     414  _mm256_maddubs_epi16(_mm256_unpacklo_epi8(v_s0_s_b, v_s1_s_b),  in blend_16_u8_avx2()
     415      _mm256_unpacklo_epi8(*v_m0_b, *v_m1_b));  in blend_16_u8_avx2()
     431  _mm256_maddubs_epi16(_mm256_unpacklo_epi8(v_s0_b, v_s1_b),  in blend_32_u8_avx2()
     432      _mm256_unpacklo_epi8(*v_m0_b, *v_m1_b));  in blend_32_u8_avx2()
D | intrapred_avx2.c
     401  __m256i u = _mm256_unpacklo_epi8(left_col, left_col);  in aom_h_predictor_32x32_avx2()
     403  __m256i v = _mm256_unpacklo_epi8(u, u);  in aom_h_predictor_32x32_avx2()
     413  v = _mm256_unpacklo_epi8(u, u);  in aom_h_predictor_32x32_avx2()
    4323  w0 = _mm256_unpacklo_epi8(x[0], x[1]);  in transpose16x32_avx2()
    4324  w1 = _mm256_unpacklo_epi8(x[2], x[3]);  in transpose16x32_avx2()
    4325  w2 = _mm256_unpacklo_epi8(x[4], x[5]);  in transpose16x32_avx2()
    4326  w3 = _mm256_unpacklo_epi8(x[6], x[7]);  in transpose16x32_avx2()
    4328  w8 = _mm256_unpacklo_epi8(x[8], x[9]);  in transpose16x32_avx2()
    4329  w9 = _mm256_unpacklo_epi8(x[10], x[11]);  in transpose16x32_avx2()
    4330  w10 = _mm256_unpacklo_epi8(x[12], x[13]);  in transpose16x32_avx2()
    [all …]
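Two distinct tricks sit behind these entries. aom_h_predictor_32x32_avx2 unpacks a register with itself: each unpacklo(x, x) round doubles the run length of every byte, turning the left column into row-sized runs. transpose16x32_avx2 instead builds a byte transpose out of successive unpacklo/unpackhi stages. A sketch of the self-unpack broadcast (repeat4_sketch is a hypothetical name):

#include <immintrin.h>

// Two self-unpack rounds replicate each of the low 4 bytes per 128-bit
// lane four times: a,b,c,d,... -> a,a,b,b,... -> a,a,a,a,b,b,b,b,...
static __m256i repeat4_sketch(__m256i left_col) {
  const __m256i u = _mm256_unpacklo_epi8(left_col, left_col);
  return _mm256_unpacklo_epi8(u, u);
}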
/external/libaom/libaom/aom_dsp/simd/
D | v256_intrinsics_x86.h
     142  return _mm256_unpacklo_epi8(  in v256_ziplo_8()
     262  return _mm256_unpacklo_epi8(  in v256_unpacklo_u8_s16()
     279  _mm256_unpacklo_epi8(  in v256_unpacklo_s8_s16()
     468  v256 l = _mm256_sub_epi16(_mm256_unpacklo_epi8(a, _mm256_setzero_si256()),  in v256_ssd_u8()
     469      _mm256_unpacklo_epi8(b, _mm256_setzero_si256()));  in v256_ssd_u8()
     610  return _mm256_packs_epi16(_mm256_sra_epi16(_mm256_unpacklo_epi8(a, a), x),  in v256_shr_s8()
     687  _mm256_packs_epi16(_mm256_srai_epi16(_mm256_unpacklo_epi8(a, a), (c) + 8), \
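The v256_shr_s8 entries show a workaround for the missing 8-bit arithmetic shift: unpacking a with itself places each byte in both halves of a 16-bit lane, so a 16-bit arithmetic shift by c + 8 produces the sign-extended byte result, and packs_epi16 narrows it back without saturating. A self-contained sketch:

#include <immintrin.h>

// Arithmetic right shift of 32 signed bytes (no native epi8 shift).
static __m256i shr_s8_sketch(__m256i a, int c) {
  const __m128i count = _mm_cvtsi32_si128(c + 8);
  // Each 16-bit lane is (byte | byte << 8); shifting by c+8 leaves
  // the sign-extended shifted byte in the low half of the lane.
  const __m256i lo = _mm256_sra_epi16(_mm256_unpacklo_epi8(a, a), count);
  const __m256i hi = _mm256_sra_epi16(_mm256_unpackhi_epi8(a, a), count);
  return _mm256_packs_epi16(lo, hi);
}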
/external/skia/src/opts/
D | SkBitmapProcState_opts.h
      94  __m256i lo = _mm256_maddubs_epi16(_mm256_unpacklo_epi8( l, r),  in S32_alpha_D32_filter_DX()
      95      _mm256_unpacklo_epi8(wl,wr));  in S32_alpha_D32_filter_DX()
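Skia's bilinear x-filter is the weighted-pair idiom once more: the left and right source pixels are interleaved channel by channel against their weights, so maddubs yields l*wl + r*wr per color channel. A sketch of the tap (the weight precision and the later rescale/pack are not shown in the listing, so they are left to the caller here):

#include <immintrin.h>

// One bilinear tap: for every byte channel, l*wl + r*wr in 16 bits.
static __m256i bilerp_tap_sketch(__m256i l, __m256i r,
                                 __m256i wl, __m256i wr) {
  const __m256i px = _mm256_unpacklo_epi8(l, r);   // l.b, r.b, l.g, r.g, ...
  const __m256i w = _mm256_unpacklo_epi8(wl, wr);  // wl, wr, wl, wr, ...
  return _mm256_maddubs_epi16(px, w);
}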
/external/clang/test/CodeGen/
D | avx2-builtins.c
    1201  return _mm256_unpacklo_epi8(a, b);  in test_mm256_unpacklo_epi8()
/external/clang/lib/Headers/
D | avx2intrin.h
     803  _mm256_unpacklo_epi8(__m256i __a, __m256i __b)  in _mm256_unpacklo_epi8() function
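This is the intrinsic's definition, and its semantics are per 128-bit lane, the usual AVX2 surprise: it interleaves the low 8 bytes of each lane of __a and __b, not the low 16 bytes of the whole register. A scalar reference model:

#include <stdint.h>

// Scalar model of _mm256_unpacklo_epi8: within each 128-bit lane,
// interleave the low 8 bytes of a with the low 8 bytes of b.
static void unpacklo_epi8_ref(const uint8_t a[32], const uint8_t b[32],
                              uint8_t out[32]) {
  for (int lane = 0; lane < 2; ++lane) {
    for (int i = 0; i < 8; ++i) {
      out[lane * 16 + 2 * i + 0] = a[lane * 16 + i];
      out[lane * 16 + 2 * i + 1] = b[lane * 16 + i];
    }
  }
}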
D | avx512vlbwintrin.h
    2232  (__v32qi)_mm256_unpacklo_epi8(__A, __B),  in _mm256_mask_unpacklo_epi8()
    2239  (__v32qi)_mm256_unpacklo_epi8(__A, __B),  in _mm256_maskz_unpacklo_epi8()
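The AVX-512VL/BW masked forms wrap the same AVX2 unpack in a per-byte select; a behavioral sketch (requires AVX512BW and AVX512VL):

#include <immintrin.h>

// Byte i of the result comes from unpacklo(A, B) where bit i of U is
// set, and from W otherwise (the maskz form passes a zero W).
static __m256i mask_unpacklo_epi8_sketch(__m256i W, __mmask32 U,
                                         __m256i A, __m256i B) {
  return _mm256_mask_mov_epi8(W, U, _mm256_unpacklo_epi8(A, B));
}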