/external/libaom/libaom/av1/common/x86/ |
D | highbd_convolve_2d_avx2.c |
    195  _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[0]);  in copy_64()
    196  _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[1]);  in copy_64()
    197  _mm256_storeu_si256((__m256i *)(dst + 2 * 16), s[2]);  in copy_64()
    198  _mm256_storeu_si256((__m256i *)(dst + 3 * 16), s[3]);  in copy_64()
    212  _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[0]);  in copy_128()
    213  _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[1]);  in copy_128()
    214  _mm256_storeu_si256((__m256i *)(dst + 2 * 16), s[2]);  in copy_128()
    215  _mm256_storeu_si256((__m256i *)(dst + 3 * 16), s[3]);  in copy_128()
    216  _mm256_storeu_si256((__m256i *)(dst + 4 * 16), s[4]);  in copy_128()
    217  _mm256_storeu_si256((__m256i *)(dst + 5 * 16), s[5]);  in copy_128()
    [all …]
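For context, the copy_64()/copy_128() hits above are plain block copies: each 256-bit register carries 16 uint16_t pixels, loaded and stored unaligned. A minimal sketch of that pattern, with a hypothetical helper name and signature (not the library's exact code):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch only: copy one 64-sample row of 16-bit pixels with four
     * unaligned 256-bit loads and stores (16 uint16_t lanes per register). */
    static void copy_64_sketch(const uint16_t *src, uint16_t *dst) {
      __m256i s[4];
      s[0] = _mm256_loadu_si256((const __m256i *)(src + 0 * 16));
      s[1] = _mm256_loadu_si256((const __m256i *)(src + 1 * 16));
      s[2] = _mm256_loadu_si256((const __m256i *)(src + 2 * 16));
      s[3] = _mm256_loadu_si256((const __m256i *)(src + 3 * 16));
      _mm256_storeu_si256((__m256i *)(dst + 0 * 16), s[0]);
      _mm256_storeu_si256((__m256i *)(dst + 1 * 16), s[1]);
      _mm256_storeu_si256((__m256i *)(dst + 2 * 16), s[2]);
      _mm256_storeu_si256((__m256i *)(dst + 3 * 16), s[3]);
    }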
|
D | cfl_avx2.c |
    78   _mm256_storeu_si256(row, sum_16x16);  in cfl_luma_subsampling_420_lbd_avx2()
    107  _mm256_storeu_si256(row, top_16x16);  in cfl_luma_subsampling_422_lbd_avx2()
    141  _mm256_storeu_si256(row, row_lo);  in cfl_luma_subsampling_444_lbd_avx2()
    142  _mm256_storeu_si256(row + 1, row_hi);  in cfl_luma_subsampling_444_lbd_avx2()
    183  _mm256_storeu_si256(row, hsum);  in cfl_luma_subsampling_420_hbd_avx2()
    216  _mm256_storeu_si256(row, hsum);  in cfl_luma_subsampling_422_hbd_avx2()
    234  _mm256_storeu_si256(row, _mm256_slli_epi16(top, 3));  in cfl_luma_subsampling_444_hbd_avx2()
    235  _mm256_storeu_si256(row + 1, _mm256_slli_epi16(top_1, 3));  in cfl_luma_subsampling_444_hbd_avx2()
    267  _mm256_storeu_si256((__m256i *)dst, res);  in cfl_predict_lbd_avx2()
    328  _mm256_storeu_si256((__m256i *)dst,  in cfl_predict_hbd_avx2()
    [all …]
|
D | convolve_2d_avx2.c |
    207  _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);  in copy_128()
    208  _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);  in copy_128()
    209  _mm256_storeu_si256((__m256i *)(dst + 2 * 32), s[2]);  in copy_128()
    210  _mm256_storeu_si256((__m256i *)(dst + 3 * 32), s[3]);  in copy_128()
    283  _mm256_storeu_si256((__m256i *)dst, s[0]);  in av1_convolve_2d_copy_sr_avx2()
    285  _mm256_storeu_si256((__m256i *)dst, s[1]);  in av1_convolve_2d_copy_sr_avx2()
    298  _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[0]);  in av1_convolve_2d_copy_sr_avx2()
    299  _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[1]);  in av1_convolve_2d_copy_sr_avx2()
    301  _mm256_storeu_si256((__m256i *)(dst + 0 * 32), s[2]);  in av1_convolve_2d_copy_sr_avx2()
    302  _mm256_storeu_si256((__m256i *)(dst + 1 * 32), s[3]);  in av1_convolve_2d_copy_sr_avx2()
|
D | selfguided_avx2.c |
    72  _mm256_storeu_si256((__m256i *)(dest + i), *zero);  in memset_zero_avx()
    73  _mm256_storeu_si256((__m256i *)(dest + i + 8), *zero);  in memset_zero_avx()
    74  _mm256_storeu_si256((__m256i *)(dest + i + 16), *zero);  in memset_zero_avx()
    75  _mm256_storeu_si256((__m256i *)(dest + i + 24), *zero);  in memset_zero_avx()
    78  _mm256_storeu_si256((__m256i *)(dest + i), *zero);  in memset_zero_avx()
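The memset_zero_avx() hits zero an int32_t buffer by repeatedly storing a zeroed register, eight 32-bit lanes per store (lines 72-75 are the unrolled fast path, line 78 the remainder). A minimal sketch of the non-unrolled form, assuming a count that is a multiple of 8 (hypothetical helper, not the library's exact code):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch only: clear `count` int32_t values, 8 per 256-bit store. */
    static void memset_zero_sketch(int32_t *dest, int count) {
      const __m256i zero = _mm256_setzero_si256();
      for (int i = 0; i < count; i += 8) {
        _mm256_storeu_si256((__m256i *)(dest + i), zero);
      }
    }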
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | avg_intrin_avx2.c |
    121  _mm256_storeu_si256((__m256i *)coeff, src32[0]);  in vpx_highbd_hadamard_8x8_avx2()
    123  _mm256_storeu_si256((__m256i *)coeff, src32[1]);  in vpx_highbd_hadamard_8x8_avx2()
    125  _mm256_storeu_si256((__m256i *)coeff, src32[2]);  in vpx_highbd_hadamard_8x8_avx2()
    127  _mm256_storeu_si256((__m256i *)coeff, src32[3]);  in vpx_highbd_hadamard_8x8_avx2()
    129  _mm256_storeu_si256((__m256i *)coeff, src32[4]);  in vpx_highbd_hadamard_8x8_avx2()
    131  _mm256_storeu_si256((__m256i *)coeff, src32[5]);  in vpx_highbd_hadamard_8x8_avx2()
    133  _mm256_storeu_si256((__m256i *)coeff, src32[6]);  in vpx_highbd_hadamard_8x8_avx2()
    135  _mm256_storeu_si256((__m256i *)coeff, src32[7]);  in vpx_highbd_hadamard_8x8_avx2()
    169  _mm256_storeu_si256((__m256i *)coeff, coeff0);  in vpx_highbd_hadamard_16x16_avx2()
    170  _mm256_storeu_si256((__m256i *)(coeff + 64), coeff1);  in vpx_highbd_hadamard_16x16_avx2()
    [all …]
|
D | bitdepth_conversion_avx2.h |
    38  _mm256_storeu_si256((__m256i *)b, a_1);  in store_tran_low()
    39  _mm256_storeu_si256((__m256i *)(b + 8), a_2);  in store_tran_low()
    41  _mm256_storeu_si256((__m256i *)b, a);  in store_tran_low()
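The store_tran_low() hits widen a register of sixteen 16-bit coefficients into 32-bit tran_low_t storage when the high-bit-depth build is enabled (lines 38-39, two stores of 8 lanes each) and store the register as-is otherwise (line 41). A sketch of the widening side only; the sign-extension sequence shown here is illustrative and not necessarily the one the library uses:

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch only: sign-extend 16 packed int16 coefficients in `a` to int32
     * and write them as two unaligned 256-bit stores (8 lanes each). */
    static void store_tran_low_sketch(__m256i a, int32_t *b) {
      const __m256i a_lo = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(a));
      const __m256i a_hi = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(a, 1));
      _mm256_storeu_si256((__m256i *)b, a_lo);
      _mm256_storeu_si256((__m256i *)(b + 8), a_hi);
    }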
|
D | highbd_convolve_avx2.c |
    39   _mm256_storeu_si256((__m256i *)dst, p0);  in vpx_highbd_convolve_copy_avx2()
    40   _mm256_storeu_si256((__m256i *)(dst + 16), p1);  in vpx_highbd_convolve_copy_avx2()
    41   _mm256_storeu_si256((__m256i *)(dst + 32), p2);  in vpx_highbd_convolve_copy_avx2()
    42   _mm256_storeu_si256((__m256i *)(dst + 48), p3);  in vpx_highbd_convolve_copy_avx2()
    51   _mm256_storeu_si256((__m256i *)dst, p0);  in vpx_highbd_convolve_copy_avx2()
    52   _mm256_storeu_si256((__m256i *)(dst + 16), p1);  in vpx_highbd_convolve_copy_avx2()
    64   _mm256_storeu_si256((__m256i *)dst, p0);  in vpx_highbd_convolve_copy_avx2()
    66   _mm256_storeu_si256((__m256i *)dst, p1);  in vpx_highbd_convolve_copy_avx2()
    126  _mm256_storeu_si256((__m256i *)dst, _mm256_avg_epu16(p0, u0));  in vpx_highbd_convolve_avg_avx2()
    127  _mm256_storeu_si256((__m256i *)(dst + 16), _mm256_avg_epu16(p1, u1));  in vpx_highbd_convolve_avg_avx2()
    [all …]
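The vpx_highbd_convolve_avg_avx2() hits (lines 126-127) fold the store together with _mm256_avg_epu16, i.e. a rounding average of the source pixels with what is already in the destination. A minimal sketch of that averaging store for one group of 16 high-bit-depth pixels (hypothetical helper, not the library's exact code):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch only: dst[k] = (src[k] + dst[k] + 1) >> 1 for 16 uint16_t pixels,
     * which is what _mm256_avg_epu16 computes per lane. */
    static void avg_store_16_sketch(const uint16_t *src, uint16_t *dst) {
      const __m256i p0 = _mm256_loadu_si256((const __m256i *)src);
      const __m256i u0 = _mm256_loadu_si256((const __m256i *)dst);
      _mm256_storeu_si256((__m256i *)dst, _mm256_avg_epu16(p0, u0));
    }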
|
/external/libaom/libaom/aom_dsp/x86/ |
D | avg_intrin_avx2.c |
    110  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    113  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    116  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    119  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    122  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    125  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    128  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    131  _mm256_storeu_si256((__m256i *)coeff,  in hadamard_8x8x2_avx2()
    169  _mm256_storeu_si256((__m256i *)coeff16, _mm256_add_epi16(b0, b2));  in hadamard_16x16_avx2()
    170  _mm256_storeu_si256((__m256i *)(coeff16 + 64), _mm256_add_epi16(b1, b3));  in hadamard_16x16_avx2()
    [all …]
|
D | highbd_quantize_intrin_avx2.c |
    89   _mm256_storeu_si256((__m256i *)qcoeff, q);  in quantize()
    90   _mm256_storeu_si256((__m256i *)dqcoeff, dq);  in quantize()
    107  _mm256_storeu_si256((__m256i *)qcoeff, zero);  in quantize()
    108  _mm256_storeu_si256((__m256i *)dqcoeff, zero);  in quantize()
|
D | bitdepth_conversion_avx2.h |
    30  _mm256_storeu_si256((__m256i *)b, a_1);  in store_tran_low()
    31  _mm256_storeu_si256((__m256i *)(b + 8), a_2);  in store_tran_low()
|
D | highbd_convolve_avx2.c |
    52    _mm256_storeu_si256((__m256i *)dst, p0);  in aom_highbd_convolve_copy_avx2()
    53    _mm256_storeu_si256((__m256i *)(dst + 16), p1);  in aom_highbd_convolve_copy_avx2()
    54    _mm256_storeu_si256((__m256i *)(dst + 32), p2);  in aom_highbd_convolve_copy_avx2()
    55    _mm256_storeu_si256((__m256i *)(dst + 48), p3);  in aom_highbd_convolve_copy_avx2()
    64    _mm256_storeu_si256((__m256i *)dst, p0);  in aom_highbd_convolve_copy_avx2()
    65    _mm256_storeu_si256((__m256i *)(dst + 16), p1);  in aom_highbd_convolve_copy_avx2()
    77    _mm256_storeu_si256((__m256i *)dst, p0);  in aom_highbd_convolve_copy_avx2()
    79    _mm256_storeu_si256((__m256i *)dst, p1);  in aom_highbd_convolve_copy_avx2()
    515   _mm256_storeu_si256((__m256i *)dst, a);  in store_16x1_pixels()
    1034  _mm256_storeu_si256((__m256i *)dst, p);  in store_16x2_pixels()
    [all …]
|
D | intrapred_avx2.c |
    43    _mm256_storeu_si256((__m256i *)dst, *r);  in row_store_32xh()
    52    _mm256_storeu_si256((__m256i *)dst, *r0);  in row_store_32x2xh()
    53    _mm256_storeu_si256((__m256i *)(dst + 32), *r1);  in row_store_32x2xh()
    61    _mm256_storeu_si256((__m256i *)dst, *r);  in row_store_64xh()
    62    _mm256_storeu_si256((__m256i *)(dst + 32), *r);  in row_store_64xh()
    320   _mm256_storeu_si256((__m256i *)dst, r0);  in h_predictor_32x8line()
    321   _mm256_storeu_si256((__m256i *)(dst + (stride << 4)), r1);  in h_predictor_32x8line()
    829   _mm256_storeu_si256((__m256i *)dst, r);  in aom_paeth_predictor_32x16_avx2()
    1266  _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);  in highbd_dr_prediction_z1_16xN_avx2()
    1375  _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);  in highbd_dr_prediction_z1_32xN_avx2()
    [all …]
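The row_store_32xh() family above replicates one predicted row down a block: the same 32-byte register is stored on every output row, with the destination pointer advanced by the stride. A minimal sketch of that loop shape; the signature is assumed from the snippets, not copied from the library:

    #include <immintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch only: write the 32 predicted pixels in *r to each of `height`
     * rows of a 32-pixel-wide block. */
    static void row_store_32xh_sketch(const __m256i *r, int height,
                                      uint8_t *dst, ptrdiff_t stride) {
      for (int i = 0; i < height; ++i) {
        _mm256_storeu_si256((__m256i *)dst, *r);
        dst += stride;
      }
    }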
|
D | synonyms_avx2.h |
    43  _mm256_storeu_si256((__m256i *)a, v);  in yy_storeu_256()
|
D | variance_avx2.c |
    353  _mm256_storeu_si256((__m256i *)(comp_pred), roundA);  in comp_mask_pred_line_avx2()
    484  _mm256_storeu_si256((__m256i *)comp_pred, comp);  in aom_highbd_comp_mask_pred_avx2()
    507  _mm256_storeu_si256((__m256i *)comp_pred, comp);  in aom_highbd_comp_mask_pred_avx2()
    508  _mm256_storeu_si256((__m256i *)(comp_pred + 16), comp1);  in aom_highbd_comp_mask_pred_avx2()
|
D | blend_a64_mask_avx2.c |
    79  _mm256_storeu_si256((__m256i *)(dst), res);  in blend_a64_d16_mask_w32_avx2()
|
/external/libaom/libaom/av1/encoder/x86/ |
D | av1_quantize_avx2.c |
    33  _mm256_storeu_si256((__m256i *)qcoeff, zero);  in write_zero()
    34  _mm256_storeu_si256((__m256i *)qcoeff + 1, zero);  in write_zero()
    36  _mm256_storeu_si256((__m256i *)qcoeff, zero);  in write_zero()
    83  _mm256_storeu_si256((__m256i *)addr, x0); \
    84  _mm256_storeu_si256((__m256i *)addr + 1, x1); \
    93  _mm256_storeu_si256((__m256i *)addr1, q); \
    94  _mm256_storeu_si256((__m256i *)addr2, dq); \
|
D | highbd_block_error_intrin_avx2.c |
    50  _mm256_storeu_si256((__m256i *)temp1, res_diff);  in av1_highbd_block_error_avx2()
    51  _mm256_storeu_si256((__m256i *)temp1 + 1, res_sqcoeff);  in av1_highbd_block_error_avx2()
|
D | av1_highbd_quantize_avx2.c |
    72  _mm256_storeu_si256((__m256i *)qcoeff, q);  in quantize()
    73  _mm256_storeu_si256((__m256i *)dqcoeff, dq);  in quantize()
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_quantize_avx2.c |
    24  _mm256_storeu_si256((__m256i *)(a), zero);  in store_zero_tran_low()
    25  _mm256_storeu_si256((__m256i *)(a + 8), zero);  in store_zero_tran_low()
    27  _mm256_storeu_si256((__m256i *)(a), zero);  in store_zero_tran_low()
|
/external/flac/libFLAC/ |
D | lpc_intrin_avx2.c |
    93   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    124  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    155  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    182  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    211  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    234  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    257  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    276  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    297  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    312  …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…  in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
    [all …]
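The FLAC hits are the same (truncated) statement at the tail of each prediction-order branch: subtract the vector of predictions accumulated from the quantized LP coefficients from eight input samples and store the residual. A sketch of just that store, taking the prediction vector (`summ` here) as already computed earlier; names and types are assumptions, not FLAC's exact code:

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch only: residual[i..i+7] = data[i..i+7] - prediction,
     * eight 32-bit lanes at a time. */
    static void store_residual8_sketch(const int32_t *data, int32_t *residual,
                                       int i, __m256i summ) {
      const __m256i d = _mm256_loadu_si256((const __m256i *)(data + i));
      _mm256_storeu_si256((__m256i *)(residual + i),
                          _mm256_sub_epi32(d, summ));
    }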
|
/external/eigen/Eigen/src/Core/arch/AVX/ |
D | PacketMath.h |
    252  …>(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret…
    256  …int* to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret…
|
/external/clang/test/CodeGen/ |
D | avx-builtins.c |
    1173  _mm256_storeu_si256(A, B);  in test_mm256_storeu_si256()
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v256_intrinsics_x86.h |
    76  _mm256_storeu_si256((__m256i *)p, a);  in v256_store_unaligned()
|
/external/clang/lib/Headers/ |
D | avxintrin.h |
    2439  _mm256_storeu_si256(__m256i *__p, __m256i __a)  in _mm256_storeu_si256() function
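The avxintrin.h entry is the definition of the intrinsic itself: _mm256_storeu_si256(__m256i *__p, __m256i __a) performs a 32-byte store that does not require the destination to be 32-byte aligned (it maps to VMOVDQU), which is the property every caller above relies on when storing into arbitrarily strided pixel and coefficient buffers. A trivial usage example:

    #include <immintrin.h>
    #include <stdint.h>

    int main(void) {
      uint8_t buf[48] = { 0 };
      const __m256i v = _mm256_set1_epi8(0x5A);
      /* The destination (buf + 1) is deliberately misaligned; unlike
       * _mm256_store_si256, the unaligned store is still well-defined. */
      _mm256_storeu_si256((__m256i *)(buf + 1), v);
      return (buf[1] == 0x5A && buf[32] == 0x5A) ? 0 : 1;
    }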
|
/external/eigen/Eigen/src/Core/arch/CUDA/ |
D | PacketMathHalf.h |
    404  _mm256_storeu_si256((__m256i*)to, from.x);
|