/external/libaom/libaom/av1/encoder/x86/ |
D | highbd_fwd_txfm_avx2.c |
      235   out1 = _mm256_sub_epi32(in0_w1, in1_w0); \
      248   out1 = _mm256_sub_epi32(in0_w1, in1_w0); \
      271   v[7] = _mm256_sub_epi32(in[0 * col_num + col], in[7 * col_num + col]);   in av1_fdct8_avx2()
      273   u[6] = _mm256_sub_epi32(in[1 * col_num + col], in[6 * col_num + col]);   in av1_fdct8_avx2()
      275   u[5] = _mm256_sub_epi32(in[2 * col_num + col], in[5 * col_num + col]);   in av1_fdct8_avx2()
      277   v[4] = _mm256_sub_epi32(in[3 * col_num + col], in[4 * col_num + col]);   in av1_fdct8_avx2()
      279   v[3] = _mm256_sub_epi32(u[0], u[3]);   in av1_fdct8_avx2()
      281   v[2] = _mm256_sub_epi32(u[1], u[2]);   in av1_fdct8_avx2()
      291   v[6] = _mm256_sub_epi32(u[0], v[6]);   in av1_fdct8_avx2()
      303   u[1] = _mm256_sub_epi32(v[0], v[1]);   in av1_fdct8_avx2()
      [all …]
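Every hit above is one half of an add/sub butterfly in the forward 8-point DCT: the sum feeds the even half of the next stage, the difference the odd half. A minimal sketch of that pattern (hypothetical helper name, only the first input pair shown):

    #include <immintrin.h>

    /* Sketch: one add/sub pair from a forward DCT stage, eight 32-bit lanes
     * at a time. The listed lines are all instances of this shape. */
    static inline void fdct_butterfly_sketch(__m256i in0, __m256i in7,
                                             __m256i *sum, __m256i *diff) {
      *sum  = _mm256_add_epi32(in0, in7);  /* e.g. u[0] = in[0] + in[7] */
      *diff = _mm256_sub_epi32(in0, in7);  /* e.g. v[7] = in[0] - in[7] */
    }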
|
D | av1_fwd_txfm_avx2.h |
      32   __m256i temp1 = _mm256_sub_epi32(in0_w1, in1_w0);   in btf_32_avx2_type0()
      51   __m256i temp1 = _mm256_sub_epi32(in1_w0, in0_w1);   in btf_32_avx2_type1()
      71   __m256i temp1 = _mm256_sub_epi32(in0_w1, in1_w0);   in btf_32_avx2_type0_new()
      91   __m256i temp1 = _mm256_sub_epi32(in1_w0, in0_w1);   in btf_32_avx2_type1_new()
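These helpers are the weighted transform butterfly out0 = in0*w0 + in1*w1, out1 = in0*w1 - in1*w0 (type1 swaps the order of the subtraction), rounded and shifted back to the transform bit depth. A rough sketch of the type0 variant, assuming caller-supplied cospi weights and a rounding bit count (names are illustrative, not the library's exact helper):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch of a type0 32-bit butterfly; the rounding constant is 1 << (bit - 1). */
    static inline void btf32_type0_sketch(__m256i *in0, __m256i *in1,
                                          int32_t w0, int32_t w1, int bit) {
      const __m256i ww0 = _mm256_set1_epi32(w0);
      const __m256i ww1 = _mm256_set1_epi32(w1);
      const __m256i round = _mm256_set1_epi32(1 << (bit - 1));
      const __m256i in0_w0 = _mm256_mullo_epi32(*in0, ww0);
      const __m256i in1_w1 = _mm256_mullo_epi32(*in1, ww1);
      const __m256i in0_w1 = _mm256_mullo_epi32(*in0, ww1);
      const __m256i in1_w0 = _mm256_mullo_epi32(*in1, ww0);
      __m256i temp0 = _mm256_add_epi32(in0_w0, in1_w1);
      __m256i temp1 = _mm256_sub_epi32(in0_w1, in1_w0);
      temp0 = _mm256_add_epi32(temp0, round);
      temp1 = _mm256_add_epi32(temp1, round);
      *in0 = _mm256_srai_epi32(temp0, bit);
      *in1 = _mm256_srai_epi32(temp1, bit);
    }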
|
D | highbd_block_error_intrin_avx2.c |
      32   __m256i diff1 = _mm256_sub_epi32(mm256_coeff, mm256_dqcoeff);   in av1_highbd_block_error_avx2()
      33   __m256i diff2 = _mm256_sub_epi32(mm256_coeff2, mm256_dqcoeff2);   in av1_highbd_block_error_avx2()
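Here the subtraction feeds a sum of squared differences between original and dequantized coefficients. A minimal sketch of the per-lane term (illustrative; the real routine widens to 64-bit accumulators before summing):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch: per-lane (coeff - dqcoeff)^2 for one block of eight 32-bit
     * coefficients. Assumes each squared difference still fits in 32 bits,
     * which the library code does not assume. */
    static inline __m256i sq_diff8_sketch(const int32_t *coeff, const int32_t *dqcoeff) {
      const __m256i c = _mm256_loadu_si256((const __m256i *)coeff);
      const __m256i d = _mm256_loadu_si256((const __m256i *)dqcoeff);
      const __m256i diff = _mm256_sub_epi32(c, d);
      return _mm256_mullo_epi32(diff, diff);
    }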
|
D | av1_highbd_quantize_avx2.c | 85 __m256i cur_eob = _mm256_sub_epi32(iscan, nz); in quantize()
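The eob update works because nz is an all-ones (-1) comparison mask in lanes whose quantized coefficient is nonzero, so iscan - nz bumps the scan index by one exactly in those lanes. A hedged sketch of that trick (variable names illustrative, not the file's exact code):

    #include <immintrin.h>

    /* Sketch: running end-of-block tracking. `abs_qcoeff` holds absolute
     * quantized values, `iscan` the scan positions, `eob` the running maxima. */
    static inline __m256i update_eob_sketch(__m256i eob, __m256i iscan,
                                            __m256i abs_qcoeff) {
      const __m256i zero = _mm256_setzero_si256();
      const __m256i nz = _mm256_cmpgt_epi32(abs_qcoeff, zero); /* -1 where != 0 */
      __m256i cur_eob = _mm256_sub_epi32(iscan, nz);           /* iscan + 1 there */
      cur_eob = _mm256_and_si256(cur_eob, nz);                 /* 0 in dead lanes */
      return _mm256_max_epi32(eob, cur_eob);                   /* per-lane maximum */
    }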
|
D | pickrst_avx2.c |
      664   const __m256i flt0l_subu = _mm256_sub_epi32(flt0l, u0l);   in av1_highbd_pixel_proj_error_avx2()
      665   const __m256i flt0h_subu = _mm256_sub_epi32(flt0h, u0h);   in av1_highbd_pixel_proj_error_avx2()
      666   const __m256i flt1l_subu = _mm256_sub_epi32(flt1l, u0l);   in av1_highbd_pixel_proj_error_avx2()
      667   const __m256i flt1h_subu = _mm256_sub_epi32(flt1h, u0h);   in av1_highbd_pixel_proj_error_avx2()
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | avg_intrin_avx2.c |
      30   __m256i b1 = _mm256_sub_epi32(a0, a1);   in highbd_hadamard_col8_avx2()
      32   __m256i b3 = _mm256_sub_epi32(a2, a3);   in highbd_hadamard_col8_avx2()
      34   __m256i b5 = _mm256_sub_epi32(a4, a5);   in highbd_hadamard_col8_avx2()
      36   __m256i b7 = _mm256_sub_epi32(a6, a7);   in highbd_hadamard_col8_avx2()
      40   a2 = _mm256_sub_epi32(b0, b2);   in highbd_hadamard_col8_avx2()
      41   a3 = _mm256_sub_epi32(b1, b3);   in highbd_hadamard_col8_avx2()
      44   a6 = _mm256_sub_epi32(b4, b6);   in highbd_hadamard_col8_avx2()
      45   a7 = _mm256_sub_epi32(b5, b7);   in highbd_hadamard_col8_avx2()
      52   b2 = _mm256_sub_epi32(a0, a4);   in highbd_hadamard_col8_avx2()
      53   b6 = _mm256_sub_epi32(a1, a5);   in highbd_hadamard_col8_avx2()
      [all …]
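The 8-point Hadamard column transform is three rounds of add/sub pairs with doubling stride (1, 2, 4), which is exactly what the hits above show. A minimal sketch of the first round, mirroring the a/b naming (hypothetical helper name):

    #include <immintrin.h>

    /* Sketch: first butterfly round of an 8-point Hadamard transform over
     * 32-bit lanes; a[0..7] are the input rows, b[0..7] the stage output. */
    static inline void hadamard8_round1_sketch(const __m256i a[8], __m256i b[8]) {
      b[0] = _mm256_add_epi32(a[0], a[1]);
      b[1] = _mm256_sub_epi32(a[0], a[1]);
      b[2] = _mm256_add_epi32(a[2], a[3]);
      b[3] = _mm256_sub_epi32(a[2], a[3]);
      b[4] = _mm256_add_epi32(a[4], a[5]);
      b[5] = _mm256_sub_epi32(a[4], a[5]);
      b[6] = _mm256_add_epi32(a[6], a[7]);
      b[7] = _mm256_sub_epi32(a[6], a[7]);
    }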
|
D | fwd_dct32x32_impl_avx2.h |
      1524   lstep1[4] = _mm256_sub_epi32(lstep3[2], lstep3[4]);   in FDCT32x32_2D_AVX2()
      1525   lstep1[5] = _mm256_sub_epi32(lstep3[3], lstep3[5]);   in FDCT32x32_2D_AVX2()
      1526   lstep1[6] = _mm256_sub_epi32(lstep3[0], lstep3[6]);   in FDCT32x32_2D_AVX2()
      1527   lstep1[7] = _mm256_sub_epi32(lstep3[1], lstep3[7]);   in FDCT32x32_2D_AVX2()
      1532   lstep1[20] = _mm256_sub_epi32(lstep2[18], lstep3[20]);   in FDCT32x32_2D_AVX2()
      1533   lstep1[21] = _mm256_sub_epi32(lstep2[19], lstep3[21]);   in FDCT32x32_2D_AVX2()
      1534   lstep1[22] = _mm256_sub_epi32(lstep2[16], lstep3[22]);   in FDCT32x32_2D_AVX2()
      1535   lstep1[23] = _mm256_sub_epi32(lstep2[17], lstep3[23]);   in FDCT32x32_2D_AVX2()
      1536   lstep1[24] = _mm256_sub_epi32(lstep2[30], lstep3[24]);   in FDCT32x32_2D_AVX2()
      1537   lstep1[25] = _mm256_sub_epi32(lstep2[31], lstep3[25]);   in FDCT32x32_2D_AVX2()
      [all …]
|
/external/libaom/libaom/av1/common/x86/ |
D | selfguided_avx2.c |
      123   _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7));   in integral_images()
      125   _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7));   in integral_images()
      171   _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7));   in integral_images_highbd()
      173   _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7));   in integral_images_highbd()
      185   const __m256i u = _mm256_sub_epi32(tr, tl);   in boxsum_from_ii()
      186   const __m256i v = _mm256_sub_epi32(br, bl);   in boxsum_from_ii()
      187   return _mm256_sub_epi32(v, u);   in boxsum_from_ii()
      213   return _mm256_sub_epi32(an, bb);   in compute_p()
      271   _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res);   in calc_ab()
      318   return _mm256_sub_epi32(_mm256_slli_epi32(_mm256_add_epi32(fours, threes), 2),   in cross_sum()
      [all …]
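boxsum_from_ii is the classic summed-area-table lookup: the sum over a box is br - bl - tr + tl, folded into two subtractions. A sketch under that reading, with the same corner naming as the hits above:

    #include <immintrin.h>

    /* Sketch: box sum from an integral image; tl/tr/bl/br are the four corner
     * values of the box, one box per 32-bit lane. */
    static inline __m256i box_sum_sketch(__m256i tl, __m256i tr, __m256i bl, __m256i br) {
      const __m256i u = _mm256_sub_epi32(tr, tl);  /* top edge */
      const __m256i v = _mm256_sub_epi32(br, bl);  /* bottom edge */
      return _mm256_sub_epi32(v, u);               /* br - bl - tr + tl */
    }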
|
D | highbd_inv_txfm_avx2.c |
      105   __m256i a1 = _mm256_sub_epi32(offset, in1);   in neg_shift_avx2()
      227   __m256i a1 = _mm256_sub_epi32(in0, in1);   in addsub_avx2()
      241   __m256i a1 = _mm256_sub_epi32(in0, in1);   in addsub_no_clamp_avx2()
      254   __m256i a1 = _mm256_sub_epi32(in0_w_offset, in1);   in addsub_shift_avx2()
      1356   u[5] = _mm256_sub_epi32(y, x);   in idct16_low8_avx2()
      1377   u[10] = _mm256_sub_epi32(y, x);   in idct16_low8_avx2()
      1387   u[11] = _mm256_sub_epi32(y, x);   in idct16_low8_avx2()
      1523   v[1] = _mm256_sub_epi32(x, y);   in idct16_avx2()
      1547   u[5] = _mm256_sub_epi32(y, x);   in idct16_avx2()
      1571   v[10] = _mm256_sub_epi32(y, x);   in idct16_avx2()
      [all …]
|
/external/libaom/libaom/aom_dsp/x86/ |
D | obmc_sad_avx2.c |
      51   const __m256i v_diff_d = _mm256_sub_epi32(v_w_d, v_pm_d);   in obmc_sad_w4_avx2()
      92   const __m256i v_diff0_d = _mm256_sub_epi32(v_w0_d, v_pm0_d);   in obmc_sad_w8n_avx2()
      173   const __m256i v_diff_d = _mm256_sub_epi32(v_w_d, v_pm_d);   in hbd_obmc_sad_w4_avx2()
      218   const __m256i v_diff0_d = _mm256_sub_epi32(v_w0_d, v_pm0_d);   in hbd_obmc_sad_w8n_avx2()
|
D | obmc_variance_avx2.c |
      54   const __m256i v_diff0_d = _mm256_sub_epi32(v_w_d, v_pm_d);   in obmc_variance_w8n()
      116   const __m256i v_diff0_d = _mm256_sub_epi32(v_w0_d, v_pm0_d);   in obmc_variance_w16n()
      117   const __m256i v_diff1_d = _mm256_sub_epi32(v_w1_d, v_pm1_d);   in obmc_variance_w16n()
|
D | blend_a64_mask_avx2.c |
      40   _mm256_srai_epi32(_mm256_sub_epi32(res0_lo, *v_round_offset), shift);   in blend_a64_d16_mask_w16_avx2()
      42   _mm256_srai_epi32(_mm256_sub_epi32(res0_hi, *v_round_offset), shift);   in blend_a64_d16_mask_w16_avx2()
      68   _mm256_srai_epi32(_mm256_sub_epi32(res0_lo, *v_round_offset), shift);   in blend_a64_d16_mask_w32_avx2()
      70   _mm256_srai_epi32(_mm256_sub_epi32(res0_hi, *v_round_offset), shift);   in blend_a64_d16_mask_w32_avx2()
      72   _mm256_srai_epi32(_mm256_sub_epi32(res1_lo, *v_round_offset), shift);   in blend_a64_d16_mask_w32_avx2()
      74   _mm256_srai_epi32(_mm256_sub_epi32(res1_hi, *v_round_offset), shift);   in blend_a64_d16_mask_w32_avx2()
      941   _mm256_srai_epi32(_mm256_sub_epi32(sumh, *round_offset), shift);   in highbd_blend_a64_d16_mask_w4_avx2()
      943   _mm256_srai_epi32(_mm256_sub_epi32(suml, *round_offset), shift);   in highbd_blend_a64_d16_mask_w4_avx2()
      1073   _mm256_srai_epi32(_mm256_sub_epi32(sumah, *round_offset), shift);   in highbd_blend_a64_d16_mask_w8_avx2()
      1075   _mm256_srai_epi32(_mm256_sub_epi32(sumal, *round_offset), shift);   in highbd_blend_a64_d16_mask_w8_avx2()
      [all …]
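Every one of these hits (and the convolve_avx2.h hit further down) is the same compound rounding step: subtract the accumulated round offset, then arithmetic-shift back to pixel precision. A one-line sketch of that step; the offset and shift values come from the blend/convolve parameters and are placeholders here:

    #include <immintrin.h>

    /* Sketch: remove the compound-prediction round offset and narrow the
     * 32-bit weighted sums back toward the output bit depth. */
    static inline __m256i blend_round_sketch(__m256i res, __m256i round_offset, int shift) {
      return _mm256_srai_epi32(_mm256_sub_epi32(res, round_offset), shift);
    }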
|
D | intrapred_avx2.c |
      1050   diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]   in highbd_dr_prediction_z1_4xN_internal_avx2()
      1144   diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]   in highbd_dr_prediction_z1_8xN_internal_avx2()
      1216   diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]   in highbd_dr_prediction_z1_16xN_internal_avx2()
      1233   diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]   in highbd_dr_prediction_z1_16xN_internal_avx2()
      1317   diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]   in highbd_dr_prediction_z1_32xN_internal_avx2()
      1333   diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]   in highbd_dr_prediction_z1_32xN_internal_avx2()
      1434   diff = _mm256_sub_epi32(a1, a0); // a[x+1] - a[x]   in highbd_dr_prediction_z1_64xN_avx2()
      1450   diff = _mm256_sub_epi32(a1_1, a0_1); // a[x+1] - a[x]   in highbd_dr_prediction_z1_64xN_avx2()
      1767   diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]   in highbd_dr_prediction_z2_Nx4_avx2()
      1881   diff = _mm256_sub_epi32(a1_x, a0_x); // a[x+1] - a[x]   in highbd_dr_prediction_32bit_z2_Nx8_avx2()
      [all …]
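In the directional predictors the difference is the slope for a linear interpolation between adjacent reference samples, pred = a[x] + ((a[x+1] - a[x]) * frac) >> 5, with a 5-bit sub-pixel fraction. A rough sketch of that step (rounding omitted for brevity; names are illustrative):

    #include <immintrin.h>

    /* Sketch: fractional interpolation between neighbouring reference pixels
     * for a directional (Z1/Z2) intra predictor; frac holds the 5-bit fraction. */
    static inline __m256i dr_interp_sketch(__m256i a0, __m256i a1, __m256i frac) {
      const __m256i diff = _mm256_sub_epi32(a1, a0);         /* a[x+1] - a[x] */
      const __m256i step = _mm256_mullo_epi32(diff, frac);
      const __m256i base = _mm256_slli_epi32(a0, 5);         /* a[x] * 32 */
      return _mm256_srai_epi32(_mm256_add_epi32(base, step), 5);
    }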
|
D | txfm_common_avx2.h |
      63   *in1 = _mm256_sub_epi32(_in0, _in1);   in btf_32_add_sub_avx2()
      79   *out1 = _mm256_sub_epi32(_in0, _in1);   in btf_32_add_sub_out_avx2()
|
D | highbd_quantize_intrin_avx2.c | 102 __m256i cur_eob = _mm256_sub_epi32(iscan, nz); in quantize()
|
D | convolve_avx2.h | 431 const __m256i res_signed = _mm256_sub_epi32(*res_unsigned, *offset_const); in highbd_convolve_rounding()
|
/external/flac/libFLAC/ |
D | lpc_intrin_avx2.c |
      93    …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      124   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      155   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      182   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      211   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      234   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      257   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      276   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      297   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      312   …_mm256_storeu_si256((__m256i*)(residual+i), _mm256_sub_epi32(_mm256_loadu_si256((const __m256i*)(d…   in FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_avx2()
      [all …]
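Every hit is the final store of the LPC residual: residual[i] = data[i] - (prediction >> quantization_shift). A heavily simplified sketch of that loop shape, assuming an order-1 predictor and that data points at least one sample past the warm-up history (the real routine is unrolled per predictor order and uses 16-bit packed multiplies):

    #include <immintrin.h>
    #include <stdint.h>

    /* Sketch: residual of a quantized order-1 linear predictor, 8 samples per
     * iteration; shows only the subtract-and-store shape shared by the hits above. */
    static void residual_order1_sketch(const int32_t *data, int32_t coeff, int shift,
                                       int32_t *residual, int n) {
      const __m256i c = _mm256_set1_epi32(coeff);
      for (int i = 0; i + 8 <= n; i += 8) {
        const __m256i prev = _mm256_loadu_si256((const __m256i *)(data + i - 1));
        const __m256i pred = _mm256_srai_epi32(_mm256_mullo_epi32(prev, c), shift);
        const __m256i cur  = _mm256_loadu_si256((const __m256i *)(data + i));
        _mm256_storeu_si256((__m256i *)(residual + i), _mm256_sub_epi32(cur, pred));
      }
    }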
|
/external/clang/test/CodeGen/ |
D | avx2-builtins.c | 1141 return _mm256_sub_epi32(a, b); in test_mm256_sub_epi32()
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v256_intrinsics_x86.h | 129 SIMD_INLINE v256 v256_sub_32(v256 a, v256 b) { return _mm256_sub_epi32(a, b); } in v256_sub_32()
|
/external/clang/lib/Headers/ |
D | avx2intrin.h | 743 _mm256_sub_epi32(__m256i __a, __m256i __b) in _mm256_sub_epi32() function
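This is the definition every hit in the listing resolves to: a lane-wise 32-bit subtraction with two's-complement wraparound, eight independent int32 results per instruction. A trivial usage sketch:

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      int32_t a[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
      int32_t b[8] = {  1,  2,  3,  4,  5,  6,  7,  8 };
      int32_t r[8];
      const __m256i va = _mm256_loadu_si256((const __m256i *)a);
      const __m256i vb = _mm256_loadu_si256((const __m256i *)b);
      _mm256_storeu_si256((__m256i *)r, _mm256_sub_epi32(va, vb)); /* r[i] = a[i] - b[i] */
      for (int i = 0; i < 8; ++i) printf("%d ", r[i]);             /* 9 18 27 ... 72 */
      printf("\n");
      return 0;
    }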
|
/external/pdfium/third_party/libopenjpeg20/ |
D | dwt.c | 611 #define SUB(x,y) _mm256_sub_epi32((x),(y))
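OpenJPEG only aliases the intrinsic behind a SUB macro so the wavelet lifting steps read the same across its SSE2 and AVX2 paths. A hedged sketch of one inverse 5/3 update step written in that macro style; ADD and SAR are redefined locally here and are assumptions, not necessarily dwt.c's exact macro set:

    #include <immintrin.h>

    #define ADD(x, y) _mm256_add_epi32((x), (y))
    #define SUB(x, y) _mm256_sub_epi32((x), (y))
    #define SAR(x, n) _mm256_srai_epi32((x), (n))

    /* Sketch: inverse 5/3 "update" lifting step, s[n] -= (d[n-1] + d[n] + 2) >> 2,
     * eight coefficients per vector. */
    static inline __m256i lift53_update_sketch(__m256i s, __m256i d_prev, __m256i d_cur) {
      const __m256i two = _mm256_set1_epi32(2);
      return SUB(s, SAR(ADD(ADD(d_prev, d_cur), two), 2));
    }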
|