/external/libhevc/encoder/arm/ |
D | ihevce_common_utils_neon.c |
    574  edgeidx = vandq_s16(edgeidx_reg0, edgeidx);      in ihevce_get_luma_eo_sao_params_neon()
    577  edgeidx1 = vandq_s16(edgeidx_reg5, edgeidx1);    in ihevce_get_luma_eo_sao_params_neon()
    597  temp_reg0 = vandq_s16(temp_reg0, pel_error);     in ihevce_get_luma_eo_sao_params_neon()
    598  temp_reg4 = vandq_s16(temp_reg4, pel_error1);    in ihevce_get_luma_eo_sao_params_neon()
    599  temp_reg1 = vandq_s16(temp_reg1, pel_error);     in ihevce_get_luma_eo_sao_params_neon()
    600  temp_reg5 = vandq_s16(temp_reg5, pel_error1);    in ihevce_get_luma_eo_sao_params_neon()
    602  temp_reg2 = vandq_s16(temp_reg2, pel_error);     in ihevce_get_luma_eo_sao_params_neon()
    603  temp_reg6 = vandq_s16(temp_reg6, pel_error1);    in ihevce_get_luma_eo_sao_params_neon()
    604  temp_reg3 = vandq_s16(temp_reg3, pel_error);     in ihevce_get_luma_eo_sao_params_neon()
    605  temp_reg7 = vandq_s16(temp_reg7, pel_error1);    in ihevce_get_luma_eo_sao_params_neon()
    [all …]
|
D | ihevce_had_compute_neon.c |
    343  a0 = vandq_s16(a0, mask);                               in ihevce_HAD_8x8_8bit_plane_neon()
    436  combined_rows[0] = vandq_s16(combined_rows[0], mask);   in ihevce_HAD_4x4_8bit_plane_neon()
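The libhevc encoder hits above AND values with precomputed lane masks (edgeidx_reg*, pel_error*, mask). A common way such masks are built and applied in NEON is sketched below; mask_where_gt is a hypothetical helper for illustration, not the libhevc code.

    #include <arm_neon.h>

    /* Keep the lanes of `val` where a > b, zero the rest.  vcgtq_s16 yields
     * 0xFFFF / 0x0000 per lane, so reinterpret + vandq_s16 acts as a
     * per-lane select-or-zero. */
    static inline int16x8_t mask_where_gt(int16x8_t val, int16x8_t a, int16x8_t b)
    {
        uint16x8_t gt = vcgtq_s16(a, b);
        return vandq_s16(val, vreinterpretq_s16_u16(gt));
    }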
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_neon.h |
    44   return vandq_s16(a, b);
    238  a = vandq_s16(a, vextq_s16(a, a, 1));
    239  a = vandq_s16(a, vextq_s16(a, a, 2));
    240  a = vandq_s16(a, vextq_s16(a, a, 4));
    290  const int16x8_t fixup = vshrq_n_s16(vandq_s16(x, shift_vec), 15);
    306  const int16x8_t fixup = vshrq_n_s16(vandq_s16(x, shift_vec), 15);
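The gemmlowp hits at 238-240 AND the vector with rotated copies of itself; after the three folds every lane holds the AND of all eight original lanes (an "all lanes set" style reduction). A standalone sketch of that fold, based on a reading of the snippet rather than copied from gemmlowp:

    #include <arm_neon.h>

    /* Fold with rotations by 1, 2 and 4 lanes; each step doubles the number
     * of original lanes combined into every position, so lane 0 ends up as
     * the AND across the whole vector. */
    static inline int16_t and_across_lanes_s16(int16x8_t a)
    {
        a = vandq_s16(a, vextq_s16(a, a, 1));
        a = vandq_s16(a, vextq_s16(a, a, 2));
        a = vandq_s16(a, vextq_s16(a, a, 4));
        return vgetq_lane_s16(a, 0);
    }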
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | quantize_neon.c |
    81   qcoeff = vandq_s16(qcoeff, zbin_mask);   in vpx_quantize_b_neon()
    131  qcoeff = vandq_s16(qcoeff, zbin_mask);   in vpx_quantize_b_neon()
    241  qcoeff = vandq_s16(qcoeff, zbin_mask);   in vpx_quantize_b_32x32_neon()
    289  qcoeff = vandq_s16(qcoeff, zbin_mask);   in vpx_quantize_b_32x32_neon()
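In the libvpx quantizer hits, qcoeff is ANDed with a zero-bin mask so coefficients that did not clear the zbin threshold are forced to zero. A hedged sketch of that pattern (apply_zbin_mask is a hypothetical helper, not the libvpx code):

    #include <arm_neon.h>

    /* Zero the quantized coefficients whose absolute input value is below the
     * zero-bin threshold.  vcgeq_s16 gives an all-ones mask for lanes that
     * pass, and vandq_s16 applies it. */
    static inline int16x8_t apply_zbin_mask(int16x8_t qcoeff, int16x8_t abs_coeff,
                                            int16x8_t zbin)
    {
        int16x8_t zbin_mask = vreinterpretq_s16_u16(vcgeq_s16(abs_coeff, zbin));
        return vandq_s16(qcoeff, zbin_mask);
    }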
|
D | highbd_loopfilter_neon.c |
    246  filter = vandq_s16(filter, vreinterpretq_s16_u16(hev));    in filter4()
    255  filter = vandq_s16(filter, vreinterpretq_s16_u16(mask));   in filter4()
|
/external/libvpx/libvpx/vp9/encoder/arm/neon/ |
D | vp9_quantize_neon.c |
    179  qcoeff = vandq_s16(qcoeff, dequant_mask);   in vp9_quantize_fp_32x32_neon()
    228  qcoeff = vandq_s16(qcoeff, dequant_mask);   in vp9_quantize_fp_32x32_neon()
|
/external/libhevc/common/arm/ |
D | ihevc_quant_iquant_ssd_neon_intr.c |
    575  pq0 = vandq_s16(q_10, vreinterpretq_s16_u16(psgn0));   in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
    576  pq1 = vandq_s16(q_11, vreinterpretq_s16_u16(psgn1));   in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
    578  nq0 = vandq_s16(q_10, vreinterpretq_s16_u16(nsgn0));   in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
    579  nq1 = vandq_s16(q_11, vreinterpretq_s16_u16(nsgn1));   in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
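The libhevc quant/iquant hits AND the quantized value with positive- and negative-sign masks (psgn*/nsgn*), splitting it into sign-dependent parts, e.g. so a sign-dependent rounding factor can be applied. A rough sketch of that kind of split (an assumption about intent, not the libhevc code):

    #include <arm_neon.h>

    /* Split q into lanes where coeff is positive vs. negative; the two masks
     * are mutually exclusive, and lanes where coeff == 0 are zero in both
     * outputs. */
    static inline void split_by_sign_s16(int16x8_t q, int16x8_t coeff,
                                         int16x8_t *pos_part, int16x8_t *neg_part)
    {
        uint16x8_t psgn = vcgtq_s16(coeff, vdupq_n_s16(0));
        uint16x8_t nsgn = vcltq_s16(coeff, vdupq_n_s16(0));
        *pos_part = vandq_s16(q, vreinterpretq_s16_u16(psgn));
        *neg_part = vandq_s16(q, vreinterpretq_s16_u16(nsgn));
    }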
|
/external/llvm-project/clang/test/CodeGen/arm-mve-intrinsics/ |
D | vandq.c | 31 return vandq_s16(a, b); in test_vandq_s16()
|
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-3v.c | 32 return vandq_s16(a, b); in test_vandq_s16()
|
D | arm_neon_intrinsics.c | 827 return vandq_s16(a, b); in test_vandq_s16()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-3v.c | 32 return vandq_s16(a, b); in test_vandq_s16()
|
D | arm_neon_intrinsics.c | 951 return vandq_s16(a, b); in test_vandq_s16()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | film_grain_neon.cc | 656 remainder = vandq_s16(remainder, vdupq_n_s16(3)); in GetScalingFactors()
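The libgav1 film-grain hit masks remainder with vdupq_n_s16(3), the usual AND-with-(2^k - 1) trick for a power-of-two remainder. A minimal sketch, valid for non-negative lane values:

    #include <arm_neon.h>

    /* Power-of-two remainder via bitwise AND: v & 3 keeps each lane in
     * [0, 3].  Matches v % 4 only when the lanes are non-negative. */
    static inline int16x8_t mod4_s16(int16x8_t v)
    {
        return vandq_s16(v, vdupq_n_s16(3));
    }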
|
D | intrapred_directional_neon.cc | 448 const int16x8_t shift_masked = vandq_s16(shift_upsampled, vdupq_n_s16(0x3f)); in DirectionalZone2FromLeftCol_WxH()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | quantization_utils.h | 456 const int16x8_t neg_offset = vandq_s16(val_sign, FIRST_BIT); in Divide16x8Pow()
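The TensorFlow hit ANDs a sign mask with a constant inside Divide16x8Pow, which looks like a sign-dependent fixup around an arithmetic shift. One standard version of such a fixup, rounding a division by 2^power toward zero, is sketched below; this is an assumption about the idiom, not the TensorFlow implementation.

    #include <arm_neon.h>

    /* Assumes 1 <= power <= 15.  Negative lanes are biased by (2^power - 1)
     * so the arithmetic right shift truncates toward zero instead of toward
     * negative infinity. */
    static inline int16x8_t div_pow2_round_toward_zero_s16(int16x8_t val, int power)
    {
        int16x8_t sign = vshrq_n_s16(val, 15);  /* 0xFFFF in negative lanes */
        int16x8_t bias = vandq_s16(sign, vdupq_n_s16((int16_t)((1 << power) - 1)));
        /* vshlq_s16 with a negative count performs an arithmetic right shift. */
        return vshlq_s16(vaddq_s16(val, bias), vdupq_n_s16((int16_t)-power));
    }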
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    2118   _NEON2SSESTORAGE int16x8_t vandq_s16(int16x8_t a, int16x8_t b); // VAND q0,q0,q0
    15220  _NEON2SSESTORAGE int16x8_t vandq_s16(int16x8_t a, int16x8_t b); // VAND q0,q0,q0
    15221  #define vandq_s16 _mm_and_si128   macro
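NEON_2_SSE maps vandq_s16 directly onto the SSE2 bitwise AND, since both operate on a full 128-bit register regardless of lane width. An equivalent standalone x86 helper (hypothetical name, shown only to make the mapping concrete):

    #include <emmintrin.h>  /* SSE2 */

    /* Bitwise AND is lane-width agnostic, so the NEON q-register AND and the
     * SSE2 128-bit integer AND produce the same bit pattern. */
    static inline __m128i vandq_s16_sse2(__m128i a, __m128i b)
    {
        return _mm_and_si128(a, b);
    }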
|