Searched refs:vcgeq_s16 (Results 1 – 11 of 11) sorted by relevance
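For reference, vcgeq_s16 is the NEON lane-wise signed "compare greater than or equal" on int16x8_t: each of the eight lanes of the uint16x8_t result is 0xFFFF where a >= b and 0x0000 otherwise, which is why every hit below feeds the result into a mask, select, or reinterpret. A minimal scalar model of the intrinsic (names are illustrative):

    #include <arm_neon.h>

    // Scalar model of vcgeq_s16: each of the 8 lanes becomes all-ones
    // (0xFFFF) where a >= b, all-zeros otherwise.
    static uint16x8_t vcgeq_s16_model(int16x8_t a, int16x8_t b) {
      int16_t xa[8], xb[8];
      uint16_t out[8];
      vst1q_s16(xa, a);
      vst1q_s16(xb, b);
      for (int i = 0; i < 8; ++i) out[i] = (xa[i] >= xb[i]) ? 0xFFFF : 0x0000;
      return vld1q_u16(out);
    }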
/external/libaom/av1/encoder/arm/neon/quantize_neon.c
  188: vcgeq_s16(v_abs_coeff, vshlq_s16(v_dequant, v_neg_log_scale_plus_1));  in quantize_fp_logscale_8()
  269: const uint16x8_t v_mask_a = vcgeq_s16(v_abs_coeff_a, v_zbin_s16);  in quantize_fp_no_qmatrix_neon()
  270: const uint16x8_t v_mask_b = vcgeq_s16(v_abs_coeff_b, v_zbin_s16);  in quantize_fp_no_qmatrix_neon()
  381: uint16x8_t vcond = vcgeq_s16(v_abs, vzbins);  in aom_quantize_b_neon()
  421: vcond = vcgeq_s16(v_abs, vzbins);  in aom_quantize_b_neon()
  484: vcond = vcgeq_s16(v_abs, vzbins);  in aom_quantize_b_helper_16x16_neon()
  487: vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);  in aom_quantize_b_helper_16x16_neon()
  541: vcond = vcgeq_s16(v_abs, vzbins);  in aom_quantize_b_helper_16x16_neon()
  544: vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);  in aom_quantize_b_helper_16x16_neon()
  617: vcond = vcgeq_s16(v_abs, vzbins);  in aom_quantize_b_helper_32x32_neon()
  [all …]
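All of the libaom hits implement the quantizer's zero-bin test: compare |coeff| against a zbin (or scaled dequant) threshold and keep only the lanes that pass. A minimal sketch of that masking pattern, with hypothetical variable names rather than the exact libaom code:

    #include <arm_neon.h>

    // Hedged sketch of zero-bin masking as used in quantize_b-style kernels.
    // The real kernels also apply rounding, quant/dequant multipliers, and
    // track the end-of-block position.
    static int16x8_t zero_bin_mask_sketch(int16x8_t coeff, int16x8_t zbin) {
      const int16x8_t abs_coeff = vabsq_s16(coeff);
      // Lanes with |coeff| >= zbin become 0xFFFF, the rest 0x0000.
      const uint16x8_t vcond = vcgeq_s16(abs_coeff, zbin);
      // Zero out every coefficient that failed the zero-bin test.
      return vandq_s16(coeff, vreinterpretq_s16_u16(vcond));
    }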
/external/libvpx/vpx_dsp/arm/quantize_neon.c
  45:  const int16x8_t zbin_mask = vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));  in quantize_b_neon()
  188: const int16x8_t zbin_mask = vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));  in quantize_b_32x32_neon()
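Both libvpx hits reinterpret the unsigned comparison mask back to int16x8_t, so each lane is -1 or 0 and can gate signed arithmetic with a plain AND instead of a select. A sketch of why that is convenient (illustrative names, not the literal quantize_b_neon code):

    #include <arm_neon.h>

    // Add the rounding term only in lanes that met the zero-bin threshold:
    // the mask, viewed as signed, is -1 (all bits set) or 0 per lane.
    static int16x8_t gated_round_sketch(int16x8_t coeff_abs, int16x8_t zbin,
                                        int16x8_t round) {
      const int16x8_t zbin_mask =
          vreinterpretq_s16_u16(vcgeq_s16(coeff_abs, zbin));
      return vaddq_s16(coeff_abs, vandq_s16(round, zbin_mask));
    }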
/external/libhevc/common/arm/ihevc_quant_iquant_ssd_neon_intr.c
  454: psgn0 = vcgeq_s16(vcombine_s16(s0, s1), zero);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
  455: psgn1 = vcgeq_s16(vcombine_s16(s2, s3), zero);  in ihevc_q_iq_ssd_flat_scale_mat_var_rnd_fact_neon()
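Here the comparison is against zero, so psgn0/psgn1 are per-lane "is non-negative" masks that the HEVC quantizer can later use to restore the sign of the quantized magnitudes. A hedged sketch of that idea (illustrative names, not the libhevc variables):

    #include <arm_neon.h>

    // Select +q where the source coefficient was >= 0 and -q where it was
    // negative, using the non-negative mask as the bit-select condition.
    static int16x8_t apply_sign_sketch(int16x8_t src, int16x8_t q_abs) {
      const uint16x8_t psgn = vcgeq_s16(src, vdupq_n_s16(0));
      return vbslq_s16(psgn, q_abs, vnegq_s16(q_abs));
    }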
/external/gemmlowp/fixedpoint/fixedpoint_neon.h
  206: return vreinterpretq_s16_u16(vcgeq_s16(a, b));
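Line 206 is the NEON int16x8_t specialization of gemmlowp's MaskIfGreaterThanOrEqual; gemmlowp keeps masks in the signed type and consumes them with a bit-select. A plausible form of that consuming step, sketched under the assumption that it mirrors the header's SelectUsingMask helper (not quoted verbatim):

    #include <arm_neon.h>

    // Bit-select driven by a gemmlowp-style signed mask: reinterpret back
    // to unsigned and let vbslq pick then_val/else_val per lane.
    static int16x8_t select_using_mask_sketch(int16x8_t mask,
                                              int16x8_t then_val,
                                              int16x8_t else_val) {
      return vbslq_s16(vreinterpretq_u16_s16(mask), then_val, else_val);
    }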
/external/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
  161: vreinterpretq_s16_u16(vcgeq_s16(v_coeff_abs, *dequant_thresh));  in quantize_fp_32x32_8()
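vp9's 32x32 fp quantizer compares |coeff| against a precomputed dequant threshold; a natural companion to such a mask is an "any lane passed?" reduction for skipping all-zero vectors. A hedged sketch of that reduction (the real kernel instead folds the signed mask into further arithmetic):

    #include <arm_neon.h>

    // Returns nonzero if any lane of coeff_abs reaches thresh.
    static int any_lane_ge_sketch(int16x8_t coeff_abs, int16x8_t thresh) {
      const uint16x8_t mask = vcgeq_s16(coeff_abs, thresh);
      // Horizontal OR/max: fold 8 lanes down to 1 and test it.
      const uint16x4_t m4 = vorr_u16(vget_low_u16(mask), vget_high_u16(mask));
      const uint16x4_t m2 = vpmax_u16(m4, m4);
      const uint16x4_t m1 = vpmax_u16(m2, m2);
      return vget_lane_u16(m1, 0) != 0;
    }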
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h
  4150: uint8x8_t mask = vmovn_u16(vcgeq_s16(input_diff_s16, diff_min_s16));  in Softmax()
  4306: vcgeq_s16(input_val_centered_0, vdupq_n_s16(-input_range_radius));  in Logistic()
  4308: vcgeq_s16(input_val_centered_1, vdupq_n_s16(-input_range_radius));  in Logistic()
  4501: vcgeq_s16(input_val_centered_0, vdupq_n_s16(-input_range_radius));  in Tanh()
  4503: vcgeq_s16(input_val_centered_1, vdupq_n_s16(-input_range_radius));  in Tanh()

/external/tensorflow/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
  6656: vcgeq_s16(input_val.val[0], neg_range_radius_dup);  in CalculateUnsignedClampingWithRangeBitMasks()
  6658: vcgeq_s16(input_val.val[1], neg_range_radius_dup);  in CalculateUnsignedClampingWithRangeBitMasks()
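The TFLite kernels build range masks for their fixed-point sigmoid/tanh evaluation: lanes below -input_range_radius saturate to one extreme, lanes at or above +input_range_radius to the other, and only in-range lanes keep the computed table value. A simplified, hedged sketch of that clamping (the real kernels run it inside a full uint8 pipeline):

    #include <arm_neon.h>

    // Clamp-by-mask: force out-of-range lanes to saturated outputs.
    // All names are illustrative.
    static int16x8_t range_clamp_sketch(int16x8_t x, int16_t radius,
                                        int16x8_t computed, int16x8_t lo_sat,
                                        int16x8_t hi_sat) {
      const uint16x8_t ge_lo = vcgeq_s16(x, vdupq_n_s16((int16_t)-radius));
      const uint16x8_t ge_hi = vcgeq_s16(x, vdupq_n_s16(radius));
      int16x8_t out = vbslq_s16(ge_lo, computed, lo_sat);  // x < -radius -> lo
      out = vbslq_s16(ge_hi, hi_sat, out);                 // x >= radius -> hi
      return out;
    }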
/external/libgav1/src/dsp/arm/intrapred_directional_neon.cc
  1865: const uint16x8_t blend = vcgeq_s16(indices, vdupq_n_s16(zone_bounds >> 6));  in DirectionalZone1Blend_8x8()
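In the directional intra predictor the mask marks which lanes have crossed the zone boundary, so two predictor sources can be chosen between per lane. A sketch of that select, with illustrative names (the libgav1 kernel also applies weighted blending near the boundary):

    #include <arm_neon.h>

    // Per-lane choice between two 16-bit predictor rows, based on whether
    // the lane index has passed zone_bound.
    static int16x8_t zone_blend_sketch(int16x8_t indices, int16_t zone_bound,
                                       int16x8_t pred_a, int16x8_t pred_b) {
      const uint16x8_t blend = vcgeq_s16(indices, vdupq_n_s16(zone_bound));
      return vbslq_s16(blend, pred_b, pred_a);
    }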
/external/neon_2_sse/NEON_2_SSE.h
  739:  _NEON2SSESTORAGE uint16x8_t vcgeq_s16(int16x8_t a, int16x8_t b); // VCGE.S16 q0, q0, q0
  5126: return64(vcgeq_s16(_pM128i(a), _pM128i(b)));  in vcge_s16()
  5182: _NEON2SSESTORAGE uint16x8_t vcgeq_s16(int16x8_t a, int16x8_t b); // VCGE.S16 q0, q0, q0
  5183: _NEON2SSE_INLINE uint16x8_t vcgeq_s16(int16x8_t a, int16x8_t b) // VCGE.S16 q0, q0, q0  in vcgeq_s16() function
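NEON_2_SSE has to synthesize vcgeq_s16 because SSE2 only offers equal and greater-than compares on packed 16-bit integers. One standard construction, shown as a hedged sketch (the header's actual body may differ):

    #include <emmintrin.h>  // SSE2

    // a >= b  ==  (a > b) | (a == b); each 16-bit lane becomes 0xFFFF or 0,
    // matching NEON's all-ones/all-zeros mask convention.
    static __m128i cmpge_epi16_sketch(__m128i a, __m128i b) {
      return _mm_or_si128(_mm_cmpgt_epi16(a, b), _mm_cmpeq_epi16(a, b));
    }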
/external/clang/test/CodeGen/aarch64-neon-intrinsics.c
  2136: return vcgeq_s16(v1, v2);  in test_vcgeq_s16()

/external/clang/test/CodeGen/arm_neon_intrinsics.c
  1635: return vcgeq_s16(a, b);  in test_vcgeq_s16()
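Both tests simply return the intrinsic's result so FileCheck can match the lowered instruction: cmge on AArch64 and vcge.s16 on 32-bit ARM (the same mnemonic the NEON_2_SSE.h comments cite). A minimal sketch of such a test function; the CHECK lines are illustrative, not copied from the tests:

    #include <arm_neon.h>

    // CHECK-AARCH64: cmge v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
    // CHECK-ARM: vcge.s16 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
    uint16x8_t test_vcgeq_s16(int16x8_t a, int16x8_t b) {
      return vcgeq_s16(a, b);
    }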