/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | mean.h |
    100  temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup);  in MeanImpl()
    101  temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup);  in MeanImpl()
    102  temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup);  in MeanImpl()
    103  temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup);  in MeanImpl()
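All four hits are the same clamp idiom: vmaxq_s32 applies the lower activation bound and vminq_s32 the upper one, lane by lane, before the sums are narrowed. A minimal standalone sketch of that idiom; clamp_block_s32, act_min, and act_max are illustrative names, not TFLite's:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative helper (not TFLite's): clamp a block of four int32x4
     * accumulators to [act_min, act_max], mirroring the
     * vminq_s32(vmaxq_s32(...)) idiom in MeanImpl above. */
    static inline int32x4x4_t clamp_block_s32(int32x4x4_t sums,
                                              int32_t act_min, int32_t act_max) {
      const int32x4_t min_dup = vdupq_n_s32(act_min);  /* broadcast lower bound */
      const int32x4_t max_dup = vdupq_n_s32(act_max);  /* broadcast upper bound */
      sums.val[0] = vminq_s32(vmaxq_s32(sums.val[0], min_dup), max_dup);
      sums.val[1] = vminq_s32(vmaxq_s32(sums.val[1], min_dup), max_dup);
      sums.val[2] = vminq_s32(vmaxq_s32(sums.val[2], min_dup), max_dup);
      sums.val[3] = vminq_s32(vmaxq_s32(sums.val[3], min_dup), max_dup);
      return sums;
    }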
|
/external/libaom/libaom/av1/common/arm/ |
D | convolve_neon.h |
    55   sum0 = vmaxq_s32(sum0, zero);  in wiener_convolve8_vert_4x8()
    56   sum1 = vmaxq_s32(sum1, zero);  in wiener_convolve8_vert_4x8()
    152  sum_0 = vmaxq_s32(sum_0, zero);  in wiener_convolve8_horiz_4x8()
    222  sum0 = vmaxq_s32(sum0, zero);  in convolve8_4x4_s32()
|
D | convolve_neon.c |
    143  sum0 = vmaxq_s32(sum0, zero);  in convolve8_vert_4x4_s32()
    184  sum0 = vmaxq_s32(sum0, zero);  in convolve8_vert_8x4_s32()
    185  sum1 = vmaxq_s32(sum1, zero);  in convolve8_vert_8x4_s32()
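In both libaom files the pattern is a lower-bound clamp against a zero vector, so intermediate convolution sums cannot go negative before they are rounded and narrowed. A hedged sketch of that shape; the saturating narrow to uint16 is an assumption about the surrounding code, not copied from libaom:

    #include <arm_neon.h>

    /* Illustrative sketch (not libaom's code): force four int32 convolution
     * sums to be non-negative, then narrow them to uint16 with saturation. */
    static inline uint16x4_t clamp_and_narrow_u16(int32x4_t sum) {
      const int32x4_t zero = vdupq_n_s32(0);
      sum = vmaxq_s32(sum, zero);   /* drop negative intermediate values */
      return vqmovun_s32(sum);      /* saturating narrow to unsigned 16-bit */
    }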
|
/external/webrtc/webrtc/common_audio/signal_processing/ |
D | min_max_operations_neon.c |
    181  max32x4_0 = vmaxq_s32(max32x4_0, in32x4_0);  in WebRtcSpl_MaxValueW32Neon()
    182  max32x4_1 = vmaxq_s32(max32x4_1, in32x4_1);  in WebRtcSpl_MaxValueW32Neon()
    185  int32x4_t max32x4 = vmaxq_s32(max32x4_0, max32x4_1);  in WebRtcSpl_MaxValueW32Neon()
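Here vmaxq_s32 maintains a lane-wise running maximum over the input, followed by a cross-lane reduction. A self-contained sketch of that pattern, assuming the length is a multiple of four and at least four; a real routine would also have to handle any scalar tail:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the running-max pattern above (not WebRTC's exact code). */
    static int32_t max_value_s32_neon(const int32_t* vector, size_t length) {
      int32x4_t max32x4 = vld1q_s32(vector);             /* seed with first 4 lanes */
      for (size_t i = 4; i + 4 <= length; i += 4) {
        const int32x4_t in32x4 = vld1q_s32(vector + i);
        max32x4 = vmaxq_s32(max32x4, in32x4);            /* lane-wise running max */
      }
      /* Reduce the four lanes to a scalar. */
      int32x2_t max32x2 = vmax_s32(vget_low_s32(max32x4), vget_high_s32(max32x4));
      max32x2 = vpmax_s32(max32x2, max32x2);
      return vget_lane_s32(max32x2, 0);
    }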
|
/external/libopus/silk/arm/ |
D | LPC_inv_pred_gain_neon_intr.c |
    128  max_s32x4 = vmaxq_s32( max_s32x4, s0_s32x4 );  in LPC_inverse_pred_gain_QA_neon()
    130  max_s32x4 = vmaxq_s32( max_s32x4, s1_s32x4 );  in LPC_inverse_pred_gain_QA_neon()
|
D | NSQ_del_dec_neon_intr.c |
    697  tmp1_s32x4 = vmaxq_s32( tmp1_s32x4, vdupq_n_s32( -( 31 << 10 ) ) );  in silk_noise_shape_quantizer_del_dec_neon()
    755  tmp2_s32x4 = vaddq_s32( tmp2_s32x4, vmaxq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ) );  in silk_noise_shape_quantizer_del_dec_neon()
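The SILK quantizer uses the intrinsic in two ways above: once as a lower-bound clamp against a broadcast constant (hit 697), and once to keep the larger of two candidate rate-distortion vectors (hit 755). Both shapes, sketched with illustrative names:

    #include <arm_neon.h>

    /* Illustrative names, not SILK's. */
    static inline int32x4_t floor_q10(int32x4_t x) {
      /* Lower-bound clamp against a broadcast constant, as in hit 697. */
      return vmaxq_s32(x, vdupq_n_s32(-(31 << 10)));
    }

    static inline int32x4_t worse_of(int32x4_t rd1_Q10, int32x4_t rd2_Q10) {
      /* Keep the larger rate-distortion value in every lane, as in hit 755. */
      return vmaxq_s32(rd1_Q10, rd2_Q10);
    }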
|
/external/XNNPACK/src/math/ |
D | exp-neonfma-p5.c | 61 int32x4_t ven = vmaxq_s32(veo, vmin_exponent); in xnn_math_f32_exp__neonfma_p5()
|
D | exp-neonfma-lut64-p2.c | 79 int32x4_t ven = vmaxq_s32(veo, vmin_exponent); in xnn_math_f32_exp__neonfma_lut64_p2()
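Both XNNPACK exp kernels clamp a derived exponent with vmaxq_s32 before using it for scaling. A rough sketch of the idea, splitting the exponent into a bounded part and a remainder so scaling can be applied in two steps; the layout and the remainder step are assumptions about the surrounding code, not XNNPACK's exact values:

    #include <arm_neon.h>

    /* Rough sketch; names and splitting details are assumptions. The clamped
     * part ven is applied first, the remainder in a second step, so a very
     * negative exponent never leaves the representable range at once. */
    static inline void split_exponent(int32x4_t veo, int32x4_t vmin_exponent,
                                      int32x4_t* ven, int32x4_t* vrem) {
      *ven  = vmaxq_s32(veo, vmin_exponent);  /* bounded part of the exponent */
      *vrem = vsubq_s32(veo, *ven);           /* leftover, zero unless clamped */
    }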
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | neon_tensor_utils.cc |
    858   vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);  in NeonMatrixBatchVectorAccumulateImpl()
    860   vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);  in NeonMatrixBatchVectorAccumulateImpl()
    923   vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);  in NeonMatrixBatchVectorAccumulateImpl()
    925   vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);  in NeonMatrixBatchVectorAccumulateImpl()
    927   vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup);  in NeonMatrixBatchVectorAccumulateImpl()
    929   vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup);  in NeonMatrixBatchVectorAccumulateImpl()
    1639  vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);  in NeonCwiseMul()
    1641  vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);  in NeonCwiseMul()
    2178  int32x4_t max0_i32x4 = vmaxq_s32(f2i0_i32x4, neg_scale_i32x4);  in NeonSymmetricQuantizeFloats()
    2179  int32x4_t max1_i32x4 = vmaxq_s32(f2i1_i32x4, neg_scale_i32x4);  in NeonSymmetricQuantizeFloats()
    [all …]
|
D | optimized_ops.h |
    996   temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup);  in MeanImpl()
    997   temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup);  in MeanImpl()
    998   temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup);  in MeanImpl()
    999   temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup);  in MeanImpl()
    5617  vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);  in Quantize()
    5619  vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);  in Quantize()
    5621  vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup);  in Quantize()
    5623  vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup);  in Quantize()
    5682  int32x4_t left_shift_1 = vmaxq_s32(out_shift_1, zeros);  in Quantize()
    5683  int32x4_t left_shift_2 = vmaxq_s32(out_shift_2, zeros);  in Quantize()
    [all …]
|
D | depthwiseconv_uint8.h |
    1885  acc[j] = vmaxq_s32(acc[j], output_activation_min_vec);
    1928  acc0 = vmaxq_s32(acc0, output_activation_min_vec);
    1929  acc1 = vmaxq_s32(acc1, output_activation_min_vec);
    1960  acc = vmaxq_s32(acc, output_activation_min_vec);
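Across these three TFLite files the intrinsic applies the lower end of the activation or quantization range to int32 accumulators, usually paired with vminq_s32 for the upper end, before the values are narrowed to 8 bits. A standalone sketch of that clamp-and-narrow step; names and the narrowing sequence are illustrative, not copied from TFLite:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Illustrative sketch: clamp two int32x4 accumulators to the activation
     * range, then narrow them to eight uint8 outputs with saturation. */
    static inline uint8x8_t clamp_narrow_u8(int32x4_t acc0, int32x4_t acc1,
                                            int32_t output_activation_min,
                                            int32_t output_activation_max) {
      const int32x4_t min_vec = vdupq_n_s32(output_activation_min);
      const int32x4_t max_vec = vdupq_n_s32(output_activation_max);
      acc0 = vminq_s32(vmaxq_s32(acc0, min_vec), max_vec);
      acc1 = vminq_s32(vmaxq_s32(acc1, min_vec), max_vec);
      /* Narrow 2x int32x4 -> int16x8 -> uint8x8 with saturation. */
      const int16x8_t acc16 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
      return vqmovun_s16(acc16);
    }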
|
/external/gemmlowp/internal/ |
D | simd_wrappers_neon.h |
    102  inline Int32x4 Max(Int32x4 a, Int32x4 b) { return vmaxq_s32(a, b); }
    105  return vmaxq_s32(a, vdupq_n_s32(b));
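gemmlowp's wrapper exposes the intrinsic in two shapes: vector-vector max and vector-scalar max via a broadcast. Equivalent standalone helpers, using plain NEON types rather than gemmlowp's Int32x4:

    #include <arm_neon.h>
    #include <stdint.h>

    static inline int32x4_t max_s32x4(int32x4_t a, int32x4_t b) {
      return vmaxq_s32(a, b);                /* lane-wise max of two vectors */
    }

    static inline int32x4_t max_s32x4_scalar(int32x4_t a, int32_t b) {
      return vmaxq_s32(a, vdupq_n_s32(b));   /* broadcast b, then lane-wise max */
    }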
|
/external/skqp/src/core/ |
D | SkBitmapProcState_matrixProcs.cpp | 325 res = vmaxq_s32(res, vdupq_n_s32(0)); in clamp4()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | cpu_backend_gemm_custom_gemv.h | 566 vmaxq_s32(multiplier_exponent, vdupq_n_s32(0));
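The hit above is the usual shift-splitting trick in requantization: the non-negative part of a per-channel multiplier exponent becomes a left shift, the non-positive part a right shift. A sketch of both halves; the vminq_s32 half is an assumption about the surrounding code:

    #include <arm_neon.h>

    /* Illustrative names. Positive exponents become left shifts, negative ones
     * right shifts; each lane contributes to exactly one of the two. */
    static inline void split_shift(int32x4_t multiplier_exponent,
                                   int32x4_t* left_shift, int32x4_t* right_shift) {
      const int32x4_t zeros = vdupq_n_s32(0);
      *left_shift  = vmaxq_s32(multiplier_exponent, zeros);  /* >= 0 part */
      *right_shift = vminq_s32(multiplier_exponent, zeros);  /* <= 0 part */
    }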
|
/external/eigen/Eigen/src/Core/arch/NEON/ |
D | PacketMath.h | 219 …ONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
|
/external/skia/include/private/ |
D | SkNx_neon.h | 563 AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); } in Max()
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v128_intrinsics_arm.h | 412 vmaxq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y))); in v128_max_s32()
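libaom's v128 type is carried around as int64x2_t, so the 32-bit max goes through vreinterpretq casts, which are free and only change the lane view. The same shape as a standalone helper:

    #include <arm_neon.h>

    /* Sketch of the reinterpret-then-max shape used by v128_max_s32 above. */
    static inline int64x2_t max_s32_on_s64_storage(int64x2_t x, int64x2_t y) {
      return vreinterpretq_s64_s32(
          vmaxq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
    }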
|
/external/skqp/include/private/ |
D | SkNx_neon.h | 563 AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); } in Max()
|
/external/psimd/include/ |
D | psimd.h | 839 return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b); in psimd_max_s32()
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 4315 return vmaxq_s32(a, b); in test_vmaxq_s32()
|
D | arm_neon_intrinsics.c | 8599 return vmaxq_s32(a, b); in test_vmaxq_s32()
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    871   _NEON2SSESTORAGE int32x4_t vmaxq_s32(int32x4_t a, int32x4_t b); // VMAX.S32 q0,q0,q0
    6147  _NEON2SSESTORAGE int32x4_t vmaxq_s32(int32x4_t a, int32x4_t b); // VMAX.S32 q0,q0,q0
    6148  #define vmaxq_s32 _MM_MAX_EPI32 //SSE4.1 macro
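NEON_2_SSE declares the intrinsic and then maps it to an SSE4.1 macro. Assuming _MM_MAX_EPI32 resolves to _mm_max_epi32 on SSE4.1 targets, the translation is a single instruction, sketched here:

    #include <smmintrin.h>  /* SSE4.1 */

    /* Sketch of the SSE4.1 side of the mapping above: per-lane signed
     * 32-bit max, one PMAXSD instruction. */
    static inline __m128i vmaxq_s32_sse41(__m128i a, __m128i b) {
      return _mm_max_epi32(a, b);
    }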
|