/external/XNNPACK/src/f32-hswish/gen/ |
D | hswish-neon-x16.c |
  52 vacc0123 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc0123), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
  53 vacc4567 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc4567), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
  54 vacc89AB = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc89AB), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
  55 vaccCDEF = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vaccCDEF), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
  72 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
  81 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x16()
|
D | hswish-neon-x8.c |
  44 vacc0123 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc0123), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
  45 vacc4567 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc4567), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
  58 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
  67 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x8()
|
D | hswish-neon-x4.c |
  37 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x4()
  46 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix)); in xnn_f32_hswish_ukernel__neon_x4()
|
/external/XNNPACK/src/f32-hswish/ |
D | neon.c.in |
  44 vacc${ABC[N:N+4]} = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc${ABC[N:N+4]}), vsix));
  57 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
  66 vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
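All of the f32-hswish kernels above clamp a float accumulator against an upper bound with an integer min on the raw bits: for finite, non-negative IEEE-754 floats the bit patterns order the same way as the values, so vminq_s32 on the reinterpreted lanes is a valid float min once the accumulator has already been clamped to >= 0 (which these kernels do with vmaxq_f32 just before the lines shown). A minimal sketch of the trick; clamp_upper is a hypothetical helper, not XNNPACK code:

#include <arm_neon.h>

// Clamp non-negative floats to an upper bound via integer min on the bits.
// Valid only when every lane of vacc is finite and >= 0.0f.
static inline float32x4_t clamp_upper(float32x4_t vacc, float bound) {
  const int32x4_t vbound_bits = vreinterpretq_s32_f32(vdupq_n_f32(bound));
  return vreinterpretq_f32_s32(
      vminq_s32(vreinterpretq_s32_f32(vacc), vbound_bits));
}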
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | mean.h |
  100 temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup); in MeanImpl()
  101 temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup); in MeanImpl()
  102 temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup); in MeanImpl()
  103 temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup); in MeanImpl()
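Here vminq_s32/vmaxq_s32 implement the usual saturating clamp of a quantized accumulator into the output activation range before narrowing to 8 bits; the same min(max(...)) pattern recurs in the convolve_neon.h, neon_tensor_utils.cc, optimized_ops.h, depthwiseconv_uint8.h and inverse_transform_10bit_neon.cc entries below. A minimal sketch; clamp_s32 is a hypothetical stand-alone helper:

#include <arm_neon.h>
#include <stdint.h>

// Clamp each 32-bit lane of a quantized accumulator into [act_min, act_max].
static inline int32x4_t clamp_s32(int32x4_t acc, int32_t act_min, int32_t act_max) {
  return vminq_s32(vmaxq_s32(acc, vdupq_n_s32(act_min)), vdupq_n_s32(act_max));
}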
|
/external/webrtc/common_audio/signal_processing/ |
D | min_max_operations_neon.c |
  261 min32x4_0 = vminq_s32(min32x4_0, in32x4_0); in WebRtcSpl_MinValueW32Neon()
  262 min32x4_1 = vminq_s32(min32x4_1, in32x4_1); in WebRtcSpl_MinValueW32Neon()
  265 int32x4_t min32x4 = vminq_s32(min32x4_0, min32x4_1); in WebRtcSpl_MinValueW32Neon()
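WebRtcSpl_MinValueW32Neon keeps two independent running-minimum accumulators to hide instruction latency, combines them with one more vminq_s32, and only then folds across lanes. A sketch of the same shape; min_value_w32 is hypothetical and, unlike the real routine, assumes length is a multiple of 8 (WebRTC handles the remainder scalarly):

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

int32_t min_value_w32(const int32_t* vector, size_t length) {
  int32x4_t min0 = vdupq_n_s32(INT32_MAX);
  int32x4_t min1 = vdupq_n_s32(INT32_MAX);
  for (size_t i = 0; i < length; i += 8) {
    min0 = vminq_s32(min0, vld1q_s32(vector + i));      // two accumulators
    min1 = vminq_s32(min1, vld1q_s32(vector + i + 4));  // hide latency
  }
  int32x4_t min4 = vminq_s32(min0, min1);               // combine accumulators
  int32x2_t min2 = vmin_s32(vget_low_s32(min4), vget_high_s32(min4));
  min2 = vpmin_s32(min2, min2);                         // pairwise fold to one lane
  return vget_lane_s32(min2, 0);
}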
|
/external/libaom/libaom/av1/common/arm/ |
D | convolve_neon.h |
  110 sum_0 = vminq_s32(sum_0, round_vec_1); in wiener_convolve8_horiz_8x8()
  111 sum_1 = vminq_s32(sum_1, round_vec_1); in wiener_convolve8_horiz_8x8()
  153 sum_0 = vminq_s32(sum_0, round_vec_1); in wiener_convolve8_horiz_4x8()
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | neon_tensor_utils.cc |
  884 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
  886 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
  949 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
  951 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
  953 vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
  955 vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup); in NeonMatrixBatchVectorAccumulateImpl()
  1787 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in NeonCwiseMul()
  1789 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in NeonCwiseMul()
  2340 int32x4_t min0_i32x4 = vminq_s32(max0_i32x4, scale_i32x4); in NeonSymmetricQuantizeFloats()
  2341 int32x4_t min1_i32x4 = vminq_s32(max1_i32x4, scale_i32x4); in NeonSymmetricQuantizeFloats()
  [all …]
|
D | optimized_ops.h |
  1046 temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup); in MeanImpl()
  1047 temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup); in MeanImpl()
  1048 temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup); in MeanImpl()
  1049 temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup); in MeanImpl()
  4041 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
  4043 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
  4045 vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
  4047 vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup); in SoftmaxInt8LUT()
  5813 vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup); in Quantize()
  5815 vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup); in Quantize()
  [all …]
|
D | depthwiseconv_uint8.h |
  1888 acc[j] = vminq_s32(acc[j], output_activation_max_vec);
  1930 acc0 = vminq_s32(acc0, output_activation_max_vec);
  1931 acc1 = vminq_s32(acc1, output_activation_max_vec);
  1961 acc = vminq_s32(acc, output_activation_max_vec);
|
/external/XNNPACK/src/math/ |
D | exp-neonfma-rr2-p5.c | 62 ven = vminq_s32(ven, vmax_exponent); in xnn_math_f32_exp__neonfma_rr2_p5()
|
D | exp-neonfma-rr2-lut64-p2.c | 64 ven = vminq_s32(ven, vmax_exponent); in xnn_math_f32_exp__neonfma_rr2_lut64_p2()
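In both exp kernels the clamp keeps the integer exponent n small enough that building 2^n by shifting the biased exponent into the float's exponent field cannot overflow. A sketch of that idea only; exp2_int is hypothetical, and the real kernels additionally split n across two factors to reach the subnormal range:

#include <arm_neon.h>

// 2^n for integer n, via the IEEE-754 exponent field. Clamping first keeps
// the biased exponent n + 127 inside [1, 254], i.e. a normal float.
static inline float32x4_t exp2_int(int32x4_t vn) {
  vn = vmaxq_s32(vn, vdupq_n_s32(-126));  // avoid underflow into denormals
  vn = vminq_s32(vn, vdupq_n_s32(127));   // avoid exponent overflow
  return vreinterpretq_f32_s32(
      vshlq_n_s32(vaddq_s32(vn, vdupq_n_s32(127)), 23));
}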
|
/external/libopus/silk/arm/ |
D | LPC_inv_pred_gain_neon_intr.c |
  129 min_s32x4 = vminq_s32( min_s32x4, s0_s32x4 ); in LPC_inverse_pred_gain_QA_neon()
  131 min_s32x4 = vminq_s32( min_s32x4, s1_s32x4 ); in LPC_inverse_pred_gain_QA_neon()
|
D | NSQ_del_dec_neon_intr.c |
  698 tmp1_s32x4 = vminq_s32( tmp1_s32x4, vdupq_n_s32( 30 << 10 ) ); in silk_noise_shape_quantizer_del_dec_neon()
  754 tmp1_s32x4 = vaddq_s32( tmp2_s32x4, vminq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ) ); in silk_noise_shape_quantizer_del_dec_neon()
|
/external/gemmlowp/internal/ |
D | simd_wrappers_neon.h | 100 inline Int32x4 Min(Int32x4 a, Int32x4 b) { return vminq_s32(a, b); }
|
/external/skqp/src/core/ |
D | SkBitmapProcState_matrixProcs.cpp | 326 res = vminq_s32(res, vdupq_n_s32(max)); in clamp4()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | cpu_backend_gemm_custom_gemv.h | 577 vminq_s32(multiplier_exponent, vdupq_n_s32(0));
|
/external/eigen/Eigen/src/Core/arch/NEON/ |
D | PacketMath.h | 216 template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
|
/external/skia/include/private/ |
D | SkNx_neon.h | 540 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } in Min()
|
/external/skqp/include/private/ |
D | SkNx_neon.h | 562 AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } in Min()
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v128_intrinsics_arm.h | 407 vminq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y))); in v128_min_s32()
|
/external/libgav1/libgav1/src/dsp/arm/ |
D | inverse_transform_10bit_neon.cc |
  181 *a = vmaxq_s32(vminq_s32(x, *max), *min); in HadamardRotation()
  182 *b = vmaxq_s32(vminq_s32(y, *max), *min); in HadamardRotation()
|
/external/psimd/include/ |
D | psimd.h | 898 return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b); in psimd_min_s32()
|
/external/pffft/ |
D | sse2neon.h | 3548 vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); in _mm_min_epi32()
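sse2neon maps SSE intrinsics onto NEON one at a time; for _mm_min_epi32 the emulation is a pure type shim around vminq_s32, since sse2neon carries __m128i as int64x2_t. The same s64-to-s32 retyping appears in libaom's v128_min_s32 above. A minimal sketch under that assumption; the typedef and function name here are hypothetical, not sse2neon's:

#include <arm_neon.h>

typedef int64x2_t m128i_t;  // hypothetical stand-in for sse2neon's __m128i

static inline m128i_t my_min_epi32(m128i_t a, m128i_t b) {
  // Retype the 64-bit carrier to 32-bit lanes, take the min, retype back.
  return vreinterpretq_s64_s32(
      vminq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
}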
|
/external/llvm-project/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 3949 return vminq_s32(a, b); in test_vminq_s32()
|