/external/XNNPACK/src/f32-prelu/gen/ |
D | neon-2x8.c |
    59 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
    61 const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
    63 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
    65 const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
    96 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
    98 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
   121 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
   123 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
|
D | neon-2x4.c |
    56 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
    58 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
    81 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
    83 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
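In these XNNPACK PReLU kernels, vcltq_s32 compares the raw bits of a float vector against zero: negative IEEE-754 floats have the sign bit set and therefore compare below zero as signed int32, so the result is an all-ones lane mask for negative inputs. A minimal sketch of that mask-and-blend idiom (a hypothetical helper, not the XNNPACK kernel itself), assuming the mask selects between x and x*slope:

    #include <arm_neon.h>

    /* Sketch: y = x >= 0 ? x : x * slope, lane-wise. */
    static inline float32x4_t prelu_f32x4(float32x4_t vx, float32x4_t vslope) {
      /* All-ones where the sign bit of x is set (negative x, including -0.0f,
         for which x * slope is still a signed zero). */
      const uint32x4_t vmask = vcltq_s32(vreinterpretq_s32_f32(vx), vmovq_n_s32(0));
      const float32x4_t vscaled = vmulq_f32(vx, vslope);
      /* Bitwise select: scaled value where the mask is set, x elsewhere. */
      return vbslq_f32(vmask, vscaled, vx);
    }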
|
/external/libvpx/libvpx/vp8/encoder/arm/neon/ |
D | vp8_shortwalsh4x4_neon.c |
    89 q8u32 = vcltq_s32(q0s32, qEmptys32); in vp8_short_walsh4x4_neon()
    90 q9u32 = vcltq_s32(q1s32, qEmptys32); in vp8_short_walsh4x4_neon()
    91 q10u32 = vcltq_s32(q2s32, qEmptys32); in vp8_short_walsh4x4_neon()
    92 q11u32 = vcltq_s32(q3s32, qEmptys32); in vp8_short_walsh4x4_neon()
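An all-ones comparison lane is -1 when reinterpreted as signed, which this kernel exploits as a branch-free per-lane correction term. A small illustrative sketch of the trick (hypothetical helper, not the Walsh transform itself), assuming the goal is to add 1 to negative lanes before a rounding step:

    #include <arm_neon.h>

    /* Add 1 to every negative lane without branches: subtracting an all-ones
       mask (-1 as signed) is the same as adding (x < 0 ? 1 : 0) per lane. */
    static inline int32x4_t bump_negative_lanes(int32x4_t x) {
      const uint32x4_t neg_mask = vcltq_s32(x, vdupq_n_s32(0));
      return vsubq_s32(x, vreinterpretq_s32_u32(neg_mask));
    }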
|
/external/XNNPACK/src/requantization/ |
D | precise-neon.c |
    54 const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
    55 const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
    56 const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
    57 const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
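Here, and in the q8-avgpool/q8-gavgpool kernels below, the negativity mask of the accumulator is folded into the fixed-point scaling so that the later rounding right shift rounds halfway cases away from zero rather than toward positive infinity. A simplified sketch of that idea with hypothetical names (a plain 32-bit arithmetic shift instead of the kernels' 64-bit multiply):

    #include <arm_neon.h>

    /* NEON's rounding shift (vrshrq) rounds ties toward +infinity; adding the
       negativity mask (-1 in negative lanes) beforehand turns the combination
       into round-half-away-from-zero. */
    static inline int32x4_t rounding_shift_away_from_zero(int32x4_t v) {
      const uint32x4_t neg_mask = vcltq_s32(v, vmovq_n_s32(0));
      const int32x4_t adjusted = vaddq_s32(v, vreinterpretq_s32_u32(neg_mask));
      return vrshrq_n_s32(adjusted, 3);  /* divide by 8, rounding away from zero */
    }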
|
/external/XNNPACK/src/q8-avgpool/ |
D | up9-neon.c |
   104 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
   105 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
   177 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
   178 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
|
D | mp9p8q-neon.c |
   184 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
   185 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
   258 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
   259 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
|
/external/XNNPACK/src/q8-gavgpool/ |
D | up7-neon.c |
    86 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
    87 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
   156 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
   157 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
|
D | mp7p7q-neon.c |
   162 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
   163 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
   234 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
   235 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
|
/external/XNNPACK/src/f32-prelu/ |
D | neon.c.in |
    67 …const uint32x4_t vm${M}x${ABC[C:C+4]} = vcltq_s32(vreinterpretq_s32_f32(vi${M}x${ABC[C:C+4]}), vmo…
    95 … const uint32x4_t vm${M}x0123 = vcltq_s32(vreinterpretq_s32_f32(vi${M}x0123), vmovq_n_s32(0));
   118 … const uint32x4_t vm${M}x0123 = vcltq_s32(vreinterpretq_s32_f32(vi${M}x0123), vmovq_n_s32(0));
|
/external/gemmlowp/fixedpoint/ |
D | fixedpoint_neon.h | 211 return vreinterpretq_s32_u32(vcltq_s32(a, b));
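gemmlowp's fixed-point layer returns the comparison result reinterpreted as int32x4_t so the mask can be AND/OR-combined with other masks and later fed to a bitwise select. A rough sketch of that idiom with raw intrinsics (hypothetical helper, not gemmlowp's own select routine):

    #include <arm_neon.h>

    /* Sketch: clamp negative lanes to zero via a less-than mask and vbsl. */
    static inline int32x4_t select_nonnegative(int32x4_t a) {
      const int32x4_t zero = vdupq_n_s32(0);
      /* Kept as int32x4_t, matching the gemmlowp convention, so it could be
         combined with other masks before the final select. */
      const int32x4_t if_neg = vreinterpretq_s32_u32(vcltq_s32(a, zero));
      /* vbsl takes the mask as raw bits: zero where a < 0, a elsewhere. */
      return vbslq_s32(vreinterpretq_u32_s32(if_neg), zero, a);
    }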
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | deblock_neon.c | 285 const uint32x4_t mask32 = vcltq_s32(b, f); in calculate_mask()
|
/external/libopus/silk/arm/ |
D | NSQ_del_dec_neon_intr.c |
   694 sign_s32x4 = vreinterpretq_s32_u32( vcltq_s32( Seed_s32x4, vdupq_n_s32( 0 ) ) ); in silk_noise_shape_quantizer_del_dec_neon()
   758 t_u32x4 = vcltq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
|
/external/skia/include/private/ |
D | SkNx_neon.h | 556 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
|
/external/libaom/libaom/aom_dsp/simd/ |
D | v128_intrinsics_arm.h | 690 vcltq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y))); in v128_cmplt_s32()
|
/external/skqp/include/private/ |
D | SkNx_neon.h | 556 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
|
/external/clang/test/CodeGen/ |
D | aarch64-neon-intrinsics.c | 2633 return vcltq_s32(v1, v2); in test_vcltq_s32()
|
D | arm_neon_intrinsics.c | 2041 return vcltq_s32(a, b); in test_vcltq_s32()
|
/external/neon_2_sse/ |
D | NEON_2_SSE.h |
    785 _NEON2SSESTORAGE uint32x4_t vcltq_s32(int32x4_t a, int32x4_t b); // VCGT.S32 q0, q0, q0
   5520 _NEON2SSESTORAGE uint32x4_t vcltq_s32(int32x4_t a, int32x4_t b); // VCGT.S32 q0, q0, q0
   5521 #define vcltq_s32(a,b) vcgtq_s32(b, a) //swap the arguments!! macro
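The last entry shows how NEON_2_SSE implements the "less-than" compare: a < b holds exactly when b > a, so vcltq_s32 is defined as vcgtq_s32 with swapped operands, which is why the declaration is annotated with the VCGT.S32 instruction. A minimal stand-alone illustration of the same identity on the NEON side (hypothetical wrapper name):

    #include <arm_neon.h>

    /* Express a "less-than" compare through the "greater-than" intrinsic by
       swapping its operands, exactly as the NEON_2_SSE macro above does. */
    static inline uint32x4_t my_vcltq_s32(int32x4_t a, int32x4_t b) {
      return vcgtq_s32(b, a);
    }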
|