Searched refs: vcltq_s32 (Results 1 – 18 of 18), sorted by relevance
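Before the per-file hits below, a minimal standalone sketch of what the intrinsic computes: vcltq_s32(a, b) performs a lane-wise signed "a < b" on two int32x4_t vectors and yields a uint32x4_t mask that is all-ones where the comparison holds and zero elsewhere. The values and output here are illustrative, not taken from any of the files listed.

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    const int32_t av[4] = {-2, -1, 0, 1};
    const int32x4_t a = vld1q_s32(av);
    const int32x4_t b = vdupq_n_s32(0);   /* compare against zero, as most hits below do */
    const uint32x4_t m = vcltq_s32(a, b); /* 0xFFFFFFFF where a < 0, 0x00000000 otherwise */

    uint32_t out[4];
    vst1q_u32(out, m);
    for (int i = 0; i < 4; ++i) {
        printf("lane %d: 0x%08x\n", i, out[i]); /* lanes 0,1: 0xffffffff; lanes 2,3: 0x00000000 */
    }
    return 0;
}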

/external/XNNPACK/src/f32-prelu/gen/
neon-2x8.c:59 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
61 const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
63 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
65 const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
96 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
98 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
121 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
123 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x8()
neon-2x4.c:56 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
58 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
81 const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
83 const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0)); in xnn_f32_prelu_ukernel__neon_2x4()
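The XNNPACK PReLU hits above compute the mask on the raw float bits: reinterpreting a float32x4_t as int32x4_t and comparing against zero flags every lane whose sign bit is set. A condensed sketch of how such a mask typically drives the blend; the helper name is illustrative and this is not the kernel's exact code.

#include <arm_neon.h>

static float32x4_t prelu_f32x4(float32x4_t x, float32x4_t w) {
    const float32x4_t scaled = vmulq_f32(x, w);                 /* x * slope */
    const uint32x4_t neg = vcltq_s32(vreinterpretq_s32_f32(x),  /* sign-bit test on the raw bits */
                                     vmovq_n_s32(0));
    return vbslq_f32(neg, scaled, x);  /* take x*slope where negative, x otherwise */
}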
/external/libvpx/libvpx/vp8/encoder/arm/neon/
vp8_shortwalsh4x4_neon.c:89 q8u32 = vcltq_s32(q0s32, qEmptys32); in vp8_short_walsh4x4_neon()
90 q9u32 = vcltq_s32(q1s32, qEmptys32); in vp8_short_walsh4x4_neon()
91 q10u32 = vcltq_s32(q2s32, qEmptys32); in vp8_short_walsh4x4_neon()
92 q11u32 = vcltq_s32(q3s32, qEmptys32); in vp8_short_walsh4x4_neon()
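The four sign masks in the Walsh transform above appear to mirror an "x += (x < 0)"-style bias applied before a rounding shift in the scalar reference. Since the vclt mask is all-ones (i.e. -1) in negative lanes, subtracting it adds 1 exactly where the value is negative. A small sketch of that idiom, not lifted verbatim from the file:

#include <arm_neon.h>

static int32x4_t add_one_if_negative(int32x4_t x) {
    const uint32x4_t neg = vcltq_s32(x, vdupq_n_s32(0)); /* -1 per negative lane */
    return vsubq_s32(x, vreinterpretq_s32_u32(neg));     /* x - (-1) == x + 1 there */
}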
/external/XNNPACK/src/requantization/
precise-neon.c:54 const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
55 const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
56 const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
57 const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0)); in xnn_requantize_precise__neon()
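The requantization masks above are the usual ingredient for branch-free sign handling. One common idiom built from such a mask is conditional negation, (v ^ mask) - mask, which restores a sign onto an absolute-value result; the sketch below shows that pattern and is illustrative, not necessarily the exact step taken in precise-neon.c.

#include <arm_neon.h>

static int32x4_t apply_sign_of(int32x4_t magnitude, int32x4_t x) {
    const int32x4_t neg = vreinterpretq_s32_u32(vcltq_s32(x, vmovq_n_s32(0)));
    /* neg is -1 where x < 0: (m ^ -1) - (-1) == -m; where neg is 0, m is unchanged */
    return vsubq_s32(veorq_s32(magnitude, neg), neg);
}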
/external/XNNPACK/src/q8-avgpool/
up9-neon.c:104 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
105 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
177 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
178 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_up9__neon()
mp9p8q-neon.c:184 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
185 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
258 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
259 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_avgpool_ukernel_mp9p8q__neon()
/external/XNNPACK/src/q8-gavgpool/
up7-neon.c:86 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
87 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
156 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
157 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_up7__neon()
mp7p7q-neon.c:162 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
163 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
234 const int32x4_t vneg_mask_lo = vreinterpretq_s32_u32(vcltq_s32(vacc_lo, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
235 const int32x4_t vneg_mask_hi = vreinterpretq_s32_u32(vcltq_s32(vacc_hi, vmovq_n_s32(0))); in xnn_q8_gavgpool_ukernel_mp7p7q__neon()
/external/XNNPACK/src/f32-prelu/
neon.c.in:67 …const uint32x4_t vm${M}x${ABC[C:C+4]} = vcltq_s32(vreinterpretq_s32_f32(vi${M}x${ABC[C:C+4]}), vmo…
95 … const uint32x4_t vm${M}x0123 = vcltq_s32(vreinterpretq_s32_f32(vi${M}x0123), vmovq_n_s32(0));
118 … const uint32x4_t vm${M}x0123 = vcltq_s32(vreinterpretq_s32_f32(vi${M}x0123), vmovq_n_s32(0));
/external/gemmlowp/fixedpoint/
fixedpoint_neon.h:211 return vreinterpretq_s32_u32(vcltq_s32(a, b));
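The gemmlowp hit above returns the compare reinterpreted back to int32x4_t, so each lane is 0 or -1 (a MaskIfLessThan-style value). Such a mask pairs naturally with a bit-select to get a branch-free "a < b ? t : e"; the sketch below shows that pairing under this assumption, and the function name is illustrative rather than gemmlowp's API.

#include <arm_neon.h>

static int32x4_t select_if_less_than(int32x4_t a, int32x4_t b,
                                     int32x4_t t, int32x4_t e) {
    const uint32x4_t mask = vcltq_s32(a, b); /* all-ones where a < b */
    return vbslq_s32(mask, t, e);            /* per-bit select: t where mask is set, else e */
}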
/external/libvpx/libvpx/vpx_dsp/arm/
deblock_neon.c:285 const uint32x4_t mask32 = vcltq_s32(b, f); in calculate_mask()
/external/libopus/silk/arm/
NSQ_del_dec_neon_intr.c:694 sign_s32x4 = vreinterpretq_s32_u32( vcltq_s32( Seed_s32x4, vdupq_n_s32( 0 ) ) ); in silk_noise_shape_quantizer_del_dec_neon()
758 t_u32x4 = vcltq_s32( rd1_Q10_s32x4, rd2_Q10_s32x4 ); in silk_noise_shape_quantizer_del_dec_neon()
/external/skia/include/private/
SkNx_neon.h:556 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
/external/libaom/libaom/aom_dsp/simd/
v128_intrinsics_arm.h:690 vcltq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y))); in v128_cmplt_s32()
/external/skqp/include/private/
SkNx_neon.h:556 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
/external/clang/test/CodeGen/
aarch64-neon-intrinsics.c:2633 return vcltq_s32(v1, v2); in test_vcltq_s32()
arm_neon_intrinsics.c:2041 return vcltq_s32(a, b); in test_vcltq_s32()
/external/neon_2_sse/
NEON_2_SSE.h:785 _NEON2SSESTORAGE uint32x4_t vcltq_s32(int32x4_t a, int32x4_t b); // VCGT.S32 q0, q0, q0
5520 _NEON2SSESTORAGE uint32x4_t vcltq_s32(int32x4_t a, int32x4_t b); // VCGT.S32 q0, q0, q0
5521 #define vcltq_s32(a,b) vcgtq_s32(b, a) //swap the arguments!! macro
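The NEON_2_SSE macro above implements the missing "less than" by swapping the operands of vcgtq_s32, relying on the identity that a < b holds exactly when b > a, lane for lane. A quick self-check of that identity on a real NEON target; the input values are arbitrary.

#include <arm_neon.h>
#include <assert.h>

int main(void) {
    const int32_t av[4] = {-5, 0, 3, 7};
    const int32_t bv[4] = { 0, 0, 4, 2};
    const int32x4_t a = vld1q_s32(av);
    const int32x4_t b = vld1q_s32(bv);

    uint32_t lt[4], gt[4];
    vst1q_u32(lt, vcltq_s32(a, b)); /* direct "a < b" */
    vst1q_u32(gt, vcgtq_s32(b, a)); /* the macro's rewrite: "b > a" */
    for (int i = 0; i < 4; ++i) {
        assert(lt[i] == gt[i]);     /* identical masks in every lane */
    }
    return 0;
}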