
Searched refs:vceqq_s32 (Results 1 – 25 of 174) sorted by relevance
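Every hit below is the same idiom: vceqq_s32 compares two int32x4_t vectors lane by lane and returns a uint32x4_t whose lanes are all-ones (0xFFFFFFFF) where the operands are equal and all-zeros where they differ; vreinterpretq_s32_u32 then recasts that mask to int32x4_t so it can feed signed bitwise operations. A minimal self-contained sketch of the semantics (the input values are illustrative, not taken from XNNPACK):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int32_t values[4] = { 0, -7, 3, 0 };
  const int32x4_t v = vld1q_s32(values);
  /* Lanes equal to the broadcast 0 become 0xFFFFFFFF, all others 0x00000000. */
  const uint32x4_t eq = vceqq_s32(v, vmovq_n_s32(0));
  /* Recast to signed so the mask can feed vbicq_s32/vandq_s32 on s32 data,
     exactly as the kernels below do. */
  const int32x4_t mask = vreinterpretq_s32_u32(eq);
  int32_t out[4];
  vst1q_s32(out, mask);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* -1 0 0 -1 */
  return 0;
}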


/external/XNNPACK/src/qu8-requantization/
q31-neon.c:49 const int32x4_t vshift_eq_0_mask = vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0))); in xnn_qu8_requantize_q31__neon()
/external/XNNPACK/src/qs8-requantization/
q31-neon.c:49 const int32x4_t vshift_eq_0_mask = vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0))); in xnn_qs8_requantize_q31__neon()
/external/XNNPACK/src/qs8-vaddc/gen/
minmax-neon-ld64-x8.c:30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
minmax-neon-ld64-x16.c:30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
minmax-neon-ld64-x24.c:30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
minmax-neon-ld64-x32.c:30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
/external/XNNPACK/src/qs8-gemm/gen/
1x8c4-minmax-neondot.c:86 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c4__neondot()
1x16c4-minmax-neondot.c:100 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x16c4__neondot()
1x8c8-minmax-neon-mull-padal.c:116 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal()
1x8c2-minmax-neon-mull-padal-dup.c:118 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c2__neon_mull_padal_dup()
1x8-minmax-neon-mlal-lane.c:159 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mlal_lane()
1x8c16-minmax-neon-mlal-padal.c:124 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal()
1x8-minmax-neon-mull-addw-dup.c:155 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mull_addw_dup()
/external/XNNPACK/src/qs8-vadd/gen/
minmax-neon-ld64-x8.c:29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
minmax-neon-ld64-x16.c:29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
minmax-neon-ld64-x24.c:29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
minmax-neon-ld64-x32.c:29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
/external/XNNPACK/src/qs8-igemm/gen/
1x8c4-minmax-neondot.c:100 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8c4__neondot()
1x16c4-minmax-neondot.c:116 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x16c4__neondot()
1x8c8-minmax-neon-mull-padal.c:130 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal()
1x8c2-minmax-neon-mull-padal-dup.c:132 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8c2__neon_mull_padal_dup()
1x8-minmax-neon-mlal-lane.c:172 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mlal_lane()
1x8-minmax-neon-mull-addw-dup.c:169 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mull_addw_dup()
/external/XNNPACK/src/qu8-dwconv/
up8x9-minmax-neon.c:149 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
up8x9-minmax-neon.c:240 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
/external/XNNPACK/src/qs8-vaddc/
neon-ld64.c.in:29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
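All of these references serve one requantization pattern: the kernel compares the per-lane right-shift amount against zero to build vzero_shift_mask (or vshift_eq_0_mask), marking lanes that need no rounding fixup before the final rounding shift. The surrounding kernel lines are truncated out of these results, so the following is a hedged sketch of how such a mask is typically consumed; rounding_shift_sketch is a hypothetical helper name, and the fixup step is reconstructed from the common gemmlowp-style rounding-divide-by-power-of-two idiom rather than copied from any file above:

#include <arm_neon.h>

/* Hypothetical helper, not from XNNPACK: applies a rounding arithmetic
   right shift to Q31 accumulators. vright_shift holds non-positive
   per-lane shifts for vrshlq_s32 (shifting left by a negative amount
   shifts right). */
static inline int32x4_t rounding_shift_sketch(int32x4_t vacc, int32x4_t vright_shift) {
  /* All-ones in lanes whose shift amount is 0, i.e. lanes where the
     sign fixup below must be suppressed. */
  const int32x4_t vzero_shift_mask =
      vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
  /* vbicq_s32 clears the masked lanes; >> 31 then yields -1 for negative
     accumulators in the remaining lanes, so vsraq_n_s32 subtracts 1 from
     negative lanes that will actually shift. That turns vrshlq_s32's
     round-half-up tie behavior into round-half-away-from-zero on
     negative values. */
  vacc = vsraq_n_s32(vacc, vbicq_s32(vacc, vzero_shift_mask), 31);
  /* Rounding arithmetic shift right by -vright_shift. */
  return vrshlq_s32(vacc, vright_shift);
}

The mask exists because a shift of zero performs no rounding at all, so applying the sign fixup in those lanes would perturb the result by one.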
