/external/XNNPACK/src/qu8-requantization/ |
D | q31-neon.c | 49 const int32x4_t vshift_eq_0_mask = vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0))); in xnn_qu8_requantize_q31__neon()
|
/external/XNNPACK/src/qs8-requantization/ |
D | q31-neon.c | 49 const int32x4_t vshift_eq_0_mask = vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0))); in xnn_qs8_requantize_q31__neon()
|
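All of these hits build the same per-lane predicate: vceqq_s32 compares the right-shift vector against zero, yielding all-ones lanes where no shift will be applied, and vreinterpretq_s32_u32 recasts the uint32x4_t result so it can be combined bitwise with the int32 accumulators. A minimal sketch of how the mask is typically consumed follows; the function name requantize_q31_step is illustrative, the variable names (vacc, vmultiplier, vright_shift) mirror the kernels listed here, and the exact surrounding code differs per file, so treat this as an illustration rather than a verbatim excerpt.

#include <arm_neon.h>

// Illustrative Q31 requantization step for one vector of four int32 accumulators.
// vright_shift holds non-positive shift amounts (vrshlq_s32 with a negative
// count shifts right with rounding).
static inline int32x4_t requantize_q31_step(
    int32x4_t vacc, int32x4_t vmultiplier, int32x4_t vright_shift)
{
  // All-ones lanes where the shift amount is zero, all-zeros elsewhere.
  const int32x4_t vzero_shift_mask =
      vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));

  // Fixed-point (Q31) multiply with rounding and saturation.
  vacc = vqrdmulhq_s32(vacc, vmultiplier);

  // Subtract 1 from negative products so the rounding shift below rounds
  // ties away from zero instead of toward +infinity; vbicq_s32 suppresses
  // the adjustment in lanes where the shift is zero and no rounding occurs.
  vacc = vsraq_n_s32(vacc, vbicq_s32(vacc, vzero_shift_mask), 31);

  // Rounding arithmetic right shift (left shift by a negative amount).
  return vrshlq_s32(vacc, vright_shift);
}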
/external/XNNPACK/src/qs8-vaddc/gen/ |
D | minmax-neon-ld64-x8.c | 30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
|
D | minmax-neon-ld64-x16.c | 30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
|
D | minmax-neon-ld64-x24.c | 30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
|
D | minmax-neon-ld64-x32.c | 30 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
|
/external/XNNPACK/src/qs8-gemm/gen/ |
D | 1x8c4-minmax-neondot.c | 86 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c4__neondot()
|
D | 1x16c4-minmax-neondot.c | 100 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x16c4__neondot()
|
D | 1x8c8-minmax-neon-mull-padal.c | 116 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal()
|
D | 1x8c2-minmax-neon-mull-padal-dup.c | 118 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c2__neon_mull_padal_dup()
|
D | 1x8-minmax-neon-mlal-lane.c | 159 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mlal_lane()
|
D | 1x8c16-minmax-neon-mlal-padal.c | 124 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal()
|
D | 1x8-minmax-neon-mull-addw-dup.c | 155 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mull_addw_dup()
|
/external/XNNPACK/src/qs8-vadd/gen/ |
D | minmax-neon-ld64-x8.c | 29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
|
D | minmax-neon-ld64-x16.c | 29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
|
D | minmax-neon-ld64-x24.c | 29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
|
D | minmax-neon-ld64-x32.c | 29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
|
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 1x8c4-minmax-neondot.c | 100 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8c4__neondot()
|
D | 1x16c4-minmax-neondot.c | 116 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x16c4__neondot()
|
D | 1x8c8-minmax-neon-mull-padal.c | 130 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal()
|
D | 1x8c2-minmax-neon-mull-padal-dup.c | 132 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8c2__neon_mull_padal_dup()
|
D | 1x8-minmax-neon-mlal-lane.c | 172 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mlal_lane()
|
D | 1x8-minmax-neon-mull-addw-dup.c | 169 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mull_addw_dup()
|
/external/XNNPACK/src/qu8-dwconv/ |
D | up8x9-minmax-neon.c | 149 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
D | up8x9-minmax-neon.c | 240 … const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0))); in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
|
/external/XNNPACK/src/qs8-vaddc/ |
D | neon-ld64.c.in | 29 const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
|
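For reference only (not taken from XNNPACK), the rounding that these NEON lanes implement can be written as portable scalar C, which is handy as a cross-check when modifying the generated kernels or the neon-ld64.c.in template above. The function name requantize_q31_scalar is illustrative.

#include <stdint.h>

// Scalar sketch of the requantization rounding: Q31 multiply with rounding
// (mirroring vqrdmulhq_s32, ignoring its INT32_MIN saturation corner case),
// then an arithmetic right shift that rounds to nearest with ties away from
// zero -- the behavior the vzero_shift_mask adjustment produces.
static int32_t requantize_q31_scalar(int32_t acc, int32_t multiplier, uint32_t shift)
{
  const int64_t product = (int64_t) acc * (int64_t) multiplier;
  const int32_t q31 = (int32_t) ((product + (INT64_C(1) << 30)) >> 31);
  if (shift == 0) {
    return q31;  // no rounding, and no adjustment (the masked-out case)
  }
  int64_t adjusted = (int64_t) q31;
  if (adjusted < 0) {
    adjusted -= 1;  // same role as vsraq_n_s32(vacc, vbicq_s32(...), 31)
  }
  const int64_t rounding = INT64_C(1) << (shift - 1);
  return (int32_t) ((adjusted + rounding) >> shift);
}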