Occurrences of the local variable vpsum1x4 (its definition, and its use in forming vsum1x45) across XNNPACK's generated QS8/QC8 GEMM and IGEMM NEON microkernels:

/external/XNNPACK/src/qs8-gemm/gen/

D | 2x8c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mull)
      154  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      158  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal)
      169  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      173  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 3x8c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull)
      191  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      195  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-fp32-neonv8-mlal.c  (xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
      230  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      234  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-fp32-neon-mlal.c  (xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
      229  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      233  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal)
      229  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      233  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 3x8c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal)
      214  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      218  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 4x8c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull)
      228  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      232  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull)
      236  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      240  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 3x8c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal)
      292  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      296  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

/external/XNNPACK/src/qs8-igemm/gen/

D | 2x8c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mull)
      170  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      174  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c16__neon_mlal)
      185  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      189  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal)
      245  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      249  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-fp32-neon-mlal.c  (xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
      245  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      249  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-fp32-neonv8-mlal.c  (xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
      246  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      250  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 3x8c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull)
      209  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      213  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 3x8c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal)
      232  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      236  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull)
      252  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      256  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 4x8c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull)
      248  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      252  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 4x8c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal)
      279  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      283  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal)
      283  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      287  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

/external/XNNPACK/src/qc8-gemm/gen/

D | 2x8c8-minmax-fp32-neon-mlal.c  (xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
      229  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      233  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-fp32-neonv8-mlal.c  (xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
      230  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      234  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

/external/XNNPACK/src/qc8-igemm/gen/

D | 2x8c8-minmax-fp32-neonv8-mlal.c  (xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal)
      246  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      250  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

D | 2x8c8-minmax-fp32-neon-mlal.c  (xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal)
      245  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
      249  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]
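
Every hit above is the same accumulator-reduction idiom in these generated kernels: each int32x4_t accumulator (vacc1x4, vacc1x5, ...) holds four partial dot-product sums for one row/output-channel pair, and the kernel tail folds each one to a scalar by adding its low and high halves with vadd_s32, then packs two adjacent channels into one int32x2_t with vpadd_s32. The following is a minimal, self-contained sketch of that idiom; the helper name reduce_channel_pair and the test values are illustrative and not part of XNNPACK.

    #include <arm_neon.h>
    #include <stdio.h>

    // Sketch of the reduction the kernels above perform: fold two int32x4_t
    // accumulators (one per output channel) down to a single int32x2_t whose
    // lane 0 is the horizontal sum of the first accumulator and lane 1 is the
    // horizontal sum of the second. reduce_channel_pair is a hypothetical
    // name used only for this illustration.
    static int32x2_t reduce_channel_pair(int32x4_t vacc_c4, int32x4_t vacc_c5) {
      // Fold the high half of each accumulator onto its low half: 4 lanes -> 2.
      const int32x2_t vpsum_c4 = vadd_s32(vget_low_s32(vacc_c4), vget_high_s32(vacc_c4));
      const int32x2_t vpsum_c5 = vadd_s32(vget_low_s32(vacc_c5), vget_high_s32(vacc_c5));
      // Pairwise add finishes both reductions and interleaves the results:
      // lane 0 = a0+a1 of vpsum_c4, lane 1 = b0+b1 of vpsum_c5.
      return vpadd_s32(vpsum_c4, vpsum_c5);
    }

    int main(void) {
      const int32_t a[4] = {1, 2, 3, 4};     // horizontal sum: 10
      const int32_t b[4] = {10, 20, 30, 40}; // horizontal sum: 100
      const int32x2_t r = reduce_channel_pair(vld1q_s32(a), vld1q_s32(b));
      printf("%d %d\n", vget_lane_s32(r, 0), vget_lane_s32(r, 1));
      return 0;
    }

Built with NEON enabled (native on AArch64, or -mfpu=neon on 32-bit Arm), this should print "10 100". The generated kernels repeat this pattern for every channel pair (vsum1x01, vsum1x23, vsum1x45, vsum1x67, ...) and then combine the pairs into full int32x4_t result vectors.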