/external/XNNPACK/src/qs8-gemm/gen/

  2x8c8-minmax-neon-mull-padal.c, in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal():
    153  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    157  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x8c16-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal():
    169  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    173  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x8c8-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal():
    228  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    232  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x8c8-minmax-neon-mull-padal.c, in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal():
    190  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    194  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x8c16-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal():
    214  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    218  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  4x8c8-minmax-neon-mull-padal.c, in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal():
    227  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    231  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x16c8-minmax-neon-mull-padal.c, in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mull_padal():
    235  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    239  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x8c8-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mlal_padal():
    291  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    295  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x16c16-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal():
    267  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    271  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  4x8c16-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_4x8c16__neon_mlal_padal():
    259  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    263  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  4x8c8-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mlal_padal():
    354  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    358  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x16c8-minmax-neon-mull-padal.c, in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal():
    302  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    306  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x16c8-minmax-neon-mlal-padal.c, in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal():
    374  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    378  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]
/external/XNNPACK/src/qs8-igemm/gen/

  2x8c8-minmax-neon-mull-padal.c, in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal():
    169  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    173  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x8c16-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal():
    185  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    189  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x8c8-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal():
    244  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    248  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x8c8-minmax-neon-mull-padal.c, in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal():
    208  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    212  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x8c16-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal():
    232  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    236  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  4x8c8-minmax-neon-mull-padal.c, in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal():
    247  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    251  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x16c8-minmax-neon-mull-padal.c, in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal():
    251  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    255  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x8c8-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mlal_padal():
    309  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    313  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  4x8c16-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_4x8c16__neon_mlal_padal():
    279  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    283  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x16c16-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal():
    283  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    287  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  3x16c8-minmax-neon-mull-padal.c, in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal():
    320  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    324  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]

  2x16c8-minmax-neon-mlal-padal.c, in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal():
    390  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));  [local definition]
    394  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);  [use]
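
Every entry above indexes the same horizontal-reduction idiom: each int32x4_t accumulator such as vacc1x4 holds four 32-bit partial sums for one (row, output channel) pair, vadd_s32 folds its low and high halves together, and vpadd_s32 then pairwise-adds two of these half-reduced accumulators so that each lane of the result is the finished sum for one channel. The program below is a minimal standalone sketch of that idiom, not the XNNPACK source; the variable names mirror the listing, and the accumulator values are made up purely for illustration. It needs an ARM/AArch64 compiler with NEON enabled.

// reduce_sketch.c: fold four per-channel int32x4_t accumulators into one
// int32x4_t of per-channel totals, using the vadd_s32/vpadd_s32 pattern
// shown in the cross-references above.
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Pretend these are the accumulators left over after the K loop
  // (values chosen only so the totals are easy to check by hand).
  const int32_t a4[4] = {1, 2, 3, 4};      // channel 4: total 10
  const int32_t a5[4] = {5, 6, 7, 8};      // channel 5: total 26
  const int32_t a6[4] = {10, 20, 30, 40};  // channel 6: total 100
  const int32_t a7[4] = {1, 1, 1, 1};      // channel 7: total 4
  const int32x4_t vacc1x4 = vld1q_s32(a4);
  const int32x4_t vacc1x5 = vld1q_s32(a5);
  const int32x4_t vacc1x6 = vld1q_s32(a6);
  const int32x4_t vacc1x7 = vld1q_s32(a7);

  // Step 1: add the low and high halves of each accumulator (4 lanes -> 2 lanes).
  const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
  const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
  const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
  const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));

  // Step 2: pairwise-add two half-reduced accumulators at a time, so each
  // lane of the result is the complete sum for one channel.
  const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
  const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);

  // Step 3: combine into one vector holding the totals for channels 4..7.
  const int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);

  printf("%d %d %d %d\n",
         vgetq_lane_s32(vacc1x4567, 0), vgetq_lane_s32(vacc1x4567, 1),
         vgetq_lane_s32(vacc1x4567, 2), vgetq_lane_s32(vacc1x4567, 3));
  // Expected output: 10 26 100 4
  return 0;
}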