/external/XNNPACK/src/qs8-gemm/gen/

D | 2x8c8-minmax-neon-mull-padal.c
      148  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal()  local
      151  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal()

D | 2x8c16-minmax-neon-mlal-padal.c
      164  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal()  local
      167  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal()

D | 2x8c8-minmax-neon-mlal-padal.c
      223  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal()  local
      226  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal()

D | 3x8c8-minmax-neon-mull-padal.c
      185  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal()  local
      188  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal()

D | 3x8c16-minmax-neon-mlal-padal.c
      209  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal()  local
      212  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal()

D | 4x8c8-minmax-neon-mull-padal.c
      222  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal()  local
      225  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal()

D | 2x16c8-minmax-neon-mull-padal.c
      230  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mull_padal()  local
      233  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mull_padal()

D | 3x8c8-minmax-neon-mlal-padal.c
      286  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mlal_padal()  local
      289  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mlal_padal()

D | 2x16c16-minmax-neon-mlal-padal.c
      262  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()  local
      265  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()

D | 4x8c16-minmax-neon-mlal-padal.c
      254  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_4x8c16__neon_mlal_padal()  local
      257  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_4x8c16__neon_mlal_padal()

D | 4x8c8-minmax-neon-mlal-padal.c
      349  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mlal_padal()  local
      352  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mlal_padal()

D | 3x16c8-minmax-neon-mull-padal.c
      297  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal()  local
      300  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal()

D | 2x16c8-minmax-neon-mlal-padal.c
      369  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()  local
      372  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()

/external/XNNPACK/src/qs8-igemm/gen/

D | 2x8c8-minmax-neon-mull-padal.c
      164  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal()  local
      167  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal()

D | 2x8c16-minmax-neon-mlal-padal.c
      180  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal()  local
      183  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal()

D | 2x8c8-minmax-neon-mlal-padal.c
      239  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal()  local
      242  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal()

D | 3x8c8-minmax-neon-mull-padal.c
      203  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal()  local
      206  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal()

D | 3x8c16-minmax-neon-mlal-padal.c
      227  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal()  local
      230  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal()

D | 4x8c8-minmax-neon-mull-padal.c
      242  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal()  local
      245  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal()

D | 2x16c8-minmax-neon-mull-padal.c
      246  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal()  local
      249  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal()

D | 3x8c8-minmax-neon-mlal-padal.c
      304  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mlal_padal()  local
      307  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mlal_padal()

D | 4x8c16-minmax-neon-mlal-padal.c
      274  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_4x8c16__neon_mlal_padal()  local
      277  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_4x8c16__neon_mlal_padal()

D | 2x16c16-minmax-neon-mlal-padal.c
      278  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal()  local
      281  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal()

D | 3x16c8-minmax-neon-mull-padal.c
      315  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal()  local
      318  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal()

D | 2x16c8-minmax-neon-mlal-padal.c
      385  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()  local
      388  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()

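Every hit above is the same horizontal-reduction idiom: a 128-bit int32x4_t per-channel accumulator (vacc1x2, vacc1x3, ...) is split into its low and high halves with vget_low_s32/vget_high_s32, the halves are summed with vadd_s32, and vpadd_s32 then packs the totals of two adjacent output channels into a single int32x2_t (vsum1x23). The standalone C sketch below is not taken from XNNPACK; it only reproduces that reduction step with made-up accumulator values and a hypothetical file name, so the listed lines can be read in isolation.

/* reduce_sketch.c -- illustrative only; values and file name are hypothetical.
 * Build (AArch64, NEON implied): cc -O2 reduce_sketch.c -o reduce_sketch
 * On 32-bit ARM add -mfpu=neon. */
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Stand-in accumulators for output channels 2 and 3 of row 1. */
  const int32_t a2[4] = {1, 2, 3, 4};      /* total 10  */
  const int32_t a3[4] = {10, 20, 30, 40};  /* total 100 */
  const int32x4_t vacc1x2 = vld1q_s32(a2);
  const int32x4_t vacc1x3 = vld1q_s32(a3);

  /* Step 1: add the low and high 64-bit halves of each 128-bit accumulator. */
  const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
  const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));

  /* Step 2: pairwise add packs both channel totals into one int32x2_t:
   * lane 0 = sum of vacc1x2, lane 1 = sum of vacc1x3. */
  const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);

  printf("channel 2 sum = %d, channel 3 sum = %d\n",
         vget_lane_s32(vsum1x23, 0), vget_lane_s32(vsum1x23, 1));  /* 10, 100 */
  return 0;
}

In the listed kernels this reduction appears after the main MULL/MLAL accumulation loop, collapsing the per-channel vector accumulators to one 32-bit sum per output channel before requantization.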