/external/XNNPACK/src/qs8-igemm/gen/

D | 2x16c8-minmax-rndnu-neon-mull.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    262  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    264  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 2x16c16-minmax-rndnu-neon-mlal.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    293  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    295  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 2x16c8-minmax-rndnu-neon-mlal.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    401  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    403  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 3x16c8-minmax-rndnu-neon-mull.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    331  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    333  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 3x16c16-minmax-rndnu-neon-mlal.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
    378  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    380  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 4x16c8-minmax-rndnu-neon-mull.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
    400  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    402  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 3x16c8-minmax-rndnu-neon-mlal.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    520  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    522  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 4x16c16-minmax-rndnu-neon-mlal.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
    463  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    465  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 4x16c8-minmax-rndnu-neon-mlal.c | in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()
    639  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    641  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)
/external/XNNPACK/src/qs8-gemm/gen/

D | 2x16c8-minmax-rndnu-neon-mull.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    246  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    248  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 2x16c16-minmax-rndnu-neon-mlal.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    277  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    279  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 3x16c8-minmax-rndnu-neon-mull.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    313  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    315  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 2x16c8-minmax-rndnu-neon-mlal.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    385  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    387  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 3x16c16-minmax-rndnu-neon-mlal.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
    360  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    362  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 4x16c8-minmax-rndnu-neon-mull.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
    380  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    382  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 3x16c8-minmax-rndnu-neon-mlal.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    502  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    504  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 4x16c16-minmax-rndnu-neon-mlal.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
    443  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    445  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)

D | 4x16c8-minmax-rndnu-neon-mlal.c | in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()
    619  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));  (local declaration)
    621  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  (use)
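Every hit above is the same two-statement horizontal reduction that these generated rndnu NEON kernels use to collapse the per-channel int32x4_t accumulator vacc1x11 (row 1, output channel 0xB) into a single int32 sum: vadd_s32 folds the low and high halves of the quad, and vpadd_s32 then pairwise-adds the result with the neighbouring channel's partial sum vpsum1xA. Below is a minimal, self-contained C sketch of that pattern. The accumulator and temporary names follow the generated kernels, but the test values, the surrounding main(), and the final vcombine_s32/printf are illustrative assumptions, not XNNPACK code.

    /* Sketch of the vadd_s32 + vpadd_s32 accumulator reduction seen in the hits above.
     * Build for an ARM target with NEON, e.g.: gcc -O2 -o reduce reduce.c (AArch64). */
    #include <arm_neon.h>
    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      /* Stand-in per-channel accumulators for row 1, channels 8..11 (0x8..0xB).
       * In the real kernels these hold running sums of int8 products. */
      const int32_t a8[4]  = {1, 2, 3, 4};      /* channel 0x8: lane sum = 10 */
      const int32_t a9[4]  = {5, 6, 7, 8};      /* channel 0x9: lane sum = 26 */
      const int32_t aA[4]  = {9, 10, 11, 12};   /* channel 0xA: lane sum = 42 */
      const int32_t aB[4]  = {13, 14, 15, 16};  /* channel 0xB: lane sum = 58 */
      const int32x4_t vacc1x8  = vld1q_s32(a8);
      const int32x4_t vacc1x9  = vld1q_s32(a9);
      const int32x4_t vacc1x10 = vld1q_s32(aA);
      const int32x4_t vacc1x11 = vld1q_s32(aB);

      /* Step 1: fold each 4-lane accumulator to 2 lanes (low half + high half). */
      const int32x2_t vpsum1x8 = vadd_s32(vget_low_s32(vacc1x8),  vget_high_s32(vacc1x8));
      const int32x2_t vpsum1x9 = vadd_s32(vget_low_s32(vacc1x9),  vget_high_s32(vacc1x9));
      const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));
      const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));

      /* Step 2: pairwise-add neighbouring channels, one int32 sum per channel. */
      const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  /* {sum8, sum9}  */
      const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  /* {sumA, sumB}  */
      const int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);

      printf("%d %d %d %d\n",
             vgetq_lane_s32(vacc1x89AB, 0), vgetq_lane_s32(vacc1x89AB, 1),
             vgetq_lane_s32(vacc1x89AB, 2), vgetq_lane_s32(vacc1x89AB, 3));
      /* Expected output: 10 26 42 58 */
      return 0;
    }

In the generated kernels this reduction repeats for every row and every group of four output channels, which is why the identical vpsum1xB/vsum1xAB pair shows up once in each of the 2x16, 3x16, and 4x16 GEMM and IGEMM variants listed above.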