/external/XNNPACK/src/qs8-gemm/gen/

D | 3x16c8-minmax-rndnu-neon-mull.c
      251  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      265  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
      343  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      344  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()

D | 3x16c16-minmax-rndnu-neon-mlal.c
      299  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      312  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
      390  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      391  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()

D | 4x16c8-minmax-rndnu-neon-mull.c
      306  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
      328  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
      410  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
      411  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()

D | 3x16c8-minmax-rndnu-neon-mlal.c
      440  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
      454  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
      532  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
      533  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()

D | 4x16c16-minmax-rndnu-neon-mlal.c
      370  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
      391  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
      473  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
      474  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()

D | 3x16c4s2-minmax-rndnu-neon-mull.c
      252  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull() local
      253  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull()

D | 4x16c8-minmax-rndnu-neon-mlal.c
      545  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() local
      567  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()
      649  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() local
      650  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()

D | 3x16c4-minmax-rndnu-neon-mull-dup.c
      326  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_dup() local
      327  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_dup()

D | 3x16c4-minmax-rndnu-neon-mull-ld2r.c
      326  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld2r() local
      327  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld2r()

D | 3x16c4-minmax-rndnu-neon-mull-ld1r.c
      329  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld1r() local
      330  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld1r()

D | 4x16c4s2-minmax-rndnu-neon-mull.c
      304  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull() local
      305  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull()

D | 4x16c4-minmax-rndnu-neon-mull-ld2r.c
      397  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mull_ld2r() local
      398  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mull_ld2r()

D | 3x16c4s2-minmax-rndnu-neon-mlal.c
      445  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal() local
      446  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal()

D | 4x16c4-minmax-rndnu-neon-mull-dup.c
      397  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mull_dup() local
      398  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mull_dup()
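
Every c8/c16 hit above is the same two-level pairwise reduction: the kernel keeps one int32x4_t accumulator per output channel (vacc2x10 and vacc2x11 for channels 10 and 11 of output row 2 appear in the hits), and vpaddq_s32 folds four of them into a single vacc2x89AB vector holding one fully reduced sum per channel. The following is a minimal standalone sketch of just that step, not XNNPACK code: the accumulator values are made up, vacc2x8 and vacc2x9 are assumed to follow the same naming as the vacc2x10/vacc2x11 seen in the hits, and vpaddq_s32 is AArch64-only (the 64-bit fallback is sketched after the igemm listing).

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* Made-up per-channel accumulators (channels 8..11 of output row 2).
   * In the real kernels these hold partial int8 dot products; arbitrary
   * values are used here so the reduction is easy to check by hand. */
  const int32_t c8[4]  = {1, 2, 3, 4};   /* total 10 */
  const int32_t c9[4]  = {5, 6, 7, 8};   /* total 26 */
  const int32_t c10[4] = {1, 1, 1, 1};   /* total 4  */
  const int32_t c11[4] = {2, 2, 2, 2};   /* total 8  */
  const int32x4_t vacc2x8  = vld1q_s32(c8);   /* assumed name, not in the hits */
  const int32x4_t vacc2x9  = vld1q_s32(c9);   /* assumed name, not in the hits */
  const int32x4_t vacc2x10 = vld1q_s32(c10);
  const int32x4_t vacc2x11 = vld1q_s32(c11);

  /* Level 1: vpaddq_s32(a, b) = {a0+a1, a2+a3, b0+b1, b2+b3}. */
  const int32x4_t vsum2x89 = vpaddq_s32(vacc2x8, vacc2x9);
  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);
  /* Level 2: one fully reduced sum per channel: {10, 26, 4, 8}. */
  const int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);

  printf("%d %d %d %d\n",
         vgetq_lane_s32(vacc2x89AB, 0), vgetq_lane_s32(vacc2x89AB, 1),
         vgetq_lane_s32(vacc2x89AB, 2), vgetq_lane_s32(vacc2x89AB, 3));
  return 0;
}
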
/external/XNNPACK/src/qs8-igemm/gen/

D | 3x16c8-minmax-rndnu-neon-mull.c
      269  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      283  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
      361  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      362  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()

D | 3x16c16-minmax-rndnu-neon-mlal.c
      317  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      330  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
      408  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      409  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()

D | 4x16c8-minmax-rndnu-neon-mull.c
      326  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
      348  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
      430  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
      431  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()

D | 3x16c8-minmax-rndnu-neon-mlal.c
      458  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
      472  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
      550  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
      551  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()

D | 4x16c16-minmax-rndnu-neon-mlal.c
      390  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
      411  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
      493  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
      494  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()

D | 3x16c4s2-minmax-rndnu-neon-mull.c
      270  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull() local
      271  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull()

D | 4x16c8-minmax-rndnu-neon-mlal.c
      565  const int32x4_t vsum2xAB = vpaddq_s32(vacc2x10, vacc2x11);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() local
      587  int32x4_t vacc2x89AB = vpaddq_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()
      669  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal() local
      670  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB );   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal()

D | 3x16c4-minmax-rndnu-neon-mull-dup.c
      343  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mull_dup() local
      344  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mull_dup()

D | 4x16c4s2-minmax-rndnu-neon-mull.c
      324  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull() local
      325  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull()

D | 3x16c4-minmax-rndnu-neon-mull-ld2r.c
      343  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld2r() local
      344  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld2r()

D | 3x16c4-minmax-rndnu-neon-mull-ld1r.c
      346  const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld1r() local
      347  int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);   in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mull_ld1r()
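
The second line-number pair in each c8/c16 hit, and all of the c4/c4s2 hits, use the 64-bit flavor of the same reduction, which also works on ARMv7 NEON: each 128-bit accumulator is folded to two lanes, vpadd_s32 pairs two channels, and vcombine_s32 joins two such pairs into the final vacc2x89AB. Another made-up standalone sketch, not XNNPACK code, mirroring the vpsum2xA/vpsum2xB lines above (the c4 variants do the equivalent on a combined vacc2xAB register; the vpsum2xA/vpsum2xB/vsum2xAB names are taken from the hits, the input values are invented):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* Made-up accumulators for channels A (10) and B (11) of output row 2. */
  const int32_t ca[4] = {1, 2, 3, 4};   /* total 10 */
  const int32_t cb[4] = {5, 6, 7, 8};   /* total 26 */
  const int32x4_t vacc2xA = vld1q_s32(ca);
  const int32x4_t vacc2xB = vld1q_s32(cb);

  /* Fold each 128-bit accumulator to two lanes... */
  const int32x2_t vpsum2xA = vadd_s32(vget_low_s32(vacc2xA), vget_high_s32(vacc2xA));
  const int32x2_t vpsum2xB = vadd_s32(vget_low_s32(vacc2xB), vget_high_s32(vacc2xB));
  /* ...then vpadd_s32(a, b) = {a0+a1, b0+b1}: one sum per channel, {10, 26}. */
  const int32x2_t vsum2xAB = vpadd_s32(vpsum2xA, vpsum2xB);

  /* In the kernels, channels 8 and 9 are reduced the same way into vsum2x89
   * and the halves are joined: vcombine_s32(vsum2x89, vsum2xAB). Reusing
   * vsum2xAB for both halves here just keeps the sketch self-contained. */
  const int32x4_t vacc2x89AB = vcombine_s32(vsum2xAB, vsum2xAB);

  printf("%d %d %d %d\n",   /* prints: 10 26 10 26 */
         vgetq_lane_s32(vacc2x89AB, 0), vgetq_lane_s32(vacc2x89AB, 1),
         vgetq_lane_s32(vacc2x89AB, 2), vgetq_lane_s32(vacc2x89AB, 3));
  return 0;
}
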