/external/XNNPACK/src/qs8-igemm/gen/

D | 2x16c8-minmax-rndnu-neon-mull.c
    204  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    214  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    264  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    265  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()

D | 2x16c16-minmax-rndnu-neon-mlal.c
    236  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    245  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    295  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    296  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()

D | 2x16c8-minmax-rndnu-neon-mlal.c
    343  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    353  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    403  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    404  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()

D | 3x16c8-minmax-rndnu-neon-mull.c
    261  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    279  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    333  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    334  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()

D | 3x16c16-minmax-rndnu-neon-mlal.c
    309  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    326  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
    380  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    381  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()

D | 2x16c4s2-minmax-rndnu-neon-mull.c
    204  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
    205  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull()

D | 4x16c8-minmax-rndnu-neon-mull.c
    318  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    344  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
    402  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    403  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()

D | 3x16c8-minmax-rndnu-neon-mlal.c
    450  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    468  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    522  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    523  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()

D | 2x16c4-minmax-rndnu-neon-mull-ld2r.c
    258  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r() local
    259  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r()

D | 4x16c16-minmax-rndnu-neon-mlal.c
    382  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    407  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
    465  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    466  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()

D | 2x16c4-minmax-rndnu-neon-mull-dup.c
    258  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup() local
    259  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup()

D | 2x16c4-minmax-rndnu-neon-mull-ld1r.c
    260  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r() local
    261  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r()
/external/XNNPACK/src/qs8-gemm/gen/

D | 2x16c8-minmax-rndnu-neon-mull.c
    188  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    198  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    248  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    249  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()

D | 2x16c16-minmax-rndnu-neon-mlal.c
    220  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    229  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    279  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    280  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()

D | 3x16c8-minmax-rndnu-neon-mull.c
    243  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    261  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    315  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    316  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()

D | 2x16c8-minmax-rndnu-neon-mlal.c
    327  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    337  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    387  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    388  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()

D | 3x16c16-minmax-rndnu-neon-mlal.c
    291  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    308  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
    362  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    363  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()

D | 2x16c4s2-minmax-rndnu-neon-mull.c
    188  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
    189  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull()

D | 4x16c8-minmax-rndnu-neon-mull.c
    298  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    324  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
    382  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    383  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()

D | 3x16c8-minmax-rndnu-neon-mlal.c
    432  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    450  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    504  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    505  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()

D | 2x16c4-minmax-rndnu-neon-mull-dup.c
    243  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup() local
    244  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup()

D | 2x16c4-minmax-rndnu-neon-mull-ld1r.c
    245  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r() local
    246  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r()

D | 2x16c4-minmax-rndnu-neon-mull-ld2r.c
    243  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r() local
    244  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r()

D | 4x16c16-minmax-rndnu-neon-mlal.c
    362  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    387  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
    445  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    446  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()

D | 3x16c4s2-minmax-rndnu-neon-mull.c
    240  const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull() local
    241  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull()
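
Note: every hit above is the same reduction step of these 16-column QS8 GEMM/IGEMM NEON microkernels: per-column int32 partial sums spread across int32x4_t accumulators are folded into one int32x4_t holding one column sum per lane, via vpaddq_s32 on AArch64 and a vadd_s32/vpadd_s32/vcombine_s32 sequence on ARMv7 NEON. The following is a minimal standalone sketch of that pattern, not code from XNNPACK itself; the accumulator names mirror the kernels, while reduce_columns(), main(), and the test values are hypothetical additions for illustration.

#include <arm_neon.h>
#include <stdio.h>

// Fold four int32x4_t accumulators (output row 1, columns 8..B) into one
// int32x4_t whose lanes are the horizontal sums { sum8, sum9, sumA, sumB },
// mirroring the reduction step quoted in the search results above.
static int32x4_t reduce_columns(int32x4_t vacc1x8, int32x4_t vacc1x9,
                                int32x4_t vacc1x10, int32x4_t vacc1x11) {
#if defined(__aarch64__)
  // AArch64: vpaddq_s32 pairwise-adds adjacent lanes across both operands,
  // so two rounds collapse each accumulator to a single lane.
  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);
  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  return vpaddq_s32(vsum1x89, vsum1xAB);
#else
  // ARMv7 NEON has no quad-width pairwise add: add the high half of each
  // accumulator onto its low half, then pairwise-add and recombine.
  const int32x2_t vpsum1x8 = vadd_s32(vget_low_s32(vacc1x8), vget_high_s32(vacc1x8));
  const int32x2_t vpsum1x9 = vadd_s32(vget_low_s32(vacc1x9), vget_high_s32(vacc1x9));
  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));
  const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));
  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);
  const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);
  return vcombine_s32(vsum1x89, vsum1xAB);
#endif
}

int main(void) {
  // Made-up accumulator contents; the lanes of each vector sum to
  // 10, 26, 42 and 58 respectively, so both paths must print that.
  const int32_t a8[4]  = { 1,  2,  3,  4};
  const int32_t a9[4]  = { 5,  6,  7,  8};
  const int32_t a10[4] = { 9, 10, 11, 12};
  const int32_t a11[4] = {13, 14, 15, 16};
  const int32x4_t vacc1x89AB = reduce_columns(
      vld1q_s32(a8), vld1q_s32(a9), vld1q_s32(a10), vld1q_s32(a11));
  int32_t out[4];
  vst1q_s32(out, vacc1x89AB);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 10 26 42 58
  return 0;
}

The two lines flagged "local" per file are the declarations of vsum1xAB in the kernel's main path and in its remainder path; the line after each is the vcombine_s32/vpaddq_s32 consumer that produces vacc1x89AB, which is why each file reports two declaration/use pairs.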