/external/XNNPACK/src/qs8-igemm/gen/
2x16c8-minmax-rndnu-neon-mull.c
    203  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    214  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    263  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    265  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
|
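Every c8/c16 hit above and below pairs two code paths: an AArch64 branch that collapses the four per-channel dot-product accumulators with two levels of vpaddq_s32, and a fallback branch (the vpsum*/vcombine_s32 lines) that performs the same reduction with 64-bit vpadd_s32. Below is a minimal standalone sketch of the AArch64 branch, not XNNPACK code: reduce4 and the test values are invented for illustration, and it assumes an A64 target since vpaddq_s32 is an A64-only intrinsic.

    /* Sketch: collapse four int32x4_t per-channel accumulators into one
       vector of four channel totals, as the vsum1x89/vacc1x89AB lines do. */
    #include <arm_neon.h>
    #include <stdio.h>

    static int32x4_t reduce4(int32x4_t vacc8, int32x4_t vacc9,
                             int32x4_t vaccA, int32x4_t vaccB) {
      /* Level 1: vpaddq_s32 adds adjacent lanes, so vsum89 holds two
         partial sums of vacc8 followed by two partial sums of vacc9. */
      const int32x4_t vsum89 = vpaddq_s32(vacc8, vacc9);
      const int32x4_t vsumAB = vpaddq_s32(vaccA, vaccB);
      /* Level 2: a second pairwise add finishes each channel's total:
         {sum(vacc8), sum(vacc9), sum(vaccA), sum(vaccB)}. */
      return vpaddq_s32(vsum89, vsumAB);
    }

    int main(void) {
      const int32_t c8[4] = {1, 2, 3, 4};      /* total 10 */
      const int32_t c9[4] = {5, 6, 7, 8};      /* total 26 */
      const int32_t cA[4] = {9, 10, 11, 12};   /* total 42 */
      const int32_t cB[4] = {13, 14, 15, 16};  /* total 58 */
      int32_t out[4];
      vst1q_s32(out, reduce4(vld1q_s32(c8), vld1q_s32(c9),
                             vld1q_s32(cA), vld1q_s32(cB)));
      printf("%d %d %d %d\n", (int) out[0], (int) out[1],
             (int) out[2], (int) out[3]);  /* prints: 10 26 42 58 */
      return 0;
    }
|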
2x16c16-minmax-rndnu-neon-mlal.c
    235  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    245  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    294  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    296  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
|
2x16c8-minmax-rndnu-neon-mlal.c
    342  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    353  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    402  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    404  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
|
3x16c8-minmax-rndnu-neon-mull.c
    260  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    279  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    332  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    334  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
|
3x16c16-minmax-rndnu-neon-mlal.c
    308  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    326  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
    379  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    381  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
|
2x16c4s2-minmax-rndnu-neon-mull.c
    203  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
    205  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull()
|
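The c4/c4s2 hits (like the entry just above) skip the first level of the tree: vacc1x89 already interleaves the partial sums of channels 8 and 9, so a single vpadd_s32 over its two halves yields both channel totals, and vcombine_s32 packs four channels back into one 128-bit register. A hedged sketch of just that step, with invented values, not XNNPACK code; vpadd_s32 exists on both ARMv7 NEON and AArch64:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      /* vacc89 = {ch8_lo, ch8_hi, ch9_lo, ch9_hi}; vaccAB likewise
         holds the interleaved partials of channels A and B. */
      const int32_t p89[4] = {1, 2, 3, 4};
      const int32_t pAB[4] = {5, 6, 7, 8};
      const int32x4_t vacc89 = vld1q_s32(p89);
      const int32x4_t vaccAB = vld1q_s32(pAB);
      /* vpadd_s32 sums each channel's two halves: vsum89 = {1+2, 3+4}. */
      const int32x2_t vsum89 =
          vpadd_s32(vget_low_s32(vacc89), vget_high_s32(vacc89));
      const int32x2_t vsumAB =
          vpadd_s32(vget_low_s32(vaccAB), vget_high_s32(vaccAB));
      /* Pack the four channel totals into one register: {3, 7, 11, 15}. */
      int32_t out[4];
      vst1q_s32(out, vcombine_s32(vsum89, vsumAB));
      printf("%d %d %d %d\n", (int) out[0], (int) out[1],
             (int) out[2], (int) out[3]);  /* prints: 3 7 11 15 */
      return 0;
    }
|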
4x16c8-minmax-rndnu-neon-mull.c
    317  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    344  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
    401  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    403  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
|
3x16c8-minmax-rndnu-neon-mlal.c
    449  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    468  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    521  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    523  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
|
2x16c4-minmax-rndnu-neon-mull-ld2r.c
    257  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r() local
    259  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r()
|
4x16c16-minmax-rndnu-neon-mlal.c
    381  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    407  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
    464  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    466  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
|
2x16c4-minmax-rndnu-neon-mull-dup.c
    257  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup() local
    259  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup()
|
2x16c4-minmax-rndnu-neon-mull-ld1r.c
    259  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r() local
    261  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r()
|
/external/XNNPACK/src/qs8-gemm/gen/
2x16c8-minmax-rndnu-neon-mull.c
    187  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    198  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    247  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    249  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
|
2x16c16-minmax-rndnu-neon-mlal.c
    219  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    229  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    278  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    280  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
|
3x16c8-minmax-rndnu-neon-mull.c
    242  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    261  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    314  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    316  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
|
2x16c8-minmax-rndnu-neon-mlal.c
    326  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    337  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    386  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    388  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
|
3x16c16-minmax-rndnu-neon-mlal.c
    290  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    308  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
    361  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
    363  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
|
2x16c4s2-minmax-rndnu-neon-mull.c
    187  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
    189  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull()
|
4x16c8-minmax-rndnu-neon-mull.c
    297  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    324  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
    381  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull() local
    383  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull()
|
3x16c8-minmax-rndnu-neon-mlal.c
    431  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    450  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
    503  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal() local
    505  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal()
|
2x16c4-minmax-rndnu-neon-mull-dup.c
    242  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup() local
    244  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup()
|
2x16c4-minmax-rndnu-neon-mull-ld1r.c
    244  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r() local
    246  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r()
|
2x16c4-minmax-rndnu-neon-mull-ld2r.c
    242  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r() local
    244  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r()
|
4x16c16-minmax-rndnu-neon-mlal.c
    361  const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    387  int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
    444  const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal() local
    446  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB );  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal()
|
3x16c4s2-minmax-rndnu-neon-mull.c
    239  const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull() local
    241  int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull()
|
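Both directories show the identical pattern because these files live under gen/ and are emitted from shared microkernel templates; the GEMM and IGEMM hits differ only in source line offsets. Presumably the pairwise tree is preferred because it leaves each channel's total in the lane the following rndnu requantization expects, with no separate transpose step.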