/external/XNNPACK/src/qs8-igemm/gen/

D | 2x16c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal()
     77  int32x4_t vacc1x10 = vacc0x10;  [local]
    157  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    204  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    260  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 2x16c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mlal_padal()
     77  int32x4_t vacc1x10 = vacc0x10;  [local]
    197  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    296  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    343  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    399  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 2x16c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_2x16c16__neon_mlal_padal()
     77  int32x4_t vacc1x10 = vacc0x10;  [local]
    184  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    236  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    292  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 3x16c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal()
     81  int32x4_t vacc1x10 = vacc0x10;  [local]
    203  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    261  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    329  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 3x16c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
     81  int32x4_t vacc1x10 = vacc0x10;  [local]
    255  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    392  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    450  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    518  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 3x16c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_3x16c16__neon_mlal_padal()
     81  int32x4_t vacc1x10 = vacc0x10;  [local]
    241  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    309  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    377  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 4x16c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mull_padal()
     85  int32x4_t vacc1x10 = vacc0x10;  [local]
    249  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    318  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    398  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 4x16c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
     85  int32x4_t vacc1x10 = vacc0x10;  [local]
    313  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    488  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    557  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    637  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 4x16c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_4x16c16__neon_mlal_padal()
     85  int32x4_t vacc1x10 = vacc0x10;  [local]
    298  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    382  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    462  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));
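Every occurrence of vacc1x10 above follows the same accumulation pattern: a widening 8-bit multiply (vmull_s8 in the "mull" variants, vmull_s8/vmlal_s8 pairs in the "mlal" variants) produces 16-bit products, vpadalq_s16 folds adjacent products pairwise into a 32-bit accumulator, and the vpsum1xA lines do the ARMv7 NEON horizontal reduction. Below is a minimal standalone sketch of that pattern, not XNNPACK code; the arrays a1 and b10 and their contents are made up for illustration.

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical inputs: one row of A and one packed column of B, 8 channels each. */
  const int8_t a1[8]  = {1, 2, 3, 4, 5, 6, 7, 8};
  const int8_t b10[8] = {1, 1, 1, 1, 1, 1, 1, 1};

  int32x4_t vacc1x10 = vdupq_n_s32(0);  /* the real kernels initialize from a bias */

  const int8x8_t va1  = vld1_s8(a1);
  const int8x8_t vb10 = vld1_s8(b10);

  /* 8-bit x 8-bit widening multiply -> eight 16-bit products (the vprod1x10 values) */
  const int16x8_t vprod1x10 = vmull_s8(va1, vb10);

  /* pairwise-add adjacent 16-bit products into the 32-bit accumulator */
  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);

  /* horizontal reduction, as in the vpsum1xA lines above */
  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));
  const int32_t dot = vget_lane_s32(vpadd_s32(vpsum1xA, vpsum1xA), 0);

  printf("dot = %d\n", dot);  /* 1+2+...+8 = 36 */
  return 0;
}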
/external/XNNPACK/src/qs8-gemm/gen/

D | 2x16c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mull_padal()
     76  int32x4_t vacc1x10 = vacc0x10;  [local]
    144  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    188  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    244  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 2x16c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_2x16c8__neon_mlal_padal()
     76  int32x4_t vacc1x10 = vacc0x10;  [local]
    184  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    283  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    327  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    383  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 2x16c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_2x16c16__neon_mlal_padal()
     76  int32x4_t vacc1x10 = vacc0x10;  [local]
    171  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    220  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    276  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 3x16c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal()
     82  int32x4_t vacc1x10 = vacc0x10;  [local]
    188  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    243  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    311  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 3x16c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
     82  int32x4_t vacc1x10 = vacc0x10;  [local]
    240  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    377  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    432  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    500  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 3x16c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_3x16c16__neon_mlal_padal()
     82  int32x4_t vacc1x10 = vacc0x10;  [local]
    226  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    291  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    359  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 4x16c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mull_padal()
     88  int32x4_t vacc1x10 = vacc0x10;  [local]
    232  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    298  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    378  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 4x16c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
     88  int32x4_t vacc1x10 = vacc0x10;  [local]
    296  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    471  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    537  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    617  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));

D | 4x16c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_4x16c16__neon_mlal_padal()
     88  int32x4_t vacc1x10 = vacc0x10;  [local]
    281  vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
    362  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    442  const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));
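The vsum1xAB lines in both directories use vpaddq_s32, an AArch64-only pairwise add, to merge the per-column accumulators for two adjacent output columns (here columns 10 and 11); the vget_low_s32/vget_high_s32 lines are the corresponding 32-bit NEON reduction path. A hedged standalone sketch of the AArch64 reduction follows, with made-up partial sums; it is not the kernels' code.

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical partial sums for output columns 10 and 11 (4 lanes each). */
  const int32_t p10[4] = {1, 2, 3, 4};   /* column 10, total 10 */
  const int32_t p11[4] = {5, 6, 7, 8};   /* column 11, total 26 */
  const int32x4_t vacc1x10 = vld1q_s32(p10);
  const int32x4_t vacc1x11 = vld1q_s32(p11);

  /* AArch64 pairwise add across both accumulators: {1+2, 3+4, 5+6, 7+8} */
  const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);

  /* one more pairwise add finishes both columns: lane 0 = column 10, lane 1 = column 11 */
  const int32x4_t vtotal = vpaddq_s32(vsum1xAB, vsum1xAB);

  printf("col10 = %d, col11 = %d\n",
         vgetq_lane_s32(vtotal, 0), vgetq_lane_s32(vtotal, 1));
  return 0;
}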