/external/XNNPACK/src/qs8-gemm/gen/

3x8c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mlal_padal()
    77  int32x4_t vacc2x5 = vacc0x5;  (local)
   159  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   231  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   261  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   306  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x8c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal()
    77  int32x4_t vacc2x5 = vacc0x5;  (local)
   130  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   160  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   205  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x8c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal()
    77  int32x4_t vacc2x5 = vacc0x5;  (local)
   150  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   184  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   229  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x8c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal()
    83  int32x4_t vacc2x5 = vacc0x5;  (local)
   156  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   191  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   242  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x8c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mlal_padal()
    83  int32x4_t vacc2x5 = vacc0x5;  (local)
   192  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   283  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   318  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   369  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x8c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_4x8c16__neon_mlal_padal()
    83  int32x4_t vacc2x5 = vacc0x5;  (local)
   182  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   223  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   274  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x16c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mull_padal()
    93  int32x4_t vacc2x5 = vacc0x5;  (local)
   154  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   248  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   331  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x16c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_3x16c8__neon_mlal_padal()
    93  int32x4_t vacc2x5 = vacc0x5;  (local)
   191  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   343  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   437  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   520  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x16c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_3x16c16__neon_mlal_padal()
    93  int32x4_t vacc2x5 = vacc0x5;  (local)
   182  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   296  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   379  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x16c8-minmax-neon-mull-padal.c | xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mull_padal()
    99  int32x4_t vacc2x5 = vacc0x5;  (local)
   188  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   303  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   398  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x16c8-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_4x16c8__neon_mlal_padal()
    99  int32x4_t vacc2x5 = vacc0x5;  (local)
   232  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   427  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   542  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   637  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x16c16-minmax-neon-mlal-padal.c | xnn_qs8_gemm_minmax_ukernel_4x16c16__neon_mlal_padal()
    99  int32x4_t vacc2x5 = vacc0x5;  (local)
   222  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   367  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   462  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
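Every hit above is one instance of the same c8/c16 accumulation pattern: int8 inputs are widened to int16 products (vmull_s8, plus vmlal_s8 in the mlal variants), folded into an int32x4_t accumulator with vpadalq_s16 (pairwise add and accumulate long), and finally reduced per output channel. Below is a minimal sketch of that step for a single accumulator, assuming NEON is available; it is not the XNNPACK kernel itself, and the helper name dot8_qs8 is made up for illustration.

#include <arm_neon.h>
#include <stdint.h>

/* Hypothetical helper, not XNNPACK API: accumulates the dot product of one
 * 8-byte slice of A and B into a running int32 sum, the way a single
 * vacc<row>x<channel> accumulator is fed in the kernels listed above. */
static int32_t dot8_qs8(int32_t acc, const int8_t a[8], const int8_t b[8]) {
  const int8x8_t va = vld1_s8(a);
  const int8x8_t vb = vld1_s8(b);
  const int16x8_t vprod = vmull_s8(va, vb);  /* widen: eight int16 products */
  int32x4_t vacc = vdupq_n_s32(0);
  vacc = vpadalq_s16(vacc, vprod);           /* pairwise add-accumulate into int32 lanes */
#if defined(__aarch64__)
  return acc + vaddvq_s32(vacc);             /* A64 horizontal reduction */
#else
  /* ARMv7 path, mirroring the vadd_s32(vget_low_s32(...), vget_high_s32(...))
   * lines in the listing above. */
  const int32x2_t vsum = vadd_s32(vget_low_s32(vacc), vget_high_s32(vacc));
  return acc + vget_lane_s32(vsum, 0) + vget_lane_s32(vsum, 1);
#endif
}

The two reduction paths exist because the kernels support both architectures: the vpsum2x5 lines are the ARMv7 tail, while AArch64 builds take the vpaddq_s32 route shown next.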
/external/XNNPACK/src/qs8-igemm/gen/

3x8c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal()
    76  int32x4_t vacc2x5 = vacc0x5;  (local)
   145  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   178  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   223  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x8c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mlal_padal()
    76  int32x4_t vacc2x5 = vacc0x5;  (local)
   174  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   246  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   279  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   324  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x8c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal()
    76  int32x4_t vacc2x5 = vacc0x5;  (local)
   165  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   202  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   247  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x8c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal()
    80  int32x4_t vacc2x5 = vacc0x5;  (local)
   173  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   211  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   262  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x8c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mlal_padal()
    80  int32x4_t vacc2x5 = vacc0x5;  (local)
   209  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   300  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   338  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   389  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x8c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_4x8c16__neon_mlal_padal()
    80  int32x4_t vacc2x5 = vacc0x5;  (local)
   199  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   243  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   294  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x16c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mull_padal()
    92  int32x4_t vacc2x5 = vacc0x5;  (local)
   169  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   266  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   349  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x16c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_3x16c8__neon_mlal_padal()
    92  int32x4_t vacc2x5 = vacc0x5;  (local)
   206  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   358  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   455  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   538  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

3x16c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_3x16c16__neon_mlal_padal()
    92  int32x4_t vacc2x5 = vacc0x5;  (local)
   197  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   314  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   397  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x16c8-minmax-neon-mull-padal.c | xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mull_padal()
    96  int32x4_t vacc2x5 = vacc0x5;  (local)
   205  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   323  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   418  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x16c8-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_4x16c8__neon_mlal_padal()
    96  int32x4_t vacc2x5 = vacc0x5;  (local)
   249  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   444  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   562  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   657  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));

4x16c16-minmax-neon-mlal-padal.c | xnn_qs8_igemm_minmax_ukernel_4x16c16__neon_mlal_padal()
    96  int32x4_t vacc2x5 = vacc0x5;  (local)
   239  vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
   387  const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
   482  const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
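The vsum2x45-style lines are the AArch64 half of the final per-channel reduction: two levels of vpaddq_s32 collapse four int32x4_t accumulators into one vector of four channel totals. A sketch under the same caveat as before; reduce_4channels is a hypothetical name, not XNNPACK API:

#include <arm_neon.h>

#if defined(__aarch64__)
/* Collapses four per-channel accumulators (e.g. channels 4..7 of row 2)
 * into one int32x4_t holding the four per-channel totals, the same
 * vsum2x45 -> vacc2x4567 step the listed kernels perform. */
static int32x4_t reduce_4channels(int32x4_t vacc4, int32x4_t vacc5,
                                  int32x4_t vacc6, int32x4_t vacc7) {
  /* vpaddq_s32(a, b) = [a0+a1, a2+a3, b0+b1, b2+b3] */
  const int32x4_t vsum45 = vpaddq_s32(vacc4, vacc5);
  const int32x4_t vsum67 = vpaddq_s32(vacc6, vacc7);
  return vpaddq_s32(vsum45, vsum67);  /* [sum4, sum5, sum6, sum7] */
}
#endif

Because vpaddq_s32 is AArch64-only, the ARMv7 builds instead split each accumulator with vget_low_s32/vget_high_s32 and combine the halves with vadd_s32, which is exactly what the vpsum2x5 lines in the listing do.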