/external/XNNPACK/src/qs8-gemm/gen/
D | 3x8c8-minmax-rndnu-neon-mull.c
      161  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
      168  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull()
      210  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
      211  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull()

D | 3x8c16-minmax-rndnu-neon-mlal.c
      185  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal() local
      191  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal()
      233  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal() local
      234  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal()

D | 4x8c8-minmax-rndnu-neon-mull.c
      192  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull() local
      203  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull()
      247  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull() local
      248  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull()

D | 3x8c8-minmax-rndnu-neon-mlal.c
      262  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
      269  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
      311  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
      312  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()

D | 4x8c16-minmax-rndnu-neon-mlal.c
      224  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c16__neon_mlal() local
      234  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c16__neon_mlal()
      278  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c16__neon_mlal() local
      279  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c16__neon_mlal()

D | 3x8c4s2-minmax-rndnu-neon-mull.c
      163  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull() local
      164  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull()

D | 4x8c8-minmax-rndnu-neon-mlal.c
      319  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal() local
      330  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
      374  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal() local
      375  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()

D | 3x16c8-minmax-rndnu-neon-mull.c
      249  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      264  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
      336  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      337  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()

D | 3x16c16-minmax-rndnu-neon-mlal.c
      297  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      311  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
      383  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      384  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()

D | 3x8c4-minmax-rndnu-neon-mull-ld1r.c
      212  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld1r() local
      213  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld1r()

D | 3x8c4-minmax-rndnu-neon-mull-dup.c
      209  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_dup() local
      210  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_dup()

D | 3x8c4-minmax-rndnu-neon-mull-ld2r.c
      209  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld2r() local
      210  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld2r()

D | 4x8c4s2-minmax-rndnu-neon-mull.c
      193  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull() local
      194  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull()

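All of the qs8-gemm rows above index the same reduction step: after the inner K loop, each output channel of row 2 holds its partial products in its own vector, and two rounds of pairwise addition collapse channels 4..7 into the single vector vacc2x4567. The standalone helper below is a minimal sketch of that step, not XNNPACK's API: the function name and signature are hypothetical, while the intrinsic sequence mirrors the quoted lines (vpaddq_s32 on AArch64; vadd_s32, vpadd_s32, and vcombine_s32 on 32-bit ARM, matching the second pair of lines in each row).

    #include <arm_neon.h>

    // Hypothetical helper: reduce four per-channel accumulators (row 2,
    // channels 4..7) to one vector of horizontal sums {sum4, sum5, sum6, sum7}.
    static inline int32x4_t reduce_row2_channels_4567(
        int32x4_t vacc2x4, int32x4_t vacc2x5,
        int32x4_t vacc2x6, int32x4_t vacc2x7) {
    #if defined(__aarch64__)
      // vpaddq_s32(a, b) = {a0+a1, a2+a3, b0+b1, b2+b3}, so two rounds
      // leave one full horizontal sum per channel in each output lane.
      const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
      const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
      return vpaddq_s32(vsum2x45, vsum2x67);
    #else
      // 32-bit ARM has no 128-bit vpaddq_s32: fold each accumulator to two
      // lanes first, pairwise-add, then recombine the 64-bit halves.
      const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
      const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
      const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
      const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
      const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);  // {sum4, sum5}
      const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  // {sum6, sum7}
      return vcombine_s32(vsum2x45, vsum2x67);
    #endif
    }

Both branches produce the same lane order, which is why the surrounding kernels can feed vacc2x4567 into the shared requantization tail regardless of architecture.
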
/external/XNNPACK/src/qs8-igemm/gen/
D | 3x8c8-minmax-rndnu-neon-mull.c
      179  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
      186  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull()
      228  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
      229  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull()

D | 3x8c16-minmax-rndnu-neon-mlal.c
      203  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal() local
      209  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal()
      251  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal() local
      252  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal()

D | 4x8c8-minmax-rndnu-neon-mull.c
      212  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull() local
      223  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull()
      267  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull() local
      268  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull()

D | 4x8c16-minmax-rndnu-neon-mlal.c
      244  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal() local
      254  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal()
      298  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal() local
      299  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal()

D | 3x8c8-minmax-rndnu-neon-mlal.c
      280  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
      287  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
      329  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
      330  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()

D | 4x8c8-minmax-rndnu-neon-mlal.c
      339  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal() local
      350  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()
      394  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal() local
      395  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mlal()

D | 3x16c8-minmax-rndnu-neon-mull.c
      267  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      282  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
      354  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
      355  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()

D | 3x8c4s2-minmax-rndnu-neon-mull.c
      181  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull() local
      182  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull()

D | 3x16c16-minmax-rndnu-neon-mlal.c
      315  const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      329  int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()
      401  const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal() local
      402  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal()

D | 4x8c4s2-minmax-rndnu-neon-mull.c
      213  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull() local
      214  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull()

D | 3x8c4-minmax-rndnu-neon-mull-ld1r.c
      229  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld1r() local
      230  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld1r()

D | 3x8c4-minmax-rndnu-neon-mull-dup.c
      226  const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_dup() local
      227  int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_dup()

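The c4 and c4s2 rows, in both listings, show a shorter variant of the same reduction: channels 6 and 7 already share one register, with channel 6's partials in the low half of vacc2x67 and channel 7's in the high half, so a single vpadd_s32 across the two halves yields {sum6, sum7} directly. Below is a minimal sketch of that variant, assuming vacc2x45 is laid out the same way; the helper name is hypothetical, the intrinsic sequence matches the quoted lines.

    #include <arm_neon.h>

    // Hypothetical helper for the c4/c4s2 layout: each input vector carries
    // two channels' partial sums, one channel per 64-bit half.
    static inline int32x4_t combine_row2_c4_channels_4567(int32x4_t vacc2x45,
                                                          int32x4_t vacc2x67) {
      const int32x2_t vsum2x45 =
          vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));  // {sum4, sum5}
      const int32x2_t vsum2x67 =
          vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));  // {sum6, sum7}
      return vcombine_s32(vsum2x45, vsum2x67);  // {sum4, sum5, sum6, sum7}
    }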