/external/XNNPACK/src/qs8-gemm/gen/ |
D | 1x16c8-minmax-rndnu-neon-mull.c |
    126 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull() local
    132 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull()
    159 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull() local
    161 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull()
|
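Every hit in this listing is the same horizontal-reduction step: before requantization, the 16-column kernels hold per-column int32 partial sums in NEON registers and pairwise-add them down to one lane per column (vsum0xCD covers columns 0xC and 0xD of row 0, matching vacc0x12/vacc0x13). A minimal sketch of the idiom used by the c8/c16 kernels, assuming each input vector carries four partial sums for a single output column; the function and variable names (reduce_columns_CDEF, vaccC, ...) are illustrative, not XNNPACK identifiers:

    #include <arm_neon.h>

    /* Collapse four per-column accumulators (columns C..F) into one int32x4
     * of column totals, mirroring both code paths quoted in the entries. */
    static int32x4_t reduce_columns_CDEF(int32x4_t vaccC, int32x4_t vaccD,
                                         int32x4_t vaccE, int32x4_t vaccF) {
    #if defined(__aarch64__)
      /* vpaddq_s32 pairwise-adds across both operands:
       * vsumCD = { C0+C1, C2+C3, D0+D1, D2+D3 }. */
      const int32x4_t vsumCD = vpaddq_s32(vaccC, vaccD);
      const int32x4_t vsumEF = vpaddq_s32(vaccE, vaccF);
      /* The second pass yields { sum(C), sum(D), sum(E), sum(F) }. */
      return vpaddq_s32(vsumCD, vsumEF);
    #else
      /* AArch32 NEON lacks the 128-bit vpaddq_s32, so fold each vector's
       * halves first, then pair up the 64-bit partial sums. */
      const int32x2_t vpsumC = vadd_s32(vget_low_s32(vaccC), vget_high_s32(vaccC));
      const int32x2_t vpsumD = vadd_s32(vget_low_s32(vaccD), vget_high_s32(vaccD));
      const int32x2_t vpsumE = vadd_s32(vget_low_s32(vaccE), vget_high_s32(vaccE));
      const int32x2_t vpsumF = vadd_s32(vget_low_s32(vaccF), vget_high_s32(vaccF));
      const int32x2_t vsumCD = vpadd_s32(vpsumC, vpsumD);  /* { sum(C), sum(D) } */
      const int32x2_t vsumEF = vpadd_s32(vpsumE, vpsumF);  /* { sum(E), sum(F) } */
      return vcombine_s32(vsumCD, vsumEF);
    #endif
    }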
D | 1x16c16-minmax-rndnu-neon-mlal.c |
    142 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal() local
    147 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal()
    174 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal() local
    176 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal()
|
D | 1x16c8-minmax-rndnu-neon-mlal.c |
    215 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal() local
    221 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal()
    248 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal() local
    250 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mlal()
|
D | 2x16c8-minmax-rndnu-neon-mull.c |
    181 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    195 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    226 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    228 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
|
D | 1x16c4s2-minmax-rndnu-neon-mull.c |
    126 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull() local
    128 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull()
|
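The c4 and c4s2 kernels reach the same totals from a different accumulator layout: each register already interleaves two output columns with two partial sums apiece, so a single vpadd_s32 over its halves plus a vcombine_s32 finishes the job. A hedged sketch assuming vacc0xCD is laid out as { C0, C1, D0, D1 }, which is what the quoted vpadd_s32/vcombine_s32 sequence implies; names are again illustrative:

    #include <arm_neon.h>

    /* vaccCD = { C0, C1, D0, D1 }, vaccEF = { E0, E1, F0, F1 }
     * -> { sum(C), sum(D), sum(E), sum(F) }. */
    static int32x4_t reduce_columns_CDEF_c4(int32x4_t vaccCD, int32x4_t vaccEF) {
      /* vpadd_s32({C0, C1}, {D0, D1}) = { C0+C1, D0+D1 }. */
      const int32x2_t vsumCD = vpadd_s32(vget_low_s32(vaccCD), vget_high_s32(vaccCD));
      const int32x2_t vsumEF = vpadd_s32(vget_low_s32(vaccEF), vget_high_s32(vaccEF));
      return vcombine_s32(vsumCD, vsumEF);
    }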
D | 2x16c16-minmax-rndnu-neon-mlal.c |
    213 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    226 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    257 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    259 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
|
D | 1x16c4-minmax-rndnu-neon-mull-ld1r.c |
    163 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld1r() local
    165 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld1r()
|
D | 1x16c4-minmax-rndnu-neon-mull-dup.c |
    162 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_dup() local
    164 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_dup()
|
D | 1x16c4-minmax-rndnu-neon-mull-ld2r.c |
    162 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld2r() local
    164 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld2r()
|
D | 2x16c8-minmax-rndnu-neon-mlal.c |
    320 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    334 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    365 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    367 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
|
D | 3x16c8-minmax-rndnu-neon-mull.c |
    236 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    258 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    293 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    295 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
|
D | 2x16c4s2-minmax-rndnu-neon-mull.c |
    178 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
    180 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull()
|
D | 1x16c4s2-minmax-rndnu-neon-mlal.c |
    215 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal() local
    217 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal()
|
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 1x16c8-minmax-rndnu-neon-mull.c |
    140 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull() local
    146 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull()
    173 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull() local
    175 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull()
|
D | 1x16c16-minmax-rndnu-neon-mlal.c |
    156 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal() local
    161 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal()
    188 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal() local
    190 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal()
|
D | 1x16c8-minmax-rndnu-neon-mlal.c |
    229 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal() local
    235 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal()
    262 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal() local
    264 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mlal()
|
D | 2x16c8-minmax-rndnu-neon-mull.c |
    197 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    211 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
    242 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    244 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()
|
D | 1x16c4s2-minmax-rndnu-neon-mull.c |
    140 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull() local
    142 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull()
|
D | 2x16c16-minmax-rndnu-neon-mlal.c |
    229 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    242 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
    273 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    275 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
|
D | 1x16c4-minmax-rndnu-neon-mull-ld2r.c |
    175 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld2r() local
    177 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld2r()
|
D | 1x16c4-minmax-rndnu-neon-mull-ld1r.c |
    176 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld1r() local
    178 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld1r()
|
D | 1x16c4-minmax-rndnu-neon-mull-dup.c |
    175 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_dup() local
    177 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4__neon_mull_dup()
|
D | 3x16c8-minmax-rndnu-neon-mull.c |
    254 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    276 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
    311 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull() local
    313 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull()
|
D | 2x16c8-minmax-rndnu-neon-mlal.c |
    336 const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    350 int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
    381 const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal() local
    383 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF ); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal()
|
D | 2x16c4s2-minmax-rndnu-neon-mull.c |
    194 const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD)); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull() local
    196 int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF); in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull()
|