/external/XNNPACK/src/qs8-gemm/gen/
D | 2x8c8-minmax-rndnu-neon-mull.c
    148  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mull() local
    151  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mull()

D | 2x8c16-minmax-rndnu-neon-mlal.c
    163  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal() local
    166  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal()

D | 3x8c8-minmax-rndnu-neon-mull.c
    185  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
    188  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull()

D | 2x8c8-minmax-fp32-neonv8-mlal.c
    224  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    227  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()

D | 2x8c8-minmax-fp32-neon-mlal.c
    223  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    226  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal()

D | 2x8c8-minmax-rndnu-neon-mlal.c
    223  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal() local
    226  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()

D | 3x8c16-minmax-rndnu-neon-mlal.c
    208  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal() local
    211  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c16__neon_mlal()

D | 4x8c8-minmax-rndnu-neon-mull.c
    222  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull() local
    225  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull()

D | 2x16c8-minmax-rndnu-neon-mull.c
    230  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    233  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull()

D | 3x8c8-minmax-rndnu-neon-mlal.c
    286  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal() local
    289  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal()
|
/external/XNNPACK/src/qs8-igemm/gen/
D | 2x8c8-minmax-rndnu-neon-mull.c
    164  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mull() local
    167  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mull()

D | 2x8c16-minmax-rndnu-neon-mlal.c
    179  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c16__neon_mlal() local
    182  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c16__neon_mlal()

D | 2x8c8-minmax-rndnu-neon-mlal.c
    239  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal() local
    242  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()

D | 2x8c8-minmax-fp32-neon-mlal.c
    239  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    242  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal()

D | 2x8c8-minmax-fp32-neonv8-mlal.c
    240  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    243  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()

D | 3x8c8-minmax-rndnu-neon-mull.c
    203  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
    206  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull()

D | 3x8c16-minmax-rndnu-neon-mlal.c
    226  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal() local
    229  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal()

D | 2x16c8-minmax-rndnu-neon-mull.c
    246  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull() local
    249  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull()

D | 4x8c8-minmax-rndnu-neon-mull.c
    242  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull() local
    245  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull()

D | 4x8c16-minmax-rndnu-neon-mlal.c
    273  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal() local
    276  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal()

D | 2x16c16-minmax-rndnu-neon-mlal.c
    277  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal() local
    280  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal()
|
/external/XNNPACK/src/qc8-gemm/gen/
D | 2x8c8-minmax-fp32-neon-mlal.c
    223  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    226  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neon_mlal()

D | 2x8c8-minmax-fp32-neonv8-mlal.c
    224  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    227  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qc8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()
|
/external/XNNPACK/src/qc8-igemm/gen/
D | 2x8c8-minmax-fp32-neonv8-mlal.c
    240  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    243  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()

D | 2x8c8-minmax-fp32-neon-mlal.c
    239  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal() local
    242  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);  in xnn_qc8_igemm_minmax_fp32_ukernel_2x8c8__neon_mlal()
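Every hit above is the same two-step NEON horizontal reduction: each int32x4_t accumulator vacc1xN holds four partial products for one output column of row 1, and the kernel folds it to a single lane before requantization. The following is a minimal sketch of that pattern, not code copied from XNNPACK; the helper name reduce_row1_pair() is made up for illustration.

#include <arm_neon.h>

// Reduce two 4-lane accumulators to one int32x2_t:
// lane 0 = full sum of vacc1x0, lane 1 = full sum of vacc1x1.
static inline int32x2_t reduce_row1_pair(int32x4_t vacc1x0, int32x4_t vacc1x1) {
  // Fold each accumulator from 4 lanes to 2 lanes (low half + high half).
  const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
  const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
  // Pairwise add finishes the reduction: {sum(vacc1x0), sum(vacc1x1)}.
  const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
  return vsum1x01;
}

The vget_low_s32/vget_high_s32 + vadd_s32 + vpadd_s32 sequence is the portable NEON way to do this horizontal sum; it avoids AArch64-only helpers such as vaddvq_s32. In the listed kernels the resulting int32x2_t pairs are then presumably recombined (e.g. with vcombine_s32) into int32x4_t row accumulators before the rndnu or fp32 requantization step.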