/external/XNNPACK/src/qs8-gemm/gen/

D | 1x8c8-minmax-rndnu-neon-mull.c
     99  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c8__neon_mull() local
    101  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c8__neon_mull()

D | 1x8c16-minmax-rndnu-neon-mlal.c
    106  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c16__neon_mlal() local
    108  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c16__neon_mlal()

D | 1x8c8-minmax-rndnu-neon-mlal.c
    148  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c8__neon_mlal() local
    150  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c8__neon_mlal()

D | 1x8c8-minmax-fp32-neon-mlal.c
    148  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    150  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal()

D | 1x8c8-minmax-fp32-neonv8-mlal.c
    149  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    151  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()

D | 2x8c8-minmax-rndnu-neon-mull.c
    136  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mull() local
    138  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c8__neon_mull()

D | 1x16c8-minmax-rndnu-neon-mull.c
    137  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull() local
    139  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c8__neon_mull()

D | 2x8c16-minmax-rndnu-neon-mlal.c
    151  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal() local
    153  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c16__neon_mlal()

D | 3x8c8-minmax-rndnu-neon-mull.c
    173  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull() local
    175  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mull()

D | 1x16c16-minmax-rndnu-neon-mlal.c
    152  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal() local
    154  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal()

D | 2x8c8-minmax-fp32-neonv8-mlal.c
    212  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal() local
    214  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_gemm_minmax_fp32_ukernel_2x8c8__neonv8_mlal()

/external/XNNPACK/src/qs8-igemm/gen/

D | 1x8c8-minmax-rndnu-neon-mull.c
    113  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c8__neon_mull() local
    115  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c8__neon_mull()

D | 1x8c16-minmax-rndnu-neon-mlal.c
    120  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c16__neon_mlal() local
    122  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c16__neon_mlal()

D | 1x8c8-minmax-fp32-neonv8-mlal.c
    163  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    165  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()

D | 1x8c8-minmax-rndnu-neon-mlal.c
    162  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c8__neon_mlal() local
    164  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c8__neon_mlal()

D | 1x8c8-minmax-fp32-neon-mlal.c
    162  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    164  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal()

D | 2x8c8-minmax-rndnu-neon-mull.c
    152  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mull() local
    154  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mull()

D | 1x16c8-minmax-rndnu-neon-mull.c
    151  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull() local
    153  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c8__neon_mull()

D | 2x8c16-minmax-rndnu-neon-mlal.c
    167  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c16__neon_mlal() local
    169  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c16__neon_mlal()

D | 1x16c16-minmax-rndnu-neon-mlal.c
    166  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal() local
    168  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c16__neon_mlal()

D | 2x8c8-minmax-rndnu-neon-mlal.c
    227  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal() local
    229  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal()

/external/XNNPACK/src/qc8-gemm/gen/

D | 1x8c8-minmax-fp32-neon-mlal.c
    148  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    150  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neon_mlal()

D | 1x8c8-minmax-fp32-neonv8-mlal.c
    149  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    151  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qc8_gemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()

/external/XNNPACK/src/qc8-igemm/gen/

D | 1x8c8-minmax-fp32-neon-mlal.c
    162  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal() local
    164  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal()

D | 1x8c8-minmax-fp32-neonv8-mlal.c
    163  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal() local
    165  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);  in xnn_qc8_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal()
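
Every reference above is the same two-statement idiom: in the c8/c16 micro-kernels each output channel accumulates its partial products into its own int32x4_t (vacc0x0, vacc0x1, ...), and before requantization the four lanes of each accumulator are folded into one per-channel sum, which is then packed four channels at a time into a single int32x4_t. The sketch below shows that reduction step in isolation. It is a minimal illustration, not XNNPACK code: it assumes only ARM NEON (arm_neon.h), and the helper name reduce4_s32 is ours for illustration.

#include <arm_neon.h>

/*
 * Sketch only (not an XNNPACK symbol): horizontally reduce four per-channel
 * int32x4_t accumulators into one int32x4_t holding the four channel sums,
 * mirroring the vpsum* / vsum* statements referenced in the listing above.
 */
static int32x4_t reduce4_s32(int32x4_t vacc0, int32x4_t vacc1,
                             int32x4_t vacc2, int32x4_t vacc3)
{
  /* Fold each 4-lane accumulator to 2 lanes: low half plus high half. */
  const int32x2_t vpsum0 = vadd_s32(vget_low_s32(vacc0), vget_high_s32(vacc0));
  const int32x2_t vpsum1 = vadd_s32(vget_low_s32(vacc1), vget_high_s32(vacc1));
  const int32x2_t vpsum2 = vadd_s32(vget_low_s32(vacc2), vget_high_s32(vacc2));
  const int32x2_t vpsum3 = vadd_s32(vget_low_s32(vacc3), vget_high_s32(vacc3));
  /* Pairwise-add adjacent 2-lane vectors: one 32-bit sum per channel. */
  const int32x2_t vsum01 = vpadd_s32(vpsum0, vpsum1);
  const int32x2_t vsum23 = vpadd_s32(vpsum2, vpsum3);
  /* Pack the four channel sums into a single vector for requantization. */
  return vcombine_s32(vsum01, vsum23);
}

In the generated kernels this sequence is inlined and repeated for the remaining channels (4-7, and 8-15 in the 1x16 variants) rather than factored into a helper, which is why the same pair of lines recurs at slightly different line numbers in every file listed.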