/external/XNNPACK/src/qs8-gemm/gen/

D | 1x8c8-minmax-neon-mull-padal.c
     88  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal() local
     92  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal()
     99  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal() local
    101  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal()

D | 1x8c16-minmax-neon-mlal-padal.c
     96  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal() local
    100  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal()
    107  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal() local
    109  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal()

D | 1x8c8-minmax-neon-mlal-padal.c
    137  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mlal_padal() local
    141  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mlal_padal()
    148  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mlal_padal() local
    150  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mlal_padal()

D | 2x8c8-minmax-neon-mull-padal.c
    119  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal() local
    127  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal()
    136  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal() local
    138  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mull_padal()

D | 1x16c8-minmax-neon-mull-padal.c
    120  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mull_padal() local
    128  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mull_padal()
    137  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mull_padal() local
    139  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mull_padal()

D | 2x8c16-minmax-neon-mlal-padal.c
    135  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal() local
    143  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal()
    152  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal() local
    154  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_2x8c16__neon_mlal_padal()

D | 1x16c16-minmax-neon-mlal-padal.c
    136  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_1x16c16__neon_mlal_padal() local
    144  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_1x16c16__neon_mlal_padal()
    153  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_1x16c16__neon_mlal_padal() local
    155  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_1x16c16__neon_mlal_padal()

D | 2x8c8-minmax-neon-mlal-padal.c
    194  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal() local
    202  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal()
    211  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal() local
    213  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_2x8c8__neon_mlal_padal()

D | 3x8c8-minmax-neon-mull-padal.c
    150  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal() local
    162  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal()
    173  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal() local
    175  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_3x8c8__neon_mull_padal()

D | 3x8c16-minmax-neon-mlal-padal.c
    174  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal() local
    186  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal()
    197  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal() local
    199  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_3x8c16__neon_mlal_padal()

D | 1x16c8-minmax-neon-mlal-padal.c
    209  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mlal_padal() local
    217  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mlal_padal()
    226  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mlal_padal() local
    228  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_1x16c8__neon_mlal_padal()

D | 4x8c8-minmax-neon-mull-padal.c
    181  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal() local
    197  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal()
    210  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal() local
    212  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_gemm_minmax_ukernel_4x8c8__neon_mull_padal()
/external/XNNPACK/src/qs8-igemm/gen/

D | 1x8c8-minmax-neon-mull-padal.c
    102  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal() local
    106  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal()
    113  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal() local
    115  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal()

D | 1x8c16-minmax-neon-mlal-padal.c
    110  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_1x8c16__neon_mlal_padal() local
    114  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_1x8c16__neon_mlal_padal()
    121  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_1x8c16__neon_mlal_padal() local
    123  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_1x8c16__neon_mlal_padal()

D | 1x8c8-minmax-neon-mlal-padal.c
    151  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mlal_padal() local
    155  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mlal_padal()
    162  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mlal_padal() local
    164  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mlal_padal()

D | 2x8c8-minmax-neon-mull-padal.c
    135  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal() local
    143  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal()
    152  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal() local
    154  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal()

D | 1x16c8-minmax-neon-mull-padal.c
    134  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mull_padal() local
    142  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mull_padal()
    151  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mull_padal() local
    153  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mull_padal()

D | 2x8c16-minmax-neon-mlal-padal.c
    151  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal() local
    159  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal()
    168  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal() local
    170  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_2x8c16__neon_mlal_padal()

D | 1x16c16-minmax-neon-mlal-padal.c
    150  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_1x16c16__neon_mlal_padal() local
    158  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_1x16c16__neon_mlal_padal()
    167  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_1x16c16__neon_mlal_padal() local
    169  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_1x16c16__neon_mlal_padal()

D | 2x8c8-minmax-neon-mlal-padal.c
    210  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal() local
    218  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal()
    227  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal() local
    229  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mlal_padal()

D | 3x8c8-minmax-neon-mull-padal.c
    168  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal() local
    180  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal()
    191  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal() local
    193  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_3x8c8__neon_mull_padal()

D | 1x16c8-minmax-neon-mlal-padal.c
    223  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mlal_padal() local
    231  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mlal_padal()
    240  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mlal_padal() local
    242  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_1x16c8__neon_mlal_padal()

D | 3x8c16-minmax-neon-mlal-padal.c
    192  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal() local
    204  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal()
    215  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal() local
    217  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_3x8c16__neon_mlal_padal()

D | 4x8c8-minmax-neon-mull-padal.c
    201  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal() local
    217  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal()
    230  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal() local
    232  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_4x8c8__neon_mull_padal()

D | 2x16c8-minmax-neon-mull-padal.c
    191  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal() local
    207  int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal()
    220  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal() local
    222  int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );  in xnn_qs8_igemm_minmax_ukernel_2x16c8__neon_mull_padal()
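Every occurrence of vsum0x01 listed above is the same pairwise-reduction idiom: each c8/c16 kernel keeps one widened int32x4_t accumulator per output column (vacc0x0 .. vacc0x3 for row 0) and, at the end of the channel loop, folds them into a single vacc0x0123. The int32x4_t definitions sit on the AArch64 path (vpaddq_s32), the int32x2_t definitions on the ARMv7 fallback (vpadd_s32 plus vcombine_s32), because the full-width pairwise add only exists on A64. The sketch below is a hand-written illustration of that idiom, not a copy of any listed file: the helper name reduce_row0 is hypothetical, and it guards on the standard __aarch64__ macro rather than XNNPACK's own architecture macros.

#include <arm_neon.h>

// Hypothetical helper (illustration only): combine the four per-column
// accumulators of output row 0 into one int32x4_t holding
// { sum(vacc0x0), sum(vacc0x1), sum(vacc0x2), sum(vacc0x3) },
// mirroring how the kernels above compute vacc0x0123.
static int32x4_t reduce_row0(int32x4_t vacc0x0, int32x4_t vacc0x1,
                             int32x4_t vacc0x2, int32x4_t vacc0x3)
{
#if defined(__aarch64__)
  // A64 has a 128-bit pairwise add; two levels of vpaddq_s32 reduce
  // four 4-lane accumulators to one vector of four column sums.
  const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
  const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
  return vpaddq_s32(vsum0x01, vsum0x23);
#else
  // ARMv7 NEON lacks vpaddq_s32: fold each 128-bit accumulator to 64 bits,
  // pairwise-add the halves, then recombine into a 128-bit result.
  const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
  const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
  const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
  const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
  const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
  const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
  return vcombine_s32(vsum0x01, vsum0x23);
#endif
}

The higher-MR kernels (2x8, 3x8, 4x8, 2x16) simply repeat this reduction for vacc1x..., vacc2x..., and so on, one vaccNx0123 (and vaccNx4567 for the 16-wide kernels) per output row.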