// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert not CHANNELWISE or REQUANTIZATION == "FP32"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>


$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("neonv8" if REQUANTIZATION == "FP32" and ARMV8 else "neon")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$ISA = "neonv8" if ARMV8 else "neon"
void xnn_${DATATYPE}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c8__${ISA}_${"mlal" if MLA else "mull"}(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  $for M in range(1, MR):
    const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride);
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
    $for N in range(NR):
      int32x4_t vacc0x${N} = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    $for M in range(1, MR):
      $for N in range(NR):
        int32x4_t vacc${M}x${N} = vacc0x${N};

    size_t k = kc;
    $if MLA:
      // 2x partial unrolled loop to load 16 bytes at a time using MLA.
      while (k >= 16 * sizeof(int8_t)) {
        $for M in range(MR):
          const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
          const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;

        $for N in range(NR):
          const int8x8_t vb${N}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        $for N in range(NR):
          const int8x8_t vb${N}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          $for M in range(MR):
            int16x8_t vprod${M}x${N} = vmull_s8(vb${N}x0, va${M}x0);
          $for M in range(MR):
            vprod${M}x${N} = vmlal_s8(vprod${M}x${N}, vb${N}x1, va${M}x1);
          $for M in range(MR):
            vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});

        k -= 16 * sizeof(int8_t);
      }

    // Handle 8 bytes at a time using MUL.
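    // kc was rounded up to a multiple of 8 bytes on entry, so after the MLA
    // main loop k is either 0 or 8: the MLAL variant needs only a single
    // `if` here, while the MULL-only variant consumes all of K in this loop
    // and uses `while`. Each group widens int8 products to int16 with
    // vmull_s8 and accumulates adjacent pairs into int32 lanes with
    // vpadalq_s16.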
    ${"if" if MLA else "while"} (k != 0) {
      $for M in range(MR):
        const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;

      $for N in range(NR):
        const int8x8_t vb${N} = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        $for M in range(MR):
          const int16x8_t vprod${M}x${N} = vmull_s8(vb${N}, va${M});
        $for M in range(MR):
          vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});

      k -= 8 * sizeof(int8_t);
    }

#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x4_t vsum${M}x${ABC[N:N+2]} = vpaddq_s32(vacc${M}x${N}, vacc${M}x${N+1});
        const int32x4_t vsum${M}x${ABC[N+2:N+4]} = vpaddq_s32(vacc${M}x${N+2}, vacc${M}x${N+3});

    $for M in range(MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x2_t vpsum${M}x${ABC[N]} = vadd_s32(vget_low_s32(vacc${M}x${N}), vget_high_s32(vacc${M}x${N}));
        const int32x2_t vpsum${M}x${ABC[N+1]} = vadd_s32(vget_low_s32(vacc${M}x${N+1}), vget_high_s32(vacc${M}x${N+1}));
        const int32x2_t vpsum${M}x${ABC[N+2]} = vadd_s32(vget_low_s32(vacc${M}x${N+2}), vget_high_s32(vacc${M}x${N+2}));
        const int32x2_t vpsum${M}x${ABC[N+3]} = vadd_s32(vget_low_s32(vacc${M}x${N+3}), vget_high_s32(vacc${M}x${N+3}));
        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vpsum${M}x${ABC[N]}, vpsum${M}x${ABC[N+1]});
        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vpsum${M}x${ABC[N+2]}, vpsum${M}x${ABC[N+3]});
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif

    $if REQUANTIZATION == "RNDNU":
      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
    $elif REQUANTIZATION == "FP32":
      $for M in range(MR):
        $for N in range(0, NR, 4):
          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});

      $if CHANNELWISE:
        $for N in range(0, NR, 4):
          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
          $for M in range(MR):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
      $else:
        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);

      $if ARMV8:
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
      $else:
        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));

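        // Rounding trick for targets without vcvtnq_s32_f32: adding the magic
        // bias (0x1.8p23f in XNNPACK's fp32 NEON params) places the scaled
        // value, already rounded to nearest-even, in the low mantissa bits.
        // The saturating integer subtraction below strips the bias bits and
        // folds in the output zero point in the same step.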
        const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vqsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);

    $if REQUANTIZATION != "FP32" or ARMV8:
      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]});

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]}));

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $else:
      const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

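    // Full tiles store all ${NR} columns per row, advance each c pointer by
    // cn_stride, and rewind each a pointer by kc so the same A rows pair
    // with the next block of B columns. The remainder path peels off the
    // leftover columns with 8-, 4-, 2-, and 1-wide stores selected by the
    // bits of nc.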
    if (nc >= ${NR}) {
      $for M in range(MR):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in range(MR):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      $for M in range(MR):
        a${M} = (const int8_t*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Final case where not all of the ${NR} columns fit in the destination.
      $if NR == 16:
        $for M in range(MR):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in range(MR):
            $if M % 2 == 1:
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in range(MR):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
            vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
            vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}