// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_ukernel_${MR}x${NR}c8__neon_${"mlal" if MLA else "mull"}_padal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The kernel consumes K in groups of 8 int8 elements, so round kc up to a multiple of 8.
  kc = round_up_po2(kc, 8);
  int8_t* c0 = c;
  $for M in range(1, MR):
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
    $for N in range(NR):
      int32x4_t vacc0x${N} = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    $for M in range(1, MR):
      $for N in range(NR):
        int32x4_t vacc${M}x${N} = vacc0x${N};

    size_t p = ks;
    do {
      $for M in range(MR):
        const int8_t* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if MLA:
        // 2x partial unrolled loop to load 16 bytes at a time using MLA.
        while (k >= 16 * sizeof(int8_t)) {
          $for M in range(MR):
            const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
            const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;

          $for N in range(NR):
            const int8x8_t vb${N}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          $for N in range(NR):
            const int8x8_t vb${N}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            $for M in range(MR):
              int16x8_t vprod${M}x${N} = vmull_s8(vb${N}x0, va${M}x0);
            $for M in range(MR):
              vprod${M}x${N} = vmlal_s8(vprod${M}x${N}, vb${N}x1, va${M}x1);
            $for M in range(MR):
              vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});

          k -= 16 * sizeof(int8_t);
        }

      // Handle 8 bytes at a time using MUL.
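      // kc was rounded up to a multiple of 8 above, so the MLAL variant has at
      // most one 8-byte group of K left here (hence "if"); the MULL-only
      // variant processes all of kc in this loop.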
      ${"if" if MLA else "while"} (k > 0) {
        $for M in range(MR):
          const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;

        $for N in range(NR):
          const int8x8_t vb${N} = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          $for M in range(MR):
            const int16x8_t vprod${M}x${N} = vmull_s8(vb${N}, va${M});
          $for M in range(MR):
            vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});

        k -= 8 * sizeof(int8_t);
      }

      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    // Sum the 4 lanes of each per-channel accumulator and gather four channels
    // into one int32x4_t register.
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x4_t vsum${M}x${ABC[N:N+2]} = vpaddq_s32(vacc${M}x${N}, vacc${M}x${N+1});
        const int32x4_t vsum${M}x${ABC[N+2:N+4]} = vpaddq_s32(vacc${M}x${N+2}, vacc${M}x${N+3});
    $for M in range(MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x2_t vpsum${M}x${ABC[N]} = vadd_s32(vget_low_s32(vacc${M}x${N}), vget_high_s32(vacc${M}x${N}));
        const int32x2_t vpsum${M}x${ABC[N+1]} = vadd_s32(vget_low_s32(vacc${M}x${N+1}), vget_high_s32(vacc${M}x${N+1}));
        const int32x2_t vpsum${M}x${ABC[N+2]} = vadd_s32(vget_low_s32(vacc${M}x${N+2}), vget_high_s32(vacc${M}x${N+2}));
        const int32x2_t vpsum${M}x${ABC[N+3]} = vadd_s32(vget_low_s32(vacc${M}x${N+3}), vget_high_s32(vacc${M}x${N+3}));
        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vpsum${M}x${ABC[N]}, vpsum${M}x${ABC[N+1]});
        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vpsum${M}x${ABC[N+2]}, vpsum${M}x${ABC[N+3]});
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif

    // Requantize: saturating rounding doubling multiply-high by the multiplier,
    // then a rounding right shift; the vbic/vsraq step adjusts negative values
    // so the rounding matches the scalar reference implementation.
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vqrdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

    const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
    const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vacc${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31);

    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift);

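    // Narrow the requantized 32-bit accumulators to 16 bits while adding the
    // output zero point, then narrow to 8 bits, saturating at each step.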
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif
    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
      const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
      const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

    $for M in reversed(range(MR)):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $for M in reversed(range(MR)):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

    if (nc >= ${NR}) {
      $for M in reversed(range(MR)):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in reversed(range(MR)):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= ${NR};
    } else {
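      // Partial tile: fewer than ${NR} output channels remain, so the results
      // are written out in 8-, 4-, 2- and 1-byte pieces.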
      $if NR == 16:
        $for M in range(MR):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
            vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
            vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}