// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_ukernel_${MR}x${NR}c16__neon_mlal_padal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 16);
  int8_t* c0 = c;
  $for M in range(1, MR):
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
    // Initialize accumulators with the per-column biases from the packed weights.
    $for N in range(NR):
      int32x4_t vacc0x${N} = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    $for M in range(1, MR):
      $for N in range(NR):
        int32x4_t vacc${M}x${N} = vacc0x${N};

    size_t p = ks;
    do {
      // Load the row pointers from the indirection buffer; rows that point at
      // the zero buffer skip the a_offset adjustment.
      $for M in range(MR):
        const int8_t* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      // KC loop of 16 with up to 15 remainder
      size_t k = 0;
      while (k < kc) {
        $for M in range(MR):
          const int8x16_t va${M} = vld1q_s8(a${M}); a${M} += 16;

        $for N in range(NR):
          const int8x16_t vb${N} = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));

        $for N in range(NR):
          $for M in range(MR):
            int16x8_t vprod${M}x${N} = vmull_s8(vget_low_s8(vb${N}), vget_low_s8(va${M}));
          $for M in range(MR):
            vprod${M}x${N} = vmlal_s8(vprod${M}x${N}, vget_high_s8(vb${N}), vget_high_s8(va${M}));
          $for M in range(MR):
            vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});

        k += 16 * sizeof(int8_t);
      }

      p -= ${MR} * sizeof(void*);
    } while (p != 0);

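    // Horizontal reduction: fold the four partial sums in each per-column
    // int32x4_t accumulator into a single lane, then repack four output
    // columns per vector.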
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x4_t vsum${M}x${ABC[N:N+2]} = vpaddq_s32(vacc${M}x${N}, vacc${M}x${N+1});
        const int32x4_t vsum${M}x${ABC[N+2:N+4]} = vpaddq_s32(vacc${M}x${N+2}, vacc${M}x${N+3});
    $for M in range(MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x2_t vpsum${M}x${ABC[N]} = vadd_s32(vget_low_s32(vacc${M}x${N}), vget_high_s32(vacc${M}x${N}));
        const int32x2_t vpsum${M}x${ABC[N+1]} = vadd_s32(vget_low_s32(vacc${M}x${N+1}), vget_high_s32(vacc${M}x${N+1}));
        const int32x2_t vpsum${M}x${ABC[N+2]} = vadd_s32(vget_low_s32(vacc${M}x${N+2}), vget_high_s32(vacc${M}x${N+2}));
        const int32x2_t vpsum${M}x${ABC[N+3]} = vadd_s32(vget_low_s32(vacc${M}x${N+3}), vget_high_s32(vacc${M}x${N+3}));
        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vpsum${M}x${ABC[N]}, vpsum${M}x${ABC[N+1]});
        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vpsum${M}x${ABC[N+2]}, vpsum${M}x${ABC[N+3]});
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif

    // Requantize: scale by the fixed-point multiplier, then shift right with rounding.
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vqrdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

    const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
    const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
    // Pre-adjust negative accumulators so the rounding shift below rounds ties
    // away from zero; vbicq_s32 zeroes the adjustment when right_shift == 0.
    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vacc${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31);

    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift);

    // Add the output zero point and narrow to 8 bits with saturation.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif
    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
      const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
      const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

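    // Clamp the 8-bit outputs to the requested [output_min, output_max] range.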
    $for M in reversed(range(MR)):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $for M in reversed(range(MR)):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

    if (nc >= ${NR}) {
      $for M in reversed(range(MR)):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in reversed(range(MR)):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= ${NR};
    } else {
      // Partial tile: store the remaining columns in power-of-two chunks.
      $if NR == 16:
        $for M in range(MR):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
            vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
            vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
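      // Store the last column if nc is odd.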
      if (nc & 1) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}