// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert SSE in [2, 4]
$assert not XOP or AVX
$assert not AVX or SSE == 4
$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert VARIANT in ["LD64", "LD128"]
$assert MR <= 4
#include <assert.h>

$if XOP:
  #if defined(__GNUC__) || defined(__clang__)
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <ammintrin.h>
  #endif
$else:
  $SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
  #include <${SSE_HEADER}>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


$PARAMS_UNION = "xnn_qs8_minmax_params" if DATATYPE == "QC8" else "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$PARAMS_STRUCT = ("" if DATATYPE == "QC8" else "fp32_") + ("sse4" if SSE >= 4 and DATATYPE != "QU8" else "sse2")
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE.lower()}_igemm_minmax_fp32_ukernel_${MR}x4c2__${ISA}_${VARIANT.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const ${XINT8_T}** restrict a,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const ${XINT8_T}* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round kc up to a multiple of 2 to match the 2-element (c2) weight packing.
  kc = round_up_po2(kc, 2);
  ${XINT8_T}* c0 = c;
  $for M in range(1, MR):
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
    // Initialize the accumulators with the bias values stored at the start of the packed weights.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    $for M in range(1, MR):
      __m128i vacc${M}x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      $for M in range(MR):
        const ${XINT8_T}* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if DATATYPE == "QU8":
        const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.kernel_zero_point);
        $if SSE < 4 or VARIANT == "LD128":
          const __m128i vzero = _mm_setzero_si128();
      // Main loop: process 8 k values (4 groups of 2) per iteration.
      while (k >= 8 * sizeof(${XINT8_T})) {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
          $else:
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
          a${M} += 8;

        $if VARIANT == "LD128":
          $for K in range(0, 4, 2):
            $if K == 0:
              const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) w);
            $else:
              const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}${K+1}, vzero), vb_zero_point);
              const __m128i vxb${K+1} = _mm_sub_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vzero), vb_zero_point);
            $elif SSE == 4:
              const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K}${K+1});
              const __m128i vxb${K+1} = _mm_srai_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vb${K}${K+1}), 8);
            $else:
              const __m128i vsb${K}${K+1} = _mm_cmpgt_epi8(_mm_setzero_si128(), vb${K}${K+1});
              const __m128i vxb${K} = _mm_unpacklo_epi8(vb${K}${K+1}, vsb${K}${K+1});
              const __m128i vxb${K+1} = _mm_unpackhi_epi8(vb${K}${K+1}, vsb${K}${K+1});

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}));

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K+1}, ${K+1}, ${K+1}, ${K+1})), vxb${K+1}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K+1}, ${K+1}, ${K+1}, ${K+1})), vxb${K+1}));
        $else:
          $for K in range(4):
            $if K == 0:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) w);
            $else:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_cvtepu8_epi16(vb${K}), vb_zero_point);
              $else:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K});
              $else:
                const __m128i vxb${K} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${K}, vb${K}), 8);

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}));

        w = (const void*) ((const ${XINT8_T}*) w + 32);
        k -= 8 * sizeof(${XINT8_T});
      }
      // Remainder: 2, 4, or 6 leftover k values (kc is rounded up to a multiple of 2).
      if (k != 0) {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
          $else:
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const ${XINT8_T}*) w + 8);
        $if DATATYPE == "QU8":
          $if SSE == 4:
            const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
          $else:
            const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
        $else:
          $if SSE == 4:
            const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
          $else:
            const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

        $for M in range(MR):
          $if XOP:
            vacc${M}x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc${M}x0123);
          $else:
            vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

        if (k > 2 * sizeof(${XINT8_T})) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const ${XINT8_T}*) w + 8);
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
            $else:
              const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
          $else:
            $if SSE == 4:
              const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
            $else:
              const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

          $for M in range(MR):
            $if XOP:
              vacc${M}x0123 = _mm_maddd_epi16(
                _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc${M}x0123);
            $else:
              vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

          if (k > 4 * sizeof(${XINT8_T})) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const ${XINT8_T}*) w + 8);
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
              $else:
                const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
              $else:
                const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          }
        }
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    // Requantization: convert to fp32, scale, clamp against the output max, and convert back to integer.
    $for M in range(MR):
      __m128 vscaled${M}x0123 = _mm_cvtepi32_ps(vacc${M}x0123);

    $if DATATYPE == "QC8":
      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      w = (const void*) ((const float*) w + 4);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale0123);
    $else:
      const __m128 vscale = _mm_load_ps(params->${PARAMS_STRUCT}.scale);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->${PARAMS_STRUCT}.output_max_less_zero_point);
    $for M in range(MR):
      vscaled${M}x0123 = _mm_min_ps(vscaled${M}x0123, voutput_max_less_zero_point);

    $for M in range(MR):
      vacc${M}x0123 = _mm_cvtps_epi32(vscaled${M}x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
    $for M in range(0, MR, 2):
      __m128i vacc${M}${min(M+1, MR-1)}x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point);

    $if DATATYPE == "QU8":
      $if MR > 2:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));
    $else:
      $if SSE < 4:
        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
        $for M in range(0, MR, 2):
          vacc${M}${min(M+1, MR-1)}x0123 = _mm_max_epi16(vacc${M}${min(M+1, MR-1)}x0123, voutput_min);

      $if MR > 2:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      $if SSE == 4:
        vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));

    // Store the output tile; the tail path below handles nc < 4 remaining columns.
    if (nc >= 4) {
      $for M in reversed(range(1, MR)):
        $if SSE == 4:
          *((uint32_t*) c${M}) = (uint32_t) _mm_extract_epi32(vout, ${M});
        $else:
          *((uint32_t*) c${M}) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(${M}, ${M}, ${M}, ${M})));
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      c0 = (${XINT8_T}*) ((uintptr_t) c0 + cn_stride);

      a = (const ${XINT8_T}**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        $for M in reversed(range(MR)):
          *((uint16_t*) c${M}) = (uint16_t) _mm_extract_epi16(vout, ${M * 2});
          c${M} += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        $if SSE == 4:
          $for M in reversed(range(MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi8(vout, ${M * 4});
        $else:
          $for M in reversed(range(1, MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi16(vout, ${M * 2});
          *c0 = (${XINT8_T}) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
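
// Usage note: assuming the xngen generator and the -D flag conventions of
// XNNPACK's kernel-generation scripts, a concrete micro-kernel could be
// instantiated from this template roughly as follows (output path illustrative):
//
//   tools/xngen src/qs8-igemm/MRx4c2-sse.c.in -D MR=4 -D SSE=4 -D AVX=0 -D XOP=0 \
//     -D VARIANT=LD64 -D DATATYPE=QS8 -D REQUANTIZATION=FP32 \
//     -o src/qs8-igemm/gen/4x4c2-minmax-fp32-sse41-ld64.c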