// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert VARIANT in ["LD256", "EXTENDED"]
$assert MR <= 4
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>


$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
void xnn_qs8_igemm${GEMM_SUFFIX}_minmax_ukernel_${MR}x16c8__avx512skx(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm${GEMM_SUFFIX}_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The kernel consumes A in groups of 8 bytes (c8), so round kc up to a multiple of 8.
  kc = round_up_po2(kc, 8);
  int8_t* c0 = c;
  $for M in range(1, MR):
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  const __mmask16 vbias_mask = _cvtu32_mask16(0x1111);
  const __mmask16 vblend_mask = _cvtu32_mask16(0xAAAA);
  const __m512i vmultiplier = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.multiplier));
  const __m512i vrounding = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.rounding));
  const __m512i vremainder_mask = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
  const __m512i vremainder_threshold = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
  const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
  $if MR > 1:
    const __m512i voutput_zero_point = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
    const __m512i voutput_min = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.output_min));
    const __m512i voutput_max = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) params->sse2.output_max));
  $else:
    const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
    const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
    const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
  do {
    // Initialize accumulators with bias: the expand-load with mask 0x1111 places one bias value
    // into the lowest 32-bit element of each 128-bit lane, 4 channels per vector, 16 in total.
    __m512i vacc0x0123 = _mm512_maskz_expandloadu_epi32(vbias_mask, w);
    $for N in range(4, 16, 4):
      __m512i vacc0x${ABC[N:N+4]} = _mm512_maskz_expandloadu_epi32(vbias_mask, (const void*) ((uintptr_t) w + ${N} * sizeof(int32_t)));
    $for M in range(1, MR):
      $for N in range(0, 16, 4):
        __m512i vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
    w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));

    size_t p = ks;
    do {
      $for M in range(MR):
        const int8_t* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = 0;
      while (k < kc) {
        $for M in range(MR):
          const __m512i va${M} = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a${M})));
          a${M} += 8;

        $for N in range(0, 16, 4):
          $if VARIANT == "EXTENDED":
            $if N == 0:
              const __m512i vb${ABC[N:N+4]} = _mm512_load_si512((const __m512i*) w);
            $else:
              const __m512i vb${ABC[N:N+4]} = _mm512_load_si512((const __m512i*) ((uintptr_t) w + ${N * 8} * sizeof(int16_t)));
          $else:
            $if N == 0:
              const __m512i vb${ABC[N:N+4]} = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) w));
            $else:
              const __m512i vb${ABC[N:N+4]} = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((uintptr_t) w + ${N * 8} * sizeof(int8_t))));

          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = _mm512_add_epi32(vacc${M}x${ABC[N:N+4]}, _mm512_madd_epi16(va${M}, vb${ABC[N:N+4]}));

        $if VARIANT == "EXTENDED":
          w = (const void*) ((uintptr_t) w + 128 * sizeof(int16_t));
        $else:
          w = (const void*) ((uintptr_t) w + 128 * sizeof(int8_t));
        k += 8 * sizeof(int8_t);
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    // Horizontally add the per-lane partial sums of each output channel; the unpack/add steps
    // leave the 16 channels in interleaved 084C 195D 2A6E 3B7F order within each accumulator.
    $for M in range(MR):
      const __m512i vacc${M}x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc${M}x0123, vacc${M}x4567), _mm512_unpackhi_epi32(vacc${M}x0123, vacc${M}x4567));
      const __m512i vacc${M}x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc${M}x89AB, vacc${M}xCDEF), _mm512_unpackhi_epi32(vacc${M}x89AB, vacc${M}xCDEF));

    $for M in range(MR):
      __m512i vacc${M}x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc${M}x04152637, vacc${M}x8C9DAEBF), _mm512_unpackhi_epi32(vacc${M}x04152637, vacc${M}x8C9DAEBF));

    $for M in range(MR):
      const __m512i vacc${M}x88CC99DDAAEEBBFF = _mm512_shuffle_epi32(vacc${M}x084C195D2A6E3B7F, _MM_SHUFFLE(3, 3, 1, 1));

    // Requantize: 64-bit multiply by the Q31 multiplier with rounding, then arithmetic shift
    // right with remainder correction toward nearest.
    $for M in range(MR):
      const __m512i vprod${M}x04152637 = _mm512_add_epi64(_mm512_mul_epi32(vacc${M}x084C195D2A6E3B7F, vmultiplier), vrounding);
    $for M in range(MR):
      const __m512i vprod${M}x8C9DAEBF = _mm512_add_epi64(_mm512_mul_epi32(vacc${M}x88CC99DDAAEEBBFF, vmultiplier), vrounding);

    $for M in range(MR):
      const __m512i vq31prod${M}x04152637 = _mm512_srli_epi64(vprod${M}x04152637, 31);
      const __m512i vq31prod${M}x8C9DAEBF = _mm512_add_epi64(vprod${M}x8C9DAEBF, vprod${M}x8C9DAEBF);

    $for M in range(MR):
      const __m512i vq31prod${M}x084C195D2A6E3B7F = _mm512_mask_blend_epi32(vblend_mask, vq31prod${M}x04152637, vq31prod${M}x8C9DAEBF);

    $for M in range(MR):
      const __m512i vrem${M}x084C195D2A6E3B7F =
        _mm512_add_epi32(_mm512_and_si512(vq31prod${M}x084C195D2A6E3B7F, vremainder_mask), _mm512_srai_epi32(vq31prod${M}x084C195D2A6E3B7F, 31));

    $for M in range(MR):
      vacc${M}x084C195D2A6E3B7F = _mm512_sra_epi32(vq31prod${M}x084C195D2A6E3B7F, vshift);

    const __m512i vminus_one = _mm512_set1_epi32(-1);
    $for M in range(MR):
      vacc${M}x084C195D2A6E3B7F = _mm512_mask_sub_epi32(vacc${M}x084C195D2A6E3B7F, _mm512_cmpgt_epi32_mask(vrem${M}x084C195D2A6E3B7F, vremainder_threshold), vacc${M}x084C195D2A6E3B7F, vminus_one);

    // Add the output zero point and clamp while packing down to 16 bits with signed saturation.
    $if MR == 1:
      __m256i vacc0x084C2A6E195D3B7F = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0x084C195D2A6E3B7F), _mm512_extracti32x8_epi32(vacc0x084C195D2A6E3B7F, 1)), voutput_zero_point);
      vacc0x084C2A6E195D3B7F = _mm256_min_epi16(_mm256_max_epi16(vacc0x084C2A6E195D3B7F, voutput_min), voutput_max);
    $else:
      $for M in range(0, MR, 2):
        __m512i vacc${M}${min(M+1, MR-1)}x084Cx195Dx2A6Ex3B7F = _mm512_adds_epi16(_mm512_packs_epi32(vacc${M}x084C195D2A6E3B7F, vacc${min(M+1, MR-1)}x084C195D2A6E3B7F), voutput_zero_point);

      $for M in range(0, MR, 2):
        vacc${M}${min(M+1, MR-1)}x084Cx195Dx2A6Ex3B7F = _mm512_min_epi16(_mm512_max_epi16(vacc${M}${min(M+1, MR-1)}x084Cx195Dx2A6Ex3B7F, voutput_min), voutput_max);

    // Pack to 8 bits with signed saturation, then permute/shuffle the interleaved channels
    // back into 0..F order.
    $if MR > 2:
      __m512i vout012${min(3, MR-1)}x084Cx195Dx2A6Ex3B7F = _mm512_packs_epi16(vacc01x084Cx195Dx2A6Ex3B7F, vacc2${min(3, MR-1)}x084Cx195Dx2A6Ex3B7F);
      vout012${min(3, MR-1)}x084Cx195Dx2A6Ex3B7F = _mm512_permutexvar_epi32(_mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0), vout012${min(3, MR-1)}x084Cx195Dx2A6Ex3B7F);
      const __m512i vout012${min(3, MR-1)}x0123456789ABCDEF = _mm512_shuffle_epi8(vout012${min(3, MR-1)}x084Cx195Dx2A6Ex3B7F, _mm512_set_epi8(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0, 15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0, 15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0, 15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0));
    $elif MR == 2:
      const __m256i vout01x084Cx2A6Ex195Dx3B7F = _mm256_packs_epi16(_mm512_castsi512_si256(vacc01x084Cx195Dx2A6Ex3B7F), _mm512_extracti32x8_epi32(vacc01x084Cx195Dx2A6Ex3B7F, 1));
      const __m256i vout01x084C2A6E195D3B7F = _mm256_permutevar8x32_epi32(vout01x084Cx2A6Ex195Dx3B7F, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
      const __m256i vout01x0123456789ABCDEF = _mm256_shuffle_epi8(vout01x084C2A6E195D3B7F, _mm256_set_epi8(15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0, 15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0));
    $elif MR == 1:
      const __m128i vout0x084C2A6E195D3B7F = _mm_packs_epi16(_mm256_castsi256_si128(vacc0x084C2A6E195D3B7F), _mm256_extracti128_si256(vacc0x084C2A6E195D3B7F, 1));
      const __m128i vout0x0123456789ABCDEF = _mm_shuffle_epi8(vout0x084C2A6E195D3B7F, _mm_set_epi8(15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0));

    $if MR > 2:
      if (nc >= 16) {
        // Full 16-column tile: store one 16-byte row per output pointer.
        $for M in reversed(range(1, MR)):
          _mm_storeu_si128((__m128i*) c${M}, _mm512_extracti32x4_epi32(vout012${min(3, MR-1)}x0123456789ABCDEF, ${M}));
        _mm_storeu_si128((__m128i*) c0, _mm512_castsi512_si128(vout012${min(3, MR-1)}x0123456789ABCDEF));

        $for M in reversed(range(MR)):
          c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

        a = (const int8_t**restrict) ((uintptr_t) a - ks);

        nc -= 16;
      } else {
        // Prepare mask for valid 8-bit elements (depends on nc).
        __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << (nc + ${16 * (MR - 1)})) - (UINT64_C(1) << ${16 * (MR - 1)})));

        $for M in reversed(range(1, MR)):
          _mm512_mask_storeu_epi8(c${M} - ${M * 16}, vmask, vout012${min(3, MR-1)}x0123456789ABCDEF);
          vmask = _kshiftri_mask64(vmask, 16);
        _mm512_mask_storeu_epi8(c0, vmask, vout012${min(3, MR-1)}x0123456789ABCDEF);

        nc = 0;
      }
    $elif MR == 2:
      if (nc >= 16) {
        _mm_storeu_si128((__m128i*) c1, _mm256_extracti128_si256(vout01x0123456789ABCDEF, 1));
        _mm_storeu_si128((__m128i*) c0, _mm256_castsi256_si128(vout01x0123456789ABCDEF));

        $for M in reversed(range(MR)):
          c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

        a = (const int8_t**restrict) ((uintptr_t) a - ks);

        nc -= 16;
      } else {
        // Prepare mask for valid 8-bit elements (depends on nc).
        __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << (nc + 16)) - (UINT32_C(1) << 16)));

        _mm256_mask_storeu_epi8(c1 - 16, vmask, vout01x0123456789ABCDEF);
        vmask = _kshiftri_mask64(vmask, 16);
        _mm256_mask_storeu_epi8(c0, vmask, vout01x0123456789ABCDEF);

        nc = 0;
      }
    $elif MR == 1:
      if (nc >= 16) {
        _mm_storeu_si128((__m128i*) c0, vout0x0123456789ABCDEF);

        $for M in range(MR):
          c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

        a = (const int8_t**restrict) ((uintptr_t) a - ks);

        nc -= 16;
      } else {
        // Prepare mask for valid 8-bit elements (depends on nc).
        const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1)));

        _mm_mask_storeu_epi8(c0, vmask, vout0x0123456789ABCDEF);

        nc = 0;
      }
  } while (nc != 0);
}