// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 8 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_f16_igemm_minmax_ukernel_${MR}x${NR}__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void**restrict a,
    const void*restrict w,
    void*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  uint16_t* c0 = c;
  $for M in range(1, MR):
    uint16_t* c${M} = (uint16_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
    // Initialize accumulators with the bias values stored at the start of the packed weights.
    __m256 vacc0x${ABC[0:8]} = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    $for N in range(8, NR, 8):
      __m256 vacc0x${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + ${N})));
    $for M in range(1, MR):
      $for N in range(0, NR, 8):
        __m256 vacc${M}x${ABC[N:N+8]} = vacc0x${ABC[N:N+8]};
    w = (const uint16_t*) w + ${NR};

    size_t p = ks;
    do {
      // Load the next group of MR row pointers from the indirection buffer.
      $for M in range(MR):
        const uint16_t* restrict a${M} = (const uint16_t*) a[${M}];
        assert(a${M} != NULL);
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const uint16_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      do {
        const __m256 vb${ABC[0:8]} = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
        $for N in range(8, NR, 8):
          const __m256 vb${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + ${N})));
        w = (const uint16_t*) w + ${NR};

        $for M in range(MR):
          const __m256 va${M} = _mm256_cvtph_ps(_mm_set1_epi16((short) *a${M}));
          a${M} += 1;

        // Multiply-accumulate in f32, then round back to f16 to match native f16 arithmetic.
        $for M in range(MR):
          $for N in range(0, NR, 8):
            vacc${M}x${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va${M}, vb${ABC[N:N+8]}, vacc${M}x${ABC[N:N+8]}), _MM_FROUND_NO_EXC));

        k -= sizeof(uint16_t);
      } while (k != 0);
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    const __m256 vscale = _mm256_load_ps(params->avx.scale);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc${M}x${ABC[N:N+8]}, vscale), _MM_FROUND_NO_EXC));

    const __m256 vmin = _mm256_load_ps(params->avx.min);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = _mm256_max_ps(vacc${M}x${ABC[N:N+8]}, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = _mm256_min_ps(vacc${M}x${ABC[N:N+8]}, vmax);

    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(MR)):
        _mm_storeu_si128((__m128i*) c${M}, _mm256_cvtps_ph(vacc${M}x${ABC[0:8]}, _MM_FROUND_NO_EXC));
        $for N in range(8, NR, 8):
          _mm_storeu_si128((__m128i*) (c${M} + ${N}), _mm256_cvtps_ph(vacc${M}x${ABC[N:N+8]}, _MM_FROUND_NO_EXC));
        c${M} = (uint16_t*) ((uintptr_t) c${M} + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= ${NR};
    } else {
      // Partial tile: store the remaining nc (< NR) columns with progressively narrower stores.
      $for LOG2N in reversed(range(NR.bit_length())):
        $if LOG2N == 3:
          $for M in reversed(range(MR)):
            __m128i vh${M}x${ABC[0:8]} = _mm256_cvtps_ph(vacc${M}x${ABC[0:8]}, _MM_FROUND_NO_EXC);
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 4:
              $for M in reversed(range(MR)):
                _mm_storeu_si128((__m128i*) c${M}, _mm256_cvtps_ph(vacc${M}x${ABC[0:8]}, _MM_FROUND_NO_EXC));
                $for N in range(8, 1 << LOG2N, 8):
                  _mm_storeu_si128((__m128i*) (c${M} + ${N}), _mm256_cvtps_ph(vacc${M}x${ABC[N:N+8]}, _MM_FROUND_NO_EXC));

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 8):
                  vacc${M}x${ABC[N:N+8]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+8]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 3:
              $for M in reversed(range(MR)):
                _mm_storeu_si128((__m128i*) c${M}, vh${M}x${ABC[0:8]});

              $for M in reversed(range(MR)):
                vh${M}x${ABC[0:8]} = _mm256_cvtps_ph(vacc${M}x${ABC[8:16]}, _MM_FROUND_NO_EXC);

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 2:
              $for M in reversed(range(MR)):
                _mm_storel_epi64((__m128i*) c${M}, vh${M}x${ABC[0:8]});

              $for M in reversed(range(MR)):
                vh${M}x${ABC[0:8]} = _mm_unpackhi_epi64(vh${M}x${ABC[0:8]}, vh${M}x${ABC[0:8]});

              $for M in reversed(range(MR)):
                c${M} += 4;
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                _mm_storeu_si32(c${M}, vh${M}x${ABC[0:8]});

              $for M in reversed(range(MR)):
                vh${M}x${ABC[0:8]} = _mm_srli_epi64(vh${M}x${ABC[0:8]}, 32);

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                *c${M} = (uint16_t) _mm_extract_epi16(vh${M}x${ABC[0:8]}, 0);
          }

      nc = 0;
    }
  } while (nc != 0);
}