// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 3
$assert REQUANTIZATION == "FP32"
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>


$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$_MM_CVTEPX8_EPI16 = {"QS8": "_mm_cvtepi8_epi16", "QU8": "_mm_cvtepu8_epi16"}[DATATYPE]
$_MM_CVTEPX16_EPI32 = {"QS8": "_mm_cvtepi16_epi32", "QU8": "_mm_cvtepu16_epi32"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_gavgpool_minmax_fp32_ukernel_${ROW_TILE}x__sse41_c${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const ${XINT8_T}* input,
    size_t input_stride,
    const ${XINT8_T}* zero,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= ${ROW_TILE});
  assert(channels != 0);

  const ${XINT8_T}* i0 = input;
  $for M in range(1, ROW_TILE):
    const ${XINT8_T}* i${M} = (const ${XINT8_T}*) ((uintptr_t) i${M-1} + input_stride);
    $if M % 2 == 1:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = zero;
      }
    $else:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = zero;
      }

  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
  for (; channels >= ${CHANNEL_TILE}; channels -= ${CHANNEL_TILE}) {
    $for M in range(2):
      const __m128i vxi${M}x${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) i${M}));
      $for C in range(8, CHANNEL_TILE, 8):
        const __m128i vxi${M}x${ABC[C:C+8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) (i${M} + ${C})));
      i${M} += ${CHANNEL_TILE};

    __m128i vacc${ABC[0:8]} = _mm_add_epi16(vxi0x${ABC[0:8]}, vxi1x${ABC[0:8]});
    const __m128i vxi2x${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) i2));
    $for C in range(8, CHANNEL_TILE, 8):
      __m128i vacc${ABC[C:C+8]} = _mm_add_epi16(vxi0x${ABC[C:C+8]}, vxi1x${ABC[C:C+8]});
      const __m128i vxi2x${ABC[C:C+8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) (i2 + ${C})));
    i2 += ${CHANNEL_TILE};

    $for M in range(3, ROW_TILE):
      vacc${ABC[0:8]} = _mm_add_epi16(vacc${ABC[0:8]}, vxi${M-1}x${ABC[0:8]});
      const __m128i vxi${M}x${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) i${M}));
      $for C in range(8, CHANNEL_TILE, 8):
        vacc${ABC[C:C+8]} = _mm_add_epi16(vacc${ABC[C:C+8]}, vxi${M-1}x${ABC[C:C+8]});
        const __m128i vxi${M}x${ABC[C:C+8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) (i${M} + ${C})));
      i${M} += ${CHANNEL_TILE};

    $for C in range(0, CHANNEL_TILE, 8):
      vacc${ABC[C:C+8]} = _mm_add_epi16(vacc${ABC[C:C+8]}, vxi${ROW_TILE-1}x${ABC[C:C+8]});

    $if DATATYPE == "QU8":
      const __m128i vzero = _mm_setzero_si128();
    $for C in range(0, CHANNEL_TILE, 8):
      __m128i vacc${ABC[C:C+4]} = ${_MM_CVTEPX16_EPI32}(vacc${ABC[C:C+8]});
      $if DATATYPE == "QS8":
        __m128i vacc${ABC[C+4:C+8]} = _mm_srai_epi32(_mm_unpackhi_epi16(vacc${ABC[C:C+8]}, vacc${ABC[C:C+8]}), 16);
      $else:
        __m128i vacc${ABC[C+4:C+8]} = _mm_unpackhi_epi16(vacc${ABC[C:C+8]}, vzero);

    $for C in range(0, CHANNEL_TILE, 4):
      vacc${ABC[C:C+4]} = _mm_add_epi32(vacc${ABC[C:C+4]}, vinit_bias);

    $for C in range(0, CHANNEL_TILE, 4):
      __m128 vfpacc${ABC[C:C+4]} = _mm_cvtepi32_ps(vacc${ABC[C:C+4]});

    $for C in range(0, CHANNEL_TILE, 4):
      vfpacc${ABC[C:C+4]} = _mm_mul_ps(vfpacc${ABC[C:C+4]}, vscale);

    $for C in range(0, CHANNEL_TILE, 4):
      vfpacc${ABC[C:C+4]} = _mm_min_ps(vfpacc${ABC[C:C+4]}, voutput_max_less_zero_point);

    $for C in range(0, CHANNEL_TILE, 4):
      vacc${ABC[C:C+4]} = _mm_cvtps_epi32(vfpacc${ABC[C:C+4]});

    $for C in range(0, CHANNEL_TILE, 8):
      __m128i vout${ABC[C:C+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]}), voutput_zero_point);

    $for C in range(0, CHANNEL_TILE, 16):
      $if C + 8 < CHANNEL_TILE:
        __m128i vout${ABC[C:C+16]} = ${_MM_PACKXS_EPI16}(vout${ABC[C:C+8]}, vout${ABC[C+8:C+16]});
      $else:
        __m128i vout${ABC[C:C+8]}${ABC[C:C+8]} = ${_MM_PACKXS_EPI16}(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});

    $for C in range(0, CHANNEL_TILE, 16):
      $if C + 8 < CHANNEL_TILE:
        vout${ABC[C:C+16]} = ${_MM_MAX_EPX8}(vout${ABC[C:C+16]}, voutput_min);
      $else:
        vout${ABC[C:C+8]}${ABC[C:C+8]} = ${_MM_MAX_EPX8}(vout${ABC[C:C+8]}${ABC[C:C+8]}, voutput_min);

    $if CHANNEL_TILE > 8:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $else:
      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
    $for C in range(16, CHANNEL_TILE, 16):
      $if C + 8 < CHANNEL_TILE:
        _mm_storeu_si128((__m128i*) (output + ${C}), vout${ABC[C:C+16]});
      $else:
        _mm_storel_epi64((__m128i*) (output + ${C}), vout${ABC[C:C+8]}${ABC[C:C+8]});
    output += ${CHANNEL_TILE};
  }
  if XNN_UNLIKELY(channels != 0) {
    ${"do " if CHANNEL_TILE > 8 else ""}{
      $for M in range(2):
        const __m128i vxi${M}x${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) i${M}));
        i${M} += 8;

      __m128i vacc${ABC[0:8]} = _mm_add_epi16(vxi0x${ABC[0:8]}, vxi1x${ABC[0:8]});
      const __m128i vxi2x${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;

      $for M in range(3, ROW_TILE):
        vacc${ABC[0:8]} = _mm_add_epi16(vacc${ABC[0:8]}, vxi${M-1}x${ABC[0:8]});
        const __m128i vxi${M}x${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) i${M}));
        i${M} += 8;

      vacc${ABC[0:8]} = _mm_add_epi16(vacc${ABC[0:8]}, vxi${ROW_TILE-1}x${ABC[0:8]});

      __m128i vacc${ABC[0:4]} = ${_MM_CVTEPX16_EPI32}(vacc${ABC[0:8]});
      $if DATATYPE == "QS8":
        __m128i vacc${ABC[4:8]} = _mm_srai_epi32(_mm_unpackhi_epi16(vacc${ABC[0:8]}, vacc${ABC[0:8]}), 16);
      $else:
        __m128i vacc${ABC[4:8]} = _mm_unpackhi_epi16(vacc${ABC[0:8]}, _mm_setzero_si128());

      vacc${ABC[0:4]} = _mm_add_epi32(vacc${ABC[0:4]}, vinit_bias);
      vacc${ABC[4:8]} = _mm_add_epi32(vacc${ABC[4:8]}, vinit_bias);

      __m128 vfpacc${ABC[0:4]} = _mm_cvtepi32_ps(vacc${ABC[0:4]});
      __m128 vfpacc${ABC[4:8]} = _mm_cvtepi32_ps(vacc${ABC[4:8]});

      vfpacc${ABC[0:4]} = _mm_mul_ps(vfpacc${ABC[0:4]}, vscale);
      vfpacc${ABC[4:8]} = _mm_mul_ps(vfpacc${ABC[4:8]}, vscale);

      vfpacc${ABC[0:4]} = _mm_min_ps(vfpacc${ABC[0:4]}, voutput_max_less_zero_point);
      vfpacc${ABC[4:8]} = _mm_min_ps(vfpacc${ABC[4:8]}, voutput_max_less_zero_point);

      vacc${ABC[0:4]} = _mm_cvtps_epi32(vfpacc${ABC[0:4]});
      vacc${ABC[4:8]} = _mm_cvtps_epi32(vfpacc${ABC[4:8]});

      __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);

      __m128i vout${ABC[0:8]}${ABC[0:8]} = ${_MM_PACKXS_EPI16}(vout${ABC[0:8]}, vout${ABC[0:8]});
      vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MAX_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);

      $if CHANNEL_TILE > 8:
        if XNN_LIKELY(channels >= 8) {
          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
          output += 8;
          channels -= 8;
        } else {
          if (channels & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (channels & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0));
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (channels & 1) {
            *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
            output += 1;
          }
          channels = 0;
        }
      $else:
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0));
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (channels & 1) {
          *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
        }
    }${" while (channels != 0);" if CHANNEL_TILE > 8 else ""}
  }
}