// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

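// Multipass f16 global average pooling (with min/max clamping) microkernel
// template, specialized by tools/xngen. The first pass sums ${ROW_TILE} input
// rows per channel into a scratch buffer, each middle pass folds in
// ${ROW_SUBTILE} more rows, and the last pass applies the scale and clamps
// before storing. For example, ROW_TILE=7, ROW_SUBTILE=7, CHANNEL_TILE=8 is
// expected to generate xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8.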
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 3
$assert ROW_SUBTILE >= 3
$assert ROW_SUBTILE <= ROW_TILE
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_f16_gavgpool_minmax_ukernel_${ROW_TILE}p${ROW_SUBTILE}x__f16c_c${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const void* input,
    size_t input_stride,
    const void* zero,
    void* buffer,
    void* output,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > ${ROW_TILE});
  assert(channels != 0);

  const uint16_t* i0 = input;
  $for M in range(1, ROW_TILE):
    const uint16_t* i${M} = (const uint16_t*) ((uintptr_t) i${M-1} + input_stride);
  const size_t input_increment = ${ROW_TILE} * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);

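  // First pass: sum the first ${ROW_TILE} rows of every channel into the
  // scratch buffer. Accumulation is performed in fp32 via F16C conversions
  // and rounded back to fp16 between additions.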
  uint16_t* b = buffer;
  size_t c = channels;
  for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 8 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 8 else "c = doz(c, %d)") % CHANNEL_TILE}) {
    $for M in range(2):
      $for C in range(0, CHANNEL_TILE, 8):
        const __m256 vi${M}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M})); i${M} += 8;

    $for C in range(0, CHANNEL_TILE, 8):
      const __m256 vi2x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
      __m128i vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(vi0x${ABC[C:C+8]}, vi1x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);

    $for M in range(2, ROW_TILE):
      $for C in range(0, CHANNEL_TILE, 8):
        $if M + 1 != ROW_TILE:
          const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
        vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);

    $for C in range(0, CHANNEL_TILE, 8):
      _mm_store_si128((__m128i*) b, vacc${ABC[C:C+8]}); b += 8;
  }
  $if CHANNEL_TILE > 8:
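    // Channel remainder: accumulate groups of 8; doz() saturates the final
    // decrement, and XNN_OOB_READS permits the trailing partial-group loads.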
    if XNN_UNLIKELY(c != 0) {
      do {
        $for M in range(3):
          const __m256 vi${M}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M})); i${M} += 8;
        __m128i vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(vi0x${ABC[0:8]}, vi1x${ABC[0:8]}), _MM_FROUND_NO_EXC);

        $for M in range(2, ROW_TILE):
          $if M + 1 != ROW_TILE:
            const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
          vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);

        _mm_store_si128((__m128i*) b, vacc${ABC[0:8]}); b += 8;

        c = doz(c, 8);
      } while (c != 0);
    }

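  // Middle passes: each iteration adds ${ROW_SUBTILE} more rows to the
  // running fp16 sums in the buffer, leaving between 1 and ${ROW_SUBTILE}
  // rows for the last pass.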
  for (rows -= ${ROW_TILE}; rows > ${ROW_SUBTILE}; rows -= ${ROW_SUBTILE}) {
    $for M in range(ROW_SUBTILE):
      i${M} = (const uint16_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);

    uint16_t* b = buffer;
    size_t c = channels;
    for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 8 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 8 else "c = doz(c, %d)") % CHANNEL_TILE}) {
      __m128i vacc${ABC[0:8]} = _mm_loadu_si128((const __m128i*) b);
      $for C in range(8, CHANNEL_TILE, 8):
        __m128i vacc${ABC[C:C+8]} = _mm_loadu_si128((const __m128i*) (b + ${C}));

      $for C in range(0, CHANNEL_TILE, 8):
        const __m256 vi0x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;

      $for M in range(ROW_SUBTILE):
        $for C in range(0, CHANNEL_TILE, 8):
          $if M + 1 != ROW_SUBTILE:
            const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
          vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);

      $for C in range(0, CHANNEL_TILE, 8):
        _mm_store_si128((__m128i*) b, vacc${ABC[C:C+8]}); b += 8;
    }
    $if CHANNEL_TILE > 8:
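      // Channel remainder: reload the partial sums from the buffer and
      // accumulate groups of 8, as in the first pass.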
      if XNN_UNLIKELY(c != 0) {
        do {
          __m128i vacc${ABC[0:8]} = _mm_loadu_si128((const __m128i*) b);
          const __m256 vi0x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;

          $for M in range(ROW_SUBTILE):
            $if M + 1 != ROW_SUBTILE:
              const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
            vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);

          _mm_store_si128((__m128i*) b, vacc${ABC[0:8]}); b += 8;

          c = doz(c, 8);
        } while (c != 0);
      }
  }

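  // Last pass: at most ${ROW_SUBTILE} rows remain. Re-point the row pointers
  // at the remaining input; any pointer beyond the remaining rows is
  // redirected to the zero buffer so it contributes nothing to the sum.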
  i0 = (const uint16_t*) ((uintptr_t) i${ROW_TILE - ROW_SUBTILE} + input_increment);
  $for M in range(1, ROW_SUBTILE):
    i${M} = (const uint16_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);
    $if M % 2 == 1:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = (const uint16_t*) zero;
      }
    $else:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = (const uint16_t*) zero;
      }

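  // The scale factor (typically the reciprocal of the total row count) and
  // the output clamping bounds, each broadcast across all 8 lanes by the
  // caller.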
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  const __m256 vmin = _mm256_load_ps(params->avx.min);
  const __m256 vmax = _mm256_load_ps(params->avx.max);
  for (; channels >= ${CHANNEL_TILE}; channels -= ${CHANNEL_TILE}) {
    $for C in range(0, CHANNEL_TILE, 8):
      __m128i vacc${ABC[C:C+8]} = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;

    $for C in range(0, CHANNEL_TILE, 8):
      const __m256 vi0x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;

    $for M in range(ROW_SUBTILE):
      $for C in range(0, CHANNEL_TILE, 8):
        $if M + 1 != ROW_SUBTILE:
          const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
        vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);

    $for C in range(0, CHANNEL_TILE, 8):
      vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vscale), _MM_FROUND_NO_EXC);

    $for C in range(0, CHANNEL_TILE, 8):
      __m256 vout${ABC[C:C+8]} = _mm256_max_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vmin);

    $for C in range(0, CHANNEL_TILE, 8):
      vout${ABC[C:C+8]} = _mm256_min_ps(vout${ABC[C:C+8]}, vmax);

    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC));
    $for C in range(8, CHANNEL_TILE, 8):
      _mm_storeu_si128((__m128i*) ((uint16_t*) output + ${C}), _mm256_cvtps_ph(vout${ABC[C:C+8]}, _MM_FROUND_NO_EXC));
    output = (uint16_t*) output + ${CHANNEL_TILE};
  }
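  // Channel remainder: sum, scale, and clamp the last channels in groups of
  // 8, finishing with a partial 4/2/1-element store for the final group.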
  if XNN_UNLIKELY(channels != 0) {
    ${"do " if CHANNEL_TILE > 8 else ""}{
      __m128i vacc${ABC[0:8]} = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;

      const __m256 vi0x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
      $for M in range(ROW_SUBTILE):
        $if M + 1 != ROW_SUBTILE:
          const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
        vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);

      vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vscale), _MM_FROUND_NO_EXC);
      __m256 vout${ABC[0:8]} = _mm256_max_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vmin);
      vout${ABC[0:8]} = _mm256_min_ps(vout${ABC[0:8]}, vmax);

      $if CHANNEL_TILE > 8:
        if XNN_LIKELY(channels >= 8) {
          _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC));
          output = (uint16_t*) output + 8;
          channels -= 8;
        } else {
          __m128i vh${ABC[0:8]} = _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC);
          if (channels & 4) {
            _mm_storel_epi64((__m128i*) output, vh${ABC[0:8]});
            output = (uint16_t*) output + 4;
            vh${ABC[0:8]} = _mm_unpackhi_epi64(vh${ABC[0:8]}, vh${ABC[0:8]});
          }
          if (channels & 2) {
            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh${ABC[0:8]});
            output = (uint16_t*) output + 2;
            vh${ABC[0:8]} = _mm_srli_epi64(vh${ABC[0:8]}, 32);
          }
          if (channels & 1) {
            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh${ABC[0:8]}, 0);
          }
          channels = 0;
        }
      $else:
        __m128i vh${ABC[0:8]} = _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC);
        if (channels & 4) {
          _mm_storel_epi64((__m128i*) output, vh${ABC[0:8]});
          output = (uint16_t*) output + 4;
          vh${ABC[0:8]} = _mm_unpackhi_epi64(vh${ABC[0:8]}, vh${ABC[0:8]});
        }
        if (channels & 2) {
          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh${ABC[0:8]});
          output = (uint16_t*) output + 2;
          vh${ABC[0:8]} = _mm_srli_epi64(vh${ABC[0:8]}, 32);
        }
        if (channels & 1) {
          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh${ABC[0:8]}, 0);
        }
    }${" while (channels != 0);" if CHANNEL_TILE > 8 else ""}
  }
}