// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

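// Template parameters (checked by the $assert directives below):
//   SSE            - minimum SSE level (2 or 4)
//   XOP, AVX       - whether AMD XOP / Intel AVX variants of the ISA are used
//   REQUANTIZATION - requantization scheme; only FP32 is supported here
//   DATATYPE       - QC8 (per-channel quantized int8), QS8 (int8), or QU8 (uint8)
//   VARIANT        - how weights are loaded: LD64, LD128, or EXTENDED
//                    (weights pre-widened to int16)
//   MR             - rows of A/C processed per call (at most 4)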
$assert SSE in [2, 4]
$assert not XOP or AVX
$assert not AVX or SSE == 4
$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert VARIANT in ["LD64", "LD128", "EXTENDED"]
$assert MR <= 4
#include <assert.h>

$if XOP:
  #if defined(__GNUC__) || defined(__clang__)
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <ammintrin.h>
  #endif
$else:
  $SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
  #include <${SSE_HEADER}>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>



$LOAD_SUFFIX = {"LD128": "_ld128", "LD64": "_ld64", "EXTENDED": ""}[VARIANT]
$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
$PARAMS_UNION = "xnn_qs8_minmax_params" if DATATYPE == "QC8" else "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$PARAMS_STRUCT = ("" if DATATYPE == "QC8" else "fp32_") + ("sse4" if SSE == 4 and DATATYPE != "QU8" else "sse2")
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
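// GEMM microkernel computing an ${MR}x4 tile of quantized output with FP32
// requantization. The packed weights w hold 4 int32 bias values followed by
// the weights in a 4-column-by-2-K-element ("4c2") interleaved layout.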
void xnn_${DATATYPE.lower()}_gemm${GEMM_SUFFIX}_minmax_fp32_ukernel_${MR}x4c2__${ISA}${LOAD_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    const ${XINT8_T}* restrict a,
    size_t a_stride,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

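  // kc is rounded up to a multiple of 2 to match the 2-element K blocking of
  // the packed weights, which are padded to the same boundary.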
  kc = round_up_po2(kc, 2);
  const ${XINT8_T}* a0 = a;
  ${XINT8_T}* c0 = c;
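  // Set up per-row pointers; when mr < ${MR}, excess rows alias the previous
  // row, so their loads and stores harmlessly duplicate valid rows.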
  $for M in range(1, MR):
    const ${XINT8_T}* a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M-1} + a_stride);
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

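  // Each iteration of the outer loop produces a tile of up to ${MR}x4 outputs.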
  do {
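    // The packed weights start with the 4 per-column bias values; initialize
    // every row's accumulator with them.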
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    $for M in range(1, MR):
      __m128i vacc${M}x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    $if DATATYPE == "QU8":
      const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.kernel_zero_point);
      $if SSE < 4 or VARIANT == "LD128":
        const __m128i vzero = _mm_setzero_si128();
    while (k >= 8 * sizeof(${XINT8_T})) {
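      // Main loop: consume 8 K-elements per row per iteration, widening the
      // 8-bit inputs to 16 bits (zero-extended for QU8, sign-extended otherwise).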
      $for M in range(MR):
        const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
        $if DATATYPE == "QU8":
          $if SSE == 4:
            const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
          $else:
            const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
        $else:
          $if SSE == 4:
            const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
          $else:
            const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
        a${M} += 8;

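      // For each pair of K-elements, broadcast that pair from every row of A
      // and multiply-accumulate it against the matching 4x2 block of B.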
      $if VARIANT == "LD128":
        $for K in range(0, 4, 2):
          $if K == 0:
            const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) w);
          $else:
            const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
          $if DATATYPE == "QU8":
            const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}${K+1}, vzero), vb_zero_point);
            const __m128i vxb${K+1} = _mm_sub_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vzero), vb_zero_point);
          $elif SSE == 4:
            const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K}${K+1});
            const __m128i vxb${K+1} = _mm_srai_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vb${K}${K+1}), 8);
          $else:
            const __m128i vsb${K}${K+1} = _mm_cmpgt_epi8(_mm_setzero_si128(), vb${K}${K+1});
            const __m128i vxb${K} = _mm_unpacklo_epi8(vb${K}${K+1}, vsb${K}${K+1});
            const __m128i vxb${K+1} = _mm_unpackhi_epi8(vb${K}${K+1}, vsb${K}${K+1});

          $for M in range(MR):
            $if XOP:
              vacc${M}x0123 = _mm_maddd_epi16(
                _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}, vacc${M}x0123);
            $else:
              vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}));

          $for M in range(MR):
            $if XOP:
              vacc${M}x0123 = _mm_maddd_epi16(
                _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K+1}, ${K+1}, ${K+1}, ${K+1})), vxb${K+1}, vacc${M}x0123);
            $else:
              vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K+1}, ${K+1}, ${K+1}, ${K+1})), vxb${K+1}));
      $else:
        $for K in range(4):
          $if VARIANT == "LD64":
            $if K == 0:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) w);
            $else:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_cvtepu8_epi16(vb${K}), vb_zero_point);
              $else:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K});
              $else:
                const __m128i vxb${K} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${K}, vb${K}), 8);
          $elif VARIANT == "EXTENDED":
            $if K == 0:
              const __m128i vxb${K} = _mm_load_si128((const __m128i*) w);
            $else:
              const __m128i vxb${K} = _mm_load_si128((const __m128i*) ((const int16_t*) w + ${K * 8}));

          $for M in range(MR):
            $if XOP:
              vacc${M}x0123 = _mm_maddd_epi16(
                _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}, vacc${M}x0123);
            $else:
              vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}));

      $if VARIANT == "EXTENDED":
        w = (const void*) ((const int16_t*) w + 32);
      $else:
        w = (const void*) ((const ${XINT8_T}*) w + 32);
      k -= 8 * sizeof(${XINT8_T});
    }
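    // Remainder: kc is a multiple of 2, so 2, 4, or 6 K-elements may remain;
    // process them one pair at a time.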
    if (k != 0) {
      $for M in range(MR):
        const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
        $if DATATYPE == "QU8":
          $if SSE == 4:
            const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
          $else:
            const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
        $else:
          $if SSE == 4:
            const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
          $else:
            const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
        a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + k);

      $if VARIANT == "EXTENDED":
        const __m128i vxb0 = _mm_load_si128((const __m128i*) w);
        w = (const void*) ((const int16_t*) w + 8);
      $else:
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        $if DATATYPE == "QU8":
          $if SSE == 4:
            const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
          $else:
            const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
        $else:
          $if SSE == 4:
            const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
          $else:
            const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
        w = (const void*) ((const ${XINT8_T}*) w + 8);

      $for M in range(MR):
        $if XOP:
          vacc${M}x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc${M}x0123);
        $else:
          vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

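      // If 4 or 6 K-elements remain, process the second pair.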
      if (k > 2 * sizeof(${XINT8_T})) {
        $if VARIANT == "EXTENDED":
          const __m128i vxb1 = _mm_load_si128((const __m128i*) w);
          w = (const void*) ((const int16_t*) w + 8);
        $else:
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
            $else:
              const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
          $else:
            $if SSE == 4:
              const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
            $else:
              const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
          w = (const void*) ((const ${XINT8_T}*) w + 8);

        $for M in range(MR):
          $if XOP:
            vacc${M}x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc${M}x0123);
          $else:
            vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

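        // If 6 K-elements remain, process the third pair.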
        if (k > 4 * sizeof(${XINT8_T})) {
          $if VARIANT == "EXTENDED":
            const __m128i vxb2 = _mm_load_si128((const __m128i*) w);
            w = (const void*) ((const int16_t*) w + 8);
          $else:
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
              $else:
                const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
              $else:
                const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
            w = (const void*) ((const ${XINT8_T}*) w + 8);

          $for M in range(MR):
            $if XOP:
              vacc${M}x0123 = _mm_maddd_epi16(
                _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc${M}x0123);
            $else:
              vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }

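    // FP32 requantization: convert the int32 accumulators to float, scale them
    // (per channel for QC8, per tensor otherwise), clamp against the output
    // maximum, and round back to int32.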
    $for M in range(MR):
      __m128 vscaled${M}x0123 = _mm_cvtepi32_ps(vacc${M}x0123);

    $if DATATYPE == "QC8":
      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      w = (const void*) ((const float*) w + 4);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale0123);
    $else:
      const __m128 vscale = _mm_load_ps(params->${PARAMS_STRUCT}.scale);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->${PARAMS_STRUCT}.output_max_less_zero_point);
    $for M in range(MR):
      vscaled${M}x0123 = _mm_min_ps(vscaled${M}x0123, voutput_max_less_zero_point);

    $for M in range(MR):
      vacc${M}x0123 = _mm_cvtps_epi32(vscaled${M}x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
    $for M in range(0, MR, 2):
      __m128i vacc${M}${min(M+1, MR-1)}x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point);

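    // Pack the zero-point-adjusted 16-bit values down to 8 bits (unsigned
    // saturation for QU8, signed otherwise) and apply the output minimum.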
    $if DATATYPE == "QU8":
      $if MR > 2:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));
    $else:
      $if SSE < 4:
        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
        $for M in range(0, MR, 2):
          vacc${M}${min(M+1, MR-1)}x0123 = _mm_max_epi16(vacc${M}${min(M+1, MR-1)}x0123, voutput_min);

      $if MR > 2:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      $if SSE == 4:
        vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));

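    // Store the tile: row M occupies bytes 4*M..4*M+3 of vout. The full-width
    // path writes 4 bytes per row; the tail handles nc of 1 to 3.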
    if (nc >= 4) {
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      $for M in range(1, MR):
        $if SSE == 4:
          *((uint32_t*) c${M}) = (uint32_t) _mm_extract_epi32(vout, ${M});
        $else:
          vout = _mm_srli_si128(vout, 4);
          *((uint32_t*) c${M}) = (uint32_t) _mm_cvtsi128_si32(vout);

      $for M in range(MR):
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);

      $for M in range(MR):
        a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        $for M in range(MR):
          *((uint16_t*) c${M}) = (uint16_t) _mm_extract_epi16(vout, ${M * 2});
          c${M} += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        $if SSE == 4:
          $for M in range(MR):
            *c${M} = (${XINT8_T}) _mm_extract_epi8(vout, ${M * 4});
        $else:
          *c0 = (${XINT8_T}) _mm_cvtsi128_si32(vout);
          $for M in range(1, MR):
            *c${M} = (${XINT8_T}) _mm_extract_epi16(vout, ${M * 2});
      }

      nc = 0;
    }
  } while (nc != 0);
}