// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert not CHANNELWISE or REQUANTIZATION == "FP32"
$assert DUP in ["DUP", "LD1R", "LD2R", "LD4R"]
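// Template parameters (see their uses below):
//   MR, NR - rows of A and columns of B handled per output tile
//   MLA - if set, unroll KC by 16 and fuse pairs of VMULL via VMLAL
//   DUP - how 2-element groups of A are broadcast (VDUP lane vs. LD1R/LD2R/LD4R)
//   REQUANTIZATION / ARMV8 / CHANNELWISE - requantization scheme, ISA level,
//   and per-channel (qc8) vs. per-tensor (qs8) scaling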
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>

$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("neonv8" if REQUANTIZATION == "FP32" and ARMV8 else "neon")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$ISA = "neonv8" if ARMV8 else "neon"
void xnn_${DATATYPE}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${DUP.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

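  // KC is processed in groups of 2 int8 elements ("c2" in the kernel name),
  // so round it up to a multiple of 2. Rows beyond mr alias the previous
  // row so the tile setup below stays in bounds for any mr <= MR.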
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  $for M in range(1, MR):
    const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride);
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

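  // Each iteration of this loop produces an MR x NR tile of output.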
  do {
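    // Initialize accumulators with the bias values that lead the packed
    // weights; rows 1..MR-1 start from copies of row 0.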
    $for N in range(0, NR, 4):
      int32x4_t vacc0x${ABC[N:N+4]} = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    $for M in range(1, MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};

    size_t k = kc;

    $if MLA:
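      // Main MLA loop: consume 16 KC bytes per row per iteration, widening
      // int8 products with VMULL_S8, folding the second half in with
      // VMLAL_S8, then pairwise-accumulating into int32 with VPADALQ_S16.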
      while (k >= 16 * sizeof(int8_t)) {
        $for M in range(MR):
          $if DUP == "LD4R":
            const int16x4x4_t va${M}x0 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
            const int16x4x4_t va${M}x1 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
          $elif DUP == "LD2R":
            const int16x4x2_t va${M}0x0 = vld2_dup_s16((const void*)a${M});
            const int16x4x2_t va${M}1x0 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
            const int16x4x2_t va${M}0x1 = vld2_dup_s16((const void*)a${M});
            const int16x4x2_t va${M}1x1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
          $elif DUP == "LD1R":
            const int16x4_t va${M}0x0 = vld1_dup_s16((const void*)a${M});
            const int16x4_t va${M}1x0 = vld1_dup_s16((const void*)(a${M} + 2));
            const int16x4_t va${M}2x0 = vld1_dup_s16((const void*)(a${M} + 4));
            const int16x4_t va${M}3x0 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
            const int16x4_t va${M}0x1 = vld1_dup_s16((const void*)a${M});
            const int16x4_t va${M}1x1 = vld1_dup_s16((const void*)(a${M} + 2));
            const int16x4_t va${M}2x1 = vld1_dup_s16((const void*)(a${M} + 4));
            const int16x4_t va${M}3x1 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
          $else:
            const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
            const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
        $for K in range(4):
          $for N in range(0, NR, 4):
            const int8x8_t vb${ABC[N:N+4]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        $for K in range(4):
          $for M in range(MR):
            $if DUP == "LD4R":
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}x0.val[${K}]);
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}x1.val[${K}]);
            $elif DUP == "LD2R":
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${int(K/2)}x0.val[${K%2}]);
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${int(K/2)}x1.val[${K%2}]);
            $elif DUP == "LD1R":
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${K}x0);
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${K}x1);
            $else:
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x0), ${K}));
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x1), ${K}));

          $for N in range(0, NR, 4):
            $for M in range(MR):
              int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}x0, va${M}c${K}x0);
            const int8x8_t vb${ABC[N:N+4]}c${K}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            $for M in range(MR):
              vprod${M}x${ABC[N:N+4]}c${K} = vmlal_s8(vprod${M}x${ABC[N:N+4]}c${K}, vb${ABC[N:N+4]}c${K}x1, va${M}c${K}x1);
            $for M in range(MR):
              vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c${K});

        k -= 16 * sizeof(int8_t);
      }

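    // Handle 8 KC bytes at a time: the single tail iteration of the MLA
    // loop, or the main loop when MLA is disabled.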
131    ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
132      $for M in range(MR):
133        $if DUP == "LD4R":
134          const int16x4x4_t va${M} = vld4_dup_s16((const void*)a${M}); a${M} += 8;
135        $elif DUP == "LD2R":
136          const int16x4x2_t va${M}0 = vld2_dup_s16((const void*)a${M});
137          const int16x4x2_t va${M}1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
138        $elif DUP == "LD1R":
139          const int16x4_t va${M}0 = vld1_dup_s16((const void*)a${M});
140          const int16x4_t va${M}1 = vld1_dup_s16((const void*)(a${M} + 2));
141          const int16x4_t va${M}2 = vld1_dup_s16((const void*)(a${M} + 4));
142          const int16x4_t va${M}3 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
143        $else:
144          const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
145
146      $for K in range(4):
147        $for N in range(0, NR, 4):
148          const int8x8_t vb${ABC[N:N+4]}c${K} = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
149
150      $for K in range(4):
151        $for M in range(MR):
152          $if DUP == "LD4R":
153            const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}.val[${K}]);
154          $elif DUP == "LD2R":
155            const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${int(K/2)}.val[${K%2}]);
156          $elif DUP == "LD1R":
157            const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${K});
158          $else:
159            const int8x8_t va${M}c${K} = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), ${K}));
160
161        $for N in range(0, NR, 4):
162          $for M in range(MR):
163            const int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}, va${M}c${K});
164          $for M in range(MR):
165            vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c${K});
166
167      k -= 8 * sizeof(int8_t);
168    }
169
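    // KC remainder: after the round-up, k here is 2, 4, or 6 bytes; process
    // one 2-element group (and one 8-byte panel of B) at a time.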
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const int8x8_t va${M} = vld1_s8(a${M}); a${M} = (const int8_t*) ((uintptr_t) a${M} + k);

      $for N in range(0, NR, 4):
        const int8x8_t vb${ABC[N:N+4]}c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      $for M in range(MR):
        const int8x8_t va${M}c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), 0));
        $for N in range(0, NR, 4):
          const int16x8_t vprod${M}x${ABC[N:N+4]}c0 = vmull_s8(vb${ABC[N:N+4]}c0, va${M}c0);
          vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c0);

      if (k > 2 * sizeof(int8_t)) {
        $for N in range(0, NR, 4):
          const int8x8_t vb${ABC[N:N+4]}c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        $for M in range(MR):
          const int8x8_t va${M}c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), 1));
          $for N in range(0, NR, 4):
            const int16x8_t vprod${M}x${ABC[N:N+4]}c1 = vmull_s8(vb${ABC[N:N+4]}c1, va${M}c1);
            vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c1);

        if (k > 4 * sizeof(int8_t)) {
          $for N in range(0, NR, 4):
            const int8x8_t vb${ABC[N:N+4]}c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          $for M in range(MR):
            const int8x8_t va${M}c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), 2));
            $for N in range(0, NR, 4):
              const int16x8_t vprod${M}x${ABC[N:N+4]}c2 = vmull_s8(vb${ABC[N:N+4]}c2, va${M}c2);
              vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c2);
        }
      }
    }

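    // Requantize the int32 accumulators to the int8 range: RNDNU applies a
    // saturating pre-shift, doubling multiply-high, and rounding post-shift;
    // FP32 scales in floating point and converts back (round-to-nearest via
    // VCVTNQ on ARMv8, or the magic-bias trick on older NEON).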
    $if REQUANTIZATION == "RNDNU":
      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
    $elif REQUANTIZATION == "FP32":
      $for M in range(MR):
        $for N in range(0, NR, 4):
          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});

      $if CHANNELWISE:
        $for N in range(0, NR, 4):
          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
          $for M in range(MR):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
      $else:
        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);

      $if ARMV8:
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
      $else:
        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));

        const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vqsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);

    $if REQUANTIZATION != "FP32" or ARMV8:
      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
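    // Saturating-narrow the accumulators to 16 bits, add the output zero
    // point (unless it was already folded into the magic bias above), then
    // narrow to 8 bits; AArch64 can use the *_high narrowing forms.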
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]});

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]}));

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif

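    // Clamp the 8-bit results to [output_min, output_max].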
    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $else:
      const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

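    // Store the tile: full NR-column stores in the common case, otherwise
    // fall through to the partial-store path below.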
    if (nc >= ${NR}) {
      $for M in range(MR):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

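      // Advance C to the next NR-column group and rewind A for the next
      // pass over the same rows.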
      $for M in range(MR):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      $for M in range(MR):
        a${M} = (const int8_t*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Final case where not all of the ${NR} columns fit in the destination.
      $if NR == 16:
        $for M in range(MR):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in range(MR):
            $if M % 2 == 1:
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in range(MR):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
            vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
            vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}