// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRxNRc4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_ukernel_8x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (8 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

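  // Each SDOT lane consumes 4 consecutive int8 values, and the weights are
  // packed in groups of 4 (the "c4" layout), so round kc up to a multiple of 4.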
  kc = round_up_po2(kc, 4);
  int8_t* c0 = c;
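  // Clamp the output row pointers: when mr < 8, the unused rows alias the last
  // valid row, so their stores are redundant but stay in bounds.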
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  int8_t* c6 = (int8_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }
  int8_t* c7 = (int8_t*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    c7 = c6;
  }

  do {
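    // Initialize the accumulators from the bias values packed at the start of w;
    // rows 1-7 start from the same bias as row 0.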
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc4x0123 = vacc0x0123;
    int32x4_t vacc4x4567 = vacc0x4567;
    int32x4_t vacc5x0123 = vacc0x0123;
    int32x4_t vacc5x4567 = vacc0x4567;
    int32x4_t vacc6x0123 = vacc0x0123;
    int32x4_t vacc6x4567 = vacc0x4567;
    int32x4_t vacc7x0123 = vacc0x0123;
    int32x4_t vacc7x4567 = vacc0x4567;

    size_t p = ks;
    do {
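      // Pull the next 8 row pointers from the indirection buffer. Pointers that
      // equal `zero` reference the shared zero-padding row and must not be offset.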
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      const int8_t* restrict a4 = a[4];
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const int8_t*) ((uintptr_t) a4 + a_offset);
      }
      const int8_t* restrict a5 = a[5];
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const int8_t*) ((uintptr_t) a5 + a_offset);
      }
      const int8_t* restrict a6 = a[6];
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const int8_t*) ((uintptr_t) a6 + a_offset);
      }
      const int8_t* restrict a7 = a[7];
      if XNN_UNPREDICTABLE(a7 != zero) {
        a7 = (const int8_t*) ((uintptr_t) a7 + a_offset);
      }
      a += 8;

      // Inner accumulation loop over the K dimension for the 8 output columns.
      size_t k = kc;
      // 2x partially unrolled loop that loads 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load an 8x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
        const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8;
        const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8;
        const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 8;
        const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 8;

        // Load an 8x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

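        // Each vdotq_lane_s32 multiplies 4 int8 weights per 32-bit output lane by
        // the 4 activation bytes selected by the lane index and accumulates into int32.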
        // Multiply-accumulate: 8x8 * 8x8 --> 8x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb4567x0123, va6x01234567, 1);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb4567x4567, va6x01234567, 1);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb4567x0123, va7x01234567, 1);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb4567x4567, va7x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }
      // Handle the remaining up-to-4 bytes of `k`.
      if XNN_UNLIKELY(k != 0) {
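        // kc was rounded up to a multiple of 4 above, so exactly 4 bytes remain.
        // The 8-byte loads read past them, but only lane 0 (the low 4 bytes of
        // each activation vector) enters the dot products below.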
        // Load an 8x4 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0);
        const int8x8_t va1x01234567 = vld1_s8(a1);
        const int8x8_t va2x01234567 = vld1_s8(a2);
        const int8x8_t va3x01234567 = vld1_s8(a3);
        const int8x8_t va4x01234567 = vld1_s8(a4);
        const int8x8_t va5x01234567 = vld1_s8(a5);
        const int8x8_t va6x01234567 = vld1_s8(a6);
        const int8x8_t va7x01234567 = vld1_s8(a7);

        // Load a 4x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

        // Multiply-accumulate: 8x4 * 4x8 --> 8x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
      }
      p -= 8 * sizeof(void*);
    } while (p != 0);

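    // Requantize: scale the int32 accumulators by a fixed-point multiplier
    // (saturating rounding doubling high multiply), then apply a rounding
    // right shift below.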
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
    vacc0x0123 = vqrdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqrdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqrdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqrdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqrdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqrdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqrdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqrdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqrdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqrdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqrdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqrdmulhq_s32(vacc5x4567, vmultiplier);
    vacc6x0123 = vqrdmulhq_s32(vacc6x0123, vmultiplier);
    vacc6x4567 = vqrdmulhq_s32(vacc6x4567, vmultiplier);
    vacc7x0123 = vqrdmulhq_s32(vacc7x0123, vmultiplier);
    vacc7x4567 = vqrdmulhq_s32(vacc7x4567, vmultiplier);

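    // Pre-adjust negative accumulators by -1 (suppressed when the shift is zero)
    // so the rounding shift below rounds ties away from zero instead of toward
    // positive infinity.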
    const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
    const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
    vacc0x0123 = vsraq_n_s32(vacc0x0123, vbicq_s32(vacc0x0123, vzero_shift_mask), 31);
    vacc0x4567 = vsraq_n_s32(vacc0x4567, vbicq_s32(vacc0x4567, vzero_shift_mask), 31);
    vacc1x0123 = vsraq_n_s32(vacc1x0123, vbicq_s32(vacc1x0123, vzero_shift_mask), 31);
    vacc1x4567 = vsraq_n_s32(vacc1x4567, vbicq_s32(vacc1x4567, vzero_shift_mask), 31);
    vacc2x0123 = vsraq_n_s32(vacc2x0123, vbicq_s32(vacc2x0123, vzero_shift_mask), 31);
    vacc2x4567 = vsraq_n_s32(vacc2x4567, vbicq_s32(vacc2x4567, vzero_shift_mask), 31);
    vacc3x0123 = vsraq_n_s32(vacc3x0123, vbicq_s32(vacc3x0123, vzero_shift_mask), 31);
    vacc3x4567 = vsraq_n_s32(vacc3x4567, vbicq_s32(vacc3x4567, vzero_shift_mask), 31);
    vacc4x0123 = vsraq_n_s32(vacc4x0123, vbicq_s32(vacc4x0123, vzero_shift_mask), 31);
    vacc4x4567 = vsraq_n_s32(vacc4x4567, vbicq_s32(vacc4x4567, vzero_shift_mask), 31);
    vacc5x0123 = vsraq_n_s32(vacc5x0123, vbicq_s32(vacc5x0123, vzero_shift_mask), 31);
    vacc5x4567 = vsraq_n_s32(vacc5x4567, vbicq_s32(vacc5x4567, vzero_shift_mask), 31);
    vacc6x0123 = vsraq_n_s32(vacc6x0123, vbicq_s32(vacc6x0123, vzero_shift_mask), 31);
    vacc6x4567 = vsraq_n_s32(vacc6x4567, vbicq_s32(vacc6x4567, vzero_shift_mask), 31);
    vacc7x0123 = vsraq_n_s32(vacc7x0123, vbicq_s32(vacc7x0123, vzero_shift_mask), 31);
    vacc7x4567 = vsraq_n_s32(vacc7x4567, vbicq_s32(vacc7x4567, vzero_shift_mask), 31);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_shift);
    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_shift);
    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_shift);
    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_shift);
    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_shift);

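    // Narrow the int32 accumulators to int16 with saturation, add the output
    // zero point, then narrow to int8 with saturation. AArch64 uses the
    // pairwise vqmovn_high form; other targets combine two vqmovn halves.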
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
    int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567);
    int8x16_t vout6x01234567_7x01234567 = vqmovn_high_s16(vqmovn_s16(vacc6x01234567), vacc7x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
    int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc5x01234567));
    int8x16_t vout6x01234567_7x01234567 = vcombine_s8(vqmovn_s16(vacc6x01234567), vqmovn_s16(vacc7x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

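    // Clamp the results to the requested [output_min, output_max] range.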
    vout6x01234567_7x01234567 = vmaxq_s8(vout6x01234567_7x01234567, voutput_min);
    vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout6x01234567_7x01234567 = vminq_s8(vout6x01234567_7x01234567, voutput_max);
    vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

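    // Store the results: a full 8-column block per row when nc >= 8, otherwise
    // a 4/2/1-column tail driven by the low bits of nc.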
    if (nc >= 8) {
      vst1_s8(c7 + 0, vget_high_s8(vout6x01234567_7x01234567));
      vst1_s8(c6 + 0, vget_low_s8(vout6x01234567_7x01234567));
      vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567));
      vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c7 = (int8_t*) ((uintptr_t) c7 + cn_stride);
      c6 = (int8_t*) ((uintptr_t) c6 + cn_stride);
      c5 = (int8_t*) ((uintptr_t) c5 + cn_stride);
      c4 = (int8_t*) ((uintptr_t) c4 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

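      // Rewind the indirection buffer so the next 8-column block re-reads the
      // same row pointers.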
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
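      // Tail: store 4, then 2, then 1 column(s) as indicated by the bits of nc,
      // rotating the output vectors with vextq_s8 after each partial store.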
      if (nc & 4) {
        vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_s8(vout6x01234567_7x01234567), 2); c7 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_s8(vout6x01234567_7x01234567), 0); c6 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16(__builtin_assume_aligned(c7, 1), vreinterpretq_u16_s8(vout6x01234567_7x01234567), 4); c7 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c6, 1), vreinterpretq_u16_s8(vout6x01234567_7x01234567), 0); c6 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c7, vout6x01234567_7x01234567, 8);
        vst1q_lane_s8(c6, vout6x01234567_7x01234567, 0);
        vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}