// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

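// GEMM microkernel for signed 8-bit inputs: computes an mr x 8 tile (mr <= 3)
// of C = A * B + bias with 32-bit accumulation, rndnu requantization, and
// min/max clamping of the int8 outputs. B is consumed from the packed weight
// buffer w; the reduction dimension kc is processed in 8-byte blocks.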
void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c8__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

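  // Round kc up to a whole number of 8-byte blocks; the kernel always consumes
  // full blocks, so A rows may be read slightly past kc (hence XNN_OOB_READS).
  // Rows beyond mr alias the previous row, keeping every load and store valid.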
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
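    // The packed weights begin each 8-column block with 8 int32 bias values.
    // Load each bias into lane 0 of a zeroed row-0 accumulator, then start
    // rows 1 and 2 from the same biases.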
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;

    size_t k = kc;
    // 2x partial unrolled loop to load 16 bytes at a time using MLA.
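    // Each iteration consumes two 8-byte K blocks per row: vmull_s8 multiplies
    // the first block, vmlal_s8 folds in the second, and vpadalq_s16 pairwise-adds
    // the eight int16 products into the four int32 lanes of that column's accumulator.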
    while (k >= 16 * sizeof(int8_t)) {
      const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;

      const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
      int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0);
      int16x8_t vprod2x0 = vmull_s8(vb0x0, va2x0);
      vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
      vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1);
      vprod2x0 = vmlal_s8(vprod2x0, vb0x1, va2x1);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
      int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0);
      int16x8_t vprod2x1 = vmull_s8(vb1x0, va2x0);
      vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
      vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1);
      vprod2x1 = vmlal_s8(vprod2x1, vb1x1, va2x1);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
      int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0);
      int16x8_t vprod2x2 = vmull_s8(vb2x0, va2x0);
      vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
      vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1);
      vprod2x2 = vmlal_s8(vprod2x2, vb2x1, va2x1);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
      int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0);
      int16x8_t vprod2x3 = vmull_s8(vb3x0, va2x0);
      vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
      vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1);
      vprod2x3 = vmlal_s8(vprod2x3, vb3x1, va2x1);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
      int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0);
      int16x8_t vprod2x4 = vmull_s8(vb4x0, va2x0);
      vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
      vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);
      vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
      int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
      int16x8_t vprod2x5 = vmull_s8(vb5x0, va2x0);
      vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
      vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1);
      vprod2x5 = vmlal_s8(vprod2x5, vb5x1, va2x1);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
      int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0);
      int16x8_t vprod2x6 = vmull_s8(vb6x0, va2x0);
      vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
      vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1);
      vprod2x6 = vmlal_s8(vprod2x6, vb6x1, va2x1);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
      int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0);
      int16x8_t vprod2x7 = vmull_s8(vb7x0, va2x0);
      vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
      vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1);
      vprod2x7 = vmlal_s8(vprod2x7, vb7x1, va2x1);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);

      k -= 16 * sizeof(int8_t);
    }

    // Handle 8 bytes at a time using MUL.
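    // kc is a multiple of 8 after rounding, so at most one 8-byte block remains;
    // it is handled with vmull_s8 alone (no second block to fold in).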
    if (k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;

      const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
      const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
      const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
      const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
      const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
      const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
      const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
      const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
      const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
      const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
      const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
      const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
      const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
      const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
      const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
      const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
      const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);

      k -= 8 * sizeof(int8_t);
    }

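    // Each vaccMxN holds 4 partial sums for output element (row M, column N).
    // Reduce them horizontally into one lane per column: vaccMx0123 / vaccMx4567.
    // AArch64 uses vpaddq_s32; ARMv7 NEON adds the halves and uses vpadd_s32.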
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
#endif

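    // rndnu requantization: a saturating pre-shift, a saturating doubling
    // multiply-high by the fixed-point multiplier, then a rounding post-shift.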
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

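    // Add the output zero point with int16 saturation, then saturate-narrow to
    // int8; rows 0 and 1 are packed together into one 16-byte vector.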
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

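    // Clamp the outputs to the [output_min, output_max] range from params.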
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

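    // Full-width case: store 8 columns per row, step the C pointers to the next
    // column block, and rewind the A pointers by kc so the same rows are reused.
    // Otherwise store the remaining 4/2/1 columns and finish.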
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vout2x01234567);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1_lane_s8(c2, vout2x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}