// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

// QS8 (signed 8-bit) GEMM microkernel producing a 4-row x 8-column tile of
// C = A * B. The reduction dimension is processed two bytes at a time (the
// "c2" packed-weight layout), broadcasting A pairs with vdup_lane and
// accumulating with NEON VMULL/VMLAL into 32-bit accumulators via vpadalq.
// Results are requantized with the "rndnu" scheme (saturating pre-shift,
// doubling-high multiply, rounding post-shift), offset by the output zero
// point, and clamped to [output_min, output_max].
//
//   mr        - number of rows of A/C actually used (1..4)
//   nc        - number of output columns remaining
//   kc        - reduction length in bytes (rounded up to a multiple of 2 below)
//   a         - pointer to the first input row; rows are a_stride bytes apart
//   w         - packed weights: per 8-column tile, 8 int32 biases followed by
//               interleaved int8 weights
//   c         - pointer to the first output row; rows are cm_stride bytes
//               apart, column tiles cn_stride bytes apart
//   params    - rndnu_neon requantization parameters
//
// XNN_OOB_READS: the tail loads below may read past the logical end of a
// row; the generator guarantees those bytes are readable and their values
// do not affect the stored results.
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is consumed in pairs of int8 elements, matching the c2 weight packing.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Per-row input/output pointers. Rows beyond mr alias the previous row, so
  // the kernel always computes 4 rows but the extra ones read/write the same
  // memory as a valid row (their stores are redundant, not out of bounds).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  // Outer loop: one iteration per 8-column tile of output.
  do {
    // Initialize all 4 rows' accumulators from the packed int32 biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t k = kc;

    // Main loop: 16 k-bytes per iteration, split into two 8-byte halves
    // (suffix x0 and x1). Each half contributes one VMULL / VMLAL pair per
    // accumulator, and the widened int16 products are pairwise-added into
    // the int32 accumulators (vpadalq_s16), which sums each c2 pair.
    while (k >= 16 * sizeof(int8_t)) {
      const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
      const int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      // c0: broadcast int16 lane 0 (A bytes 0-1) of each half across the vector.
      const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
      const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
      const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
      const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
      const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
      const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
      const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
      const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));

      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
      vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
      vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
      // c1: int16 lane 1 (A bytes 2-3).
      const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
      const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
      const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
      const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
      const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
      const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
      const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
      const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));

      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
      vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
      vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
      // c2: int16 lane 2 (A bytes 4-5).
      const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
      const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
      const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
      const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
      const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
      const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
      const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
      const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));

      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
      vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
      vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
      // c3: int16 lane 3 (A bytes 6-7).
      const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
      const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
      const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
      const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
      const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
      const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
      const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
      const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));

      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
      vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
      vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

      k -= 16 * sizeof(int8_t);
    }

    // Remainder: one full 8-byte block of K (VMULL only, no second half).
    if (k >= 8 * sizeof(int8_t)) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));

      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
      const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
      const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
      const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
      const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));

      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
      const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
      const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
      const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
      const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));

      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
      const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
      const int8x8_t va1c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3));
      const int8x8_t va2c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3));
      const int8x8_t va3c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 3));

      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

      k -= 8 * sizeof(int8_t);
    }

    // Final sub-8-byte tail: k is now 2, 4, or 6 (kc was rounded to a
    // multiple of 2). Each nested branch handles one more c2 pair; the
    // vld1_s8 reads 8 bytes regardless (covered by XNN_OOB_READS).
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

      if (k > 2 * sizeof(int8_t)) {
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

        if (k > 4 * sizeof(int8_t)) {
          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        }
      }
    }

    // rndnu requantization: saturating left pre-shift, saturating doubling
    // high multiply, then rounding (post-)shift right via vrshlq with a
    // negative shift count.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add the output zero point, then
    // narrow int16 -> int8 (saturating). AArch64 uses the *_high forms to
    // narrow straight into a 128-bit register; AArch32 combines two halves.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store: rows 0/1 live in the low/high halves of the
      // first vector, rows 2/3 in the second.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind the A pointers to the start of each row for the next
      // column tile (the whole of K was consumed above).
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      // Store 4, then 2, then 1 byte(s) per row as needed, shifting the
      // remaining lanes down with vextq after each partial store.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}