// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


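// QS8 IGEMM microkernel: computes a tile of up to 4 rows x 8 columns of int8
// output from inputs gathered through the indirection buffer `a` and packed
// weights `w`. Products are widened with VMULL.S8/VMLAL.S8 and pairwise-added
// into int32 accumulators, then requantized (rndnu) and clamped on store.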
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2s4__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

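  // Clamp the per-row output pointers so that rows past `mr` alias a valid
  // lower row; their stores become harmless duplicates of that row.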
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

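  // The packed weights cover K in blocks of 8 int8 elements, so round kc up
  // to a multiple of 8; the kernel may over-read A accordingly (XNN_OOB_READS).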
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
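    // Each pass over the packed weights begins with the int32 biases for the
    // 8 output channels; all 4 row accumulators start from the same biases.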
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

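    // Walk the indirection buffer: each step supplies 4 input-row pointers.
    // The special `zero` pointer is used as-is, without the a_offset bias.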
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
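      // Main loop: 16 bytes of K per row per iteration. Each weight sub-block
      // gets a VMULL.S8 on the first 8 A bytes and a VMLAL.S8 on the second 8,
      // and the int16 products are pairwise-accumulated into int32 (VPADAL).
      // Between sub-blocks the A vectors are rotated by 2 bytes (vext), which
      // is the c2s4 shuffle the kernel is named after.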
      while (k >= 16 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
        int8x8_t va3x1 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1x1);
        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2x1);
        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1x1);
        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2x1);
        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va1x1 = vext_s8(va1x1, va1x1, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va2x1 = vext_s8(va2x1, va2x1, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        va3x1 = vext_s8(va3x1, va3x1, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1x1);
        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2x1);
        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1x1);
        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2x1);
        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va1x1 = vext_s8(va1x1, va1x1, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va2x1 = vext_s8(va2x1, va2x1, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        va3x1 = vext_s8(va3x1, va3x1, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1x1);
        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2x1);
        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1x1);
        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2x1);
        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va1x1 = vext_s8(va1x1, va1x1, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va2x1 = vext_s8(va2x1, va2x1, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        va3x1 = vext_s8(va3x1, va3x1, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1x1);
        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2x1);
        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1x1);
        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2x1);
        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

        k -= 16 * sizeof(int8_t);
      }
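      // Remainder: kc is a multiple of 8, so at most one 8-byte block of K
      // remains; only the VMULL products are needed, with the same shuffle.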
      if (k != 0) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

      }

      p -= 4 * sizeof(void*);
    } while (p != 0);

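    // Requantize with the rndnu scheme: saturating shift by right_pre_shift,
    // saturating doubling multiply-high by the fixed-point multiplier, then
    // a rounding shift by right_post_shift back to the output scale.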
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

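    // Narrow int32 -> int16 with saturation and add the output zero point,
    // then narrow int16 -> int8. On AArch64 the high halves are packed
    // directly with SQXTN2; elsewhere the halves are narrowed and combined.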
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

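    // Store: with a full tile of 8 columns, write 8 bytes per row, advance
    // the output pointers by cn_stride, and rewind the indirection buffer by
    // ks for the next column block. Otherwise peel 4-, 2- and 1-byte stores,
    // rotating the output vectors between the partial stores.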
    if (nc >= 8) {
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}