// Auto-generated file. Do not edit!
//   Template: src/qu8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const uint8_t** restrict a,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

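  // Round kc up to a multiple of 4 bytes: the c4 packing consumes activations and
  // weights in groups of 4 along K, matching the 4-byte lanes of the UDOT instruction.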
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }
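  // When mr < 4, the excess row pointers alias the last valid row, so the kernel can
  // unconditionally compute and store 4 rows; stores below are issued c3 first and c0
  // last, leaving the valid row's data in place.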

  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc1x89AB = vpacc0x89AB;
    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc2x89AB = vpacc0x89AB;
    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc3x89AB = vpacc0x89AB;
    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
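    // The vnacc* accumulators collect kernel_zero_point * sum(activations) for each row;
    // subtracting them from the vpacc* products later yields sum(a * (w - kernel_zero_point)),
    // i.e. the zero-point-corrected unsigned dot products.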

    size_t p = ks;
    do {
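      // `a` is an indirection buffer with 4 activation-row pointers per kernel position;
      // entries equal to `zero` reference a shared zero buffer and are not offset by a_offset.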
      const uint8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
      }
      const uint8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
      }
      const uint8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
      }
      const uint8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      // Inner accumulation loop along the 16 columns.
      size_t k = kc;
      // 2x partial unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(uint8_t)) {
        // Load a 4x8 block of activations.
        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;

        // Load a 8x16 block of weights.
        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

        // Multiply-accumulate: 4x8 * 8x16 --> 4x16.
        vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
        vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
        vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
        vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);

        k -= 8 * sizeof(uint8_t);
      }
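      // Because kc was rounded up to a multiple of 4, the remainder, when nonzero, is
      // exactly 4 bytes per row.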
      // Handle up to 4 final positions of `k`
      if XNN_UNLIKELY(k != 0) {
        // Load a 4x4 block of activations.
        const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
        const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
        const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
        const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;

        // Load a 4x16 block of weights.
        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

        // Multiply-accumulate: 4x4 * 4x16 --> 4x16.
        vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Subtract zero point from accumulators.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));

    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

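    // Requantize with the rndnu scheme: an arithmetic pre-shift, a saturating doubling
    // multiply returning the high half (vqdmulhq), then a rounding post-shift.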
    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);

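    // Narrow to 16 bits with saturation and add the output zero point, then narrow to
    // uint8 with unsigned saturation. The ARM64 path uses vqmovn_high/vqmovun_high to
    // form 128-bit results directly; the ARMv7 path combines two 64-bit halves.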
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);

      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

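      // Rewind the indirection buffer by ks bytes so the next group of 16 output
      // columns revisits the same activation rows.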
      a = (const uint8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
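      // Partial tile (nc < 16): pair the low halves of adjacent rows into 128-bit
      // vectors so the 8-, 4-, 2-, and 1-byte tails can be written with lane stores.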
      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}