// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

void xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

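  // Set up one A (input) and one C (output) row pointer per row of the 6-row
  // tile. When mr < 6, the out-of-range pointers alias the previous row, so
  // the extra rows redundantly compute and store the same values to the same
  // memory. XNN_UNPREDICTABLE marks these branches as hard to predict.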
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
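    // Initialize all 6 rows of accumulators with the same pair of vectors
    // read from the packed weights w: per XNNPACK's packed-weight layout,
    // each panel begins with the bias for the 16 output channels.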
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc0x89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc1x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc2x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc3x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc4x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc5x01234567 = vacc0x01234567;
    float16x8_t vacc5x89ABCDEF = vacc0x89ABCDEF;

    size_t k = kc;
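    // Main loop: process K in blocks of 4 fp16 elements, loading 4 A values
    // per row and, for each of the 4 K steps, a pair of B vectors covering
    // all 16 output channels.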
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

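      // On AArch64, vfmaq_lane_f16 fuses the lane broadcast into the
      // multiply-accumulate; the fp16 FMA-by-lane intrinsic is A64-only in
      // ACLE, so elsewhere each A lane is broadcast with vdupq_lane_f16
      // first and accumulated with the plain vector FMA. The same pattern
      // repeats for each of the 4 K steps in the block.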
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c0, vb89ABCDEFc0);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c0, vb89ABCDEFc0);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c0, vb89ABCDEFc0);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c0, vb89ABCDEFc0);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c0, vb89ABCDEFc0);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c1, vb89ABCDEFc1);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c1, vb89ABCDEFc1);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c1, vb89ABCDEFc1);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c1, vb89ABCDEFc1);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c1, vb89ABCDEFc1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c2, vb89ABCDEFc2);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c2, vb89ABCDEFc2);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c2, vb89ABCDEFc2);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c2, vb89ABCDEFc2);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c2, vb89ABCDEFc2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c3, vb89ABCDEFc3);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c3, vb89ABCDEFc3);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c3, vb89ABCDEFc3);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c3, vb89ABCDEFc3);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c3, vb89ABCDEFc3);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
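    // Remainder loop: handle the last 1-3 elements of K one at a time,
    // broadcasting each A element across all lanes with vld1q_dup_f16.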
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

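    // Apply the fused min/max activation. The bounds are stored in the
    // params as raw fp16 bit patterns, so load them as u16 and reinterpret;
    // clamp from above with max first, then from below with min.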
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
    vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
    vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
    vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);

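    // Fast path: store a full 16-column tile for every row, step each C
    // pointer to the next tile by cn_stride, and rewind the A pointers by
    // kc so the same rows are re-read for the next column block.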
    if XNN_LIKELY(nc >= 16) {
      vst1q_f16(c0, vacc0x01234567);
      vst1q_f16(c0 + 8, vacc0x89ABCDEF);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      vst1q_f16(c1 + 8, vacc1x89ABCDEF);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      vst1q_f16(c2 + 8, vacc2x89ABCDEF);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      vst1q_f16(c3 + 8, vacc3x89ABCDEF);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      vst1q_f16(c4 + 8, vacc4x89ABCDEF);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      vst1q_f16(c5 + 8, vacc5x89ABCDEF);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);

      nc -= 16;
    } else {
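      // Partial tile: store the remaining columns in decreasing power-of-two
      // chunks (8, 4, 2, 1), shifting the surviving lanes down after each
      // store so the next chunk always starts at lane 0.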
      if (nc & 8) {
        vst1q_f16(c0, vacc0x01234567); c0 += 8;
        vst1q_f16(c1, vacc1x01234567); c1 += 8;
        vst1q_f16(c2, vacc2x01234567); c2 += 8;
        vst1q_f16(c3, vacc3x01234567); c3 += 8;
        vst1q_f16(c4, vacc4x01234567); c4 += 8;
        vst1q_f16(c5, vacc5x01234567); c5 += 8;

        vacc0x01234567 = vacc0x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc4x01234567 = vacc4x89ABCDEF;
        vacc5x01234567 = vacc5x89ABCDEF;
      }
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
      }
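      // Two-column store: reinterpret a pair of adjacent fp16 lanes as a
      // single u32 lane, then rotate the vector with vext_f16 so the next
      // lanes move down to position 0.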
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}