// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

void xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const void* restrict acc,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

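  // Set up per-row pointers into A and C. When mr is short, trailing row
  // pointers alias the previous row, so those rows redundantly recompute
  // (and harmlessly overwrite) already-valid results.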
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const __fp16* a6 = (const __fp16*) ((uintptr_t) a5 + a_stride);
  __fp16* c6 = (__fp16*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const __fp16* a7 = (const __fp16*) ((uintptr_t) a6 + a_stride);
  __fp16* c7 = (__fp16*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

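  // Process the output 16 columns at a time. Accumulators are initialized
  // from the acc buffer (the "inc" in GEMMINC) rather than from packed bias.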
  do {
    float16x8_t vacc0x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc0x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc4x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc4x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc5x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc5x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc6x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc6x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc7x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc7x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));

    size_t k = kc;
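    // Main loop: consume 4 half-precision A elements per row per iteration,
    // via the 64-bit loads that give this "ld64" kernel its name.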
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;
      const float16x4_t va6 = vld1_f16(a6); a6 += 4;
      const float16x4_t va7 = vld1_f16(a7); a7 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

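      // On AArch64 the broadcast folds into a lane-indexed FMA; on other
      // targets each A lane is broadcast with vdupq_lane_f16 before a plain
      // FMA, presumably because lane-indexed f16 FMA is not assumed there.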
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
        vacc6x89ABCDEF = vfmaq_lane_f16(vacc6x89ABCDEF, vb89ABCDEFc0, va6, 0);
        vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc0, va7, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);
        const float16x8_t va6c0 = vdupq_lane_f16(va6, 0);
        const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c0, vb01234567c0);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c0, vb01234567c0);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c0, vb89ABCDEFc0);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c0, vb89ABCDEFc0);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c0, vb89ABCDEFc0);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c0, vb89ABCDEFc0);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c0, vb89ABCDEFc0);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);
        vacc6x89ABCDEF = vfmaq_f16(vacc6x89ABCDEF, va6c0, vb89ABCDEFc0);
        vacc7x89ABCDEF = vfmaq_f16(vacc7x89ABCDEF, va7c0, vb89ABCDEFc0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
        vacc6x89ABCDEF = vfmaq_lane_f16(vacc6x89ABCDEF, vb89ABCDEFc1, va6, 1);
        vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc1, va7, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);
        const float16x8_t va6c1 = vdupq_lane_f16(va6, 1);
        const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c1, vb01234567c1);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c1, vb01234567c1);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c1, vb89ABCDEFc1);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c1, vb89ABCDEFc1);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c1, vb89ABCDEFc1);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c1, vb89ABCDEFc1);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c1, vb89ABCDEFc1);
        vacc6x89ABCDEF = vfmaq_f16(vacc6x89ABCDEF, va6c1, vb89ABCDEFc1);
        vacc7x89ABCDEF = vfmaq_f16(vacc7x89ABCDEF, va7c1, vb89ABCDEFc1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
        vacc6x89ABCDEF = vfmaq_lane_f16(vacc6x89ABCDEF, vb89ABCDEFc2, va6, 2);
        vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc2, va7, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);
        const float16x8_t va6c2 = vdupq_lane_f16(va6, 2);
        const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c2, vb01234567c2);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c2, vb01234567c2);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c2, vb89ABCDEFc2);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c2, vb89ABCDEFc2);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c2, vb89ABCDEFc2);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c2, vb89ABCDEFc2);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c2, vb89ABCDEFc2);
        vacc6x89ABCDEF = vfmaq_f16(vacc6x89ABCDEF, va6c2, vb89ABCDEFc2);
        vacc7x89ABCDEF = vfmaq_f16(vacc7x89ABCDEF, va7c2, vb89ABCDEFc2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
        vacc6x89ABCDEF = vfmaq_lane_f16(vacc6x89ABCDEF, vb89ABCDEFc3, va6, 3);
        vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc3, va7, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);
        const float16x8_t va6c3 = vdupq_lane_f16(va6, 3);
        const float16x8_t va7c3 = vdupq_lane_f16(va7, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c3, vb01234567c3);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c3, vb01234567c3);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c3, vb89ABCDEFc3);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c3, vb89ABCDEFc3);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c3, vb89ABCDEFc3);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c3, vb89ABCDEFc3);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c3, vb89ABCDEFc3);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);
        vacc6x89ABCDEF = vfmaq_f16(vacc6x89ABCDEF, va6c3, vb89ABCDEFc3);
        vacc7x89ABCDEF = vfmaq_f16(vacc7x89ABCDEF, va7c3, vb89ABCDEFc3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
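    // Remainder loop: process the kc % 4 leftover elements one at a time,
    // broadcasting each A element across a full vector.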
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;
        const float16x8_t va6 = vld1q_dup_f16(a6); a6 += 1;
        const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6, vb01234567);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7, vb01234567);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);
        vacc6x89ABCDEF = vfmaq_f16(vacc6x89ABCDEF, va6, vb89ABCDEF);
        vacc7x89ABCDEF = vfmaq_f16(vacc7x89ABCDEF, va7, vb89ABCDEF);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

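    // Epilogue: scale all accumulators, then clamp to [min, max].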
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
    vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
    vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);
    vacc6x01234567 = vmulq_f16(vacc6x01234567, vscale);
    vacc7x01234567 = vmulq_f16(vacc7x01234567, vscale);
    vacc0x89ABCDEF = vmulq_f16(vacc0x89ABCDEF, vscale);
    vacc1x89ABCDEF = vmulq_f16(vacc1x89ABCDEF, vscale);
    vacc2x89ABCDEF = vmulq_f16(vacc2x89ABCDEF, vscale);
    vacc3x89ABCDEF = vmulq_f16(vacc3x89ABCDEF, vscale);
    vacc4x89ABCDEF = vmulq_f16(vacc4x89ABCDEF, vscale);
    vacc5x89ABCDEF = vmulq_f16(vacc5x89ABCDEF, vscale);
    vacc6x89ABCDEF = vmulq_f16(vacc6x89ABCDEF, vscale);
    vacc7x89ABCDEF = vmulq_f16(vacc7x89ABCDEF, vscale);

    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
    vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
    vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);
    vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
    vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);
    vacc6x89ABCDEF = vminq_f16(vacc6x89ABCDEF, vmax);
    vacc7x89ABCDEF = vminq_f16(vacc7x89ABCDEF, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
    vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);
    vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
    vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);
    vacc6x89ABCDEF = vmaxq_f16(vacc6x89ABCDEF, vmin);
    vacc7x89ABCDEF = vmaxq_f16(vacc7x89ABCDEF, vmin);

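    // Common case: store the full 8x16 tile, advance the C pointers by
    // cn_stride, and rewind the A pointers by kc for the next column tile.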
    if XNN_LIKELY(nc >= 16) {
      vst1q_f16(c0, vacc0x01234567);
      vst1q_f16(c0 + 8, vacc0x89ABCDEF);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      vst1q_f16(c1 + 8, vacc1x89ABCDEF);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      vst1q_f16(c2 + 8, vacc2x89ABCDEF);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      vst1q_f16(c3 + 8, vacc3x89ABCDEF);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      vst1q_f16(c4 + 8, vacc4x89ABCDEF);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      vst1q_f16(c5 + 8, vacc5x89ABCDEF);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);
      vst1q_f16(c6, vacc6x01234567);
      vst1q_f16(c6 + 8, vacc6x89ABCDEF);
      c6 = (__fp16*) ((uintptr_t) c6 + cn_stride);
      vst1q_f16(c7, vacc7x01234567);
      vst1q_f16(c7 + 8, vacc7x89ABCDEF);
      c7 = (__fp16*) ((uintptr_t) c7 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);
      a6 = (const __fp16*) ((uintptr_t) a6 - kc);
      a7 = (const __fp16*) ((uintptr_t) a7 - kc);

      nc -= 16;
    } else {
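      // Partial tile: store 8, 4, 2, then 1 remaining columns, shifting the
      // surviving lanes down after each store.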
      if (nc & 8) {
        vst1q_f16(c0, vacc0x01234567); c0 += 8;
        vst1q_f16(c1, vacc1x01234567); c1 += 8;
        vst1q_f16(c2, vacc2x01234567); c2 += 8;
        vst1q_f16(c3, vacc3x01234567); c3 += 8;
        vst1q_f16(c4, vacc4x01234567); c4 += 8;
        vst1q_f16(c5, vacc5x01234567); c5 += 8;
        vst1q_f16(c6, vacc6x01234567); c6 += 8;
        vst1q_f16(c7, vacc7x01234567); c7 += 8;

        vacc0x01234567 = vacc0x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc4x01234567 = vacc4x89ABCDEF;
        vacc5x01234567 = vacc5x89ABCDEF;
        vacc6x01234567 = vacc6x89ABCDEF;
        vacc7x01234567 = vacc7x89ABCDEF;
      }
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      float16x4_t vacc6x0123 = vget_low_f16(vacc6x01234567);
      float16x4_t vacc7x0123 = vget_low_f16(vacc7x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;
        vst1_f16(c6, vacc6x0123); c6 += 4;
        vst1_f16(c7, vacc7x0123); c7 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
        vacc6x0123 = vget_high_f16(vacc6x01234567);
        vacc7x0123 = vget_high_f16(vacc7x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpret_u32_f16(vacc6x0123), 0); c6 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpret_u32_f16(vacc7x0123), 0); c7 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
        vacc6x0123 = vext_f16(vacc6x0123, vacc6x0123, 2);
        vacc7x0123 = vext_f16(vacc7x0123, vacc7x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
        vst1_lane_f16(c6, vacc6x0123, 0);
        vst1_lane_f16(c7, vacc7x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}