// Auto-generated file. Do not edit!
//   Template: src/f16-igemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>

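// This microkernel computes a 6x8 tile of a half-precision indirect GEMM
// (IGEMM): up to mr (<= 6) rows of A, addressed through an indirection
// buffer of row pointers, times an 8-column panel of packed weights w,
// with the min/max clamp fused into the epilogue.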
void xnn_f16_igemm_minmax_ukernel_6x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void** restrict a,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

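  // Set up the six output row pointers. When mr < 6, the unused rows alias
  // the previous row, so their stores are harmless duplicates.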
  __fp16* c0 = (__fp16*) c;
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

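  // Outer loop over the N dimension: each iteration produces up to 8 output
  // columns. The first 8-wide row of w initializes every accumulator (the
  // packed bias).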
  do {
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc5x01234567 = vacc0x01234567;

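    // Loop over the indirection buffer: each step consumes 6 row pointers
    // and accumulates a kc-length dot product for every output row. Pointers
    // equal to `zero` reference the shared zero buffer and are deliberately
    // not adjusted by a_offset.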
    size_t p = ks;
    do {
      const __fp16* restrict a0 = (const __fp16*) a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const __fp16*) ((uintptr_t) a0 + a_offset);
      }
      const __fp16* restrict a1 = (const __fp16*) a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const __fp16*) ((uintptr_t) a1 + a_offset);
      }
      const __fp16* restrict a2 = (const __fp16*) a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const __fp16*) ((uintptr_t) a2 + a_offset);
      }
      const __fp16* restrict a3 = (const __fp16*) a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const __fp16*) ((uintptr_t) a3 + a_offset);
      }
      const __fp16* restrict a4 = (const __fp16*) a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const __fp16*) ((uintptr_t) a4 + a_offset);
      }
      const __fp16* restrict a5 = (const __fp16*) a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const __fp16*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

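      // Main K loop: a 64-bit load (the "ld64" in the kernel name) fetches
      // 4 fp16 elements per row, and each of the 4 lanes is multiplied
      // against its own 8-wide row of packed weights.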
      size_t k = kc;
      for (; k >= 4 * sizeof(__fp16); k -= 4 * sizeof(__fp16)) {
        const float16x4_t va0 = vld1_f16(a0); a0 += 4;
        const float16x4_t va1 = vld1_f16(a1); a1 += 4;
        const float16x4_t va2 = vld1_f16(a2); a2 += 4;
        const float16x4_t va3 = vld1_f16(a3); a3 += 4;
        const float16x4_t va4 = vld1_f16(a4); a4 += 4;
        const float16x4_t va5 = vld1_f16(a5); a5 += 4;

        const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

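        // On AArch64 the lane-indexed FMA intrinsic is used directly; on
        // other targets the lane is first broadcast with vdupq_lane_f16 and
        // then combined with a plain vfmaq_f16.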
        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
        #else
          const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
          const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
          const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
          const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
          const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
          const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
        #endif
        const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
        #else
          const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
          const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
          const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
          const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
          const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
          const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
        #endif
        const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
        #else
          const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
          const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
          const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
          const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
          const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
          const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
        #endif
        const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
        #else
          const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
          const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
          const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
          const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
          const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
          const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
        #endif
      }
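      // Remainder loop for kc not a multiple of 4: broadcast one fp16
      // element per row and fuse-multiply it with one weight row at a time.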
      if XNN_UNLIKELY(k != 0) {
        do {
          const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
          const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
          const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
          const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
          const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
          const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

          const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);

          k -= sizeof(__fp16);
        } while (k != 0);
      }
      p -= 6 * sizeof(void*);
    } while (p != 0);

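    // Epilogue: multiply by the scale from params, then clamp the results to
    // [min, max]. The fp16 parameters are loaded as u16 lanes and
    // reinterpreted to float16x8_t.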
    const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.scale));
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
    vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
    vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);

    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);

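    // Store the finished tile. In the common case (nc >= 8) each row gets a
    // single full-width store and the row pointers advance by cn_stride;
    // `a` is rewound by ks so the same indirection entries are reused for
    // the next 8 columns.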
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
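      // Tail for nc < 8: store 4, 2, and then 1 element(s) according to the
      // bits of nc, using vget_high_f16/vext_f16 to shift already-stored
      // lanes out of the way. This finishes the tile, so nc is set to 0.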
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      if (nc & 4) {
        vst1_f16(c5, vacc5x0123); c5 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vget_high_f16(vacc5x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc0x0123 = vget_high_f16(vacc0x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;

        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c5, vacc5x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}