// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>


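// 6x8 GEMM microkernel with fused scale and min/max clamping, operating on
// IEEE fp16 data via NEON FP16 arithmetic intrinsics. The "ld64" suffix in
// the template name refers to the 64-bit (4 x fp16) loads from each A row
// in the main loop.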
void xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

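  // Derive per-row pointers into A and C. Rows past mr are clamped to alias
  // the previous row, so the extra rows compute duplicate results and store
  // them to locations that are valid (if redundant) writes; this avoids
  // per-iteration bounds checks in the compute loops.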
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

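  // Outer loop: one iteration per tile of (up to) 8 output columns. All six
  // row accumulators start from the same vector read from w, i.e. the packed
  // per-column bias.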
  do {
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc5x01234567 = vacc0x01234567;

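    // Main loop: consume K in chunks of 4 fp16 elements, loading 4 values
    // from each A row and 4 packed 8-wide rows of B per iteration.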
    size_t k = kc;
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

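      // AArch64 FMLA can multiply by a single indexed lane of va directly;
      // on other targets each lane is broadcast with VDUP first, followed by
      // a full-vector FMA.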
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
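    // Remainder loop: process the kc % 4 leftover K elements one at a time,
    // broadcasting each A element across all 8 lanes.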
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

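    // Apply the output scale, then clamp every accumulator to [min, max].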
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
    vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
    vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);

    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);

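    // Store the output tile: full 8-wide stores when nc >= 8; otherwise the
    // remaining 4/2/1 columns are written piecewise below.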
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);

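      // Rewind the A pointers by kc bytes so the same rows are re-read for
      // the next 8-column tile.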
      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);

      nc -= 8;
    } else {
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
      }
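      // Two-column store: reinterpret a pair of fp16 lanes as one u32 lane
      // and store it unaligned, then rotate the vector so the next unwritten
      // lane moves into position 0.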
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}