// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>


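// f16 GEMMINC microkernel: computes a 4x8 (MR x NR) tile of C in fp16 and
// applies scale and min/max clamping. As the names suggest (an assumption
// from XNNPACK's naming scheme), "gemminc" means the accumulators are seeded
// from a caller-provided `acc` buffer, and "ld64" means each main-loop step
// loads 64 bits (4 fp16 values) from every row of A.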
void xnn_f16_gemminc_minmax_ukernel_4x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const void* restrict acc,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

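  // Set up per-row A and C pointers. When fewer than MR=4 rows are valid
  // (mr < 4), the pointers for the missing rows alias the previous row, so
  // the extra computations and stores become harmless duplicates.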
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

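  // Outer loop over output columns: each iteration produces one 4x8 tile of C
  // and consumes 8 columns (NR=8) until all nc columns are written.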
  do {
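    // Seed the accumulators from the caller-provided `acc` buffer, one
    // float16x8_t per output row (the plain GEMM variants instead read a
    // packed bias from `w`; that contrast is an assumption from the rest of
    // the XNNPACK kernel family, not visible in this file).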
    float16x8_t vacc0x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));

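    // Main K loop, unrolled by 4: each step loads 4 fp16 values (64 bits)
    // from every A row and four 8-wide B vectors from `w`, performing one
    // lane-broadcast FMA per loaded A element.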
    size_t k = kc;
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

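      // On AArch64, vfmaq_lane_f16 fuses the lane broadcast into the FMA;
      // on 32-bit ARM the lane is broadcast explicitly with vdupq_lane_f16
      // before a plain vfmaq_f16.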
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
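    // Remainder loop for kc not a multiple of 4: process one fp16 element
    // per A row at a time, broadcast across all 8 lanes.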
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

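    // Epilogue: multiply by the scalar `scale` from params, then clamp every
    // accumulator to [min, max] (vminq against max, vmaxq against min).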
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);

    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);

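    // Store the tile. The common case writes full 8-wide rows, then rewinds
    // the A pointers by kc bytes so the same A rows are reused for the next
    // column tile; the else branch handles an nc < 8 tail.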
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
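      // nc < 8: write the remaining columns with progressively narrower
      // stores (4, then 2, then 1 elements), shifting the surviving lanes
      // down (vext) or switching to the high half after each partial store.
      // The 2-element store goes through an unaligned u32 lane store.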
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}