// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

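// Computes a 4x8 tile of the output as C = clamp(scale * (bias + A * B), min, max)
// in IEEE fp16, loading A four elements (64 bits) at a time.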
void xnn_f16_gemm_minmax_ukernel_4x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

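  // Set up per-row pointers into A and C. When mr < 4, the unused rows alias
  // the last valid row so their loads and stores stay in bounds.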
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

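  // Outer loop: one iteration per 8-column tile of the output.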
  do {
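    // Initialize the four row accumulators with the bias vector that leads
    // each column tile of the packed weights w.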
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc3x01234567 = vacc0x01234567;

    size_t k = kc;
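    // Main loop: consume 4 fp16 elements of K per iteration, one 64-bit load
    // per A row and four 128-bit loads of packed B.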
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

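      // AArch64 path: fused multiply-add by a lane of va. The generic NEON
      // path broadcasts the lane with vdupq_lane_f16 and uses plain vfmaq_f16.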
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
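    // Remainder loop: handle the kc % 4 leftover elements one at a time,
    // broadcasting each A element across a full vector.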
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

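    // Epilogue: apply the output scale, then clamp to [min, max].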
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);

    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);

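    // Store a full 8-wide row per output row when nc >= 8 and rewind the A
    // pointers for the next column tile; otherwise write the final nc < 8
    // columns in 4-, 2-, and 1-element pieces.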
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}