// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

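// bf16 GEMM microkernel with min/max clamping: computes an mr x nc tile
// (mr <= 3, nc processed 4 columns at a time) of C = A * B with bf16 inputs
// and outputs and f32 accumulation. kc is the K extent in bytes; a_stride,
// cm_stride, and cn_stride are byte strides. As consumed here, the packed
// weights start with 4 bf16 bias values followed, for each 8-element slice
// of K, by 8 bf16 values for each of the 4 columns, zero-padded at the end.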
void xnn_bf16_gemm_minmax_ukernel_3x4c8__neonfma_zip(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

  const uint16_t* a0 = (const uint16_t*) a;
  uint16_t* c0 = (uint16_t*) c;
  const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  const uint16_t* w = (const uint16_t*) w_ptr;
  const uint16x8_t vzero = vmovq_n_u16(0);
  do {
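    // Each accumulator starts as [bias, 0, 0, 0]: vshll_n_u16(..., 16) moves
    // the bf16 bits into the upper half of a 32-bit lane, which is exactly
    // the f32 encoding of the same value, so the bias is added exactly once
    // after the final lane reduction.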
    float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;

    size_t k = kc;
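    // Main loop: consume 8 bf16 K-elements per iteration for each of the
    // 3 rows of A and 4 columns of B.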
    for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
      const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;
      const uint16x8_t va1 = vld1q_u16(a1); a1 += 8;
      const uint16x8_t va2 = vld1q_u16(a2); a2 += 8;

      const uint16x8_t vb0 = vld1q_u16(w); w += 8;
      const uint16x8_t vb1 = vld1q_u16(w); w += 8;
      const uint16x8_t vb2 = vld1q_u16(w); w += 8;
      const uint16x8_t vb3 = vld1q_u16(w); w += 8;

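      // Widen the first four bf16 lanes of each vector to f32: interleaving
      // with zero via vzip1q_u16 places each bf16 value in the upper half of
      // a 32-bit lane.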
      const float32x4_t va0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0));
      const float32x4_t va1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va1));
      const float32x4_t va2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va2));

      const float32x4_t vb0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb0));
      const float32x4_t vb1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb1));
      const float32x4_t vb2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb2));
      const float32x4_t vb3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
      vacc1x0 = vfmaq_f32(vacc1x0, va1e, vb0e);
      vacc2x0 = vfmaq_f32(vacc2x0, va2e, vb0e);
      vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
      vacc1x1 = vfmaq_f32(vacc1x1, va1e, vb1e);
      vacc2x1 = vfmaq_f32(vacc2x1, va2e, vb1e);
      vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
      vacc1x2 = vfmaq_f32(vacc1x2, va1e, vb2e);
      vacc2x2 = vfmaq_f32(vacc2x2, va2e, vb2e);
      vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);
      vacc1x3 = vfmaq_f32(vacc1x3, va1e, vb3e);
      vacc2x3 = vfmaq_f32(vacc2x3, va2e, vb3e);

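      // Same for the remaining four lanes, via vzip2q_u16.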
      const float32x4_t va0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0));
      const float32x4_t va1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va1));
      const float32x4_t va2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va2));

      const float32x4_t vb0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb0));
      const float32x4_t vb1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb1));
      const float32x4_t vb2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb2));
      const float32x4_t vb3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
      vacc1x0 = vfmaq_f32(vacc1x0, va1o, vb0o);
      vacc2x0 = vfmaq_f32(vacc2x0, va2o, vb0o);
      vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
      vacc1x1 = vfmaq_f32(vacc1x1, va1o, vb1o);
      vacc2x1 = vfmaq_f32(vacc2x1, va2o, vb1o);
      vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
      vacc1x2 = vfmaq_f32(vacc1x2, va1o, vb2o);
      vacc2x2 = vfmaq_f32(vacc2x2, va2o, vb2o);
      vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
      vacc1x3 = vfmaq_f32(vacc1x3, va1o, vb3o);
      vacc2x3 = vfmaq_f32(vacc2x3, va2o, vb3o);
    }
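    // Remainder: 1 to 7 bf16 K-elements are left. The packed weights are
    // zero-padded, but the full-width loads of A may read past the end of
    // the row, so clear the A lanes wherever B is zero: a stray Inf or NaN
    // in the tail would otherwise poison the sum (0.0f * Inf is NaN).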
    if XNN_UNLIKELY(k != 0) {
      const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
      const uint16x8_t va1 = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
      const uint16x8_t va2 = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k);

      const uint16x8_t vb0 = vld1q_u16(w); w += 8;
      const uint16x8_t vb1 = vld1q_u16(w); w += 8;
      const uint16x8_t vb2 = vld1q_u16(w); w += 8;
      const uint16x8_t vb3 = vld1q_u16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));

      const float32x4_t vb0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb0));
      const float32x4_t vb1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb1));
      const float32x4_t vb2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb2));
      const float32x4_t vb3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb3));

      const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
      const uint16x8_t va1x0 = vbicq_u16(va1, vm0);
      const uint16x8_t va2x0 = vbicq_u16(va2, vm0);
      const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
      const uint16x8_t va1x1 = vbicq_u16(va1, vm1);
      const uint16x8_t va2x1 = vbicq_u16(va2, vm1);
      const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
      const uint16x8_t va1x2 = vbicq_u16(va1, vm2);
      const uint16x8_t va2x2 = vbicq_u16(va2, vm2);
      const uint16x8_t va0x3 = vbicq_u16(va0, vm3);
      const uint16x8_t va1x3 = vbicq_u16(va1, vm3);
      const uint16x8_t va2x3 = vbicq_u16(va2, vm3);

      const float32x4_t va0x0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x0));
      const float32x4_t va1x0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va1x0));
      const float32x4_t va2x0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va2x0));
      const float32x4_t va0x1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x1));
      const float32x4_t va1x1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va1x1));
      const float32x4_t va2x1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va2x1));
      const float32x4_t va0x2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x2));
      const float32x4_t va1x2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va1x2));
      const float32x4_t va2x2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va2x2));
      const float32x4_t va0x3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x3));
      const float32x4_t va1x3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va1x3));
      const float32x4_t va2x3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va2x3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
      vacc1x0 = vfmaq_f32(vacc1x0, va1x0e, vb0e);
      vacc2x0 = vfmaq_f32(vacc2x0, va2x0e, vb0e);
      vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
      vacc1x1 = vfmaq_f32(vacc1x1, va1x1e, vb1e);
      vacc2x1 = vfmaq_f32(vacc2x1, va2x1e, vb1e);
      vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
      vacc1x2 = vfmaq_f32(vacc1x2, va1x2e, vb2e);
      vacc2x2 = vfmaq_f32(vacc2x2, va2x2e, vb2e);
      vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);
      vacc1x3 = vfmaq_f32(vacc1x3, va1x3e, vb3e);
      vacc2x3 = vfmaq_f32(vacc2x3, va2x3e, vb3e);

      const float32x4_t vb0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb0));
      const float32x4_t vb1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb1));
      const float32x4_t vb2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb2));
      const float32x4_t vb3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb3));

      const float32x4_t va0x0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x0));
      const float32x4_t va1x0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va1x0));
      const float32x4_t va2x0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va2x0));
      const float32x4_t va0x1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x1));
      const float32x4_t va1x1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va1x1));
      const float32x4_t va2x1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va2x1));
      const float32x4_t va0x2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x2));
      const float32x4_t va1x2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va1x2));
      const float32x4_t va2x2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va2x2));
      const float32x4_t va0x3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x3));
      const float32x4_t va1x3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va1x3));
      const float32x4_t va2x3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va2x3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
      vacc1x0 = vfmaq_f32(vacc1x0, va1x0o, vb0o);
      vacc2x0 = vfmaq_f32(vacc2x0, va2x0o, vb0o);
      vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
      vacc1x1 = vfmaq_f32(vacc1x1, va1x1o, vb1o);
      vacc2x1 = vfmaq_f32(vacc2x1, va2x1o, vb1o);
      vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
      vacc1x2 = vfmaq_f32(vacc1x2, va1x2o, vb2o);
      vacc2x2 = vfmaq_f32(vacc2x2, va2x2o, vb2o);
      vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
      vacc1x3 = vfmaq_f32(vacc1x3, va1x3o, vb3o);
      vacc2x3 = vfmaq_f32(vacc2x3, va2x3o, vb3o);
    }

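    // Each vaccMxN still holds 4 partial sums for output element (M, N);
    // reduce them pairwise into one f32 per element. AArch64 has vpaddq_f32;
    // elsewhere the reduction goes through 64-bit halves.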
    #if XNN_ARCH_ARM64
      const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
      const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
      const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
      const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
      const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
      const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);

      float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
      float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
      float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
    #else
      const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
      const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
      const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
      const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
      const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
      const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
      const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
      const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
      const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
      const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
      const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
      const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));

      float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
      float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
      float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
    #endif

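    // Clamp the results to the [min, max] range supplied in params.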
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);

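    // Convert f32 back to bf16 by truncation: keep the upper 16 bits of each
    // 32-bit float.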
    uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
    uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
    uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16);

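    // Store the 3x4 output tile; when fewer than 4 columns remain, write
    // 2- and 1-element tails.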
    if XNN_LIKELY(nc >= 4) {
      vst1_u16(c0, vout0x0123);
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_u16(c1, vout1x0123);
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_u16(c2, vout2x0123);
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint16_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2;

        vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
        vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
        vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_u16(c0, vout0x0123, 0);
        vst1_lane_u16(c1, vout1x0123, 0);
        vst1_lane_u16(c2, vout2x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}