// Auto-generated file. Do not edit!
// Template: src/f32-gemm/psimd-s4.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <psimd.h>

#include <xnnpack/gemm.h>

xnn_f32_gemm_ukernel_6x8s4__psimd(size_t mr,size_t nc,size_t kc,const float * restrict a,size_t a_stride,const float * restrict w,float * restrict c,size_t cm_stride,size_t cn_stride,const union xnn_f32_output_params params[restrict static1])17 void xnn_f32_gemm_ukernel_6x8s4__psimd(
18 size_t mr,
19 size_t nc,
20 size_t kc,
21 const float*restrict a,
22 size_t a_stride,
23 const float*restrict w,
24 float*restrict c,
25 size_t cm_stride,
26 size_t cn_stride,
27 const union xnn_f32_output_params params[restrict static 1])
28 {
29 assert(mr != 0);
30 assert(mr <= 6);
31 assert(nc != 0);
32 assert(kc != 0);
33 assert(kc % sizeof(float) == 0);
34 assert(a != NULL);
35 assert(w != NULL);
36 assert(c != NULL);
37
38 const float* a0 = a;
39 float* c0 = c;
40 const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
41 float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
42 if XNN_UNPREDICTABLE(mr < 2) {
43 a1 = a0;
44 c1 = c0;
45 }
46 const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
47 float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
48 if XNN_UNPREDICTABLE(mr <= 2) {
49 a2 = a1;
50 c2 = c1;
51 }
52 const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
54 if XNN_UNPREDICTABLE(mr < 4) {
55 a3 = a2;
56 c3 = c2;
57 }
58 const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
59 float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
60 if XNN_UNPREDICTABLE(mr <= 4) {
61 a4 = a3;
62 c4 = c3;
63 }
64 const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
65 float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
66 if XNN_UNPREDICTABLE(mr != 6) {
67 a5 = a4;
68 c5 = c4;
69 }
70
71 do {
72 psimd_f32 vacc0x0123 = psimd_load_f32(w + 0);
73 psimd_f32 vacc0x4567 = psimd_load_f32(w + 4);
74 psimd_f32 vacc1x0123 = vacc0x0123;
75 psimd_f32 vacc1x4567 = vacc0x4567;
76 psimd_f32 vacc2x0123 = vacc0x0123;
77 psimd_f32 vacc2x4567 = vacc0x4567;
78 psimd_f32 vacc3x0123 = vacc0x0123;
79 psimd_f32 vacc3x4567 = vacc0x4567;
80 psimd_f32 vacc4x0123 = vacc0x0123;
81 psimd_f32 vacc4x4567 = vacc0x4567;
82 psimd_f32 vacc5x0123 = vacc0x0123;
83 psimd_f32 vacc5x4567 = vacc0x4567;
84 w += 8;
85
86 size_t k = kc;
87 while (k >= 4 * sizeof(float)) {
88 psimd_f32 va0 = psimd_load_f32(a0);
89 a0 += 4;
90 psimd_f32 va1 = psimd_load_f32(a1);
91 a1 += 4;
92 psimd_f32 va2 = psimd_load_f32(a2);
93 a2 += 4;
94 psimd_f32 va3 = psimd_load_f32(a3);
95 a3 += 4;
96 psimd_f32 va4 = psimd_load_f32(a4);
97 a4 += 4;
98 psimd_f32 va5 = psimd_load_f32(a5);
99 a5 += 4;
100
101
102 const psimd_f32 vb0123c0 = psimd_load_f32(w + 0);
103 const psimd_f32 vb4567c0 = psimd_load_f32(w + 4);
104
105 vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0, vb0123c0);
106 vacc1x0123 = psimd_qfma_f32(vacc1x0123, va1, vb0123c0);
107 vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2, vb0123c0);
108 vacc3x0123 = psimd_qfma_f32(vacc3x0123, va3, vb0123c0);
109 vacc4x0123 = psimd_qfma_f32(vacc4x0123, va4, vb0123c0);
110 vacc5x0123 = psimd_qfma_f32(vacc5x0123, va5, vb0123c0);
111 vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0, vb4567c0);
112 vacc1x4567 = psimd_qfma_f32(vacc1x4567, va1, vb4567c0);
113 vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2, vb4567c0);
114 vacc3x4567 = psimd_qfma_f32(vacc3x4567, va3, vb4567c0);
115 vacc4x4567 = psimd_qfma_f32(vacc4x4567, va4, vb4567c0);
116 vacc5x4567 = psimd_qfma_f32(vacc5x4567, va5, vb4567c0);
117
118 #ifdef __clang__
119 va0 = __builtin_shufflevector(va0, va0, 1, 2, 3, 0);
120 va1 = __builtin_shufflevector(va1, va1, 1, 2, 3, 0);
121 va2 = __builtin_shufflevector(va2, va2, 1, 2, 3, 0);
122 va3 = __builtin_shufflevector(va3, va3, 1, 2, 3, 0);
123 va4 = __builtin_shufflevector(va4, va4, 1, 2, 3, 0);
124 va5 = __builtin_shufflevector(va5, va5, 1, 2, 3, 0);
125 #else
126 va0 = __builtin_shuffle(va0, va0, (psimd_s32) { 1, 2, 3, 0 });
127 va1 = __builtin_shuffle(va1, va1, (psimd_s32) { 1, 2, 3, 0 });
128 va2 = __builtin_shuffle(va2, va2, (psimd_s32) { 1, 2, 3, 0 });
129 va3 = __builtin_shuffle(va3, va3, (psimd_s32) { 1, 2, 3, 0 });
130 va4 = __builtin_shuffle(va4, va4, (psimd_s32) { 1, 2, 3, 0 });
131 va5 = __builtin_shuffle(va5, va5, (psimd_s32) { 1, 2, 3, 0 });
132 #endif
133
134 const psimd_f32 vb0123c1 = psimd_load_f32(w + 8);
135 const psimd_f32 vb4567c1 = psimd_load_f32(w + 12);
136
137 vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0, vb0123c1);
138 vacc1x0123 = psimd_qfma_f32(vacc1x0123, va1, vb0123c1);
139 vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2, vb0123c1);
140 vacc3x0123 = psimd_qfma_f32(vacc3x0123, va3, vb0123c1);
141 vacc4x0123 = psimd_qfma_f32(vacc4x0123, va4, vb0123c1);
142 vacc5x0123 = psimd_qfma_f32(vacc5x0123, va5, vb0123c1);
143 vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0, vb4567c1);
144 vacc1x4567 = psimd_qfma_f32(vacc1x4567, va1, vb4567c1);
145 vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2, vb4567c1);
146 vacc3x4567 = psimd_qfma_f32(vacc3x4567, va3, vb4567c1);
147 vacc4x4567 = psimd_qfma_f32(vacc4x4567, va4, vb4567c1);
148 vacc5x4567 = psimd_qfma_f32(vacc5x4567, va5, vb4567c1);
149
150 #ifdef __clang__
151 va0 = __builtin_shufflevector(va0, va0, 1, 2, 3, 0);
152 va1 = __builtin_shufflevector(va1, va1, 1, 2, 3, 0);
153 va2 = __builtin_shufflevector(va2, va2, 1, 2, 3, 0);
154 va3 = __builtin_shufflevector(va3, va3, 1, 2, 3, 0);
155 va4 = __builtin_shufflevector(va4, va4, 1, 2, 3, 0);
156 va5 = __builtin_shufflevector(va5, va5, 1, 2, 3, 0);
157 #else
158 va0 = __builtin_shuffle(va0, va0, (psimd_s32) { 1, 2, 3, 0 });
159 va1 = __builtin_shuffle(va1, va1, (psimd_s32) { 1, 2, 3, 0 });
160 va2 = __builtin_shuffle(va2, va2, (psimd_s32) { 1, 2, 3, 0 });
161 va3 = __builtin_shuffle(va3, va3, (psimd_s32) { 1, 2, 3, 0 });
162 va4 = __builtin_shuffle(va4, va4, (psimd_s32) { 1, 2, 3, 0 });
163 va5 = __builtin_shuffle(va5, va5, (psimd_s32) { 1, 2, 3, 0 });
164 #endif
165
166 const psimd_f32 vb0123c2 = psimd_load_f32(w + 16);
167 const psimd_f32 vb4567c2 = psimd_load_f32(w + 20);
168
169 vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0, vb0123c2);
170 vacc1x0123 = psimd_qfma_f32(vacc1x0123, va1, vb0123c2);
171 vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2, vb0123c2);
172 vacc3x0123 = psimd_qfma_f32(vacc3x0123, va3, vb0123c2);
173 vacc4x0123 = psimd_qfma_f32(vacc4x0123, va4, vb0123c2);
174 vacc5x0123 = psimd_qfma_f32(vacc5x0123, va5, vb0123c2);
175 vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0, vb4567c2);
176 vacc1x4567 = psimd_qfma_f32(vacc1x4567, va1, vb4567c2);
177 vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2, vb4567c2);
178 vacc3x4567 = psimd_qfma_f32(vacc3x4567, va3, vb4567c2);
179 vacc4x4567 = psimd_qfma_f32(vacc4x4567, va4, vb4567c2);
180 vacc5x4567 = psimd_qfma_f32(vacc5x4567, va5, vb4567c2);
181
182 #ifdef __clang__
183 va0 = __builtin_shufflevector(va0, va0, 1, 2, 3, 0);
184 va1 = __builtin_shufflevector(va1, va1, 1, 2, 3, 0);
185 va2 = __builtin_shufflevector(va2, va2, 1, 2, 3, 0);
186 va3 = __builtin_shufflevector(va3, va3, 1, 2, 3, 0);
187 va4 = __builtin_shufflevector(va4, va4, 1, 2, 3, 0);
188 va5 = __builtin_shufflevector(va5, va5, 1, 2, 3, 0);
189 #else
190 va0 = __builtin_shuffle(va0, va0, (psimd_s32) { 1, 2, 3, 0 });
191 va1 = __builtin_shuffle(va1, va1, (psimd_s32) { 1, 2, 3, 0 });
192 va2 = __builtin_shuffle(va2, va2, (psimd_s32) { 1, 2, 3, 0 });
193 va3 = __builtin_shuffle(va3, va3, (psimd_s32) { 1, 2, 3, 0 });
194 va4 = __builtin_shuffle(va4, va4, (psimd_s32) { 1, 2, 3, 0 });
195 va5 = __builtin_shuffle(va5, va5, (psimd_s32) { 1, 2, 3, 0 });
196 #endif
197
198 const psimd_f32 vb0123c3 = psimd_load_f32(w + 24);
199 const psimd_f32 vb4567c3 = psimd_load_f32(w + 28);
200
201 vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0, vb0123c3);
202 vacc1x0123 = psimd_qfma_f32(vacc1x0123, va1, vb0123c3);
203 vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2, vb0123c3);
204 vacc3x0123 = psimd_qfma_f32(vacc3x0123, va3, vb0123c3);
205 vacc4x0123 = psimd_qfma_f32(vacc4x0123, va4, vb0123c3);
206 vacc5x0123 = psimd_qfma_f32(vacc5x0123, va5, vb0123c3);
207 vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0, vb4567c3);
208 vacc1x4567 = psimd_qfma_f32(vacc1x4567, va1, vb4567c3);
209 vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2, vb4567c3);
210 vacc3x4567 = psimd_qfma_f32(vacc3x4567, va3, vb4567c3);
211 vacc4x4567 = psimd_qfma_f32(vacc4x4567, va4, vb4567c3);
212 vacc5x4567 = psimd_qfma_f32(vacc5x4567, va5, vb4567c3);
213
214
215 w += 32;
216 k -= 4 * sizeof(float);
217 }
218 if XNN_UNLIKELY(k != 0) {
219 do {
220 const psimd_f32 va0 = psimd_load_splat_f32(a0);
221 a0 += 1;
222 const psimd_f32 va1 = psimd_load_splat_f32(a1);
223 a1 += 1;
224 const psimd_f32 va2 = psimd_load_splat_f32(a2);
225 a2 += 1;
226 const psimd_f32 va3 = psimd_load_splat_f32(a3);
227 a3 += 1;
228 const psimd_f32 va4 = psimd_load_splat_f32(a4);
229 a4 += 1;
230 const psimd_f32 va5 = psimd_load_splat_f32(a5);
231 a5 += 1;
232
233 const psimd_f32 vb0123 = psimd_load_f32(w);
234 const psimd_f32 vb4567 = psimd_load_f32(w + 4);
235 w += 8;
236
237 vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0, vb0123);
238 vacc1x0123 = psimd_qfma_f32(vacc1x0123, va1, vb0123);
239 vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2, vb0123);
240 vacc3x0123 = psimd_qfma_f32(vacc3x0123, va3, vb0123);
241 vacc4x0123 = psimd_qfma_f32(vacc4x0123, va4, vb0123);
242 vacc5x0123 = psimd_qfma_f32(vacc5x0123, va5, vb0123);
243 vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0, vb4567);
244 vacc1x4567 = psimd_qfma_f32(vacc1x4567, va1, vb4567);
245 vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2, vb4567);
246 vacc3x4567 = psimd_qfma_f32(vacc3x4567, va3, vb4567);
247 vacc4x4567 = psimd_qfma_f32(vacc4x4567, va4, vb4567);
248 vacc5x4567 = psimd_qfma_f32(vacc5x4567, va5, vb4567);
249
250 k -= sizeof(float);
251 } while (k != 0);
252 }
253
254 const psimd_f32 vmax = psimd_load_splat_f32(¶ms->scalar.max);
255 vacc0x0123 = psimd_min_f32(vacc0x0123, vmax);
256 vacc1x0123 = psimd_min_f32(vacc1x0123, vmax);
257 vacc2x0123 = psimd_min_f32(vacc2x0123, vmax);
258 vacc3x0123 = psimd_min_f32(vacc3x0123, vmax);
259 vacc4x0123 = psimd_min_f32(vacc4x0123, vmax);
260 vacc5x0123 = psimd_min_f32(vacc5x0123, vmax);
261 vacc0x4567 = psimd_min_f32(vacc0x4567, vmax);
262 vacc1x4567 = psimd_min_f32(vacc1x4567, vmax);
263 vacc2x4567 = psimd_min_f32(vacc2x4567, vmax);
264 vacc3x4567 = psimd_min_f32(vacc3x4567, vmax);
265 vacc4x4567 = psimd_min_f32(vacc4x4567, vmax);
266 vacc5x4567 = psimd_min_f32(vacc5x4567, vmax);
267
268 const psimd_f32 vmin = psimd_load_splat_f32(¶ms->scalar.min);
269 vacc0x0123 = psimd_max_f32(vacc0x0123, vmin);
270 vacc1x0123 = psimd_max_f32(vacc1x0123, vmin);
271 vacc2x0123 = psimd_max_f32(vacc2x0123, vmin);
272 vacc3x0123 = psimd_max_f32(vacc3x0123, vmin);
273 vacc4x0123 = psimd_max_f32(vacc4x0123, vmin);
274 vacc5x0123 = psimd_max_f32(vacc5x0123, vmin);
275 vacc0x4567 = psimd_max_f32(vacc0x4567, vmin);
276 vacc1x4567 = psimd_max_f32(vacc1x4567, vmin);
277 vacc2x4567 = psimd_max_f32(vacc2x4567, vmin);
278 vacc3x4567 = psimd_max_f32(vacc3x4567, vmin);
279 vacc4x4567 = psimd_max_f32(vacc4x4567, vmin);
280 vacc5x4567 = psimd_max_f32(vacc5x4567, vmin);
281
282 if XNN_LIKELY(nc >= 8) {
283 psimd_store_f32(c5, vacc5x0123);
284 psimd_store_f32(c5 + 4, vacc5x4567);
285 c5 = (float*) ((uintptr_t) c5 + cn_stride);
286 psimd_store_f32(c4, vacc4x0123);
287 psimd_store_f32(c4 + 4, vacc4x4567);
288 c4 = (float*) ((uintptr_t) c4 + cn_stride);
289 psimd_store_f32(c3, vacc3x0123);
290 psimd_store_f32(c3 + 4, vacc3x4567);
291 c3 = (float*) ((uintptr_t) c3 + cn_stride);
292 psimd_store_f32(c2, vacc2x0123);
293 psimd_store_f32(c2 + 4, vacc2x4567);
294 c2 = (float*) ((uintptr_t) c2 + cn_stride);
295 psimd_store_f32(c1, vacc1x0123);
296 psimd_store_f32(c1 + 4, vacc1x4567);
297 c1 = (float*) ((uintptr_t) c1 + cn_stride);
298 psimd_store_f32(c0, vacc0x0123);
299 psimd_store_f32(c0 + 4, vacc0x4567);
300 c0 = (float*) ((uintptr_t) c0 + cn_stride);
301
302 a5 = (const float*) ((uintptr_t) a5 - kc);
303 a4 = (const float*) ((uintptr_t) a4 - kc);
304 a3 = (const float*) ((uintptr_t) a3 - kc);
305 a2 = (const float*) ((uintptr_t) a2 - kc);
306 a1 = (const float*) ((uintptr_t) a1 - kc);
307 a0 = (const float*) ((uintptr_t) a0 - kc);
308
309 nc -= 8;
310 } else {
311 if (nc & 4) {
312 psimd_store_f32(c5, vacc5x0123);
313 psimd_store_f32(c4, vacc4x0123);
314 psimd_store_f32(c3, vacc3x0123);
315 psimd_store_f32(c2, vacc2x0123);
316 psimd_store_f32(c1, vacc1x0123);
317 psimd_store_f32(c0, vacc0x0123);
318
319 vacc5x0123 = vacc5x4567;
320 vacc4x0123 = vacc4x4567;
321 vacc3x0123 = vacc3x4567;
322 vacc2x0123 = vacc2x4567;
323 vacc1x0123 = vacc1x4567;
324 vacc0x0123 = vacc0x4567;
325
326 c5 += 4;
327 c4 += 4;
328 c3 += 4;
329 c2 += 4;
330 c1 += 4;
331 c0 += 4;
332 }
333 if (nc & 2) {
334 psimd_store2_f32(c5, vacc5x0123);
335 psimd_store2_f32(c4, vacc4x0123);
336 psimd_store2_f32(c3, vacc3x0123);
337 psimd_store2_f32(c2, vacc2x0123);
338 psimd_store2_f32(c1, vacc1x0123);
339 psimd_store2_f32(c0, vacc0x0123);
340
341 vacc5x0123 = psimd_concat_hi_f32(vacc5x0123, vacc5x0123);
342 vacc4x0123 = psimd_concat_hi_f32(vacc4x0123, vacc4x0123);
343 vacc3x0123 = psimd_concat_hi_f32(vacc3x0123, vacc3x0123);
344 vacc2x0123 = psimd_concat_hi_f32(vacc2x0123, vacc2x0123);
345 vacc1x0123 = psimd_concat_hi_f32(vacc1x0123, vacc1x0123);
346 vacc0x0123 = psimd_concat_hi_f32(vacc0x0123, vacc0x0123);
347
348 c5 += 2;
349 c4 += 2;
350 c3 += 2;
351 c2 += 2;
352 c1 += 2;
353 c0 += 2;
354 }
355 if (nc & 1) {
356 psimd_store1_f32(c5, vacc5x0123);
357 psimd_store1_f32(c4, vacc4x0123);
358 psimd_store1_f32(c3, vacc3x0123);
359 psimd_store1_f32(c2, vacc2x0123);
360 psimd_store1_f32(c1, vacc1x0123);
361 psimd_store1_f32(c0, vacc0x0123);
362 }
363
364 nc = 0;
365 }
366 } while (nc != 0);
367 }