// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/sse-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>

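// 5x8 IGEMM microkernel: accumulates a 5-row by 8-column tile of the output
// matrix. Rows of A are supplied through the indirection buffer `a`
// (convolution-style IGEMM); `w` packs the bias followed by interleaved
// B panels, and results are clamped to the [min, max] range from `params`.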
void xnn_f32_igemm_minmax_ukernel_5x8__sse2_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (5 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

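  // Set up one output row pointer per row of the tile. When mr < 5, the
  // pointers for the unused rows alias the previous valid row, so their
  // stores are harmless duplicates.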
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
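  // Loop over column tiles of up to 8 output columns. The accumulators start
  // from the bias, packed as the first 8 floats of each weight tile.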
  do {
    __m128 vacc0x0123 = _mm_load_ps(w);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    __m128 vacc4x0123 = vacc0x0123;
    __m128 vacc4x4567 = vacc0x4567;
    w += 8;
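    // Loop over the indirection buffer: each step consumes 5 A-row pointers.
    // Pointers other than the `zero` padding row are rebased by a_offset.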
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      a += 5;
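      // Main loop: process 4 columns of A (4 floats per row) per iteration.
      // Each lane is broadcast and multiplied against two 4-wide B vectors;
      // SSE2 has no FMA, so each step is a separate multiply and add.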
      size_t k = kc;
      while (k >= 4 * sizeof(float)) {
        const __m128 va0 = _mm_loadu_ps(a0);
        a0 += 4;
        const __m128 va1 = _mm_loadu_ps(a1);
        a1 += 4;
        const __m128 va2 = _mm_loadu_ps(a2);
        a2 += 4;
        const __m128 va3 = _mm_loadu_ps(a3);
        a3 += 4;
        const __m128 va4 = _mm_loadu_ps(a4);
        a4 += 4;

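        // Broadcast lane 0 of each A vector. The integer shuffle (pshufd)
        // writes straight into a new register, avoiding a movaps+shufps pair.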
        const __m128 va0c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(0, 0, 0, 0)));
        const __m128 va1c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(0, 0, 0, 0)));
        const __m128 va2c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(0, 0, 0, 0)));
        const __m128 va3c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(0, 0, 0, 0)));
        const __m128 va4c0000 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(0, 0, 0, 0)));

        const __m128 vb0123c0 = _mm_load_ps(w + 0);
        const __m128 vb4567c0 = _mm_load_ps(w + 4);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c0000, vb0123c0));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c0000, vb0123c0));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c0000, vb0123c0));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c0000, vb0123c0));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c0000, vb0123c0));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c0000, vb4567c0));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c0000, vb4567c0));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c0000, vb4567c0));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c0000, vb4567c0));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c0000, vb4567c0));

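        // Broadcast lane 1 and accumulate against the next B panel.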
        const __m128 va0c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(1, 1, 1, 1)));
        const __m128 va1c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(1, 1, 1, 1)));
        const __m128 va2c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(1, 1, 1, 1)));
        const __m128 va3c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(1, 1, 1, 1)));
        const __m128 va4c1111 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(1, 1, 1, 1)));

        const __m128 vb0123c1 = _mm_load_ps(w + 8);
        const __m128 vb4567c1 = _mm_load_ps(w + 12);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c1111, vb0123c1));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c1111, vb0123c1));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c1111, vb0123c1));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c1111, vb0123c1));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c1111, vb0123c1));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c1111, vb4567c1));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c1111, vb4567c1));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c1111, vb4567c1));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c1111, vb4567c1));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c1111, vb4567c1));

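        // Broadcast lane 2 and accumulate.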
        const __m128 va0c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va0), _MM_SHUFFLE(2, 2, 2, 2)));
        const __m128 va1c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va1), _MM_SHUFFLE(2, 2, 2, 2)));
        const __m128 va2c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va2), _MM_SHUFFLE(2, 2, 2, 2)));
        const __m128 va3c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va3), _MM_SHUFFLE(2, 2, 2, 2)));
        const __m128 va4c2222 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(va4), _MM_SHUFFLE(2, 2, 2, 2)));

        const __m128 vb0123c2 = _mm_load_ps(w + 16);
        const __m128 vb4567c2 = _mm_load_ps(w + 20);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c2222, vb0123c2));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c2222, vb0123c2));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c2222, vb0123c2));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c2222, vb0123c2));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c2222, vb0123c2));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c2222, vb4567c2));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c2222, vb4567c2));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c2222, vb4567c2));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c2222, vb4567c2));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c2222, vb4567c2));

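        // Lane 3 uses shufps directly, presumably because va0..va4 are dead
        // after this last use, so the two-operand shuffle costs no extra copy.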
        const __m128 va0c3333 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(3, 3, 3, 3));
        const __m128 va1c3333 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(3, 3, 3, 3));
        const __m128 va2c3333 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(3, 3, 3, 3));
        const __m128 va3c3333 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(3, 3, 3, 3));
        const __m128 va4c3333 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(3, 3, 3, 3));

        const __m128 vb0123c3 = _mm_load_ps(w + 24);
        const __m128 vb4567c3 = _mm_load_ps(w + 28);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0c3333, vb0123c3));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1c3333, vb0123c3));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2c3333, vb0123c3));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3c3333, vb0123c3));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4c3333, vb0123c3));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0c3333, vb4567c3));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1c3333, vb4567c3));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2c3333, vb4567c3));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3c3333, vb4567c3));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4c3333, vb4567c3));

        w += 32;
        k -= 4 * sizeof(float);
      }
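      // Remainder loop for kc not a multiple of 4: broadcast one float per
      // row per iteration.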
      if XNN_UNLIKELY(k != 0) {
        do {
          const __m128 vb0123 = _mm_load_ps(w);
          const __m128 vb4567 = _mm_load_ps(w + 4);
          w += 8;

          const __m128 va0 = _mm_load1_ps(a0);
          a0 += 1;
          const __m128 va1 = _mm_load1_ps(a1);
          a1 += 1;
          const __m128 va2 = _mm_load1_ps(a2);
          a2 += 1;
          const __m128 va3 = _mm_load1_ps(a3);
          a3 += 1;
          const __m128 va4 = _mm_load1_ps(a4);
          a4 += 1;

          vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
          vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
          vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
          vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
          vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
          vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));
          vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123));
          vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567));
          vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123));
          vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567));
          k -= sizeof(float);
        } while (k != 0);
      }
      p -= 5 * sizeof(void*);
    } while (p != 0);
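    // Apply the output bounds: min_ps clamps to the upper bound (max),
    // max_ps clamps to the lower bound (min).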
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);
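    // Full 8-column tile: store rows (highest first), advance the column
    // pointers by cn_stride, and rewind the indirection buffer by ks for the
    // next column tile.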
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c4, vacc4x0123);
      _mm_storeu_ps(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
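      // Partial tile: store 4, then 2, then 1 column(s), shifting the
      // remaining lanes down after each store.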
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}