// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

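// GEMM microkernel with initial accumulators ("gemminc"): computes a 3x8 tile
// of C = A * B starting from the partial sums in `acc`, then clamps the result
// to the [min, max] range in `params`. "3x8s4" = 3 rows (MR) by 8 columns (NR),
// shuffled variant with period 4: the main loop loads 4 A elements per row and
// rotates them across SSE lanes instead of broadcasting each element.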
void xnn_f32_gemminc_minmax_ukernel_3x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

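  // Set up per-row input/output pointers. When mr < 3, the pointers for the
  // missing rows alias the previous row, so all loads and stores below stay
  // in bounds; the duplicated rows are simply redundant work.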
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

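  // Outer loop over blocks of up to 8 output columns. Each iteration seeds the
  // 3x8 accumulator tile from the caller-provided partial sums in `acc`.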
  do {
    __m128 vacc0x0123 = _mm_load_ps(acc + 0);
    __m128 vacc0x4567 = _mm_load_ps(acc + 4);
    __m128 vacc1x0123 = _mm_load_ps(acc + 8);
    __m128 vacc1x4567 = _mm_load_ps(acc + 12);
    __m128 vacc2x0123 = _mm_load_ps(acc + 16);
    __m128 vacc2x4567 = _mm_load_ps(acc + 20);
    acc += 24;

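    // Main loop: process 4 K elements per iteration. Each of the 4 rounds
    // multiplies the current A vectors by one 8-wide block of packed weights,
    // then rotates the A lanes so the next round consumes the next K element.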
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;


      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));

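      // Rotate each A vector one lane down (1,2,3,0) to line its elements up
      // with the next weight block.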
      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));


      w += 32;
      k -= 4 * sizeof(float);
    }
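    // Remainder loop for kc not a multiple of 4 floats: broadcast one A
    // element per row and multiply by one 8-wide weight block at a time.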
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m128 va0 = _mm_load1_ps(a0);
        a0 += 1;
        const __m128 va1 = _mm_load1_ps(a1);
        a1 += 1;
        const __m128 va2 = _mm_load1_ps(a2);
        a2 += 1;

        const __m128 vb0123 = _mm_load_ps(w);
        const __m128 vb4567 = _mm_load_ps(w + 4);
        w += 8;

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567));

        k -= sizeof(float);
      } while (k != 0);
    }

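    // Clamp the accumulators to the [min, max] output range.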
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);

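    // Full 8-column tile: store all three rows, step the output pointers to
    // the next column block, and rewind the A pointers to the start of the row.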
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
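      // Partial tile: store the remaining 1-7 columns in chunks of 4, 2, and 1
      // elements, shifting the surviving lanes down after each store.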
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}