// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

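// Computes an up-to-8x8 tile of C = clamp(A * B + bias): mr rows of A
// (kc bytes each) against packed weights w, with the results clamped to
// the min/max range supplied in params.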
void xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

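  // Set up per-row pointers into A and C. When mr < 8, out-of-range rows
  // alias the last valid row, so the loads below always stay in bounds and
  // the store order at the end makes the duplicated stores harmless.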
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

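  // Outer loop: produce the output 8 columns at a time until nc is exhausted.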
  do {
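    // Every accumulator row starts from the same 8 bias values that lead
    // the packed weights block.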
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    __m256 vacc7x01234567 = vacc0x01234567;
    w += 8;

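    // Inner loop over K. kc is given in bytes, so the counter steps by
    // sizeof(float); each iteration consumes one element from every row of A
    // and one 8-wide row of packed B.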
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;
      const __m256 va6 = _mm256_broadcast_ss(a6);
      a6 += 1;
      const __m256 va7 = _mm256_broadcast_ss(a7);
      a7 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

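      // One FMA per row: broadcasted A element times the shared B vector.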
      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
      vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
      vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);

      k -= sizeof(float);
    } while (k != 0);

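    // Clamp the accumulators to the [min, max] output range from params.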
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);
    vacc7x01234567 = _mm256_min_ps(vacc7x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);
    vacc7x01234567 = _mm256_max_ps(vacc7x01234567, vmin);

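    // Full-tile store path: write all 8 columns of every row. Rows are
    // stored from c7 down to c0 so that, when mr < 8 and row pointers alias,
    // the last store to each address comes from a valid row. The A pointers
    // are then rewound by kc bytes for the next column tile.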
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c7, vacc7x01234567);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
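      // Remainder path for the final nc < 8 columns: fall back to 128-bit
      // halves and store 4-, 2-, and 1-element tails as needed.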
      __m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c7, vacc7x0123);
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c7 += 4;
        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c7, vacc7x0123);
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c7 += 2;
        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c7, vacc7x0123);
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}