// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


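// GEMM microkernel computing a 5x16 tile of the output with a fused MinMax
// activation: each step broadcasts one f32 element per row of A and
// multiplies it against 16 packed weights using FMA3 instructions.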
void xnn_f32_gemm_minmax_ukernel_5x16__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

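  // Set up one input (A) and one output (C) row pointer per row of the tile.
  // When mr < 5, the pointers for the unused rows alias the last valid row,
  // so those rows redundantly recompute and store the same data in place.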
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

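  // Outer loop over the N dimension: each iteration produces a tile of up to
  // 16 output columns. The accumulators start from the 16 bias values that
  // lead each column tile of the packed weight buffer w.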
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
    w += 16;

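    // Inner loop over K: per iteration, broadcast one A element for each of
    // the 5 rows and accumulate it against 16 packed B weights with FMA.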
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
      w += 16;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);

      k -= sizeof(float);
    } while (k != 0);

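    // Clamp the accumulated results to [min, max] from the kernel params,
    // fusing the MinMax activation into the GEMM.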
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);

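    // Store the results. A full 16-column tile takes the fast path; the
    // final partial tile (nc < 16) is written out in 8/4/2/1-column steps.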
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c4, vacc4x01234567);
      _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
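      // Partial tile: write the remaining nc (< 16) columns by successive
      // halving: 8 columns, then 4, 2, and 1, shifting the surviving lanes
      // down after each store.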
      if (nc & 8) {
        _mm256_storeu_ps(c4, vacc4x01234567);
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc4x01234567 = vacc4x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c4 += 8;
        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}
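
// Memory layout, as read by the kernel above:
//  - w: for each 16-column tile, 16 bias values followed by kc/sizeof(float)
//    groups of 16 packed weights (one group per k step).
//  - a: up to 5 input rows of kc bytes each, spaced a_stride bytes apart;
//    row pointers are rewound by kc after each full tile for reuse.
//  - c: up to 5 output rows spaced cm_stride bytes apart; each row pointer
//    advances by cn_stride bytes after a full 16-column tile is stored.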