// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

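// GEMM microkernel with accumulator input ("gemminc"): computes a tile of
// up to 5 rows x 16 columns of C as clamp(acc + A * B), loading the initial
// accumulators from `acc` rather than starting from zero or a bias.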
void xnn_f32_gemminc_ukernel_5x16__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

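  // Set up per-row read pointers into A and write pointers into C. When
  // mr < 5, the excess pointers alias the previous row, so out-of-bounds
  // rows are never touched and the duplicate stores are overwritten by the
  // later (valid) row stores.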
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

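  // Process the output in 5x16 tiles, moving 16 columns to the right per
  // iteration. Accumulators start from the values supplied in `acc`.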
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
    __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
    __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
    __m256 vacc4x01234567 = _mm256_load_ps(acc + 64);
    __m256 vacc4x89ABCDEF = _mm256_load_ps(acc + 72);
    acc += 80;

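    // Inner product over K: each iteration broadcasts one scalar from each
    // of the 5 rows of A and multiplies it against 16 packed weights of B.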
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
      w += 16;

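      // AVX lacks FMA, so each multiply-accumulate is a separate
      // _mm256_mul_ps followed by _mm256_add_ps.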
      vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
      vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
      vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
      vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
      vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
      vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
      vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
      vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
      vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
      vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));

      k -= sizeof(float);
    } while (k != 0);

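    // Clamp the accumulators into the [min, max] output range from params.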
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);

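    // Main store path: at least 16 columns remain, so write two full
    // 256-bit vectors per row (rows stored from c4 down to c0).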
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c4, vacc4x01234567);
      _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

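      // Rewind the A pointers by kc bytes so the same rows of A are reused
      // for the next 16-column tile of C.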
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
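      // Remainder path: fewer than 16 columns are left. Store 8, 4, 2, and
      // 1 elements according to the set bits of nc, shifting the surviving
      // lanes down after each partial store.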
      if (nc & 8) {
        _mm256_storeu_ps(c4, vacc4x01234567);
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc4x01234567 = vacc4x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c4 += 8;
        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
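      // Drop to the low 128-bit halves for the 4-, 2-, and 1-element stores.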
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}