// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (7 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

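  // Set up the 7 output row pointers. When mr < 7, the pointers for the
  // unused rows alias the previous row, so their stores become harmless
  // duplicate writes instead of out-of-bounds accesses.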
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }

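  // Outer loop over blocks of 8 output columns (NR = 8). Each block of
  // packed weights starts with 8 bias values, which seed the accumulators
  // for all 7 rows.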
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(w);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    w += 8;

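    // Middle loop over the indirection buffer: ks bytes of input row
    // pointers, consumed 7 (MR) at a time. A pointer equal to `zero`
    // references the shared zero buffer (implicit padding) and must not
    // be adjusted by a_offset.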
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      const float* restrict a6 = a[6];
      assert(a6 != NULL);
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const float*) ((uintptr_t) a6 + a_offset);
      }
      a += 7;

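      // Inner loop over the reduction dimension (kc is in bytes): each step
      // broadcasts one scalar per input row, loads an 8-wide vector of
      // weights, and accumulates with a fused multiply-add (FMA3).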
      size_t k = kc;
      do {
        const __m256 vb01234567 = _mm256_load_ps(w);
        w += 8;

        const __m256 va0 = _mm256_broadcast_ss(a0);
        a0 += 1;
        const __m256 va1 = _mm256_broadcast_ss(a1);
        a1 += 1;
        const __m256 va2 = _mm256_broadcast_ss(a2);
        a2 += 1;
        const __m256 va3 = _mm256_broadcast_ss(a3);
        a3 += 1;
        const __m256 va4 = _mm256_broadcast_ss(a4);
        a4 += 1;
        const __m256 va5 = _mm256_broadcast_ss(a5);
        a5 += 1;
        const __m256 va6 = _mm256_broadcast_ss(a6);
        a6 += 1;

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
        vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
        vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
        k -= sizeof(float);
      } while (k != 0);
      p -= 7 * sizeof(void*);
    } while (p != 0);

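    // Clamp the accumulators to the [min, max] output range from params:
    // min against vmax first, then max against vmin.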
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);

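    // Store the results: the common case writes full 8-wide rows; the
    // remainder path below writes 4/2/1 columns using 128-bit halves.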
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

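      // Rewind the indirection buffer so the same input pointers are reused
      // for the next block of 8 output columns.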
      a = (const float** restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
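      // Partial tile: peel off 4, 2, then 1 remaining columns. Start from
      // the low 128-bit half of each accumulator; after a 4-wide store,
      // move the high half down for the following steps.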
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}