// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

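/*
 * GEMM microkernel for a 7-row by 8-column output tile, using AVX broadcast
 * loads and FMA3 multiply-accumulate. For each tile it accumulates, over
 * kc / sizeof(float) reduction steps, one broadcast element per row of `a`
 * against an 8-wide packed row of `w`, then clamps the results to the
 * [min, max] range supplied in `params` before storing into `c`.
 */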
void xnn_f32_gemm_ukernel_7x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

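  // One input (a) and one output (c) pointer per row of the tile. When mr < 7,
  // pointers for the unused rows alias the row above them, so their loads stay
  // in bounds and their stores (issued from c6 down to c0) are overwritten by
  // the aliased valid row's store.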
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }

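  // Outer loop: produce one tile of (up to) 8 output columns per iteration.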
  do {
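    // Initialize all 7 row accumulators from the 8 packed bias values at the
    // head of this column block of w.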
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    w += 8;

    size_t k = kc;
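    // Reduction loop over k: consume one element of each row of a per
    // iteration, broadcast across all 8 output columns.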
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;
      const __m256 va6 = _mm256_broadcast_ss(a6);
      a6 += 1;

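      // Load the 8 packed weights shared by all rows; w is assumed to be
      // 32-byte aligned, as _mm256_load_ps requires.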
      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

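      // acc[m] += a[m] * b for each of the 7 rows, in one fused step per row.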
      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
      vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);

      k -= sizeof(float);
    } while (k != 0);

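    // Clamp the accumulators to the output range: apply the upper bound, then
    // the lower bound, each broadcast from a 128-bit vector in params.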
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);

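    // Common case: a full 8-column tile. Store every row (c6 first, c0 last)
    // and advance each output pointer to the next tile.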
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

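      // Rewind the a pointers by kc bytes so the same input rows feed the
      // next column tile.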
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
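      // Remainder: fewer than 8 columns left. Write the tail in 4-, 2-, and
      // 1-element pieces, operating on 128-bit halves of the accumulators.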
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
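      // Store 4 columns, then shift the upper 128-bit halves down for the tail.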
      if (nc & 4) {
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
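      // Store 2 columns, then move the upper 64 bits of each half down.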
      if (nc & 2) {
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
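      // Store the last single column, if any.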
      if (nc & 1) {
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}