// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

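// GEMM microkernel with incremental accumulation (GEMMINC): computes a
// 5x8 tile of C = clamp(acc + A*B), broadcasting A one element at a time
// against pre-packed 8-wide columns of B. Requires AVX and FMA3.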
void xnn_f32_gemminc_ukernel_5x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

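  // Set up per-row input (a0..a4) and output (c0..c4) pointers. When mr < 5,
  // the excess rows alias the previous row so every load and store stays
  // in-bounds; since rows are stored from c4 down to c0, aliased stores are
  // overwritten by the genuine row's result.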
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

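  // Main loop over the N dimension, producing an up-to-5x8 output tile per
  // iteration. Unlike the plain GEMM kernel, the GEMMINC variant initializes
  // its accumulators from the acc buffer of partial results rather than from
  // a bias packed into w.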
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
    __m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
    acc += 40;

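    // Inner loop over the K dimension: broadcast one float from each of the
    // 5 rows of A, load 8 packed weights, and issue 5 independent FMAs.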
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);

      k -= sizeof(float);
    } while (k != 0);

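    // Clamp the accumulators to the [min, max] output range from params.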
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);

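    // Full-width case: store all 8 columns of each row, advance the C
    // pointers to the next column tile, and rewind the A pointers (the inner
    // loop advanced them by kc bytes) for the next pass over K.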
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
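      // Remainder case (nc < 8): narrow the stores to 4-, 2-, and 1-element
      // pieces, starting from the low 128-bit half of each accumulator.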
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

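        // Shift the upper 128-bit halves down for the remaining columns.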
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}