// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

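// GEMMINC micro-kernel: computes an 8x16 tile of a single-precision GEMM with
// AVX-512 broadcast FMAs, starting from partial accumulators supplied in 'acc'.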
void xnn_f32_gemminc_ukernel_8x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

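  // Set up per-row pointers into A and C. When mr < 8, the pointers of the
  // unused rows alias the last valid row, so those rows compute (and later
  // store over) duplicate results instead of reading out of bounds.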
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

  do {
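    // Load the 8x16 block of partial accumulators produced by a previous pass
    // (the GEMMINC variant resumes from 'acc' rather than starting from bias).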
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
    __m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
    __m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
    __m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
    __m512 vacc4x0123456789ABCDEF = _mm512_load_ps(acc + 64);
    __m512 vacc5x0123456789ABCDEF = _mm512_load_ps(acc + 80);
    __m512 vacc6x0123456789ABCDEF = _mm512_load_ps(acc + 96);
    __m512 vacc7x0123456789ABCDEF = _mm512_load_ps(acc + 112);
    acc += 128;

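    // Inner K loop: each iteration broadcasts one float from each row of A and
    // multiply-accumulates it against 16 columns of packed weights W.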
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a0), vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a1), vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a2), vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a3), vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a4), vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a5), vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
      vacc6x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a6), vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
      vacc7x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a7), vb0123456789ABCDEF, vacc7x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;
      a6 += 1;
      a7 += 1;

      k -= sizeof(float);
    } while (k != 0);

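    // Clamp the accumulators to the output range [min, max] from params.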
    const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);
    vacc6x0123456789ABCDEF = _mm512_min_ps(vacc6x0123456789ABCDEF, vmax);
    vacc7x0123456789ABCDEF = _mm512_min_ps(vacc7x0123456789ABCDEF, vmax);

    const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);
    vacc6x0123456789ABCDEF = _mm512_max_ps(vacc6x0123456789ABCDEF, vmin);
    vacc7x0123456789ABCDEF = _mm512_max_ps(vacc7x0123456789ABCDEF, vmin);

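    // Store the 8x16 tile: full vector stores when at least 16 columns remain,
    // otherwise masked stores for the final nc < 16 columns.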
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}