// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/avx2-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

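// A 5x16 f16 GEMM microkernel with fused min/max clamping: it computes a tile
// of up to MR=5 rows by NR=16 columns of C = A * W, where A, W, and C hold
// IEEE half-precision values (as uint16_t) and all arithmetic runs in fp32
// via F16C conversions and AVX2 FMAs. Judging from the loads below, the
// packed weights w appear to be laid out as 16 initial (bias) fp16 values
// followed by 16 fp16 weights per reduction step.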
void xnn_f16_gemm_minmax_ukernel_5x16__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

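  // Per-row input (a0-a4) and output (c0-c4) pointers. When fewer than 5 rows
  // remain, the surplus pointers alias the previous row, so those rows simply
  // recompute and re-store the same data instead of touching out-of-bounds
  // memory.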
  const uint16_t* a0 = a;
  uint16_t* c0 = c;
  const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

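  // Outer loop: one iteration per block of NR=16 output columns. All 5 rows
  // of accumulators are seeded with the same 16 packed fp16 values from w
  // (the bias), widened to fp32.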
  do {
    __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    __m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
    w = (const uint16_t*) w + 16;

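    // Reduction loop over K: each step broadcasts one fp16 element from each
    // of the 5 rows of A, widens it to fp32, and FMAs it against 16 fp16
    // weights. Every accumulator is rounded back through fp16
    // (cvtps_ph/cvtph_ps) after each FMA, presumably to keep results
    // consistent with kernels that accumulate natively in half precision.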
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
      a0 += 1;
      const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
      a1 += 1;
      const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
      a2 += 1;
      const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
      a3 += 1;
      const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
      a4 += 1;

      const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
      const __m256 vb89ABCDEF = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) ((const uint16_t*) w + 8)));
      w = (const uint16_t*) w + 16;

      vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_NO_EXC));
      vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_NO_EXC));
      vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_NO_EXC));
      vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_NO_EXC));
      vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_NO_EXC));
      vacc0x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF), _MM_FROUND_NO_EXC));
      vacc1x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF), _MM_FROUND_NO_EXC));
      vacc2x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF), _MM_FROUND_NO_EXC));
      vacc3x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF), _MM_FROUND_NO_EXC));
      vacc4x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF), _MM_FROUND_NO_EXC));

      k -= sizeof(uint16_t);
    } while (k != 0);

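    // Epilogue: multiply by the scale from params (again rounding the product
    // through fp16), then clamp to [min, max]. Scale, min, and max are loaded
    // as ready-made 8-lane fp32 vectors from params->avx.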
    const __m256 vscale = _mm256_load_ps(params->avx.scale);
    vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc0x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc1x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc2x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc3x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc4x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc0x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc0x89ABCDEF, vscale), _MM_FROUND_NO_EXC));
    vacc1x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc1x89ABCDEF, vscale), _MM_FROUND_NO_EXC));
    vacc2x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc2x89ABCDEF, vscale), _MM_FROUND_NO_EXC));
    vacc3x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc3x89ABCDEF, vscale), _MM_FROUND_NO_EXC));
    vacc4x89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc4x89ABCDEF, vscale), _MM_FROUND_NO_EXC));

    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);

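    // Common case: all 16 columns fit. Each row is converted back to fp16 and
    // written with two 8-element stores; the c pointers advance by cn_stride
    // and the a pointers rewind by kc for the next column block.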
    if XNN_LIKELY(nc >= 16) {
      _mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (c0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_NO_EXC));
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      _mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (c1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_NO_EXC));
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (c2 + 8), _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_NO_EXC));
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (c3 + 8), _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_NO_EXC));
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (c4 + 8), _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_NO_EXC));
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);

      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint16_t*) ((uintptr_t) a4 - kc);

      nc -= 16;
    } else {
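      // Remainder columns (nc < 16): convert the low 8 accumulators of each
      // row to fp16, then store progressively narrower pieces guided by bits
      // 8/4/2/1 of nc. After each partial store the still-unstored lanes are
      // moved down to lane 0 (or the high accumulators are swapped in) for
      // the next, narrower store. This is the final iteration, so nc drops
      // to 0 afterwards.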
      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
      __m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC);
      __m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC);
      __m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC);
      if (nc & 8) {
        _mm_storeu_si128((__m128i*) c0, vh0x01234567);
        _mm_storeu_si128((__m128i*) c1, vh1x01234567);
        _mm_storeu_si128((__m128i*) c2, vh2x01234567);
        _mm_storeu_si128((__m128i*) c3, vh3x01234567);
        _mm_storeu_si128((__m128i*) c4, vh4x01234567);

        vh0x01234567 = _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_NO_EXC);
        vh1x01234567 = _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_NO_EXC);
        vh2x01234567 = _mm256_cvtps_ph(vacc2x89ABCDEF, _MM_FROUND_NO_EXC);
        vh3x01234567 = _mm256_cvtps_ph(vacc3x89ABCDEF, _MM_FROUND_NO_EXC);
        vh4x01234567 = _mm256_cvtps_ph(vacc4x89ABCDEF, _MM_FROUND_NO_EXC);

        c0 += 8;
        c1 += 8;
        c2 += 8;
        c3 += 8;
        c4 += 8;
      }
      if (nc & 4) {
        _mm_storel_epi64((__m128i*) c0, vh0x01234567);
        _mm_storel_epi64((__m128i*) c1, vh1x01234567);
        _mm_storel_epi64((__m128i*) c2, vh2x01234567);
        _mm_storel_epi64((__m128i*) c3, vh3x01234567);
        _mm_storel_epi64((__m128i*) c4, vh4x01234567);

        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
        vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
        vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
        vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);

        c0 += 4;
        c1 += 4;
        c2 += 4;
        c3 += 4;
        c4 += 4;
      }
      if (nc & 2) {
        _mm_storeu_si32(c0, vh0x01234567);
        _mm_storeu_si32(c1, vh1x01234567);
        _mm_storeu_si32(c2, vh2x01234567);
        _mm_storeu_si32(c3, vh3x01234567);
        _mm_storeu_si32(c4, vh4x01234567);

        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
        vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
        vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
        vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);

        c0 += 2;
        c1 += 2;
        c2 += 2;
        c3 += 2;
        c4 += 2;
      }
      if (nc & 1) {
        *c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
        *c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
        *c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
        *c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
        *c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}