// Auto-generated file. Do not edit!
//   Template: src/f16-igemm/avx2-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>

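// 6x8 half-precision IGEMM microkernel with fused min/max clamping.
// Computes a 6-row x 8-column output tile from indirectly addressed input
// rows (a) and packed weights (w). F16C converts fp16 values to fp32 for the
// FMA, and every intermediate result is rounded back to fp16 so accumulation
// matches fp16 arithmetic.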
void xnn_f16_igemm_minmax_ukernel_6x8__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void** restrict a,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

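  // One output pointer per row; when mr < 6, pointers for the rows past mr
  // alias the last valid row, so their stores are harmless duplicates.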
  uint16_t* c0 = c;
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

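  // Each iteration produces an 8-column tile of all 6 output rows. The
  // accumulators start from the bias values at the head of the packed
  // weights w.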
  do {
    __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    w = (const uint16_t*) w + 8;

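    // Walk the indirection buffer: ks bytes of input-row pointers, consumed
    // 6 at a time. Rows pointing at the zero buffer are used as-is; all other
    // rows are shifted by a_offset.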
    size_t p = ks;
    do {
      const uint16_t* restrict a0 = (const uint16_t*) a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
      }
      const uint16_t* restrict a1 = (const uint16_t*) a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
      }
      const uint16_t* restrict a2 = (const uint16_t*) a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
      }
      const uint16_t* restrict a3 = (const uint16_t*) a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
      }
      const uint16_t* restrict a4 = (const uint16_t*) a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
      }
      const uint16_t* restrict a5 = (const uint16_t*) a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

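      // Reduction over kc bytes: broadcast one fp16 element from each input
      // row, widen to fp32, multiply-accumulate against 8 fp16 weights, and
      // round the result back to fp16 precision.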
      size_t k = kc;
      do {
        const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
        w = (const uint16_t*) w + 8;

        const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
        a0 += 1;
        const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
        a1 += 1;
        const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
        a2 += 1;
        const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
        a3 += 1;
        const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
        a4 += 1;
        const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
        a5 += 1;

        vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_NO_EXC));
        vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_NO_EXC));
        vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_NO_EXC));
        vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_NO_EXC));
        vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_NO_EXC));
        vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va5, vb01234567, vacc5x01234567), _MM_FROUND_NO_EXC));

        k -= sizeof(uint16_t);
      } while (k != 0);
      p -= 6 * sizeof(void*);
    } while (p != 0);

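    // Apply the output scale from params, then clamp to [min, max]; the
    // scaled values are rounded back to fp16 precision before clamping.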
    const __m256 vscale = _mm256_load_ps(params->avx.scale);
    vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc0x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc1x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc2x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc3x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc4x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc5x01234567, vscale), _MM_FROUND_NO_EXC));

    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);

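    // Full tile: store 8 fp16 values per row, advance by cn_stride, and
    // rewind the indirection pointer for the next column tile. Otherwise
    // store the nc-element remainder in 4/2/1-element pieces.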
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC));
      c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
      _mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC));
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC));
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC));
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
      __m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC);
      __m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC);
      __m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC);
      __m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      if (nc & 4) {
        _mm_storel_epi64((__m128i*) c5, vh5x01234567);
        _mm_storel_epi64((__m128i*) c4, vh4x01234567);
        _mm_storel_epi64((__m128i*) c3, vh3x01234567);
        _mm_storel_epi64((__m128i*) c2, vh2x01234567);
        _mm_storel_epi64((__m128i*) c1, vh1x01234567);
        _mm_storel_epi64((__m128i*) c0, vh0x01234567);

        vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
        vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
        vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
        vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storeu_si32(c5, vh5x01234567);
        _mm_storeu_si32(c4, vh4x01234567);
        _mm_storeu_si32(c3, vh3x01234567);
        _mm_storeu_si32(c2, vh2x01234567);
        _mm_storeu_si32(c1, vh1x01234567);
        _mm_storeu_si32(c0, vh0x01234567);

        vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
        vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
        vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
        vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c5 = _mm_extract_epi16(vh5x01234567, 0);
        *c4 = _mm_extract_epi16(vh4x01234567, 0);
        *c3 = _mm_extract_epi16(vh3x01234567, 0);
        *c2 = _mm_extract_epi16(vh2x01234567, 0);
        *c1 = _mm_extract_epi16(vh1x01234567, 0);
        *c0 = _mm_extract_epi16(vh0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}