// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/avx2-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

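// A 5x8 half-precision GEMM microkernel with fused min/max clamping for
// x86-64 processors with AVX2, FMA, and F16C. Each fp16 element of A is
// broadcast and converted to fp32, multiplied against a row of 8 packed
// weights, and the accumulators are rounded back to fp16 precision after
// each operation so the result behaves like half-precision arithmetic.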
void xnn_f16_gemm_minmax_ukernel_5x8__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

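  // Set up per-row pointers into A and C. If mr is smaller than 5, the
  // pointers for the unused rows alias the last valid row, so those rows are
  // computed redundantly but read from and written to valid memory.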
  const uint16_t* a0 = a;
  uint16_t* c0 = c;
  const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

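  // Outer loop: produce one 5x8 tile of C per iteration, advancing across the
  // N dimension until all nc columns are written. Accumulators for all 5 rows
  // start from the packed bias (the first 8 fp16 values of w).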
  do {
    __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    w = (const uint16_t*) w + 8;

    size_t k = kc;
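    // Inner loop over K: each iteration broadcasts one fp16 element from each
    // of the 5 rows of A and multiplies it against one row of 8 packed weights.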
    do {
      const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
      a0 += 1;
      const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
      a1 += 1;
      const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
      a2 += 1;
      const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
      a3 += 1;
      const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
      a4 += 1;

      const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
      w = (const uint16_t*) w + 8;

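      // Multiply-accumulate in fp32, then round each accumulator back to fp16
      // precision (cvtps_ph/cvtph_ps round trip) so the accumulation behaves
      // like half-precision arithmetic.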
      vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_NO_EXC));
      vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_NO_EXC));
      vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_NO_EXC));
      vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_NO_EXC));
      vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_NO_EXC));

      k -= sizeof(uint16_t);
    } while (k != 0);

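    // Apply the output scale from the params, again rounding the intermediate
    // result to fp16 precision.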
    const __m256 vscale = _mm256_load_ps(params->avx.scale);
    vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc0x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc1x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc2x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc3x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc4x01234567, vscale), _MM_FROUND_NO_EXC));

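    // Clamp the results to the [min, max] output range from the params.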
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);

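    // Full tile: convert each row back to fp16, store all 8 columns, and
    // rewind the A pointers to the start of the row for the next tile.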
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      _mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC));
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC));
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC));
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);

      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint16_t*) ((uintptr_t) a4 - kc);

      nc -= 8;
    } else {
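      // Partial tile (1-7 remaining columns): convert each row to fp16 once,
      // then store progressively smaller chunks of 4, 2, and 1 halfwords.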
      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
      __m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC);
      __m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC);
      __m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC);
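      // After each partial store, shift the remaining halfwords down to the
      // low lanes so the next, smaller store picks up where this one left off.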
      if (nc & 4) {
        _mm_storel_epi64((__m128i*) c0, vh0x01234567);
        _mm_storel_epi64((__m128i*) c1, vh1x01234567);
        _mm_storel_epi64((__m128i*) c2, vh2x01234567);
        _mm_storel_epi64((__m128i*) c3, vh3x01234567);
        _mm_storel_epi64((__m128i*) c4, vh4x01234567);

        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
        vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
        vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
        vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);

        c0 += 4;
        c1 += 4;
        c2 += 4;
        c3 += 4;
        c4 += 4;
      }
      if (nc & 2) {
        _mm_storeu_si32(c0, vh0x01234567);
        _mm_storeu_si32(c1, vh1x01234567);
        _mm_storeu_si32(c2, vh2x01234567);
        _mm_storeu_si32(c3, vh3x01234567);
        _mm_storeu_si32(c4, vh4x01234567);

        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
        vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
        vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
        vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);

        c0 += 2;
        c1 += 2;
        c2 += 2;
        c3 += 2;
        c4 += 2;
      }
      if (nc & 1) {
        *c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
        *c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
        *c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
        *c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
        *c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}