// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/avx2-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_f16_gemm_minmax_ukernel_7x8__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

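  // Set up one A (input) and one C (output) pointer per row. Rows past the
  // actual tile height mr are clamped to the previous row, so all 7 rows are
  // always computed; the duplicate rows just re-store already-written results.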
  const uint16_t* a0 = a;
  uint16_t* c0 = c;
  const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
  uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const uint16_t* a6 = (const uint16_t*) ((uintptr_t) a5 + a_stride);
  uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }

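  // Outer loop: produce the output 8 columns at a time (one 8-lane fp32
  // vector per row, stored back as 8 fp16 values).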
  do {
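    // Initialize every row accumulator from the 8 fp16 bias values packed at
    // the head of the weight block, widened to fp32.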
    __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    w = (const uint16_t*) w + 8;

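    // Inner loop over K (kc is in bytes): each iteration broadcasts one fp16
    // element from each of the 7 A rows and multiplies it against the next 8
    // packed fp16 B values.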
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
      a0 += 1;
      const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
      a1 += 1;
      const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
      a2 += 1;
      const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
      a3 += 1;
      const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
      a4 += 1;
      const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
      a5 += 1;
      const __m256 va6 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a6));
      a6 += 1;

      const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
      w = (const uint16_t*) w + 8;

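      // FMA in fp32, then round each partial sum back to fp16 and re-widen.
      // The cvtps_ph/cvtph_ps round-trip keeps the accumulators at fp16
      // precision, so results track kernels that accumulate natively in fp16.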
      vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_NO_EXC));
      vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_NO_EXC));
      vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_NO_EXC));
      vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_NO_EXC));
      vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_NO_EXC));
      vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va5, vb01234567, vacc5x01234567), _MM_FROUND_NO_EXC));
      vacc6x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va6, vb01234567, vacc6x01234567), _MM_FROUND_NO_EXC));

      k -= sizeof(uint16_t);
    } while (k != 0);

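    // Clamp the finished accumulators to the [min, max] activation bounds
    // from params, still in fp32 (params->avx.min/max each hold 8 floats).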
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);

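    // Main store path: a full tile of 8 output columns per row. Convert each
    // row back to fp16, store it, and advance the C pointers by cn_stride.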
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      _mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC));
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC));
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC));
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC));
      c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
      _mm_storeu_si128((__m128i*) c6, _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_NO_EXC));
      c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);

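      // Rewind the A pointers by kc bytes so the same input rows are reused
      // for the next block of 8 output columns.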
      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
      a5 = (const uint16_t*) ((uintptr_t) a5 - kc);
      a6 = (const uint16_t*) ((uintptr_t) a6 - kc);

      nc -= 8;
    } else {
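      // Remainder path (nc < 8): convert once to fp16, then store 4, 2, and
      // 1 elements per row according to the set bits of nc, shifting the
      // stored lanes out of each vector after every partial store.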
      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
      __m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC);
      __m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC);
      __m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC);
      __m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC);
      __m128i vh6x01234567 = _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_NO_EXC);
      if (nc & 4) {
        _mm_storel_epi64((__m128i*) c0, vh0x01234567);
        _mm_storel_epi64((__m128i*) c1, vh1x01234567);
        _mm_storel_epi64((__m128i*) c2, vh2x01234567);
        _mm_storel_epi64((__m128i*) c3, vh3x01234567);
        _mm_storel_epi64((__m128i*) c4, vh4x01234567);
        _mm_storel_epi64((__m128i*) c5, vh5x01234567);
        _mm_storel_epi64((__m128i*) c6, vh6x01234567);

        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
        vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
        vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
        vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
        vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
        vh6x01234567 = _mm_unpackhi_epi64(vh6x01234567, vh6x01234567);

        c0 += 4;
        c1 += 4;
        c2 += 4;
        c3 += 4;
        c4 += 4;
        c5 += 4;
        c6 += 4;
      }
      if (nc & 2) {
        _mm_storeu_si32(c0, vh0x01234567);
        _mm_storeu_si32(c1, vh1x01234567);
        _mm_storeu_si32(c2, vh2x01234567);
        _mm_storeu_si32(c3, vh3x01234567);
        _mm_storeu_si32(c4, vh4x01234567);
        _mm_storeu_si32(c5, vh5x01234567);
        _mm_storeu_si32(c6, vh6x01234567);

        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
        vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
        vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
        vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
        vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
        vh6x01234567 = _mm_srli_epi64(vh6x01234567, 32);

        c0 += 2;
        c1 += 2;
        c2 += 2;
        c3 += 2;
        c4 += 2;
        c5 += 2;
        c6 += 2;
      }
      if (nc & 1) {
        *c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
        *c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
        *c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
        *c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
        *c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
        *c5 = (uint16_t) _mm_extract_epi16(vh5x01234567, 0);
        *c6 = (uint16_t) _mm_extract_epi16(vh6x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
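
/*
 * Illustrative usage sketch (not part of the generated file; names below are
 * hypothetical). The kernel expects A as row-major fp16 with a_stride in
 * bytes, W packed as 8 fp16 biases followed by K groups of 8 fp16 weights
 * per 8-column tile, and kc given in bytes. In practice callers go through
 * XNNPACK's operator API, which packs the weights and fills in params.
 *
 *   union xnn_f16_minmax_params params;  // avx.min/avx.max: 8 fp32 copies each
 *   xnn_f16_gemm_minmax_ukernel_7x8__avx2_broadcast(
 *       mr, nc, K * sizeof(uint16_t),   // kc in bytes
 *       a, a_stride,                    // fp16 inputs, byte row stride
 *       packed_w,                       // biases + weights, packed as above
 *       c, cm_stride,                   // fp16 outputs, byte row stride
 *       8 * sizeof(uint16_t),           // cn_stride for a contiguous C row
 *       &params);
 */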