// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/avx2-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

void xnn_f16_gemm_minmax_ukernel_6x8__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

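  // Set up one A (input) and one C (output) pointer per row of the 6-row tile.
  // When mr < 6, the unused row pointers alias the last valid row, so those
  // rows are computed redundantly and harmlessly overwrite the same memory.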
  const uint16_t* a0 = a;
  uint16_t* c0 = c;
  const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint16_t* a4 = (const uint16_t*) ((uintptr_t) a3 + a_stride);
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const uint16_t* a5 = (const uint16_t*) ((uintptr_t) a4 + a_stride);
  uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

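  // Each iteration of this loop produces one 6x8 output tile: initialize the
  // accumulators from the bias (the first 8 f16 values in w), run the K loop,
  // then scale, clamp, and store.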
  do {
    __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    w = (const uint16_t*) w + 8;

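    // Accumulate along the K dimension, consuming one f16 element per row of A
    // and one 8-wide row of packed weights per iteration.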
    size_t k = kc;
    do {
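      // Broadcast a single f16 element from each row of A to all 8 lanes,
      // converted to f32 for the FMA below.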
      const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
      a0 += 1;
      const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
      a1 += 1;
      const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
      a2 += 1;
      const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
      a3 += 1;
      const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
      a4 += 1;
      const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
      a5 += 1;

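      // Load 8 packed f16 weights, shared by all 6 rows.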
      const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
      w = (const uint16_t*) w + 8;

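      // Fused multiply-add in f32, then round-trip through f16
      // (_mm256_cvtps_ph/_mm256_cvtph_ps) so the accumulators stay at f16
      // precision, matching native f16 arithmetic.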
      vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_NO_EXC));
      vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_NO_EXC));
      vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_NO_EXC));
      vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_NO_EXC));
      vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_NO_EXC));
      vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va5, vb01234567, vacc5x01234567), _MM_FROUND_NO_EXC));

      k -= sizeof(uint16_t);
    } while (k != 0);

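    // Apply the output scale, again rounding each result to f16 precision.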
    const __m256 vscale = _mm256_load_ps(params->avx.scale);
    vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc0x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc1x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc2x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc3x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc4x01234567, vscale), _MM_FROUND_NO_EXC));
    vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vacc5x01234567, vscale), _MM_FROUND_NO_EXC));

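    // Clamp the results to the [min, max] range from the params.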
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);

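    // Full-tile path: convert each row to f16 and store all 8 columns at once.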
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
      _mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC));
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC));
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC));
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC));
      c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);

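      // Rewind the A pointers so the same rows are reused for the next
      // 8-column strip of output.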
      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint16_t*) ((uintptr_t) a4 - kc);
      a5 = (const uint16_t*) ((uintptr_t) a5 - kc);

      nc -= 8;
    } else {
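      // Remainder path (nc < 8): convert each row to f16 once, then store the
      // surviving columns with progressively narrower stores (4, 2, then 1
      // element), shifting the stored lanes out of the vector after each step.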
      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
      __m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC);
      __m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC);
      __m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC);
      __m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC);
      if (nc & 4) {
        _mm_storel_epi64((__m128i*) c0, vh0x01234567);
        _mm_storel_epi64((__m128i*) c1, vh1x01234567);
        _mm_storel_epi64((__m128i*) c2, vh2x01234567);
        _mm_storel_epi64((__m128i*) c3, vh3x01234567);
        _mm_storel_epi64((__m128i*) c4, vh4x01234567);
        _mm_storel_epi64((__m128i*) c5, vh5x01234567);

        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
        vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
        vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
        vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
        vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);

        c0 += 4;
        c1 += 4;
        c2 += 4;
        c3 += 4;
        c4 += 4;
        c5 += 4;
      }
      if (nc & 2) {
        _mm_storeu_si32(c0, vh0x01234567);
        _mm_storeu_si32(c1, vh1x01234567);
        _mm_storeu_si32(c2, vh2x01234567);
        _mm_storeu_si32(c3, vh3x01234567);
        _mm_storeu_si32(c4, vh4x01234567);
        _mm_storeu_si32(c5, vh5x01234567);

        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
        vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
        vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
        vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
        vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);

        c0 += 2;
        c1 += 2;
        c2 += 2;
        c3 += 2;
        c4 += 2;
        c5 += 2;
      }
      if (nc & 1) {
        *c0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
        *c1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);
        *c2 = (uint16_t) _mm_extract_epi16(vh2x01234567, 0);
        *c3 = (uint16_t) _mm_extract_epi16(vh3x01234567, 0);
        *c4 = (uint16_t) _mm_extract_epi16(vh4x01234567, 0);
        *c5 = (uint16_t) _mm_extract_epi16(vh5x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}