// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


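// Descriptive note (added, not generated): this microkernel computes a tile of
// up to 7 rows x 16 columns of C = clamp(A * B + bias, min, max) using
// AVX-512 broadcast FMAs. kc, a_stride, cm_stride, and cn_stride are all byte
// counts, and w points at weights packed as bias followed by B values.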
void xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

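  // Set up one A (input) pointer and one C (output) pointer per row of the
  // 7-row tile. When mr < 7, the pointers of the unused rows alias the last
  // valid row, so those rows redundantly recompute and re-store the same data
  // instead of reading or writing out of bounds.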
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }

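  // Outer loop: produce the output in column blocks of 16 (one __m512 wide);
  // nc counts the output columns that remain.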
  do {
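    // Initialize all 7 row accumulators from the bias: each column block of
    // the packed weights w begins with 16 bias values.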
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

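    // Inner loop over the reduction dimension (kc is in bytes): each step
    // broadcasts one scalar from every row of A and fused-multiply-adds it
    // against the same 16-wide group of packed B values.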
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a0), vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a1), vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a2), vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a3), vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a4), vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a5), vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
      vacc6x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a6), vb0123456789ABCDEF, vacc6x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;
      a6 += 1;

      k -= sizeof(float);
    } while (k != 0);

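    // Clamp the accumulators to [min, max]. Each bound is stored as a 4-float
    // array in params and broadcast across the full 512-bit register.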
    const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);
    vacc6x0123456789ABCDEF = _mm512_min_ps(vacc6x0123456789ABCDEF, vmax);

    const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);
    vacc6x0123456789ABCDEF = _mm512_max_ps(vacc6x0123456789ABCDEF, vmin);

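    // Full-width case: store all 16 columns per row, advance the C pointers to
    // the next column block, and rewind the A pointers by kc bytes so the next
    // pass re-reads the same rows of A.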
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));
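        // e.g. nc == 3 yields vmask == 0b0000000000000111, so only the first
        // 3 of the 16 lanes are written.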

        _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}
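
// Usage sketch (illustrative only, not part of the generated file): computing
// one 7x16 tile with kc = 4 floats of reduction. This assumes the weights are
// packed in the layout this kernel reads (16 biases, then kc groups of 16 B
// values, 64-byte aligned for _mm512_load_ps) and that params carries the
// clamp bounds in the SSE layout accessed above (params->sse.min/max); see
// xnnpack/params.h for the actual initialization. Strides and kc are bytes.
//
//   XNN_ALIGN(64) float w[16 + 4 * 16];  // bias + packed B
//   float a[7 * 4];                      // 7 rows of A, 4 floats each
//   float c[7 * 16];                     // 7x16 output tile
//   union xnn_f32_minmax_params params;  // fill min/max per xnnpack/params.h
//   xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast(
//       /*mr=*/7, /*nc=*/16, /*kc=*/4 * sizeof(float),
//       a, /*a_stride=*/4 * sizeof(float),
//       w, c,
//       /*cm_stride=*/16 * sizeof(float), /*cn_stride=*/16 * sizeof(float),
//       &params);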