// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

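// GEMMINC microkernel: computes a 5x16 tile of C = clamp(acc + A * B) in
// single precision. Unlike the plain GEMM variant, the initial accumulators
// come from the `acc` buffer rather than a bias row. The "broadcast" in the
// name refers to the inner loop, which broadcasts one A element per row and
// multiplies it against a 16-wide vector of pre-packed weights.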
void xnn_f32_gemminc_ukernel_5x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

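  // Set up one A/C pointer pair per row. When mr < 5, pointers for the
  // out-of-range rows are clamped to alias the previous row so that their
  // loads stay in bounds; the redundant stores are harmless because rows are
  // written from c4 down to c0, so the genuine row's result lands last.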
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
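    // Load the initial accumulators for this 16-column tile: 5 rows of 16
    // floats, packed contiguously (80 floats per tile). The aligned loads
    // require the `acc` buffer to be 64-byte aligned.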
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(acc + 0);
    __m512 vacc1x0123456789ABCDEF = _mm512_load_ps(acc + 16);
    __m512 vacc2x0123456789ABCDEF = _mm512_load_ps(acc + 32);
    __m512 vacc3x0123456789ABCDEF = _mm512_load_ps(acc + 48);
    __m512 vacc4x0123456789ABCDEF = _mm512_load_ps(acc + 64);
    acc += 80;

    size_t k = kc;
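    // Inner loop over the K dimension (kc is in bytes, hence the decrement
    // by sizeof(float)): broadcast one A element per row and FMA it against
    // a 16-wide vector of packed weights, which must be 64-byte aligned.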
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a0), vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a1), vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a2), vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a3), vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(_mm512_set1_ps(*a4), vb0123456789ABCDEF, vacc4x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;

      k -= sizeof(float);
    } while (k != 0);

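    // Clamp the accumulators to [min, max]. Each bound is stored as a
    // 128-bit vector in the params struct and broadcast across all four
    // 128-bit lanes of a 512-bit register.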
    const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);

    const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);

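    // Full 16-column tile: store all five rows, advance the C pointers to
    // the next tile, and rewind the A pointers (advanced by kc bytes in the
    // inner loop) back to the start of their rows.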
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
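      // Remainder tile: fewer than 16 columns are left, so build a lane mask
      // with the low nc bits set and store only the valid columns, e.g.
      // nc == 5 yields the mask 0b0000000000011111.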
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}
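
/*
 * Illustrative call (a sketch, not part of the generated file; names other
 * than the kernel itself are hypothetical). Strides and kc are in bytes,
 * `packed_w` holds B packed 16 floats per k step, and `acc` holds 5x16
 * initial accumulators per tile; both are assumed 64-byte aligned:
 *
 *   xnn_f32_gemminc_ukernel_5x16__avx512f_broadcast(
 *       5,                       // mr: rows of A/C handled (1..5)
 *       n,                       // nc: columns of C to produce
 *       k * sizeof(float),       // kc: bytes per row of A
 *       a, lda * sizeof(float),  // A and its row stride in bytes
 *       packed_w,                // packed weights
 *       c, ldc * sizeof(float),  // C and its row stride in bytes
 *       16 * sizeof(float),      // cn_stride: step to the next column tile
 *       acc, &params);           // initial accumulators and clamp bounds
 */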