// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/avx512f-nr1fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


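// This kernel computes sqrt(x) by refining the AVX512 reciprocal square root
// estimate (_mm512_rsqrt14_ps, relative error below 2^-14) with one
// Newton-Raphson iteration using FMA, followed by one adjustment step, as
// the "nr1fma1adj" suffix suggests:
//   r = rsqrt14(x)  ~ 1/sqrt(x)     (initial estimate)
//   s = r * x       ~ sqrt(x)
//   h = r * 0.5     ~ 0.5/sqrt(x)
//   e = 0.5 - s * h                 (residual; zero for an exact estimate)
//   s' = s + s * e                  (Newton-Raphson update of sqrt(x))
//   h' = h + h * e                  (Newton-Raphson update of 0.5/sqrt(x))
//   a = x - s' * s'                 (remaining error in the square)
//   y = s' + h' * a                 (final adjustment)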
void xnn_f32_vsqrt_ukernel__avx512f_nr1fma1adj_x128(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const __m512 vhalf = _mm512_set1_ps(params->fma.half);
  for (; n >= 128 * sizeof(float); n -= 128 * sizeof(float)) {
    const __m512 vx0 = _mm512_loadu_ps(x);
    const __m512 vx1 = _mm512_loadu_ps(x + 16);
    const __m512 vx2 = _mm512_loadu_ps(x + 32);
    const __m512 vx3 = _mm512_loadu_ps(x + 48);
    const __m512 vx4 = _mm512_loadu_ps(x + 64);
    const __m512 vx5 = _mm512_loadu_ps(x + 80);
    const __m512 vx6 = _mm512_loadu_ps(x + 96);
    const __m512 vx7 = _mm512_loadu_ps(x + 112);
    x += 128;

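    // Initial 2^-14-accurate estimates of 1/sqrt(x).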
    const __m512 vrsqrtx0 = _mm512_rsqrt14_ps(vx0);
    const __m512 vrsqrtx1 = _mm512_rsqrt14_ps(vx1);
    const __m512 vrsqrtx2 = _mm512_rsqrt14_ps(vx2);
    const __m512 vrsqrtx3 = _mm512_rsqrt14_ps(vx3);
    const __m512 vrsqrtx4 = _mm512_rsqrt14_ps(vx4);
    const __m512 vrsqrtx5 = _mm512_rsqrt14_ps(vx5);
    const __m512 vrsqrtx6 = _mm512_rsqrt14_ps(vx6);
    const __m512 vrsqrtx7 = _mm512_rsqrt14_ps(vx7);

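    // sqrt(x) ~ x * rsqrt(x); 0.5/sqrt(x) ~ 0.5 * rsqrt(x).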
    __m512 vsqrtx0 = _mm512_mul_ps(vrsqrtx0, vx0);
    __m512 vhalfrsqrtx0 = _mm512_mul_ps(vrsqrtx0, vhalf);
    __m512 vsqrtx1 = _mm512_mul_ps(vrsqrtx1, vx1);
    __m512 vhalfrsqrtx1 = _mm512_mul_ps(vrsqrtx1, vhalf);
    __m512 vsqrtx2 = _mm512_mul_ps(vrsqrtx2, vx2);
    __m512 vhalfrsqrtx2 = _mm512_mul_ps(vrsqrtx2, vhalf);
    __m512 vsqrtx3 = _mm512_mul_ps(vrsqrtx3, vx3);
    __m512 vhalfrsqrtx3 = _mm512_mul_ps(vrsqrtx3, vhalf);
    __m512 vsqrtx4 = _mm512_mul_ps(vrsqrtx4, vx4);
    __m512 vhalfrsqrtx4 = _mm512_mul_ps(vrsqrtx4, vhalf);
    __m512 vsqrtx5 = _mm512_mul_ps(vrsqrtx5, vx5);
    __m512 vhalfrsqrtx5 = _mm512_mul_ps(vrsqrtx5, vhalf);
    __m512 vsqrtx6 = _mm512_mul_ps(vrsqrtx6, vx6);
    __m512 vhalfrsqrtx6 = _mm512_mul_ps(vrsqrtx6, vhalf);
    __m512 vsqrtx7 = _mm512_mul_ps(vrsqrtx7, vx7);
    __m512 vhalfrsqrtx7 = _mm512_mul_ps(vrsqrtx7, vhalf);

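    // Newton-Raphson residual: 0.5 - sqrtx * halfrsqrtx, zero for an exact estimate.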
    const __m512 vresidual0 = _mm512_fnmadd_ps(vsqrtx0, vhalfrsqrtx0, vhalf);
    const __m512 vresidual1 = _mm512_fnmadd_ps(vsqrtx1, vhalfrsqrtx1, vhalf);
    const __m512 vresidual2 = _mm512_fnmadd_ps(vsqrtx2, vhalfrsqrtx2, vhalf);
    const __m512 vresidual3 = _mm512_fnmadd_ps(vsqrtx3, vhalfrsqrtx3, vhalf);
    const __m512 vresidual4 = _mm512_fnmadd_ps(vsqrtx4, vhalfrsqrtx4, vhalf);
    const __m512 vresidual5 = _mm512_fnmadd_ps(vsqrtx5, vhalfrsqrtx5, vhalf);
    const __m512 vresidual6 = _mm512_fnmadd_ps(vsqrtx6, vhalfrsqrtx6, vhalf);
    const __m512 vresidual7 = _mm512_fnmadd_ps(vsqrtx7, vhalfrsqrtx7, vhalf);

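    // One Newton-Raphson update of both the sqrt(x) and 0.5/sqrt(x) estimates.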
    vhalfrsqrtx0 = _mm512_fmadd_ps(vhalfrsqrtx0, vresidual0, vhalfrsqrtx0);
    vsqrtx0 = _mm512_fmadd_ps(vsqrtx0, vresidual0, vsqrtx0);
    vhalfrsqrtx1 = _mm512_fmadd_ps(vhalfrsqrtx1, vresidual1, vhalfrsqrtx1);
    vsqrtx1 = _mm512_fmadd_ps(vsqrtx1, vresidual1, vsqrtx1);
    vhalfrsqrtx2 = _mm512_fmadd_ps(vhalfrsqrtx2, vresidual2, vhalfrsqrtx2);
    vsqrtx2 = _mm512_fmadd_ps(vsqrtx2, vresidual2, vsqrtx2);
    vhalfrsqrtx3 = _mm512_fmadd_ps(vhalfrsqrtx3, vresidual3, vhalfrsqrtx3);
    vsqrtx3 = _mm512_fmadd_ps(vsqrtx3, vresidual3, vsqrtx3);
    vhalfrsqrtx4 = _mm512_fmadd_ps(vhalfrsqrtx4, vresidual4, vhalfrsqrtx4);
    vsqrtx4 = _mm512_fmadd_ps(vsqrtx4, vresidual4, vsqrtx4);
    vhalfrsqrtx5 = _mm512_fmadd_ps(vhalfrsqrtx5, vresidual5, vhalfrsqrtx5);
    vsqrtx5 = _mm512_fmadd_ps(vsqrtx5, vresidual5, vsqrtx5);
    vhalfrsqrtx6 = _mm512_fmadd_ps(vhalfrsqrtx6, vresidual6, vhalfrsqrtx6);
    vsqrtx6 = _mm512_fmadd_ps(vsqrtx6, vresidual6, vsqrtx6);
    vhalfrsqrtx7 = _mm512_fmadd_ps(vhalfrsqrtx7, vresidual7, vhalfrsqrtx7);
    vsqrtx7 = _mm512_fmadd_ps(vsqrtx7, vresidual7, vsqrtx7);

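    // Remaining error in the square: x - sqrtx * sqrtx.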
    const __m512 vadjustment0 = _mm512_fnmadd_ps(vsqrtx0, vsqrtx0, vx0);
    const __m512 vadjustment1 = _mm512_fnmadd_ps(vsqrtx1, vsqrtx1, vx1);
    const __m512 vadjustment2 = _mm512_fnmadd_ps(vsqrtx2, vsqrtx2, vx2);
    const __m512 vadjustment3 = _mm512_fnmadd_ps(vsqrtx3, vsqrtx3, vx3);
    const __m512 vadjustment4 = _mm512_fnmadd_ps(vsqrtx4, vsqrtx4, vx4);
    const __m512 vadjustment5 = _mm512_fnmadd_ps(vsqrtx5, vsqrtx5, vx5);
    const __m512 vadjustment6 = _mm512_fnmadd_ps(vsqrtx6, vsqrtx6, vx6);
    const __m512 vadjustment7 = _mm512_fnmadd_ps(vsqrtx7, vsqrtx7, vx7);

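    // Final adjustment: y = sqrtx + halfrsqrtx * (x - sqrtx^2).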
    const __m512 vy0 = _mm512_fmadd_ps(vhalfrsqrtx0, vadjustment0, vsqrtx0);
    const __m512 vy1 = _mm512_fmadd_ps(vhalfrsqrtx1, vadjustment1, vsqrtx1);
    const __m512 vy2 = _mm512_fmadd_ps(vhalfrsqrtx2, vadjustment2, vsqrtx2);
    const __m512 vy3 = _mm512_fmadd_ps(vhalfrsqrtx3, vadjustment3, vsqrtx3);
    const __m512 vy4 = _mm512_fmadd_ps(vhalfrsqrtx4, vadjustment4, vsqrtx4);
    const __m512 vy5 = _mm512_fmadd_ps(vhalfrsqrtx5, vadjustment5, vsqrtx5);
    const __m512 vy6 = _mm512_fmadd_ps(vhalfrsqrtx6, vadjustment6, vsqrtx6);
    const __m512 vy7 = _mm512_fmadd_ps(vhalfrsqrtx7, vadjustment7, vsqrtx7);

    _mm512_storeu_ps(y, vy0);
    _mm512_storeu_ps(y + 16, vy1);
    _mm512_storeu_ps(y + 32, vy2);
    _mm512_storeu_ps(y + 48, vy3);
    _mm512_storeu_ps(y + 64, vy4);
    _mm512_storeu_ps(y + 80, vy5);
    _mm512_storeu_ps(y + 96, vy6);
    _mm512_storeu_ps(y + 112, vy7);
    y += 128;
  }
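  // Process remaining full vectors of 16 elements with the same refinement steps.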
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(x);
    x += 16;

    const __m512 vrsqrtx = _mm512_rsqrt14_ps(vx);
    __m512 vsqrtx = _mm512_mul_ps(vrsqrtx, vx);
    __m512 vhalfrsqrtx = _mm512_mul_ps(vrsqrtx, vhalf);
    const __m512 vresidual = _mm512_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm512_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm512_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m512 vadjustment = _mm512_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m512 vy = _mm512_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm512_storeu_ps(y, vy);
    y += 16;
  }
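  // Handle the final partial vector of 1 to 15 elements under a mask.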
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    const __m512 vrsqrtx = _mm512_rsqrt14_ps(vx);
    __m512 vsqrtx = _mm512_mul_ps(vrsqrtx, vx);
    __m512 vhalfrsqrtx = _mm512_mul_ps(vrsqrtx, vhalf);
    const __m512 vresidual = _mm512_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm512_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm512_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m512 vadjustment = _mm512_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m512 vy = _mm512_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}