// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/fma3-nr1fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


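// Mask table for the partial-vector tail: loading 8 consecutive entries
// starting at &mask_table[7] minus the remaining byte count yields a mask
// with an all-ones lane for each remaining element and zero lanes elsewhere.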
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

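// Per-lane sketch of the computation below (one Newton-Raphson iteration
// plus a final adjustment, all fused via FMA), assuming x > 0:
//   r = rsqrt_approx(x)    // ~12-bit estimate from VRSQRTPS
//   s = x * r              // initial sqrt(x) estimate
//   h = 0.5 * r            // initial 0.5/sqrt(x) estimate
//   e = 0.5 - s * h        // Newton-Raphson residual
//   s = s + s * e          // refined sqrt(x) estimate
//   h = h + h * e          // refined 0.5/sqrt(x) estimate
//   a = x - s * s          // residual of the squared estimate
//   y = s + h * a          // adjusted final result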
void xnn_f32_vsqrt_ukernel__fma3_nr1fma1adj_x32(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

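  // Broadcast the 0.5f constant from the kernel parameters across all lanes.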
  const __m256 vhalf = _mm256_broadcast_ss(&params->fma.half);
  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    const __m256 vx2 = _mm256_loadu_ps(x + 16);
    const __m256 vx3 = _mm256_loadu_ps(x + 24);
    x += 32;

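    // Initial reciprocal square root estimates (relative error <= 1.5*2^-12).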
    const __m256 vrsqrtx0 = _mm256_rsqrt_ps(vx0);
    const __m256 vrsqrtx1 = _mm256_rsqrt_ps(vx1);
    const __m256 vrsqrtx2 = _mm256_rsqrt_ps(vx2);
    const __m256 vrsqrtx3 = _mm256_rsqrt_ps(vx3);

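    // Initial estimates: vsqrtx ~ sqrt(x) and vhalfrsqrtx ~ 0.5/sqrt(x).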
    __m256 vsqrtx0 = _mm256_mul_ps(vrsqrtx0, vx0);
    __m256 vhalfrsqrtx0 = _mm256_mul_ps(vrsqrtx0, vhalf);
    __m256 vsqrtx1 = _mm256_mul_ps(vrsqrtx1, vx1);
    __m256 vhalfrsqrtx1 = _mm256_mul_ps(vrsqrtx1, vhalf);
    __m256 vsqrtx2 = _mm256_mul_ps(vrsqrtx2, vx2);
    __m256 vhalfrsqrtx2 = _mm256_mul_ps(vrsqrtx2, vhalf);
    __m256 vsqrtx3 = _mm256_mul_ps(vrsqrtx3, vx3);
    __m256 vhalfrsqrtx3 = _mm256_mul_ps(vrsqrtx3, vhalf);

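    // Newton-Raphson residual e = 0.5 - sqrtx * halfrsqrtx, fused into one
    // FNMADD so it is computed with a single rounding.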
    const __m256 vresidual0 = _mm256_fnmadd_ps(vsqrtx0, vhalfrsqrtx0, vhalf);
    const __m256 vresidual1 = _mm256_fnmadd_ps(vsqrtx1, vhalfrsqrtx1, vhalf);
    const __m256 vresidual2 = _mm256_fnmadd_ps(vsqrtx2, vhalfrsqrtx2, vhalf);
    const __m256 vresidual3 = _mm256_fnmadd_ps(vsqrtx3, vhalfrsqrtx3, vhalf);

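    // One Newton-Raphson step refines both estimates: v <- v + v * e.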
    vhalfrsqrtx0 = _mm256_fmadd_ps(vhalfrsqrtx0, vresidual0, vhalfrsqrtx0);
    vsqrtx0 = _mm256_fmadd_ps(vsqrtx0, vresidual0, vsqrtx0);
    vhalfrsqrtx1 = _mm256_fmadd_ps(vhalfrsqrtx1, vresidual1, vhalfrsqrtx1);
    vsqrtx1 = _mm256_fmadd_ps(vsqrtx1, vresidual1, vsqrtx1);
    vhalfrsqrtx2 = _mm256_fmadd_ps(vhalfrsqrtx2, vresidual2, vhalfrsqrtx2);
    vsqrtx2 = _mm256_fmadd_ps(vsqrtx2, vresidual2, vsqrtx2);
    vhalfrsqrtx3 = _mm256_fmadd_ps(vhalfrsqrtx3, vresidual3, vhalfrsqrtx3);
    vsqrtx3 = _mm256_fmadd_ps(vsqrtx3, vresidual3, vsqrtx3);

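    // Adjustment residual a = x - sqrtx * sqrtx, again with a single rounding.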
    const __m256 vadjustment0 = _mm256_fnmadd_ps(vsqrtx0, vsqrtx0, vx0);
    const __m256 vadjustment1 = _mm256_fnmadd_ps(vsqrtx1, vsqrtx1, vx1);
    const __m256 vadjustment2 = _mm256_fnmadd_ps(vsqrtx2, vsqrtx2, vx2);
    const __m256 vadjustment3 = _mm256_fnmadd_ps(vsqrtx3, vsqrtx3, vx3);

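    // Final adjustment y = sqrtx + halfrsqrtx * a nudges the result toward
    // the correctly rounded square root.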
    const __m256 vy0 = _mm256_fmadd_ps(vhalfrsqrtx0, vadjustment0, vsqrtx0);
    const __m256 vy1 = _mm256_fmadd_ps(vhalfrsqrtx1, vadjustment1, vsqrtx1);
    const __m256 vy2 = _mm256_fmadd_ps(vhalfrsqrtx2, vadjustment2, vsqrtx2);
    const __m256 vy3 = _mm256_fmadd_ps(vhalfrsqrtx3, vadjustment3, vsqrtx3);

    _mm256_storeu_ps(y, vy0);
    _mm256_storeu_ps(y + 8, vy1);
    _mm256_storeu_ps(y + 16, vy2);
    _mm256_storeu_ps(y + 24, vy3);
    y += 32;
  }
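  // Remainder loop: process one 8-element vector at a time with the same
  // Newton-Raphson + adjustment sequence.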
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
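  // Tail of 1-7 elements: load with a lane mask, then store piecewise in
  // 4/2/1-element chunks.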
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}