// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/avx-rr2-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>  // uintptr_t, used for the tail mask-table offset below

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

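  // sigmoid(x) = 1 / (1 + exp(-x)). The exponential is evaluated on z = -|x|
  // so it can never overflow; results for non-negative inputs are recovered at
  // the end via the identity sigmoid(x) = 1 - sigmoid(-x).
  // All constants below were pre-broadcast into the params structure.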
  const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
  const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
  const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
  const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
  const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
  const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
  const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);

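  // Main loop: 16 floats per iteration, as two 8-float AVX vectors processed
  // in parallel to hide instruction latency. An 8-float loop and a masked tail
  // below handle the remainder.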
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    x += 16;

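    // Force the sign bit on: z = -|x|, so exp(z) <= 1.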
    const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
    const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);

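    // n = round(z * log2(e)): adding the large magic bias makes the hardware
    // round for us and leaves the biased exponent of 2**n in the low bits of
    // the float representation.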
    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);

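    // Reconstruct s = 2**n by shifting those low bits into the exponent field.
    // AVX1 has no 256-bit integer shift, so each half is shifted with SSE2 and
    // the halves are reassembled.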
    const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
    const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
    const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
    const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
    const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
    const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);

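    // Subtract the magic bias to recover n as a float.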
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);

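    // Reduced argument t = z - n * ln2, with ln2 split into high and low parts
    // (Cody-Waite style) for extra precision.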
    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);

    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);

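    // Degree-5 polynomial approximation exp(t) ~= 1 + t * p(t), where
    // p(t) = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))), by Horner's rule.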
    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);

    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);

    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);

    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);

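    // Reconstruct e = exp(z) = s * (1 + t * p(t)), computed as s + (t * s) * p(t).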
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);

    const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
    const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);

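    // Denominator of the sigmoid: d = exp(z) + 1.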
    const __m256 vd0 = _mm256_add_ps(ve0, vone);
    const __m256 vd1 = _mm256_add_ps(ve1, vone);

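    // r ~= 1/d from RCPPS (about 12 bits of accuracy), then refined with two
    // Newton-Raphson steps r <- r * (2 - d * r) (the "nr2" in the kernel name).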
    __m256 vr0 = _mm256_rcp_ps(vd0);
    __m256 vr1 = _mm256_rcp_ps(vd1);

    vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
    vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
    vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
    vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));

    __m256 vf0 = _mm256_mul_ps(ve0, vr0);
    __m256 vf1 = _mm256_mul_ps(ve1, vr1);

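    // For z below the cutoff, exp(z) is too small to represent the sigmoid as
    // a normalized float; flush those lanes to exactly 0.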
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);

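    // f currently holds sigmoid(-|x|); blendv keeps it where the sign bit of x
    // is set and substitutes 1 - f for non-negative inputs.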
    vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
    vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);

    _mm256_storeu_ps(y, vf0);
    _mm256_storeu_ps(y + 8, vf1);
    y += 16;
  }
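  // Same computation, one 8-float vector at a time.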
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);

    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vr = _mm256_rcp_ps(vd);
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    __m256 vf = _mm256_mul_ps(ve, vr);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

    _mm256_storeu_ps(y, vf);
    y += 8;
  }
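  // Tail of 1-7 floats: the mask table holds all-ones entries followed by
  // zeros, so loading n bytes before the all-ones/zeros boundary yields a mask
  // with exactly the first n / sizeof(float) lanes enabled.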
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vr = _mm256_rcp_ps(vd);
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    __m256 vf = _mm256_mul_ps(ve, vr);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

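    // Store the valid low lanes of vf: 4, then 2, then 1 floats, according to
    // the bits of the remaining byte count n.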
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vf_lo);
    }
  }
}
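
// Calling sketch (not part of the generated file; the initializer name below
// follows the usual XNNPACK naming pattern and is an assumption, so check the
// params-init sources in your tree for the exact helper):
//
//   union xnn_f32_sigmoid_params params;
//   xnn_init_f32_sigmoid_avx_rr2_p5_params(&params);  // assumed helper name
//   xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x16(
//       batch * sizeof(float),  // element count in bytes, multiple of sizeof(float)
//       input, output, &params);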