// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


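// Computes f32 sigmoid, f(x) = 1 / (1 + exp(-x)), using the AVX RR2-P5 scheme:
// exp(-|x|) is approximated with a degree-5 polynomial after two-step (hi/lo)
// range reduction, the reciprocal of the denominator is refined with two
// Newton-Raphson iterations (nr2), and the main loop processes 56 floats per
// iteration (x56).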
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x56(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
  const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
  const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
  const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
  const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
  const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
  const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);

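  // Main loop: process 56 elements (7 AVX registers of 8 floats) per iteration.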
  for (; n >= 56 * sizeof(float); n -= 56 * sizeof(float)) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    const __m256 vx2 = _mm256_loadu_ps(x + 16);
    const __m256 vx3 = _mm256_loadu_ps(x + 24);
    const __m256 vx4 = _mm256_loadu_ps(x + 32);
    const __m256 vx5 = _mm256_loadu_ps(x + 40);
    const __m256 vx6 = _mm256_loadu_ps(x + 48);
    x += 56;

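    // z = -|x|: setting the sign bit makes the exponent argument non-positive,
    // so exp(z) never overflows.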
    const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
    const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
    const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
    const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
    const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
    const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
    const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);

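    // n = round(z * log2(e)): adding the "magic bias" rounds the product to an
    // integer that lands in the low mantissa bits of the float representation.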
    __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
    __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
    __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
    __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
    __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
    __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
    __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);

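    // Reconstruct s = 2**n by shifting n into the exponent field. AVX (without
    // AVX2) has no 256-bit integer shift, so each 128-bit half is shifted separately.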
    const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
    const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
    const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
    const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
    const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
    const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
    const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
    const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
    const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
    const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
    const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
    const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
    const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
    const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
    const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
    const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
    const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
    const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
    const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
    const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
    const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);

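    // Subtract the magic bias to recover n as an integer-valued float.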
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);

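    // t = z - n * ln2, using a two-term (hi + lo) split of ln2 for extra precision.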
    __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
    __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
    __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
    __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
    __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
    __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
    __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);

    vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
    vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
    vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
    vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
    vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
    vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
    vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);

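    // Evaluate the degree-5 polynomial approximation of exp(t) with Horner's scheme:
    // p = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))).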
    __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
    __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
    __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
    __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
    __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
    __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
    __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);

    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
    vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);

    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
    vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);

    vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
    vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
    vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
    vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
    vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
    vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
    vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);

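    // Reconstruct e = exp(z) ~= s + (t * s) * p.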
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);

    const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
    const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
    const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
    const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
    const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
    const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
    const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);

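    // d = e + 1 is the denominator of sigmoid(z) = e / (e + 1).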
    const __m256 vd0 = _mm256_add_ps(ve0, vone);
    const __m256 vd1 = _mm256_add_ps(ve1, vone);
    const __m256 vd2 = _mm256_add_ps(ve2, vone);
    const __m256 vd3 = _mm256_add_ps(ve3, vone);
    const __m256 vd4 = _mm256_add_ps(ve4, vone);
    const __m256 vd5 = _mm256_add_ps(ve5, vone);
    const __m256 vd6 = _mm256_add_ps(ve6, vone);

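    // Initial reciprocal estimate r ~= 1 / d (RCPPS, roughly 12-bit accuracy).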
    __m256 vr0 = _mm256_rcp_ps(vd0);
    __m256 vr1 = _mm256_rcp_ps(vd1);
    __m256 vr2 = _mm256_rcp_ps(vd2);
    __m256 vr3 = _mm256_rcp_ps(vd3);
    __m256 vr4 = _mm256_rcp_ps(vd4);
    __m256 vr5 = _mm256_rcp_ps(vd5);
    __m256 vr6 = _mm256_rcp_ps(vd6);

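    // Two Newton-Raphson iterations, r = r * (2 - r * d), refine the reciprocal
    // towards full single precision.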
    vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
    vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
    vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
    vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
    vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
    vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
    vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
    vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
    vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
    vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
    vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
    vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
    vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
    vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));

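    // f = e / d = sigmoid(z) = sigmoid(-|x|).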
    __m256 vf0 = _mm256_mul_ps(ve0, vr0);
    __m256 vf1 = _mm256_mul_ps(ve1, vr1);
    __m256 vf2 = _mm256_mul_ps(ve2, vr2);
    __m256 vf3 = _mm256_mul_ps(ve3, vr3);
    __m256 vf4 = _mm256_mul_ps(ve4, vr4);
    __m256 vf5 = _mm256_mul_ps(ve5, vr5);
    __m256 vf6 = _mm256_mul_ps(ve6, vr6);

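    // For very negative z (below the denormal cutoff) exp(z) underflows: flush f to 0.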
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);

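    // sigmoid(x) = 1 - sigmoid(-x) for x >= 0: BLENDV keeps f where the sign bit
    // of x is set (x < 0) and selects 1 - f otherwise.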
    vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
    vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
    vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
    vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
    vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
    vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
    vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);

    _mm256_storeu_ps(y, vf0);
    _mm256_storeu_ps(y + 8, vf1);
    _mm256_storeu_ps(y + 16, vf2);
    _mm256_storeu_ps(y + 24, vf3);
    _mm256_storeu_ps(y + 32, vf4);
    _mm256_storeu_ps(y + 40, vf5);
    _mm256_storeu_ps(y + 48, vf6);
    y += 56;
  }
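  // Remainder loop: process full vectors of 8 elements with the same algorithm.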
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);

    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vr = _mm256_rcp_ps(vd);
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    __m256 vf = _mm256_mul_ps(ve, vr);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

    _mm256_storeu_ps(y, vf);
    y += 8;
  }
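  // Tail: 1-7 remaining elements are handled with a masked load and partial stores.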
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vr = _mm256_rcp_ps(vd);
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
    __m256 vf = _mm256_mul_ps(ve, vr);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vf_lo);
    }
  }
}