// Auto-generated file. Do not edit!
//   Template: src/f32-velu/sse-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


void xnn_f32_velu_ukernel__sse41_rr2_p6_x24(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

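  // Load pre-computed parameters: the ELU prescale/alpha/beta factors and the
  // constants of the exp approximation (saturation cutoff, rounding magic bias,
  // log2(e), a two-term -ln(2) split, and the degree-6 polynomial coefficients).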
  const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
  const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
  const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
  const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
  const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
  const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);

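  // Main loop: process 24 elements per iteration. Each lane computes
  // ELU(x) = beta * x for x >= 0 and alpha * (exp(prescale * x) - 1) for x < 0,
  // with exp evaluated via range reduction and a degree-6 polynomial.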
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    __m128 vx0123 = _mm_loadu_ps(x);
    __m128 vx4567 = _mm_loadu_ps(x + 4);
    __m128 vx89AB = _mm_loadu_ps(x + 8);
    __m128 vxCDEF = _mm_loadu_ps(x + 12);
    __m128 vxGHIJ = _mm_loadu_ps(x + 16);
    __m128 vxKLMN = _mm_loadu_ps(x + 20);
    x += 24;

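    // z = prescale * x, clamped from below at the saturation cutoff: for inputs
    // below it the ELU output has already saturated to -alpha in single precision.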
    const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
    const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
    const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
    const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
    const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
    const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));

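    // n = round(z / ln(2)), computed by multiplying by log2(e) and adding a large
    // "magic bias" so that the rounded integer lands in the low bits of the float.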
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
    __m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);

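    // s = 2^n, reconstructed by shifting those low bits into the exponent field.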
    __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
    __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
    __m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));

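    // Subtract the magic bias to recover n as a regular floating-point value.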
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
    vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);

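    // t = z - n * ln(2), applied in two steps with a high and a low part of ln(2)
    // to keep the reduced argument accurate.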
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
    __m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);

    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
    vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);

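    // Degree-6 polynomial in Horner form: p = t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))),
    // chosen so that t + p * t approximates exp(t) - 1 on the reduced range.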
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
    __m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);

    vp0123 = _mm_mul_ps(vp0123, vt0123);
    vp4567 = _mm_mul_ps(vp4567, vt4567);
    vp89AB = _mm_mul_ps(vp89AB, vt89AB);
    vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
    vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
    vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);

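    // Fold the 2^n scale into t and form s - 1, preparing the reconstruction
    // exp(z) - 1 = s * (exp(t) - 1) + (s - 1).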
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vs0123 = _mm_sub_ps(vs0123, vone);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vs4567 = _mm_sub_ps(vs4567, vone);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vs89AB = _mm_sub_ps(vs89AB, vone);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
    vsCDEF = _mm_sub_ps(vsCDEF, vone);
    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
    vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
    vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
    vsKLMN = _mm_sub_ps(vsKLMN, vone);

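    // p := p * (s * t) + s * t, i.e. approximately s * (exp(t) - 1).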
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
    vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);

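    // Negative-branch result: e = alpha * (s * (exp(t) - 1) + (s - 1)) = alpha * (exp(z) - 1).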
    const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
    const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
    const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
    const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
    const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
    const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);

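    // Positive-branch result: beta * x.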
    vx0123 = _mm_mul_ps(vx0123, vbeta);
    vx4567 = _mm_mul_ps(vx4567, vbeta);
    vx89AB = _mm_mul_ps(vx89AB, vbeta);
    vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
    vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
    vxKLMN = _mm_mul_ps(vxKLMN, vbeta);

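    // Select per lane on the sign bit of beta * x: negative lanes take the ELU
    // branch e, non-negative lanes keep beta * x.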
    const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
    const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
    const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
    const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
    const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
    const __m128 vyKLMN = _mm_blendv_ps(vxKLMN, veKLMN, vxKLMN);

    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    _mm_storeu_ps(y + 16, vyGHIJ);
    _mm_storeu_ps(y + 20, vyKLMN);
    y += 24;
  }
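  // Process remaining groups of 4 elements with the same computation.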
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    __m128 vx = _mm_loadu_ps(x);
    x += 4;

    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    vt = _mm_mul_ps(vt, vs);
    vs = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);

    vx = _mm_mul_ps(vx, vbeta);
    const __m128 vy = _mm_blendv_ps(vx, ve, vx);

    _mm_storeu_ps(y, vy);
    y += 4;
  }
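  // Tail of 1-3 elements: load a full vector (reading past the end is tolerated,
  // as indicated by XNN_OOB_READS), compute as usual, then store only the valid lanes.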
  if XNN_UNLIKELY(n != 0) {
    __m128 vx = _mm_loadu_ps(x);

    const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));

    __m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
    __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    vn = _mm_sub_ps(vn, vmagic_bias);

    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    __m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_mul_ps(vp, vt);

    vt = _mm_mul_ps(vt, vs);
    vs = _mm_sub_ps(vs, vone);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
    const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);

    vx = _mm_mul_ps(vx, vbeta);
    __m128 vy = _mm_blendv_ps(vx, ve, vx);

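    // Store two lanes and shift the upper half down if at least 2 elements remain,
    // then store a single lane if one element remains.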
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy);
    }
  }
}