// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

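// Computes f(x) = sigmoid(x) = 1 / (1 + exp(-x)). The kernel first evaluates
// f(-z) = exp(-z) / (1 + exp(-z)) for z = |x|, then selects f(x) = f(-z) for
// x < 0 and f(x) = 1 - f(-z) for x >= 0.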
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_nr2fma_x12(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

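  // Precomputed range-reduction and polynomial constants, each broadcast from
  // the params structure into all four lanes of a vector.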
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_p5.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_log2e);
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.ln2);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neonfma_rr1_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neonfma_rr1_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_p5.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_p5.denorm_cutoff);

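  // Main loop: process 12 elements (three 128-bit NEON vectors) per iteration.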
  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;

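    // z := |x|; the sign of x is reapplied after sigmoid(-z) is computed.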
    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);
    const float32x4_t vz89AB = vabsq_f32(vx89AB);

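    // n := round(-z / log(2)), via the "magic bias" trick: adding the large
    // vmagic_bias constant forces the result to round to an integer held in
    // the low mantissa bits.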
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);

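    // Reconstruct the scale s := 2**n by shifting the integer bits of the
    // still-biased n into the floating-point exponent field.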
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

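    // Subtract the magic bias back out to recover n as a float.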
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

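    // Reduced argument t := z + n * log(2), so that exp(-z) = 2**n * exp(-t).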
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);

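    // Degree-5 polynomial p(t) = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))),
    // evaluated with Horner's scheme; 1 + t * p(t) approximates exp(-t).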
    float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);

    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);

    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);

    vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);

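    // Reconstruct e := s + (t * s) * p(t) = s * (1 + t * p(t)) ~= exp(-z).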
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);

    const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
    const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
    const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);

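    // Denominator d := e + 1 of sigmoid(-z) = e / (1 + e).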
    const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
    const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
    const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);

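    // Reciprocal r ~= 1/d: hardware estimate refined by two Newton-Raphson
    // iterations, r := r + r * (1 - r * d), each built from a fused
    // multiply-subtract and a fused multiply-add.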
    float32x4_t vr0123 = vrecpeq_f32(vd0123);
    float32x4_t vr4567 = vrecpeq_f32(vd4567);
    float32x4_t vr89AB = vrecpeq_f32(vd89AB);

    vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
    vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
    vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));

    vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
    vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
    vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));

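    // f := e * r ~= e / d = sigmoid(-|x|).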
    float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
    float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
    float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);

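    // Flush f to zero where |x| > denorm_cutoff, i.e. where exp(-|x|)
    // underflows and sigmoid(-|x|) rounds to zero.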
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));

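    // Sign fix-up: sigmoid(x) = f for x < 0, and 1 - f for x >= 0.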
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
    vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
    vst1q_f32(y, vf89AB); y += 4;
  }
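  // Remainder loop: the same computation, one 4-element vector at a time.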
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    const float32x4_t ve = vfmaq_f32(vs, vp, vt);
    const float32x4_t vd = vaddq_f32(ve, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

    float32x4_t vf = vmulq_f32(ve, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
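  // Tail: 1 to 3 elements remain. A full vector is still loaded (permitted by
  // the XNN_OOB_READS annotation, which marks kernels that may read past the
  // end of their inputs); only the valid lanes are stored below.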
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    const float32x4_t ve = vfmaq_f32(vs, vp, vt);
    const float32x4_t vd = vaddq_f32(ve, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

    float32x4_t vf = vmulq_f32(ve, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

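    // Store two lanes if at least two elements remain, then one more lane if
    // the remaining count is odd.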
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}