// Auto-generated file. Do not edit!
//   Template: src/f16-vsigmoid/neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

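// Computes the sigmoid f(x) = 1 / (1 + exp(-x)) over a batch of
// half-precision floats. The kernel name encodes the algorithm: "rr2" =
// two-step range reduction with ln(2) split into high/low parts, "p2" =
// degree-2 polynomial approximation of the exponential on the reduced
// argument, "nr1fma" = one FMA-based Newton-Raphson refinement of the
// reciprocal of the denominator, "x24" = 24 elements per main-loop iteration.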
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1fma_x24(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch % sizeof(__fp16) == 0);

  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_log2e));
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.ln2_hi));
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c1));
  const float16x8_t vone = vmovq_n_f16(1.0f);
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.denorm_cutoff));

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
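  // Main loop: process 24 halfwords (three 8-lane NEON registers) per iteration.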
  for (; batch >= 24 * sizeof(__fp16); batch -= 24 * sizeof(__fp16)) {
    const float16x8_t vx0 = vld1q_f16(i); i += 8;
    const float16x8_t vx1 = vld1q_f16(i); i += 8;
    const float16x8_t vx2 = vld1q_f16(i); i += 8;

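    // z = |x|: the exponential is evaluated on -z, and the result is
    // reflected (f = 1 - f) for non-negative x at the end.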
    const float16x8_t vz0 = vabsq_f16(vx0);
    const float16x8_t vz1 = vabsq_f16(vx1);
    const float16x8_t vz2 = vabsq_f16(vx2);

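    // n = round(-z * log2(e)) via the magic-bias trick: adding the large
    // magic bias pushes out the fractional bits, leaving the rounded integer
    // in the low mantissa bits.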
    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);

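    // s = 2**n, reconstructed by shifting n's low bits into the
    // half-precision exponent field (10 mantissa bits).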
    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));

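    // Subtract the magic bias to recover n as a regular float.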
    vn0 = vsubq_f16(vn0, vmagic_bias);
    vn1 = vsubq_f16(vn1, vmagic_bias);
    vn2 = vsubq_f16(vn2, vmagic_bias);

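    // Two-step range reduction ("rr2"): t = z + n * ln(2), with ln(2) split
    // into high and low halves for extra precision.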
    float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
    float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
    float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);

    vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
    vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
    vt2 = vfmaq_f16(vt2, vn2, vln2_lo);

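    // Degree-2 polynomial ("p2"): p = c2 * t + c1, then
    // e ~ exp(-z) = s * (1 + t * p) = s + (t * s) * p.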
    const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
    const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
    const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);

    vt0 = vmulq_f16(vt0, vs0);
    vt1 = vmulq_f16(vt1, vs1);
    vt2 = vmulq_f16(vt2, vs2);

    const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
    const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
    const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);

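    // Denominator of the sigmoid: d = 1 + exp(-z).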
    const float16x8_t vd0 = vaddq_f16(ve0, vone);
    const float16x8_t vd1 = vaddq_f16(ve1, vone);
    const float16x8_t vd2 = vaddq_f16(ve2, vone);

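    // r ~ 1/d: hardware reciprocal estimate refined by one Newton-Raphson
    // step ("nr1fma"): r = r + r * (1 - r * d).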
    float16x8_t vr0 = vrecpeq_f16(vd0);
    float16x8_t vr1 = vrecpeq_f16(vd1);
    float16x8_t vr2 = vrecpeq_f16(vd2);

    const float16x8_t vadj0 = vfmsq_f16(vone, vr0, vd0);
    const float16x8_t vadj1 = vfmsq_f16(vone, vr1, vd1);
    const float16x8_t vadj2 = vfmsq_f16(vone, vr2, vd2);

    vr0 = vfmaq_f16(vr0, vr0, vadj0);
    vr1 = vfmaq_f16(vr1, vr1, vadj1);
    vr2 = vfmaq_f16(vr2, vr2, vadj2);

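    // f = exp(-z) / (1 + exp(-z)) = sigmoid(-z).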
    float16x8_t vf0 = vmulq_f16(ve0, vr0);
    float16x8_t vf1 = vmulq_f16(ve1, vr1);
    float16x8_t vf2 = vmulq_f16(ve2, vr2);

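    // Where |x| exceeds the denormal cutoff, exp(-|x|) underflows: flush f to
    // zero so the final result saturates to exactly 0 or 1.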
    vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
    vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
    vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));

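    // For x >= 0, reflect: sigmoid(x) = 1 - sigmoid(-x).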
    const uint16x8_t vm0 = vcltq_f16(vx0, vmovq_n_f16(0.0f));
    const uint16x8_t vm1 = vcltq_f16(vx1, vmovq_n_f16(0.0f));
    const uint16x8_t vm2 = vcltq_f16(vx2, vmovq_n_f16(0.0f));

    vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
    vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
    vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));

    vst1q_f16(o, vf0); o += 8;
    vst1q_f16(o, vf1); o += 8;
    vst1q_f16(o, vf2); o += 8;
  }
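  // Tail loop: same computation, one 8-lane vector at a time.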
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vx = vld1q_f16(i); i += 8;

    const float16x8_t vz = vabsq_f16(vx);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);
    const float16x8_t ve = vfmaq_f16(vs, vp, vt);
    const float16x8_t vd = vaddq_f16(ve, vone);

    float16x8_t vr = vrecpeq_f16(vd);
    const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
    vr = vfmaq_f16(vr, vr, vadj);

    float16x8_t vf = vmulq_f16(ve, vr);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

    vst1q_f16(o, vf); o += 8;
  }
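  // Remainder of 1-7 halfwords: compute a full vector (the out-of-bounds
  // load is sanctioned by XNN_OOB_READS) and store only the valid lanes.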
  if XNN_UNLIKELY(batch != 0) {
    const float16x8_t vx = vld1q_f16(i);

    const float16x8_t vz = vabsq_f16(vx);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);
    const float16x8_t ve = vfmaq_f16(vs, vp, vt);
    const float16x8_t vd = vaddq_f16(ve, vone);

    float16x8_t vr = vrecpeq_f16(vd);
    const float16x8_t vadj = vfmsq_f16(vone, vr, vd);
    vr = vfmaq_f16(vr, vr, vadj);

    float16x8_t vf = vmulq_f16(ve, vr);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

    float16x4_t vf_lo = vget_low_f16(vf);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 4;
      vf_lo = vget_high_f16(vf);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 2;
      vf_lo = vext_f16(vf_lo, vf_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vf_lo, 0);
    }
  }
}