// Auto-generated file. Do not edit!
//   Template: src/f16-vsigmoid/neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

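// Vectorized sigmoid kernel: computes f(x) = 1 / (1 + exp(-x)) on half-precision
// inputs using the NEON FP16 arithmetic extension. In XNNPACK's kernel naming,
// "rr2" denotes range reduction against a two-word (hi/lo) ln(2) constant, "p2"
// a degree-2 polynomial approximation of exp(-t), "div" a true division for the
// final reciprocal, and "x16" a main loop that handles 16 elements per iteration.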
void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_div_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch % sizeof(__fp16) == 0);

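  // Broadcast the range-reduction constants, polynomial coefficients, and the
  // saturation cutoff from the parameter structure into FP16 vectors.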
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_log2e));
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.ln2_hi));
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c1));
  const float16x8_t vone = vmovq_n_f16(1.0f);
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.denorm_cutoff));

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
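  // Main loop: process 16 half-precision elements per iteration as two
  // independent 8-lane vectors to expose instruction-level parallelism.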
  for (; batch >= 16 * sizeof(__fp16); batch -= 16 * sizeof(__fp16)) {
    const float16x8_t vx0 = vld1q_f16(i); i += 8;
    const float16x8_t vx1 = vld1q_f16(i); i += 8;

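    // z = |x|: the approximation evaluates sigmoid(-z) = exp(-z) / (1 + exp(-z))
    // and restores the sign of x at the end.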
    const float16x8_t vz0 = vabsq_f16(vx0);
    const float16x8_t vz1 = vabsq_f16(vx1);

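    // n = round(-z * log2(e)), computed with the magic-bias trick: adding a large
    // constant rounds the product to an integer held in the low mantissa bits.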
    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);

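    // s = 2**n: shift the biased integer bits of n into the FP16 exponent field
    // (half precision has 10 mantissa bits).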
    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));

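    // Subtract the magic bias to recover n as a floating-point value.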
    vn0 = vsubq_f16(vn0, vmagic_bias);
    vn1 = vsubq_f16(vn1, vmagic_bias);

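    // t = z + n * ln(2), accumulated against the hi and lo parts of ln(2) for
    // extra precision; t is the reduced argument in exp(-z) = 2**n * exp(-t).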
    float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
    float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);

    vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
    vt1 = vfmaq_f16(vt1, vn1, vln2_lo);

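    // Reconstruct e = s * (1 + t * (c1 + c2 * t)) ~= exp(-z) from the degree-2
    // polynomial in the reduced argument t, scaled by s = 2**n.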
    const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
    const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);

    vt0 = vmulq_f16(vt0, vs0);
    vt1 = vmulq_f16(vt1, vs1);

    const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
    const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);

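    // f = e / (e + 1) = sigmoid(-z).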
    const float16x8_t vd0 = vaddq_f16(ve0, vone);
    const float16x8_t vd1 = vaddq_f16(ve1, vone);

    float16x8_t vf0 = vdivq_f16(ve0, vd0);
    float16x8_t vf1 = vdivq_f16(ve1, vd1);

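    // For |x| above denorm_cutoff, exp(-z) underflows; clear those lanes so the
    // saturated result is exactly 0 (or exactly 1 after the sign fixup below).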
    vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
    vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));

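    // Restore the sign: sigmoid(x) = f for x < 0, and 1 - f for x >= 0.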
    const uint16x8_t vm0 = vcltq_f16(vx0, vmovq_n_f16(0.0f));
    const uint16x8_t vm1 = vcltq_f16(vx1, vmovq_n_f16(0.0f));

    vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
    vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));

    vst1q_f16(o, vf0); o += 8;
    vst1q_f16(o, vf1); o += 8;
  }
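  // Process any remaining full 8-element vector with the same computation.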
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vx = vld1q_f16(i); i += 8;

    const float16x8_t vz = vabsq_f16(vx);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);
    const float16x8_t ve = vfmaq_f16(vs, vp, vt);
    const float16x8_t vd = vaddq_f16(ve, vone);

    float16x8_t vf = vdivq_f16(ve, vd);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

    vst1q_f16(o, vf); o += 8;
  }
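  // Tail: 1-7 leftover elements. A full vector is loaded (the kernel is annotated
  // XNN_OOB_READS, allowing reads past the end of the input buffer) and only the
  // valid lanes are stored.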
  if XNN_UNLIKELY(batch != 0) {
    const float16x8_t vx = vld1q_f16(i);

    const float16x8_t vz = vabsq_f16(vx);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);
    const float16x8_t ve = vfmaq_f16(vs, vp, vt);
    const float16x8_t vd = vaddq_f16(ve, vone);

    float16x8_t vf = vdivq_f16(ve, vd);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

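    // Store 4, 2, and/or 1 lanes depending on how many elements remain.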
    float16x4_t vf_lo = vget_low_f16(vf);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 4;
      vf_lo = vget_high_f16(vf);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 2;
      vf_lo = vext_f16(vf_lo, vf_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vf_lo, 0);
    }
  }
}