// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

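// Table of 2**(-k/64) values, k = 0..63, used to reconstruct 2**n in the
// LUT-based exp approximation below.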
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
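  // n is the size of the input in bytes and must be a whole number of floats.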
  assert(n % sizeof(float) == 0);

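  // Load the kernel parameters: the magic bias and -log2(e) used for argument
  // reduction, ln(2) and c2 for the polynomial approximation, and the cutoff
  // beyond which the result is flushed to zero.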
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

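    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f(x) :=
    //           \ 1 - f(-x) if x >= 0
    //
    // First compute f(-z) = exp(-z) / (1 + exp(-z)) where z = abs(x), then
    // replace the result with 1 - f(-z) if x >= 0.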
    const float32x4_t vz = vabsq_f32(vx);

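    // Compute the reduced argument n := round(-z / ln(2)) to the nearest
    // multiple of 1/64 using the magic-bias trick: after the FMA, the low
    // mantissa bits of vn hold round(-z * 64 / ln(2)) as an integer, and
    // shifting those bits left by 17 moves the integer part of n into the
    // floating-point exponent field (ve).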
    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

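    // The low 6 bits of the rounded scaled argument index the 64-entry table
    // of 2**(-k/64) values. NEON has no gather instruction, so the four
    // lookups go through two 64-bit lane extracts.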
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

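    // Adjust the exponent of the table value to reconstruct s := 2**n, then
    // compute the remainder t := z + n * ln(2), with |t| <= ln(2) / 128.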
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

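    // Approximate exp(-t) on [-ln(2)/128, ln(2)/128] with a degree-2
    // polynomial: exp(-t) ~ 1 - t + c2 * t**2 = 1 - p, where p := t - c2 * t**2.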
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmsq_f32(vt, vp, vt);

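    // Reconstruct e := exp(-z) = s * (1 - p) (held in vy) and the
    // denominator d := e + 1.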
    const float32x4_t vy = vfmsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

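    // Compute r := 1 / d starting from a reciprocal estimate and refining it
    // with two Newton-Raphson steps: one via VRECPS and one via FMA (hence
    // "nr1recps1fma" in the kernel name).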
    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

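    // f := e / (1 + e) = sigmoid(-z). Flush the result to zero where |x|
    // exceeds the cutoff (sigmoid(-|x|) underflows there), then select f for
    // x < 0 and 1 - f for x >= 0.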
    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
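  // Handle the final 1-3 elements by replaying the computation above on one
  // partial vector. The full-width load may read past the end of the input
  // (the kernel is declared XNN_OOB_READS); only the valid lanes are stored.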
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmsq_f32(vt, vp, vt);

    const float32x4_t vy = vfmsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

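    // Store the valid lanes only: two lanes if at least 8 bytes remain, then
    // one lane if 4 bytes remain.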
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}