// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


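// Table of 2**(-k/64) values, k = 0..63, used to reconstruct 2**n from its fractional bits.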
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

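    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f(x) :=
    //           \ 1 - f(-x)             if x >= 0
    //
    // First compute f(-z) := exp(-z) / (1 + exp(-z)) with z = abs(x), then replace the result
    // with 1 - f(-z) for non-negative x.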
    const float32x4_t vz = vabsq_f32(vx);

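    // Compute the reduced argument n := round(-z / log(2), 6) by adding a large magic bias, which rounds the
    // result to 6 fractional bits; the addition is folded with the multiplication by -log2(e) into one
    // multiply-accumulate. Inputs large enough to break this trick also saturate or underflow sigmoid(x) and
    // are fixed up at the end via the denormal cutoff.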
    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
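    // Shift bits 6:14 of n, as integer, into the floating-point exponent field (bits 23:31); adding this to
    // the table value below sets the exponent of the scale s := 2**n.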
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

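    // Use the low 6 bits of n, as integer, as an index into the 2**(-k/64) table. The two 64-bit lanes are
    // moved to general-purpose registers to address the table.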
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

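    // Adjust the exponent of the looked-up value l to reconstruct the scale s := 2**n.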
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
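    // Subtract the magic bias to recover n as a float, then compute the reduced argument t := z + n * log(2),
    // with log(2) split into hi and lo parts (the "rr2" two-step range reduction) for extra precision.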
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

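    // Degree-2 polynomial approximation of exp(-t) on [-log(2)/128, log(2)/128]:
    //   exp(-t) ~ 1 - p, where p := t - c2 * t * t.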
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

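    // Reconstruct exp(-z) = s * (1 - p) and the denominator d := exp(-z) + 1.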
    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

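    // Reciprocal r := 1 / d: VRECPE estimate refined by two Newton-Raphson steps (VRECPS).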
    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

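    // f := exp(-z) / (1 + exp(-z)). Flush lanes where |x| exceeds the denormal cutoff to zero (sigmoid
    // underflows there), then select f for negative x and 1 - f otherwise.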
    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
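  // Handle the remaining 1-3 elements: a full vector is loaded (the kernel is marked XNN_OOB_READS), and only
  // the valid lanes are stored at the end.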
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

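    // Store the remaining 1-3 results lane by lane.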
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}