// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>

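// This microkernel evaluates the "extended" ELU activation on a vector of
// floats, 4 elements per loop iteration:
//
//   y = beta * x                             for x >= 0
//   y = alpha * (exp(prescale * x) - 1)      for x < 0
//
// The exponential is computed as 2**n * exp(t), with a degree-6 polynomial
// for exp(t) ("p6") and the range reduction done against ln(2) split into
// high and low words ("rr2").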
void xnn_f32_velu_ukernel__neon_rr2_p6_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

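  // Broadcast the scalar ELU parameters across all 4 lanes.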
  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

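  // Constants for the exp(z) - 1 approximation:
  //  - vsat_cutoff: below this z, exp(z) - 1 rounds to -1 in single
  //    precision, so z is clamped here and the result saturates cleanly.
  //  - vmagic_bias: adding it to z * log2(e) rounds the product to an
  //    integer n and leaves n in the low mantissa bits.
  //  - vc6..vc2: polynomial coefficients for exp(t) on the reduced range.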
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E440p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.0105C6p-21f);
  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

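    // z := prescale * x, clamped from below at the saturation threshold.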
    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

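    // n := round(z * log2(e)) via the magic-bias trick; shifting the
    // biased value's low bits into the exponent field yields s := 2**n.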
    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);

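    // t := z - n * ln(2), using the two-word ("rr2") representation of
    // ln(2) to reduce cancellation error.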
    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

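    // p := c2*t + c3*t^2 + ... + c6*t^5, evaluated with Horner's scheme,
    // so that exp(t) ~= 1 + t + t*p.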
    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
    vp = vmlaq_f32(vc4, vp, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmulq_f32(vp, vt);

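    // Reconstruct expm1(z) = s * (t + t*p) + (s - 1) = s * exp(t) - 1,
    // then multiply by alpha for the negative-input branch of ELU.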
    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

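    // Select per lane: alpha * expm1(prescale * x) for x < 0, beta * x
    // otherwise.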
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
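  // Handle the remaining 1 to 3 elements.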
  if XNN_UNLIKELY(n != 0) {
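    // A full 4-element vector is loaded even for a partial tail; the
    // caller is expected to guarantee that these bytes are addressable.
    // The computation below mirrors the main loop body.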
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
    vp = vmlaq_f32(vc4, vp, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

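    // Store only the valid lanes: the low pair first when at least 2
    // elements remain, then a single lane for an odd count.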
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}