// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


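// ELU micro-kernel (NEON+FMA, single-step range reduction, degree-6 polynomial,
// 12 elements per main-loop iteration):
//   y := beta * x                        for x >= 0
//   y := alpha * (exp(prescale * x) - 1) for x < 0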
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x12(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

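  // Constants for approximating exp(z) - 1: saturation cutoff, magic bias used for
  // round-to-nearest extraction of the exponent, log2(e), -ln(2), and the degree-6
  // polynomial coefficients c2..c6.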
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
  const float32x4_t vc6 = vmovq_n_f32(0x1.6b7338p-10f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.12278Ep-7f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.555716p-5f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.5554B0p-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFFFEp-2f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

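  // Main loop: process 12 elements (three vectors of four floats) per iteration.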
  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(x); x += 4;
    float32x4_t vx4567 = vld1q_f32(x); x += 4;
    float32x4_t vx89AB = vld1q_f32(x); x += 4;

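    // z = max(prescale * x, sat_cutoff): below the cutoff exp(z) - 1 saturates to -1,
    // so clamping avoids underflow in the intermediate computation.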
    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);

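    // n = round(z * log2(e)), computed by adding a magic bias so the integer part
    // lands in the low mantissa bits.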
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);

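    // s = 2**n, built by shifting those low bits into the exponent field; then
    // subtract the magic bias to recover n as a float.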
    float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

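    // t = z - n * ln(2): the reduced argument for the polynomial approximation.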
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);

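    // Evaluate p = c2*t + c3*t^2 + ... + c6*t^5 in Horner form, so that
    // exp(t) ~= 1 + t + t*p.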
    float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);

    vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);

    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);

    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);

    vp0123 = vmulq_f32(vp0123, vt0123);
    vp4567 = vmulq_f32(vp4567, vt4567);
    vp89AB = vmulq_f32(vp89AB, vt89AB);

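    // Pre-compute t*s and s - 1 for the reconstruction step below.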
    vt0123 = vmulq_f32(vt0123, vs0123);
    vs0123 = vsubq_f32(vs0123, vone);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vs4567 = vsubq_f32(vs4567, vone);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vs89AB = vsubq_f32(vs89AB, vone);

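    // Reconstruct exp(z) - 1 = (s - 1) + t*s + p*(t*s), then scale by alpha to get
    // the negative-branch ELU value.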
    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
    vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);

    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);

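    // Select alpha * (exp(z) - 1) for negative inputs and beta * x otherwise.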
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    vx0123 = vmulq_f32(vx0123, vbeta);
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    vx4567 = vmulq_f32(vx4567, vbeta);
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    vx89AB = vmulq_f32(vx89AB, vbeta);

    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
  }
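  // Process remaining full vectors of four elements with the same computation.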
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);

    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
    vp = vfmaq_f32(vc4, vp, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
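  // Handle the final 1-3 elements: a full vector is loaded and computed, but only
  // the valid lanes are stored.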
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);

    float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
    vp = vfmaq_f32(vc4, vp, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}