// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>

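// Lookup table of 2^(-k/16) for k = 0..15, stored as IEEE-754 single-precision bit patterns.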
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

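  // This microkernel evaluates the (extended) ELU:
  //   y := beta * x                           for x >= 0
  //   y := alpha * (exp(prescale * x) - 1)    for x < 0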
  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

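  // Constants for the exp(z) - 1 approximation on z <= 0:
  // - vsat_cutoff clamps z: below it, exp(z) - 1 rounds to -1 in single precision, so the result is -alpha.
  // - vmagic_bias (0x1.8p+19) forces rounding of z * log2(e) to the nearest 1/16, leaving the rounded
  //   value in the low mantissa bits: 4 fractional bits index the table, the higher bits form the exponent.
  // - vc3 ~= 1/6 and vc2 ~= 1/2 are the coefficients of the degree-3 polynomial approximation of exp(t) - 1.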
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

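  // Main loop: process two vectors (8 elements) per iteration.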
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(x); x += 4;
    float32x4_t vx4567 = vld1q_f32(x); x += 4;

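    // z := prescale * x, clamped from below at the saturation cutoff. The cutoff is negative, so
    // positive inputs are unaffected; their exp-path result is discarded by the blend at the end.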
    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);

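    // n := z * log2(e), rounded to the nearest 1/16 via the magic-bias trick: after the FMA, the
    // low mantissa bits of vn hold the rounded value while the float still equals vmagic_bias + n.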
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);

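    // Split n: the low 4 mantissa bits select a table entry (shifted by 2 into a byte offset), and
    // the remaining bits are shifted left by 19 = 23 - 4 to land in the float exponent field.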
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);

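    // Gather the four table entries per vector: extract the packed byte offsets two lanes at a
    // time and load each 32-bit entry individually, since NEON has no gather instruction.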
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);

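    // Recover n as a float by subtracting the magic bias, and reconstruct the scale s := 2^n by
    // adding the exponent bits into the table entry's bit pattern, then reinterpreting as float.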
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));

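    // Reduced argument t := z - n * log(2), so that exp(z) = s * exp(t) with |t| <= log(2)/32.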
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);

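    // Degree-3 polynomial: p := t + c2*t^2 + c3*t^3 ~= exp(t) - 1, folded with the scale so that
    // after the final FMA, vp holds s * (exp(t) - 1) and vs holds s - 1.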
    float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);

    vp0123 = vmulq_f32(vp0123, vt0123);
    vp4567 = vmulq_f32(vp4567, vt4567);

    vt0123 = vmulq_f32(vt0123, vs0123);
    vs0123 = vsubq_f32(vs0123, vone);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vs4567 = vsubq_f32(vs4567, vone);

    vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
    vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);

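    // e := alpha * (s * (exp(t) - 1) + (s - 1)) = alpha * (exp(z) - 1), the negative-input result.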
    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

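    // Blend per lane: y := x < 0 ? alpha * (exp(prescale * x) - 1) : beta * x.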
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    vx0123 = vmulq_f32(vx0123, vbeta);
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    vx4567 = vmulq_f32(vx4567, vbeta);

    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
  }
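  // Tail loop: same computation as above, one 4-element vector at a time.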
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
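  // Remainder of 1-3 elements: compute a full vector (the load may read up to three elements past
  // the end of x, relying on XNNPACK's padded buffer allocations), then store only the valid lanes.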
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}