// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

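  // ELU parameters: y = beta * x for x >= 0, y = alpha * expm1(prescale * x) for x < 0.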
  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

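  // Constants for the "lut16-p3" expm1 approximation: a 16-entry table of
  // 2**(-k/16) values combined with a degree-3 polynomial.
  //  - vsat_cutoff: inputs clamped here, where expm1(z) saturates to -1
  //  - vmagic_bias: rounds z * log2(e) to the nearest 1/16 via float addition
  //  - vindex_mask: the low 4 bits of the rounded value index the table
  //  - vminus_ln2: single-constant ("rr1") range reduction
  //  - vc3, vc2: polynomial coefficients for the reduced argument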
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

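  // Main loop: process 4 elements per iteration.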
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

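    // z := prescale * x, clamped so the approximation saturates to -1
    // for very negative inputs instead of losing precision.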
    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

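    // n := z * log2(e) rounded to the nearest 1/16 via the magic-bias trick:
    // the low 4 mantissa bits become the table index (shifted left by 2 into
    // byte offsets), and the bits above them become the exponent adjustment ven.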
    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

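    // Gather the four table entries addressed by the byte offsets packed in vidx.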
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

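    // Undo the magic bias to recover n as a float.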
    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

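    // t := z - n * ln(2) is the reduced argument; s := 2**n is reassembled from
    // the table mantissa vl and the exponent bits ven.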
    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

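    // Degree-3 polynomial, refactored so that in the end
    //   alpha * expm1(z) ~= alpha * ((s - 1) + s * t * (1 + c2*t + c3*t^2)).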
    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

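    // Select per lane: negative inputs take the alpha * expm1 path (ve),
    // non-negative inputs take beta * x.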
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
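  // Tail of 1-3 elements: a full vector is loaded and computed exactly as in
  // the main loop, but only the valid lanes are stored.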
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vfmaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

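    // Partial store: write 2 lanes if the 2-float bit of n is set, then 1 more
    // lane if the 1-float bit is set.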
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}