// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


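// Table of exp2(k / 16) values, decremented (as integer) by (k << 19), k = 0..15.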
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];

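// ELU nonlinearity, y := alpha * (exp(prescale * x) - 1) for x < 0 and y := beta * x otherwise,
// evaluated with a 16-entry exp2 lookup table and a degree-3 polynomial ("lut16_p3").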
void xnn_f32_velu_ukernel__${"neonfma" if FMA else "neon"}_rr${1 if FMA else 2}_lut16_p3_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

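  // Constants for the expm1 approximation: the saturation cutoff (about -17.328680, below which
  // expm1f(z) == -1.0f in float precision), the magic bias used to round n to 4 fractional bits,
  // log2(e), the table index mask, -log(2) (split in two for the non-FMA Cody-Waite reduction),
  // and the polynomial coefficients c3 and c2.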
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  $if FMA:
    const float32x4_t vminus_ln2 = vmovq_n_f32(-0x1.62E430p-1f);
  $else:
    const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
    const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

  $if BATCH_TILE > 4:
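    // Main loop: process batches of ${BATCH_TILE} elements per iteration.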
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;

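      // z := max(prescale * x, sat_cutoff). Inputs below the cutoff all produce expm1(z) == -1,
      // so the clamp does not change results; lanes with x >= 0 are overwritten by the final blend.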
      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vz${ABC[N:N+4]} = vmaxq_f32(vmulq_f32(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);

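      // n := round(z / log(2), 4 fractional bits) via the magic-bias trick: adding 0x1.8p19
      // rounds z * log2(e) to a multiple of 1/16 in the low mantissa bits.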
      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vz${ABC[N:N+4]}, vlog2e);

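      // The 4 low bits of n (as integer) index the 16-entry table (shifted left by 2 to form
      // byte offsets); the bits above them, shifted into the exponent field (bit 23 and up),
      // later reconstruct the scale s := 2**n.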
      $for N in range(0, BATCH_TILE, 4):
        const uint64x2_t vidx${ABC[N:N+4]} = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), vindex_mask), 2));
        const int32x4_t ven${ABC[N:N+4]} = vshlq_n_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), 19);

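      // Fetch the four table entries with scalar loads (NEON has no gather instruction).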
      $for N in range(0, BATCH_TILE, 4):
        const uint64_t vidx${ABC[N:N+2]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 0);
        const uint64_t vidx${ABC[N+2:N+4]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 1);
        int32x2_t vl${ABC[N:N+2]} = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N:N+2]}));
        int32x2_t vl${ABC[N+2:N+4]} = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N+2:N+4]}));
        vl${ABC[N:N+2]} = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32)), vl${ABC[N:N+2]}, 1);
        vl${ABC[N+2:N+4]} = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32)), vl${ABC[N+2:N+4]}, 1);
        const int32x4_t vl${ABC[N:N+4]} = vcombine_s32(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});

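      // Subtract the magic bias to recover n, and add the exponent bits to the table entries to
      // form s := 2**n.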
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);
        float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vaddq_s32(vl${ABC[N:N+4]}, ven${ABC[N:N+4]}));

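      // t := z - n * log(2). Without FMA, log(2) is split into high and low parts (Cody-Waite
      // range reduction) to preserve accuracy.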
      $if FMA:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2);
      $else:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_hi);

        $for N in range(0, BATCH_TILE, 4):
          vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_lo);

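      // Degree-3 polynomial approximation of expm1(t) on [-log(2)/32, log(2)/32]:
      //   expm1(t) ~ t + t * p, where p := t * (c2 + t * c3).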
      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc2, vc3, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = vmulq_f32(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

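      // Reconstruct expm1(z) = s * expm1(t) + (s - 1) as (t * s) + (t * s) * p + (s - 1),
      // then scale by alpha.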
      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
        vs${ABC[N:N+4]} = vsubq_f32(vs${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t ve${ABC[N:N+4]} = vmulq_f32(vaddq_f32(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);

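      // Select the ELU branch: y := alpha * expm1(prescale * x) for x < 0, y := beta * x otherwise.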
      $for N in range(0, BATCH_TILE, 4):
        const uint32x4_t vm${ABC[N:N+4]} = vcltq_f32(vx${ABC[N:N+4]}, vmovq_n_f32(0.0f));
        vx${ABC[N:N+4]} = vmulq_f32(vx${ABC[N:N+4]}, vbeta);

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vy${ABC[N:N+4]} = vbslq_f32(vm${ABC[N:N+4]}, ve${ABC[N:N+4]}, vx${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vst1q_f32(y, vy${ABC[N:N+4]}); y += 4;
    }
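  // Process the remaining full vectors of 4 elements with the same computation.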
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    $if FMA:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2);
    $else:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2_hi);
      vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = ${VMULADDQ_F32}(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = ${VMULADDQ_F32}(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
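  // Tail of 1-3 elements: compute a full vector and store only the valid lanes.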
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    $if FMA:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2);
    $else:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vminus_ln2_hi);
      vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = ${VMULADDQ_F32}(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = ${VMULADDQ_F32}(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

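    // Store the remaining 1-3 lanes: the low pair first if the 2-element bit of n is set,
    // then a single lane.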
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}