// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert ELEMENTS_TILE % 4 == 0
$assert ELEMENTS_TILE >= 4
$SIMD_TILE = ELEMENTS_TILE // 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>


void xnn_f32_raddstoreexpminusmax_ukernel__${"neonfma" if FMA else "neon"}_p5_x${ELEMENTS_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t elements,
    const float* input,
    float* output,
    float* sum,
    float max) XNN_DISABLE_TSAN
{
  assert(elements % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.8000FEp23f);
  // The smallest x for which expf(x) is normalized.
  const float32x4_t vdenorm_cutoff = vmovq_n_f32(-0x1.5D589Ep6f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  $if FMA:
    const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E43p-1f);
    const float32x4_t vminus_ln2_lo = vmovq_n_f32(0x1.05C61p-29f);
  $else:
    // Last 7 bits are zeroes
    const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
    const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);

  const float32x4_t vc1 = vmovq_n_f32(0x1.FFFFF6p-1f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.FFFDC6p-2f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.555A80p-3f);
  const float32x4_t vc4 = vmovq_n_f32(0x1.573A1Ap-5f);
  const float32x4_t vc5 = vmovq_n_f32(0x1.0F9F9Cp-7f);

  const float32x4_t vi_max = vdupq_n_f32(max);

  $if ELEMENTS_TILE > 4:
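    // Partial sums of the computed exponents. With ACCUMULATORS > 1 the additions in the main loop are spread
    // across several independent accumulators, so they do not form one long serial dependency chain.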
    $for K in range(ACCUMULATORS):
      float32x4_t vacc${K} = vmovq_n_f32(0.0f);
    for (; elements >= ${ELEMENTS_TILE} * sizeof(float); elements -= ${ELEMENTS_TILE} * sizeof(float)) {
      // Load ${ELEMENTS_TILE} (${SIMD_TILE}x4) inputs at a time.
      $for N in range(0, ELEMENTS_TILE, 4):
        const float32x4_t vi${ABC[N:N+4]} = vld1q_f32(input); input += 4;

      // Subtract maximum input x := i - i_max. This implies x <= 0.
      $for N in range(0, ELEMENTS_TILE, 4):
        const float32x4_t vx${ABC[N:N+4]} = vsubq_f32(vi${ABC[N:N+4]}, vi_max);

      // Compute reduced argument n := round(x / log(2)).
      // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
      // subtracting the large number back. The first addition is combined with multiplication by log2e into a
      // single FMA instruction. The trick with adding a large number is valid only within certain bounds
      // (|x| <= 2**22), but that's OK, because inputs outside of [-87.336540, 0.0] underflow expf(x) anyway.
      // We fix up the result for such inputs at the very end of the algorithm.
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vx${ABC[N:N+4]}, vlog2e);

      // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
      // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
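      // (vn still contains the magic bias here: the low bits of its binary representation hold n + 127, the biased
      // exponent of 2**n, so shifting the raw bits left by 23 moves that value into the exponent field and yields s.)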
      $for N in range(0, ELEMENTS_TILE, 4):
        const float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), 23));

      // Subtract the large number back to get final n := round(x / log(2)).
      $for N in range(0, ELEMENTS_TILE, 4):
        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);

      // Compute reduced argument t := x - n * log(2).
      // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
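      // (ln2_hi carries the leading bits of log(2) and ln2_lo the remainder; in the non-FMA variant ln2_hi has
      // trailing zero bits so that n * ln2_hi is exact, which keeps the two-step subtraction accurate.)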
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vx${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_hi);

      $for N in range(0, ELEMENTS_TILE, 4):
        vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vminus_ln2_lo);

      // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc4, vc5, vt${ABC[N:N+4]});

      $for N in range(0, ELEMENTS_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc3, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, ELEMENTS_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc2, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, ELEMENTS_TILE, 4):
        vp${ABC[N:N+4]} = ${VMULADDQ_F32}(vc1, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      // Reconstruct the final f value:
      //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
      //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
      //     = s + (t * s) * p
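      // (Evaluating the result as s + (t * s) * p lets the final step be a single multiply-add with s and avoids
      // forming the intermediate value 1 + t * p explicitly.)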
      $for N in range(0, ELEMENTS_TILE, 4):
        vt${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});

      $for N in range(0, ELEMENTS_TILE, 4):
        float32x4_t vf${ABC[N:N+4]} = ${VMULADDQ_F32}(vs${ABC[N:N+4]}, vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      // For inputs below denormal cutoff, replace output with +0.0f.
      // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
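      // (vcltq_f32 produces an all-ones lane mask where x < cutoff; vbicq_u32 clears exactly those lanes to +0.0f.)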
      $for N in range(0, ELEMENTS_TILE, 4):
        vf${ABC[N:N+4]} = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf${ABC[N:N+4]}), vcltq_f32(vx${ABC[N:N+4]}, vdenorm_cutoff)));

      // Store ${ELEMENTS_TILE} (${SIMD_TILE}x4) outputs at a time.
      $for N in range(0, ELEMENTS_TILE, 4):
        vst1q_f32(output, vf${ABC[N:N+4]}); output += 4;

      // Accumulate computed exponents.
      $for N in range(0, ELEMENTS_TILE, 4):
        vacc${N % ACCUMULATORS} = vaddq_f32(vacc${N % ACCUMULATORS}, vf${ABC[N:N+4]});
    }
    $if ACCUMULATORS > 1:
      // Add up all accumulators to vacc0
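      // (The accumulators are folded pairwise: each pass adds accumulator A + ACC_SLICE into accumulator A,
      // halving the number of live partial sums until only vacc0 remains.)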
      $ACC_SLICE = 1
      $while ACC_SLICE < ACCUMULATORS:
        $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
          $if A + ACC_SLICE < ACCUMULATORS:
            vacc${A} = vaddq_f32(vacc${A}, vacc${A + ACC_SLICE});
        $ACC_SLICE *= 2

    float32x4_t vacc = vacc0;
  $else:
    float32x4_t vacc = vmovq_n_f32(0.0f);
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const float32x4_t vi = vld1q_f32(input); input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float32x4_t vx = vsubq_f32(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a
    // single FMA instruction. The trick with adding a large number is valid only within certain bounds
    // (|x| <= 2**22), but that's OK, because inputs outside of [-87.336540, 0.0] underflow expf(x) anyway.
    // We fix up the result for such inputs at the very end of the algorithm.
    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float32x4_t vt = ${VMULADDQ_F32}(vx, vn, vminus_ln2_hi);
    vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float32x4_t vp = ${VMULADDQ_F32}(vc4, vc5, vt);
    vp = ${VMULADDQ_F32}(vc3, vp, vt);
    vp = ${VMULADDQ_F32}(vc2, vp, vt);
    vp = ${VMULADDQ_F32}(vc1, vp, vt);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = ${VMULADDQ_F32}(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    // Store 4 outputs at a time.
    vst1q_f32(output, vf); output += 4;

    // Accumulate computed exponents.
    vacc = vaddq_f32(vacc, vf);
  }
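  // Partially reduce the accumulated exponents before handling the remainder: on AArch64, vaddvq_f32 adds all four
  // lanes into a single scalar; on 32-bit ARM, the upper and lower halves are folded into a two-lane partial sum
  // that is finished with a pairwise add at the end of the function.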
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    // Load 4 inputs at a time.
    const float32x4_t vi = vld1q_f32(input); input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float32x4_t vx = vsubq_f32(vi, vi_max);

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a
    // single FMA instruction. The trick with adding a large number is valid only within certain bounds
    // (|x| <= 2**22), but that's OK, because inputs outside of [-87.336540, 0.0] underflow expf(x) anyway.
    // We fix up the result for such inputs at the very end of the algorithm.
    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vx, vlog2e);

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    // Subtract the large number back to get final n := round(x / log(2)).
    vn = vsubq_f32(vn, vmagic_bias);

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float32x4_t vt = ${VMULADDQ_F32}(vx, vn, vminus_ln2_hi);
    vt = ${VMULADDQ_F32}(vt, vn, vminus_ln2_lo);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float32x4_t vp = ${VMULADDQ_F32}(vc4, vc5, vt);
    vp = ${VMULADDQ_F32}(vc3, vp, vt);
    vp = ${VMULADDQ_F32}(vc2, vp, vt);
    vp = ${VMULADDQ_F32}(vc1, vp, vt);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = ${VMULADDQ_F32}(vs, vp, vt);

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      vst1_f32(output, vf_lo); output += 2;

      // Accumulate 2 computed exponents.
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      // Store 1 output at a time.
      vst1_lane_f32(output, vf_lo, 0);

      // Accumulate 1 computed exponent.
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
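        // (Shift the remaining exponent from lane 0 into lane 1 and zero lane 0, so that only this one value is
        // added; lane 1 of vf_lo holds an element past the end of the batch that must not be accumulated.)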
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  // Finish reducing the partial sums and store the result.
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}