1 /*
2 * Double-precision vector exp(x) - 1 function.
3 *
4 * Copyright (c) 2022-2023, Arm Limited.
5 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6 */
7
8 #include "v_math.h"
9 #include "pl_sig.h"
10 #include "pl_test.h"
11
#if V_SUPPORTED

/* Constants for the argument reduction x = j*ln2 + f.  */
#define InvLn2 v_f64 (0x1.71547652b82fep0)   /* 1/ln(2).  */
#define MLn2hi v_f64 (-0x1.62e42fefa39efp-1) /* -ln(2), high part.  */
#define MLn2lo v_f64 (-0x1.abc9e3b39803fp-56) /* -ln(2), low part, for extra
						 precision in the reduction.  */
#define Shift v_f64 (0x1.8p52) /* 1.5 * 2^52: adding then subtracting this
				  rounds to the nearest integer.  */
#define TinyBound \
  0x3cc0000000000000 /* 0x1p-51, below which expm1(x) is within 2 ULP of x. */
#define SpecialBound \
  0x40862b7d369a5aa9 /* 0x1.62b7d369a5aa9p+9. For |x| > SpecialBound, the \
			final stage of the algorithm overflows so fall back to \
			scalar. */
#define AbsMask 0x7fffffffffffffff /* Clears the sign bit of a binary64.  */
#define One 0x3ff0000000000000	   /* Bit pattern of 1.0.  */

/* Broadcast the i-th coefficient of the expm1 polynomial table.  */
#define C(i) v_f64 (__expm1_poly[i])
28
29 static inline v_f64_t
eval_poly(v_f64_t f,v_f64_t f2)30 eval_poly (v_f64_t f, v_f64_t f2)
31 {
32 /* Evaluate custom polynomial using Estrin scheme. */
33 v_f64_t p_01 = v_fma_f64 (f, C (1), C (0));
34 v_f64_t p_23 = v_fma_f64 (f, C (3), C (2));
35 v_f64_t p_45 = v_fma_f64 (f, C (5), C (4));
36 v_f64_t p_67 = v_fma_f64 (f, C (7), C (6));
37 v_f64_t p_89 = v_fma_f64 (f, C (9), C (8));
38
39 v_f64_t p_03 = v_fma_f64 (f2, p_23, p_01);
40 v_f64_t p_47 = v_fma_f64 (f2, p_67, p_45);
41 v_f64_t p_8a = v_fma_f64 (f2, C (10), p_89);
42
43 v_f64_t f4 = f2 * f2;
44 v_f64_t p_07 = v_fma_f64 (f4, p_47, p_03);
45 return v_fma_f64 (f4 * f4, p_8a, p_07);
46 }
47
/* Double-precision vector exp(x) - 1 function.
   The maximum observed error is 2.18 ULP:
   __v_expm1(0x1.634ba0c237d7bp-2) got 0x1.a8b9ea8d66e22p-2
				   want 0x1.a8b9ea8d66e2p-2.  */
VPCS_ATTR
v_f64_t V_NAME (expm1) (v_f64_t x)
{
  v_u64_t ix = v_as_u64_f64 (x);
  v_u64_t ax = ix & AbsMask; /* |x| as a bit pattern.  */

#if WANT_SIMD_EXCEPT
  /* If fp exceptions are to be triggered correctly, fall back to the scalar
     variant for all lanes if any of them should trigger an exception.  Tiny
     inputs are included so that underflow is raised where appropriate.  */
  v_u64_t special = v_cond_u64 ((ax >= SpecialBound) | (ax <= TinyBound));
  if (unlikely (v_any_u64 (special)))
    return v_call_f64 (expm1, x, x, v_u64 (-1));
#else
  /* Large input, NaNs and Infs.  -0.0 is also routed to the scalar call so
     the sign of zero is preserved.  */
  v_u64_t special
    = v_cond_u64 ((ax >= SpecialBound) | (ix == 0x8000000000000000));
#endif

  /* Reduce argument to smaller range:
     Let i = round(x / ln2)
     and f = x - i * ln2, then f is in [-ln2/2, ln2/2].
     exp(x) - 1 = 2^i * (expm1(f) + 1) - 1
     where 2^i is exact because i is an integer.  */
  /* Adding and subtracting Shift rounds x/ln2 to the nearest integer.  */
  v_f64_t j = v_fma_f64 (InvLn2, x, Shift) - Shift;
  v_s64_t i = v_to_s64_f64 (j);
  /* f = x - j*ln2, with ln2 split into hi/lo parts for extra precision.  */
  v_f64_t f = v_fma_f64 (j, MLn2hi, x);
  f = v_fma_f64 (j, MLn2lo, f);

  /* Approximate expm1(f) using polynomial.
     Taylor expansion for expm1(x) has the form:
     x + ax^2 + bx^3 + cx^4 ....
     So we calculate the polynomial P(f) = a + bf + cf^2 + ...
     and assemble the approximation expm1(f) ~= f + f^2 * P(f).  */
  v_f64_t f2 = f * f;
  v_f64_t p = v_fma_f64 (f2, eval_poly (f, f2), f);

  /* Assemble the result.
     expm1(x) ~= 2^i * (p + 1) - 1
     Let t = 2^i.  */
  /* Construct 2^i directly in the bit pattern: shift i into the exponent
     field and add the bits of 1.0.  */
  v_f64_t t = v_as_f64_u64 (v_as_u64_s64 (i << 52) + One);
  /* expm1(x) ~= p * t + (t - 1).  */
  v_f64_t y = v_fma_f64 (p, t, t - 1);

#if !WANT_SIMD_EXCEPT
  /* Replace any special lanes with the scalar result; the common case takes
     no branch before the main computation.  */
  if (unlikely (v_any_u64 (special)))
    return v_call_f64 (expm1, x, y, special);
#endif

  return y;
}
VPCS_ALIAS

/* Register the routine's signature, accuracy bound and test intervals with
   the PL test harness.  The intervals cover tiny inputs (|x| < 0x1p-51),
   the main range, and the overflow/underflow tails in both signs.  */
PL_SIG (V, D, 1, expm1, -9.9, 9.9)
PL_TEST_ULP (V_NAME (expm1), 1.68)
PL_TEST_EXPECT_FENV (V_NAME (expm1), WANT_SIMD_EXCEPT)
PL_TEST_INTERVAL (V_NAME (expm1), 0, 0x1p-51, 1000)
PL_TEST_INTERVAL (V_NAME (expm1), -0, -0x1p-51, 1000)
PL_TEST_INTERVAL (V_NAME (expm1), 0x1p-51, 0x1.63108c75a1937p+9, 100000)
PL_TEST_INTERVAL (V_NAME (expm1), -0x1p-51, -0x1.740bf7c0d927dp+9, 100000)
PL_TEST_INTERVAL (V_NAME (expm1), 0x1.63108c75a1937p+9, inf, 100)
PL_TEST_INTERVAL (V_NAME (expm1), -0x1.740bf7c0d927dp+9, -inf, 100)
#endif
114