/*
 * Single-precision vector e^x function.
 *
 * Copyright (c) 2019-2022, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"
#if V_SUPPORTED

static const float Poly[] = {
  /* maxerr: 1.45358 +0.5 ulp.  */
  0x1.0e4020p-7f,
  0x1.573e2ep-5f,
  0x1.555e66p-3f,
  0x1.fffdb6p-2f,
  0x1.ffffecp-1f,
};
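/* Note: the coefficients are close to 1/120, 1/24, 1/6, 1/2 and 1, so the
   polynomial evaluated below approximates e^r - 1 on [-ln2/2, ln2/2].  */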
#define C0 v_f32 (Poly[0])
#define C1 v_f32 (Poly[1])
#define C2 v_f32 (Poly[2])
#define C3 v_f32 (Poly[3])
#define C4 v_f32 (Poly[4])

#define Shift v_f32 (0x1.8p23f)
#define InvLn2 v_f32 (0x1.715476p+0f)
#define Ln2hi v_f32 (0x1.62e4p-1f)
#define Ln2lo v_f32 (0x1.7f7d1cp-20f)
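/* Shift is the round-to-nearest magic constant 1.5*2^23: adding it to
   x * InvLn2 leaves round(x/ln2) in the low significand bits.  Ln2hi/Ln2lo
   is a Cody-Waite style split of ln2, chosen so that n * Ln2hi is exact and
   the reduction error is confined to the small Ln2lo term.  */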

#if WANT_SIMD_EXCEPT

#define TinyBound 0x20000000 /* asuint (0x1p-63).  */
#define BigBound 0x42800000  /* asuint (0x1p6).  */
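/* Below, a single unsigned compare (asuint (|x|) - TinyBound
   >= BigBound - TinyBound) flags lanes that are tiny, too large, or NaN/Inf,
   so they can be routed to the scalar fallback.  */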

VPCS_ATTR
static NOINLINE v_f32_t
specialcase (v_f32_t x, v_f32_t y, v_u32_t cmp)
{
  /* If fenv exceptions are to be triggered correctly, fall back to the scalar
     routine for special lanes.  */
  return v_call_f32 (expf, x, y, cmp);
}

#else

VPCS_ATTR
static v_f32_t
specialcase (v_f32_t poly, v_f32_t n, v_u32_t e, v_f32_t absn, v_u32_t cmp1,
	     v_f32_t scale)
{
  /* 2^n may overflow, break it up into s1*s2.  */
  v_u32_t b = v_cond_u32 (n <= v_f32 (0.0f)) & v_u32 (0x82000000);
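  /* s1 is 2^127 in the overflow direction and 2^-125 when n <= 0 (the add of
     b wraps the exponent field); s2 carries the rest of 2^n, so that each
     factor stays representable.  */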
  v_f32_t s1 = v_as_f32_u32 (v_u32 (0x7f000000) + b);
  v_f32_t s2 = v_as_f32_u32 (e - b);
  v_u32_t cmp2 = v_cond_u32 (absn > v_f32 (192.0f));
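  /* For |n| > 192 the result saturates regardless of r: s1*s1 is +Inf for
     large positive n and 0 for large negative n.  */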
  v_u32_t r2 = v_as_u32_f32 (s1 * s1);
  v_u32_t r1 = v_as_u32_f32 (v_fma_f32 (poly, s2, s2) * s1);
  /* Similar to r1 but avoids double rounding in the subnormal range.  */
  v_u32_t r0 = v_as_u32_f32 (v_fma_f32 (poly, scale, scale));
  return v_as_f32_u32 ((cmp2 & r2) | (~cmp2 & cmp1 & r1) | (~cmp1 & r0));
}

#endif

VPCS_ATTR
v_f32_t
V_NAME(expf) (v_f32_t x)
{
  v_f32_t n, r, r2, scale, p, q, poly, z;
  v_u32_t cmp, e;

#if WANT_SIMD_EXCEPT
  cmp = v_cond_u32 ((v_as_u32_f32 (x) & 0x7fffffff) - TinyBound
		    >= BigBound - TinyBound);
  v_f32_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x to allow
     specialcase to fix special lanes later. This is only necessary if fenv
     exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = v_sel_f32 (cmp, v_f32 (1), x);
#endif

  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
#if 1
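  /* n is recovered from z = x * InvLn2 + Shift: the magic constant forces
     rounding so that z - Shift == round(x/ln2), and asuint (z) << 23 moves
     the same integer into the exponent field for e.  */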
  z = v_fma_f32 (x, InvLn2, Shift);
  n = z - Shift;
  r = v_fma_f32 (n, -Ln2hi, x);
  r = v_fma_f32 (n, -Ln2lo, r);
  e = v_as_u32_f32 (z) << 23;
#else
  z = x * InvLn2;
  n = v_round_f32 (z);
  r = v_fma_f32 (n, -Ln2hi, x);
  r = v_fma_f32 (n, -Ln2lo, r);
  e = v_as_u32_s32 (v_round_s32 (z)) << 23;
#endif
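  /* scale = 2^n, built by adding the exponent bias to e = n << 23.  */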
  scale = v_as_f32_u32 (e + v_u32 (0x3f800000));

#if !WANT_SIMD_EXCEPT
  v_f32_t absn = v_abs_f32 (n);
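  /* 2^n only fits in the exponent field for |n| <= 126; larger lanes are
     deferred to specialcase.  */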
  cmp = v_cond_u32 (absn > v_f32 (126.0f));
#endif

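  /* poly(r) = C4 r + (C3 + C2 r) r^2 + (C1 + C0 r) r^4 ~= e^r - 1,
     evaluated with a split (Estrin-like) scheme to shorten the fma chain.  */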
  r2 = r * r;
  p = v_fma_f32 (C0, r, C1);
  q = v_fma_f32 (C2, r, C3);
  q = v_fma_f32 (p, r2, q);
  p = C4 * r;
  poly = v_fma_f32 (q, r2, p);

  if (unlikely (v_any_u32 (cmp)))
#if WANT_SIMD_EXCEPT
    return specialcase (xm, v_fma_f32 (poly, scale, scale), cmp);
#else
    return specialcase (poly, n, e, absn, cmp, scale);
#endif

  return v_fma_f32 (poly, scale, scale);
}
VPCS_ALIAS
#endif