1 /*
2  * Single-precision vector 2^x function.
3  *
4  * Copyright (c) 2019-2022, Arm Limited.
5  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6  */
7 
8 #include "mathlib.h"
9 #include "v_math.h"
10 #if V_SUPPORTED
11 
/* Coefficients of a degree-4 minimax polynomial approximating 2^r - 1 for
   r in [-1/2, 1/2].  Poly[0] is the highest-degree term, Poly[4] the linear
   term (see the evaluation scheme in the main routine).  */
static const float Poly[] = {
  /* maxerr: 1.962 ulp.  */
  0x1.59977ap-10f,
  0x1.3ce9e4p-7f,
  0x1.c6bd32p-5f,
  0x1.ebf9bcp-3f,
  0x1.62e422p-1f,
};
/* Broadcast each coefficient across all vector lanes.  */
#define C0 v_f32 (Poly[0])
#define C1 v_f32 (Poly[1])
#define C2 v_f32 (Poly[2])
#define C3 v_f32 (Poly[3])
#define C4 v_f32 (Poly[4])

/* 1.5 * 2^23.  Adding this to a float with |x| < 2^22 forces rounding to
   integer in the low mantissa bits; used only by the alternative (#if 0)
   reduction path in the main routine.  */
#define Shift v_f32 (0x1.8p23f)
27 
#if WANT_SIMD_EXCEPT

#define TinyBound 0x20000000 /* asuint (0x1p-63).  */
#define BigBound 0x42800000  /* asuint (0x1p6).  */

/* Fixup for lanes selected by CMP when fenv exceptions must be correct:
   recompute those lanes with the scalar exp2f on the original argument X
   so that overflow/underflow/invalid exceptions are raised exactly as the
   scalar routine would raise them.  Lanes not in CMP keep the vector
   result Y.  */
VPCS_ATTR
static NOINLINE v_f32_t
specialcase (v_f32_t x, v_f32_t y, v_u32_t cmp)
{
  /* If fenv exceptions are to be triggered correctly, fall back to the
     scalar routine for the special lanes.  */
  return v_call_f32 (exp2f, x, y, cmp);
}
41 
42 #else
43 
/* Fixup for lanes where |n| > 126, i.e. where the scale 2^n is not a
   normal single-precision value.  POLY is the polynomial result, N the
   rounded exponent, E = n << 23, ABSN = |n|, CMP1 the special-lane mask
   and SCALE the (possibly overflowed/underflowed) single-factor scale.  */
VPCS_ATTR
static v_f32_t
specialcase (v_f32_t poly, v_f32_t n, v_u32_t e, v_f32_t absn, v_u32_t cmp1, v_f32_t scale)
{
  /* 2^n may overflow, break it up into s1*s2.  For n > 0, b == 0 and
     s1 = asfloat (0x7f000000) = 0x1p127; for n <= 0 the bias b moves
     s1 down and s2 up correspondingly so s2 = 2^n / s1 stays
     representable.  */
  v_u32_t b = v_cond_u32 (n <= v_f32 (0.0f)) & v_u32 (0x82000000);
  v_f32_t s1 = v_as_f32_u32 (v_u32 (0x7f000000) + b);
  v_f32_t s2 = v_as_f32_u32 (e - b);
  /* |n| > 192: the result certainly overflows to Inf or underflows to 0;
     s1 * s1 produces that value with the correct sign of the rounding.  */
  v_u32_t cmp2 = v_cond_u32 (absn > v_f32 (192.0f));
  v_u32_t r2 = v_as_u32_f32 (s1 * s1);
  v_u32_t r1 = v_as_u32_f32 (v_fma_f32 (poly, s2, s2) * s1);
  /* Similar to r1 but avoids double rounding in the subnormal range.  */
  v_u32_t r0 = v_as_u32_f32 (v_fma_f32 (poly, scale, scale));
  /* Per-lane select: r2 if |n| > 192, else r1 for special lanes (cmp1),
     else the ordinary result r0.  */
  return v_as_f32_u32 ((cmp2 & r2) | (~cmp2 & cmp1 & r1) | (~cmp1 & r0));
}
59 
60 #endif
61 
/* Single-precision vector 2^x.
   Reduce x = n + r with n = round (x) and r in [-1/2, 1/2]; then
   2^x = 2^n * (1 + poly(r)) where poly approximates 2^r - 1 and 2^n is
   applied by constructing its exponent bits directly.  Out-of-range
   lanes are repaired by the appropriate specialcase variant.  */
VPCS_ATTR
v_f32_t
V_NAME(exp2f) (v_f32_t x)
{
  v_f32_t n, r, r2, scale, p, q, poly;
  v_u32_t cmp, e;

#if WANT_SIMD_EXCEPT
  /* Special if |x| is outside [0x1p-63, 0x1p6) — tiny, large, Inf or
     NaN.  The unsigned subtract folds both bounds into one compare.  */
  cmp = v_cond_u32 ((v_as_u32_f32 (x) & 0x7fffffff) - TinyBound
		    >= BigBound - TinyBound);
  v_f32_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x to allow
     specialcase to fix special lanes later. This is only necessary if fenv
     exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = v_sel_f32 (cmp, v_f32 (1), x);
#endif

  /* exp2(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
     x = n + r, with r in [-1/2, 1/2].  */
#if 0
  /* Alternative reduction kept for reference: round to integer via the
     add-Shift trick instead of dedicated rounding instructions.  */
  v_f32_t z;
  z = x + Shift;
  n = z - Shift;
  r = x - n;
  e = v_as_u32_f32 (z) << 23;
#else
  n = v_round_f32 (x);
  r = x - n;
  /* Biased-exponent contribution of 2^n (bias added below).  */
  e = v_as_u32_s32 (v_round_s32 (x)) << 23;
#endif
  /* scale = 2^n: add the exponent bias 127 << 23 to e.  */
  scale = v_as_f32_u32 (e + v_u32 (0x3f800000));

#if !WANT_SIMD_EXCEPT
  /* |n| > 126: 2^n is not a normal float, so scale above is wrong;
     defer those lanes to specialcase.  */
  v_f32_t absn = v_abs_f32 (n);
  cmp = v_cond_u32 (absn > v_f32 (126.0f));
#endif

  /* Evaluate poly(r) ~= 2^r - 1 with a pairwise scheme (shorter
     dependency chain than plain Horner).  */
  r2 = r * r;
  p = v_fma_f32 (C0, r, C1);
  q = v_fma_f32 (C2, r, C3);
  q = v_fma_f32 (p, r2, q);
  p = C4 * r;
  poly = v_fma_f32 (q, r2, p);

  if (unlikely (v_any_u32 (cmp)))
#if WANT_SIMD_EXCEPT
    return specialcase (xm, v_fma_f32 (poly, scale, scale), cmp);
#else
    return specialcase (poly, n, e, absn, cmp, scale);
#endif

  /* scale + scale * poly = scale * (1 + poly) = 2^n * 2^r.  */
  return v_fma_f32 (poly, scale, scale);
}
116 VPCS_ALIAS
117 #endif
118