/*
 * Double-precision vector sin function.
 *
 * Copyright (c) 2019-2022, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"
#if V_SUPPORTED

static const double Poly[] = {
/* worst-case error is 3.5 ulp.
   abs error: 0x1.be222a58p-53 in [-pi/2, pi/2].  */
-0x1.9f4a9c8b21dc9p-41,
 0x1.60e88a10163f2p-33,
-0x1.ae6361b7254e7p-26,
 0x1.71de382e8d62bp-19,
-0x1.a01a019aeb4ffp-13,
 0x1.111111110b25ep-7,
-0x1.55555555554c3p-3,
};
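
/* Poly[] lists, from highest to lowest order, the coefficients of the odd
   polynomial sin(r) ~= r + C1*r^3 + C2*r^5 + ... + C7*r^15 evaluated below
   (note C1..C7 map to Poly[6]..Poly[0]).  */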

#define C7 v_f64 (Poly[0])
#define C6 v_f64 (Poly[1])
#define C5 v_f64 (Poly[2])
#define C4 v_f64 (Poly[3])
#define C3 v_f64 (Poly[4])
#define C2 v_f64 (Poly[5])
#define C1 v_f64 (Poly[6])

#define InvPi v_f64 (0x1.45f306dc9c883p-2)
#define Pi1 v_f64 (0x1.921fb54442d18p+1)
#define Pi2 v_f64 (0x1.1a62633145c06p-53)
#define Pi3 v_f64 (0x1.c1cd129024e09p-106)
#define Shift v_f64 (0x1.8p52)
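/* Shift is 1.5 * 2^52: adding it to any value of magnitude below 2^51
   forces rounding to the nearest integer, so fma (InvPi, r, Shift) - Shift
   computes rint (r/pi), with the integer also readable from the low
   mantissa bits while Shift is still added.  */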
#define AbsMask v_u64 (0x7fffffffffffffff)

#if WANT_SIMD_EXCEPT
#define TinyBound 0x202 /* top12 (asuint64 (0x1p-509)).  */
#define Thresh 0x214	/* top12 (asuint64 (RangeVal)) - TinyBound.  */
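/* One unsigned check (ir >> 52) - TinyBound >= Thresh flags both ranges:
   for tiny |x| the subtraction wraps around, and for |x| >= 0x1p23 the
   difference reaches Thresh; ir >> 52 is the biased exponent of |x|.  */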
#else
#define RangeVal v_f64 (0x1p23)
#endif

VPCS_ATTR
__attribute__ ((noinline)) static v_f64_t
specialcase (v_f64_t x, v_f64_t y, v_u64_t cmp)
{
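  /* Scalar fallback: v_call_f64 re-evaluates sin (x[i]) on the lanes where
     cmp[i] is set and keeps y[i] on the rest.  */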
  return v_call_f64 (sin, x, y, cmp);
}

VPCS_ATTR
v_f64_t
V_NAME(sin) (v_f64_t x)
{
  v_f64_t n, r, r2, y;
  v_u64_t sign, odd, cmp, ir;
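
  /* Split x into its absolute value (ir as bits, r as a double) and its
     sign bit, to be reapplied at the end.  */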
  ir = v_as_u64_f64 (x) & AbsMask;
  r = v_as_f64_u64 (ir);
  sign = v_as_u64_f64 (x) & ~AbsMask;
63 
64 #if WANT_SIMD_EXCEPT
65   /* Detect |x| <= 0x1p-509 or |x| >= RangeVal. If fenv exceptions are to be
66      triggered correctly, set any special lanes to 1 (which is neutral w.r.t.
67      fenv). These lanes will be fixed by specialcase later.  */
68   cmp = v_cond_u64 ((ir >> 52) - TinyBound >= Thresh);
69   if (unlikely (v_any_u64 (cmp)))
70     r = v_sel_f64 (cmp, v_f64 (1), r);
71 #else
72   cmp = v_cond_u64 (ir >= v_as_u64_f64 (RangeVal));
73 #endif

  /* n = rint(|x|/pi).  */
  n = v_fma_f64 (InvPi, r, Shift);
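  /* While Shift is still added, bit 0 of n holds the parity of rint(|x|/pi).
     sin(|x|) = (-1)^n * sin(|x| - n*pi), so move that bit to the sign
     position; it is XORed into the result below.  */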
  odd = v_as_u64_f64 (n) << 63;
  n -= Shift;

  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
  r = v_fma_f64 (-Pi1, n, r);
  r = v_fma_f64 (-Pi2, n, r);
  r = v_fma_f64 (-Pi3, n, r);
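  /* Pi1 + Pi2 + Pi3 gives pi to roughly triple double precision, so the
     three FMAs above perform an extended-precision (Cody-Waite style)
     reduction that keeps r accurate even for n up to ~0x1p23/pi.  */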

  /* sin(r) poly approx.  */
  r2 = r * r;
  y = v_fma_f64 (C7, r2, C6);
  y = v_fma_f64 (y, r2, C5);
  y = v_fma_f64 (y, r2, C4);
  y = v_fma_f64 (y, r2, C3);
  y = v_fma_f64 (y, r2, C2);
  y = v_fma_f64 (y, r2, C1);
  y = v_fma_f64 (y * r2, r, r);
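  /* Horner's scheme in r2: the chain computes P(r2) = C1 + r2*(C2 + ... +
     r2*C7), and the final FMA forms r + (r2 * P(r2)) * r, the odd
     polynomial approximation of sin(r).  */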

  /* Apply the sign of x and the quadrant parity: XORing sign and odd into
     the sign bit of y gives sign(x) * (-1)^n * sin(r).  */
  y = v_as_f64_u64 (v_as_u64_f64 (y) ^ sign ^ odd);

  if (unlikely (v_any_u64 (cmp)))
    return specialcase (x, y, cmp);
  return y;
}
VPCS_ALIAS
#endif
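
/* Usage sketch (not part of this file): in the usual optimized-routines
   build, mathlib.h exposes the AdvSIMD instance of this routine as __v_sin
   on float64x2_t, with VPCS_ALIAS adding a vector-ABI name such as
   _ZGVnN2v_sin; the exact symbol names depend on the build configuration.

     #include <arm_neon.h>
     #include "mathlib.h"

     float64x2_t x = { 0.5, 2.0 };
     float64x2_t y = __v_sin (x);   // lane-wise double-precision sin
*/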