/*
 * Double-precision vector cos function.
 *
 * Copyright (c) 2019-2022, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"
#if V_SUPPORTED

static const double Poly[] = {
/* worst-case error is 3.5 ulp.
   abs error: 0x1.be222a58p-53 in [-pi/2, pi/2].  */
-0x1.9f4a9c8b21dc9p-41,
 0x1.60e88a10163f2p-33,
-0x1.ae6361b7254e7p-26,
 0x1.71de382e8d62bp-19,
-0x1.a01a019aeb4ffp-13,
 0x1.111111110b25ep-7,
-0x1.55555555554c3p-3,
};
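/* Poly[] is ordered highest degree first: Poly[0] is the r^15 coefficient
   (C7) of the sin polynomial evaluated below.  */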

#define C7 v_f64 (Poly[0])
#define C6 v_f64 (Poly[1])
#define C5 v_f64 (Poly[2])
#define C4 v_f64 (Poly[3])
#define C3 v_f64 (Poly[4])
#define C2 v_f64 (Poly[5])
#define C1 v_f64 (Poly[6])

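/* InvPi = 1/pi and HalfPi = pi/2.  Pi1 + Pi2 + Pi3 is pi split into three
   parts, so n*pi can be subtracted from |x| in extended precision below.
   Shift = 1.5*2^52 is used to round to the nearest integer with an FMA.
   RangeVal bounds the fast path; AbsMask clears the sign bit.  */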
#define InvPi v_f64 (0x1.45f306dc9c883p-2)
#define HalfPi v_f64 (0x1.921fb54442d18p+0)
#define Pi1 v_f64 (0x1.921fb54442d18p+1)
#define Pi2 v_f64 (0x1.1a62633145c06p-53)
#define Pi3 v_f64 (0x1.c1cd129024e09p-106)
#define Shift v_f64 (0x1.8p52)
#define RangeVal v_f64 (0x1p23)
#define AbsMask v_u64 (0x7fffffffffffffff)

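/* Scalar fallback: for each lane flagged in cmp, recompute with the scalar
   cos; unflagged lanes keep the vector result y.  */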
VPCS_ATTR
__attribute__ ((noinline)) static v_f64_t
specialcase (v_f64_t x, v_f64_t y, v_u64_t cmp)
{
  return v_call_f64 (cos, x, y, cmp);
}

VPCS_ATTR
v_f64_t
V_NAME(cos) (v_f64_t x)
{
  v_f64_t n, r, r2, y;
  v_u64_t odd, cmp;

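  /* cos is even, so work on |x|.  Flag lanes with |x| >= RangeVal (this
     also catches Inf and NaN) for the scalar fallback.  */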
  r = v_as_f64_u64 (v_as_u64_f64 (x) & AbsMask);
  cmp = v_cond_u64 (v_as_u64_f64 (r) >= v_as_u64_f64 (RangeVal));

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp)))
    /* If fenv exceptions are to be triggered correctly, set any special lanes
       to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
       specialcase later.  */
    r = v_sel_f64 (cmp, v_f64 (1.0), r);
#endif

  /* n = rint((|x|+pi/2)/pi) - 0.5.  Adding Shift (1.5*2^52) forces the FMA
     to round (|x|+pi/2)*InvPi to an integer kept in the low mantissa bits,
     where bit 0 holds the quadrant parity.  */
  n = v_fma_f64 (InvPi, r + HalfPi, Shift);
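  /* Extract the parity bit of the integer quotient into the sign-bit
     position.  */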
  odd = v_as_u64_f64 (n) << 63;
  n -= Shift;
  n -= v_f64 (0.5);

  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  The three-term
     split of pi keeps the reduction accurate over the fast-path range.  */
  r = v_fma_f64 (-Pi1, n, r);
  r = v_fma_f64 (-Pi2, n, r);
  r = v_fma_f64 (-Pi3, n, r);

  /* sin(r) poly approx: sin(r) ~= r + C1*r^3 + C2*r^5 + ... + C7*r^15,
     evaluated with Horner's scheme in r2 = r*r.  */
  r2 = r * r;
  y = v_fma_f64 (C7, r2, C6);
  y = v_fma_f64 (y, r2, C5);
  y = v_fma_f64 (y, r2, C4);
  y = v_fma_f64 (y, r2, C3);
  y = v_fma_f64 (y, r2, C2);
  y = v_fma_f64 (y, r2, C1);
  y = v_fma_f64 (y * r2, r, r);

  /* Apply the sign: cos(|x|) = (-1)^k * sin(r) for quadrant index k, so
     flip the sign bit of y when k is odd.  */
  y = v_as_f64_u64 (v_as_u64_f64 (y) ^ odd);

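  /* Lanes flagged above (|x| >= RangeVal, Inf, NaN) are recomputed with the
     scalar routine.  */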
  if (unlikely (v_any_u64 (cmp)))
    return specialcase (x, y, cmp);
  return y;
}
VPCS_ALIAS
#endif
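
/* Usage sketch (illustrative addition, with assumptions): in the plain
   AdvSIMD build V_NAME(cos) is expected to expand to __v_cos, declared in
   mathlib.h on a 2-lane double vector type; VPCS builds alias the
   vector-ABI name _ZGVnN2v_cos instead.  Exact names and types depend on
   the build configuration, so treat the following as a sketch only:

     #include <arm_neon.h>
     #include "mathlib.h"

     float64x2_t x = vdupq_n_f64 (0.5);
     float64x2_t y = __v_cos (x);   // each lane ~= cos(0.5) ~= 0.8776
*/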