/*
 * Double-precision SVE sin(x) function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

8 #include "sv_math.h"
9 #include "pl_sig.h"
10 #include "pl_test.h"
11
12 #if SV_SUPPORTED
13
14 #define InvPi (sv_f64 (0x1.45f306dc9c883p-2))
15 #define HalfPi (sv_f64 (0x1.921fb54442d18p+0))
16 #define InvPio2 (sv_f64 (0x1.45f306dc9c882p-1))
17 #define NegPio2_1 (sv_f64 (-0x1.921fb50000000p+0))
18 #define NegPio2_2 (sv_f64 (-0x1.110b460000000p-26))
19 #define NegPio2_3 (sv_f64 (-0x1.1a62633145c07p-54))
20 #define Shift (sv_f64 (0x1.8p52))
21 #define RangeVal (sv_f64 (0x1p23))
22 #define AbsMask (0x7fffffffffffffff)
23
24 static NOINLINE sv_f64_t
__sv_sin_specialcase(sv_f64_t x,sv_f64_t y,svbool_t cmp)25 __sv_sin_specialcase (sv_f64_t x, sv_f64_t y, svbool_t cmp)
26 {
27 return sv_call_f64 (sin, x, y, cmp);
28 }
29
30 /* A fast SVE implementation of sin based on trigonometric
31 instructions (FTMAD, FTSSEL, FTSMUL).
32 Maximum observed error in 2.52 ULP:
33 __sv_sin(0x1.2d2b00df69661p+19) got 0x1.10ace8f3e786bp-40
34 want 0x1.10ace8f3e7868p-40. */
35 sv_f64_t
__sv_sin_x(sv_f64_t x,const svbool_t pg)36 __sv_sin_x (sv_f64_t x, const svbool_t pg)
37 {
38 sv_f64_t n, r, r2, y;
39 sv_u64_t sign;
40 svbool_t cmp;
41
42 r = sv_as_f64_u64 (svand_n_u64_x (pg, sv_as_u64_f64 (x), AbsMask));
43 sign = svand_n_u64_x (pg, sv_as_u64_f64 (x), ~AbsMask);
44 cmp = svcmpge_u64 (pg, sv_as_u64_f64 (r), sv_as_u64_f64 (RangeVal));
45
46 /* n = rint(|x|/(pi/2)). */
47 sv_f64_t q = sv_fma_f64_x (pg, InvPio2, r, Shift);
48 n = svsub_f64_x (pg, q, Shift);
49
50 /* r = |x| - n*(pi/2) (range reduction into -pi/4 .. pi/4). */
51 r = sv_fma_f64_x (pg, NegPio2_1, n, r);
52 r = sv_fma_f64_x (pg, NegPio2_2, n, r);
53 r = sv_fma_f64_x (pg, NegPio2_3, n, r);
54
55 /* Final multiplicative factor: 1.0 or x depending on bit #0 of q. */
56 sv_f64_t f = svtssel_f64 (r, sv_as_u64_f64 (q));
57
58 /* sin(r) poly approx. */
59 r2 = svtsmul_f64 (r, sv_as_u64_f64 (q));
60 y = sv_f64 (0.0);
61 y = svtmad_f64 (y, r2, 7);
62 y = svtmad_f64 (y, r2, 6);
63 y = svtmad_f64 (y, r2, 5);
64 y = svtmad_f64 (y, r2, 4);
65 y = svtmad_f64 (y, r2, 3);
66 y = svtmad_f64 (y, r2, 2);
67 y = svtmad_f64 (y, r2, 1);
68 y = svtmad_f64 (y, r2, 0);
69
70 /* Apply factor. */
71 y = svmul_f64_x (pg, f, y);
72
73 /* sign = y^sign. */
74 y = sv_as_f64_u64 (sveor_u64_x (pg, sv_as_u64_f64 (y), sign));
75
76 /* No need to pass pg to specialcase here since cmp is a strict subset,
77 guaranteed by the cmpge above. */
78 if (unlikely (svptest_any (pg, cmp)))
79 return __sv_sin_specialcase (x, y, cmp);
80 return y;
81 }
82
83 PL_ALIAS (__sv_sin_x, _ZGVsMxv_sin)
84
85 PL_SIG (SV, D, 1, sin, -3.1, 3.1)
86 PL_TEST_ULP (__sv_sin, 2.03)
87 PL_TEST_INTERVAL (__sv_sin, 0, 0xffff0000, 10000)
88 PL_TEST_INTERVAL (__sv_sin, 0x1p-4, 0x1p4, 500000)
89 #endif
90