/*
 * Double-precision log2(x) function.
 *
 * Copyright (c) 2018, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

#include <math.h>
#include <stdint.h>
#include "math_config.h"

#define T __log2_data.tab
#define T2 __log2_data.tab2
#define B __log2_data.poly1
#define A __log2_data.poly
#define InvLn2hi __log2_data.invln2hi
#define InvLn2lo __log2_data.invln2lo
#define N (1 << LOG2_TABLE_BITS)
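/* OFF is the bit pattern of 0x1.6p-1 (0.6875): the reduction below writes
   x = 2^k z with z in [0x1.6p-1, 0x1.6p0). */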
#define OFF 0x3fe6000000000000

/* Top 16 bits of a double. */
static inline uint32_t
top16 (double x)
{
  return asuint64 (x) >> 48;
}

double
log2 (double x)
{
  /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */
  double_t z, r, r2, r4, y, invc, logc, kd, hi, lo, t1, t2, t3, p;
  uint64_t ix, iz, tmp;
  uint32_t top;
  int k, i;

  ix = asuint64 (x);
  top = top16 (x);

#if LOG2_POLY1_ORDER == 11
# define LO asuint64 (1.0 - 0x1.5b51p-5)
# define HI asuint64 (1.0 + 0x1.6ab2p-5)
#endif
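  /* With unsigned wrap-around, ix - LO < HI - LO holds exactly when
     LO <= ix < HI, i.e. when x lies in a narrow interval around 1.0. */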
  if (unlikely (ix - LO < HI - LO))
    {
      /* Handle close to 1.0 inputs separately. */
      /* Fix sign of zero with downward rounding when x==1. */
      if (WANT_ROUNDING && unlikely (ix == asuint64 (1.0)))
        return 0;
      r = x - 1.0;
#if HAVE_FAST_FMA
      hi = r * InvLn2hi;
      lo = r * InvLn2lo + fma (r, InvLn2hi, -hi);
#else
      double_t rhi, rlo;
      rhi = asdouble (asuint64 (r) & -1ULL << 32);
      rlo = r - rhi;
      hi = rhi * InvLn2hi;
      lo = rlo * InvLn2hi + r * InvLn2lo;
#endif
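      /* hi + lo now approximates r/ln2 to well beyond double precision;
         the polynomial correction is accumulated into lo below. */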
      r2 = r * r; /* rounding error: 0x1p-62. */
      r4 = r2 * r2;
#if LOG2_POLY1_ORDER == 11
      /* Worst-case error is less than 0.54 ULP (0.55 ULP without fma). */
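      /* The low-order part p is added to hi first and the rounding error of
         that sum is folded into lo before the higher-order terms join in. */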
      p = r2 * (B[0] + r * B[1]);
      y = hi + p;
      lo += hi - y + p;
      lo += r4 * (B[2] + r * B[3] + r2 * (B[4] + r * B[5])
                  + r4 * (B[6] + r * B[7] + r2 * (B[8] + r * B[9])));
      y += lo;
#endif
      return eval_as_double (y);
    }
  if (unlikely (top - 0x0010 >= 0x7ff0 - 0x0010))
    {
      /* x < 0x1p-1022 or inf or nan. */
      if (ix * 2 == 0)
        return __math_divzero (1);
      if (ix == asuint64 (INFINITY)) /* log2(inf) == inf. */
        return x;
      if ((top & 0x8000) || (top & 0x7ff0) == 0x7ff0)
        return __math_invalid (x);
      /* x is subnormal, normalize it. */
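      /* Scaling by 0x1p52 makes the value normal; subtracting 52 from the
         exponent field compensates, so the k extracted below is the true
         exponent of x. */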
      ix = asuint64 (x * 0x1p52);
      ix -= 52ULL << 52;
    }

  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center. */
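  /* The top 12 bits of tmp hold k (chosen so that z = x*2^-k falls in
     [0x1.6p-1, 0x1.6p0)), the next LOG2_TABLE_BITS bits select the
     subinterval i, and subtracting tmp's exponent bits from ix gives iz,
     the bit pattern of z. */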
  tmp = ix - OFF;
  i = (tmp >> (52 - LOG2_TABLE_BITS)) % N;
  k = (int64_t) tmp >> 52; /* arithmetic shift */
  iz = ix - (tmp & 0xfffULL << 52);
  invc = T[i].invc;
  logc = T[i].logc;
  z = asdouble (iz);
  kd = (double_t) k;

  /* log2(x) = log2(z/c) + log2(c) + k. */
  /* r ~= z/c - 1, |r| < 1/(2*N). */
#if HAVE_FAST_FMA
  /* rounding error: 0x1p-55/N. */
  r = fma (z, invc, -1.0);
  t1 = r * InvLn2hi;
  t2 = r * InvLn2lo + fma (r, InvLn2hi, -t1);
#else
  double_t rhi, rlo;
  /* rounding error: 0x1p-55/N + 0x1p-65. */
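  /* Without fma, c is taken as the two-piece sum T2[i].chi + T2[i].clo so
     that z - chi - clo approximates z - c accurately before the multiply
     by invc ~ 1/c. */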
  r = (z - T2[i].chi - T2[i].clo) * invc;
  rhi = asdouble (asuint64 (r) & -1ULL << 32);
  rlo = r - rhi;
  t1 = rhi * InvLn2hi;
  t2 = rlo * InvLn2hi + r * InvLn2lo;
#endif

  /* hi + lo = r/ln2 + log2(c) + k. */
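  /* t3 + t1 is rounded in hi and its rounding error is recovered as
     t3 - hi + t1 (Fast2Sum-style), so hi + lo carries the sum to extra
     precision. */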
  t3 = kd + logc;
  hi = t3 + t1;
  lo = t3 - hi + t1 + t2;

  /* log2(r+1) = r/ln2 + r^2*poly(r). */
  /* Evaluation is optimized assuming superscalar pipelined execution. */
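  /* Estrin-style evaluation: the terms in r, r2 and r4 are independent, so
     a superscalar core can compute them in parallel. */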
  r2 = r * r; /* rounding error: 0x1p-54/N^2. */
  r4 = r2 * r2;
#if LOG2_POLY_ORDER == 7
  /* Worst-case error if |y| > 0x1p-4: 0.547 ULP (0.550 ULP without fma).
     ~ 0.5 + 2/N/ln2 + abs-poly-error*0x1p56 ULP (+ 0.003 ULP without fma). */
  p = A[0] + r * A[1] + r2 * (A[2] + r * A[3]) + r4 * (A[4] + r * A[5]);
  y = lo + r2 * p + hi;
#endif
  return eval_as_double (y);
}
#if USE_GLIBC_ABI
strong_alias (log2, __log2_finite)
hidden_alias (log2, __ieee754_log2)
#endif