/*
 * Configuration for math routines.
 *
 * Copyright (c) 2017-2018, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

#ifndef _MATH_CONFIG_H
#define _MATH_CONFIG_H

#include <math.h>
#include <stdint.h>

#ifndef WANT_ROUNDING
/* Correct special case results in non-nearest rounding modes.  */
# define WANT_ROUNDING 1
#endif
#ifndef WANT_ERRNO
/* Set errno according to ISO C with (math_errhandling & MATH_ERRNO) != 0.  */
# define WANT_ERRNO 1
#endif
#ifndef WANT_ERRNO_UFLOW
/* Set errno to ERANGE if result underflows to 0 (in all rounding modes).  */
# define WANT_ERRNO_UFLOW (WANT_ROUNDING && WANT_ERRNO)
#endif

/* Compiler can inline round as a single instruction.  */
#ifndef HAVE_FAST_ROUND
# if __aarch64__
#   define HAVE_FAST_ROUND 1
# else
#   define HAVE_FAST_ROUND 0
# endif
#endif

/* Compiler can inline lround, but not (long)round(x).  */
#ifndef HAVE_FAST_LROUND
# if __aarch64__ && (100*__GNUC__ + __GNUC_MINOR__) >= 408 && __NO_MATH_ERRNO__
#   define HAVE_FAST_LROUND 1
# else
#   define HAVE_FAST_LROUND 0
# endif
#endif

/* Compiler can inline fma as a single instruction.  */
#ifndef HAVE_FAST_FMA
# if defined FP_FAST_FMA || __aarch64__
#   define HAVE_FAST_FMA 1
# else
#   define HAVE_FAST_FMA 0
# endif
#endif

#if HAVE_FAST_ROUND
/* When set, the roundtoint and converttoint functions are provided with
   the semantics documented below.  */
# define TOINT_INTRINSICS 1

/* Round x to nearest int in all rounding modes; ties have to be rounded
   consistently with converttoint so the results match.  If the result
   would be outside of [-2^31, 2^31-1] then the semantics are unspecified.  */
static inline double_t
roundtoint (double_t x)
{
  return round (x);
}

/* Convert x to nearest int in all rounding modes; ties have to be rounded
   consistently with roundtoint.  If the result is not representable in an
   int32_t then the semantics are unspecified.  */
static inline int32_t
converttoint (double_t x)
{
# if HAVE_FAST_LROUND
  return lround (x);
# else
  return (long) round (x);
# endif
}
#endif
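/* Illustrative sketch (not part of the original header): the intended use of
   the pair above is argument reduction, where the same value is needed both
   as a rounded double and as an integer, and ties must round identically in
   both.  The function and parameter names below (reduce_example, InvLn2N)
   are hypothetical; this only shows the contract, not a real routine.  */
#if 0
static inline void
reduce_example (double x, double InvLn2N, double *r, int32_t *k)
{
  double_t z = x * InvLn2N;	/* Scaled argument.  */
  double_t kd = roundtoint (z);	/* Nearest integer, as a double.  */
  *k = converttoint (z);	/* Same integer as an int32_t; ties match kd.  */
  *r = z - kd;			/* Reduced argument in [-0.5, 0.5].  */
}
#endif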

static inline uint32_t
asuint (float f)
{
  union
  {
    float f;
    uint32_t i;
  } u = {f};
  return u.i;
}

static inline float
asfloat (uint32_t i)
{
  union
  {
    uint32_t i;
    float f;
  } u = {i};
  return u.f;
}

static inline uint64_t
asuint64 (double f)
{
  union
  {
    double f;
    uint64_t i;
  } u = {f};
  return u.i;
}

static inline double
asdouble (uint64_t i)
{
  union
  {
    uint64_t i;
    double f;
  } u = {i};
  return u.f;
}
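/* Illustrative sketch (not part of the original header): the as* helpers
   above are the standard type-punning-through-a-union idiom for reading and
   writing IEEE-754 bit patterns without undefined behaviour.  The helper
   names below (top12f, sign_of) are hypothetical examples of such use.  */
#if 0
/* Sign, 8-bit exponent and the top 3 mantissa bits of a float.  */
static inline uint32_t
top12f (float x)
{
  return asuint (x) >> 20;
}

/* Sign bit of a double, read without raising exceptions on NaN inputs.  */
static inline int
sign_of (double x)
{
  return asuint64 (x) >> 63;
}
#endif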

#ifndef IEEE_754_2008_SNAN
# define IEEE_754_2008_SNAN 1
#endif
static inline int
issignalingf_inline (float x)
{
  uint32_t ix = asuint (x);
  if (!IEEE_754_2008_SNAN)
    return (ix & 0x7fc00000) == 0x7fc00000;
  return 2 * (ix ^ 0x00400000) > 2u * 0x7fc00000;
}

static inline int
issignaling_inline (double x)
{
  uint64_t ix = asuint64 (x);
  if (!IEEE_754_2008_SNAN)
    return (ix & 0x7ff8000000000000) == 0x7ff8000000000000;
  return 2 * (ix ^ 0x0008000000000000) > 2 * 0x7ff8000000000000ULL;
}
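/* Illustrative note (not part of the original header): in the IEEE 754-2008
   encoding a NaN is signaling iff the most significant fraction bit (the
   quiet bit) is clear.  The expressions above discard the sign bit by
   doubling and flip the quiet bit with the XOR, so the comparison holds
   exactly for signaling NaNs.  A hypothetical caller (using __math_invalidf,
   declared further below) might look like this:  */
#if 0
static inline float
example_snan_handler (float x)
{
  /* Quiet NaNs pass through; signaling NaNs raise invalid and are quieted.  */
  if (issignalingf_inline (x))
    return __math_invalidf (x);
  return x;
}
#endif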

#if __aarch64__ && __GNUC__
/* Prevent the optimization of a floating-point expression.  */
static inline float
opt_barrier_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
static inline double
opt_barrier_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Force the evaluation of a floating-point expression for its side-effect.  */
static inline void
force_eval_float (float x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
static inline void
force_eval_double (double x)
{
  __asm__ __volatile__ ("" : "+w" (x));
}
#else
static inline float
opt_barrier_float (float x)
{
  volatile float y = x;
  return y;
}
static inline double
opt_barrier_double (double x)
{
  volatile double y = x;
  return y;
}
static inline void
force_eval_float (float x)
{
  volatile float y = x;
}
static inline void
force_eval_double (double x)
{
  volatile double y = x;
}
#endif
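/* Illustrative sketch (not part of the original header): the barriers above
   keep the compiler from constant folding or discarding an expression that
   is only evaluated for its floating-point side effect (exception flags,
   errno).  The function below is hypothetical.  */
#if 0
static inline float
raise_underflow_example (void)
{
  float tiny = opt_barrier_float (0x1p-126f);
  /* The product is unused, so without the barrier and force_eval the
     compiler could fold it away and the underflow flag would not be set.  */
  force_eval_float (tiny * tiny);
  return 0.0f;
}
#endif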

/* Evaluate an expression as the specified type.  Normally a type cast is
   enough, but compilers implement non-standard excess-precision handling,
   so when FLT_EVAL_METHOD != 0 these functions may need to be customized.  */
static inline float
eval_as_float (float x)
{
  return x;
}
static inline double
eval_as_double (double x)
{
  return x;
}
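/* Illustrative sketch (not part of the original header): with
   FLT_EVAL_METHOD != 0 (e.g. x87 excess precision) the product below may be
   evaluated in a wider type; eval_as_float makes the narrowing to float an
   explicit part of the algorithm.  The function name is hypothetical.  */
#if 0
static inline float
scaled_result_example (float r, float scale)
{
  return eval_as_float (r * scale);
}
#endif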

/* Provide *_finite symbols and some of the glibc hidden symbols
   so libmathlib can be used with binaries compiled against glibc
   to interpose math functions with both static and dynamic linking.  */
#ifndef USE_GLIBC_ABI
# if __GNUC__
#   define USE_GLIBC_ABI 1
# else
#   define USE_GLIBC_ABI 0
# endif
#endif

#ifdef __GNUC__
# define HIDDEN __attribute__ ((__visibility__ ("hidden")))
# define NOINLINE __attribute__ ((noinline))
# define likely(x) __builtin_expect (!!(x), 1)
# define unlikely(x) __builtin_expect (x, 0)
# define strong_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f)));
# define hidden_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f), visibility ("hidden")));
#else
# define HIDDEN
# define NOINLINE
# define likely(x) (x)
# define unlikely(x) (x)
#endif
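/* Illustrative sketch (not part of the original header): hypothetical uses
   of the macros above -- an unlikely-taken special-case branch and a second
   exported name created with strong_alias.  */
#if 0
float
example_wrapper (float x)
{
  uint32_t ix = asuint (x);
  if (unlikely (2 * ix >= 2u * 0x7f800000)) /* Rare path: x is Inf or NaN.  */
    return x + x;			    /* Quiets signaling NaNs.  */
  return x;
}
strong_alias (example_wrapper, __example_wrapper_alias)
#endif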

/* Error handling tail calls for special cases, with a sign argument.
   The sign of the return value is set if the argument is non-zero.  */

/* The result overflows.  */
HIDDEN float __math_oflowf (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN float __math_uflowf (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN float __math_may_uflowf (uint32_t);
/* Division by zero.  */
HIDDEN float __math_divzerof (uint32_t);
/* The result overflows.  */
HIDDEN double __math_oflow (uint32_t);
/* The result underflows to 0 in nearest rounding mode.  */
HIDDEN double __math_uflow (uint32_t);
/* The result underflows to 0 in some directed rounding mode only.  */
HIDDEN double __math_may_uflow (uint32_t);
/* Division by zero.  */
HIDDEN double __math_divzero (uint32_t);
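/* Illustrative sketch (not part of the original header): a routine detects a
   special case and tail calls one of the error functions above, passing its
   sign bit so the error path can return a correctly signed result and set
   errno (ERANGE) when errno handling is enabled.  The threshold and names
   below are hypothetical.  */
#if 0
static float
example_special_case (float x, uint32_t sign_bit)
{
  if (x > 0x1.62e43p+6f)	      /* Illustrative expf-style overflow bound.  */
    return __math_oflowf (sign_bit);
  return __math_uflowf (sign_bit);
}
#endif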

/* Error handling using input checking.  */

/* Invalid input unless it is a quiet NaN.  */
HIDDEN float __math_invalidf (float);
/* Invalid input unless it is a quiet NaN.  */
HIDDEN double __math_invalid (double);

/* Error handling using output checking, only for errno setting.  */

/* Check if the result overflowed to infinity.  */
HIDDEN double __math_check_oflow (double);
/* Check if the result underflowed to 0.  */
HIDDEN double __math_check_uflow (double);

/* Check if the result overflowed to infinity.  */
static inline double
check_oflow (double x)
{
  return WANT_ERRNO ? __math_check_oflow (x) : x;
}

/* Check if the result underflowed to 0.  */
static inline double
check_uflow (double x)
{
  return WANT_ERRNO ? __math_check_uflow (x) : x;
}
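/* Illustrative sketch (not part of the original header): these checks are
   meant to wrap the final result of a routine, so errno is only set when the
   computed value actually overflowed to infinity or underflowed to 0.  The
   function name below is hypothetical.  */
#if 0
static double
example_finish (double scale, double tmp)
{
  return check_uflow (eval_as_double (scale * tmp));
}
#endif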


/* Shared between expf, exp2f and powf.  */
#define EXP2F_TABLE_BITS 5
#define EXP2F_POLY_ORDER 3
extern const struct exp2f_data
{
  uint64_t tab[1 << EXP2F_TABLE_BITS];
  double shift_scaled;
  double poly[EXP2F_POLY_ORDER];
  double shift;
  double invln2_scaled;
  double poly_scaled[EXP2F_POLY_ORDER];
} __exp2f_data HIDDEN;
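/* Illustrative sketch (not part of the original header): the table above has
   1 << EXP2F_TABLE_BITS (= 32) entries, so an implementation can derive the
   index from the low EXP2F_TABLE_BITS bits of the rounded, scaled argument.
   Only that indexing scheme is shown here; the real expf/exp2f code has more
   steps.  */
#if 0
static inline uint64_t
example_exp2f_lookup (double_t kd)
{
  /* kd is assumed to be scaled so that its integer part, modulo the table
     size, selects the table entry.  */
  uint32_t ki = (uint32_t) converttoint (kd);
  return __exp2f_data.tab[ki % (1 << EXP2F_TABLE_BITS)];
}
#endif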

#define LOGF_TABLE_BITS 4
#define LOGF_POLY_ORDER 4
extern const struct logf_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOGF_TABLE_BITS];
  double ln2;
  double poly[LOGF_POLY_ORDER - 1]; /* First order coefficient is 1.  */
} __logf_data HIDDEN;

#define LOG2F_TABLE_BITS 4
#define LOG2F_POLY_ORDER 4
extern const struct log2f_data
{
  struct
  {
    double invc, logc;
  } tab[1 << LOG2F_TABLE_BITS];
  double poly[LOG2F_POLY_ORDER];
} __log2f_data HIDDEN;

#define POWF_LOG2_TABLE_BITS 4
#define POWF_LOG2_POLY_ORDER 5
#if TOINT_INTRINSICS
# define POWF_SCALE_BITS EXP2F_TABLE_BITS
#else
# define POWF_SCALE_BITS 0
#endif
#define POWF_SCALE ((double) (1 << POWF_SCALE_BITS))
extern const struct powf_log2_data
{
  struct
  {
    double invc, logc;
  } tab[1 << POWF_LOG2_TABLE_BITS];
  double poly[POWF_LOG2_POLY_ORDER];
} __powf_log2_data HIDDEN;


#define EXP_TABLE_BITS 7
#define EXP_POLY_ORDER 5
/* Use polynomial that is optimized for a wider input range.  This may be
   needed for good precision in non-nearest rounding and !TOINT_INTRINSICS.  */
#define EXP_POLY_WIDE 0
/* Use close to nearest rounding toint when !TOINT_INTRINSICS.  This may be
   needed for good precision in non-nearest rounding and !EXP_POLY_WIDE.  */
#define EXP_USE_TOINT_NARROW 0
#define EXP2_POLY_ORDER 5
#define EXP2_POLY_WIDE 0
extern const struct exp_data
{
  double invln2N;
  double shift;
  double negln2hiN;
  double negln2loN;
  double poly[4]; /* Last four coefficients.  */
  double exp2_shift;
  double exp2_poly[EXP2_POLY_ORDER];
  uint64_t tab[2*(1 << EXP_TABLE_BITS)];
} __exp_data HIDDEN;

#define LOG_TABLE_BITS 7
#define LOG_POLY_ORDER 6
#define LOG_POLY1_ORDER 12
extern const struct log_data
{
  double ln2hi;
  double ln2lo;
  double poly[LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  double poly1[LOG_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG_TABLE_BITS];
#endif
} __log_data HIDDEN;

#define LOG2_TABLE_BITS 6
#define LOG2_POLY_ORDER 7
#define LOG2_POLY1_ORDER 11
extern const struct log2_data
{
  double invln2hi;
  double invln2lo;
  double poly[LOG2_POLY_ORDER - 1];
  double poly1[LOG2_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG2_TABLE_BITS];
#if !HAVE_FAST_FMA
  struct {double chi, clo;} tab2[1 << LOG2_TABLE_BITS];
#endif
} __log2_data HIDDEN;

#define POW_LOG_TABLE_BITS 7
#define POW_LOG_POLY_ORDER 8
extern const struct pow_log_data
{
  double ln2hi;
  double ln2lo;
  double poly[POW_LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  /* Note: the pad field is unused, but allows slightly faster indexing.  */
  struct {double invc, pad, logc, logctail;} tab[1 << POW_LOG_TABLE_BITS];
} __pow_log_data HIDDEN;

#endif