/*
 * Single-precision vector erf(x) function.
 *
 * Copyright (c) 2020-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "pl_sig.h"
#include "pl_test.h"

#if SV_SUPPORTED

#define AbsMask (0x7fffffff)

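/* Fall back to the scalar erff on lanes flagged by cmp; all other lanes keep
   the vector result y. */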
static NOINLINE sv_f32_t
__sv_erff_specialcase (sv_f32_t x, sv_f32_t y, svbool_t cmp)
{
  return sv_call_f32 (erff, x, y, cmp);
}

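/* Vector expf, used below to evaluate the 1 - exp (-(...)) tail for lanes
   outside the reduced interval. */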
sv_f32_t __sv_expf_x (svbool_t, sv_f32_t);

/* Optimized single precision vector erf. Worst-case error is 1.25 ULP:
   __sv_erff(0x1.dc59fap-1) got 0x1.9f9c88p-1
			    want 0x1.9f9c8ap-1. */
sv_f32_t
__sv_erff_x (sv_f32_t x, const svbool_t pg)
{
  sv_u32_t ix = sv_as_u32_f32 (x);
  sv_u32_t atop = svand_n_u32_x (pg, svlsr_n_u32_x (pg, ix, 16), 0x7fff);
  /* Handle both inf/nan as well as small values (|x|<2^-28). Subtracting
     0x3180 (the top 16 bits of 2^-28) makes smaller inputs wrap around in the
     unsigned compare, so a single test flags both ends of the range. */
  svbool_t cmp
      = svcmpge_n_u32 (pg, svsub_n_u32_x (pg, atop, 0x3180), 0x7ff0 - 0x3180);

  sv_u32_t sign = svand_n_u32_x (pg, ix, ~AbsMask);
  /* |x| < 0.921875. */
  svbool_t red = svaclt_n_f32 (pg, x, 0.921875f);
  /* |x| > 4.0. */
  svbool_t bor = svacgt_n_f32 (pg, x, 4.0f);

  /* Load polynomial coefficients. */
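  /* Coefficients for the reduced- and non-reduced-interval polynomials are
     interleaved in the table, so idx_lo/idx_hi select the variant appropriate
     to each lane from each loaded quadword. */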
  sv_u32_t idx_lo = svsel (red, sv_u32 (0), sv_u32 (1));
  sv_u32_t idx_hi = svadd_n_u32_x (pg, idx_lo, 2);

  const float *base = (float *) __v_erff_data.coeffs;
  sv_f32_t c_2_5 = svld1rq (svptrue_b32 (), base + 2);
  sv_f32_t c_6_9 = svld1rq (svptrue_b32 (), base + 6);
  sv_f32_t c_10_13 = svld1rq (svptrue_b32 (), base + 10);

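  /* svld1rq replicates a 128-bit block of four coefficients across the whole
     vector, so the 0-3 table indices used with svtbl below pick from that
     block in every lane, whatever the vector length. */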
  /* Do not need to store elem 0 of __v_erff_data as it is not used. */
  sv_f32_t p1 = svtbl (c_2_5, idx_lo);
  sv_f32_t p2 = svtbl (c_2_5, idx_hi);
  sv_f32_t p3 = svtbl (c_6_9, idx_lo);
  sv_f32_t p4 = svtbl (c_6_9, idx_hi);
  sv_f32_t p5 = svtbl (c_10_13, idx_lo);
  sv_f32_t p6 = svtbl (c_10_13, idx_hi);

  sv_f32_t a = svabs_f32_x (pg, x);
  /* Square with merging mul - z is x^2 for reduced, |x| otherwise. */
  sv_f32_t z = svmul_f32_m (red, a, a);

  /* Evaluate polynomial on |x| or x^2. */
  sv_f32_t r = sv_fma_f32_x (pg, z, p6, p5);
  r = sv_fma_f32_x (pg, z, r, p4);
  r = sv_fma_f32_x (pg, z, r, p3);
  r = sv_fma_f32_x (pg, z, r, p2);
  r = sv_fma_f32_x (pg, z, r, p1);
  /* Use a merging svmad for the last operation: the first coefficient is
     applied only in non-reduced lanes, and r is propagated unchanged
     elsewhere, because the reduced polynomial has lower order than the
     non-reduced one. */
  r = svmad_n_f32_m (svnot_b_z (pg, red), r, z, base[1]);
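  /* r = |x| + |x| * P(z). */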
  r = sv_fma_f32_x (pg, a, r, a);

  /* y = |x| + |x| * P(x^2)               if |x| < 0.921875
     y = 1 - exp (-(|x| + |x| * P(|x|)))  otherwise. */
  sv_f32_t y = __sv_expf_x (pg, svneg_f32_x (pg, r));
  y = svsel_f32 (red, r, svsubr_n_f32_x (pg, y, 1.0));

  /* Boring domain (absolute value is required to get the sign of erf(-nan)
     right). */
  y = svsel_f32 (bor, sv_f32 (1.0f), svabs_f32_x (pg, y));

  /* y = erf(x) if x > 0, -erf(-x) otherwise. */
  y = sv_as_f32_u32 (sveor_u32_x (pg, sv_as_u32_f32 (y), sign));

  if (unlikely (svptest_any (pg, cmp)))
    return __sv_erff_specialcase (x, y, cmp);
  return y;
}

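/* Expose the routine under its AArch64 vector-function-ABI name
   (_ZGVsMxv_erff: SVE, Masked, scalable length, one vector argument) so
   compilers can use it when auto-vectorizing calls to erff. */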
PL_ALIAS (__sv_erff_x, _ZGVsMxv_erff)

PL_SIG (SV, F, 1, erf, -4.0, 4.0)
PL_TEST_ULP (__sv_erff, 0.76)
PL_TEST_INTERVAL (__sv_erff, 0, 0x1p-28, 20000)
PL_TEST_INTERVAL (__sv_erff, 0x1p-28, 1, 60000)
PL_TEST_INTERVAL (__sv_erff, 1, 0x1p28, 60000)
PL_TEST_INTERVAL (__sv_erff, 0x1p28, inf, 20000)
PL_TEST_INTERVAL (__sv_erff, -0, -0x1p-28, 20000)
PL_TEST_INTERVAL (__sv_erff, -0x1p-28, -1, 60000)
PL_TEST_INTERVAL (__sv_erff, -1, -0x1p28, 60000)
PL_TEST_INTERVAL (__sv_erff, -0x1p28, -inf, 20000)
#endif