use core::{f32, f64};

use super::scalbn;

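/* Exponent threshold after `normalize` rebases the exponent: a value at or
   above it means the original operand was zero, infinite, or NaN (zero maps
   strictly above the threshold, inf/NaN exactly onto it). */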
const ZEROINFNAN: i32 = 0x7ff - 0x3ff - 52 - 1;

struct Num {
    m: u64,
    e: i32,
    sign: i32,
}

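/* Decompose x into a sign flag, an exponent e and a 64-bit mantissa m with the
   implicit bit made explicit, so that |x| = m * 2^e for finite nonzero x;
   subnormals are pre-scaled by 2^63 so they take the same path. The mantissa is
   shifted left once, which leaves its top 10 bits and its last bit zero. */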
fn normalize(x: f64) -> Num {
    let x1p63: f64 = f64::from_bits(0x43e0000000000000); // 0x1p63 === 2 ^ 63

    let mut ix: u64 = x.to_bits();
    let mut e: i32 = (ix >> 52) as i32;
    let sign: i32 = e & 0x800;
    e &= 0x7ff;
    if e == 0 {
        ix = (x * x1p63).to_bits();
        e = (ix >> 52) as i32 & 0x7ff;
        e = if e != 0 { e - 63 } else { 0x800 };
    }
    ix &= (1 << 52) - 1;
    ix |= 1 << 52;
    ix <<= 1;
    e -= 0x3ff + 52 + 1;
    Num { m: ix, e, sign }
}

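/* Full 64x64 -> 128-bit unsigned multiply built from 32-bit halves, returned
   as (hi, lo). The cross terms cannot overflow because the mantissas produced
   by `normalize` fit in 54 bits. */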
fn mul(x: u64, y: u64) -> (u64, u64) {
    let t1: u64;
    let t2: u64;
    let t3: u64;
    let xlo: u64 = x as u32 as u64;
    let xhi: u64 = x >> 32;
    let ylo: u64 = y as u32 as u64;
    let yhi: u64 = y >> 32;

    t1 = xlo * ylo;
    t2 = xlo * yhi + xhi * ylo;
    t3 = xhi * yhi;
    let lo = t1.wrapping_add(t2 << 32);
    let hi = t3 + (t2 >> 32) + (t1 > lo) as u64;
    (hi, lo)
}

/// Floating multiply add (f64)
///
/// Computes `(x*y)+z`, rounded as one ternary operation:
/// Computes the value (as if) to infinite precision and rounds once to the result format,
/// according to the rounding mode characterized by the value of FLT_ROUNDS.
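///
/// # Examples
///
/// A case whose result is exact (the path shown assumes this function is
/// reachable as `libm::fma` from the crate root):
///
/// ```
/// assert_eq!(libm::fma(2.0, 3.0, 4.0), 10.0);
/// ```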
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fma(x: f64, y: f64, z: f64) -> f64 {
    let x1p63: f64 = f64::from_bits(0x43e0000000000000); // 0x1p63 === 2 ^ 63
    let x0_ffffff8p_63 = f64::from_bits(0x3bfffffff0000000); // 0x0.ffffff8p-63

    /* normalize so top 10bits and last bit are 0 */
    let nx = normalize(x);
    let ny = normalize(y);
    let nz = normalize(z);

    if nx.e >= ZEROINFNAN || ny.e >= ZEROINFNAN {
        return x * y + z;
    }
    if nz.e >= ZEROINFNAN {
        if nz.e > ZEROINFNAN {
            /* z==0 */
            return x * y + z;
        }
        return z;
    }

    /* mul: r = x*y */
    let zhi: u64;
    let zlo: u64;
    let (mut rhi, mut rlo) = mul(nx.m, ny.m);
    /* either top 20 or 21 bits of rhi and last 2 bits of rlo are 0 */

    /* align exponents */
    let mut e: i32 = nx.e + ny.e;
    let mut d: i32 = nz.e - e;
    /* shift bits z<<=kz, r>>=kr, so kz+kr == d, set e = e+kr (== ez-kz) */
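    /* Low bits shifted out on the right are ORed into bit 0 as a "sticky" bit,
       so the final rounding still sees that the discarded part was nonzero. */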
    if d > 0 {
        if d < 64 {
            zlo = nz.m << d;
            zhi = nz.m >> (64 - d);
        } else {
            zlo = 0;
            zhi = nz.m;
            e = nz.e - 64;
            d -= 64;
            if d == 0 {
            } else if d < 64 {
                rlo = rhi << (64 - d) | rlo >> d | ((rlo << (64 - d)) != 0) as u64;
                rhi = rhi >> d;
            } else {
                rlo = 1;
                rhi = 0;
            }
        }
    } else {
        zhi = 0;
        d = -d;
        if d == 0 {
            zlo = nz.m;
        } else if d < 64 {
            zlo = nz.m >> d | ((nz.m << (64 - d)) != 0) as u64;
        } else {
            zlo = 1;
        }
    }

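    /* The pair (rhi, rlo) is kept as a magnitude with `sign` tracked separately:
       equal signs add the two magnitudes, opposite signs subtract them, and a
       negative difference is negated with `sign` flipped. */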
    /* add */
    let mut sign: i32 = nx.sign ^ ny.sign;
    let samesign: bool = (sign ^ nz.sign) == 0;
    let mut nonzero: i32 = 1;
    if samesign {
        /* r += z */
        rlo = rlo.wrapping_add(zlo);
        rhi += zhi + (rlo < zlo) as u64;
    } else {
        /* r -= z */
        let t = rlo;
        rlo = rlo.wrapping_sub(zlo);
        rhi = rhi.wrapping_sub(zhi.wrapping_add((t < rlo) as u64));
        if (rhi >> 63) != 0 {
            rlo = (rlo as i64).wrapping_neg() as u64;
            rhi = (rhi as i64).wrapping_neg() as u64 - (rlo != 0) as u64;
            sign = (sign == 0) as i32;
        }
        nonzero = (rhi != 0) as i32;
    }

    /* set rhi to top 63bit of the result (last bit is sticky) */
    if nonzero != 0 {
        e += 64;
        d = rhi.leading_zeros() as i32 - 1;
        /* note: d > 0 */
        rhi = rhi << d | rlo >> (64 - d) | ((rlo << d) != 0) as u64;
    } else if rlo != 0 {
        d = rlo.leading_zeros() as i32 - 1;
        if d < 0 {
            rhi = rlo >> 1 | (rlo & 1);
        } else {
            rhi = rlo << d;
        }
    } else {
        /* exact +-0 */
        return x * y + z;
    }
    e -= d;

    /* convert to double */
    let mut i: i64 = rhi as i64; /* i is in [1<<62,(1<<63)-1] */
    if sign != 0 {
        i = -i;
    }
    let mut r: f64 = i as f64; /* |r| is in [0x1p62,0x1p63] */

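    /* If the scaled result would be subnormal, the final scalbn would discard
       bits and could round a second time; the branches below pre-round r so
       that only one rounding is observed, and raise underflow where it is due. */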
    if e < -1022 - 62 {
        /* result is subnormal before rounding */
        if e == -1022 - 63 {
            let mut c: f64 = x1p63;
            if sign != 0 {
                c = -c;
            }
            if r == c {
                /* min normal after rounding, underflow depends
                   on arch behaviour which can be imitated by
                   a double to float conversion */
                let fltmin: f32 = (x0_ffffff8p_63 * f32::MIN_POSITIVE as f64 * r) as f32;
                return f64::MIN_POSITIVE / f32::MIN_POSITIVE as f64 * fltmin as f64;
            }
            /* one bit is lost when scaled, add another top bit to
               only round once at conversion if it is inexact */
            if (rhi << 53) != 0 {
                i = (rhi >> 1 | (rhi & 1) | 1 << 62) as i64;
                if sign != 0 {
                    i = -i;
                }
                r = i as f64;
                r = 2. * r - c; /* remove top bit */

                /* raise underflow portably, such that it
                   cannot be optimized away */
                {
                    let tiny: f64 = f64::MIN_POSITIVE / f32::MIN_POSITIVE as f64 * r;
                    r += (tiny * tiny) * (r - r);
                }
            }
        } else {
            /* only round once when scaled */
            d = 10;
            i = ((rhi >> d | ((rhi << (64 - d)) != 0) as u64) << d) as i64;
            if sign != 0 {
                i = -i;
            }
            r = i as f64;
        }
    }
    scalbn(r, e)
}

#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn fma_segfault() {
        // These two inputs cause fma to segfault on release due to overflow:
        assert_eq!(
            fma(
                -0.0000000000000002220446049250313,
                -0.0000000000000002220446049250313,
                -0.0000000000000002220446049250313
            ),
            -0.00000000000000022204460492503126,
        );

        assert_eq!(fma(-0.992, -0.992, -0.992), -0.00793599999988632,);
    }
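
    /* Sanity checks on exactly representable cases; the expected results are
       exact in f64, and the first test exercises the single rounding that
       distinguishes fma from a separate multiply and add. */
    #[test]
    fn fma_exact_cases() {
        // x * x = 1 + 2^-51 + 2^-104; the plain expression rounds the product
        // and loses the 2^-104 tail, while fma keeps it.
        let x = 1.0 + f64::EPSILON; // 1 + 2^-52
        let z = -(1.0 + 2.0 * f64::EPSILON); // -(1 + 2^-51), the rounded product
        assert_eq!(x * x + z, 0.0);
        assert_eq!(fma(x, x, z), f64::EPSILON * f64::EPSILON); // 2^-104
    }

    #[test]
    fn mul_wide_product() {
        /* `mul` returns (hi, lo) of the full 128-bit product;
           (2^54)^2 = 2^108, i.e. hi = 2^44 and lo = 0. */
        assert_eq!(mul(1u64 << 54, 1u64 << 54), (1u64 << 44, 0));
    }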
}