/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string.h>
#include "cbigint.h"

#if defined(__linux__) || defined(FREEBSD) || defined(ZOS) || defined(MACOSX) || defined(AIX)
#define USE_LL
#endif

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define at(i) (i)
#else
#define at(i) ((i)^1)
/* the sequence for halfAt is -1, 2, 1, 4, 3, 6, 5, 8... */
/* and it should correspond to 0, 1, 2, 3, 4, 5, 6, 7... */
#define halfAt(i) (-((-(i)) ^ 1))
#endif

#define HIGH_IN_U64(u64) ((u64) >> 32)
#if defined(USE_LL)
#define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFLL)
#else
#if defined(USE_L)
#define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFL)
#else
#define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFF)
#endif /* USE_L */
#endif /* USE_LL */

#if defined(USE_LL)
#define TEN_E1 (0xALL)
#define TEN_E2 (0x64LL)
#define TEN_E3 (0x3E8LL)
#define TEN_E4 (0x2710LL)
#define TEN_E5 (0x186A0LL)
#define TEN_E6 (0xF4240LL)
#define TEN_E7 (0x989680LL)
#define TEN_E8 (0x5F5E100LL)
#define TEN_E9 (0x3B9ACA00LL)
#define TEN_E19 (0x8AC7230489E80000LL)
#else
#if defined(USE_L)
#define TEN_E1 (0xAL)
#define TEN_E2 (0x64L)
#define TEN_E3 (0x3E8L)
#define TEN_E4 (0x2710L)
#define TEN_E5 (0x186A0L)
#define TEN_E6 (0xF4240L)
#define TEN_E7 (0x989680L)
#define TEN_E8 (0x5F5E100L)
#define TEN_E9 (0x3B9ACA00L)
#define TEN_E19 (0x8AC7230489E80000L)
#else
#define TEN_E1 (0xA)
#define TEN_E2 (0x64)
#define TEN_E3 (0x3E8)
#define TEN_E4 (0x2710)
#define TEN_E5 (0x186A0)
#define TEN_E6 (0xF4240)
#define TEN_E7 (0x989680)
#define TEN_E8 (0x5F5E100)
#define TEN_E9 (0x3B9ACA00)
#define TEN_E19 (0x8AC7230489E80000)
#endif /* USE_L */
#endif /* USE_LL */

#define TIMES_TEN(x) (((x) << 3) + ((x) << 1))
#define bitSection(x, mask, shift) (((x) & (mask)) >> (shift))
#define CREATE_DOUBLE_BITS(normalizedM, e) (((normalizedM) & MANTISSA_MASK) | ((static_cast<uint64_t>((e) + E_OFFSET)) << 52))

#if defined(USE_LL)
#define MANTISSA_MASK (0x000FFFFFFFFFFFFFLL)
#define EXPONENT_MASK (0x7FF0000000000000LL)
#define NORMAL_MASK (0x0010000000000000LL)
#define SIGN_MASK (0x8000000000000000LL)
#else
#if defined(USE_L)
#define MANTISSA_MASK (0x000FFFFFFFFFFFFFL)
#define EXPONENT_MASK (0x7FF0000000000000L)
#define NORMAL_MASK (0x0010000000000000L)
#define SIGN_MASK (0x8000000000000000L)
#else
#define MANTISSA_MASK (0x000FFFFFFFFFFFFF)
#define EXPONENT_MASK (0x7FF0000000000000)
#define NORMAL_MASK (0x0010000000000000)
#define SIGN_MASK (0x8000000000000000)
#endif /* USE_L */
#endif /* USE_LL */

#define E_OFFSET (1075)

#define FLOAT_MANTISSA_MASK (0x007FFFFF)
#define FLOAT_EXPONENT_MASK (0x7F800000)
#define FLOAT_NORMAL_MASK (0x00800000)
#define FLOAT_E_OFFSET (150)
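/* Adds the single uint64_t arg2 to the multi-word integer in arg1 (an
 * array of 'length' uint64_t words, least significant word first) and
 * propagates the carry upward. Returns 1 if a carry overflows the most
 * significant word, 0 otherwise. */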
int32_t
simpleAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2)
{
  /* assumes length > 0 */
  int32_t index = 1;

  *arg1 += arg2;
  if (arg2 <= *arg1)
    return 0;
  else if (length == 1)
    return 1;

  while (++arg1[index] == 0 && ++index < length);

  return index == length;
}
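/* Adds the multi-word integer arg2 into arg1 in place. Only the low
 * min(length1, length2) words of arg2 are consumed, since the result is
 * truncated to the length of arg1. Returns 1 if a carry propagates out
 * of arg1's most significant word, 0 otherwise. */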
int32_t
addHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  /* addition is limited by length of arg1 as this function is
   * storing the result in arg1 */
  /* fix for cc (GCC) 3.2 20020903 (Red Hat Linux 8.0 3.2-7): code generated does not
   * do the temp1 + temp2 + carry addition correctly. carry is 64 bit because gcc has
   * subtle issues when you mix 64 / 32 bit maths. */
  uint64_t temp1, temp2, temp3;    /* temporary variables to help the SH-4, and gcc */
  uint64_t carry;
  int32_t index;

  if (length1 == 0 || length2 == 0)
    {
      return 0;
    }
  else if (length1 < length2)
    {
      length2 = length1;
    }

  carry = 0;
  index = 0;
  do
    {
      temp1 = arg1[index];
      temp2 = arg2[index];
      temp3 = temp1 + temp2;
      arg1[index] = temp3 + carry;
      if (arg2[index] < arg1[index])
        carry = 0;
      else if (arg2[index] != arg1[index])
        carry = 1;
    }
  while (++index < length2);
  if (!carry)
    return 0;
  else if (index == length1)
    return 1;

  while (++arg1[index] == 0 && ++index < length1);

  return index == length1;
}
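/* Computes arg1 -= arg2 in place using two's complement: it negates
 * arg1, adds arg2, and negates the sum, since arg1 - arg2 ==
 * -(-arg1 + arg2). Trailing zero words of arg2 are trimmed first, and
 * arg1 >= arg2 is assumed so the result is non-negative. */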
void
subtractHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  /* assumes arg1 > arg2 */
  int32_t index;
  for (index = 0; index < length1; ++index)
    arg1[index] = ~arg1[index];
  simpleAddHighPrecision (arg1, length1, 1);

  while (length2 > 0 && arg2[length2 - 1] == 0)
    --length2;

  addHighPrecision (arg1, length1, arg2, length2);

  for (index = 0; index < length1; ++index)
    arg1[index] = ~arg1[index];
  simpleAddHighPrecision (arg1, length1, 1);
}
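/* Multiplies the multi-word integer arg1 in place by arg2, which must
 * fit in 32 bits. Works through arg1 one 32-bit half at a time,
 * carrying the high half of each 64-bit partial product into the next
 * step. Returns the final 32-bit carry out of the top word. */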
static uint32_t simpleMultiplyHighPrecision(uint64_t* arg1, int32_t length, uint64_t arg2) {
  /* assumes arg2 only holds 32 bits of information */
  uint64_t product;
  int32_t index;

  index = 0;
  product = 0;

  do
    {
      product =
        HIGH_IN_U64 (product) + arg2 * LOW_U32_FROM_PTR (arg1 + index);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
      product =
        HIGH_IN_U64 (product) + arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
    }
  while (++index < length);

  return HIGH_U32_FROM_VAR (product);
}
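/* Multiplies arg1 ('length' words) by the 32-bit value arg2 and adds
 * the product into 'result', viewed as an array of 32-bit words. The
 * at() macro maps logical 32-bit word indices to their physical
 * positions within the 64-bit words for the host endianness. */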
static void
simpleMultiplyAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2,
                                uint32_t * result)
{
  /* Assumes result can hold the product and arg2 only holds 32 bits
     of information */
  uint64_t product;
  int32_t index, resultIndex;

  index = resultIndex = 0;
  product = 0;

  do
    {
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * LOW_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
    }
  while (++index < length);

  result[at (resultIndex)] += HIGH_U32_FROM_VAR (product);
  if (result[at (resultIndex)] < HIGH_U32_FROM_VAR (product))
    {
      /* must be careful with ++ operator and macro expansion */
      ++resultIndex;
      while (++result[at (resultIndex)] == 0)
        ++resultIndex;
    }
}

#if __BYTE_ORDER != __LITTLE_ENDIAN
void simpleMultiplyAddHighPrecisionBigEndianFix(uint64_t* arg1, int32_t length, uint64_t arg2, uint32_t* result) {
  /* Assumes result can hold the product and arg2 only holds 32 bits
     of information */
  uint64_t product;
  int32_t index, resultIndex;

  index = resultIndex = 0;
  product = 0;

  do {
    product = HIGH_IN_U64(product) + result[halfAt(resultIndex)] + arg2 * LOW_U32_FROM_PTR(arg1 + index);
    result[halfAt(resultIndex)] = LOW_U32_FROM_VAR(product);
    ++resultIndex;
    product = HIGH_IN_U64(product) + result[halfAt(resultIndex)] + arg2 * HIGH_U32_FROM_PTR(arg1 + index);
    result[halfAt(resultIndex)] = LOW_U32_FROM_VAR(product);
    ++resultIndex;
  } while (++index < length);

  result[halfAt(resultIndex)] += HIGH_U32_FROM_VAR(product);
  if (result[halfAt(resultIndex)] < HIGH_U32_FROM_VAR(product)) {
    /* must be careful with ++ operator and macro expansion */
    ++resultIndex;
    while (++result[halfAt(resultIndex)] == 0) ++resultIndex;
  }
}
#endif
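/* Full multi-word multiplication: result = arg1 * arg2. Implements
 * schoolbook multiplication 32 bits at a time, accumulating each
 * partial product into 'result', which the caller must have sized
 * ('length' words) to hold the whole product. On big-endian targets the
 * odd (high-half) passes need the index fix-up variant above. */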
void
multiplyHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2,
                       uint64_t * result, int32_t length)
{
  /* assumes result is large enough to hold product */
  uint64_t* temp;
  uint32_t* resultIn32;
  int32_t count, index;

  if (length1 < length2)
    {
      temp = arg1;
      arg1 = arg2;
      arg2 = temp;
      count = length1;
      length1 = length2;
      length2 = count;
    }

  memset (result, 0, sizeof (uint64_t) * length);

  /* length1 >= length2 */
  resultIn32 = reinterpret_cast<uint32_t*>(result);
  index = -1;
  for (count = 0; count < length2; ++count)
    {
      simpleMultiplyAddHighPrecision (arg1, length1, LOW_IN_U64 (arg2[count]),
                                      resultIn32 + (++index));
#if __BYTE_ORDER == __LITTLE_ENDIAN
      simpleMultiplyAddHighPrecision(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index));
#else
      simpleMultiplyAddHighPrecisionBigEndianFix(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index));
#endif
    }
}
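/* Computes arg1 = arg1 * 10 + digit in place, 32 bits at a time; the
 * incoming digit is pre-shifted into the high half so it acts as the
 * initial carry. Returns the 32-bit carry out of the top word. */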
uint32_t
simpleAppendDecimalDigitHighPrecision (uint64_t * arg1, int32_t length, uint64_t digit)
{
  /* assumes digit is less than 32 bits */
  uint64_t arg;
  int32_t index = 0;

  digit <<= 32;
  do
    {
      arg = LOW_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);

      arg = HIGH_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);
    }
  while (++index < length);

  return HIGH_U32_FROM_VAR (digit);
}
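/* Shifts the multi-word integer arg1 left by arg2 bits in place: whole
 * 64-bit words are moved first for shifts of 64 or more, then the
 * remaining sub-word shift carries bits between adjacent words. Bits
 * shifted past the top word are lost. */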
void
simpleShiftLeftHighPrecision (uint64_t * arg1, int32_t length, int32_t arg2)
{
  /* assumes length > 0 */
  int32_t index, offset;
  if (arg2 >= 64)
    {
      offset = arg2 >> 6;
      index = length;

      while (--index - offset >= 0)
        arg1[index] = arg1[index - offset];
      do
        {
          arg1[index] = 0;
        }
      while (--index >= 0);

      arg2 &= 0x3F;
    }

  if (arg2 == 0)
    return;
  while (--length > 0)
    {
      arg1[length] = arg1[length] << arg2 | arg1[length - 1] >> (64 - arg2);
    }
  *arg1 <<= arg2;
}
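/* Returns the 1-based position of the highest set bit in *y (1 for the
 * value 1, 64 for the top bit), or 0 if *y is zero. Narrows the search
 * by halving the candidate range: 32, then 16, 8, and 4 bits. */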
int32_t
highestSetBit (uint64_t * y)
{
  uint32_t x;
  int32_t result;

  if (*y == 0)
    return 0;

#if defined(USE_LL)
  if (*y & 0xFFFFFFFF00000000LL)
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
  else
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
#else
#if defined(USE_L)
  if (*y & 0xFFFFFFFF00000000L)
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
  else
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
#else
  if (*y & 0xFFFFFFFF00000000)
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
  else
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
#endif /* USE_L */
#endif /* USE_LL */

  if (x & 0xFFFF0000)
    {
      x = bitSection (x, 0xFFFF0000, 16);
      result += 16;
    }
  if (x & 0xFF00)
    {
      x = bitSection (x, 0xFF00, 8);
      result += 8;
    }
  if (x & 0xF0)
    {
      x = bitSection (x, 0xF0, 4);
      result += 4;
    }
  if (x > 0x7)
    return result + 4;
  else if (x > 0x3)
    return result + 3;
  else if (x > 0x1)
    return result + 2;
  else
    return result + 1;
}
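/* Returns the 1-based position of the lowest set bit in *y, or 0 if *y
 * is zero; the mirror image of highestSetBit above. */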
int32_t
lowestSetBit (uint64_t * y)
{
  uint32_t x;
  int32_t result;

  if (*y == 0)
    return 0;

#if defined(USE_LL)
  if (*y & 0x00000000FFFFFFFFLL)
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
  else
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
#else
#if defined(USE_L)
  if (*y & 0x00000000FFFFFFFFL)
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
  else
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
#else
  if (*y & 0x00000000FFFFFFFF)
    {
      x = LOW_U32_FROM_PTR (y);
      result = 0;
    }
  else
    {
      x = HIGH_U32_FROM_PTR (y);
      result = 32;
    }
#endif /* USE_L */
#endif /* USE_LL */

  if (!(x & 0xFFFF))
    {
      x = bitSection (x, 0xFFFF0000, 16);
      result += 16;
    }
  if (!(x & 0xFF))
    {
      x = bitSection (x, 0xFF00, 8);
      result += 8;
    }
  if (!(x & 0xF))
    {
      x = bitSection (x, 0xF0, 4);
      result += 4;
    }

  if (x & 0x1)
    return result + 1;
  else if (x & 0x2)
    return result + 2;
  else if (x & 0x4)
    return result + 3;
  else
    return result + 4;
}
int32_t
highestSetBitHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t highBit;

  while (--length >= 0)
    {
      highBit = highestSetBit (arg + length);
      if (highBit)
        return highBit + 64 * length;
    }

  return 0;
}

int32_t
lowestSetBitHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t lowBit, index = -1;

  while (++index < length)
    {
      lowBit = lowestSetBit (arg + index);
      if (lowBit)
        return lowBit + 64 * index;
    }

  return 0;
}
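/* Three-way comparison of two multi-word integers, ignoring leading
 * zero words: returns 1 if arg1 > arg2, -1 if arg1 < arg2, and 0 if
 * they are equal. */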
int32_t
compareHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  while (--length1 >= 0 && arg1[length1] == 0);
  while (--length2 >= 0 && arg2[length2] == 0);

  if (length1 > length2)
    return 1;
  else if (length1 < length2)
    return -1;
  else if (length1 > -1)
    {
      do
        {
          if (arg1[length1] > arg2[length1])
            return 1;
          else if (arg1[length1] < arg2[length1])
            return -1;
        }
      while (--length1 >= 0);
    }

  return 0;
}
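/* Converts a multi-word integer to the nearest jdouble. The mantissa is
 * taken from the top 53 significant bits; the discarded bits decide the
 * rounding (round half to even, with any non-zero lower words breaking
 * a tie upward). Values longer than 16 words (over 1024 bits) overflow
 * to infinity via the exponent mask. */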
jdouble
toDoubleHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t highBit;
  uint64_t mantissa, test64;
  uint32_t test;
  jdouble result;

  while (length > 0 && arg[length - 1] == 0)
    --length;

  if (length == 0)
    result = 0.0;
  else if (length > 16)
    {
      DOUBLE_TO_LONGBITS (result) = EXPONENT_MASK;
    }
  else if (length == 1)
    {
      highBit = highestSetBit (arg);
      if (highBit <= 53)
        {
          highBit = 53 - highBit;
          mantissa = *arg << highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, -highBit);
        }
      else
        {
          highBit -= 53;
          mantissa = *arg >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
        }
    }
  else
    {
      highBit = highestSetBit (arg + (--length));
      if (highBit <= 53)
        {
          highBit = 53 - highBit;
          if (highBit > 0)
            {
              mantissa =
                (arg[length] << highBit) | (arg[length - 1] >>
                                            (64 - highBit));
            }
          else
            {
              mantissa = arg[length];
            }
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 - highBit);

          /* perform rounding, round to even in case of tie */
          test64 = arg[--length] << highBit;
          if (test64 > SIGN_MASK || ((test64 == SIGN_MASK) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test64 == SIGN_MASK)
            {
              while (--length >= 0)
                {
                  if (arg[length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
            }
        }
      else
        {
          highBit -= 53;
          mantissa = arg[length] >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 + highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg + length) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test == 0x400)
            {
              do
                {
                  if (arg[--length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
              while (length > 0);
            }
        }
    }

  return result;
}
static uint64_t simpleMultiplyHighPrecision64(uint64_t* arg1, int32_t length, uint64_t arg2);
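/* Multiplies the multi-word integer in 'result' by 10^e in place, using
 * the largest precomputed power of ten available at each step: 10^19
 * chunks first, then 10^9, then one of the smaller TEN_Ex constants (or
 * a decimal-digit append for a remainder of 1 or 2). Returns the new
 * length, which grows as overflow words are appended. */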
int32_t
timesTenToTheEHighPrecision (uint64_t * result, int32_t length, jint e)
{
  /* assumes result can hold value */
  uint64_t overflow;
  int exp10 = e;

  if (e == 0)
    return length;

  /* bad O(n) way of doing it, but simple */
  /*
     do {
       overflow = simpleAppendDecimalDigitHighPrecision(result, length, 0);
       if (overflow)
         result[length++] = overflow;
     } while (--e);
   */
  /* Replace the current implementation, which performs a
   * "multiplication" by 10 e times, with an actual
   * multiplication. 10^19 is the largest power of ten that will fit in
   * an unsigned 64-bit integer, and 10^9 is the largest power of ten
   * that will fit in an unsigned 32-bit integer. Not sure where the
   * break-even point is between an actual multiplication and a
   * simpleAppendDecimalDigit() so just pick 10^3 as that point for
   * now.
   */
  while (exp10 >= 19)
    {
      overflow = simpleMultiplyHighPrecision64 (result, length, TEN_E19);
      if (overflow)
        result[length++] = overflow;
      exp10 -= 19;
    }
  while (exp10 >= 9)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E9);
      if (overflow)
        result[length++] = overflow;
      exp10 -= 9;
    }
  if (exp10 == 0)
    return length;
  else if (exp10 == 1)
    {
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 2)
    {
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 3)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E3);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 4)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E4);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 5)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E5);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 6)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E6);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 7)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E7);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 8)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E8);
      if (overflow)
        result[length++] = overflow;
    }
  return length;
}
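/* Extracts the 53-bit significand of a jdouble, restoring the implicit
 * leading one for normalized values; denormals keep just the stored
 * fraction bits. floatMantissa below is the 24-bit float analogue. */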
uint64_t
doubleMantissa (jdouble z)
{
  uint64_t m = DOUBLE_TO_LONGBITS (z);

  if ((m & EXPONENT_MASK) != 0)
    m = (m & MANTISSA_MASK) | NORMAL_MASK;
  else
    m = (m & MANTISSA_MASK);

  return m;
}

int32_t
doubleExponent (jdouble z)
{
  /* assumes positive double */
  int32_t k = HIGH_U32_FROM_VAR (z) >> 20;

  if (k)
    k -= E_OFFSET;
  else
    k = 1 - E_OFFSET;

  return k;
}

uint32_t floatMantissa(jfloat z) {
  uint32_t m = FLOAT_TO_INTBITS (z);

  if ((m & FLOAT_EXPONENT_MASK) != 0)
    m = (m & FLOAT_MANTISSA_MASK) | FLOAT_NORMAL_MASK;
  else
    m = (m & FLOAT_MANTISSA_MASK);

  return m;
}

int32_t
floatExponent (jfloat z)
{
  /* assumes positive float */
  int32_t k = FLOAT_TO_INTBITS (z) >> 23;
  if (k)
    k -= FLOAT_E_OFFSET;
  else
    k = 1 - FLOAT_E_OFFSET;

  return k;
}

/* Allow a 64-bit value in arg2 */
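/* Like simpleMultiplyHighPrecision, but arg2 may use all 64 bits. Each
 * word of arg1 is combined with arg2 through four 32x32->64 partial
 * products, with explicit carry flags because the running sum can
 * overflow 64 bits; words that are zero with nothing carried in are
 * skipped. Returns the word that overflows the top of arg1. */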
uint64_t
simpleMultiplyHighPrecision64 (uint64_t * arg1, int32_t length, uint64_t arg2)
{
  uint64_t intermediate, carry1, carry2, prod1, prod2, sum;
  uint64_t* pArg1;
  int32_t index;
  uint32_t buf32;

  index = 0;
  intermediate = 0;
  pArg1 = arg1 + index;
  carry1 = carry2 = 0;

  do
    {
      if ((*pArg1 != 0) || (intermediate != 0))
        {
          prod1 =
            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
          sum = intermediate + prod1;
          if ((sum < prod1) || (sum < intermediate))
            {
              carry1 = 1;
            }
          else
            {
              carry1 = 0;
            }
          prod1 =
            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(HIGH_U32_FROM_PTR (pArg1));
          prod2 =
            static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
          intermediate = carry2 + HIGH_IN_U64 (sum) + prod1 + prod2;
          if ((intermediate < prod1) || (intermediate < prod2))
            {
              carry2 = 1;
            }
          else
            {
              carry2 = 0;
            }
          LOW_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (sum);
          buf32 = HIGH_U32_FROM_PTR (pArg1);
          HIGH_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (intermediate);
          intermediate = carry1 + HIGH_IN_U64 (intermediate)
            + static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(buf32);
        }
      pArg1++;
    }
  while (++index < length);
  return intermediate;
}