1 /*
2 * Licensed to the Apache Software Foundation (ASF) under one or more
3 * contributor license agreements. See the NOTICE file distributed with
4 * this work for additional information regarding copyright ownership.
5 * The ASF licenses this file to You under the Apache License, Version 2.0
6 * (the "License"); you may not use this file except in compliance with
7 * the License. You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <string.h>
19 #include "cbigint.h"
20
21 #if defined(LINUX) || defined(FREEBSD)
22 #define USE_LL
23 #endif
24
25 #ifdef HY_LITTLE_ENDIAN
26 #define at(i) (i)
27 #else
28 #define at(i) ((i)^1)
29 /* the sequence for halfAt is -1, 2, 1, 4, 3, 6, 5, 8... */
30 /* and it should correspond to 0, 1, 2, 3, 4, 5, 6, 7... */
31 #define halfAt(i) (-((-(i)) ^ 1))
32 #endif
33
34 #define HIGH_IN_U64(u64) ((u64) >> 32)
35 #if defined(USE_LL)
36 #define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFLL)
37 #else
38 #if defined(USE_L)
39 #define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFL)
40 #else
41 #define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFF)
42 #endif /* USE_L */
43 #endif /* USE_LL */
44
45 #if defined(USE_LL)
46 #define TEN_E1 (0xALL)
47 #define TEN_E2 (0x64LL)
48 #define TEN_E3 (0x3E8LL)
49 #define TEN_E4 (0x2710LL)
50 #define TEN_E5 (0x186A0LL)
51 #define TEN_E6 (0xF4240LL)
52 #define TEN_E7 (0x989680LL)
53 #define TEN_E8 (0x5F5E100LL)
54 #define TEN_E9 (0x3B9ACA00LL)
55 #define TEN_E19 (0x8AC7230489E80000LL)
56 #else
57 #if defined(USE_L)
58 #define TEN_E1 (0xAL)
59 #define TEN_E2 (0x64L)
60 #define TEN_E3 (0x3E8L)
61 #define TEN_E4 (0x2710L)
62 #define TEN_E5 (0x186A0L)
63 #define TEN_E6 (0xF4240L)
64 #define TEN_E7 (0x989680L)
65 #define TEN_E8 (0x5F5E100L)
66 #define TEN_E9 (0x3B9ACA00L)
67 #define TEN_E19 (0x8AC7230489E80000L)
68 #else
69 #define TEN_E1 (0xA)
70 #define TEN_E2 (0x64)
71 #define TEN_E3 (0x3E8)
72 #define TEN_E4 (0x2710)
73 #define TEN_E5 (0x186A0)
74 #define TEN_E6 (0xF4240)
75 #define TEN_E7 (0x989680)
76 #define TEN_E8 (0x5F5E100)
77 #define TEN_E9 (0x3B9ACA00)
78 #define TEN_E19 (0x8AC7230489E80000)
79 #endif /* USE_L */
80 #endif /* USE_LL */
81
82 #define TIMES_TEN(x) (((x) << 3) + ((x) << 1))
83 #define bitSection(x, mask, shift) (((x) & (mask)) >> (shift))
84 #define DOUBLE_TO_LONGBITS(dbl) (*((U_64 *)(&dbl)))
85 #define FLOAT_TO_INTBITS(flt) (*((U_32 *)(&flt)))
86 #define CREATE_DOUBLE_BITS(normalizedM, e) (((normalizedM) & MANTISSA_MASK) | (((U_64)((e) + E_OFFSET)) << 52))
87
88 #if defined(USE_LL)
89 #define MANTISSA_MASK (0x000FFFFFFFFFFFFFLL)
90 #define EXPONENT_MASK (0x7FF0000000000000LL)
91 #define NORMAL_MASK (0x0010000000000000LL)
92 #define SIGN_MASK (0x8000000000000000LL)
93 #else
94 #if defined(USE_L)
95 #define MANTISSA_MASK (0x000FFFFFFFFFFFFFL)
96 #define EXPONENT_MASK (0x7FF0000000000000L)
97 #define NORMAL_MASK (0x0010000000000000L)
98 #define SIGN_MASK (0x8000000000000000L)
99 #else
100 #define MANTISSA_MASK (0x000FFFFFFFFFFFFF)
101 #define EXPONENT_MASK (0x7FF0000000000000)
102 #define NORMAL_MASK (0x0010000000000000)
103 #define SIGN_MASK (0x8000000000000000)
104 #endif /* USE_L */
105 #endif /* USE_LL */
106
107 #define E_OFFSET (1075)
108
109 #define FLOAT_MANTISSA_MASK (0x007FFFFF)
110 #define FLOAT_EXPONENT_MASK (0x7F800000)
111 #define FLOAT_NORMAL_MASK (0x00800000)
112 #define FLOAT_E_OFFSET (150)
113
IDATA
simpleAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2)
{
  /* Adds arg2 into the multi-word unsigned integer arg1 (little-endian
   * word order, 'length' 64-bit words) in place.  Returns 1 if a carry
   * propagates out of the most significant word, 0 otherwise. */
  /* assumes length > 0 */
  IDATA index = 1;

  *arg1 += arg2;
  /* unsigned wrap-around test: the sum is smaller than the addend iff
   * the low-word addition carried out */
  if (arg2 <= *arg1)
    return 0;
  else if (length == 1)
    return 1;

  /* propagate the carry upward: each increment either absorbs it
   * (non-zero result) or wraps to 0 and carries into the next word */
  while (++arg1[index] == 0 && ++index < length);

  return (IDATA) index == length;
}
130
IDATA
addHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
{
  /* Adds the multi-word value arg2 into arg1 in place.  Returns 1 when
   * a carry propagates out of arg1's most significant word, 0 otherwise. */
  /* addition is limited by length of arg1 as it this function is
   * storing the result in arg1 */
  /* fix for cc (GCC) 3.2 20020903 (Red Hat Linux 8.0 3.2-7): code generated does not
   * do the temp1 + temp2 + carry addition correct. carry is 64 bit because gcc has
   * subtle issues when you mix 64 / 32 bit maths. */
  U_64 temp1, temp2, temp3;     /* temporary variables to help the SH-4, and gcc */
  U_64 carry;
  IDATA index;

  if (length1 == 0 || length2 == 0)
    {
      return 0;
    }
  else if (length1 < length2)
    {
      /* only as many words as arg1 can hold take part in the addition */
      length2 = length1;
    }

  carry = 0;
  index = 0;
  do
    {
      temp1 = arg1[index];
      temp2 = arg2[index];
      temp3 = temp1 + temp2;
      arg1[index] = temp3 + carry;
      /* carry detection: result < arg2 word means the sum wrapped (carry
       * out); result == arg2 word means temp1 + carry was 0 mod 2^64, in
       * which case the previous carry value is still the correct one, so
       * it is deliberately left untouched */
      if (arg2[index] < arg1[index])
        carry = 0;
      else if (arg2[index] != arg1[index])
        carry = 1;
    }
  while (++index < length2);
  if (!carry)
    return 0;
  else if (index == length1)
    return 1;

  /* propagate the final carry through arg1's remaining high words */
  while (++arg1[index] == 0 && ++index < length1);

  return (IDATA) index == length1;
}
175
void
subtractHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
{
  /* Computes arg1 -= arg2 in place via the two's-complement identity
   * a - b == -((-a) + b): negate arg1, add arg2, negate again. */
  /* assumes arg1 > arg2 */
  IDATA index;

  /* arg1 = -arg1 (bitwise complement plus one) */
  for (index = 0; index < length1; ++index)
    arg1[index] = ~arg1[index];
  simpleAddHighPrecision (arg1, length1, 1);

  /* drop high-order zero words of arg2 so they cannot widen the add */
  while (length2 > 0 && arg2[length2 - 1] == 0)
    --length2;

  /* arg1 = -arg1 + arg2 = arg2 - (original arg1) */
  addHighPrecision (arg1, length1, arg2, length2);

  /* arg1 = -(arg2 - original arg1) = original arg1 - arg2 */
  for (index = 0; index < length1; ++index)
    arg1[index] = ~arg1[index];
  simpleAddHighPrecision (arg1, length1, 1);
}
194
U_32
simpleMultiplyHighPrecision (U_64 * arg1, IDATA length, U_64 arg2)
{
  /* Multiplies arg1 ('length' 64-bit words) in place by arg2 and
   * returns the 32-bit overflow carried out of the top word. */
  /* assumes arg2 only holds 32 bits of information */
  U_64 product;
  IDATA index;

  index = 0;
  product = 0;

  do
    {
      /* 32x32->64 multiply on each half-word; the high half of the
       * previous product is the carry into the next step */
      product =
        HIGH_IN_U64 (product) + arg2 * LOW_U32_FROM_PTR (arg1 + index);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
      product =
        HIGH_IN_U64 (product) + arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
    }
  while (++index < length);

  /* whatever is left in the high half did not fit: report it to the caller */
  return HIGH_U32_FROM_VAR (product);
}
218
void
simpleMultiplyAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2,
                                U_32 * result)
{
  /* Accumulates result += arg1 * arg2, where result is addressed as an
   * array of 32-bit half-words (the at() macro corrects half-word order
   * on big-endian builds). */
  /* Assumes result can hold the product and arg2 only holds 32 bits
     of information */
  U_64 product;
  IDATA index, resultIndex;

  index = resultIndex = 0;
  product = 0;

  do
    {
      /* low half: previous carry + existing result word + partial product */
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * LOW_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
      /* high half, same scheme */
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
    }
  while (++index < length);

  /* fold the final carry into the next result word; if that addition
   * itself wraps, keep propagating through subsequent words */
  result[at (resultIndex)] += HIGH_U32_FROM_VAR (product);
  if (result[at (resultIndex)] < HIGH_U32_FROM_VAR (product))
    {
      /* must be careful with ++ operator and macro expansion */
      ++resultIndex;
      while (++result[at (resultIndex)] == 0)
        ++resultIndex;
    }
}
255
256 void
multiplyHighPrecision(U_64 * arg1,IDATA length1,U_64 * arg2,IDATA length2,U_64 * result,IDATA length)257 multiplyHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2,
258 U_64 * result, IDATA length)
259 {
260 /* assumes result is large enough to hold product */
261 U_64 *temp;
262 U_32 *resultIn32;
263 IDATA count, index;
264
265 if (length1 < length2)
266 {
267 temp = arg1;
268 arg1 = arg2;
269 arg2 = temp;
270 count = length1;
271 length1 = length2;
272 length2 = count;
273 }
274
275 memset (result, 0, sizeof (U_64) * length);
276
277 /* length1 > length2 */
278 resultIn32 = (U_32 *) result;
279 index = -1;
280 for (count = 0; count < length2; ++count)
281 {
282 simpleMultiplyAddHighPrecision (arg1, length1, LOW_IN_U64 (arg2[count]),
283 resultIn32 + (++index));
284 simpleMultiplyAddHighPrecision (arg1, length1,
285 HIGH_IN_U64 (arg2[count]),
286 resultIn32 + (++index));
287
288 }
289 }
290
U_32
simpleAppendDecimalDigitHighPrecision (U_64 * arg1, IDATA length, U_64 digit)
{
  /* Computes arg1 = arg1 * 10 + digit in place and returns the 32-bit
   * overflow carried out of the most significant word. */
  /* assumes digit is less than 32 bits */
  U_64 arg;
  IDATA index = 0;

  /* pre-shift so HIGH_IN_U64(digit) delivers the incoming digit as the
   * carry into the very first half-word iteration below */
  digit <<= 32;
  do
    {
      /* each 32-bit half is multiplied by ten (shift-add via TIMES_TEN)
       * with the previous step's high half added as the carry */
      arg = LOW_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);

      arg = HIGH_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);
    }
  while (++index < length);

  return HIGH_U32_FROM_VAR (digit);
}
313
void
simpleShiftLeftHighPrecision (U_64 * arg1, IDATA length, IDATA arg2)
{
  /* Shifts the multi-word value in arg1 left by arg2 bits in place.
   * Bits shifted past the most significant word are discarded. */
  /* assumes length > 0 */
  IDATA index, offset;

  /* move whole 64-bit words first when shifting by 64 or more */
  if (arg2 >= 64)
    {
      offset = arg2 >> 6;
      index = length;

      /* copy from the top down so source words are not clobbered */
      while (--index - offset >= 0)
        arg1[index] = arg1[index - offset];
      /* zero-fill the vacated low words */
      do
        {
          arg1[index] = 0;
        }
      while (--index >= 0);

      arg2 &= 0x3F;
    }

  /* arg2 is now in 0..63; returning here avoids the undefined
   * 64-bit shift (64 - arg2 == 64) in the loop below */
  if (arg2 == 0)
    return;
  while (--length > 0)
    {
      /* shift each word, pulling in the spilled bits from the word below */
      arg1[length] = arg1[length] << arg2 | arg1[length - 1] >> (64 - arg2);
    }
  *arg1 <<= arg2;
}
343
344 IDATA
highestSetBit(U_64 * y)345 highestSetBit (U_64 * y)
346 {
347 U_32 x;
348 IDATA result;
349
350 if (*y == 0)
351 return 0;
352
353 #if defined(USE_LL)
354 if (*y & 0xFFFFFFFF00000000LL)
355 {
356 x = HIGH_U32_FROM_PTR (y);
357 result = 32;
358 }
359 else
360 {
361 x = LOW_U32_FROM_PTR (y);
362 result = 0;
363 }
364 #else
365 #if defined(USE_L)
366 if (*y & 0xFFFFFFFF00000000L)
367 {
368 x = HIGH_U32_FROM_PTR (y);
369 result = 32;
370 }
371 else
372 {
373 x = LOW_U32_FROM_PTR (y);
374 result = 0;
375 }
376 #else
377 if (*y & 0xFFFFFFFF00000000)
378 {
379 x = HIGH_U32_FROM_PTR (y);
380 result = 32;
381 }
382 else
383 {
384 x = LOW_U32_FROM_PTR (y);
385 result = 0;
386 }
387 #endif /* USE_L */
388 #endif /* USE_LL */
389
390 if (x & 0xFFFF0000)
391 {
392 x = bitSection (x, 0xFFFF0000, 16);
393 result += 16;
394 }
395 if (x & 0xFF00)
396 {
397 x = bitSection (x, 0xFF00, 8);
398 result += 8;
399 }
400 if (x & 0xF0)
401 {
402 x = bitSection (x, 0xF0, 4);
403 result += 4;
404 }
405 if (x > 0x7)
406 return result + 4;
407 else if (x > 0x3)
408 return result + 3;
409 else if (x > 0x1)
410 return result + 2;
411 else
412 return result + 1;
413 }
414
415 IDATA
lowestSetBit(U_64 * y)416 lowestSetBit (U_64 * y)
417 {
418 U_32 x;
419 IDATA result;
420
421 if (*y == 0)
422 return 0;
423
424 #if defined(USE_LL)
425 if (*y & 0x00000000FFFFFFFFLL)
426 {
427 x = LOW_U32_FROM_PTR (y);
428 result = 0;
429 }
430 else
431 {
432 x = HIGH_U32_FROM_PTR (y);
433 result = 32;
434 }
435 #else
436 #if defined(USE_L)
437 if (*y & 0x00000000FFFFFFFFL)
438 {
439 x = LOW_U32_FROM_PTR (y);
440 result = 0;
441 }
442 else
443 {
444 x = HIGH_U32_FROM_PTR (y);
445 result = 32;
446 }
447 #else
448 if (*y & 0x00000000FFFFFFFF)
449 {
450 x = LOW_U32_FROM_PTR (y);
451 result = 0;
452 }
453 else
454 {
455 x = HIGH_U32_FROM_PTR (y);
456 result = 32;
457 }
458 #endif /* USE_L */
459 #endif /* USE_LL */
460
461 if (!(x & 0xFFFF))
462 {
463 x = bitSection (x, 0xFFFF0000, 16);
464 result += 16;
465 }
466 if (!(x & 0xFF))
467 {
468 x = bitSection (x, 0xFF00, 8);
469 result += 8;
470 }
471 if (!(x & 0xF))
472 {
473 x = bitSection (x, 0xF0, 4);
474 result += 4;
475 }
476
477 if (x & 0x1)
478 return result + 1;
479 else if (x & 0x2)
480 return result + 2;
481 else if (x & 0x4)
482 return result + 3;
483 else
484 return result + 4;
485 }
486
487 IDATA
highestSetBitHighPrecision(U_64 * arg,IDATA length)488 highestSetBitHighPrecision (U_64 * arg, IDATA length)
489 {
490 IDATA highBit;
491
492 while (--length >= 0)
493 {
494 highBit = highestSetBit (arg + length);
495 if (highBit)
496 return highBit + 64 * length;
497 }
498
499 return 0;
500 }
501
502 IDATA
lowestSetBitHighPrecision(U_64 * arg,IDATA length)503 lowestSetBitHighPrecision (U_64 * arg, IDATA length)
504 {
505 IDATA lowBit, index = -1;
506
507 while (++index < length)
508 {
509 lowBit = lowestSetBit (arg + index);
510 if (lowBit)
511 return lowBit + 64 * index;
512 }
513
514 return 0;
515 }
516
517 IDATA
compareHighPrecision(U_64 * arg1,IDATA length1,U_64 * arg2,IDATA length2)518 compareHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
519 {
520 while (--length1 >= 0 && arg1[length1] == 0);
521 while (--length2 >= 0 && arg2[length2] == 0);
522
523 if (length1 > length2)
524 return 1;
525 else if (length1 < length2)
526 return -1;
527 else if (length1 > -1)
528 {
529 do
530 {
531 if (arg1[length1] > arg2[length1])
532 return 1;
533 else if (arg1[length1] < arg2[length1])
534 return -1;
535 }
536 while (--length1 >= 0);
537 }
538
539 return 0;
540 }
541
jdouble
toDoubleHighPrecision (U_64 * arg, IDATA length)
{
  /* Converts the multi-word unsigned integer in arg ('length' 64-bit
   * words, little-endian word order) to the nearest jdouble, rounding
   * half to even.  Values wider than 16 words (1024 bits) exceed the
   * double range and collapse to +infinity. */
  IDATA highBit;
  U_64 mantissa, test64;
  U_32 test;
  jdouble result;

  /* ignore high-order zero words */
  while (length > 0 && arg[length - 1] == 0)
    --length;

  if (length == 0)
    result = 0.0;
  else if (length > 16)
    {
      /* exponent field all ones, zero mantissa: +infinity */
      DOUBLE_TO_LONGBITS (result) = EXPONENT_MASK;
    }
  else if (length == 1)
    {
      highBit = highestSetBit (arg);
      if (highBit <= 53)
        {
          /* value fits the 53-bit significand exactly: scale it up and
           * compensate with a negative exponent */
          highBit = 53 - highBit;
          mantissa = *arg << highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, -highBit);
        }
      else
        {
          /* too wide: keep the top 53 bits, then round on the rest */
          highBit -= 53;
          mantissa = *arg >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
        }
    }
  else
    {
      /* multi-word value: the significand comes from the top word(s) */
      highBit = highestSetBit (arg + (--length));
      if (highBit <= 53)
        {
          highBit = 53 - highBit;
          if (highBit > 0)
            {
              /* significand straddles the two most significant words */
              mantissa =
                (arg[length] << highBit) | (arg[length - 1] >>
                                            (64 - highBit));
            }
          else
            {
              mantissa = arg[length];
            }
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 - highBit);

          /* perform rounding, round to even in case of tie */
          test64 = arg[--length] << highBit;
          if (test64 > SIGN_MASK || ((test64 == SIGN_MASK) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test64 == SIGN_MASK)
            {
              /* exactly half: round up only if any lower word is
               * non-zero (the "sticky" bits) */
              while (--length >= 0)
                {
                  if (arg[length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
            }
        }
      else
        {
          highBit -= 53;
          mantissa = arg[length] >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 + highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg + length) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test == 0x400)
            {
              /* exactly half: scan lower words for a sticky bit */
              do
                {
                  if (arg[--length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
              while (length > 0);
            }
        }
    }

  return result;
}
647
648 IDATA
tenToTheEHighPrecision(U_64 * result,IDATA length,jint e)649 tenToTheEHighPrecision (U_64 * result, IDATA length, jint e)
650 {
651 /* size test */
652 if (length < ((e / 19) + 1))
653 return 0;
654
655 memset (result, 0, length * sizeof (U_64));
656 *result = 1;
657
658 if (e == 0)
659 return 1;
660
661 length = 1;
662 length = timesTenToTheEHighPrecision (result, length, e);
663 /* bad O(n) way of doing it, but simple */
664 /*
665 do {
666 overflow = simpleAppendDecimalDigitHighPrecision(result, length, 0);
667 if (overflow)
668 result[length++] = overflow;
669 } while (--e);
670 */
671 return length;
672 }
673
674 IDATA
timesTenToTheEHighPrecision(U_64 * result,IDATA length,jint e)675 timesTenToTheEHighPrecision (U_64 * result, IDATA length, jint e)
676 {
677 /* assumes result can hold value */
678 U_64 overflow;
679 int exp10 = e;
680
681 if (e == 0)
682 return length;
683
684 /* bad O(n) way of doing it, but simple */
685 /*
686 do {
687 overflow = simpleAppendDecimalDigitHighPrecision(result, length, 0);
688 if (overflow)
689 result[length++] = overflow;
690 } while (--e);
691 */
692 /* Replace the current implementaion which performs a
693 * "multiplication" by 10 e number of times with an actual
694 * mulitplication. 10e19 is the largest exponent to the power of ten
695 * that will fit in a 64-bit integer, and 10e9 is the largest exponent to
696 * the power of ten that will fit in a 64-bit integer. Not sure where the
697 * break-even point is between an actual multiplication and a
698 * simpleAappendDecimalDigit() so just pick 10e3 as that point for
699 * now.
700 */
701 while (exp10 >= 19)
702 {
703 overflow = simpleMultiplyHighPrecision64 (result, length, TEN_E19);
704 if (overflow)
705 result[length++] = overflow;
706 exp10 -= 19;
707 }
708 while (exp10 >= 9)
709 {
710 overflow = simpleMultiplyHighPrecision (result, length, TEN_E9);
711 if (overflow)
712 result[length++] = overflow;
713 exp10 -= 9;
714 }
715 if (exp10 == 0)
716 return length;
717 else if (exp10 == 1)
718 {
719 overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
720 if (overflow)
721 result[length++] = overflow;
722 }
723 else if (exp10 == 2)
724 {
725 overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
726 if (overflow)
727 result[length++] = overflow;
728 overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
729 if (overflow)
730 result[length++] = overflow;
731 }
732 else if (exp10 == 3)
733 {
734 overflow = simpleMultiplyHighPrecision (result, length, TEN_E3);
735 if (overflow)
736 result[length++] = overflow;
737 }
738 else if (exp10 == 4)
739 {
740 overflow = simpleMultiplyHighPrecision (result, length, TEN_E4);
741 if (overflow)
742 result[length++] = overflow;
743 }
744 else if (exp10 == 5)
745 {
746 overflow = simpleMultiplyHighPrecision (result, length, TEN_E5);
747 if (overflow)
748 result[length++] = overflow;
749 }
750 else if (exp10 == 6)
751 {
752 overflow = simpleMultiplyHighPrecision (result, length, TEN_E6);
753 if (overflow)
754 result[length++] = overflow;
755 }
756 else if (exp10 == 7)
757 {
758 overflow = simpleMultiplyHighPrecision (result, length, TEN_E7);
759 if (overflow)
760 result[length++] = overflow;
761 }
762 else if (exp10 == 8)
763 {
764 overflow = simpleMultiplyHighPrecision (result, length, TEN_E8);
765 if (overflow)
766 result[length++] = overflow;
767 }
768 return length;
769 }
770
771 U_64
doubleMantissa(jdouble z)772 doubleMantissa (jdouble z)
773 {
774 U_64 m = DOUBLE_TO_LONGBITS (z);
775
776 if ((m & EXPONENT_MASK) != 0)
777 m = (m & MANTISSA_MASK) | NORMAL_MASK;
778 else
779 m = (m & MANTISSA_MASK);
780
781 return m;
782 }
783
784 IDATA
doubleExponent(jdouble z)785 doubleExponent (jdouble z)
786 {
787 /* assumes positive double */
788 IDATA k = HIGH_U32_FROM_VAR (z) >> 20;
789
790 if (k)
791 k -= E_OFFSET;
792 else
793 k = 1 - E_OFFSET;
794
795 return k;
796 }
797
798 UDATA
floatMantissa(jfloat z)799 floatMantissa (jfloat z)
800 {
801 UDATA m = (UDATA) FLOAT_TO_INTBITS (z);
802
803 if ((m & FLOAT_EXPONENT_MASK) != 0)
804 m = (m & FLOAT_MANTISSA_MASK) | FLOAT_NORMAL_MASK;
805 else
806 m = (m & FLOAT_MANTISSA_MASK);
807
808 return m;
809 }
810
811 IDATA
floatExponent(jfloat z)812 floatExponent (jfloat z)
813 {
814 /* assumes positive float */
815 IDATA k = FLOAT_TO_INTBITS (z) >> 23;
816 if (k)
817 k -= FLOAT_E_OFFSET;
818 else
819 k = 1 - FLOAT_E_OFFSET;
820
821 return k;
822 }
823
/* Allow a 64-bit value in arg2 */
U_64
simpleMultiplyHighPrecision64 (U_64 * arg1, IDATA length, U_64 arg2)
{
  /* Multiplies arg1 ('length' words) in place by a full 64-bit arg2
   * and returns the overflow word.  Each 64x64 product is assembled
   * from four 32x32->64 partial products with explicit carry tracking. */
  U_64 intermediate, *pArg1, carry1, carry2, prod1, prod2, sum;
  IDATA index;
  U_32 buf32;

  index = 0;
  intermediate = 0;
  pArg1 = arg1 + index;
  carry1 = carry2 = 0;

  do
    {
      /* skip zero words when there is no pending carry-in */
      if ((*pArg1 != 0) || (intermediate != 0))
        {
          /* low x low partial product plus incoming carry word */
          prod1 =
            (U_64) LOW_U32_FROM_VAR (arg2) * (U_64) LOW_U32_FROM_PTR (pArg1);
          sum = intermediate + prod1;
          if ((sum < prod1) || (sum < intermediate))
            {
              carry1 = 1;
            }
          else
            {
              carry1 = 0;
            }
          /* the two cross terms (low x high, high x low), each aligned
           * 32 bits above the low partial product */
          prod1 =
            (U_64) LOW_U32_FROM_VAR (arg2) * (U_64) HIGH_U32_FROM_PTR (pArg1);
          prod2 =
            (U_64) HIGH_U32_FROM_VAR (arg2) * (U_64) LOW_U32_FROM_PTR (pArg1);
          intermediate = carry2 + HIGH_IN_U64 (sum) + prod1 + prod2;
          if ((intermediate < prod1) || (intermediate < prod2))
            {
              carry2 = 1;
            }
          else
            {
              carry2 = 0;
            }
          /* store the low 64 bits of this word's product; keep the old
           * high half (buf32) for the high x high term below */
          LOW_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (sum);
          buf32 = HIGH_U32_FROM_PTR (pArg1);
          HIGH_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (intermediate);
          /* high x high plus accumulated carries becomes the carry-in
           * for the next word */
          intermediate = carry1 + HIGH_IN_U64 (intermediate)
            + (U_64) HIGH_U32_FROM_VAR (arg2) * (U_64) buf32;
        }
      pArg1++;
    }
  while (++index < length);
  return intermediate;
}
876