• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Licensed to the Apache Software Foundation (ASF) under one or more
3  *  contributor license agreements.  See the NOTICE file distributed with
4  *  this work for additional information regarding copyright ownership.
5  *  The ASF licenses this file to You under the Apache License, Version 2.0
6  *  (the "License"); you may not use this file except in compliance with
7  *  the License.  You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  *  Unless required by applicable law or agreed to in writing, software
12  *  distributed under the License is distributed on an "AS IS" BASIS,
13  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  *  See the License for the specific language governing permissions and
15  *  limitations under the License.
16  */
17 
18 #include <string.h>
19 #include "cbigint.h"
20 
21 #if defined(__linux__) || defined(__APPLE__)
22 #define USE_LL
23 #endif
24 
25 #if __BYTE_ORDER == __LITTLE_ENDIAN
26 #define at(i) (i)
27 #else
28 #define at(i) ((i)^1)
29 /* the sequence for halfAt is -1, 2, 1, 4, 3, 6, 5, 8... */
30 /* and it should correspond to 0, 1, 2, 3, 4, 5, 6, 7... */
31 #define halfAt(i) (-((-(i)) ^ 1))
32 #endif
33 
34 #define HIGH_IN_U64(u64) ((u64) >> 32)
35 #if defined(USE_LL)
36 #define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFLL)
37 #else
38 #if defined(USE_L)
39 #define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFFL)
40 #else
41 #define LOW_IN_U64(u64) ((u64) & 0x00000000FFFFFFFF)
42 #endif /* USE_L */
43 #endif /* USE_LL */
44 
45 #if defined(USE_LL)
46 #define TEN_E1 (0xALL)
47 #define TEN_E2 (0x64LL)
48 #define TEN_E3 (0x3E8LL)
49 #define TEN_E4 (0x2710LL)
50 #define TEN_E5 (0x186A0LL)
51 #define TEN_E6 (0xF4240LL)
52 #define TEN_E7 (0x989680LL)
53 #define TEN_E8 (0x5F5E100LL)
54 #define TEN_E9 (0x3B9ACA00LL)
55 #define TEN_E19 (0x8AC7230489E80000LL)
56 #else
57 #if defined(USE_L)
58 #define TEN_E1 (0xAL)
59 #define TEN_E2 (0x64L)
60 #define TEN_E3 (0x3E8L)
61 #define TEN_E4 (0x2710L)
62 #define TEN_E5 (0x186A0L)
63 #define TEN_E6 (0xF4240L)
64 #define TEN_E7 (0x989680L)
65 #define TEN_E8 (0x5F5E100L)
66 #define TEN_E9 (0x3B9ACA00L)
67 #define TEN_E19 (0x8AC7230489E80000L)
68 #else
69 #define TEN_E1 (0xA)
70 #define TEN_E2 (0x64)
71 #define TEN_E3 (0x3E8)
72 #define TEN_E4 (0x2710)
73 #define TEN_E5 (0x186A0)
74 #define TEN_E6 (0xF4240)
75 #define TEN_E7 (0x989680)
76 #define TEN_E8 (0x5F5E100)
77 #define TEN_E9 (0x3B9ACA00)
78 #define TEN_E19 (0x8AC7230489E80000)
79 #endif /* USE_L */
80 #endif /* USE_LL */
81 
82 #define TIMES_TEN(x) (((x) << 3) + ((x) << 1))
83 #define bitSection(x, mask, shift) (((x) & (mask)) >> (shift))
84 #define CREATE_DOUBLE_BITS(normalizedM, e) (((normalizedM) & MANTISSA_MASK) | ((static_cast<uint64_t>((e) + E_OFFSET)) << 52))
85 
86 #if defined(USE_LL)
87 #define MANTISSA_MASK (0x000FFFFFFFFFFFFFLL)
88 #define EXPONENT_MASK (0x7FF0000000000000LL)
89 #define NORMAL_MASK (0x0010000000000000LL)
90 #define SIGN_MASK (0x8000000000000000LL)
91 #else
92 #if defined(USE_L)
93 #define MANTISSA_MASK (0x000FFFFFFFFFFFFFL)
94 #define EXPONENT_MASK (0x7FF0000000000000L)
95 #define NORMAL_MASK (0x0010000000000000L)
96 #define SIGN_MASK (0x8000000000000000L)
97 #else
98 #define MANTISSA_MASK (0x000FFFFFFFFFFFFF)
99 #define EXPONENT_MASK (0x7FF0000000000000)
100 #define NORMAL_MASK (0x0010000000000000)
101 #define SIGN_MASK (0x8000000000000000)
102 #endif /* USE_L */
103 #endif /* USE_LL */
104 
105 #define E_OFFSET (1075)
106 
107 #define FLOAT_MANTISSA_MASK (0x007FFFFF)
108 #define FLOAT_EXPONENT_MASK (0x7F800000)
109 #define FLOAT_NORMAL_MASK   (0x00800000)
110 #define FLOAT_E_OFFSET (150)
111 
int32_t
simpleAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2)
{
  /* Adds arg2 into the low word of the high-precision value arg1 and
   * ripples any carry upward.  Assumes length > 0.  Returns 1 when a
   * carry falls off the top word (overflow), 0 otherwise. */
  int32_t pos;

  arg1[0] += arg2;
  if (arg1[0] >= arg2)
    return 0;                   /* low word did not wrap: no carry at all */
  if (length == 1)
    return 1;                   /* wrapped, and there is no word above */

  /* propagate the carry until a word stops wrapping to zero */
  for (pos = 1; pos < length; ++pos)
    {
      if (++arg1[pos] != 0)
        return 0;
    }
  return 1;
}
129 
int32_t
addHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  /* Adds the first min(length1, length2) words of arg2 into arg1 and
   * then propagates the carry through the remainder of arg1.  The sum
   * is limited by length1 because the result is stored back into arg1.
   * Returns 1 when a carry leaves the top word of arg1, 0 otherwise.
   * carry is kept 64-bit: historically mixing 32/64-bit arithmetic here
   * miscompiled on old gcc (see original Harmony note). */
  uint64_t sum, withCarry;
  uint64_t carry = 0;
  int32_t pos = 0;

  if (length1 == 0 || length2 == 0)
    return 0;
  if (length2 > length1)
    length2 = length1;

  do
    {
      sum = arg1[pos] + arg2[pos];
      withCarry = sum + carry;
      /* at most one of the two additions can wrap, producing exactly
       * one outgoing carry */
      carry = (sum < arg2[pos]) | (withCarry < carry);
      arg1[pos] = withCarry;
    }
  while (++pos < length2);

  if (carry == 0)
    return 0;
  if (pos == length1)
    return 1;

  /* ripple the final carry through the untouched top words of arg1 */
  while (++arg1[pos] == 0 && ++pos < length1) {
  }

  return pos == length1;
}
175 
176 void
subtractHighPrecision(uint64_t * arg1,int32_t length1,uint64_t * arg2,int32_t length2)177 subtractHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
178 {
179   /* assumes arg1 > arg2 */
180   int32_t index;
181   for (index = 0; index < length1; ++index)
182     arg1[index] = ~arg1[index];
183   simpleAddHighPrecision (arg1, length1, 1);
184 
185   while (length2 > 0 && arg2[length2 - 1] == 0)
186     --length2;
187 
188   addHighPrecision (arg1, length1, arg2, length2);
189 
190   for (index = 0; index < length1; ++index)
191     arg1[index] = ~arg1[index];
192   simpleAddHighPrecision (arg1, length1, 1);
193 }
194 
/*
 * Multiplies the high-precision integer arg1 (length 64-bit words) by
 * arg2 in place, working one 32-bit half-word at a time.  Assumes arg2
 * only holds 32 bits of information.  Returns the 32-bit carry out of
 * the top word (non-zero means the product overflowed arg1).
 */
static uint32_t simpleMultiplyHighPrecision(uint64_t* arg1, int32_t length, uint64_t arg2) {
  /* assumes arg2 only holds 32 bits of information */
  uint64_t product;
  int32_t index;

  index = 0;
  product = 0;

  do
    {
      /* low half of the current word: the high half of 'product' is the
       * carry from the previous partial product */
      product =
        HIGH_IN_U64 (product) + arg2 * LOW_U32_FROM_PTR (arg1 + index);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
      /* high half of the same word */
      product =
        HIGH_IN_U64 (product) + arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (product);
    }
  while (++index < length);

  /* final carry out of the most significant half-word */
  return HIGH_U32_FROM_VAR (product);
}
216 
/*
 * Multiplies arg1 (length 64-bit words) by the 32-bit value arg2 and
 * ADDS the product into 'result', which is addressed as an array of
 * 32-bit half-words through the endian-correcting at() macro.
 * Assumes 'result' is large enough to absorb the product.
 */
static void
simpleMultiplyAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2,
                                uint32_t * result)
{
  /* Assumes result can hold the product and arg2 only holds 32 bits
     of information */
  uint64_t product;
  int32_t index, resultIndex;

  index = resultIndex = 0;
  product = 0;

  do
    {
      /* low half-word: carry (high half of 'product') + existing result
       * digit + new partial product */
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * LOW_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
      /* high half-word of the same source word */
      product =
        HIGH_IN_U64 (product) + result[at (resultIndex)] +
        arg2 * HIGH_U32_FROM_PTR (arg1 + index);
      result[at (resultIndex)] = LOW_U32_FROM_VAR (product);
      ++resultIndex;
    }
  while (++index < length);

  /* fold the final carry into the next result digit; if that wraps,
   * keep rippling upward */
  result[at (resultIndex)] += HIGH_U32_FROM_VAR (product);
  if (result[at (resultIndex)] < HIGH_U32_FROM_VAR (product))
    {
      /* must be careful with ++ operator and macro expansion */
      ++resultIndex;
      while (++result[at (resultIndex)] == 0)
        ++resultIndex;
    }
}
253 
#if __BYTE_ORDER != __LITTLE_ENDIAN
/*
 * Big-endian companion of simpleMultiplyAddHighPrecision(): used when
 * 'result' starts at an odd 32-bit half-word offset, where the at()
 * swizzle is wrong; halfAt() supplies the corrected index sequence
 * (see the halfAt() definition near the top of the file).
 */
void simpleMultiplyAddHighPrecisionBigEndianFix(uint64_t* arg1, int32_t length, uint64_t arg2, uint32_t* result) {
    /* Assumes result can hold the product and arg2 only holds 32 bits of information */
    int32_t index = 0;
    int32_t resultIndex = 0;
    uint64_t product = 0;

    do {
        /* low half-word: carry + existing digit + new partial product */
        product = HIGH_IN_U64(product) + result[halfAt(resultIndex)] + arg2 * LOW_U32_FROM_PTR(arg1 + index);
        result[halfAt(resultIndex)] = LOW_U32_FROM_VAR(product);
        ++resultIndex;
        /* high half-word of the same source word */
        product = HIGH_IN_U64(product) + result[halfAt(resultIndex)] + arg2 * HIGH_U32_FROM_PTR(arg1 + index);
        result[halfAt(resultIndex)] = LOW_U32_FROM_VAR(product);
        ++resultIndex;
    } while (++index < length);

    /* fold the final carry into the next digit and ripple upward */
    result[halfAt(resultIndex)] += HIGH_U32_FROM_VAR(product);
    if (result[halfAt(resultIndex)] < HIGH_U32_FROM_VAR(product)) {
        /* must be careful with ++ operator and macro expansion */
        ++resultIndex;
        while (++result[halfAt(resultIndex)] == 0) ++resultIndex;
    }
}
#endif
278 
/*
 * Full high-precision multiply: result = arg1 * arg2.  'result' must be
 * able to hold 'length' 64-bit words (enough for the full product).
 * The product is accumulated one 32-bit digit of the shorter operand at
 * a time via simpleMultiplyAddHighPrecision().
 */
void
multiplyHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2,
                       uint64_t * result, int32_t length)
{
  /* assumes result is large enough to hold product */
  uint64_t* temp;
  uint32_t* resultIn32;
  int32_t count, index;

  /* swap so that arg1 refers to the longer operand */
  if (length1 < length2)
    {
      temp = arg1;
      arg1 = arg2;
      arg2 = temp;
      count = length1;
      length1 = length2;
      length2 = count;
    }

  memset (result, 0, sizeof (uint64_t) * length);

  /* length1 > length2 */
  resultIn32 = reinterpret_cast<uint32_t*>(result);
  index = -1;
  for (count = 0; count < length2; ++count)
    {
      /* low 32-bit digit of arg2[count], accumulated at offset 'index' */
      simpleMultiplyAddHighPrecision (arg1, length1, LOW_IN_U64 (arg2[count]),
                                      resultIn32 + (++index));
      /* high 32-bit digit: on big-endian targets the odd half-word
       * offset needs the halfAt()-indexed variant */
#if __BYTE_ORDER == __LITTLE_ENDIAN
      simpleMultiplyAddHighPrecision(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index));
#else
      simpleMultiplyAddHighPrecisionBigEndianFix(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index));
#endif
    }
}
314 
/*
 * Computes arg1 = arg1 * 10 + digit in place, i.e. appends one decimal
 * digit to the high-precision value.  Assumes digit is less than 32
 * bits.  Returns the 32-bit carry out of the top word (non-zero means
 * the value no longer fits in 'length' words).
 */
uint32_t
simpleAppendDecimalDigitHighPrecision (uint64_t * arg1, int32_t length, uint64_t digit)
{
  /* assumes digit is less than 32 bits */
  uint64_t arg;
  int32_t index = 0;

  /* place the incoming digit in the high half so it becomes the initial
   * carry for the first half-word below */
  digit <<= 32;
  do
    {
      /* low half: 10 * half + carry (TIMES_TEN is (x<<3)+(x<<1)) */
      arg = LOW_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      LOW_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);

      /* high half of the same word */
      arg = HIGH_IN_U64 (arg1[index]);
      digit = HIGH_IN_U64 (digit) + TIMES_TEN (arg);
      HIGH_U32_FROM_PTR (arg1 + index) = LOW_U32_FROM_VAR (digit);
    }
  while (++index < length);

  /* final carry out of the most significant half-word */
  return HIGH_U32_FROM_VAR (digit);
}
337 
void
simpleShiftLeftHighPrecision (uint64_t * arg1, int32_t length, int32_t arg2)
{
  /* Shifts the length-word value arg1 left by arg2 bits in place.
   * Assumes length > 0; bits shifted out of the top word are lost. */
  int32_t i;

  if (arg2 >= 64)
    {
      /* whole 64-bit word moves first */
      const int32_t words = arg2 >> 6;
      for (i = length - 1; i >= words; --i)
        arg1[i] = arg1[i - words];
      for (i = (words < length ? words : length) - 1; i >= 0; --i)
        arg1[i] = 0;
      arg2 &= 0x3F;             /* remaining sub-word shift, 0..63 */
    }

  if (arg2 == 0)
    return;

  /* residual 1..63-bit shift, carrying bits across word boundaries */
  for (i = length - 1; i > 0; --i)
    arg1[i] = (arg1[i] << arg2) | (arg1[i - 1] >> (64 - arg2));
  arg1[0] <<= arg2;
}
367 
int32_t
highestSetBit (uint64_t * y)
{
  /* Returns the 1-based position of the most significant set bit of *y
   * (1 for bit 0, 64 for bit 63), or 0 when *y == 0.  Binary search in
   * halves of decreasing width.  The 64-bit mask literal uses the ULL
   * suffix directly instead of the USE_LL/USE_L ladder used elsewhere
   * in this file; the value is identical. */
  const uint64_t value = *y;
  uint32_t chunk;
  int32_t position;

  if (value == 0)
    return 0;

  if (value & 0xFFFFFFFF00000000ULL)
    {
      chunk = static_cast<uint32_t>(value >> 32);
      position = 32;
    }
  else
    {
      chunk = static_cast<uint32_t>(value);
      position = 0;
    }

  if (chunk & 0xFFFF0000)
    {
      chunk >>= 16;
      position += 16;
    }
  if (chunk & 0xFF00)
    {
      chunk >>= 8;
      position += 8;
    }
  if (chunk & 0xF0)
    {
      chunk >>= 4;
      position += 4;
    }

  /* chunk is now in [1, 15] */
  if (chunk > 0x7)
    return position + 4;
  if (chunk > 0x3)
    return position + 3;
  if (chunk > 0x1)
    return position + 2;
  return position + 1;
}
438 
int32_t
lowestSetBit (uint64_t * y)
{
  /* Returns the 1-based position of the least significant set bit of *y
   * (1 for bit 0, 64 for bit 63), or 0 when *y == 0.  Binary search in
   * halves of decreasing width.  The 64-bit mask literal uses the ULL
   * suffix directly instead of the USE_LL/USE_L ladder used elsewhere
   * in this file; the value is identical. */
  const uint64_t value = *y;
  uint32_t chunk;
  int32_t position;

  if (value == 0)
    return 0;

  if (value & 0x00000000FFFFFFFFULL)
    {
      chunk = static_cast<uint32_t>(value);
      position = 0;
    }
  else
    {
      chunk = static_cast<uint32_t>(value >> 32);
      position = 32;
    }

  if ((chunk & 0xFFFF) == 0)
    {
      chunk >>= 16;
      position += 16;
    }
  if ((chunk & 0xFF) == 0)
    {
      chunk >>= 8;
      position += 8;
    }
  if ((chunk & 0xF) == 0)
    {
      chunk >>= 4;
      position += 4;
    }

  /* at least one of the low four bits of chunk is set */
  if (chunk & 0x1)
    return position + 1;
  if (chunk & 0x2)
    return position + 2;
  if (chunk & 0x4)
    return position + 3;
  return position + 4;
}
510 
511 int32_t
highestSetBitHighPrecision(uint64_t * arg,int32_t length)512 highestSetBitHighPrecision (uint64_t * arg, int32_t length)
513 {
514   int32_t highBit;
515 
516   while (--length >= 0)
517     {
518       highBit = highestSetBit (arg + length);
519       if (highBit)
520         return highBit + 64 * length;
521     }
522 
523   return 0;
524 }
525 
526 int32_t
lowestSetBitHighPrecision(uint64_t * arg,int32_t length)527 lowestSetBitHighPrecision (uint64_t * arg, int32_t length)
528 {
529   int32_t lowBit, index = -1;
530 
531   while (++index < length)
532     {
533       lowBit = lowestSetBit (arg + index);
534       if (lowBit)
535         return lowBit + 64 * index;
536     }
537 
538   return 0;
539 }
540 
int32_t
compareHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
{
  /* Three-way comparison of two high-precision values: returns 1 when
   * arg1 > arg2, -1 when arg1 < arg2, 0 when equal.  High-order zero
   * words are ignored. */
  int32_t top1 = length1;
  int32_t top2 = length2;

  /* index of the highest non-zero word of each operand (-1 if all zero) */
  while (--top1 >= 0 && arg1[top1] == 0) {
  }
  while (--top2 >= 0 && arg2[top2] == 0) {
  }

  if (top1 != top2)
    return top1 > top2 ? 1 : -1;

  /* equal significant length: compare word by word from the top */
  for (; top1 >= 0; --top1)
    {
      if (arg1[top1] != arg2[top1])
        return arg1[top1] > arg2[top1] ? 1 : -1;
    }

  return 0;
}
567 
/*
 * Converts the high-precision (multi-word, unsigned) integer in 'arg'
 * to the nearest jdouble, rounding half to even.  'length' is the word
 * count; high-order zero words are ignored.  Values wider than 16
 * words (1024 bits) overflow to the all-exponent-bits pattern
 * (infinity).
 */
jdouble
toDoubleHighPrecision (uint64_t * arg, int32_t length)
{
  int32_t highBit;
  uint64_t mantissa, test64;
  uint32_t test;
  jdouble result;

  /* strip high-order zero words */
  while (length > 0 && arg[length - 1] == 0)
    --length;

  if (length == 0)
    result = 0.0;
  else if (length > 16)
    {
      /* too wide to be a finite double */
      DOUBLE_TO_LONGBITS (result) = EXPONENT_MASK;
    }
  else if (length == 1)
    {
      highBit = highestSetBit (arg);
      if (highBit <= 53)
        {
          /* fits in the 53-bit mantissa exactly: no rounding needed */
          highBit = 53 - highBit;
          mantissa = *arg << highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, -highBit);
        }
      else
        {
          /* more than 53 significant bits: truncate, then round on the
           * dropped bits */
          highBit -= 53;
          mantissa = *arg >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
        }
    }
  else
    {
      /* multi-word: the top word supplies the mantissa; lower words
       * participate only in rounding */
      highBit = highestSetBit (arg + (--length));
      if (highBit <= 53)
        {
          highBit = 53 - highBit;
          if (highBit > 0)
            {
              /* borrow the missing low mantissa bits from the word below */
              mantissa =
                (arg[length] << highBit) | (arg[length - 1] >>
                                            (64 - highBit));
            }
          else
            {
              mantissa = arg[length];
            }
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 - highBit);

          /* perform rounding, round to even in case of tie */
          test64 = arg[--length] << highBit;
          if (test64 > SIGN_MASK || ((test64 == SIGN_MASK) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test64 == SIGN_MASK)
            {
              /* exactly halfway so far: any non-zero lower word breaks
               * the tie upward */
              while (--length >= 0)
                {
                  if (arg[length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
            }
        }
      else
        {
          highBit -= 53;
          mantissa = arg[length] >> highBit;
          DOUBLE_TO_LONGBITS (result) =
            CREATE_DOUBLE_BITS (mantissa, length * 64 + highBit);

          /* perform rounding, round to even in case of tie */
          test = (LOW_U32_FROM_PTR (arg + length) << (11 - highBit)) & 0x7FF;
          if (test > 0x400 || ((test == 0x400) && (mantissa & 1)))
            DOUBLE_TO_LONGBITS (result) = DOUBLE_TO_LONGBITS (result) + 1;
          else if (test == 0x400)
            {
              /* apparent tie: scan the lower words for a set bit to
               * decide the direction */
              do
                {
                  if (arg[--length] != 0)
                    {
                      DOUBLE_TO_LONGBITS (result) =
                        DOUBLE_TO_LONGBITS (result) + 1;
                      break;
                    }
                }
              while (length > 0);
            }
        }
    }

  return result;
}
673 
674 static uint64_t simpleMultiplyHighPrecision64(uint64_t* arg1, int32_t length, uint64_t arg2);
675 
/*
 * Multiplies the high-precision value in 'result' (length words) by
 * 10^e in place.  Returns the possibly-grown length in words; assumes
 * 'result' has room for the enlarged value.
 */
int32_t
timesTenToTheEHighPrecision (uint64_t * result, int32_t length, jint e)
{
  /* assumes result can hold value */
  uint64_t overflow;
  int exp10 = e;

  if (e == 0)
    return length;

  /* bad O(n) way of doing it, but simple */
  /*
     do {
     overflow = simpleAppendDecimalDigitHighPrecision(result, length, 0);
     if (overflow)
     result[length++] = overflow;
     } while (--e);
   */
  /* Replace the former implementation, which performed a
   * "multiplication" by 10 e separate times, with actual
   * multiplications.  10^19 is the largest power of ten that fits in a
   * 64-bit integer (handled by simpleMultiplyHighPrecision64), and 10^9
   * is the largest that fits in 32 bits (the multiplier limit of
   * simpleMultiplyHighPrecision).  Not sure where the break-even point
   * is between an actual multiplication and
   * simpleAppendDecimalDigitHighPrecision(), so just pick 10^3 as that
   * point for now.
   */
  /* consume the exponent in chunks of 19, then 9 */
  while (exp10 >= 19)
    {
      overflow = simpleMultiplyHighPrecision64 (result, length, TEN_E19);
      if (overflow)
        result[length++] = overflow;
      exp10 -= 19;
    }
  while (exp10 >= 9)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E9);
      if (overflow)
        result[length++] = overflow;
      exp10 -= 9;
    }
  /* the remaining exponent is 0..8: one dedicated case each */
  if (exp10 == 0)
    return length;
  else if (exp10 == 1)
    {
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 2)
    {
      /* two successive multiplications by ten */
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
      overflow = simpleAppendDecimalDigitHighPrecision (result, length, 0);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 3)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E3);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 4)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E4);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 5)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E5);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 6)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E6);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 7)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E7);
      if (overflow)
        result[length++] = overflow;
    }
  else if (exp10 == 8)
    {
      overflow = simpleMultiplyHighPrecision (result, length, TEN_E8);
      if (overflow)
        result[length++] = overflow;
    }
  return length;
}
772 
773 uint64_t
doubleMantissa(jdouble z)774 doubleMantissa (jdouble z)
775 {
776   uint64_t m = DOUBLE_TO_LONGBITS (z);
777 
778   if ((m & EXPONENT_MASK) != 0)
779     m = (m & MANTISSA_MASK) | NORMAL_MASK;
780   else
781     m = (m & MANTISSA_MASK);
782 
783   return m;
784 }
785 
786 int32_t
doubleExponent(jdouble z)787 doubleExponent (jdouble z)
788 {
789   /* assumes positive double */
790   int32_t k = HIGH_U32_FROM_VAR (z) >> 20;
791 
792   if (k)
793     k -= E_OFFSET;
794   else
795     k = 1 - E_OFFSET;
796 
797   return k;
798 }
799 
floatMantissa(jfloat z)800 uint32_t floatMantissa(jfloat z) {
801   uint32_t m = FLOAT_TO_INTBITS (z);
802 
803   if ((m & FLOAT_EXPONENT_MASK) != 0)
804     m = (m & FLOAT_MANTISSA_MASK) | FLOAT_NORMAL_MASK;
805   else
806     m = (m & FLOAT_MANTISSA_MASK);
807 
808   return m;
809 }
810 
811 int32_t
floatExponent(jfloat z)812 floatExponent (jfloat z)
813 {
814   /* assumes positive float */
815   int32_t k = FLOAT_TO_INTBITS (z) >> 23;
816   if (k)
817     k -= FLOAT_E_OFFSET;
818   else
819     k = 1 - FLOAT_E_OFFSET;
820 
821   return k;
822 }
823 
/* Allow a 64-bit value in arg2 */
/*
 * Multiplies arg1 (length 64-bit words) in place by the full 64-bit
 * value arg2, building each 128-bit word product from four 32x32->64
 * partial products with explicit carry tracking.  Returns the overflow
 * out of the top word (the would-be next word of the product).
 */
uint64_t
simpleMultiplyHighPrecision64 (uint64_t * arg1, int32_t length, uint64_t arg2)
{
  uint64_t intermediate, carry1, carry2, prod1, prod2, sum;
  uint64_t* pArg1;
  int32_t index;
  uint32_t buf32;

  index = 0;
  intermediate = 0;             /* carry word flowing into each iteration */
  pArg1 = arg1 + index;
  carry1 = carry2 = 0;

  do
    {
      /* a zero word with no incoming carry contributes nothing */
      if ((*pArg1 != 0) || (intermediate != 0))
        {
          /* low(arg2) * low(word): lowest partial product */
          prod1 =
            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
          sum = intermediate + prod1;
          /* detect wrap-around of the 64-bit addition */
          if ((sum < prod1) || (sum < intermediate))
            {
              carry1 = 1;
            }
          else
            {
              carry1 = 0;
            }
          /* the two cross products land on the middle 32 bits */
          prod1 =
            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(HIGH_U32_FROM_PTR (pArg1));
          prod2 =
            static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
          intermediate = carry2 + HIGH_IN_U64 (sum) + prod1 + prod2;
          if ((intermediate < prod1) || (intermediate < prod2))
            {
              carry2 = 1;
            }
          else
            {
              carry2 = 0;
            }
          /* commit the low and middle halves; the original high half is
           * saved first because it still feeds the last partial product */
          LOW_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (sum);
          buf32 = HIGH_U32_FROM_PTR (pArg1);
          HIGH_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (intermediate);
          /* carry into the next word: leftover bits plus
           * high(arg2) * high(word) */
          intermediate = carry1 + HIGH_IN_U64 (intermediate)
            + static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(buf32);
        }
      pArg1++;
    }
  while (++index < length);
  /* overflow out of the most significant word */
  return intermediate;
}
877