// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <cmath>
#include <cstring>
#include <limits>
#include <vector>

#include "compiler-intrinsics-vixl.h"
#include "globals-vixl.h"

namespace vixl {

// Macros for compile-time format checking.
#if GCC_VERSION_OR_NEWER(4, 4, 0)
#define PRINTF_CHECK(format_index, varargs_index) \
  __attribute__((format(gnu_printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif

#ifdef __GNUC__
#define VIXL_HAS_DEPRECATED_WITH_MSG
#elif defined(__clang__)
#if __has_extension(attribute_deprecated_with_message)
#define VIXL_HAS_DEPRECATED_WITH_MSG
#endif
#endif

#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
#define VIXL_DEPRECATED(replaced_by, declarator) \
  __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
#else
#define VIXL_DEPRECATED(replaced_by, declarator) declarator
#endif

#ifdef VIXL_DEBUG
#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
#else
#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
#endif

template <typename T, size_t n>
size_t ArrayLength(const T (&)[n]) {
  return n;
}

inline uint64_t GetUintMask(unsigned bits) {
  VIXL_ASSERT(bits <= 64);
  uint64_t base = (bits >= 64) ? 0 : (UINT64_C(1) << bits);
  return base - 1;
}
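
// For example: GetUintMask(8) == 0xff, GetUintMask(1) == 0x1, and
// GetUintMask(64) covers all 64 bits.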

// Check number width.
// TODO: Refactor these using templates.
inline bool IsIntN(unsigned n, uint32_t x) {
  VIXL_ASSERT((0 < n) && (n < 32));
  uint32_t limit = UINT32_C(1) << (n - 1);
  return x < limit;
}
inline bool IsIntN(unsigned n, int32_t x) {
  VIXL_ASSERT((0 < n) && (n < 32));
  int32_t limit = INT32_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}
inline bool IsIntN(unsigned n, uint64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  uint64_t limit = UINT64_C(1) << (n - 1);
  return x < limit;
}
inline bool IsIntN(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}
VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
  return IsIntN(n, x);
}

inline bool IsUintN(unsigned n, uint32_t x) {
  VIXL_ASSERT((0 < n) && (n < 32));
  return !(x >> n);
}
inline bool IsUintN(unsigned n, int32_t x) {
  VIXL_ASSERT((0 < n) && (n < 32));
  // Convert to an unsigned integer to avoid implementation-defined behavior.
  return !(static_cast<uint32_t>(x) >> n);
}
inline bool IsUintN(unsigned n, uint64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return !(x >> n);
}
inline bool IsUintN(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  // Convert to an unsigned integer to avoid implementation-defined behavior.
  return !(static_cast<uint64_t>(x) >> n);
}
VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
  return IsUintN(n, x);
}

inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
}
VIXL_DEPRECATED("TruncateToUintN",
                inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
  return TruncateToUintN(n, x);
}

// clang-format off
#define INT_1_TO_32_LIST(V) \
V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8) \
V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)

#define INT_33_TO_63_LIST(V) \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)

// clang-format on

#define DECLARE_IS_INT_N(N)                                        \
  inline bool IsInt##N(int64_t x) { return IsIntN(N, x); }         \
  VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) {  \
    return IsIntN(N, x);                                           \
  }

#define DECLARE_IS_UINT_N(N)                                        \
  inline bool IsUint##N(int64_t x) { return IsUintN(N, x); }        \
  VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
    return IsUintN(N, x);                                           \
  }

#define DECLARE_TRUNCATE_TO_UINT_32(N)                             \
  inline uint32_t TruncateToUint##N(uint64_t x) {                  \
    return static_cast<uint32_t>(TruncateToUintN(N, x));            \
  }                                                                \
  VIXL_DEPRECATED("TruncateToUint" #N,                             \
                  inline uint32_t truncate_to_int##N(int64_t x)) { \
    return TruncateToUint##N(x);                                   \
  }

INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
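
// Worked examples of the helpers declared above: IsInt8(127) and IsUint8(255)
// are true, IsInt8(128) and IsUint8(256) are false, and
// TruncateToUint16(0x12345678) == 0x5678.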

#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_UINT_32

// Bit field extraction.
inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  if ((msb == 63) && (lsb == 0)) return x;
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}


inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
}


inline int64_t ExtractSignedBitfield64(int msb, int lsb, uint64_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
  // If the highest extracted bit is set, sign extend.
  if ((temp >> (msb - lsb)) == 1) {
    temp |= ~UINT64_C(0) << (msb - lsb);
  }
  int64_t result;
  memcpy(&result, &temp, sizeof(result));
  return result;
}
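
// For example: ExtractUnsignedBitfield64(7, 4, 0xab) == 0xa, and
// ExtractSignedBitfield64(3, 0, 0xf) == -1 because bit 3 (the msb of the
// extracted field) is set.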


inline int32_t ExtractSignedBitfield32(int msb, int lsb, uint32_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
  int32_t result;
  memcpy(&result, &temp, sizeof(result));
  return result;
}


inline uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT((width > 0) && (width <= 64));
  uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
  rotate &= 63;
  if (rotate > 0) {
    value &= width_mask;
    value = (value << (width - rotate)) | (value >> rotate);
  }
  return value & width_mask;
}
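
// For example: RotateRight(0x12, 4, 8) == 0x21, a four-bit rotation within an
// eight-bit field.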


// Wrapper class for passing FP16 values through the assembler.
// This is purely to aid with type checking/casting.
class Float16 {
 public:
  explicit Float16(double dvalue);
  Float16() : rawbits_(0x0) {}
  friend uint16_t Float16ToRawbits(Float16 value);
  friend Float16 RawbitsToFloat16(uint16_t bits);

 protected:
  uint16_t rawbits_;
};

// Floating point representation.
uint16_t Float16ToRawbits(Float16 value);


uint32_t FloatToRawbits(float value);
VIXL_DEPRECATED("FloatToRawbits",
                inline uint32_t float_to_rawbits(float value)) {
  return FloatToRawbits(value);
}

uint64_t DoubleToRawbits(double value);
VIXL_DEPRECATED("DoubleToRawbits",
                inline uint64_t double_to_rawbits(double value)) {
  return DoubleToRawbits(value);
}

Float16 RawbitsToFloat16(uint16_t bits);

float RawbitsToFloat(uint32_t bits);
VIXL_DEPRECATED("RawbitsToFloat",
                inline float rawbits_to_float(uint32_t bits)) {
  return RawbitsToFloat(bits);
}

double RawbitsToDouble(uint64_t bits);
VIXL_DEPRECATED("RawbitsToDouble",
                inline double rawbits_to_double(uint64_t bits)) {
  return RawbitsToDouble(bits);
}

namespace internal {

// Internal simulation class used solely by the simulator to
// provide an abstraction layer for any half-precision arithmetic.
class SimFloat16 : public Float16 {
 public:
  // TODO: We should investigate making this constructor explicit.
  // This is currently difficult to do due to a number of templated
  // functions in the simulator which rely on returning double values.
  SimFloat16(double dvalue) : Float16(dvalue) {}  // NOLINT(runtime/explicit)
  SimFloat16(Float16 f) {  // NOLINT(runtime/explicit)
    this->rawbits_ = Float16ToRawbits(f);
  }
  SimFloat16() : Float16() {}
  SimFloat16 operator-() const;
  SimFloat16 operator+(SimFloat16 rhs) const;
  SimFloat16 operator-(SimFloat16 rhs) const;
  SimFloat16 operator*(SimFloat16 rhs) const;
  SimFloat16 operator/(SimFloat16 rhs) const;
  bool operator<(SimFloat16 rhs) const;
  bool operator>(SimFloat16 rhs) const;
  bool operator==(SimFloat16 rhs) const;
  bool operator!=(SimFloat16 rhs) const;
  // This is necessary for conversions performed in (macro asm) Fmov.
  bool operator==(double rhs) const;
  operator double() const;
};
}  // namespace internal

uint32_t Float16Sign(internal::SimFloat16 value);

uint32_t Float16Exp(internal::SimFloat16 value);

uint32_t Float16Mantissa(internal::SimFloat16 value);

uint32_t FloatSign(float value);
VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
  return FloatSign(value);
}

uint32_t FloatExp(float value);
VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
  return FloatExp(value);
}

uint32_t FloatMantissa(float value);
VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
  return FloatMantissa(value);
}

uint32_t DoubleSign(double value);
VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
  return DoubleSign(value);
}

uint32_t DoubleExp(double value);
VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
  return DoubleExp(value);
}

uint64_t DoubleMantissa(double value);
VIXL_DEPRECATED("DoubleMantissa",
                inline uint64_t double_mantissa(double value)) {
  return DoubleMantissa(value);
}

internal::SimFloat16 Float16Pack(uint16_t sign,
                                 uint16_t exp,
                                 uint16_t mantissa);

float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
VIXL_DEPRECATED("FloatPack",
                inline float float_pack(uint32_t sign,
                                        uint32_t exp,
                                        uint32_t mantissa)) {
  return FloatPack(sign, exp, mantissa);
}

double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
VIXL_DEPRECATED("DoublePack",
                inline double double_pack(uint32_t sign,
                                          uint32_t exp,
                                          uint64_t mantissa)) {
  return DoublePack(sign, exp, mantissa);
}

// An fpclassify() function for 16-bit half-precision floats.
int Float16Classify(Float16 value);
VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
  return Float16Classify(RawbitsToFloat16(value));
}

bool IsZero(Float16 value);

inline bool IsNaN(float value) { return std::isnan(value); }

inline bool IsNaN(double value) { return std::isnan(value); }

inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }

inline bool IsInf(float value) { return std::isinf(value); }

inline bool IsInf(double value) { return std::isinf(value); }

inline bool IsInf(Float16 value) {
  return Float16Classify(value) == FP_INFINITE;
}


// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = DoubleToRawbits(num);
  if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = FloatToRawbits(num);
  if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(Float16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
}


template <typename T>
inline bool IsQuietNaN(T num) {
  return IsNaN(num) && !IsSignallingNaN(num);
}


// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(IsNaN(num));
  return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(IsNaN(num));
  return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
}


inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  VIXL_ASSERT(IsNaN(num));
  return internal::SimFloat16(
      RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
}


// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return fma(op1, op2, a);
}


inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return fmaf(op1, op2, a);
}


inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }


template <typename T>
inline int HighestSetBitPosition(T value) {
  VIXL_ASSERT(value != 0);
  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
}


template <typename V>
inline int WhichPowerOf2(V value) {
  VIXL_ASSERT(IsPowerOf2(value));
  return CountTrailingZeros(value);
}


unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);


int BitCount(uint64_t value);


template <typename T>
T ReverseBits(T value) {
  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
              (sizeof(value) == 4) || (sizeof(value) == 8));
  T result = 0;
  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
    result = (result << 1) | (value & 1);
    value >>= 1;
  }
  return result;
}


template <typename T>
inline T SignExtend(T val, int bitSize) {
  VIXL_ASSERT(bitSize > 0);
  T mask = (T(2) << (bitSize - 1)) - T(1);
  val &= mask;
  T sign_bits = -((val >> (bitSize - 1)) << bitSize);
  val |= sign_bits;
  return val;
}
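
// For example: SignExtend(INT64_C(0xff), 8) == -1, while
// SignExtend(INT64_C(0x7f), 8) == 0x7f.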


template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
  // Split the 64-bit value into an 8-bit array, where b[0] is the least
  // significant byte, and b[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = UINT64_C(0xff00000000000000);
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[0] is used by REV16_x, REV16_w
  //  permute_table[1] is used by REV32_x, REV_w
  //  permute_table[2] is used by REV_x
  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
                                              {4, 5, 6, 7, 0, 1, 2, 3},
                                              {0, 1, 2, 3, 4, 5, 6, 7}};
  uint64_t temp = 0;
  for (int i = 0; i < 8; i++) {
    temp <<= 8;
    temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
  }

  T result;
  VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
  memcpy(&result, &temp, sizeof(result));
  return result;
}
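
// For example, block_bytes_log2 == 3 (REV_x) reverses the whole doubleword:
// ReverseBytes(UINT64_C(0x0123456789abcdef), 3) ==
// UINT64_C(0xefcdab8967452301).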

template <unsigned MULTIPLE, typename T>
inline bool IsMultiple(T value) {
  VIXL_ASSERT(IsPowerOf2(MULTIPLE));
  return (value & (MULTIPLE - 1)) == 0;
}

template <typename T>
inline bool IsMultiple(T value, unsigned multiple) {
  VIXL_ASSERT(IsPowerOf2(multiple));
  return (value & (multiple - 1)) == 0;
}

template <typename T>
inline bool IsAligned(T pointer, int alignment) {
  VIXL_ASSERT(IsPowerOf2(alignment));
  return (pointer & (alignment - 1)) == 0;
}

// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template <unsigned ALIGN, typename T>
inline bool IsAligned(T pointer) {
  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.
  return IsAligned((intptr_t)(pointer), ALIGN);
}

template <typename T>
bool IsWordAligned(T pointer) {
  return IsAligned<4>(pointer);
}

// Increment a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>
T AlignUp(T pointer,
          typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
  VIXL_ASSERT(IsPowerOf2(alignment));
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  typename Unsigned<sizeof(T) * kBitsPerByte>::type pointer_raw =
      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t mask = alignment - 1;
  T result = (T)((pointer_raw + mask) & ~mask);
  VIXL_ASSERT(result >= pointer);

  return result;
}

// Decrement a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>
T AlignDown(T pointer,
            typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
  VIXL_ASSERT(IsPowerOf2(alignment));
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  typename Unsigned<sizeof(T) * kBitsPerByte>::type pointer_raw =
      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t mask = alignment - 1;
  return (T)(pointer_raw & ~mask);
}
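
// For example: AlignUp(UINT64_C(0x1001), 16) == 0x1010 and
// AlignDown(UINT64_C(0x100f), 16) == 0x1000.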


template <typename T>
inline T ExtractBit(T value, unsigned bit) {
  return (value >> bit) & T(1);
}

template <typename Ts, typename Td>
inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
  return Td((value >> least_significant_bit) & Ts(mask));
}

template <typename Ts, typename Td>
inline void AssignBit(Td& dst,  // NOLINT(runtime/references)
                      int bit,
                      Ts value) {
  VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
  VIXL_ASSERT(bit >= 0);
  VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
  Td mask(1);
  dst &= ~(mask << bit);
  dst |= Td(value) << bit;
}

template <typename Td, typename Ts>
inline void AssignBits(Td& dst,  // NOLINT(runtime/references)
                       int least_significant_bit,
                       Ts mask,
                       Ts value) {
  VIXL_ASSERT(least_significant_bit >= 0);
  VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
  VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
              Td(mask));
  VIXL_ASSERT((value & mask) == value);
  dst &= ~(Td(mask) << least_significant_bit);
  dst |= Td(value) << least_significant_bit;
}
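
// For example: ExtractBits(0xabcdu, 4, 0xffu) == 0xbcu, and after
//   uint32_t r = 0;
//   AssignBits(r, 4, 0xffu, 0x5u);
// r holds 0x50.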

class VFP {
 public:
  static uint32_t FP32ToImm8(float imm) {
    // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
    uint32_t bits = FloatToRawbits(imm);
    // bit7: a000.0000
    uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
    // bit6: 0b00.0000
    uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
    // bit5_to_0: 00cd.efgh
    uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
  }
  static uint32_t FP64ToImm8(double imm) {
    // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //       0000.0000.0000.0000.0000.0000.0000.0000
    uint64_t bits = DoubleToRawbits(imm);
    // bit7: a000.0000
    uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
    // bit6: 0b00.0000
    uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
    // bit5_to_0: 00cd.efgh
    uint64_t bit5_to_0 = (bits >> 48) & 0x3f;

    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
  }
  static float Imm8ToFP32(uint32_t imm8) {
    //   Imm8: abcdefgh (8 bits)
    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint32_t bit7 = (bits >> 7) & 0x1;
    uint32_t bit6 = (bits >> 6) & 0x1;
    uint32_t bit5_to_0 = bits & 0x3f;
    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

    return RawbitsToFloat(result);
  }
  static double Imm8ToFP64(uint32_t imm8) {
    //   Imm8: abcdefgh (8 bits)
    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint64_t bit7 = (bits >> 7) & 0x1;
    uint64_t bit6 = (bits >> 6) & 0x1;
    uint64_t bit5_to_0 = bits & 0x3f;
    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
    return RawbitsToDouble(result);
  }
  static bool IsImmFP32(float imm) {
    // Valid values will have the form:
    // aBbb.bbbc.defg.h000.0000.0000.0000.0000
    uint32_t bits = FloatToRawbits(imm);
    // bits[19..0] are cleared.
    if ((bits & 0x7ffff) != 0) {
      return false;
    }

    // bits[29..25] are all set or all cleared.
    uint32_t b_pattern = (bits >> 16) & 0x3e00;
    if (b_pattern != 0 && b_pattern != 0x3e00) {
      return false;
    }
    // bit[30] and bit[29] are opposite.
    if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
      return false;
    }
    return true;
  }
  static bool IsImmFP64(double imm) {
    // Valid values will have the form:
    // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    // 0000.0000.0000.0000.0000.0000.0000.0000
    uint64_t bits = DoubleToRawbits(imm);
    // bits[47..0] are cleared.
    if ((bits & 0x0000ffffffffffff) != 0) {
      return false;
    }
    // bits[61..54] are all set or all cleared.
    uint32_t b_pattern = (bits >> 48) & 0x3fc0;
    if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
      return false;
    }
    // bit[62] and bit[61] are opposite.
    if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
      return false;
    }
    return true;
  }
};
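
// For example, 1.0f is an encodable immediate: VFP::FP32ToImm8(1.0f) == 0x70,
// VFP::Imm8ToFP32(0x70) == 1.0f, and VFP::IsImmFP32(1.0f) is true.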

class BitField {
  // ForEachBitHelper is a functor that will call
  //   bool ForEachBitHelper::execute(ElementType id) const
  // and expects a boolean in return whether to continue (if true)
  // or stop (if false).
  // check_set will check if the bits are on (true) or off (false).
  template <typename ForEachBitHelper, bool check_set>
  bool ForEachBit(const ForEachBitHelper& helper) {
    for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
      if (bitfield_[i] == check_set)
        if (!helper.execute(i)) return false;
    }
    return true;
  }

 public:
  explicit BitField(unsigned size) : bitfield_(size, 0) {}

  void Set(int i) {
    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
    bitfield_[i] = true;
  }

  void Unset(int i) {
    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
    bitfield_[i] = false;
  }

  bool IsSet(int i) const { return bitfield_[i]; }

  // For each bit not set in the bitfield call the execute functor
  // execute.
  // ForEachBitSetHelper::execute returns true if the iteration through
  // the bits can continue, otherwise it will stop.
  //   struct ForEachBitSetHelper {
  //     bool execute(int /*id*/) { return false; }
  //   };
  template <typename ForEachBitNotSetHelper>
  bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
    return ForEachBit<ForEachBitNotSetHelper, false>(helper);
  }

  // For each bit set in the bitfield call the execute functor
  // execute.
  template <typename ForEachBitSetHelper>
  bool ForEachBitSet(const ForEachBitSetHelper& helper) {
    return ForEachBit<ForEachBitSetHelper, true>(helper);
  }

 private:
  std::vector<bool> bitfield_;
};

namespace internal {

typedef int64_t Int64;
class Uint64;
class Uint128;

class Uint32 {
  uint32_t data_;

 public:
  // Unlike uint32_t, Uint32 has a default constructor.
  Uint32() { data_ = 0; }
  explicit Uint32(uint32_t data) : data_(data) {}
  inline explicit Uint32(Uint64 data);
  uint32_t Get() const { return data_; }
  template <int N>
  int32_t GetSigned() const {
    return ExtractSignedBitfield32(N - 1, 0, data_);
  }
  int32_t GetSigned() const { return data_; }
  Uint32 operator~() const { return Uint32(~data_); }
  Uint32 operator-() const { return Uint32(-data_); }
  bool operator==(Uint32 value) const { return data_ == value.data_; }
  bool operator!=(Uint32 value) const { return data_ != value.data_; }
  bool operator>(Uint32 value) const { return data_ > value.data_; }
  Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
  Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
  Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
  Uint32 operator&=(Uint32 value) {
    data_ &= value.data_;
    return *this;
  }
  Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
  Uint32 operator^=(Uint32 value) {
    data_ ^= value.data_;
    return *this;
  }
  Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
  Uint32 operator|=(Uint32 value) {
    data_ |= value.data_;
    return *this;
  }
  // Unlike uint32_t, the shift functions can accept negative shift and
  // return 0 when the shift is too big.
  Uint32 operator>>(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 32) return Uint32(0);
      return Uint32(data_ << tmp);
    }
    int tmp = shift;
    if (tmp >= 32) return Uint32(0);
    return Uint32(data_ >> tmp);
  }
  Uint32 operator<<(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 32) return Uint32(0);
      return Uint32(data_ >> tmp);
    }
    int tmp = shift;
    if (tmp >= 32) return Uint32(0);
    return Uint32(data_ << tmp);
  }
};

class Uint64 {
  uint64_t data_;

 public:
  // Unlike uint64_t, Uint64 has a default constructor.
  Uint64() { data_ = 0; }
  explicit Uint64(uint64_t data) : data_(data) {}
  explicit Uint64(Uint32 data) : data_(data.Get()) {}
  inline explicit Uint64(Uint128 data);
  uint64_t Get() const { return data_; }
  int64_t GetSigned(int N) const {
    return ExtractSignedBitfield64(N - 1, 0, data_);
  }
  int64_t GetSigned() const { return data_; }
  Uint32 ToUint32() const {
    VIXL_ASSERT((data_ >> 32) == 0);
    return Uint32(static_cast<uint32_t>(data_));
  }
  Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
  Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
  Uint64 operator~() const { return Uint64(~data_); }
  Uint64 operator-() const { return Uint64(-data_); }
  bool operator==(Uint64 value) const { return data_ == value.data_; }
  bool operator!=(Uint64 value) const { return data_ != value.data_; }
  Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
  Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
  Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
  Uint64 operator&=(Uint64 value) {
    data_ &= value.data_;
    return *this;
  }
  Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
  Uint64 operator^=(Uint64 value) {
    data_ ^= value.data_;
    return *this;
  }
  Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
  Uint64 operator|=(Uint64 value) {
    data_ |= value.data_;
    return *this;
  }
  // Unlike uint64_t, the shift functions can accept negative shift and
  // return 0 when the shift is too big.
  Uint64 operator>>(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 64) return Uint64(0);
      return Uint64(data_ << tmp);
    }
    int tmp = shift;
    if (tmp >= 64) return Uint64(0);
    return Uint64(data_ >> tmp);
  }
  Uint64 operator<<(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 64) return Uint64(0);
      return Uint64(data_ >> tmp);
    }
    int tmp = shift;
    if (tmp >= 64) return Uint64(0);
    return Uint64(data_ << tmp);
  }
};

class Uint128 {
  uint64_t data_high_;
  uint64_t data_low_;

 public:
  Uint128() : data_high_(0), data_low_(0) {}
  explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
  explicit Uint128(Uint64 data_low)
      : data_high_(0), data_low_(data_low.Get()) {}
  Uint128(uint64_t data_high, uint64_t data_low)
      : data_high_(data_high), data_low_(data_low) {}
  Uint64 ToUint64() const {
    VIXL_ASSERT(data_high_ == 0);
    return Uint64(data_low_);
  }
  Uint64 GetHigh64() const { return Uint64(data_high_); }
  Uint64 GetLow64() const { return Uint64(data_low_); }
  Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
  bool operator==(Uint128 value) const {
    return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
  }
  Uint128 operator&(Uint128 value) const {
    return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
  }
  Uint128 operator&=(Uint128 value) {
    data_high_ &= value.data_high_;
    data_low_ &= value.data_low_;
    return *this;
  }
  Uint128 operator|=(Uint128 value) {
    data_high_ |= value.data_high_;
    data_low_ |= value.data_low_;
    return *this;
  }
  Uint128 operator>>(int shift) const {
    VIXL_ASSERT((shift >= 0) && (shift < 128));
    if (shift == 0) return *this;
    if (shift >= 64) {
      return Uint128(0, data_high_ >> (shift - 64));
    }
    uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
    return Uint128(data_high_ >> shift, tmp);
  }
  Uint128 operator<<(int shift) const {
    VIXL_ASSERT((shift >= 0) && (shift < 128));
    if (shift == 0) return *this;
    if (shift >= 64) {
      return Uint128(data_low_ << (shift - 64), 0);
    }
    uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
    return Uint128(tmp, data_low_ << shift);
  }
};

Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}

Int64 BitCount(Uint32 value);

}  // namespace internal

// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
extern const Float16 kFP16DefaultNaN;

// Floating-point infinity values.
extern const Float16 kFP16PositiveInfinity;
extern const Float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// Floating-point zero values.
extern const Float16 kFP16PositiveZero;
extern const Float16 kFP16NegativeZero;

// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };

// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
//  sign:     0 = positive, 1 = negative
//  exponent: Unbiased IEEE-754 exponent.
//  mantissa: The mantissa of the input. The top bit (which is not encoded for
//            normal IEEE-754 values) must not be omitted. This bit has the
//            value 'pow(2, exponent)'.
//
// The input value is assumed to be a normalized value. That is, the input may
// not be infinity or NaN. If the source value is subnormal, it must be
// normalized before calling this function such that the highest set bit in the
// mantissa has the value 'pow(2, exponent)'.
//
// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
// calling a templated FPRound.
template <class T, int ebits, int mbits>
T FPRound(int64_t sign,
          int64_t exponent,
          uint64_t mantissa,
          FPRounding round_mode) {
  VIXL_ASSERT((sign == 0) || (sign == 1));

  // Only FPTieEven and FPRoundOdd rounding modes are implemented.
  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));

  // Rounding can promote subnormals to normals, and normals to infinities. For
  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
  // encodable as a float, but rounding based on the low-order mantissa bits
  // could make it overflow. With ties-to-even rounding, this value would
  // become an infinity.

  // ---- Rounding Method ----
  //
  // The exponent is irrelevant in the rounding operation, so we treat the
  // lowest-order bit that will fit into the result ('onebit') as having
  // the value '1'. Similarly, the highest-order bit that won't fit into
  // the result ('halfbit') has the value '0.5'. The 'point' sits between
  // 'onebit' and 'halfbit':
  //
  //            These bits fit into the result.
  //               |---------------------|
  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
  //                                     ||
  //                                    / |
  //                                   /  halfbit
  //                               onebit
  //
  // For subnormal outputs, the range of representable bits is smaller and
  // the position of onebit and halfbit depends on the exponent of the
  // input, but the method is otherwise similar.
  //
  //   onebit(frac)
  //     |
  //     | halfbit(frac)      halfbit(adjusted)
  //     | /                  /
  //     | |                  |
  //  0b00.0 (exact)      -> 0b00.0 (exact)      -> 0b00
  //  0b00.0...           -> 0b00.0...           -> 0b00
  //  0b00.1 (exact)      -> 0b00.0111..111      -> 0b00
  //  0b00.1...           -> 0b00.1...           -> 0b01
  //  0b01.0 (exact)      -> 0b01.0 (exact)      -> 0b01
  //  0b01.0...           -> 0b01.0...           -> 0b01
  //  0b01.1 (exact)      -> 0b01.1 (exact)      -> 0b10
  //  0b01.1...           -> 0b01.1...           -> 0b10
  //  0b10.0 (exact)      -> 0b10.0 (exact)      -> 0b10
  //  0b10.0...           -> 0b10.0...           -> 0b10
  //  0b10.1 (exact)      -> 0b10.0111..111      -> 0b10
  //  0b10.1...           -> 0b10.1...           -> 0b11
  //  0b11.0 (exact)      -> 0b11.0 (exact)      -> 0b11
  //  ...
  //
  //  adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
  //  mantissa = (mantissa >> shift) + halfbit(adjusted);

  static const int mantissa_offset = 0;
  static const int exponent_offset = mantissa_offset + mbits;
  static const int sign_offset = exponent_offset + ebits;
  VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));

  // Bail out early for zero inputs.
  if (mantissa == 0) {
    return static_cast<T>(sign << sign_offset);
  }

  // If all bits in the exponent are set, the value is infinite or NaN.
  // This is true for all binary IEEE-754 formats.
  static const int infinite_exponent = (1 << ebits) - 1;
  static const int max_normal_exponent = infinite_exponent - 1;

  // Apply the exponent bias to encode it for the result. Doing this early
  // makes it easy to detect values that will be infinite or subnormal.
  exponent += max_normal_exponent >> 1;

  if (exponent > max_normal_exponent) {
    // Overflow: the input is too large for the result type to represent.
    if (round_mode == FPTieEven) {
      // FPTieEven rounding mode handles overflows using infinities.
      exponent = infinite_exponent;
      mantissa = 0;
    } else {
      VIXL_ASSERT(round_mode == FPRoundOdd);
      // FPRoundOdd rounding mode handles overflows using the largest magnitude
      // normal number.
      exponent = max_normal_exponent;
      mantissa = (UINT64_C(1) << exponent_offset) - 1;
    }
    return static_cast<T>((sign << sign_offset) |
                          (exponent << exponent_offset) |
                          (mantissa << mantissa_offset));
  }

  // Calculate the shift required to move the top mantissa bit to the proper
  // place in the destination type.
  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
  int shift = highest_significant_bit - mbits;

  if (exponent <= 0) {
    // The output will be subnormal (before rounding).
    // For subnormal outputs, the shift must be adjusted by the exponent. The
    // +1 is necessary because the exponent of a subnormal value (encoded as 0)
    // is the same as the exponent of the smallest normal value (encoded as 1).
    shift += -exponent + 1;

    // Handle inputs that would produce a zero output.
    //
    // Shifts higher than highest_significant_bit+1 will always produce a zero
    // result. A shift of exactly highest_significant_bit+1 might produce a
    // non-zero result after rounding.
    if (shift > (highest_significant_bit + 1)) {
      if (round_mode == FPTieEven) {
        // The result will always be +/-0.0.
        return static_cast<T>(sign << sign_offset);
      } else {
        VIXL_ASSERT(round_mode == FPRoundOdd);
        VIXL_ASSERT(mantissa != 0);
        // For FPRoundOdd, if the mantissa is too small to represent and
        // non-zero return the next "odd" value.
        return static_cast<T>((sign << sign_offset) | 1);
      }
    }

    // Properly encode the exponent for a subnormal output.
    exponent = 0;
  } else {
    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
    // normal values.
    mantissa &= ~(UINT64_C(1) << highest_significant_bit);
  }

  // The casts below are only well-defined for unsigned integers.
  VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
  VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);

  if (shift > 0) {
    if (round_mode == FPTieEven) {
      // We have to shift the mantissa to the right. Some precision is lost, so
      // we need to apply rounding.
      uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
      uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
      uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
      uint64_t adjusted = mantissa - adjustment;
      T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;

      T result =
          static_cast<T>((sign << sign_offset) |
                         (exponent << exponent_offset) |
                         ((mantissa >> shift) << mantissa_offset));

      // A very large mantissa can overflow during rounding. If this happens,
      // the exponent should be incremented and the mantissa set to 1.0
      // (encoded as 0). Applying halfbit_adjusted after assembling the float
      // has the nice side-effect that this case is handled for free.
      //
      // This also handles cases where a very large finite value overflows to
      // infinity, or where a very large subnormal value overflows to become
      // normal.
      return result + halfbit_adjusted;
    } else {
      VIXL_ASSERT(round_mode == FPRoundOdd);
      // If any bits at position halfbit or below are set, onebit (ie. the
      // bottom bit of the resulting mantissa) must be set.
      uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
      if (fractional_bits != 0) {
        mantissa |= UINT64_C(1) << shift;
      }

      return static_cast<T>((sign << sign_offset) |
                            (exponent << exponent_offset) |
                            ((mantissa >> shift) << mantissa_offset));
    }
  } else {
    // We have to shift the mantissa to the left (or not at all). The input
    // mantissa is exactly representable in the output mantissa, so apply no
    // rounding correction.
    return static_cast<T>((sign << sign_offset) |
                          (exponent << exponent_offset) |
                          ((mantissa << -shift) << mantissa_offset));
  }
}


// See FPRound for a description of this function.
inline double FPRoundToDouble(int64_t sign,
                              int64_t exponent,
                              uint64_t mantissa,
                              FPRounding round_mode) {
  uint64_t bits =
      FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
                                                                  exponent,
                                                                  mantissa,
                                                                  round_mode);
  return RawbitsToDouble(bits);
}


// See FPRound for a description of this function.
inline Float16 FPRoundToFloat16(int64_t sign,
                                int64_t exponent,
                                uint64_t mantissa,
                                FPRounding round_mode) {
  return RawbitsToFloat16(
      FPRound<uint16_t,
              kFloat16ExponentBits,
              kFloat16MantissaBits>(sign, exponent, mantissa, round_mode));
}


// See FPRound for a description of this function.
static inline float FPRoundToFloat(int64_t sign,
                                   int64_t exponent,
                                   uint64_t mantissa,
                                   FPRounding round_mode) {
  uint32_t bits =
      FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
                                                                exponent,
                                                                mantissa,
                                                                round_mode);
  return RawbitsToFloat(bits);
}
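
// For example, 1.0f has sign 0, unbiased exponent 0 and mantissa 1 (the top
// mantissa bit carries the value pow(2, exponent)), so
// FPRoundToFloat(0, 0, 1, FPTieEven) == 1.0f.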


float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
float FPToFloat(double value,
                FPRounding round_mode,
                UseDefaultNaN DN,
                bool* exception = NULL);

double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);

Float16 FPToFloat16(float value,
                    FPRounding round_mode,
                    UseDefaultNaN DN,
                    bool* exception = NULL);

Float16 FPToFloat16(double value,
                    FPRounding round_mode,
                    UseDefaultNaN DN,
                    bool* exception = NULL);
}  // namespace vixl

#endif  // VIXL_UTILS_H