// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <cmath>
#include <cstring>
#include <limits>
#include <vector>

#include "compiler-intrinsics-vixl.h"
#include "globals-vixl.h"

namespace vixl {

// Macros for compile-time format checking.
#if GCC_VERSION_OR_NEWER(4, 4, 0)
#define PRINTF_CHECK(format_index, varargs_index) \
  __attribute__((format(gnu_printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif

#ifdef __GNUC__
#define VIXL_HAS_DEPRECATED_WITH_MSG
#elif defined(__clang__)
#if __has_extension(attribute_deprecated_with_message)
#define VIXL_HAS_DEPRECATED_WITH_MSG
#endif
#endif

#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
#define VIXL_DEPRECATED(replaced_by, declarator) \
  __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
#else
#define VIXL_DEPRECATED(replaced_by, declarator) declarator
#endif

#ifdef VIXL_DEBUG
#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
#else
#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
#endif

template <typename T, size_t n>
constexpr size_t ArrayLength(const T (&)[n]) {
  return n;
}

inline uint64_t GetUintMask(unsigned bits) {
  VIXL_ASSERT(bits <= 64);
  uint64_t base = (bits >= 64) ? 0 : (UINT64_C(1) << bits);
  return base - 1;
}
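
// Illustrative values (editorial note, not part of the original header): the
// mask covers the low 'bits' bits, and the 64-bit case relies on unsigned
// wrap-around of 'base - 1':
//   GetUintMask(8)  == 0xff
//   GetUintMask(64) == 0xffffffffffffffff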

inline uint64_t GetSignMask(unsigned bits) {
  VIXL_ASSERT(bits <= 64);
  return UINT64_C(1) << (bits - 1);
}

// Check number width.
// TODO: Refactor these using templates.
inline bool IsIntN(unsigned n, uint32_t x) {
  VIXL_ASSERT((0 < n) && (n <= 32));
  return x <= static_cast<uint32_t>(INT32_MAX >> (32 - n));
}
inline bool IsIntN(unsigned n, int32_t x) {
  VIXL_ASSERT((0 < n) && (n <= 32));
  if (n == 32) return true;
  int32_t limit = INT32_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}
inline bool IsIntN(unsigned n, uint64_t x) {
  VIXL_ASSERT((0 < n) && (n <= 64));
  return x <= static_cast<uint64_t>(INT64_MAX >> (64 - n));
}
inline bool IsIntN(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n <= 64));
  if (n == 64) return true;
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}
VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
  return IsIntN(n, x);
}

inline bool IsUintN(unsigned n, uint32_t x) {
  VIXL_ASSERT((0 < n) && (n <= 32));
  if (n >= 32) return true;
  return !(x >> n);
}
inline bool IsUintN(unsigned n, int32_t x) {
  VIXL_ASSERT((0 < n) && (n < 32));
  // Convert to an unsigned integer to avoid implementation-defined behavior.
  return !(static_cast<uint32_t>(x) >> n);
}
inline bool IsUintN(unsigned n, uint64_t x) {
  VIXL_ASSERT((0 < n) && (n <= 64));
  if (n >= 64) return true;
  return !(x >> n);
}
inline bool IsUintN(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  // Convert to an unsigned integer to avoid implementation-defined behavior.
  return !(static_cast<uint64_t>(x) >> n);
}
VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
  return IsUintN(n, x);
}

inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
}
VIXL_DEPRECATED("TruncateToUintN",
                inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
  return TruncateToUintN(n, x);
}

// clang-format off
#define INT_1_TO_32_LIST(V)                      \
V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)   \
V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)  \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)  \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)

#define INT_33_TO_63_LIST(V)                     \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)  \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)  \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)  \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)

// clang-format on

#define DECLARE_IS_INT_N(N)                                       \
  inline bool IsInt##N(int64_t x) { return IsIntN(N, x); }        \
  VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \
    return IsIntN(N, x);                                          \
  }

#define DECLARE_IS_UINT_N(N)                                        \
  inline bool IsUint##N(int64_t x) { return IsUintN(N, x); }        \
  VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
    return IsUintN(N, x);                                           \
  }

#define DECLARE_TRUNCATE_TO_UINT_32(N)                             \
  inline uint32_t TruncateToUint##N(uint64_t x) {                  \
    return static_cast<uint32_t>(TruncateToUintN(N, x));           \
  }                                                                \
  VIXL_DEPRECATED("TruncateToUint" #N,                             \
                  inline uint32_t truncate_to_int##N(int64_t x)) { \
    return TruncateToUint##N(x);                                   \
  }

INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
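
// The lists above expand to helpers such as IsInt8, IsUint4 and
// TruncateToUint8. Illustrative values (editorial note, not in the original):
//   IsInt8(127)  == true     IsInt8(128)  == false
//   IsUint4(15)  == true     IsUint4(16)  == false
//   TruncateToUint8(0x1ff) == 0xff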

#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_UINT_32

// Bit field extraction.
inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  if ((msb == 63) && (lsb == 0)) return x;
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}


inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint64_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
}


inline int64_t ExtractSignedBitfield64(int msb, int lsb, uint64_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
  // If the highest extracted bit is set, sign extend.
  if ((temp >> (msb - lsb)) == 1) {
    temp |= ~UINT64_C(0) << (msb - lsb);
  }
  int64_t result;
  memcpy(&result, &temp, sizeof(result));
  return result;
}

inline int32_t ExtractSignedBitfield32(int msb, int lsb, uint64_t x) {
  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
              (msb >= lsb));
  uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
  int32_t result;
  memcpy(&result, &temp, sizeof(result));
  return result;
}
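
// Worked examples (editorial note): bits [msb..lsb] are shifted down to bit 0,
// and the signed variants then sign-extend from the extracted width:
//   ExtractUnsignedBitfield64(7, 4, 0xab) == 0xa
//   ExtractSignedBitfield64(3, 0, 0xf)    == -1  (0b1111 sign-extended)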

inline uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT((width > 0) && (width <= 64));
  uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
  rotate &= 63;
  if (rotate > 0) {
    value &= width_mask;
    value = (value << (width - rotate)) | (value >> rotate);
  }
  return value & width_mask;
}
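
// For example (illustrative): rotating within an 8-bit lane,
//   RotateRight(0x12, 4, 8) == 0x21
// and within a 4-bit lane,
//   RotateRight(0b0001, 1, 4) == 0b1000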


// Wrapper class for passing FP16 values through the assembler.
// This is purely to aid with type checking/casting.
class Float16 {
 public:
  explicit Float16(double dvalue);
  Float16() : rawbits_(0x0) {}
  friend uint16_t Float16ToRawbits(Float16 value);
  friend Float16 RawbitsToFloat16(uint16_t bits);

 protected:
  uint16_t rawbits_;
};

// Floating point representation.
uint16_t Float16ToRawbits(Float16 value);


uint32_t FloatToRawbits(float value);
VIXL_DEPRECATED("FloatToRawbits",
                inline uint32_t float_to_rawbits(float value)) {
  return FloatToRawbits(value);
}

uint64_t DoubleToRawbits(double value);
VIXL_DEPRECATED("DoubleToRawbits",
                inline uint64_t double_to_rawbits(double value)) {
  return DoubleToRawbits(value);
}

Float16 RawbitsToFloat16(uint16_t bits);

float RawbitsToFloat(uint32_t bits);
VIXL_DEPRECATED("RawbitsToFloat",
                inline float rawbits_to_float(uint32_t bits)) {
  return RawbitsToFloat(bits);
}

double RawbitsToDouble(uint64_t bits);
VIXL_DEPRECATED("RawbitsToDouble",
                inline double rawbits_to_double(uint64_t bits)) {
  return RawbitsToDouble(bits);
}

// Convert unsigned to signed numbers in a well-defined way (using two's
// complement representations).
inline int64_t RawbitsToInt64(uint64_t bits) {
  return (bits >= UINT64_C(0x8000000000000000))
             ? (-static_cast<int64_t>(-bits - 1) - 1)
             : static_cast<int64_t>(bits);
}

inline int32_t RawbitsToInt32(uint32_t bits) {
  return (bits >= UINT64_C(0x80000000)) ? (-static_cast<int32_t>(-bits - 1) - 1)
                                        : static_cast<int32_t>(bits);
}

namespace internal {

// Internal simulation class used solely by the simulator to
// provide an abstraction layer for any half-precision arithmetic.
class SimFloat16 : public Float16 {
 public:
  // TODO: We should investigate making this constructor explicit.
  // This is currently difficult to do due to a number of templated
  // functions in the simulator which rely on returning double values.
  SimFloat16(double dvalue) : Float16(dvalue) {}  // NOLINT(runtime/explicit)
  SimFloat16(Float16 f) {  // NOLINT(runtime/explicit)
    this->rawbits_ = Float16ToRawbits(f);
  }
  SimFloat16() : Float16() {}
  SimFloat16 operator-() const;
  SimFloat16 operator+(SimFloat16 rhs) const;
  SimFloat16 operator-(SimFloat16 rhs) const;
  SimFloat16 operator*(SimFloat16 rhs) const;
  SimFloat16 operator/(SimFloat16 rhs) const;
  bool operator<(SimFloat16 rhs) const;
  bool operator>(SimFloat16 rhs) const;
  bool operator==(SimFloat16 rhs) const;
  bool operator!=(SimFloat16 rhs) const;
  // This is necessary for conversions performed in (macro asm) Fmov.
  bool operator==(double rhs) const;
  operator double() const;
};
}  // namespace internal

uint32_t Float16Sign(internal::SimFloat16 value);

uint32_t Float16Exp(internal::SimFloat16 value);

uint32_t Float16Mantissa(internal::SimFloat16 value);

uint32_t FloatSign(float value);
VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
  return FloatSign(value);
}

uint32_t FloatExp(float value);
VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
  return FloatExp(value);
}

uint32_t FloatMantissa(float value);
VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
  return FloatMantissa(value);
}

uint32_t DoubleSign(double value);
VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
  return DoubleSign(value);
}

uint32_t DoubleExp(double value);
VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
  return DoubleExp(value);
}

uint64_t DoubleMantissa(double value);
VIXL_DEPRECATED("DoubleMantissa",
                inline uint64_t double_mantissa(double value)) {
  return DoubleMantissa(value);
}

internal::SimFloat16 Float16Pack(uint16_t sign,
                                 uint16_t exp,
                                 uint16_t mantissa);

float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
VIXL_DEPRECATED("FloatPack",
                inline float float_pack(uint32_t sign,
                                        uint32_t exp,
                                        uint32_t mantissa)) {
  return FloatPack(sign, exp, mantissa);
}

double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
VIXL_DEPRECATED("DoublePack",
                inline double double_pack(uint32_t sign,
                                          uint32_t exp,
                                          uint64_t mantissa)) {
  return DoublePack(sign, exp, mantissa);
}

// An fpclassify() function for 16-bit half-precision floats.
int Float16Classify(Float16 value);
VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
  return Float16Classify(RawbitsToFloat16(value));
}

bool IsZero(Float16 value);

inline bool IsPositiveZero(double value) {
  return (value == 0.0) && (copysign(1.0, value) > 0.0);
}

inline bool IsNaN(float value) { return std::isnan(value); }

inline bool IsNaN(double value) { return std::isnan(value); }

inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }

inline bool IsInf(float value) { return std::isinf(value); }

inline bool IsInf(double value) { return std::isinf(value); }

inline bool IsInf(Float16 value) {
  return Float16Classify(value) == FP_INFINITE;
}


// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = DoubleToRawbits(num);
  if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = FloatToRawbits(num);
  if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(Float16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
}


template <typename T>
inline bool IsQuietNaN(T num) {
  return IsNaN(num) && !IsSignallingNaN(num);
}


// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(IsNaN(num));
  return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(IsNaN(num));
  return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
}


inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  VIXL_ASSERT(IsNaN(num));
  return internal::SimFloat16(
      RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
}


// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return fma(op1, op2, a);
}


inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return fmaf(op1, op2, a);
}


inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }


template <typename T>
inline int HighestSetBitPosition(T value) {
  VIXL_ASSERT(value != 0);
  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
}


template <typename V>
inline int WhichPowerOf2(V value) {
  VIXL_ASSERT(IsPowerOf2(value));
  return CountTrailingZeros(value);
}


unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);


int BitCount(uint64_t value);


template <typename T>
T ReverseBits(T value) {
  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
              (sizeof(value) == 4) || (sizeof(value) == 8));
  T result = 0;
  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
    result = (result << 1) | (value & 1);
    value >>= 1;
  }
  return result;
}
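
// For example (illustrative), with an 8-bit input:
//   ReverseBits<uint8_t>(0x01) == 0x80
//   ReverseBits<uint8_t>(0x0f) == 0xf0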


template <typename T>
inline T SignExtend(T val, int size_in_bits) {
  VIXL_ASSERT(size_in_bits > 0);
  T mask = (T(2) << (size_in_bits - 1)) - T(1);
  val &= mask;
  T sign_bits = -((val >> (size_in_bits - 1)) << size_in_bits);
  val |= sign_bits;
  return val;
}
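
// For example (illustrative): treating the low 8 bits as a signed field,
//   SignExtend<int32_t>(0xff, 8) == -1
//   SignExtend<int32_t>(0x7f, 8) == 127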


template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
  // Split the 64-bit value into an 8-bit array, where b[0] is the least
  // significant byte, and b[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = UINT64_C(0xff00000000000000);
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[0] is used by REV16_x, REV16_w
  //  permute_table[1] is used by REV32_x, REV_w
  //  permute_table[2] is used by REV_x
  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
                                              {4, 5, 6, 7, 0, 1, 2, 3},
                                              {0, 1, 2, 3, 4, 5, 6, 7}};
  uint64_t temp = 0;
  for (int i = 0; i < 8; i++) {
    temp <<= 8;
    temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
  }

  T result;
  VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
  memcpy(&result, &temp, sizeof(result));
  return result;
}
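
// For example (illustrative), with a 32-bit input:
//   ReverseBytes(UINT32_C(0x11223344), 1) == 0x22114433  (REV16: per-halfword)
//   ReverseBytes(UINT32_C(0x11223344), 2) == 0x44332211  (REV: whole word)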

template <unsigned MULTIPLE, typename T>
inline bool IsMultiple(T value) {
  VIXL_ASSERT(IsPowerOf2(MULTIPLE));
  return (value & (MULTIPLE - 1)) == 0;
}

template <typename T>
inline bool IsMultiple(T value, unsigned multiple) {
  VIXL_ASSERT(IsPowerOf2(multiple));
  return (value & (multiple - 1)) == 0;
}

template <typename T>
inline bool IsAligned(T pointer, int alignment) {
  VIXL_ASSERT(IsPowerOf2(alignment));
  return (pointer & (alignment - 1)) == 0;
}

// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template <unsigned ALIGN, typename T>
inline bool IsAligned(T pointer) {
  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.
  return IsAligned((intptr_t)(pointer), ALIGN);
}

template <typename T>
bool IsWordAligned(T pointer) {
  return IsAligned<4>(pointer);
}

// Increment a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>
T AlignUp(T pointer,
          typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
  VIXL_ASSERT(IsPowerOf2(alignment));
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  typename Unsigned<sizeof(T) * kBitsPerByte>::type pointer_raw =
      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t mask = alignment - 1;
  T result = (T)((pointer_raw + mask) & ~mask);
  VIXL_ASSERT(result >= pointer);

  return result;
}

// Decrement a pointer until it has the specified alignment. The alignment must
// be a power of two.
template <class T>
T AlignDown(T pointer,
            typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
  VIXL_ASSERT(IsPowerOf2(alignment));
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.

  typename Unsigned<sizeof(T) * kBitsPerByte>::type pointer_raw =
      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t mask = alignment - 1;
  return (T)(pointer_raw & ~mask);
}
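
// For example (illustrative), with integral "pointers":
//   AlignUp(UINT64_C(0x1001), 16)   == 0x1010
//   AlignDown(UINT64_C(0x100f), 16) == 0x1000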


template <typename T>
inline T ExtractBit(T value, unsigned bit) {
  return (value >> bit) & T(1);
}

template <typename Ts, typename Td>
inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
  return Td((value >> least_significant_bit) & Ts(mask));
}

template <typename Ts, typename Td>
inline void AssignBit(Td& dst,  // NOLINT(runtime/references)
                      int bit,
                      Ts value) {
  VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
  VIXL_ASSERT(bit >= 0);
  VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
  Td mask(1);
  dst &= ~(mask << bit);
  dst |= Td(value) << bit;
}

template <typename Td, typename Ts>
inline void AssignBits(Td& dst,  // NOLINT(runtime/references)
                       int least_significant_bit,
                       Ts mask,
                       Ts value) {
  VIXL_ASSERT(least_significant_bit >= 0);
  VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
  VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
              Td(mask));
  VIXL_ASSERT((value & mask) == value);
  dst &= ~(Td(mask) << least_significant_bit);
  dst |= Td(value) << least_significant_bit;
}
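
// Usage sketch (illustrative):
//   uint32_t x = 0;
//   AssignBits(x, 8, 0xffu, 0xabu);  // x == 0xab00
//   AssignBit(x, 0, 1u);             // x == 0xab01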

class VFP {
 public:
  static uint32_t FP32ToImm8(float imm) {
    // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
    uint32_t bits = FloatToRawbits(imm);
    // bit7: a000.0000
    uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
    // bit6: 0b00.0000
    uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
    // bit5_to_0: 00cd.efgh
    uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
  }
  static uint32_t FP64ToImm8(double imm) {
    // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //       0000.0000.0000.0000.0000.0000.0000.0000
    uint64_t bits = DoubleToRawbits(imm);
    // bit7: a000.0000
    uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
    // bit6: 0b00.0000
    uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
    // bit5_to_0: 00cd.efgh
    uint64_t bit5_to_0 = (bits >> 48) & 0x3f;

    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
  }
  static float Imm8ToFP32(uint32_t imm8) {
    // Imm8: abcdefgh (8 bits)
    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint32_t bit7 = (bits >> 7) & 0x1;
    uint32_t bit6 = (bits >> 6) & 0x1;
    uint32_t bit5_to_0 = bits & 0x3f;
    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

    return RawbitsToFloat(result);
  }
  static double Imm8ToFP64(uint32_t imm8) {
    // Imm8: abcdefgh (8 bits)
    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
    // where B is b ^ 1
    uint32_t bits = imm8;
    uint64_t bit7 = (bits >> 7) & 0x1;
    uint64_t bit6 = (bits >> 6) & 0x1;
    uint64_t bit5_to_0 = bits & 0x3f;
    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
    return RawbitsToDouble(result);
  }
  static bool IsImmFP32(float imm) {
    // Valid values will have the form:
    //   aBbb.bbbc.defg.h000.0000.0000.0000.0000
    uint32_t bits = FloatToRawbits(imm);
    // bits[19..0] are cleared.
    if ((bits & 0x7ffff) != 0) {
      return false;
    }

    // bits[29..25] are all set or all cleared.
    uint32_t b_pattern = (bits >> 16) & 0x3e00;
    if (b_pattern != 0 && b_pattern != 0x3e00) {
      return false;
    }
    // bit[30] and bit[29] are opposite.
    if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
      return false;
    }
    return true;
  }
  static bool IsImmFP64(double imm) {
    // Valid values will have the form:
    //   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    //   0000.0000.0000.0000.0000.0000.0000.0000
    uint64_t bits = DoubleToRawbits(imm);
    // bits[47..0] are cleared.
    if ((bits & 0x0000ffffffffffff) != 0) {
      return false;
    }
    // bits[61..54] are all set or all cleared.
    uint32_t b_pattern = (bits >> 48) & 0x3fc0;
    if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
      return false;
    }
    // bit[62] and bit[61] are opposite.
    if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
      return false;
    }
    return true;
  }
};
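
// For example (illustrative): 2.0f encodes as imm8 0, and the mapping is
// invertible for encodable values:
//   VFP::IsImmFP32(2.0f)  == true
//   VFP::FP32ToImm8(2.0f) == 0
//   VFP::Imm8ToFP32(0)    == 2.0f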

class BitField {
  // ForEachBitHelper is a functor that will call
  //   bool ForEachBitHelper::execute(ElementType id) const
  // and expects a boolean in return whether to continue (if true)
  // or stop (if false).
  // check_set will check if the bits are on (true) or off (false).
  template <typename ForEachBitHelper, bool check_set>
  bool ForEachBit(const ForEachBitHelper& helper) {
    for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
      if (bitfield_[i] == check_set)
        if (!helper.execute(i)) return false;
    }
    return true;
  }

 public:
  explicit BitField(unsigned size) : bitfield_(size, 0) {}

  void Set(int i) {
    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
    bitfield_[i] = true;
  }

  void Unset(int i) {
    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
    bitfield_[i] = false;
  }

  bool IsSet(int i) const { return bitfield_[i]; }

  // For each bit not set in the bitfield call the execute functor
  // execute.
  // ForEachBitSetHelper::execute returns true if the iteration through
  // the bits can continue, otherwise it will stop.
  //   struct ForEachBitSetHelper {
  //     bool execute(int /*id*/) { return false; }
  //   };
  template <typename ForEachBitNotSetHelper>
  bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
    return ForEachBit<ForEachBitNotSetHelper, false>(helper);
  }

  // For each bit set in the bitfield call the execute functor
  // execute.
  template <typename ForEachBitSetHelper>
  bool ForEachBitSet(const ForEachBitSetHelper& helper) {
    return ForEachBit<ForEachBitSetHelper, true>(helper);
  }

 private:
  std::vector<bool> bitfield_;
};
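
// Usage sketch (illustrative, with a hypothetical helper functor):
//   struct PrintSetBit {
//     bool execute(int id) const {
//       printf("bit %d is set\n", id);
//       return true;  // Keep iterating.
//     }
//   };
//   BitField bits(16);
//   bits.Set(3);
//   bits.ForEachBitSet(PrintSetBit());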

namespace internal {

typedef int64_t Int64;
class Uint64;
class Uint128;

class Uint32 {
  uint32_t data_;

 public:
  // Unlike uint32_t, Uint32 has a default constructor.
  Uint32() { data_ = 0; }
  explicit Uint32(uint32_t data) : data_(data) {}
  inline explicit Uint32(Uint64 data);
  uint32_t Get() const { return data_; }
  template <int N>
  int32_t GetSigned() const {
    return ExtractSignedBitfield32(N - 1, 0, data_);
  }
  int32_t GetSigned() const { return data_; }
  Uint32 operator~() const { return Uint32(~data_); }
  Uint32 operator-() const { return Uint32(-data_); }
  bool operator==(Uint32 value) const { return data_ == value.data_; }
  bool operator!=(Uint32 value) const { return data_ != value.data_; }
  bool operator>(Uint32 value) const { return data_ > value.data_; }
  Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
  Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
  Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
  Uint32 operator&=(Uint32 value) {
    data_ &= value.data_;
    return *this;
  }
  Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
  Uint32 operator^=(Uint32 value) {
    data_ ^= value.data_;
    return *this;
  }
  Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
  Uint32 operator|=(Uint32 value) {
    data_ |= value.data_;
    return *this;
  }
  // Unlike uint32_t, the shift functions can accept negative shift and
  // return 0 when the shift is too big.
  Uint32 operator>>(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 32) return Uint32(0);
      return Uint32(data_ << tmp);
    }
    int tmp = shift;
    if (tmp >= 32) return Uint32(0);
    return Uint32(data_ >> tmp);
  }
  Uint32 operator<<(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 32) return Uint32(0);
      return Uint32(data_ >> tmp);
    }
    int tmp = shift;
    if (tmp >= 32) return Uint32(0);
    return Uint32(data_ << tmp);
  }
};

class Uint64 {
  uint64_t data_;

 public:
  // Unlike uint64_t, Uint64 has a default constructor.
  Uint64() { data_ = 0; }
  explicit Uint64(uint64_t data) : data_(data) {}
  explicit Uint64(Uint32 data) : data_(data.Get()) {}
  inline explicit Uint64(Uint128 data);
  uint64_t Get() const { return data_; }
  int64_t GetSigned(int N) const {
    return ExtractSignedBitfield64(N - 1, 0, data_);
  }
  int64_t GetSigned() const { return data_; }
  Uint32 ToUint32() const {
    VIXL_ASSERT((data_ >> 32) == 0);
    return Uint32(static_cast<uint32_t>(data_));
  }
  Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
  Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
  Uint64 operator~() const { return Uint64(~data_); }
  Uint64 operator-() const { return Uint64(-data_); }
  bool operator==(Uint64 value) const { return data_ == value.data_; }
  bool operator!=(Uint64 value) const { return data_ != value.data_; }
  Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
  Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
  Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
  Uint64 operator&=(Uint64 value) {
    data_ &= value.data_;
    return *this;
  }
  Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
  Uint64 operator^=(Uint64 value) {
    data_ ^= value.data_;
    return *this;
  }
  Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
  Uint64 operator|=(Uint64 value) {
    data_ |= value.data_;
    return *this;
  }
  // Unlike uint64_t, the shift functions can accept negative shift and
  // return 0 when the shift is too big.
  Uint64 operator>>(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 64) return Uint64(0);
      return Uint64(data_ << tmp);
    }
    int tmp = shift;
    if (tmp >= 64) return Uint64(0);
    return Uint64(data_ >> tmp);
  }
  Uint64 operator<<(int shift) const {
    if (shift == 0) return *this;
    if (shift < 0) {
      int tmp = -shift;
      if (tmp >= 64) return Uint64(0);
      return Uint64(data_ >> tmp);
    }
    int tmp = shift;
    if (tmp >= 64) return Uint64(0);
    return Uint64(data_ << tmp);
  }
};

class Uint128 {
  uint64_t data_high_;
  uint64_t data_low_;

 public:
  Uint128() : data_high_(0), data_low_(0) {}
  explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
  explicit Uint128(Uint64 data_low)
      : data_high_(0), data_low_(data_low.Get()) {}
  Uint128(uint64_t data_high, uint64_t data_low)
      : data_high_(data_high), data_low_(data_low) {}
  Uint64 ToUint64() const {
    VIXL_ASSERT(data_high_ == 0);
    return Uint64(data_low_);
  }
  Uint64 GetHigh64() const { return Uint64(data_high_); }
  Uint64 GetLow64() const { return Uint64(data_low_); }
  Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
  bool operator==(Uint128 value) const {
    return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
  }
  Uint128 operator&(Uint128 value) const {
    return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
  }
  Uint128 operator&=(Uint128 value) {
    data_high_ &= value.data_high_;
    data_low_ &= value.data_low_;
    return *this;
  }
  Uint128 operator|=(Uint128 value) {
    data_high_ |= value.data_high_;
    data_low_ |= value.data_low_;
    return *this;
  }
  Uint128 operator>>(int shift) const {
    VIXL_ASSERT((shift >= 0) && (shift < 128));
    if (shift == 0) return *this;
    if (shift >= 64) {
      return Uint128(0, data_high_ >> (shift - 64));
    }
    uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
    return Uint128(data_high_ >> shift, tmp);
  }
  Uint128 operator<<(int shift) const {
    VIXL_ASSERT((shift >= 0) && (shift < 128));
    if (shift == 0) return *this;
    if (shift >= 64) {
      return Uint128(data_low_ << (shift - 64), 0);
    }
    uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
    return Uint128(tmp, data_low_ << shift);
  }
};

Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}

Int64 BitCount(Uint32 value);

// The algorithm used is adapted from the one described in section 8.2 of
// Hacker's Delight, by Henry S. Warren, Jr.
template <unsigned N, typename T>
int64_t MultiplyHigh(T u, T v) {
  uint64_t u0, v0, w0, u1, v1, w1, w2, t;
  VIXL_STATIC_ASSERT((N == 8) || (N == 16) || (N == 32) || (N == 64));
  uint64_t sign_mask = UINT64_C(1) << (N - 1);
  uint64_t sign_ext = 0;
  unsigned half_bits = N / 2;
  uint64_t half_mask = GetUintMask(half_bits);
  if (std::numeric_limits<T>::is_signed) {
    sign_ext = UINT64_C(0xffffffffffffffff) << half_bits;
  }

  VIXL_ASSERT(sizeof(u) == sizeof(uint64_t));
  VIXL_ASSERT(sizeof(u) == sizeof(u0));

  u0 = u & half_mask;
  u1 = u >> half_bits | (((u & sign_mask) != 0) ? sign_ext : 0);
  v0 = v & half_mask;
  v1 = v >> half_bits | (((v & sign_mask) != 0) ? sign_ext : 0);

  w0 = u0 * v0;
  t = u1 * v0 + (w0 >> half_bits);

  w1 = t & half_mask;
  w2 = t >> half_bits | (((t & sign_mask) != 0) ? sign_ext : 0);
  w1 = u0 * v1 + w1;
  w1 = w1 >> half_bits | (((w1 & sign_mask) != 0) ? sign_ext : 0);

  uint64_t value = u1 * v1 + w2 + w1;
  int64_t result;
  memcpy(&result, &value, sizeof(result));
  return result;
}
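
// For example (illustrative): MultiplyHigh<64>(u, v) returns the high 64 bits
// of the 128-bit product, honouring the signedness of T.
//   MultiplyHigh<64>(UINT64_C(1) << 32, UINT64_C(1) << 32) == 1
//   MultiplyHigh<64>(INT64_C(-1), INT64_C(-1))             == 0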

}  // namespace internal

// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
extern const Float16 kFP16DefaultNaN;

// Floating-point infinity values.
extern const Float16 kFP16PositiveInfinity;
extern const Float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// Floating-point zero values.
extern const Float16 kFP16PositiveZero;
extern const Float16 kFP16NegativeZero;

// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };

// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
//  sign:     0 = positive, 1 = negative
//  exponent: Unbiased IEEE-754 exponent.
//  mantissa: The mantissa of the input. The top bit (which is not encoded for
//            normal IEEE-754 values) must not be omitted. This bit has the
//            value 'pow(2, exponent)'.
//
// The input value is assumed to be a normalized value. That is, the input may
// not be infinity or NaN. If the source value is subnormal, it must be
// normalized before calling this function such that the highest set bit in the
// mantissa has the value 'pow(2, exponent)'.
//
// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
// calling a templated FPRound.
template <class T, int ebits, int mbits>
T FPRound(int64_t sign,
          int64_t exponent,
          uint64_t mantissa,
          FPRounding round_mode) {
  VIXL_ASSERT((sign == 0) || (sign == 1));

  // Only FPTieEven and FPRoundOdd rounding modes are implemented.
  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));

  // Rounding can promote subnormals to normals, and normals to infinities. For
  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
  // encodable as a float, but rounding based on the low-order mantissa bits
  // could make it overflow. With ties-to-even rounding, this value would
  // become an infinity.

  //  ---- Rounding Method ----
  //
  //  The exponent is irrelevant in the rounding operation, so we treat the
  //  lowest-order bit that will fit into the result ('onebit') as having
  //  the value '1'. Similarly, the highest-order bit that won't fit into
  //  the result ('halfbit') has the value '0.5'. The 'point' sits between
  //  'onebit' and 'halfbit':
  //
  //            These bits fit into the result.
  //               |---------------------|
  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
  //                                     ||
  //                                    / |
  //                                   /  halfbit
  //                               onebit
  //
  //  For subnormal outputs, the range of representable bits is smaller and
  //  the position of onebit and halfbit depends on the exponent of the
  //  input, but the method is otherwise similar.
  //
  //  onebit(frac)
  //    |
  //    | halfbit(frac)          halfbit(adjusted)
  //    | /                      /
  //    | |                      |
  //   0b00.0 (exact)       -> 0b00.0 (exact)       -> 0b00
  //   0b00.0...            -> 0b00.0...            -> 0b00
  //   0b00.1 (exact)       -> 0b00.0111..111       -> 0b00
  //   0b00.1...            -> 0b00.1...            -> 0b01
  //   0b01.0 (exact)       -> 0b01.0 (exact)       -> 0b01
  //   0b01.0...            -> 0b01.0...            -> 0b01
  //   0b01.1 (exact)       -> 0b01.1 (exact)       -> 0b10
  //   0b01.1...            -> 0b01.1...            -> 0b10
  //   0b10.0 (exact)       -> 0b10.0 (exact)       -> 0b10
  //   0b10.0...            -> 0b10.0...            -> 0b10
  //   0b10.1 (exact)       -> 0b10.0111..111       -> 0b10
  //   0b10.1...            -> 0b10.1...            -> 0b11
  //   0b11.0 (exact)       -> 0b11.0 (exact)       -> 0b11
  //   ...                   /                       /
  //                        /                       /
  //  adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
  //
  //  mantissa = (mantissa >> shift) + halfbit(adjusted);

  static const int mantissa_offset = 0;
  static const int exponent_offset = mantissa_offset + mbits;
  static const int sign_offset = exponent_offset + ebits;
  VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));

  // Bail out early for zero inputs.
  if (mantissa == 0) {
    return static_cast<T>(sign << sign_offset);
  }

  // If all bits in the exponent are set, the value is infinite or NaN.
  // This is true for all binary IEEE-754 formats.
  static const int infinite_exponent = (1 << ebits) - 1;
  static const int max_normal_exponent = infinite_exponent - 1;

  // Apply the exponent bias to encode it for the result. Doing this early
  // makes it easy to detect values that will be infinite or subnormal.
  exponent += max_normal_exponent >> 1;

  if (exponent > max_normal_exponent) {
    // Overflow: the input is too large for the result type to represent.
    if (round_mode == FPTieEven) {
      // FPTieEven rounding mode handles overflows using infinities.
      exponent = infinite_exponent;
      mantissa = 0;
    } else {
      VIXL_ASSERT(round_mode == FPRoundOdd);
      // FPRoundOdd rounding mode handles overflows using the largest magnitude
      // normal number.
      exponent = max_normal_exponent;
      mantissa = (UINT64_C(1) << exponent_offset) - 1;
    }
    return static_cast<T>((sign << sign_offset) |
                          (exponent << exponent_offset) |
                          (mantissa << mantissa_offset));
  }

  // Calculate the shift required to move the top mantissa bit to the proper
  // place in the destination type.
  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
  int shift = highest_significant_bit - mbits;

  if (exponent <= 0) {
    // The output will be subnormal (before rounding).
    // For subnormal outputs, the shift must be adjusted by the exponent. The
    // +1 is necessary because the exponent of a subnormal value (encoded as 0)
    // is the same as the exponent of the smallest normal value (encoded as 1).
    shift += -exponent + 1;

    // Handle inputs that would produce a zero output.
    //
    // Shifts higher than highest_significant_bit+1 will always produce a zero
    // result. A shift of exactly highest_significant_bit+1 might produce a
    // non-zero result after rounding.
    if (shift > (highest_significant_bit + 1)) {
      if (round_mode == FPTieEven) {
        // The result will always be +/-0.0.
        return static_cast<T>(sign << sign_offset);
      } else {
        VIXL_ASSERT(round_mode == FPRoundOdd);
        VIXL_ASSERT(mantissa != 0);
        // For FPRoundOdd, if the mantissa is too small to represent and
        // non-zero return the next "odd" value.
        return static_cast<T>((sign << sign_offset) | 1);
      }
    }

    // Properly encode the exponent for a subnormal output.
    exponent = 0;
  } else {
    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
    // normal values.
    mantissa &= ~(UINT64_C(1) << highest_significant_bit);
  }

  // The casts below are only well-defined for unsigned integers.
  VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
  VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);

  if (shift > 0) {
    if (round_mode == FPTieEven) {
      // We have to shift the mantissa to the right. Some precision is lost, so
      // we need to apply rounding.
      uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
      uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
      uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
      uint64_t adjusted = mantissa - adjustment;
      T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;

      T result =
          static_cast<T>((sign << sign_offset) |
                         (exponent << exponent_offset) |
                         ((mantissa >> shift) << mantissa_offset));

      // A very large mantissa can overflow during rounding. If this happens,
      // the exponent should be incremented and the mantissa set to 1.0
      // (encoded as 0). Applying halfbit_adjusted after assembling the float
      // has the nice side-effect that this case is handled for free.
      //
      // This also handles cases where a very large finite value overflows to
      // infinity, or where a very large subnormal value overflows to become
      // normal.
      return result + halfbit_adjusted;
    } else {
      VIXL_ASSERT(round_mode == FPRoundOdd);
      // If any bits at position halfbit or below are set, onebit (ie. the
      // bottom bit of the resulting mantissa) must be set.
      uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
      if (fractional_bits != 0) {
        mantissa |= UINT64_C(1) << shift;
      }

      return static_cast<T>((sign << sign_offset) |
                            (exponent << exponent_offset) |
                            ((mantissa >> shift) << mantissa_offset));
    }
  } else {
    // We have to shift the mantissa to the left (or not at all). The input
    // mantissa is exactly representable in the output mantissa, so apply no
    // rounding correction.
    return static_cast<T>((sign << sign_offset) |
                          (exponent << exponent_offset) |
                          ((mantissa << -shift) << mantissa_offset));
  }
}


// See FPRound for a description of this function.
inline double FPRoundToDouble(int64_t sign,
                              int64_t exponent,
                              uint64_t mantissa,
                              FPRounding round_mode) {
  uint64_t bits =
      FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
                                                                  exponent,
                                                                  mantissa,
                                                                  round_mode);
  return RawbitsToDouble(bits);
}


// See FPRound for a description of this function.
inline Float16 FPRoundToFloat16(int64_t sign,
                                int64_t exponent,
                                uint64_t mantissa,
                                FPRounding round_mode) {
  return RawbitsToFloat16(
      FPRound<uint16_t, kFloat16ExponentBits, kFloat16MantissaBits>(
          sign, exponent, mantissa, round_mode));
}


// See FPRound for a description of this function.
static inline float FPRoundToFloat(int64_t sign,
                                   int64_t exponent,
                                   uint64_t mantissa,
                                   FPRounding round_mode) {
  uint32_t bits =
      FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
                                                                exponent,
                                                                mantissa,
                                                                round_mode);
  return RawbitsToFloat(bits);
}
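
// For example (illustrative): 1.5 is 0b1.1 * 2^0, so it is described by
// sign 0, unbiased exponent 0, and a mantissa whose top set bit carries the
// value 2^0:
//   FPRoundToFloat(0, 0, UINT64_C(0x3) << 62, FPTieEven) == 1.5f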


float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
float FPToFloat(double value,
                FPRounding round_mode,
                UseDefaultNaN DN,
                bool* exception = NULL);

double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);

Float16 FPToFloat16(float value,
                    FPRounding round_mode,
                    UseDefaultNaN DN,
                    bool* exception = NULL);

Float16 FPToFloat16(double value,
                    FPRounding round_mode,
                    UseDefaultNaN DN,
                    bool* exception = NULL);

// Like static_cast<T>(value), but with specialisations for the Float16 type.
template <typename T, typename F>
T StaticCastFPTo(F value) {
  return static_cast<T>(value);
}

template <>
inline float StaticCastFPTo<float, Float16>(Float16 value) {
  return FPToFloat(value, kIgnoreDefaultNaN);
}

template <>
inline double StaticCastFPTo<double, Float16>(Float16 value) {
  return FPToDouble(value, kIgnoreDefaultNaN);
}

template <>
inline Float16 StaticCastFPTo<Float16, float>(float value) {
  return FPToFloat16(value, FPTieEven, kIgnoreDefaultNaN);
}

template <>
inline Float16 StaticCastFPTo<Float16, double>(double value) {
  return FPToFloat16(value, FPTieEven, kIgnoreDefaultNaN);
}

template <typename T>
uint64_t FPToRawbitsWithSize(unsigned size_in_bits, T value) {
  switch (size_in_bits) {
    case 16:
      return Float16ToRawbits(StaticCastFPTo<Float16>(value));
    case 32:
      return FloatToRawbits(StaticCastFPTo<float>(value));
    case 64:
      return DoubleToRawbits(StaticCastFPTo<double>(value));
  }
  VIXL_UNREACHABLE();
  return 0;
}

template <typename T>
T RawbitsWithSizeToFP(unsigned size_in_bits, uint64_t value) {
  VIXL_ASSERT(IsUintN(size_in_bits, value));
  switch (size_in_bits) {
    case 16:
      return StaticCastFPTo<T>(RawbitsToFloat16(static_cast<uint16_t>(value)));
    case 32:
      return StaticCastFPTo<T>(RawbitsToFloat(static_cast<uint32_t>(value)));
    case 64:
      return StaticCastFPTo<T>(RawbitsToDouble(value));
  }
  VIXL_UNREACHABLE();
  return 0;
}
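
// For example (illustrative): round-tripping a float through its bit pattern,
//   FPToRawbitsWithSize(32, 1.0f)              == 0x3f800000
//   RawbitsWithSizeToFP<float>(32, 0x3f800000) == 1.0f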

// Jenkins one-at-a-time hash, based on
// https://en.wikipedia.org/wiki/Jenkins_hash_function citing
// https://www.drdobbs.com/database/algorithm-alley/184410284.
constexpr uint32_t Hash(const char* str, uint32_t hash = 0) {
  if (*str == '\0') {
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    return hash;
  } else {
    hash += *str;
    hash += hash << 10;
    hash ^= hash >> 6;
    return Hash(str + 1, hash);
  }
}

constexpr uint32_t operator"" _h(const char* x, size_t) { return Hash(x); }
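
// Usage sketch (illustrative): because Hash is constexpr, the "_h" literal
// can turn strings into compile-time constants, e.g. to drive a switch:
//   switch (Hash(mnemonic)) {
//     case "add"_h: /* ... */ break;
//     case "sub"_h: /* ... */ break;
//   }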

}  // namespace vixl

#endif  // VIXL_UTILS_H