// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
#define V8_WASM_BASELINE_LIFTOFF_REGISTER_H_

#include <iosfwd>
#include <memory>

#include "src/base/bits.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-opcodes.h"

namespace v8 {
namespace internal {
namespace wasm {

static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
static constexpr bool kNeedS128RegPair = !kSimpleFPAliasing;

enum RegClass : uint8_t {
  kGpReg,
  kFpReg,
  kGpRegPair = kFpReg + 1 + (kNeedS128RegPair && !kNeedI64RegPair),
  kFpRegPair = kFpReg + 1 + kNeedI64RegPair,
  kNoReg = kFpRegPair + kNeedS128RegPair,
  // +------------------+-------------------------------+
  // |                  |        kNeedI64RegPair        |
  // +------------------+---------------+---------------+
  // | kNeedS128RegPair |     true      |    false      |
  // +------------------+---------------+---------------+
  // |             true | 0,1,2,3,4 (a) | 0,1,3,2,3     |
  // |            false | 0,1,2,3,3 (b) | 0,1,2,2,2 (c) |
  // +------------------+---------------+---------------+
  // (a) arm
  // (b) ia32
  // (c) x64, arm64
};
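
// Example: in configuration (c) above (x64, arm64), neither register pair
// class is needed, so the enumerators evaluate to kGpReg = 0, kFpReg = 1 and
// kGpRegPair == kFpRegPair == kNoReg == 2, i.e. the 0,1,2,2,2 entry.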

static_assert(kNeedI64RegPair == (kGpRegPair != kNoReg),
              "kGpRegPair equals kNoReg if unused");
static_assert(kNeedS128RegPair == (kFpRegPair != kNoReg),
              "kFpRegPair equals kNoReg if unused");

enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };

static inline constexpr bool needs_gp_reg_pair(ValueType type) {
  return kNeedI64RegPair && type == kWasmI64;
}

static inline constexpr bool needs_fp_reg_pair(ValueType type) {
  return kNeedS128RegPair && type == kWasmS128;
}

static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
  switch (kind) {
    case ValueType::kF32:
    case ValueType::kF64:
      return kFpReg;
    case ValueType::kI32:
      return kGpReg;
    case ValueType::kI64:
      return kNeedI64RegPair ? kGpRegPair : kGpReg;
    case ValueType::kS128:
      return kNeedS128RegPair ? kFpRegPair : kFpReg;
    case ValueType::kRef:
    case ValueType::kOptRef:
      return kGpReg;
    default:
      return kNoReg;  // unsupported type
  }
}

static inline constexpr RegClass reg_class_for(ValueType type) {
  return reg_class_for(type.kind());
}
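
// Example: on a 32-bit target kNeedI64RegPair is true, so an i64 value maps
// to kGpRegPair and occupies two gp registers; on a 64-bit target
// reg_class_for(kWasmI64) is plain kGpReg.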

// Description of LiftoffRegister code encoding.
// This example uses the ARM architecture, which as of writing has:
// - 9 GP registers, requiring 4 bits
// - 13 FP registers, requiring 5 bits
// - kNeedI64RegPair is true
// - kNeedS128RegPair is true
// - thus, kBitsPerRegPair is 2 + 2 * 4 = 10
// - storage_t is uint16_t
// The table below illustrates how each RegClass is encoded, with brackets
// surrounding the bits which encode the register number.
//
// +----------------+------------------+
// | RegClass       | Example          |
// +----------------+------------------+
// | kGpReg (1)     | [00 0000   0000] |
// | kFpReg (2)     | [00 0000   1001] |
// | kGpRegPair (3) | 01 [0000] [0001] |
// | kFpRegPair (4) | 10  000[0  0010] |
// +----------------+------------------+
//
// gp and fp registers are encoded in the same index space, which means that
// code has to check for kGpRegPair and kFpRegPair before it can treat the code
// as a register code.
// (1) [0 .. kMaxGpRegCode] encodes gp registers
// (2) [kMaxGpRegCode + 1 .. kMaxGpRegCode + kMaxFpRegCode + 1] encodes fp
// registers, so in this example, 1001 is really fp register 0.
// (3) The second top bit is set for kGpRegPair, and the two gp registers are
// stuffed side by side in code. Note that this is not the second top bit of
// storage_t, since storage_t is larger than the number of meaningful bits we
// need for the encoding.
// (4) The top bit is set for kFpRegPair, and the fp register is stuffed into
// the bottom part of the code. Unlike (2), this is the fp register code itself
// (not sharing index space with gp), so in this example, it is fp register 2.
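//
// Worked example for the ARM configuration above, assuming r0/r1 carry gp
// codes 0/1 and d0/d2 carry fp codes 0/2:
//   LiftoffRegister(r0)  -> code 0b00'0000'0000
//   LiftoffRegister(d0)  -> code 0b00'0000'1001  (0 + kAfterMaxLiftoffGpRegCode)
//   ForPair(r1, r0)      -> code 0b01'0000'0001  (low nibble r1, next nibble r0)
//   ForFpPair(d2)        -> code 0b10'0000'0010  (fp code stored directly)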

// Maximum code of a gp cache register.
static constexpr int kMaxGpRegCode =
    8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
    base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs) - 1;
// Maximum code of an fp cache register.
static constexpr int kMaxFpRegCode =
    8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
    base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs) - 1;
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
    kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
static constexpr int kBitsPerLiftoffRegCode =
    32 - base::bits::CountLeadingZeros<uint32_t>(kAfterMaxLiftoffRegCode - 1);
static constexpr int kBitsPerGpRegCode =
    32 - base::bits::CountLeadingZeros<uint32_t>(kMaxGpRegCode);
static constexpr int kBitsPerFpRegCode =
    32 - base::bits::CountLeadingZeros<uint32_t>(kMaxFpRegCode);
// GpRegPair requires 1 extra bit, S128RegPair also needs an extra bit.
static constexpr int kBitsPerRegPair =
    (kNeedS128RegPair ? 2 : 1) + 2 * kBitsPerGpRegCode;
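
// Example: continuing the ARM configuration described above and assuming the
// highest gp cache register code is 8 and the highest fp cache register code
// is 12, these formulas give kMaxGpRegCode = 8, kAfterMaxLiftoffGpRegCode = 9,
// kAfterMaxLiftoffFpRegCode = 9 + 12 + 1 = 22, kBitsPerLiftoffRegCode = 5,
// kBitsPerGpRegCode = 4 and kBitsPerRegPair = 2 + 2 * 4 = 10.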

static_assert(2 * kBitsPerGpRegCode >= kBitsPerFpRegCode,
              "encoding for gp pair and fp pair collides");

class LiftoffRegister {
  static constexpr int needed_bits =
      std::max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
               kBitsPerLiftoffRegCode);
  using storage_t = std::conditional<
      needed_bits <= 8, uint8_t,
      std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;

  static_assert(8 * sizeof(storage_t) >= needed_bits,
                "chosen type is big enough");
  // Check for smallest required data type being chosen.
  // Special case for uint8_t as there are no smaller types.
  static_assert((8 * sizeof(storage_t) < 2 * needed_bits) ||
                    (sizeof(storage_t) == sizeof(uint8_t)),
                "chosen type is small enough");

 public:
  explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
    DCHECK_NE(0, kLiftoffAssemblerGpCacheRegs & reg.bit());
    DCHECK_EQ(reg, gp());
  }
  explicit LiftoffRegister(DoubleRegister reg)
      : LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
    DCHECK_NE(0, kLiftoffAssemblerFpCacheRegs & reg.bit());
    DCHECK_EQ(reg, fp());
  }

  static LiftoffRegister from_liftoff_code(int code) {
    LiftoffRegister reg{static_cast<storage_t>(code)};
    // Check that the code is correct by round-tripping through the
    // reg-class-specific constructor.
    DCHECK(
        (reg.is_gp() && code == LiftoffRegister{reg.gp()}.liftoff_code()) ||
        (reg.is_fp() && code == LiftoffRegister{reg.fp()}.liftoff_code()) ||
        (reg.is_gp_pair() &&
         code == ForPair(reg.low_gp(), reg.high_gp()).liftoff_code()) ||
        (reg.is_fp_pair() && code == ForFpPair(reg.low_fp()).liftoff_code()));
    return reg;
  }

  static LiftoffRegister from_code(RegClass rc, int code) {
    switch (rc) {
      case kGpReg:
        return LiftoffRegister(Register::from_code(code));
      case kFpReg:
        return LiftoffRegister(DoubleRegister::from_code(code));
      default:
        UNREACHABLE();
    }
  }

  // Shifts the register code depending on the type before converting to a
  // LiftoffRegister.
  static LiftoffRegister from_external_code(RegClass rc, ValueType type,
                                            int code) {
    if (!kSimpleFPAliasing && type == kWasmF32) {
      // Liftoff assumes a one-to-one mapping between float registers and
      // double registers, and so does not distinguish between f32 and f64
      // registers. The f32 register code must therefore be halved in order
      // to pass the f64 code to Liftoff.
      DCHECK_EQ(0, code % 2);
      return LiftoffRegister::from_code(rc, code >> 1);
    }
    if (kNeedS128RegPair && type == kWasmS128) {
      // Similarly for double registers and SIMD registers, the SIMD code
      // needs to be doubled to pass the f64 code to Liftoff.
      return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
    }
    return LiftoffRegister::from_code(rc, code);
  }
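
  // Example (for a target without simple FP aliasing, e.g. arm): an f32 value
  // in the float register with code 4 (s4, which aliases d2) arrives as
  // external code 4 and is halved to Liftoff code 2, while an s128 value in
  // the SIMD register with code 1 (q1, aliasing d2+d3) arrives as external
  // code 1 and is doubled to the fp pair starting at d2.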

  static LiftoffRegister ForPair(Register low, Register high) {
    DCHECK(kNeedI64RegPair);
    DCHECK_NE(low, high);
    storage_t combined_code = low.code() | high.code() << kBitsPerGpRegCode |
                              1 << (2 * kBitsPerGpRegCode);
    return LiftoffRegister(combined_code);
  }

  static LiftoffRegister ForFpPair(DoubleRegister low) {
    DCHECK(kNeedS128RegPair);
    DCHECK_EQ(0, low.code() % 2);
    storage_t combined_code = low.code() | 2 << (2 * kBitsPerGpRegCode);
    return LiftoffRegister(combined_code);
  }
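
  // Example: with kBitsPerGpRegCode == 4 (the arm configuration above),
  // ForPair(r0, r1) packs the codes as 0 | 1 << 4 | 1 << 8 == 0x110, and
  // ForFpPair(d2) yields 2 | 2 << 8 == 0x202; the tag bits at positions 8 and
  // 9 are what is_gp_pair() and is_fp_pair() below test for.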

  constexpr bool is_pair() const {
    return (kNeedI64RegPair || kNeedS128RegPair) &&
           (code_ & (3 << (2 * kBitsPerGpRegCode)));
  }

  constexpr bool is_gp_pair() const {
    return kNeedI64RegPair && (code_ & (1 << (2 * kBitsPerGpRegCode))) != 0;
  }
  constexpr bool is_fp_pair() const {
    return kNeedS128RegPair && (code_ & (2 << (2 * kBitsPerGpRegCode))) != 0;
  }
  constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
  constexpr bool is_fp() const {
    return code_ >= kAfterMaxLiftoffGpRegCode &&
           code_ < kAfterMaxLiftoffFpRegCode;
  }

  LiftoffRegister low() const {
    // Common case for most archs where only gp pair supported.
    if (!kNeedS128RegPair) return LiftoffRegister(low_gp());
    return is_gp_pair() ? LiftoffRegister(low_gp()) : LiftoffRegister(low_fp());
  }

  LiftoffRegister high() const {
    // Common case for most archs where only gp pair supported.
    if (!kNeedS128RegPair) return LiftoffRegister(high_gp());
    return is_gp_pair() ? LiftoffRegister(high_gp())
                        : LiftoffRegister(high_fp());
  }

  Register low_gp() const {
    DCHECK(is_gp_pair());
    static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
    return Register::from_code(code_ & kCodeMask);
  }

  Register high_gp() const {
    DCHECK(is_gp_pair());
    static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
    return Register::from_code((code_ >> kBitsPerGpRegCode) & kCodeMask);
  }

  DoubleRegister low_fp() const {
    DCHECK(is_fp_pair());
    static constexpr storage_t kCodeMask = (1 << kBitsPerFpRegCode) - 1;
    return DoubleRegister::from_code(code_ & kCodeMask);
  }

  DoubleRegister high_fp() const {
    DCHECK(is_fp_pair());
    static constexpr storage_t kCodeMask = (1 << kBitsPerFpRegCode) - 1;
    return DoubleRegister::from_code((code_ & kCodeMask) + 1);
  }

  Register gp() const {
    DCHECK(is_gp());
    return Register::from_code(code_);
  }

  DoubleRegister fp() const {
    DCHECK(is_fp());
    return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
  }

  int liftoff_code() const {
    STATIC_ASSERT(sizeof(int) >= sizeof(storage_t));
    return static_cast<int>(code_);
  }

  RegClass reg_class() const {
    return is_fp_pair() ? kFpRegPair
                        : is_gp_pair() ? kGpRegPair : is_gp() ? kGpReg : kFpReg;
  }

  bool operator==(const LiftoffRegister other) const {
    DCHECK_EQ(is_gp_pair(), other.is_gp_pair());
    DCHECK_EQ(is_fp_pair(), other.is_fp_pair());
    return code_ == other.code_;
  }
  bool operator!=(const LiftoffRegister other) const {
    DCHECK_EQ(is_gp_pair(), other.is_gp_pair());
    DCHECK_EQ(is_fp_pair(), other.is_fp_pair());
    return code_ != other.code_;
  }
  bool overlaps(const LiftoffRegister other) const {
    if (is_pair()) return low().overlaps(other) || high().overlaps(other);
    if (other.is_pair()) return *this == other.low() || *this == other.high();
    return *this == other;
  }

 private:
  storage_t code_;

  explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegister);

inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
  if (reg.is_gp_pair()) {
    return os << "<" << reg.low_gp() << "+" << reg.high_gp() << ">";
  } else if (reg.is_fp_pair()) {
    return os << "<" << reg.low_fp() << "+" << reg.high_fp() << ">";
  } else if (reg.is_gp()) {
    return os << reg.gp();
  } else {
    return os << reg.fp();
  }
}

class LiftoffRegList {
 public:
  class Iterator;

  static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
  static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
  using storage_t = std::conditional<
      use_u16, uint16_t,
      std::conditional<use_u32, uint32_t, uint64_t>::type>::type;

  static constexpr storage_t kGpMask = storage_t{kLiftoffAssemblerGpCacheRegs};
  static constexpr storage_t kFpMask = storage_t{kLiftoffAssemblerFpCacheRegs}
                                       << kAfterMaxLiftoffGpRegCode;
  // Sets all even numbered fp registers.
  static constexpr uint64_t kEvenFpSetMask = uint64_t{0x5555555555555555}
                                             << kAfterMaxLiftoffGpRegCode;

  constexpr LiftoffRegList() = default;

  Register set(Register reg) { return set(LiftoffRegister(reg)).gp(); }
  DoubleRegister set(DoubleRegister reg) {
    return set(LiftoffRegister(reg)).fp();
  }

  LiftoffRegister set(LiftoffRegister reg) {
    if (reg.is_pair()) {
      regs_ |= storage_t{1} << reg.low().liftoff_code();
      regs_ |= storage_t{1} << reg.high().liftoff_code();
    } else {
      regs_ |= storage_t{1} << reg.liftoff_code();
    }
    return reg;
  }

  LiftoffRegister clear(LiftoffRegister reg) {
    if (reg.is_pair()) {
      regs_ &= ~(storage_t{1} << reg.low().liftoff_code());
      regs_ &= ~(storage_t{1} << reg.high().liftoff_code());
    } else {
      regs_ &= ~(storage_t{1} << reg.liftoff_code());
    }
    return reg;
  }

  bool has(LiftoffRegister reg) const {
    if (reg.is_pair()) {
      DCHECK_EQ(has(reg.low()), has(reg.high()));
      reg = reg.low();
    }
    return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
  }
  bool has(Register reg) const { return has(LiftoffRegister(reg)); }
  bool has(DoubleRegister reg) const { return has(LiftoffRegister(reg)); }

  constexpr bool is_empty() const { return regs_ == 0; }

  constexpr unsigned GetNumRegsSet() const {
    return base::bits::CountPopulation(regs_);
  }

  constexpr LiftoffRegList operator&(const LiftoffRegList other) const {
    return LiftoffRegList(regs_ & other.regs_);
  }

  constexpr LiftoffRegList operator|(const LiftoffRegList other) const {
    return LiftoffRegList(regs_ | other.regs_);
  }

  constexpr LiftoffRegList GetAdjacentFpRegsSet() const {
    // And regs_ with a right shifted version of itself, so reg[i] is set only
    // if reg[i+1] is set. We only care about the even fp registers.
    storage_t available = (regs_ >> 1) & regs_ & kEvenFpSetMask;
    return LiftoffRegList(available);
  }
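
  // Example: if this list contains the adjacent fp registers d2 and d3
  // (liftoff codes kAfterMaxLiftoffGpRegCode + 2 and + 3), the right shift
  // moves the d3 bit onto the d2 bit, the AND keeps the d2 bit set, and
  // kEvenFpSetMask drops odd-numbered starts, so d2 is reported as the start
  // of an adjacent pair.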

  constexpr bool HasAdjacentFpRegsSet() const {
    return !GetAdjacentFpRegsSet().is_empty();
  }

  constexpr bool operator==(const LiftoffRegList other) const {
    return regs_ == other.regs_;
  }
  constexpr bool operator!=(const LiftoffRegList other) const {
    return regs_ != other.regs_;
  }

  LiftoffRegister GetFirstRegSet() const {
    DCHECK(!is_empty());
    int first_code = base::bits::CountTrailingZeros(regs_);
    return LiftoffRegister::from_liftoff_code(first_code);
  }

  LiftoffRegister GetLastRegSet() const {
    DCHECK(!is_empty());
    int last_code =
        8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
    return LiftoffRegister::from_liftoff_code(last_code);
  }

  LiftoffRegList MaskOut(const LiftoffRegList mask) const {
    // Masking out is guaranteed to return a correct reg list, hence no checks
    // needed.
    return FromBits(regs_ & ~mask.regs_);
  }

  RegList GetGpList() { return regs_ & kGpMask; }
  RegList GetFpList() { return (regs_ & kFpMask) >> kAfterMaxLiftoffGpRegCode; }

  inline Iterator begin() const;
  inline Iterator end() const;

  static LiftoffRegList FromBits(storage_t bits) {
    DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
    return LiftoffRegList(bits);
  }

  template <storage_t bits>
  static constexpr LiftoffRegList FromBits() {
    static_assert(bits == (bits & (kGpMask | kFpMask)), "illegal reg list");
    return LiftoffRegList(bits);
  }

  template <typename... Regs>
  static LiftoffRegList ForRegs(Regs... regs) {
    LiftoffRegList list;
    for (LiftoffRegister reg : {LiftoffRegister(regs)...}) list.set(reg);
    return list;
  }
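
  // Example usage: LiftoffRegList::ForRegs(reg_a, reg_b) accepts any mix of
  // Register, DoubleRegister and LiftoffRegister arguments (reg_a and reg_b
  // are placeholders) and returns a list with the corresponding bits set;
  // register pairs contribute both of their halves.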

 private:
  storage_t regs_ = 0;

  // Unchecked constructor. Only use for valid bits.
  explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegList);

static constexpr LiftoffRegList kGpCacheRegList =
    LiftoffRegList::FromBits<LiftoffRegList::kGpMask>();
static constexpr LiftoffRegList kFpCacheRegList =
    LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();

class LiftoffRegList::Iterator {
 public:
  LiftoffRegister operator*() { return remaining_.GetFirstRegSet(); }
  Iterator& operator++() {
    remaining_.clear(remaining_.GetFirstRegSet());
    return *this;
  }
  bool operator==(Iterator other) { return remaining_ == other.remaining_; }
  bool operator!=(Iterator other) { return remaining_ != other.remaining_; }

 private:
  explicit Iterator(LiftoffRegList remaining) : remaining_(remaining) {}
  friend class LiftoffRegList;

  LiftoffRegList remaining_;
};

LiftoffRegList::Iterator LiftoffRegList::begin() const {
  return Iterator{*this};
}
LiftoffRegList::Iterator LiftoffRegList::end() const {
  return Iterator{LiftoffRegList{}};
}

static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
  return rc == kFpReg ? kFpCacheRegList : kGpCacheRegList;
}

inline std::ostream& operator<<(std::ostream& os, LiftoffRegList reglist) {
  os << "{";
  for (bool first = true; !reglist.is_empty(); first = false) {
    LiftoffRegister reg = reglist.GetFirstRegSet();
    reglist.clear(reg);
    os << (first ? "" : ", ") << reg;
  }
  return os << "}";
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_BASELINE_LIFTOFF_REGISTER_H_