//===- subzero/src/IceOperand.h - High-level operands -----------*- C++ -*-===//
//
//                        The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Declares the Operand class and its target-independent subclasses.
///
/// The main classes are Variable, which represents an LLVM variable that is
/// either register- or stack-allocated, and the Constant hierarchy, which
/// represents integer, floating-point, and/or symbolic constants.
///
//===----------------------------------------------------------------------===//

#ifndef SUBZERO_SRC_ICEOPERAND_H
#define SUBZERO_SRC_ICEOPERAND_H

#include "IceDefs.h"
#include "IceCfg.h"
#include "IceGlobalContext.h"
#include "IceStringPool.h"
#include "IceTypes.h"

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"

#include <limits>
#include <type_traits>

namespace Ice {

class Operand {
  Operand() = delete;
  Operand(const Operand &) = delete;
  Operand &operator=(const Operand &) = delete;

public:
  static constexpr size_t MaxTargetKinds = 10;
  enum OperandKind {
    kConst_Base,
    kConstInteger32,
    kConstInteger64,
    kConstFloat,
    kConstDouble,
    kConstRelocatable,
    kConstUndef,
    kConst_Target, // leave space for target-specific constant kinds
    kConst_Max = kConst_Target + MaxTargetKinds,
    kVariable,
    kVariable64On32,
    kVariableVecOn32,
    kVariableBoolean,
    kVariable_Target, // leave space for target-specific variable kinds
    kVariable_Max = kVariable_Target + MaxTargetKinds,
    // Target-specific operand classes use kTarget as the starting point for
    // their Kind enum space. Note that the value-spaces are shared across
    // targets. To avoid confusion over the definition of shared values, an
    // object specific to one target should never be passed to a different
    // target.
    kTarget,
    kTarget_Max = std::numeric_limits<uint8_t>::max(),
  };
  static_assert(kTarget <= kTarget_Max, "Must not be above max.");
  OperandKind getKind() const { return Kind; }
  Type getType() const { return Ty; }

  /// Every Operand keeps an array of the Variables referenced in the operand.
  /// This is so that the liveness operations can get quick access to the
  /// variables of interest, without having to dig so far into the operand.
  SizeT getNumVars() const { return NumVars; }
  Variable *getVar(SizeT I) const {
    assert(I < getNumVars());
    return Vars[I];
  }
  virtual void emit(const Cfg *Func) const = 0;

  /// \name Dumping functions.
  /// @{

  /// The dump(Func,Str) implementation must be sure to handle the situation
  /// where Func==nullptr.
  virtual void dump(const Cfg *Func, Ostream &Str) const = 0;
  void dump(const Cfg *Func) const {
    if (!BuildDefs::dump())
      return;
    assert(Func);
    dump(Func, Func->getContext()->getStrDump());
  }
  void dump(Ostream &Str) const {
    if (BuildDefs::dump())
      dump(nullptr, Str);
  }
  /// @}

  virtual ~Operand() = default;

  virtual Variable *asBoolean() { return nullptr; }

  virtual SizeT hashValue() const {
    llvm::report_fatal_error("Tried to hash unsupported operand type : " +
                             std::to_string(Kind));
    return 0;
  }

protected:
  Operand(OperandKind Kind, Type Ty) : Ty(Ty), Kind(Kind) {
    // It is undefined behavior to have a larger value in the enum.
    assert(Kind <= kTarget_Max);
  }

  const Type Ty;
  const OperandKind Kind;
  /// Vars and NumVars are initialized by the derived class.
  SizeT NumVars = 0;
  Variable **Vars = nullptr;
};
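
// Illustrative sketch (editor's note, not part of the original header): the
// Vars/NumVars array is what liveness code walks instead of inspecting each
// operand kind. A pass over an instruction's source operand might look like:
//
//   void markUses(const Operand *Src) {   // markUses is a hypothetical helper
//     for (SizeT I = 0; I < Src->getNumVars(); ++I) {
//       Variable *Var = Src->getVar(I);
//       // ... record Var as live at this instruction ...
//     }
//   }
//
// The real liveness code lives in the liveness analysis sources, not here.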

template <class StreamType>
inline StreamType &operator<<(StreamType &Str, const Operand &Op) {
  Op.dump(Str);
  return Str;
}

/// Constant is the abstract base class for constants. All constants are
/// allocated from a global arena and are pooled.
class Constant : public Operand {
  Constant() = delete;
  Constant(const Constant &) = delete;
  Constant &operator=(const Constant &) = delete;

public:
  // Declare the lookup counter to take minimal space in a non-DUMP build.
  using CounterType =
      std::conditional<BuildDefs::dump(), uint64_t, uint8_t>::type;
  void emit(const Cfg *Func) const override { emit(Func->getTarget()); }
  virtual void emit(TargetLowering *Target) const = 0;

  static bool classof(const Operand *Operand) {
    OperandKind Kind = Operand->getKind();
    return Kind >= kConst_Base && Kind <= kConst_Max;
  }

  const GlobalString getLabelName() const { return LabelName; }

  /// Judges whether this immediate should be randomized or pooled. By default
  /// this returns false; only constant integers should truly go through this
  /// method.
  virtual bool shouldBeRandomizedOrPooled() const { return false; }

  bool getShouldBePooled() const { return ShouldBePooled; }

  // This should be thread-safe because the constant pool lock is acquired
  // before the method is invoked.
  void updateLookupCount() {
    if (!BuildDefs::dump())
      return;
    ++LookupCount;
  }
  CounterType getLookupCount() const { return LookupCount; }
  SizeT hashValue() const override { return 0; }

protected:
  Constant(OperandKind Kind, Type Ty) : Operand(Kind, Ty) {
    Vars = nullptr;
    NumVars = 0;
  }
  /// Set the ShouldBePooled field to the proper value after the object is
  /// fully initialized.
  void initShouldBePooled();
  GlobalString LabelName;
  /// Whether we should pool this constant. Usually Float/Double and pooled
  /// Integers should be flagged true.  Ideally this field would be const, but
  /// it can only be initialized after the subclass is fully constructed.
  bool ShouldBePooled = false;
  /// Note: If ShouldBePooled is ever removed from the base class, we will want
  /// to completely disable LookupCount in a non-DUMP build to save space.
  CounterType LookupCount = 0;
};
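
// Illustrative sketch (editor's note): constants are pooled in the
// GlobalContext, so repeated requests for the same value are expected to
// return the same Constant object, with each pool hit bumping LookupCount via
// updateLookupCount() while the pool lock is held. In a non-DUMP build,
// CounterType shrinks to uint8_t and updateLookupCount() returns early, so
// the counter costs essentially nothing.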

/// ConstantPrimitive<> wraps a primitive type.
template <typename T, Operand::OperandKind K>
class ConstantPrimitive : public Constant {
  ConstantPrimitive() = delete;
  ConstantPrimitive(const ConstantPrimitive &) = delete;
  ConstantPrimitive &operator=(const ConstantPrimitive &) = delete;

public:
  using PrimType = T;

  static ConstantPrimitive *create(GlobalContext *Ctx, Type Ty,
                                   PrimType Value) {
    auto *Const =
        new (Ctx->allocate<ConstantPrimitive>()) ConstantPrimitive(Ty, Value);
    Const->initShouldBePooled();
    if (Const->getShouldBePooled())
      Const->initName(Ctx);
    return Const;
  }
  PrimType getValue() const { return Value; }
  using Constant::emit;
  void emit(TargetLowering *Target) const final;
  using Constant::dump;
  void dump(const Cfg *, Ostream &Str) const override {
    if (BuildDefs::dump())
      Str << getValue();
  }

  static bool classof(const Operand *Operand) {
    return Operand->getKind() == K;
  }

  SizeT hashValue() const override { return std::hash<PrimType>()(Value); }

  bool shouldBeRandomizedOrPooled() const override { return false; }

private:
  ConstantPrimitive(Type Ty, PrimType Value) : Constant(K, Ty), Value(Value) {}

  void initName(GlobalContext *Ctx) {
    std::string Buffer;
    llvm::raw_string_ostream Str(Buffer);
    constexpr bool IsCompact = !BuildDefs::dump();
    if (IsCompact) {
      switch (getType()) {
      case IceType_f32:
        Str << "$F";
        break;
      case IceType_f64:
        Str << "$D";
        break;
      default:
        // For constant pooling diversification
        Str << ".L$" << getType() << "$";
        break;
      }
    } else {
      Str << ".L$" << getType() << "$";
    }
    // Print hex characters byte by byte, starting from the most significant
    // byte.  NOTE: This ordering assumes Subzero runs on a little-endian
    // platform, which means the label names may differ depending on the
    // endianness of the platform where Subzero runs.
    for (unsigned i = 0; i < sizeof(Value); ++i) {
      constexpr unsigned HexWidthChars = 2;
      unsigned Offset = sizeof(Value) - 1 - i;
      Str << llvm::format_hex_no_prefix(
          *(Offset + (const unsigned char *)&Value), HexWidthChars);
    }
    // For a floating-point value in DecorateAsm mode, also append the value in
    // human-readable sprintf form, changing '+' to 'p' and '-' to 'm' to
    // maintain valid asm labels.
    if (BuildDefs::dump() && std::is_floating_point<PrimType>::value &&
        getFlags().getDecorateAsm()) {
      char Buf[30];
      snprintf(Buf, llvm::array_lengthof(Buf), "$%g", (double)Value);
      for (unsigned i = 0; i < llvm::array_lengthof(Buf) && Buf[i]; ++i) {
        if (Buf[i] == '-')
          Buf[i] = 'm';
        else if (Buf[i] == '+')
          Buf[i] = 'p';
      }
      Str << Buf;
    }
    LabelName = GlobalString::createWithString(Ctx, Str.str());
  }

  const PrimType Value;
};

using ConstantInteger32 = ConstantPrimitive<int32_t, Operand::kConstInteger32>;
using ConstantInteger64 = ConstantPrimitive<int64_t, Operand::kConstInteger64>;
using ConstantFloat = ConstantPrimitive<float, Operand::kConstFloat>;
using ConstantDouble = ConstantPrimitive<double, Operand::kConstDouble>;
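
// Illustrative sketch (editor's note): the aliases above are ordinary
// ConstantPrimitive instantiations, so a 32-bit integer constant could in
// principle be created directly as
//
//   ConstantInteger32 *C =
//       ConstantInteger32::create(Ctx, IceType_i32, 42);
//   int32_t V = C->getValue(); // 42
//
// In practice constants are obtained through the GlobalContext's pooling
// interface rather than by calling create() at use sites.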

template <>
inline void ConstantInteger32::dump(const Cfg *, Ostream &Str) const {
  if (!BuildDefs::dump())
    return;
  if (getType() == IceType_i1)
    Str << (getValue() ? "true" : "false");
  else
    Str << static_cast<int32_t>(getValue());
}

// =========== Immediate Randomization and Pooling routines ==============
// Specialization of the template member function for ConstantInteger32
// TODO(stichnot): try to move this specialization into a target-specific file.
template <> inline bool ConstantInteger32::shouldBeRandomizedOrPooled() const {
  uint32_t Threshold = getFlags().getRandomizeAndPoolImmediatesThreshold();
  if (getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None)
    return false;
  if (getType() != IceType_i32 && getType() != IceType_i16 &&
      getType() != IceType_i8)
    return false;
  // The following checks whether the signed representation of Value falls
  // outside the range -Threshold/2 .. +Threshold/2; only such values are
  // candidates for randomization or pooling.
  bool largerThanThreshold = Threshold / 2 + Value >= Threshold;
  return largerThanThreshold;
}
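
// Worked example (editor's note): with Threshold = 0x400 (1024), the unsigned
// expression Threshold / 2 + Value behaves as follows:
//   Value =  100:  512 +  100 =  612 < 1024   -> false (leave the immediate)
//   Value =  600:  512 +  600 = 1112 >= 1024  -> true  (randomize or pool)
//   Value = -600:  512 + (uint32_t)-600 wraps to a huge value -> true
// so only immediates within roughly +/- Threshold/2 are left untouched.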

template <>
inline void ConstantInteger64::dump(const Cfg *, Ostream &Str) const {
  if (!BuildDefs::dump())
    return;
  assert(getType() == IceType_i64);
  Str << static_cast<int64_t>(getValue());
}

/// RelocOffset allows symbolic references in ConstantRelocatables' offsets,
/// e.g., 8 + LabelOffset, where LabelOffset is the location (code or data)
/// of a Label that is only determinable during ELF emission.
class RelocOffset final {
  RelocOffset(const RelocOffset &) = delete;
  RelocOffset &operator=(const RelocOffset &) = delete;

public:
  template <typename T> static RelocOffset *create(T *AllocOwner) {
    return new (AllocOwner->template allocate<RelocOffset>()) RelocOffset();
  }

  static RelocOffset *create(GlobalContext *Ctx, RelocOffsetT Value) {
    return new (Ctx->allocate<RelocOffset>()) RelocOffset(Value);
  }

  void setSubtract(bool Value) { Subtract = Value; }
  bool hasOffset() const { return HasOffset; }

  RelocOffsetT getOffset() const {
    assert(HasOffset);
    return Offset;
  }

  void setOffset(const RelocOffsetT Value) {
    assert(!HasOffset);
    if (Subtract) {
      assert(Value != std::numeric_limits<RelocOffsetT>::lowest());
      Offset = -Value;
    } else {
      Offset = Value;
    }
    HasOffset = true;
  }

private:
  RelocOffset() = default;
  explicit RelocOffset(RelocOffsetT Offset) { setOffset(Offset); }

  bool Subtract = false;
  bool HasOffset = false;
  RelocOffsetT Offset;
};

/// RelocatableTuple bundles the parameters that are used to construct a
/// ConstantRelocatable. It is done this way so that ConstantRelocatable can fit
/// into the global constant pool template mechanism.
class RelocatableTuple {
  RelocatableTuple() = delete;
  RelocatableTuple &operator=(const RelocatableTuple &) = delete;

public:
  RelocatableTuple(const RelocOffsetT Offset,
                   const RelocOffsetArray &OffsetExpr, GlobalString Name)
      : Offset(Offset), OffsetExpr(OffsetExpr), Name(Name) {}

  RelocatableTuple(const RelocOffsetT Offset,
                   const RelocOffsetArray &OffsetExpr, GlobalString Name,
                   const std::string &EmitString)
      : Offset(Offset), OffsetExpr(OffsetExpr), Name(Name),
        EmitString(EmitString) {}

  RelocatableTuple(const RelocatableTuple &) = default;

  const RelocOffsetT Offset;
  const RelocOffsetArray OffsetExpr;
  const GlobalString Name;
  const std::string EmitString;
};

bool operator==(const RelocatableTuple &A, const RelocatableTuple &B);

/// ConstantRelocatable represents a symbolic constant combined with a fixed
/// offset.
class ConstantRelocatable : public Constant {
  ConstantRelocatable() = delete;
  ConstantRelocatable(const ConstantRelocatable &) = delete;
  ConstantRelocatable &operator=(const ConstantRelocatable &) = delete;

public:
  template <typename T>
  static ConstantRelocatable *create(T *AllocOwner, Type Ty,
                                     const RelocatableTuple &Tuple) {
    return new (AllocOwner->template allocate<ConstantRelocatable>())
        ConstantRelocatable(Ty, Tuple.Offset, Tuple.OffsetExpr, Tuple.Name,
                            Tuple.EmitString);
  }

  RelocOffsetT getOffset() const {
    RelocOffsetT Ret = Offset;
    for (const auto *const OffsetReloc : OffsetExpr) {
      Ret += OffsetReloc->getOffset();
    }
    return Ret;
  }

  const std::string &getEmitString() const { return EmitString; }

  GlobalString getName() const { return Name; }
  using Constant::emit;
  void emit(TargetLowering *Target) const final;
  void emitWithoutPrefix(const TargetLowering *Target,
                         const char *Suffix = "") const;
  using Constant::dump;
  void dump(const Cfg *Func, Ostream &Str) const override;

  static bool classof(const Operand *Operand) {
    OperandKind Kind = Operand->getKind();
    return Kind == kConstRelocatable;
  }

private:
  ConstantRelocatable(Type Ty, const RelocOffsetT Offset,
                      const RelocOffsetArray &OffsetExpr, GlobalString Name,
                      const std::string &EmitString)
      : Constant(kConstRelocatable, Ty), Offset(Offset), OffsetExpr(OffsetExpr),
        Name(Name), EmitString(EmitString) {}

  const RelocOffsetT Offset;         /// fixed, known offset to add
  const RelocOffsetArray OffsetExpr; /// fixed, unknown offset to add
  const GlobalString Name;           /// optional for debug/dump
  const std::string EmitString;      /// optional for textual emission
};
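
// Illustrative sketch (editor's note): a relocatable such as "Symbol + 8 +
// LabelOffset", where LabelOffset is only known at ELF emission time, could
// be assembled roughly as follows (Ctx, SymName, and the exact way the
// RelocOffsetArray is populated are hypothetical placeholders):
//
//   RelocOffset *LabelOffset = RelocOffset::create(Ctx); // filled in later
//   RelocOffsetArray OffsetExpr{LabelOffset};
//   RelocatableTuple Tuple(/*Offset=*/8, OffsetExpr, SymName);
//   auto *CR = ConstantRelocatable::create(Ctx, IceType_i32, Tuple);
//   // Once emission calls LabelOffset->setOffset(...), CR->getOffset()
//   // returns 8 plus the label's location.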

/// ConstantUndef represents an unspecified bit pattern. Although it is legal to
/// lower ConstantUndef to any value, backends should try to make code
/// generation deterministic by lowering ConstantUndefs to 0.
class ConstantUndef : public Constant {
  ConstantUndef() = delete;
  ConstantUndef(const ConstantUndef &) = delete;
  ConstantUndef &operator=(const ConstantUndef &) = delete;

public:
  static ConstantUndef *create(GlobalContext *Ctx, Type Ty) {
    return new (Ctx->allocate<ConstantUndef>()) ConstantUndef(Ty);
  }

  using Constant::emit;
  void emit(TargetLowering *Target) const final;
  using Constant::dump;
  void dump(const Cfg *, Ostream &Str) const override {
    if (BuildDefs::dump())
      Str << "undef";
  }

  static bool classof(const Operand *Operand) {
    return Operand->getKind() == kConstUndef;
  }

private:
  ConstantUndef(Type Ty) : Constant(kConstUndef, Ty) {}
};

/// RegNumT is for holding target-specific register numbers, plus the sentinel
/// value if no register is assigned. Its public ctor allows direct use of enum
/// values, such as RegNumT(Reg_eax), but not things like RegNumT(Reg_eax+1).
/// This is to try to prevent inappropriate assumptions about enum ordering. If
/// needed, the fromInt() method can be used, such as when a RegNumT is based
/// on a bitvector index.
class RegNumT {
public:
  using BaseType = uint32_t;
  RegNumT() = default;
  RegNumT(const RegNumT &) = default;
  template <typename AnyEnum>
  RegNumT(AnyEnum Value,
          typename std::enable_if<std::is_enum<AnyEnum>::value, int>::type = 0)
      : Value(Value) {
    validate(Value);
  }
  RegNumT &operator=(const RegNumT &) = default;
  operator unsigned() const { return Value; }
  /// Asserts that the register is valid, i.e. not NoRegisterValue.  Note that
  /// the ctor already does the target-specific limit check.
  void assertIsValid() const { assert(Value != NoRegisterValue); }
  static RegNumT fromInt(BaseType Value) { return RegNumT(Value); }
  /// Marks cases that inappropriately add/subtract RegNumT values, and
  /// therefore need to be fixed because they make assumptions about register
  /// enum value ordering.  TODO(stichnot): Remove fixme() as soon as all
  /// current uses are fixed/removed.
  static RegNumT fixme(BaseType Value) { return RegNumT(Value); }
  /// The target's staticInit() method should call setLimit() to register the
  /// upper bound of allowable values.
  static void setLimit(BaseType Value) {
    // Make sure it's only called once.
    assert(Limit == 0);
    assert(Value != 0);
    Limit = Value;
  }
  // Define NoRegisterValue as an enum value so that it can be used as an
  // argument for the public ctor if desired.
  enum : BaseType { NoRegisterValue = std::numeric_limits<BaseType>::max() };

  bool hasValue() const { return Value != NoRegisterValue; }
  bool hasNoValue() const { return !hasValue(); }

private:
  BaseType Value = NoRegisterValue;
  static BaseType Limit;
  /// Private ctor called only by fromInt() and fixme().
  RegNumT(BaseType Value) : Value(Value) { validate(Value); }
  /// The ctor calls this to validate against the target-supplied limit.
  static void validate(BaseType Value) {
    (void)Value;
    assert(Value == NoRegisterValue || Value < Limit);
  }
  /// Disallow operators that inappropriately make assumptions about register
  /// enum value ordering.
  bool operator<(const RegNumT &) = delete;
  bool operator<=(const RegNumT &) = delete;
  bool operator>(const RegNumT &) = delete;
  bool operator>=(const RegNumT &) = delete;
};
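
// Illustrative sketch (editor's note): given some target register enum value
// Reg_eax (hypothetical here), the intended usage is
//
//   RegNumT RegNum(Reg_eax);                            // OK: enum value
//   RegNumT FromBV = RegNumT::fromInt(BitVectorIndex);  // OK: explicit escape
//   // RegNumT Bad(Reg_eax + 1);  // rejected: arithmetic yields a plain int,
//   //                            // which only the private ctor accepts
//
// The comparison operators are deleted for the same reason: the ordering of
// register enum values is a target detail that client code must not rely on.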

/// RegNumBVIter wraps SmallBitVector so that instead of this pattern:
///
///   for (int i = V.find_first(); i != -1; i = V.find_next(i)) {
///     RegNumT RegNum = RegNumT::fromInt(i);
///     ...
///   }
///
/// this cleaner pattern can be used:
///
///   for (RegNumT RegNum : RegNumBVIter(V)) {
///     ...
///   }
template <class B> class RegNumBVIterImpl {
  using T = B;
  static constexpr int Sentinel = -1;
  RegNumBVIterImpl() = delete;

public:
  class Iterator {
    Iterator() = delete;
    Iterator &operator=(const Iterator &) = delete;

  public:
    explicit Iterator(const T &V) : V(V), Current(V.find_first()) {}
    Iterator(const T &V, int Value) : V(V), Current(Value) {}
    Iterator(const Iterator &) = default;
    RegNumT operator*() {
      assert(Current != Sentinel);
      return RegNumT::fromInt(Current);
    }
    Iterator &operator++() {
      assert(Current != Sentinel);
      Current = V.find_next(Current);
      return *this;
    }
    bool operator!=(Iterator &Other) { return Current != Other.Current; }

  private:
    const T &V;
    int Current;
  };

  RegNumBVIterImpl(const RegNumBVIterImpl &) = default;
  RegNumBVIterImpl &operator=(const RegNumBVIterImpl &) = delete;
  explicit RegNumBVIterImpl(const T &V) : V(V) {}
  Iterator begin() { return Iterator(V); }
  Iterator end() { return Iterator(V, Sentinel); }

private:
  const T &V;
};

template <class B> RegNumBVIterImpl<B> RegNumBVIter(const B &BV) {
  return RegNumBVIterImpl<B>(BV);
}

/// RegWeight is a wrapper for a uint32_t weight value, with a special value
/// that represents infinite weight, and an addWeight() method that ensures that
/// W+infinity=infinity.
class RegWeight {
public:
  using BaseType = uint32_t;
  RegWeight() = default;
  explicit RegWeight(BaseType Weight) : Weight(Weight) {}
  RegWeight(const RegWeight &) = default;
  RegWeight &operator=(const RegWeight &) = default;
  constexpr static BaseType Inf = ~0; /// Force regalloc to give a register
  constexpr static BaseType Zero = 0; /// Force regalloc NOT to give a register
  constexpr static BaseType Max = Inf - 1; /// Max natural weight.
  void addWeight(BaseType Delta) {
    if (Delta == Inf)
      Weight = Inf;
    else if (Weight != Inf)
      if (Utils::add_overflow(Weight, Delta, &Weight) || Weight == Inf)
        Weight = Max;
  }
  void addWeight(const RegWeight &Other) { addWeight(Other.Weight); }
  void setWeight(BaseType Val) { Weight = Val; }
  BaseType getWeight() const { return Weight; }

private:
  BaseType Weight = 0;
};
Ostream &operator<<(Ostream &Str, const RegWeight &W);
bool operator<(const RegWeight &A, const RegWeight &B);
bool operator<=(const RegWeight &A, const RegWeight &B);
bool operator==(const RegWeight &A, const RegWeight &B);
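
// Illustrative sketch (editor's note): addWeight() saturates rather than
// wrapping, so accumulated use weights stay meaningful:
//
//   RegWeight W(5);
//   W.addWeight(7);              // W.getWeight() == 12
//   W.addWeight(RegWeight::Inf); // W.getWeight() == RegWeight::Inf
//   RegWeight X(RegWeight::Max);
//   X.addWeight(1);              // saturates at RegWeight::Max, not Inf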

/// LiveRange is a set of instruction number intervals representing a variable's
/// live range. Generally there is one interval per basic block where the
/// variable is live, but adjacent intervals get coalesced into a single
/// interval.
class LiveRange {
public:
  using RangeElementType = std::pair<InstNumberT, InstNumberT>;
  /// RangeType is arena-allocated from the Cfg's allocator.
  using RangeType = CfgVector<RangeElementType>;
  LiveRange() = default;
  /// Special constructor for building a kill set. The advantage is that we can
  /// reserve the right amount of space in advance.
  explicit LiveRange(const CfgVector<InstNumberT> &Kills) {
    Range.reserve(Kills.size());
    for (InstNumberT I : Kills)
      addSegment(I, I);
  }
  LiveRange(const LiveRange &) = default;
  LiveRange &operator=(const LiveRange &) = default;

  void reset() {
    Range.clear();
    untrim();
  }
  void addSegment(InstNumberT Start, InstNumberT End, CfgNode *Node = nullptr);
  void addSegment(RangeElementType Segment, CfgNode *Node = nullptr) {
    addSegment(Segment.first, Segment.second, Node);
  }

  bool endsBefore(const LiveRange &Other) const;
  bool overlaps(const LiveRange &Other, bool UseTrimmed = false) const;
  bool overlapsInst(InstNumberT OtherBegin, bool UseTrimmed = false) const;
  bool containsValue(InstNumberT Value, bool IsDest) const;
  bool isEmpty() const { return Range.empty(); }
  InstNumberT getStart() const {
    return Range.empty() ? -1 : Range.begin()->first;
  }
  InstNumberT getEnd() const {
    return Range.empty() ? -1 : Range.rbegin()->second;
  }

  void untrim() { TrimmedBegin = Range.begin(); }
  void trim(InstNumberT Lower);

  void dump(Ostream &Str) const;

  SizeT getNumSegments() const { return Range.size(); }

  const RangeType &getSegments() const { return Range; }
  CfgNode *getNodeForSegment(InstNumberT Begin) {
    auto Iter = NodeMap.find(Begin);
    assert(Iter != NodeMap.end());
    return Iter->second;
  }

private:
  RangeType Range;
  CfgUnorderedMap<InstNumberT, CfgNode *> NodeMap;
  /// TrimmedBegin is an optimization for the overlaps() computation. Since the
  /// linear-scan algorithm always calls it as overlaps(Cur) and Cur advances
  /// monotonically according to live range start, we can optimize overlaps() by
  /// ignoring all segments that end before the start of Cur's range. The
  /// linear-scan code enables this by calling trim() on the ranges of interest
  /// as Cur advances. Note that linear-scan also has to initialize TrimmedBegin
  /// at the beginning by calling untrim().
  RangeType::const_iterator TrimmedBegin;
};

Ostream &operator<<(Ostream &Str, const LiveRange &L);
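
// Illustrative sketch (editor's note): segments are kept per block, and per
// the class comment adjacent segments coalesce, so for a variable live over
// instruction numbers 10..20 in one block and 20..30 in the next:
//
//   LiveRange LR;
//   LR.addSegment(10, 20);
//   LR.addSegment(20, 30);   // expected to coalesce with the previous segment
//   // LR.getStart() == 10, LR.getEnd() == 30
//
// addSegment() is defined in the .cpp file, so the exact merging rule is an
// assumption based on the class comment above.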

/// Variable represents an operand that is register-allocated or
/// stack-allocated. If it is register-allocated, it will ultimately have a
/// valid RegNum field.
class Variable : public Operand {
  Variable() = delete;
  Variable(const Variable &) = delete;
  Variable &operator=(const Variable &) = delete;

  enum RegRequirement : uint8_t {
    RR_MayHaveRegister,
    RR_MustHaveRegister,
    RR_MustNotHaveRegister,
  };

public:
  static Variable *create(Cfg *Func, Type Ty, SizeT Index) {
    return new (Func->allocate<Variable>())
        Variable(Func, kVariable, Ty, Index);
  }

  SizeT getIndex() const { return Number; }
  std::string getName() const {
    if (Name.hasStdString())
      return Name.toString();
    return "__" + std::to_string(getIndex());
  }
  virtual void setName(const Cfg *Func, const std::string &NewName) {
    if (NewName.empty())
      return;
    Name = VariableString::createWithString(Func, NewName);
  }

  bool getIsArg() const { return IsArgument; }
  virtual void setIsArg(bool Val = true) { IsArgument = Val; }
  bool getIsImplicitArg() const { return IsImplicitArgument; }
  void setIsImplicitArg(bool Val = true) { IsImplicitArgument = Val; }

  void setIgnoreLiveness() { IgnoreLiveness = true; }
  bool getIgnoreLiveness() const {
    return IgnoreLiveness || IsRematerializable;
  }

  /// Returns true if the variable either has a definite stack offset, or has
  /// the UndeterminedStackOffset placeholder, which guarantees that it will
  /// have a definite stack offset at emission time.
  bool hasStackOffset() const { return StackOffset != InvalidStackOffset; }
  /// Returns true if the variable has a stack offset that is known at this
  /// time.
  bool hasKnownStackOffset() const {
    return StackOffset != InvalidStackOffset &&
           StackOffset != UndeterminedStackOffset;
  }
  int32_t getStackOffset() const {
    assert(hasKnownStackOffset());
    return StackOffset;
  }
  void setStackOffset(int32_t Offset) { StackOffset = Offset; }
  /// Set a "placeholder" stack offset before its actual offset has been
  /// determined.
  void setHasStackOffset() {
    if (!hasStackOffset())
      StackOffset = UndeterminedStackOffset;
  }
  /// Returns the variable's stack offset in symbolic form, to improve
  /// readability in DecorateAsm mode.
  std::string getSymbolicStackOffset() const {
    if (!BuildDefs::dump())
      return "";
    return ".L$lv$" + getName();
  }

  bool hasReg() const { return getRegNum().hasValue(); }
  RegNumT getRegNum() const { return RegNum; }
  void setRegNum(RegNumT NewRegNum) {
    // Regnum shouldn't be set more than once.
    assert(!hasReg() || RegNum == NewRegNum);
    RegNum = NewRegNum;
  }
  bool hasRegTmp() const { return getRegNumTmp().hasValue(); }
  RegNumT getRegNumTmp() const { return RegNumTmp; }
  void setRegNumTmp(RegNumT NewRegNum) { RegNumTmp = NewRegNum; }

  RegWeight getWeight(const Cfg *Func) const;

  void setMustHaveReg() { RegRequirement = RR_MustHaveRegister; }
  bool mustHaveReg() const { return RegRequirement == RR_MustHaveRegister; }
  void setMustNotHaveReg() { RegRequirement = RR_MustNotHaveRegister; }
  bool mustNotHaveReg() const {
    return RegRequirement == RR_MustNotHaveRegister;
  }
  bool mayHaveReg() const { return RegRequirement == RR_MayHaveRegister; }
  void setRematerializable(RegNumT NewRegNum, int32_t NewOffset) {
    IsRematerializable = true;
    setRegNum(NewRegNum);
    setStackOffset(NewOffset);
    setMustHaveReg();
  }
  bool isRematerializable() const { return IsRematerializable; }

  void setRegClass(uint8_t RC) { RegisterClass = static_cast<RegClass>(RC); }
  RegClass getRegClass() const { return RegisterClass; }

  LiveRange &getLiveRange() { return Live; }
  const LiveRange &getLiveRange() const { return Live; }
  void setLiveRange(const LiveRange &Range) { Live = Range; }
  void resetLiveRange() { Live.reset(); }
  void addLiveRange(InstNumberT Start, InstNumberT End,
                    CfgNode *Node = nullptr) {
    assert(!getIgnoreLiveness());
    Live.addSegment(Start, End, Node);
  }
  void trimLiveRange(InstNumberT Start) { Live.trim(Start); }
  void untrimLiveRange() { Live.untrim(); }
  bool rangeEndsBefore(const Variable *Other) const {
    return Live.endsBefore(Other->Live);
  }
  bool rangeOverlaps(const Variable *Other) const {
    constexpr bool UseTrimmed = true;
    return Live.overlaps(Other->Live, UseTrimmed);
  }
  bool rangeOverlapsStart(const Variable *Other) const {
    constexpr bool UseTrimmed = true;
    return Live.overlapsInst(Other->Live.getStart(), UseTrimmed);
  }

  /// Creates a temporary copy of the variable with a different type. Used
  /// primarily for syntactic correctness of textual assembly emission. Note
  /// that only basic information is copied, in particular not IsArgument,
  /// IsImplicitArgument, IgnoreLiveness, RegNumTmp, Live, LoVar, HiVar,
  /// VarsReal. If NewRegNum.hasValue(), then that register assignment is made
  /// instead of copying the existing assignment.
  const Variable *asType(const Cfg *Func, Type Ty, RegNumT NewRegNum) const;

  void emit(const Cfg *Func) const override;
  using Operand::dump;
  void dump(const Cfg *Func, Ostream &Str) const override;

  /// Return reg num of base register, if different from stack/frame register.
  virtual RegNumT getBaseRegNum() const { return RegNumT(); }

  /// Access the LinkedTo field.
  void setLinkedTo(Variable *Var) { LinkedTo = Var; }
  Variable *getLinkedTo() const { return LinkedTo; }
  /// Follow the LinkedTo chain up to the furthest ancestor.
  Variable *getLinkedToRoot() const {
    Variable *Root = LinkedTo;
    if (Root == nullptr)
      return nullptr;
    while (Root->LinkedTo != nullptr)
      Root = Root->LinkedTo;
    return Root;
  }
  /// Follow the LinkedTo chain up to the furthest stack-allocated ancestor.
  /// This is only certain to be accurate after register allocation and stack
  /// slot assignment have completed.
  Variable *getLinkedToStackRoot() const {
    Variable *FurthestStackVar = nullptr;
    for (Variable *Root = LinkedTo; Root != nullptr; Root = Root->LinkedTo) {
      if (!Root->hasReg() && Root->hasStackOffset()) {
        FurthestStackVar = Root;
      }
    }
    return FurthestStackVar;
  }

  static bool classof(const Operand *Operand) {
    OperandKind Kind = Operand->getKind();
    return Kind >= kVariable && Kind <= kVariable_Max;
  }

  SizeT hashValue() const override { return std::hash<SizeT>()(getIndex()); }

protected:
  Variable(const Cfg *Func, OperandKind K, Type Ty, SizeT Index)
      : Operand(K, Ty), Number(Index),
        Name(VariableString::createWithoutString(Func)),
        RegisterClass(static_cast<RegClass>(Ty)) {
    Vars = VarsReal;
    Vars[0] = this;
    NumVars = 1;
  }
  /// Number is unique across all variables, and is used as a (bit)vector index
  /// for liveness analysis.
  const SizeT Number;
  VariableString Name;
  bool IsArgument = false;
  bool IsImplicitArgument = false;
  /// IgnoreLiveness means that the variable should be ignored when constructing
  /// and validating live ranges. This is usually reserved for the stack
  /// pointer and other physical registers specifically referenced by name.
  bool IgnoreLiveness = false;
  // If IsRematerializable, RegNum keeps track of which register (stack or frame
  // pointer), and StackOffset is the known offset from that register.
  bool IsRematerializable = false;
  RegRequirement RegRequirement = RR_MayHaveRegister;
  RegClass RegisterClass;
  /// RegNum is the allocated register (as long as RegNum.hasValue() is true).
  RegNumT RegNum;
  /// RegNumTmp is the tentative assignment during register allocation.
  RegNumT RegNumTmp;
  static constexpr int32_t InvalidStackOffset =
      std::numeric_limits<int32_t>::min();
  static constexpr int32_t UndeterminedStackOffset =
      1 + std::numeric_limits<int32_t>::min();
  /// StackOffset is the canonical location on stack (only if
  /// RegNum.hasNoValue() || IsArgument).
  int32_t StackOffset = InvalidStackOffset;
  LiveRange Live;
  /// VarsReal (and Operand::Vars) are set up such that Vars[0] == this.
  Variable *VarsReal[1];
  /// This Variable may be "linked" to another Variable, such that if neither
  /// Variable gets a register, they are guaranteed to share a stack location.
  Variable *LinkedTo = nullptr;
};
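
// Illustrative sketch (editor's note): a typical lifetime of a Variable during
// lowering and register allocation, using only the methods declared above
// (Func and Index are hypothetical, as are the emit helpers):
//
//   Variable *V = Variable::create(Func, IceType_i32, Index);
//   V->setMustHaveReg();                  // hint for the register allocator
//   // ... lowering and register allocation run ...
//   if (V->hasReg())
//     emitRegister(V->getRegNum());       // hypothetical emit helper
//   else
//     emitStackSlot(V->getStackOffset()); // only valid once the offset is
//                                         // known (hasKnownStackOffset())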

// Variable64On32 represents a 64-bit variable on a 32-bit architecture. In
// this situation the variable must be split into a low and a high word.
class Variable64On32 : public Variable {
  Variable64On32() = delete;
  Variable64On32(const Variable64On32 &) = delete;
  Variable64On32 &operator=(const Variable64On32 &) = delete;

public:
  static Variable64On32 *create(Cfg *Func, Type Ty, SizeT Index) {
    return new (Func->allocate<Variable64On32>())
        Variable64On32(Func, kVariable64On32, Ty, Index);
  }

  void setName(const Cfg *Func, const std::string &NewName) override {
    Variable::setName(Func, NewName);
    if (LoVar && HiVar) {
      LoVar->setName(Func, getName() + "__lo");
      HiVar->setName(Func, getName() + "__hi");
    }
  }

  void setIsArg(bool Val = true) override {
    Variable::setIsArg(Val);
    if (LoVar && HiVar) {
      LoVar->setIsArg(Val);
      HiVar->setIsArg(Val);
    }
  }

  Variable *getLo() const {
    assert(LoVar != nullptr);
    return LoVar;
  }
  Variable *getHi() const {
    assert(HiVar != nullptr);
    return HiVar;
  }

  void initHiLo(Cfg *Func) {
    assert(LoVar == nullptr);
    assert(HiVar == nullptr);
    LoVar = Func->makeVariable(IceType_i32);
    HiVar = Func->makeVariable(IceType_i32);
    LoVar->setIsArg(getIsArg());
    HiVar->setIsArg(getIsArg());
    if (BuildDefs::dump()) {
      LoVar->setName(Func, getName() + "__lo");
      HiVar->setName(Func, getName() + "__hi");
    }
  }

  static bool classof(const Operand *Operand) {
    OperandKind Kind = Operand->getKind();
    return Kind == kVariable64On32;
  }

protected:
  Variable64On32(const Cfg *Func, OperandKind K, Type Ty, SizeT Index)
      : Variable(Func, K, Ty, Index) {
    assert(typeWidthInBytes(Ty) == 8);
  }

  Variable *LoVar = nullptr;
  Variable *HiVar = nullptr;
};

// VariableVecOn32 represents a 128-bit vector variable on a 32-bit
// architecture. In this case the variable must be split into 4 containers.
class VariableVecOn32 : public Variable {
  VariableVecOn32() = delete;
  VariableVecOn32(const VariableVecOn32 &) = delete;
  VariableVecOn32 &operator=(const VariableVecOn32 &) = delete;

public:
  static VariableVecOn32 *create(Cfg *Func, Type Ty, SizeT Index) {
    return new (Func->allocate<VariableVecOn32>())
        VariableVecOn32(Func, kVariableVecOn32, Ty, Index);
  }

  void setName(const Cfg *Func, const std::string &NewName) override {
    Variable::setName(Func, NewName);
    if (!Containers.empty()) {
      for (SizeT i = 0; i < ContainersPerVector; ++i) {
        Containers[i]->setName(Func, getName() + "__cont" + std::to_string(i));
      }
    }
  }

  void setIsArg(bool Val = true) override {
    Variable::setIsArg(Val);
    for (Variable *Var : Containers) {
      Var->setIsArg(getIsArg());
    }
  }

  const VarList &getContainers() const { return Containers; }

  void initVecElement(Cfg *Func) {
    for (SizeT i = 0; i < ContainersPerVector; ++i) {
      Variable *Var = Func->makeVariable(IceType_i32);
      Var->setIsArg(getIsArg());
      if (BuildDefs::dump()) {
        Var->setName(Func, getName() + "__cont" + std::to_string(i));
      }
      Containers.push_back(Var);
    }
  }

  static bool classof(const Operand *Operand) {
    OperandKind Kind = Operand->getKind();
    return Kind == kVariableVecOn32;
  }

  // A 128-bit vector value is mapped onto 4 32-bit register values.
  static constexpr SizeT ContainersPerVector = 4;

protected:
  VariableVecOn32(const Cfg *Func, OperandKind K, Type Ty, SizeT Index)
      : Variable(Func, K, Ty, Index) {
    assert(typeWidthInBytes(Ty) ==
           ContainersPerVector * typeWidthInBytes(IceType_i32));
  }

  VarList Containers;
};

enum MetadataKind {
  VMK_Uses,       /// Track only uses, not defs
  VMK_SingleDefs, /// Track uses+defs, but only record single def
  VMK_All         /// Track uses+defs, including full def list
};
using InstDefList = CfgVector<const Inst *>;

/// VariableTracking tracks the metadata for a single variable.  It is
/// only meant to be used internally by VariablesMetadata.
class VariableTracking {
public:
  enum MultiDefState {
    // TODO(stichnot): Consider using just a simple counter.
    MDS_Unknown,
    MDS_SingleDef,
    MDS_MultiDefSingleBlock,
    MDS_MultiDefMultiBlock
  };
  enum MultiBlockState {
    MBS_Unknown,     // Not yet initialized, so be conservative
    MBS_NoUses,      // Known to have no uses
    MBS_SingleBlock, // All uses are in a single block
    MBS_MultiBlock   // Several uses across several blocks
  };
  VariableTracking() = default;
  VariableTracking(const VariableTracking &) = default;
  VariableTracking &operator=(const VariableTracking &) = default;
  VariableTracking(MultiBlockState MultiBlock) : MultiBlock(MultiBlock) {}
  MultiDefState getMultiDef() const { return MultiDef; }
  MultiBlockState getMultiBlock() const { return MultiBlock; }
  const Inst *getFirstDefinitionSingleBlock() const;
  const Inst *getSingleDefinition() const;
  const Inst *getFirstDefinition() const;
  const InstDefList &getLatterDefinitions() const { return Definitions; }
  CfgNode *getNode() const { return SingleUseNode; }
  RegWeight getUseWeight() const { return UseWeight; }
  void markUse(MetadataKind TrackingKind, const Inst *Instr, CfgNode *Node,
               bool IsImplicit);
  void markDef(MetadataKind TrackingKind, const Inst *Instr, CfgNode *Node);

private:
  MultiDefState MultiDef = MDS_Unknown;
  MultiBlockState MultiBlock = MBS_Unknown;
  CfgNode *SingleUseNode = nullptr;
  CfgNode *SingleDefNode = nullptr;
  /// All definitions of the variable are collected in Definitions[] (except for
  /// the earliest definition), in increasing order of instruction number.
  InstDefList Definitions; /// Only used if Kind==VMK_All
  const Inst *FirstOrSingleDefinition = nullptr;
  RegWeight UseWeight;
};

/// VariablesMetadata analyzes and summarizes the metadata for the complete set
/// of Variables.
class VariablesMetadata {
  VariablesMetadata() = delete;
  VariablesMetadata(const VariablesMetadata &) = delete;
  VariablesMetadata &operator=(const VariablesMetadata &) = delete;

public:
  explicit VariablesMetadata(const Cfg *Func) : Func(Func) {}
  /// Initialize the state by traversing all instructions/variables in the CFG.
  void init(MetadataKind TrackingKind);
  /// Add a single node. This is called by init(), and can be called
  /// incrementally from elsewhere, e.g. after edge-splitting.
  void addNode(CfgNode *Node);
  MetadataKind getKind() const { return Kind; }
  /// Returns whether the given Variable is tracked in this object. It should
  /// only return false if changes were made to the CFG after running init(), in
  /// which case the state is stale and the results shouldn't be trusted (but it
  /// may be OK e.g. for dumping).
  bool isTracked(const Variable *Var) const {
    return Var->getIndex() < Metadata.size();
  }

  /// Returns whether the given Variable has multiple definitions.
  bool isMultiDef(const Variable *Var) const;
  /// Returns the first definition instruction of the given Variable. This is
  /// only valid for variables whose definitions are all within the same block,
  /// e.g. T after the lowered sequence "T=B; T+=C; A=T", for which
  /// getFirstDefinitionSingleBlock(T) would return the "T=B" instruction. For
  /// variables whose definitions span multiple blocks, nullptr is returned.
  const Inst *getFirstDefinitionSingleBlock(const Variable *Var) const;
  /// Returns the definition instruction of the given Variable, when the
  /// variable has exactly one definition. Otherwise, nullptr is returned.
  const Inst *getSingleDefinition(const Variable *Var) const;
  /// getFirstDefinition() and getLatterDefinitions() are used together to
  /// return the complete set of instructions that define the given Variable,
  /// regardless of whether the definitions are within the same block (in
  /// contrast to getFirstDefinitionSingleBlock).
  const Inst *getFirstDefinition(const Variable *Var) const;
  const InstDefList &getLatterDefinitions(const Variable *Var) const;

  /// Returns whether the given Variable is live across multiple blocks. Mainly,
  /// this is used to partition Variables into single-block versus multi-block
  /// sets for leveraging sparsity in liveness analysis, and for implementing
  /// simple stack slot coalescing. As a special case, function arguments are
  /// always considered multi-block because they are live coming into the entry
  /// block.
  bool isMultiBlock(const Variable *Var) const;
  bool isSingleBlock(const Variable *Var) const;
  /// Returns the node that the given Variable is used in, assuming
  /// isMultiBlock() returns false. Otherwise, nullptr is returned.
  CfgNode *getLocalUseNode(const Variable *Var) const;

  /// Returns the total use weight computed as the sum of uses multiplied by a
  /// loop nest depth factor for each use.
  RegWeight getUseWeight(const Variable *Var) const;

private:
  const Cfg *Func;
  MetadataKind Kind;
  CfgVector<VariableTracking> Metadata;
  static const InstDefList *NoDefinitions;
};
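
// Illustrative sketch (editor's note): typical use is to build the metadata
// once and then query it, e.g. when deciding whether a variable's uses are
// confined to one block (Func and Var are hypothetical placeholders):
//
//   VariablesMetadata VMetadata(Func);
//   VMetadata.init(VMK_All);
//   if (VMetadata.isTracked(Var) && VMetadata.isSingleBlock(Var)) {
//     CfgNode *OnlyUser = VMetadata.getLocalUseNode(Var);
//     // ... all uses of Var are inside OnlyUser ...
//   }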

/// BooleanVariable represents a variable that was the zero-extended result of a
/// comparison. It maintains a pointer to its original i1 source so that the
/// WASM frontend can avoid adding needless comparisons.
class BooleanVariable : public Variable {
  BooleanVariable() = delete;
  BooleanVariable(const BooleanVariable &) = delete;
  BooleanVariable &operator=(const BooleanVariable &) = delete;

  BooleanVariable(const Cfg *Func, OperandKind K, Type Ty, SizeT Index)
      : Variable(Func, K, Ty, Index) {}

public:
  static BooleanVariable *create(Cfg *Func, Type Ty, SizeT Index) {
    return new (Func->allocate<BooleanVariable>())
        BooleanVariable(Func, kVariable, Ty, Index);
  }

  virtual Variable *asBoolean() { return BoolSource; }

  void setBoolSource(Variable *Src) { BoolSource = Src; }

  static bool classof(const Operand *Operand) {
    return Operand->getKind() == kVariableBoolean;
  }

private:
  Variable *BoolSource = nullptr;
};

} // end of namespace Ice

#endif // SUBZERO_SRC_ICEOPERAND_H