//===- subzero/src/IceTargetLoweringX86Base.h - x86 lowering ----*- C++ -*-===//
//
//                        The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Declares the TargetLoweringX86 template class, which implements the
/// TargetLowering base interface for the x86 architecture.
///
//===----------------------------------------------------------------------===//

#ifndef SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H
#define SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H

#include "IceDefs.h"
#include "IceInst.h"
#include "IceSwitchLowering.h"
#include "IceTargetLowering.h"
#include "IceTargetLoweringX86RegClass.h"
#include "IceUtils.h"

#include <array>
#include <type_traits>
#include <utility>

#ifndef X86NAMESPACE
#error "You must define the X86 Target namespace."
#endif

namespace Ice {
namespace X86NAMESPACE {

using namespace ::Ice::X86;

template <typename Traits> class BoolFolding;

/// TargetX86Base is a template for all X86 Targets, and it relies on the CRTP
/// (curiously recurring template pattern) for generating code, delegating
/// target-specific lowerings (e.g., call, ret, and intrinsics) to the actual
/// backends. Backends are expected to implement the following methods (which
/// should be accessible from TargetX86Base):
///
/// Operand *createNaClReadTPSrcOperand()
///
/// Note: Ideally, we should be able to
///
///  static_assert(std::is_base_of<TargetX86Base<TraitsType>,
///  Machine>::value);
///
/// but that does not work: the compiler does not know that Machine inherits
/// from TargetX86Base at this point in translation.
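///
/// As an illustrative sketch (names hypothetical), a concrete backend derives
/// from this class via CRTP roughly as follows:
///
///   class TargetX8632 final : public TargetX86Base<X8632Traits> {
///     // implements createNaClReadTPSrcOperand(), etc.
///   };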
template <typename TraitsType> class TargetX86Base : public TargetLowering {
  TargetX86Base() = delete;
  TargetX86Base(const TargetX86Base &) = delete;
  TargetX86Base &operator=(const TargetX86Base &) = delete;

public:
  using Traits = TraitsType;
  using ConcreteTarget = typename Traits::ConcreteTarget;
  using InstructionSetEnum = typename Traits::InstructionSet;

  using BrCond = typename Traits::Cond::BrCond;
  using CmppsCond = typename Traits::Cond::CmppsCond;

  using X86Address = typename Traits::Address;
  using X86Operand = typename Traits::X86Operand;
  using X86OperandMem = typename Traits::X86OperandMem;
  using SegmentRegisters = typename Traits::X86OperandMem::SegmentRegisters;

  using InstX86Br = typename Traits::Insts::Br;
  using InstX86FakeRMW = typename Traits::Insts::FakeRMW;
  using InstX86Label = typename Traits::Insts::Label;

  ~TargetX86Base() override = default;

  static void staticInit(GlobalContext *Ctx);
  static bool shouldBePooled(const Constant *C);
  static ::Ice::Type getPointerType();

  static FixupKind getPcRelFixup() { return PcRelFixup; }
  static FixupKind getAbsFixup() { return AbsFixup; }

  bool needSandboxing() const { return NeedSandboxing; }

  void translateOm1() override;
  void translateO2() override;
  void doLoadOpt();
  bool doBranchOpt(Inst *I, const CfgNode *NextNode) override;

  SizeT getNumRegisters() const override {
    return Traits::RegisterSet::Reg_NUM;
  }
  Inst *createLoweredMove(Variable *Dest, Variable *SrcVar) override {
    if (isVectorType(Dest->getType())) {
      return Traits::Insts::Movp::create(Func, Dest, SrcVar);
    }
    return Traits::Insts::Mov::create(Func, Dest, SrcVar);
  }

  Variable *getPhysicalRegister(RegNumT RegNum,
                                Type Ty = IceType_void) override;
  const char *getRegName(RegNumT RegNum, Type Ty) const override;
  static const char *getRegClassName(RegClass C) {
    auto ClassNum = static_cast<RegClassX86>(C);
    assert(ClassNum < RCX86_NUM);
    switch (ClassNum) {
    default:
      assert(C < RC_Target);
      return regClassString(C);
    case RCX86_Is64To8:
      return "i64to8"; // 64-bit GPR truncable to i8
    case RCX86_Is32To8:
      return "i32to8"; // 32-bit GPR truncable to i8
    case RCX86_Is16To8:
      return "i16to8"; // 16-bit GPR truncable to i8
    case RCX86_IsTrunc8Rcvr:
      return "i8from"; // 8-bit GPR truncable from wider GPRs
    case RCX86_IsAhRcvr:
      return "i8fromah"; // 8-bit GPR that ah can be assigned to
    }
  }
  SmallBitVector getRegisterSet(RegSetMask Include,
                                RegSetMask Exclude) const override;
  const SmallBitVector &
  getRegistersForVariable(const Variable *Var) const override {
    RegClass RC = Var->getRegClass();
    assert(static_cast<RegClassX86>(RC) < RCX86_NUM);
    return TypeToRegisterSet[RC];
  }

  const SmallBitVector &
  getAllRegistersForVariable(const Variable *Var) const override {
    RegClass RC = Var->getRegClass();
    assert(static_cast<RegClassX86>(RC) < RCX86_NUM);
    return TypeToRegisterSetUnfiltered[RC];
  }

  const SmallBitVector &getAliasesForRegister(RegNumT Reg) const override {
    Reg.assertIsValid();
    return RegisterAliases[Reg];
  }

  bool hasFramePointer() const override { return IsEbpBasedFrame; }
  void setHasFramePointer() override { IsEbpBasedFrame = true; }
  RegNumT getStackReg() const override { return Traits::StackPtr; }
  RegNumT getFrameReg() const override { return Traits::FramePtr; }
  RegNumT getFrameOrStackReg() const override {
    // If the stack pointer needs to be aligned, then the frame pointer is
    // unaligned, so always use the stack pointer.
    if (needsStackPointerAlignment())
      return getStackReg();
    return IsEbpBasedFrame ? getFrameReg() : getStackReg();
  }
  size_t typeWidthInBytesOnStack(Type Ty) const override {
    // Round up to the next multiple of WordType bytes.
    const uint32_t WordSizeInBytes = typeWidthInBytes(Traits::WordType);
    return Utils::applyAlignment(typeWidthInBytes(Ty), WordSizeInBytes);
  }
  uint32_t getStackAlignment() const override {
    return Traits::X86_STACK_ALIGNMENT_BYTES;
  }
  bool needsStackPointerAlignment() const override {
    // If the ABI's stack alignment is smaller than the vector size (16 bytes),
    // use the (realigned) stack pointer for addressing any stack variables.
    return Traits::X86_STACK_ALIGNMENT_BYTES < 16;
  }
  void reserveFixedAllocaArea(size_t Size, size_t Align) override {
    FixedAllocaSizeBytes = Size;
    assert(llvm::isPowerOf2_32(Align));
    FixedAllocaAlignBytes = Align;
    PrologEmitsFixedAllocas = true;
  }
  /// Returns the (negative) offset from ebp/rbp where the fixed Allocas start.
  int32_t getFrameFixedAllocaOffset() const override {
    return FixedAllocaSizeBytes - (SpillAreaSizeBytes - maxOutArgsSizeBytes());
  }
  virtual uint32_t maxOutArgsSizeBytes() const override {
    return MaxOutArgsSizeBytes;
  }
  virtual void updateMaxOutArgsSizeBytes(uint32_t Size) {
    MaxOutArgsSizeBytes = std::max(MaxOutArgsSizeBytes, Size);
  }

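  /// On 32-bit x86, 64-bit integer operands are split into a lo/hi pair of
  /// 32-bit variables (retrieved via loOperand()/hiOperand() below). A sketch
  /// of the intent for a 64-bit add, in pseudocode:
  ///
  ///   Dest.lo = Src0.lo + Src1.lo; // add, sets the carry flag
  ///   Dest.hi = Src0.hi + Src1.hi; // adc, consumes the carry flag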
  bool shouldSplitToVariable64On32(Type Ty) const override {
    return Traits::Is64Bit ? false : Ty == IceType_i64;
  }

  ConstantRelocatable *createGetIPForRegister(const Variable *Dest) {
    assert(Dest->hasReg());
    const std::string RegName = Traits::getRegName(Dest->getRegNum());
    return llvm::cast<ConstantRelocatable>(Ctx->getConstantExternSym(
        Ctx->getGlobalString(H_getIP_prefix + RegName)));
  }

  SizeT getMinJumpTableSize() const override { return 4; }

  void emitVariable(const Variable *Var) const override;

  void emit(const ConstantInteger32 *C) const final;
  void emit(const ConstantInteger64 *C) const final;
  void emit(const ConstantFloat *C) const final;
  void emit(const ConstantDouble *C) const final;
  void emit(const ConstantUndef *C) const final;
  void emit(const ConstantRelocatable *C) const final;

  void initNodeForLowering(CfgNode *Node) override;

  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, Operand>::type *
  loOperand(Operand *Operand);
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, Operand>::type *loOperand(Operand *) {
    llvm::report_fatal_error(
        "Hey, yo! This is x86-64. Watcha doin'? (loOperand)");
  }

  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, Operand>::type *
  hiOperand(Operand *Operand);
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, Operand>::type *hiOperand(Operand *) {
    llvm::report_fatal_error(
        "Hey, yo! This is x86-64. Watcha doin'? (hiOperand)");
  }

  void addProlog(CfgNode *Node) override;
  void finishArgumentLowering(Variable *Arg, Variable *FramePtr,
                              size_t BasicFrameOffset, size_t StackAdjBytes,
                              size_t &InArgsSizeBytes);
  void addEpilog(CfgNode *Node) override;
  X86Address stackVarToAsmOperand(const Variable *Var) const;

  InstructionSetEnum getInstructionSet() const { return InstructionSet; }
  Operand *legalizeUndef(Operand *From, RegNumT RegNum = RegNumT());

protected:
  const bool NeedSandboxing;

  explicit TargetX86Base(Cfg *Func);

  void postLower() override;

  /// Initializes the RebasePtr member variable -- if so required by
  /// SandboxingType for the concrete Target.
  void initRebasePtr() {
    assert(SandboxingType != ST_None);
    dispatchToConcrete(&Traits::ConcreteTarget::initRebasePtr);
  }

  /// Emit code that initializes the value of the RebasePtr near the start of
  /// the function -- if so required by SandboxingType for the concrete type.
  void initSandbox() {
    assert(SandboxingType != ST_None);
    dispatchToConcrete(&Traits::ConcreteTarget::initSandbox);
  }

  void lowerAlloca(const InstAlloca *Instr) override;
  void lowerArguments() override;
  void lowerArithmetic(const InstArithmetic *Instr) override;
  void lowerAssign(const InstAssign *Instr) override;
  void lowerBr(const InstBr *Instr) override;
  void lowerBreakpoint(const InstBreakpoint *Instr) override;
  void lowerCall(const InstCall *Instr) override;
  void lowerCast(const InstCast *Instr) override;
  void lowerExtractElement(const InstExtractElement *Instr) override;
  void lowerFcmp(const InstFcmp *Instr) override;
  void lowerIcmp(const InstIcmp *Instr) override;

  void lowerIntrinsicCall(const InstIntrinsicCall *Instr) override;
  void lowerInsertElement(const InstInsertElement *Instr) override;
  void lowerLoad(const InstLoad *Instr) override;
  void lowerPhi(const InstPhi *Instr) override;
  void lowerRet(const InstRet *Instr) override;
  void lowerSelect(const InstSelect *Instr) override;
  void lowerShuffleVector(const InstShuffleVector *Instr) override;
  void lowerStore(const InstStore *Instr) override;
  void lowerSwitch(const InstSwitch *Instr) override;
  void lowerUnreachable(const InstUnreachable *Instr) override;
  void lowerOther(const Inst *Instr) override;
  void lowerRMW(const InstX86FakeRMW *RMW);
  void prelowerPhis() override;
  uint32_t getCallStackArgumentsSizeBytes(const CfgVector<Type> &ArgTypes,
                                          Type ReturnType);
  uint32_t getCallStackArgumentsSizeBytes(const InstCall *Instr) override;
  void genTargetHelperCallFor(Inst *Instr) override;

  /// OptAddr wraps all the possible operands that an x86 address might have.
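  /// For illustration, an address such as [ebx + ecx*4 + 8] would be captured
  /// as Base=ebx, Index=ecx, Shift=2 (the scale is 1 << Shift), and Offset=8.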
  struct OptAddr {
    Variable *Base = nullptr;
    Variable *Index = nullptr;
    uint16_t Shift = 0;
    int32_t Offset = 0;
    ConstantRelocatable *Relocatable = nullptr;
  };
  /// Legalizes Addr w.r.t. SandboxingType. The exact type of legalization
  /// varies for different <Target, SandboxingType> tuples.
  bool legalizeOptAddrForSandbox(OptAddr *Addr) {
    return dispatchToConcrete(
        &Traits::ConcreteTarget::legalizeOptAddrForSandbox, std::move(Addr));
  }
  // Builds information for a canonical address expression:
  //   <Relocatable + Offset>(Base, Index, Shift)
  X86OperandMem *computeAddressOpt(const Inst *Instr, Type MemType,
                                   Operand *Addr);
  void doAddressOptOther() override;
  void doAddressOptLoad() override;
  void doAddressOptStore() override;
  void doAddressOptLoadSubVector() override;
  void doAddressOptStoreSubVector() override;
  void doMockBoundsCheck(Operand *Opnd) override;
  void randomlyInsertNop(float Probability,
                         RandomNumberGenerator &RNG) override;

  /// Naive lowering of cmpxchg.
  void lowerAtomicCmpxchg(Variable *DestPrev, Operand *Ptr, Operand *Expected,
                          Operand *Desired);
  /// Attempt a more optimized lowering of cmpxchg. Returns true if optimized.
  bool tryOptimizedCmpxchgCmpBr(Variable *DestPrev, Operand *Ptr,
                                Operand *Expected, Operand *Desired);
  void lowerAtomicRMW(Variable *Dest, uint32_t Operation, Operand *Ptr,
                      Operand *Val);
  void lowerCountZeros(bool Cttz, Type Ty, Variable *Dest, Operand *FirstVal,
                       Operand *SecondVal);
  /// Load from memory for a given type.
  void typedLoad(Type Ty, Variable *Dest, Variable *Base, Constant *Offset);
  /// Store to memory for a given type.
  void typedStore(Type Ty, Variable *Value, Variable *Base, Constant *Offset);
  /// Copy memory of given type from Src to Dest using OffsetAmt on both.
  void copyMemory(Type Ty, Variable *Dest, Variable *Src, int32_t OffsetAmt);
  /// Replace some calls to memcpy with inline instructions.
  void lowerMemcpy(Operand *Dest, Operand *Src, Operand *Count);
  /// Replace some calls to memmove with inline instructions.
  void lowerMemmove(Operand *Dest, Operand *Src, Operand *Count);
  /// Replace some calls to memset with inline instructions.
  void lowerMemset(Operand *Dest, Operand *Val, Operand *Count);

  /// Lower an indirect jump adding sandboxing when needed.
  void lowerIndirectJump(Variable *JumpTarget) {
    // Without std::move below, the compiler deduces that the argument to
    // lowerIndirectJmp is a Variable *&, not a Variable *.
    dispatchToConcrete(&Traits::ConcreteTarget::lowerIndirectJump,
                       std::move(JumpTarget));
  }

  /// Check that the comparison value is in [Min,Max]. The flags register will
  /// be modified as follows:
  ///   - below or equal, if in range
  ///   - above, if not in range
  /// The index into the range is returned.
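  ///
  /// A sketch of the usual idiom (illustrative only): subtract Min so the
  /// check collapses into a single unsigned comparison:
  ///
  ///   sub t, Min      ; t is the returned index into the range
  ///   cmp t, Max-Min  ; "be" => in range, "a" => out of range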
  Operand *lowerCmpRange(Operand *Comparison, uint64_t Min, uint64_t Max);
  /// Lowering of a cluster of switch cases. If the case is not matched control
  /// will pass to the default label provided. If the default label is nullptr
  /// then control will fall through to the next instruction. DoneCmp should be
  /// true if the flags contain the result of a comparison with the Comparison.
  void lowerCaseCluster(const CaseCluster &Case, Operand *Src0, bool DoneCmp,
                        CfgNode *DefaultLabel = nullptr);

  using LowerBinOp = void (TargetX86Base::*)(Variable *, Operand *);
  void expandAtomicRMWAsCmpxchg(LowerBinOp op_lo, LowerBinOp op_hi,
                                Variable *Dest, Operand *Ptr, Operand *Val);

  void eliminateNextVectorSextInstruction(Variable *SignExtendedResult);

  void emitGetIP(CfgNode *Node) {
    dispatchToConcrete(&Traits::ConcreteTarget::emitGetIP, std::move(Node));
  }
  /// Emit a sandboxed return sequence rather than a return.
  void emitSandboxedReturn() {
    dispatchToConcrete(&Traits::ConcreteTarget::emitSandboxedReturn);
  }

  void emitStackProbe(size_t StackSizeBytes) {
    dispatchToConcrete(&Traits::ConcreteTarget::emitStackProbe,
                       std::move(StackSizeBytes));
  }

  /// Emit just the call instruction (without argument or return variable
  /// processing), sandboxing if needed.
  virtual Inst *emitCallToTarget(Operand *CallTarget, Variable *ReturnReg,
                                 size_t NumVariadicFpArgs = 0) = 0;
  /// Materialize the moves needed to return a value of the specified type.
  virtual Variable *moveReturnValueToRegister(Operand *Value,
                                              Type ReturnType) = 0;

  /// Emit a jump table to the constant pool.
  void emitJumpTable(const Cfg *Func,
                     const InstJumpTable *JumpTable) const override;

  /// Emit a fake use of esp to make sure esp stays alive for the entire
  /// function. Otherwise some esp adjustments get dead-code eliminated.
  void keepEspLiveAtExit() {
    Variable *esp =
        Func->getTarget()->getPhysicalRegister(getStackReg(), Traits::WordType);
    Context.insert<InstFakeUse>(esp);
  }

  /// Operand legalization helpers. To deal with address mode constraints, the
  /// helpers will create a new Operand and emit instructions that guarantee
  /// that the Operand kind is one of those indicated by the LegalMask (a
  /// bitmask of allowed kinds). If the input Operand is known to already meet
  /// the constraints, it may be simply returned as the result, without creating
  /// any new instructions or operands.
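  ///
  /// For example (an illustrative call; x86 instructions cannot take two
  /// memory operands), a lowering might constrain a source operand with:
  ///
  ///   Operand *Src = legalize(Src0, Legal_Reg | Legal_Imm);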
  enum OperandLegalization {
    Legal_None = 0,
    Legal_Reg = 1 << 0, // physical register, not stack location
    Legal_Imm = 1 << 1,
    Legal_Mem = 1 << 2, // includes [eax+4*ecx] as well as [esp+12]
    Legal_Rematerializable = 1 << 3,
    Legal_AddrAbs = 1 << 4, // ConstantRelocatable doesn't have to add RebasePtr
    Legal_Default = ~(Legal_Rematerializable | Legal_AddrAbs)
    // TODO(stichnot): Figure out whether this default works for x86-64.
  };
  using LegalMask = uint32_t;
  Operand *legalize(Operand *From, LegalMask Allowed = Legal_Default,
                    RegNumT RegNum = RegNumT());
  Variable *legalizeToReg(Operand *From, RegNumT RegNum = RegNumT());
  /// Legalize the first source operand for use in the cmp instruction.
  Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1);
  /// Turn a pointer operand into a memory operand that can be used by a real
  /// load/store operation. Legalizes the operand as well. This is a nop if the
  /// operand is already a legal memory operand.
  X86OperandMem *formMemoryOperand(Operand *Ptr, Type Ty,
                                   bool DoLegalize = true);

  Variable *makeReg(Type Ty, RegNumT RegNum = RegNumT());
  static Type stackSlotType();

  static constexpr uint32_t NoSizeLimit = 0;
  /// Returns the largest type which is equal to or larger than Size bytes. The
  /// type is suitable for copying memory i.e. a load and store will be a single
  /// instruction (for example x86 will get f64 not i64).
  static Type largestTypeInSize(uint32_t Size, uint32_t MaxSize = NoSizeLimit);
  /// Returns the smallest type which is equal to or larger than Size bytes. If
  /// one doesn't exist then the largest type smaller than Size bytes is
  /// returned. The type is suitable for memory copies as described at
  /// largestTypeInSize.
  static Type firstTypeThatFitsSize(uint32_t Size,
                                    uint32_t MaxSize = NoSizeLimit);

  Variable *copyToReg8(Operand *Src, RegNumT RegNum = RegNumT());
  Variable *copyToReg(Operand *Src, RegNumT RegNum = RegNumT());

  /// Returns a register containing all zeros, without affecting the FLAGS
  /// register, using the best instruction for the type.
  Variable *makeZeroedRegister(Type Ty, RegNumT RegNum = RegNumT());

  /// \name Returns a vector in a register with the given constant entries.
  /// @{
  Variable *makeVectorOfZeros(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfOnes(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfMinusOnes(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfHighOrderBits(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfFabsMask(Type Ty, RegNumT RegNum = RegNumT());
  /// @}

  /// Return a memory operand corresponding to a stack allocated Variable.
  X86OperandMem *getMemoryOperandForStackSlot(Type Ty, Variable *Slot,
                                              uint32_t Offset = 0);

  void
  makeRandomRegisterPermutation(llvm::SmallVectorImpl<RegNumT> &Permutation,
                                const SmallBitVector &ExcludeRegisters,
                                uint64_t Salt) const override;

  /// AutoMemorySandboxer emits a bundle-lock/bundle-unlock pair if the
  /// instruction's operand is a memory reference. This is only needed for
  /// x86-64 NaCl sandbox.
  template <InstBundleLock::Option BundleLockOpt = InstBundleLock::Opt_None>
  class AutoMemorySandboxer {
    AutoMemorySandboxer() = delete;
    AutoMemorySandboxer(const AutoMemorySandboxer &) = delete;
    AutoMemorySandboxer &operator=(const AutoMemorySandboxer &) = delete;

  private:
    typename Traits::TargetLowering *Target;

    template <typename T, typename... Tail>
    X86OperandMem **findMemoryReference(T **First, Tail... Others) {
      if (llvm::isa<X86OperandMem>(*First)) {
        return reinterpret_cast<X86OperandMem **>(First);
      }
      return findMemoryReference(Others...);
    }

    X86OperandMem **findMemoryReference() { return nullptr; }

  public:
    AutoBundle *Bundler = nullptr;
    X86OperandMem **const MemOperand;

    template <typename... T>
    AutoMemorySandboxer(typename Traits::TargetLowering *Target, T... Args)
        : Target(Target), MemOperand(Target->SandboxingType == ST_None
                                         ? nullptr
                                         : findMemoryReference(Args...)) {
      if (MemOperand != nullptr) {
        if (Traits::Is64Bit) {
          Bundler = new (Target->Func->template allocate<AutoBundle>())
              AutoBundle(Target, BundleLockOpt);
        }
        *MemOperand = Target->_sandbox_mem_reference(*MemOperand);
      }
    }

    ~AutoMemorySandboxer() {
      if (Bundler != nullptr) {
        Bundler->~AutoBundle();
      }
    }
  };

  /// The following are helpers that insert lowered x86 instructions with
  /// minimal syntactic overhead, so that the lowering code can look as close to
  /// assembly as practical.
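  ///
  /// For example, a scalar integer add could be lowered as (a sketch, with
  /// hypothetical variables):
  ///
  ///   Variable *T = nullptr;
  ///   _mov(T, Src0); // t = src0
  ///   _add(T, Src1); // t += src1
  ///   _mov(Dest, T); // dest = t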
  void _adc(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Adc>(Dest, Src0);
  }
  void _adc_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::AdcRMW>(DestSrc0, Src1);
  }
  void _add(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Add>(Dest, Src0);
  }
  void _add_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::AddRMW>(DestSrc0, Src1);
  }
  void _addps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Addps>(Dest, Src0);
  }
  void _addss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Addss>(Dest, Src0);
  }
  void _add_sp(Operand *Adjustment) {
    dispatchToConcrete(&Traits::ConcreteTarget::_add_sp, std::move(Adjustment));
  }
  void _and(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::And>(Dest, Src0);
  }
  void _andnps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Andnps>(Dest, Src0);
  }
  void _andps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Andps>(Dest, Src0);
  }
  void _and_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::AndRMW>(DestSrc0, Src1);
  }
  void _blendvps(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Blendvps>(Dest, Src0, Src1);
  }
  void _br(BrCond Condition, CfgNode *TargetTrue, CfgNode *TargetFalse) {
    Context.insert<InstX86Br>(TargetTrue, TargetFalse, Condition,
                              InstX86Br::Far);
  }
  void _br(CfgNode *Target) {
    Context.insert<InstX86Br>(Target, InstX86Br::Far);
  }
  void _br(BrCond Condition, CfgNode *Target) {
    Context.insert<InstX86Br>(Target, Condition, InstX86Br::Far);
  }
  void _br(BrCond Condition, InstX86Label *Label,
           typename InstX86Br::Mode Kind = InstX86Br::Near) {
    Context.insert<InstX86Br>(Label, Condition, Kind);
  }
  void _bsf(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Bsf>(Dest, Src0);
  }
  void _bsr(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Bsr>(Dest, Src0);
  }
  void _bswap(Variable *SrcDest) {
    AutoMemorySandboxer<> _(this, &SrcDest);
    Context.insert<typename Traits::Insts::Bswap>(SrcDest);
  }
  void _cbwdq(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cbwdq>(Dest, Src0);
  }
  void _cmov(Variable *Dest, Operand *Src0, BrCond Condition) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cmov>(Dest, Src0, Condition);
  }
  void _cmp(Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Icmp>(Src0, Src1);
  }
  void _cmpps(Variable *Dest, Operand *Src0, CmppsCond Condition) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cmpps>(Dest, Src0, Condition);
  }
  void _cmpxchg(Operand *DestOrAddr, Variable *Eax, Variable *Desired,
                bool Locked) {
    AutoMemorySandboxer<> _(this, &DestOrAddr);
    Context.insert<typename Traits::Insts::Cmpxchg>(DestOrAddr, Eax, Desired,
                                                    Locked);
    // Mark eax as possibly modified by cmpxchg.
    Context.insert<InstFakeDef>(Eax, llvm::dyn_cast<Variable>(DestOrAddr));
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Eax);
  }
  void _cmpxchg8b(X86OperandMem *Addr, Variable *Edx, Variable *Eax,
                  Variable *Ecx, Variable *Ebx, bool Locked) {
    AutoMemorySandboxer<> _(this, &Addr);
    Context.insert<typename Traits::Insts::Cmpxchg8b>(Addr, Edx, Eax, Ecx, Ebx,
                                                      Locked);
    // Mark edx and eax as possibly modified by cmpxchg8b.
    Context.insert<InstFakeDef>(Edx);
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Edx);
    Context.insert<InstFakeDef>(Eax);
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Eax);
  }
  void _cvt(Variable *Dest, Operand *Src0,
            typename Traits::Insts::Cvt::CvtVariant Variant) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cvt>(Dest, Src0, Variant);
  }
  void _round(Variable *Dest, Operand *Src0, Operand *Imm) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Round>(Dest, Src0, Imm);
  }
  void _div(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Div>(Dest, Src0, Src1);
  }
  void _divps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Divps>(Dest, Src0);
  }
  void _divss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Divss>(Dest, Src0);
  }
  template <typename T = Traits>
  typename std::enable_if<T::UsesX87, void>::type _fld(Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Src0);
    Context.insert<typename Traits::Insts::template Fld<>>(Src0);
  }
  // TODO(jpp): when implementing the X8664 calling convention, make sure x8664
  // does not invoke this method, and remove it.
  template <typename T = Traits>
  typename std::enable_if<!T::UsesX87, void>::type _fld(Operand *) {
    llvm::report_fatal_error("fld is not available in x86-64");
  }
  template <typename T = Traits>
  typename std::enable_if<T::UsesX87, void>::type _fstp(Variable *Dest) {
    AutoMemorySandboxer<> _(this, &Dest);
    Context.insert<typename Traits::Insts::template Fstp<>>(Dest);
  }
  // TODO(jpp): when implementing the X8664 calling convention, make sure x8664
  // does not invoke this method, and remove it.
  template <typename T = Traits>
  typename std::enable_if<!T::UsesX87, void>::type _fstp(Variable *) {
    llvm::report_fatal_error("fstp is not available in x86-64");
  }
  void _idiv(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Idiv>(Dest, Src0, Src1);
  }
  void _imul(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Imul>(Dest, Src0);
  }
  void _imul_imm(Variable *Dest, Operand *Src0, Constant *Imm) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::ImulImm>(Dest, Src0, Imm);
  }
  void _insertps(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Insertps>(Dest, Src0, Src1);
  }
  void _int3() { Context.insert<typename Traits::Insts::Int3>(); }
  void _jmp(Operand *Target) {
    AutoMemorySandboxer<> _(this, &Target);
    Context.insert<typename Traits::Insts::Jmp>(Target);
  }
  void _lea(Variable *Dest, Operand *Src0) {
    Context.insert<typename Traits::Insts::Lea>(Dest, Src0);
  }
  void _link_bp() { dispatchToConcrete(&Traits::ConcreteTarget::_link_bp); }
  void _push_reg(RegNumT RegNum) {
    dispatchToConcrete(&Traits::ConcreteTarget::_push_reg, std::move(RegNum));
  }
  void _pop_reg(RegNumT RegNum) {
    dispatchToConcrete(&Traits::ConcreteTarget::_pop_reg, std::move(RegNum));
  }
  void _mfence() { Context.insert<typename Traits::Insts::Mfence>(); }
  /// Moves can be used to redefine registers, creating "partial kills" for
  /// liveness.  Mark where moves are used in this way.
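  /// A sketch of the idiom (hypothetical variables):
  ///
  ///   _redefined(_mov(Dest, Src)); // Dest is redefined, not freshly defined.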
  void _redefined(Inst *MovInst, bool IsRedefinition = true) {
    if (IsRedefinition)
      MovInst->setDestRedefined();
  }
  /// If Dest=nullptr is passed in, then a new variable is created, marked as
  /// infinite register allocation weight, and returned through the in/out Dest
  /// argument.
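  ///
  /// For example (illustrative; register names are target-specific), lowering
  /// code can materialize a value in a specific physical register:
  ///
  ///   Variable *T_Eax = nullptr;
  ///   _mov(T_Eax, Src0, Traits::RegisterSet::Reg_eax); // T_Eax is created
  ///                                                    // and pinned to eax.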
  typename Traits::Insts::Mov *_mov(Variable *&Dest, Operand *Src0,
                                    RegNumT RegNum = RegNumT()) {
    if (Dest == nullptr)
      Dest = makeReg(Src0->getType(), RegNum);
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    return Context.insert<typename Traits::Insts::Mov>(Dest, Src0);
  }
  void _mov_sp(Operand *NewValue) {
    dispatchToConcrete(&Traits::ConcreteTarget::_mov_sp, std::move(NewValue));
  }
  typename Traits::Insts::Movp *_movp(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    return Context.insert<typename Traits::Insts::Movp>(Dest, Src0);
  }
  void _movd(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movd>(Dest, Src0);
  }
  void _movq(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movq>(Dest, Src0);
  }
  void _movss(Variable *Dest, Variable *Src0) {
    Context.insert<typename Traits::Insts::MovssRegs>(Dest, Src0);
  }
  void _movsx(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movsx>(Dest, Src0);
  }
  typename Traits::Insts::Movzx *_movzx(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    return Context.insert<typename Traits::Insts::Movzx>(Dest, Src0);
  }
  void _maxss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Maxss>(Dest, Src0);
  }
  void _minss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Minss>(Dest, Src0);
  }
  void _maxps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Maxps>(Dest, Src0);
  }
  void _minps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Minps>(Dest, Src0);
  }
  void _mul(Variable *Dest, Variable *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Mul>(Dest, Src0, Src1);
  }
  void _mulps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Mulps>(Dest, Src0);
  }
  void _mulss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Mulss>(Dest, Src0);
  }
  void _neg(Variable *SrcDest) {
    AutoMemorySandboxer<> _(this, &SrcDest);
    Context.insert<typename Traits::Insts::Neg>(SrcDest);
  }
  void _nop(SizeT Variant) {
    Context.insert<typename Traits::Insts::Nop>(Variant);
  }
  void _or(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Or>(Dest, Src0);
  }
  void _orps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Orps>(Dest, Src0);
  }
  void _or_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::OrRMW>(DestSrc0, Src1);
  }
  void _padd(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Padd>(Dest, Src0);
  }
  void _padds(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Padds>(Dest, Src0);
  }
  void _paddus(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Paddus>(Dest, Src0);
  }
  void _pand(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pand>(Dest, Src0);
  }
  void _pandn(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pandn>(Dest, Src0);
  }
  void _pblendvb(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pblendvb>(Dest, Src0, Src1);
  }
  void _pcmpeq(Variable *Dest, Operand *Src0,
               Type ArithmeticTypeOverride = IceType_void) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pcmpeq>(Dest, Src0,
                                                   ArithmeticTypeOverride);
  }
  void _pcmpgt(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pcmpgt>(Dest, Src0);
  }
  void _pextr(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pextr>(Dest, Src0, Src1);
  }
  void _pinsr(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pinsr>(Dest, Src0, Src1);
  }
  void _pmull(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmull>(Dest, Src0);
  }
  void _pmulhw(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmulhw>(Dest, Src0);
  }
  void _pmulhuw(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmulhuw>(Dest, Src0);
  }
  void _pmaddwd(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmaddwd>(Dest, Src0);
  }
  void _pmuludq(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmuludq>(Dest, Src0);
  }
  void _pop(Variable *Dest) {
    Context.insert<typename Traits::Insts::Pop>(Dest);
  }
  void _por(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Por>(Dest, Src0);
  }
  void _punpckl(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Punpckl>(Dest, Src0);
  }
  void _punpckh(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Punpckh>(Dest, Src0);
  }
  void _packss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Packss>(Dest, Src0);
  }
  void _packus(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Packus>(Dest, Src0);
  }
  void _pshufb(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pshufb>(Dest, Src0);
  }
  void _pshufd(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pshufd>(Dest, Src0, Src1);
  }
  void _psll(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psll>(Dest, Src0);
  }
  void _psra(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psra>(Dest, Src0);
  }
  void _psrl(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psrl>(Dest, Src0);
  }
  void _psub(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psub>(Dest, Src0);
  }
  void _psubs(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psubs>(Dest, Src0);
  }
  void _psubus(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psubus>(Dest, Src0);
  }
  void _push(Operand *Src0) {
    Context.insert<typename Traits::Insts::Push>(Src0);
  }
  void _pxor(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pxor>(Dest, Src0);
  }
  void _ret(Variable *Src0 = nullptr) {
    Context.insert<typename Traits::Insts::Ret>(Src0);
  }
  void _rol(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Rol>(Dest, Src0);
  }
  void _round(Variable *Dest, Operand *Src, Constant *Imm) {
    AutoMemorySandboxer<> _(this, &Dest, &Src);
    Context.insert<typename Traits::Insts::Round>(Dest, Src, Imm);
  }
  X86OperandMem *_sandbox_mem_reference(X86OperandMem *Mem) {
    return dispatchToConcrete(&Traits::ConcreteTarget::_sandbox_mem_reference,
                              std::move(Mem));
  }
  void _sar(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sar>(Dest, Src0);
  }
  void _sbb(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sbb>(Dest, Src0);
  }
  void _sbb_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::SbbRMW>(DestSrc0, Src1);
  }
  void _setcc(Variable *Dest, BrCond Condition) {
    Context.insert<typename Traits::Insts::Setcc>(Dest, Condition);
  }
  void _shl(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Shl>(Dest, Src0);
  }
  void _shld(Variable *Dest, Variable *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Shld>(Dest, Src0, Src1);
  }
  void _shr(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Shr>(Dest, Src0);
  }
  void _shrd(Variable *Dest, Variable *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Shrd>(Dest, Src0, Src1);
  }
  void _shufps(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Shufps>(Dest, Src0, Src1);
  }
  void _movmsk(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movmsk>(Dest, Src0);
  }
  void _sqrt(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sqrt>(Dest, Src0);
  }
  void _store(Operand *Value, X86Operand *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::Store>(Value, Mem);
  }
  void _storep(Variable *Value, X86OperandMem *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::StoreP>(Value, Mem);
  }
  void _storeq(Operand *Value, X86OperandMem *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::StoreQ>(Value, Mem);
  }
  void _stored(Operand *Value, X86OperandMem *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::StoreD>(Value, Mem);
  }
  void _sub(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sub>(Dest, Src0);
  }
  void _sub_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::SubRMW>(DestSrc0, Src1);
  }
  void _sub_sp(Operand *Adjustment) {
    dispatchToConcrete(&Traits::ConcreteTarget::_sub_sp, std::move(Adjustment));
  }
  void _subps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Subps>(Dest, Src0);
  }
  void _subss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Subss>(Dest, Src0);
  }
  void _test(Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Test>(Src0, Src1);
  }
  void _ucomiss(Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Ucomiss>(Src0, Src1);
  }
  void _ud2() { Context.insert<typename Traits::Insts::UD2>(); }
  void _unlink_bp() { dispatchToConcrete(&Traits::ConcreteTarget::_unlink_bp); }
  void _xadd(Operand *Dest, Variable *Src, bool Locked) {
    AutoMemorySandboxer<> _(this, &Dest, &Src);
    Context.insert<typename Traits::Insts::Xadd>(Dest, Src, Locked);
    // The xadd exchanges Dest and Src (modifying Src). Model that update with
    // a FakeDef followed by a FakeUse.
    Context.insert<InstFakeDef>(Src, llvm::dyn_cast<Variable>(Dest));
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Src);
  }
  void _xchg(Operand *Dest, Variable *Src) {
    AutoMemorySandboxer<> _(this, &Dest, &Src);
    Context.insert<typename Traits::Insts::Xchg>(Dest, Src);
    // The xchg modifies Dest and Src -- model that update with a
    // FakeDef/FakeUse.
    Context.insert<InstFakeDef>(Src, llvm::dyn_cast<Variable>(Dest));
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Src);
  }
  void _xor(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Xor>(Dest, Src0);
  }
  void _xorps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Xorps>(Dest, Src0);
  }
  void _xor_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::XorRMW>(DestSrc0, Src1);
  }

  void _iaca_start() {
    if (!BuildDefs::minimal())
      Context.insert<typename Traits::Insts::IacaStart>();
  }
  void _iaca_end() {
    if (!BuildDefs::minimal())
      Context.insert<typename Traits::Insts::IacaEnd>();
  }

  /// This class helps wrap IACA markers around the code generated by the
  /// current scope. It means you don't need to put an end before each return.
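  ///
  /// Usage sketch: declare one at the top of the region to measure; the end
  /// marker is emitted automatically on scope exit:
  ///
  ///   ScopedIacaMark IacaMark(this);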
1068   class ScopedIacaMark {
1069     ScopedIacaMark(const ScopedIacaMark &) = delete;
1070     ScopedIacaMark &operator=(const ScopedIacaMark &) = delete;
1071 
1072   public:
ScopedIacaMark(TargetX86Base * Lowering)1073     ScopedIacaMark(TargetX86Base *Lowering) : Lowering(Lowering) {
1074       Lowering->_iaca_start();
1075     }
~ScopedIacaMark()1076     ~ScopedIacaMark() { end(); }
end()1077     void end() {
1078       if (!Lowering)
1079         return;
1080       Lowering->_iaca_end();
1081       Lowering = nullptr;
1082     }
1083 
1084   private:
1085     TargetX86Base *Lowering;
1086   };

  bool optimizeScalarMul(Variable *Dest, Operand *Src0, int32_t Src1);
  void findRMW();

  InstructionSetEnum InstructionSet = Traits::InstructionSet::Begin;
  bool IsEbpBasedFrame = false;
  size_t RequiredStackAlignment = sizeof(typename Traits::WordType);
  size_t SpillAreaSizeBytes = 0;
  size_t FixedAllocaSizeBytes = 0;
  size_t FixedAllocaAlignBytes = 0;
  bool PrologEmitsFixedAllocas = false;
  uint32_t MaxOutArgsSizeBytes = 0;
  static std::array<SmallBitVector, RCX86_NUM> TypeToRegisterSet;
  static std::array<SmallBitVector, RCX86_NUM> TypeToRegisterSetUnfiltered;
  static std::array<SmallBitVector, Traits::RegisterSet::Reg_NUM>
      RegisterAliases;
  SmallBitVector RegsUsed;
  std::array<VarList, IceType_NUM> PhysicalRegisters;
  // RebasePtr is a Variable that holds the rebasing pointer (if any) for the
  // current sandboxing type.
  Variable *RebasePtr = nullptr;

  /// Randomize or pool a given immediate operand.
  Operand *randomizeOrPoolImmediate(Constant *Immediate,
                                    RegNumT RegNum = RegNumT());
  X86OperandMem *randomizeOrPoolImmediate(X86OperandMem *MemOperand,
                                          RegNumT RegNum = RegNumT());
  bool RandomizationPoolingPaused = false;
private:
  /// dispatchToConcrete is the template voodoo that allows TargetX86Base to
  /// invoke methods in Machine (which inherits from TargetX86Base) without
  /// having to rely on virtual method calls. There are two overloads, one for
  /// non-void types and one for void types. We need this because, for non-void
  /// types, we need to return the method result, whereas for void, we don't.
  /// While it is true that the code compiles without the void "version", there
  /// used to be a time when compilers would reject such code.
  ///
  /// This machinery is far from perfect. Note that, in particular, the
  /// arguments provided to dispatchToConcrete() need to match the arguments for
  /// Method **exactly** (i.e., no argument promotion is performed.)
  template <typename Ret, typename... Args>
  typename std::enable_if<!std::is_void<Ret>::value, Ret>::type
  dispatchToConcrete(Ret (ConcreteTarget::*Method)(Args...), Args &&... args) {
    return (static_cast<ConcreteTarget *>(this)->*Method)(
        std::forward<Args>(args)...);
  }

  template <typename... Args>
  void dispatchToConcrete(void (ConcreteTarget::*Method)(Args...),
                          Args &&... args) {
    (static_cast<ConcreteTarget *>(this)->*Method)(std::forward<Args>(args)...);
  }
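  // Intended call pattern (the method name below is hypothetical; the real
  // hooks are declared by the concrete targets):
  //
  //   // void ConcreteTarget::_unsandbox_ret(); (hypothetical backend hook)
  //   dispatchToConcrete(&ConcreteTarget::_unsandbox_ret);
  //
  // Args is deduced from both the member-function pointer and the trailing
  // arguments, so the two must match exactly -- no promotions or implicit
  // conversions are applied.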

  void lowerShift64(InstArithmetic::OpKind Op, Operand *Src0Lo, Operand *Src0Hi,
                    Operand *Src1Lo, Variable *DestLo, Variable *DestHi);

  /// Emit the code for a combined operation and consumer instruction, or set
  /// the destination variable of the operation if Consumer == nullptr.
  void lowerIcmpAndConsumer(const InstIcmp *Icmp, const Inst *Consumer);
  void lowerFcmpAndConsumer(const InstFcmp *Fcmp, const Inst *Consumer);
  void lowerArithAndConsumer(const InstArithmetic *Arith, const Inst *Consumer);

  /// Emit a setcc instruction if Consumer == nullptr; otherwise emit a
  /// specialized version of Consumer.
  void setccOrConsumer(BrCond Condition, Variable *Dest, const Inst *Consumer);

  /// Emit a mov [1|0] instruction if Consumer == nullptr; otherwise emit a
  /// specialized version of Consumer.
  void movOrConsumer(bool IcmpResult, Variable *Dest, const Inst *Consumer);

  /// Emit the code for instructions with a vector type.
  void lowerIcmpVector(const InstIcmp *Icmp);
  void lowerFcmpVector(const InstFcmp *Fcmp);
  void lowerSelectVector(const InstSelect *Instr);

  /// Helpers for select lowering.
  void lowerSelectMove(Variable *Dest, BrCond Cond, Operand *SrcT,
                       Operand *SrcF);
  void lowerSelectIntMove(Variable *Dest, BrCond Cond, Operand *SrcT,
                          Operand *SrcF);
  /// Generic helper to move an arbitrary type from Src to Dest.
  void lowerMove(Variable *Dest, Operand *Src, bool IsRedefinition);

  /// Optimizations for idiom recognition.
  bool lowerOptimizeFcmpSelect(const InstFcmp *Fcmp, const InstSelect *Select);

  /// Complains loudly if invoked because the cpu can handle 64-bit types
  /// natively.
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, void>::type lowerIcmp64(const InstIcmp *,
                                                              const Inst *) {
    llvm::report_fatal_error(
        "Hey, yo! This is x86-64. Watcha doin'? (lowerIcmp64)");
  }
  /// lowerIcmp64 handles 64-bit icmp lowering on targets without native
  /// 64-bit integer support.
  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, void>::type
  lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer);
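  // Only one of the two lowerIcmp64 overloads above is viable for any given
  // Traits: std::enable_if removes the other from overload resolution
  // (SFINAE), so 32-bit targets get the real lowering while x86-64 builds
  // fail fast if it is ever reached.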

  BoolFolding<Traits> FoldingInfo;

  /// Helpers for lowering ShuffleVector
  /// @{
  Variable *lowerShuffleVector_AllFromSameSrc(Operand *Src, SizeT Index0,
                                              SizeT Index1, SizeT Index2,
                                              SizeT Index3);
  static constexpr SizeT IGNORE_INDEX = 0x80000000u;
  Variable *lowerShuffleVector_TwoFromSameSrc(Operand *Src0, SizeT Index0,
                                              SizeT Index1, Operand *Src1,
                                              SizeT Index2, SizeT Index3);
  static constexpr SizeT UNIFIED_INDEX_0 = 0;
  static constexpr SizeT UNIFIED_INDEX_1 = 2;
  Variable *lowerShuffleVector_UnifyFromDifferentSrcs(Operand *Src0,
                                                      SizeT Index0,
                                                      Operand *Src1,
                                                      SizeT Index1);
  static constexpr SizeT CLEAR_ALL_BITS = 0x80;
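  // Note: in a pshufb control mask, a byte with bit 7 set zeroes the
  // corresponding result byte (otherwise the low 4 bits select a source
  // byte), hence CLEAR_ALL_BITS = 0x80. For example, the mask byte 0x80
  // produces 0 regardless of the source contents.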
  SizeT PshufbMaskCount = 0;
  GlobalString lowerShuffleVector_NewMaskName();
  ConstantRelocatable *lowerShuffleVector_CreatePshufbMask(
      int8_t Idx0, int8_t Idx1, int8_t Idx2, int8_t Idx3, int8_t Idx4,
      int8_t Idx5, int8_t Idx6, int8_t Idx7, int8_t Idx8, int8_t Idx9,
      int8_t Idx10, int8_t Idx11, int8_t Idx12, int8_t Idx13, int8_t Idx14,
      int8_t Idx15);
  void lowerShuffleVector_UsingPshufb(Variable *Dest, Operand *Src0,
                                      Operand *Src1, int8_t Idx0, int8_t Idx1,
                                      int8_t Idx2, int8_t Idx3, int8_t Idx4,
                                      int8_t Idx5, int8_t Idx6, int8_t Idx7,
                                      int8_t Idx8, int8_t Idx9, int8_t Idx10,
                                      int8_t Idx11, int8_t Idx12, int8_t Idx13,
                                      int8_t Idx14, int8_t Idx15);
  /// @}

  static FixupKind PcRelFixup;
  static FixupKind AbsFixup;
};

template <typename TraitsType>
class TargetDataX86 final : public TargetDataLowering {
  using Traits = TraitsType;
  TargetDataX86() = delete;
  TargetDataX86(const TargetDataX86 &) = delete;
  TargetDataX86 &operator=(const TargetDataX86 &) = delete;

public:
  ~TargetDataX86() override = default;

  static std::unique_ptr<TargetDataLowering> create(GlobalContext *Ctx) {
    return makeUnique<TargetDataX86>(Ctx);
  }

  void lowerGlobals(const VariableDeclarationList &Vars,
                    const std::string &SectionSuffix) override;
  void lowerConstants() override;
  void lowerJumpTables() override;

private:
  ENABLE_MAKE_UNIQUE;

  explicit TargetDataX86(GlobalContext *Ctx) : TargetDataLowering(Ctx) {}
  template <typename T> static void emitConstantPool(GlobalContext *Ctx);
};
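// A typical factory call (hypothetical call site; a concrete TraitsType is
// supplied by whichever x86 target is enabled):
//
//   std::unique_ptr<TargetDataLowering> DL =
//       TargetDataX86<TraitsType>::create(Ctx);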

class TargetHeaderX86 : public TargetHeaderLowering {
  TargetHeaderX86() = delete;
  TargetHeaderX86(const TargetHeaderX86 &) = delete;
  TargetHeaderX86 &operator=(const TargetHeaderX86 &) = delete;

public:
  ~TargetHeaderX86() = default;

  static std::unique_ptr<TargetHeaderLowering> create(GlobalContext *Ctx) {
    return makeUnique<TargetHeaderX86>(Ctx);
  }

private:
  ENABLE_MAKE_UNIQUE;

  explicit TargetHeaderX86(GlobalContext *Ctx) : TargetHeaderLowering(Ctx) {}
};

} // end of namespace X86NAMESPACE
} // end of namespace Ice

#include "IceTargetLoweringX86BaseImpl.h"

#endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H