//===- subzero/src/IceTargetLoweringX86Base.h - x86 lowering ----*- C++ -*-===//
//
//                        The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Declares the TargetX86Base template class, which implements the
/// TargetLowering base interface for the x86 architecture.
///
//===----------------------------------------------------------------------===//

#ifndef SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H
#define SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H

#include "IceDefs.h"
#include "IceInst.h"
#include "IceSwitchLowering.h"
#include "IceTargetLowering.h"
#include "IceTargetLoweringX86RegClass.h"
#include "IceUtils.h"

#include <array>
#include <type_traits>
#include <utility>

#ifndef X86NAMESPACE
#error "You must define the X86 Target namespace."
#endif

namespace Ice {
namespace X86NAMESPACE {

using namespace ::Ice::X86;

template <typename Traits> class BoolFolding;

/// TargetX86Base is a template for all X86 Targets, and it relies on the CRTP
/// (Curiously Recurring Template Pattern) for generating code, delegating
/// target-specific lowerings (e.g., call, ret, and intrinsics) to the actual
/// backends. Backends are expected to implement the following methods (which
/// should be accessible from TargetX86Base):
///
/// Operand *createNaClReadTPSrcOperand()
///
/// Note: Ideally, we should be able to
///
///  static_assert(std::is_base_of<TargetX86Base<TraitsType>,
///  Machine>::value);
///
/// but that does not work: the compiler does not know that Machine inherits
/// from TargetX86Base at this point in translation.
template <typename TraitsType> class TargetX86Base : public TargetLowering {
  TargetX86Base() = delete;
  TargetX86Base(const TargetX86Base &) = delete;
  TargetX86Base &operator=(const TargetX86Base &) = delete;

public:
  using Traits = TraitsType;
  using ConcreteTarget = typename Traits::ConcreteTarget;
  using InstructionSetEnum = typename Traits::InstructionSet;

  using BrCond = typename Traits::Cond::BrCond;
  using CmppsCond = typename Traits::Cond::CmppsCond;

  using X86Address = typename Traits::Address;
  using X86Operand = typename Traits::X86Operand;
  using X86OperandMem = typename Traits::X86OperandMem;
  using SegmentRegisters = typename Traits::X86OperandMem::SegmentRegisters;

  using InstX86Br = typename Traits::Insts::Br;
  using InstX86FakeRMW = typename Traits::Insts::FakeRMW;
  using InstX86Label = typename Traits::Insts::Label;

  ~TargetX86Base() override = default;

  static void staticInit(GlobalContext *Ctx);
  static bool shouldBePooled(const Constant *C);
  static ::Ice::Type getPointerType();

  static FixupKind getPcRelFixup() { return PcRelFixup; }
  static FixupKind getAbsFixup() { return AbsFixup; }

  bool needSandboxing() const { return NeedSandboxing; }

  void translateOm1() override;
  void translateO2() override;
  void doLoadOpt();
  bool doBranchOpt(Inst *I, const CfgNode *NextNode) override;

  SizeT getNumRegisters() const override {
    return Traits::RegisterSet::Reg_NUM;
  }

  Inst *createLoweredMove(Variable *Dest, Variable *SrcVar) override {
    if (isVectorType(Dest->getType())) {
      return Traits::Insts::Movp::create(Func, Dest, SrcVar);
    }
    return Traits::Insts::Mov::create(Func, Dest, SrcVar);
  }

  Variable *getPhysicalRegister(RegNumT RegNum,
                                Type Ty = IceType_void) override;
  const char *getRegName(RegNumT RegNum, Type Ty) const override;
  static const char *getRegClassName(RegClass C) {
    auto ClassNum = static_cast<RegClassX86>(C);
    assert(ClassNum < RCX86_NUM);
    switch (ClassNum) {
    default:
      assert(C < RC_Target);
      return regClassString(C);
    case RCX86_Is64To8:
      return "i64to8"; // 64-bit GPR truncatable to i8
    case RCX86_Is32To8:
      return "i32to8"; // 32-bit GPR truncatable to i8
    case RCX86_Is16To8:
      return "i16to8"; // 16-bit GPR truncatable to i8
    case RCX86_IsTrunc8Rcvr:
      return "i8from"; // 8-bit GPR truncatable from wider GPRs
    case RCX86_IsAhRcvr:
      return "i8fromah"; // 8-bit GPR that ah can be assigned to
    }
  }
  SmallBitVector getRegisterSet(RegSetMask Include,
                                RegSetMask Exclude) const override;
  const SmallBitVector &
  getRegistersForVariable(const Variable *Var) const override {
    RegClass RC = Var->getRegClass();
    assert(static_cast<RegClassX86>(RC) < RCX86_NUM);
    return TypeToRegisterSet[RC];
  }

  const SmallBitVector &
  getAllRegistersForVariable(const Variable *Var) const override {
    RegClass RC = Var->getRegClass();
    assert(static_cast<RegClassX86>(RC) < RCX86_NUM);
    return TypeToRegisterSetUnfiltered[RC];
  }

  const SmallBitVector &getAliasesForRegister(RegNumT Reg) const override {
    Reg.assertIsValid();
    return RegisterAliases[Reg];
  }

  bool hasFramePointer() const override { return IsEbpBasedFrame; }
  void setHasFramePointer() override { IsEbpBasedFrame = true; }
  RegNumT getStackReg() const override { return Traits::StackPtr; }
  RegNumT getFrameReg() const override { return Traits::FramePtr; }
  RegNumT getFrameOrStackReg() const override {
    // If the stack pointer needs to be aligned, then the frame pointer is
    // unaligned, so always use the stack pointer.
    if (needsStackPointerAlignment())
      return getStackReg();
    return IsEbpBasedFrame ? getFrameReg() : getStackReg();
  }
  size_t typeWidthInBytesOnStack(Type Ty) const override {
    // Round up to the next multiple of WordType bytes.
    const uint32_t WordSizeInBytes = typeWidthInBytes(Traits::WordType);
    return Utils::applyAlignment(typeWidthInBytes(Ty), WordSizeInBytes);
  }
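  // typeWidthInBytesOnStack example (illustrative): with Traits::WordType ==
  // IceType_i32, an i8 occupies 4 bytes on the stack (1 rounded up to 4).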
  uint32_t getStackAlignment() const override {
    return Traits::X86_STACK_ALIGNMENT_BYTES;
  }
  bool needsStackPointerAlignment() const override {
    // If the ABI's stack alignment is smaller than the vector size (16 bytes),
    // use the (realigned) stack pointer for addressing any stack variables.
    return Traits::X86_STACK_ALIGNMENT_BYTES < 16;
  }
  void reserveFixedAllocaArea(size_t Size, size_t Align) override {
    FixedAllocaSizeBytes = Size;
    assert(llvm::isPowerOf2_32(Align));
    FixedAllocaAlignBytes = Align;
    PrologEmitsFixedAllocas = true;
  }
  /// Returns the (negative) offset from ebp/rbp where the fixed Allocas start.
  int32_t getFrameFixedAllocaOffset() const override {
    return FixedAllocaSizeBytes - (SpillAreaSizeBytes - maxOutArgsSizeBytes());
  }
  virtual uint32_t maxOutArgsSizeBytes() const override {
    return MaxOutArgsSizeBytes;
  }
  virtual void updateMaxOutArgsSizeBytes(uint32_t Size) {
    MaxOutArgsSizeBytes = std::max(MaxOutArgsSizeBytes, Size);
  }

  bool shouldSplitToVariable64On32(Type Ty) const override {
    return Traits::Is64Bit ? false : Ty == IceType_i64;
  }
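  // On a 32-bit target, a split i64 is modeled as a lo/hi pair of i32
  // variables; loOperand()/hiOperand() below extract the two halves.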

  ConstantRelocatable *createGetIPForRegister(const Variable *Dest) {
    assert(Dest->hasReg());
    const std::string RegName = Traits::getRegName(Dest->getRegNum());
    return llvm::cast<ConstantRelocatable>(Ctx->getConstantExternSym(
        Ctx->getGlobalString(H_getIP_prefix + RegName)));
  }

  SizeT getMinJumpTableSize() const override { return 4; }

  void emitVariable(const Variable *Var) const override;

  void emit(const ConstantInteger32 *C) const final;
  void emit(const ConstantInteger64 *C) const final;
  void emit(const ConstantFloat *C) const final;
  void emit(const ConstantDouble *C) const final;
  void emit(const ConstantUndef *C) const final;
  void emit(const ConstantRelocatable *C) const final;

  void initNodeForLowering(CfgNode *Node) override;

  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, Operand>::type *
  loOperand(Operand *Operand);
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, Operand>::type *loOperand(Operand *) {
    llvm::report_fatal_error(
        "Hey, yo! This is x86-64. Watcha doin'? (loOperand)");
  }

  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, Operand>::type *
  hiOperand(Operand *Operand);
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, Operand>::type *hiOperand(Operand *) {
    llvm::report_fatal_error(
        "Hey, yo! This is x86-64. Watcha doin'? (hiOperand)");
  }

  void addProlog(CfgNode *Node) override;
  void finishArgumentLowering(Variable *Arg, Variable *FramePtr,
                              size_t BasicFrameOffset, size_t StackAdjBytes,
                              size_t &InArgsSizeBytes);
  void addEpilog(CfgNode *Node) override;
  X86Address stackVarToAsmOperand(const Variable *Var) const;

  InstructionSetEnum getInstructionSet() const { return InstructionSet; }
  Operand *legalizeUndef(Operand *From, RegNumT RegNum = RegNumT());

protected:
  const bool NeedSandboxing;

  explicit TargetX86Base(Cfg *Func);

  void postLower() override;

  /// Initializes the RebasePtr member variable -- if so required by
  /// SandboxingType for the concrete Target.
  void initRebasePtr() {
    assert(SandboxingType != ST_None);
    dispatchToConcrete(&Traits::ConcreteTarget::initRebasePtr);
  }

  /// Emit code that initializes the value of the RebasePtr near the start of
  /// the function -- if so required by SandboxingType for the concrete type.
  void initSandbox() {
    assert(SandboxingType != ST_None);
    dispatchToConcrete(&Traits::ConcreteTarget::initSandbox);
  }

  void lowerAlloca(const InstAlloca *Instr) override;
  void lowerArguments() override;
  void lowerArithmetic(const InstArithmetic *Instr) override;
  void lowerAssign(const InstAssign *Instr) override;
  void lowerBr(const InstBr *Instr) override;
  void lowerBreakpoint(const InstBreakpoint *Instr) override;
  void lowerCall(const InstCall *Instr) override;
  void lowerCast(const InstCast *Instr) override;
  void lowerExtractElement(const InstExtractElement *Instr) override;
  void lowerFcmp(const InstFcmp *Instr) override;
  void lowerIcmp(const InstIcmp *Instr) override;

  void lowerIntrinsic(const InstIntrinsic *Instr) override;
  void lowerInsertElement(const InstInsertElement *Instr) override;
  void lowerLoad(const InstLoad *Instr) override;
  void lowerPhi(const InstPhi *Instr) override;
  void lowerRet(const InstRet *Instr) override;
  void lowerSelect(const InstSelect *Instr) override;
  void lowerShuffleVector(const InstShuffleVector *Instr) override;
  void lowerStore(const InstStore *Instr) override;
  void lowerSwitch(const InstSwitch *Instr) override;
  void lowerUnreachable(const InstUnreachable *Instr) override;
  void lowerOther(const Inst *Instr) override;
  void lowerRMW(const InstX86FakeRMW *RMW);
  void prelowerPhis() override;
  uint32_t getCallStackArgumentsSizeBytes(const CfgVector<Type> &ArgTypes,
                                          Type ReturnType);
  uint32_t getCallStackArgumentsSizeBytes(const InstCall *Instr) override;
  void genTargetHelperCallFor(Inst *Instr) override;

  /// OptAddr wraps all the possible operands that an x86 address might have.
  struct OptAddr {
    Variable *Base = nullptr;
    Variable *Index = nullptr;
    uint16_t Shift = 0;
    int32_t Offset = 0;
    ConstantRelocatable *Relocatable = nullptr;
  };
  /// Legalizes Addr w.r.t. SandboxingType. The exact type of legalization
  /// varies for different <Target, SandboxingType> tuples.
  bool legalizeOptAddrForSandbox(OptAddr *Addr) {
    return dispatchToConcrete(
        &Traits::ConcreteTarget::legalizeOptAddrForSandbox, std::move(Addr));
  }
  // Builds information for a canonical address expression:
  //   <Relocatable + Offset>(Base, Index, Shift)
  X86OperandMem *computeAddressOpt(const Inst *Instr, Type MemType,
                                   Operand *Addr);
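  // For example (illustrative), a load of arr[i*4 + 12] canonicalizes to
  // Base = arr, Index = i, Shift = 2 (scale 4), Offset = 12.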
  void doAddressOptOther() override;
  void doAddressOptLoad() override;
  void doAddressOptStore() override;
  void doAddressOptLoadSubVector() override;
  void doAddressOptStoreSubVector() override;
  void doMockBoundsCheck(Operand *Opnd) override;

  /// Naive lowering of cmpxchg.
  void lowerAtomicCmpxchg(Variable *DestPrev, Operand *Ptr, Operand *Expected,
                          Operand *Desired);
  /// Attempt a more optimized lowering of cmpxchg. Returns true if optimized.
  bool tryOptimizedCmpxchgCmpBr(Variable *DestPrev, Operand *Ptr,
                                Operand *Expected, Operand *Desired);
  void lowerAtomicRMW(Variable *Dest, uint32_t Operation, Operand *Ptr,
                      Operand *Val);
  void lowerCountZeros(bool Cttz, Type Ty, Variable *Dest, Operand *FirstVal,
                       Operand *SecondVal);
  /// Load from memory for a given type.
  void typedLoad(Type Ty, Variable *Dest, Variable *Base, Constant *Offset);
  /// Store to memory for a given type.
  void typedStore(Type Ty, Variable *Value, Variable *Base, Constant *Offset);
  /// Copy memory of given type from Src to Dest using OffsetAmt on both.
  void copyMemory(Type Ty, Variable *Dest, Variable *Src, int32_t OffsetAmt);
  /// Replace some calls to memcpy with inline instructions.
  void lowerMemcpy(Operand *Dest, Operand *Src, Operand *Count);
  /// Replace some calls to memmove with inline instructions.
  void lowerMemmove(Operand *Dest, Operand *Src, Operand *Count);
  /// Replace some calls to memset with inline instructions.
  void lowerMemset(Operand *Dest, Operand *Val, Operand *Count);

  /// Lower an indirect jump, adding sandboxing when needed.
  void lowerIndirectJump(Variable *JumpTarget) {
    // Without std::move below, the compiler deduces that the argument to
    // lowerIndirectJump is a Variable *&, not a Variable *.
    dispatchToConcrete(&Traits::ConcreteTarget::lowerIndirectJump,
                       std::move(JumpTarget));
  }

  /// Check that the comparison is in [Min,Max]. The flags register will be
  /// modified as follows:
  ///   - below equal, if in range
  ///   - above, if not in range
  /// The index into the range is returned.
  Operand *lowerCmpRange(Operand *Comparison, uint64_t Min, uint64_t Max);
  /// Lowering of a cluster of switch cases. If the case is not matched,
  /// control passes to the default label provided. If the default label is
  /// nullptr then control falls through to the next instruction. DoneCmp
  /// should be true if the flags already contain the result of a comparison
  /// with the Comparison.
  void lowerCaseCluster(const CaseCluster &Case, Operand *Src0, bool DoneCmp,
                        CfgNode *DefaultLabel = nullptr);

  using LowerBinOp = void (TargetX86Base::*)(Variable *, Operand *);
  void expandAtomicRMWAsCmpxchg(LowerBinOp op_lo, LowerBinOp op_hi,
                                Variable *Dest, Operand *Ptr, Operand *Val);

  void eliminateNextVectorSextInstruction(Variable *SignExtendedResult);

  void emitGetIP(CfgNode *Node) {
    dispatchToConcrete(&Traits::ConcreteTarget::emitGetIP, std::move(Node));
  }
  /// Emit a sandboxed return sequence rather than a return.
  void emitSandboxedReturn() {
    dispatchToConcrete(&Traits::ConcreteTarget::emitSandboxedReturn);
  }

  void emitStackProbe(size_t StackSizeBytes) {
    dispatchToConcrete(&Traits::ConcreteTarget::emitStackProbe,
                       std::move(StackSizeBytes));
  }

  /// Emit just the call instruction (without argument or return variable
  /// processing), sandboxing if needed.
  virtual Inst *emitCallToTarget(Operand *CallTarget, Variable *ReturnReg,
                                 size_t NumVariadicFpArgs = 0) = 0;
  /// Materialize the moves needed to return a value of the specified type.
  virtual Variable *moveReturnValueToRegister(Operand *Value,
                                              Type ReturnType) = 0;

  /// Emit a jump table to the constant pool.
  void emitJumpTable(const Cfg *Func,
                     const InstJumpTable *JumpTable) const override;

  /// Emit a fake use of esp to make sure esp stays alive for the entire
  /// function. Otherwise some esp adjustments get dead-code eliminated.
  void keepEspLiveAtExit() {
    Variable *esp =
        Func->getTarget()->getPhysicalRegister(getStackReg(), Traits::WordType);
    Context.insert<InstFakeUse>(esp);
  }

  /// Operand legalization helpers. To deal with address mode constraints, the
  /// helpers will create a new Operand and emit instructions that guarantee
  /// that the Operand kind is one of those indicated by the LegalMask (a
  /// bitmask of allowed kinds). If the input Operand is known to already meet
  /// the constraints, it may be simply returned as the result, without
  /// creating any new instructions or operands.
  enum OperandLegalization {
    Legal_None = 0,
    Legal_Reg = 1 << 0, // physical register, not stack location
    Legal_Imm = 1 << 1,
    Legal_Mem = 1 << 2, // includes [eax+4*ecx] as well as [esp+12]
    Legal_Rematerializable = 1 << 3,
    Legal_AddrAbs = 1 << 4, // ConstantRelocatable doesn't have to add RebasePtr
    Legal_Default = ~(Legal_Rematerializable | Legal_AddrAbs)
    // TODO(stichnot): Figure out whether this default works for x86-64.
  };
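  // Example (sketch): to make a cmp encodable, keep the first source out of
  // immediate form while allowing register or memory:
  //   Operand *Src0Legal = legalize(Src0, Legal_Reg | Legal_Mem);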
  using LegalMask = uint32_t;
  Operand *legalize(Operand *From, LegalMask Allowed = Legal_Default,
                    RegNumT RegNum = RegNumT());
  Variable *legalizeToReg(Operand *From, RegNumT RegNum = RegNumT());
  /// Legalize the first source operand for use in the cmp instruction.
  Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1);
  /// Turn a pointer operand into a memory operand that can be used by a real
  /// load/store operation. Legalizes the operand as well. This is a nop if the
  /// operand is already a legal memory operand.
  X86OperandMem *formMemoryOperand(Operand *Ptr, Type Ty,
                                   bool DoLegalize = true);

  Variable *makeReg(Type Ty, RegNumT RegNum = RegNumT());
  static Type stackSlotType();

  static constexpr uint32_t NoSizeLimit = 0;
  /// Returns the largest type which is equal to or smaller than Size bytes.
  /// The type is suitable for copying memory, i.e. a load and store will be a
  /// single instruction (for example x86 will get f64, not i64).
  static Type largestTypeInSize(uint32_t Size, uint32_t MaxSize = NoSizeLimit);
  /// Returns the smallest type which is equal to or larger than Size bytes. If
  /// one doesn't exist then the largest type smaller than Size bytes is
  /// returned. The type is suitable for memory copies as described at
  /// largestTypeInSize.
  static Type firstTypeThatFitsSize(uint32_t Size,
                                    uint32_t MaxSize = NoSizeLimit);

  Variable *copyToReg8(Operand *Src, RegNumT RegNum = RegNumT());
  Variable *copyToReg(Operand *Src, RegNumT RegNum = RegNumT());

  /// Returns a register containing all zeros, without affecting the FLAGS
  /// register, using the best instruction for the type.
  Variable *makeZeroedRegister(Type Ty, RegNumT RegNum = RegNumT());

  /// \name Returns a vector in a register with the given constant entries.
  /// @{
  Variable *makeVectorOfZeros(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfOnes(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfMinusOnes(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfHighOrderBits(Type Ty, RegNumT RegNum = RegNumT());
  Variable *makeVectorOfFabsMask(Type Ty, RegNumT RegNum = RegNumT());
  /// @}

  /// Return a memory operand corresponding to a stack allocated Variable.
  X86OperandMem *getMemoryOperandForStackSlot(Type Ty, Variable *Slot,
                                              uint32_t Offset = 0);

  /// AutoMemorySandboxer emits a bundle-lock/bundle-unlock pair if the
  /// instruction's operand is a memory reference. This is only needed for the
  /// x86-64 NaCl sandbox.
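  ///
  /// A typical use, as in the emitter helpers below (sketch):
  ///   AutoMemorySandboxer<> _(this, &Dest, &Src0);
  /// finds the first memory operand among the arguments (if any), rewrites it
  /// via _sandbox_mem_reference(), and closes the bundle when the sandboxer
  /// goes out of scope.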
  template <InstBundleLock::Option BundleLockOpt = InstBundleLock::Opt_None>
  class AutoMemorySandboxer {
    AutoMemorySandboxer() = delete;
    AutoMemorySandboxer(const AutoMemorySandboxer &) = delete;
    AutoMemorySandboxer &operator=(const AutoMemorySandboxer &) = delete;

  private:
    typename Traits::TargetLowering *Target;

    template <typename T, typename... Tail>
    X86OperandMem **findMemoryReference(T **First, Tail... Others) {
      if (llvm::isa<X86OperandMem>(*First)) {
        return reinterpret_cast<X86OperandMem **>(First);
      }
      return findMemoryReference(Others...);
    }

    X86OperandMem **findMemoryReference() { return nullptr; }

  public:
    AutoBundle *Bundler = nullptr;
    X86OperandMem **const MemOperand;

    template <typename... T>
    AutoMemorySandboxer(typename Traits::TargetLowering *Target, T... Args)
        : Target(Target), MemOperand(Target->SandboxingType == ST_None
                                         ? nullptr
                                         : findMemoryReference(Args...)) {
      if (MemOperand != nullptr) {
        if (Traits::Is64Bit) {
          Bundler = new (Target->Func->template allocate<AutoBundle>())
              AutoBundle(Target, BundleLockOpt);
        }
        *MemOperand = Target->_sandbox_mem_reference(*MemOperand);
      }
    }

    ~AutoMemorySandboxer() {
      if (Bundler != nullptr) {
        Bundler->~AutoBundle();
      }
    }
  };

  /// The following are helpers that insert lowered x86 instructions with
  /// minimal syntactic overhead, so that the lowering code can look as close
  /// to assembly as practical.
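  ///
  /// For example, lowering "Dest = Src0 + Src1" typically reads (sketch):
  ///   Variable *T = nullptr;
  ///   _mov(T, Src0); // creates T with infinite register allocation weight
  ///   _add(T, Src1);
  ///   _mov(Dest, T);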
  void _adc(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Adc>(Dest, Src0);
  }
  void _adc_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::AdcRMW>(DestSrc0, Src1);
  }
  void _add(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Add>(Dest, Src0);
  }
  void _add_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::AddRMW>(DestSrc0, Src1);
  }
  void _addps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Addps>(Dest, Src0);
  }
  void _addss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Addss>(Dest, Src0);
  }
  void _add_sp(Operand *Adjustment) {
    dispatchToConcrete(&Traits::ConcreteTarget::_add_sp, std::move(Adjustment));
  }
  void _and(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::And>(Dest, Src0);
  }
  void _andnps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Andnps>(Dest, Src0);
  }
  void _andps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Andps>(Dest, Src0);
  }
  void _and_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::AndRMW>(DestSrc0, Src1);
  }
  void _blendvps(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Blendvps>(Dest, Src0, Src1);
  }
  void _br(BrCond Condition, CfgNode *TargetTrue, CfgNode *TargetFalse) {
    Context.insert<InstX86Br>(TargetTrue, TargetFalse, Condition,
                              InstX86Br::Far);
  }
  void _br(CfgNode *Target) {
    Context.insert<InstX86Br>(Target, InstX86Br::Far);
  }
  void _br(BrCond Condition, CfgNode *Target) {
    Context.insert<InstX86Br>(Target, Condition, InstX86Br::Far);
  }
  void _br(BrCond Condition, InstX86Label *Label,
           typename InstX86Br::Mode Kind = InstX86Br::Near) {
    Context.insert<InstX86Br>(Label, Condition, Kind);
  }
  void _bsf(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Bsf>(Dest, Src0);
  }
  void _bsr(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Bsr>(Dest, Src0);
  }
  void _bswap(Variable *SrcDest) {
    AutoMemorySandboxer<> _(this, &SrcDest);
    Context.insert<typename Traits::Insts::Bswap>(SrcDest);
  }
  void _cbwdq(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cbwdq>(Dest, Src0);
  }
  void _cmov(Variable *Dest, Operand *Src0, BrCond Condition) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cmov>(Dest, Src0, Condition);
  }
  void _cmp(Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Icmp>(Src0, Src1);
  }
  void _cmpps(Variable *Dest, Operand *Src0, CmppsCond Condition) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cmpps>(Dest, Src0, Condition);
  }
  void _cmpxchg(Operand *DestOrAddr, Variable *Eax, Variable *Desired,
                bool Locked) {
    AutoMemorySandboxer<> _(this, &DestOrAddr);
    Context.insert<typename Traits::Insts::Cmpxchg>(DestOrAddr, Eax, Desired,
                                                    Locked);
    // Mark eax as possibly modified by cmpxchg.
    Context.insert<InstFakeDef>(Eax, llvm::dyn_cast<Variable>(DestOrAddr));
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Eax);
  }
  void _cmpxchg8b(X86OperandMem *Addr, Variable *Edx, Variable *Eax,
                  Variable *Ecx, Variable *Ebx, bool Locked) {
    AutoMemorySandboxer<> _(this, &Addr);
    Context.insert<typename Traits::Insts::Cmpxchg8b>(Addr, Edx, Eax, Ecx, Ebx,
                                                      Locked);
    // Mark edx and eax as possibly modified by cmpxchg8b.
    Context.insert<InstFakeDef>(Edx);
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Edx);
    Context.insert<InstFakeDef>(Eax);
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Eax);
  }
  void _cvt(Variable *Dest, Operand *Src0,
            typename Traits::Insts::Cvt::CvtVariant Variant) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Cvt>(Dest, Src0, Variant);
  }
  void _round(Variable *Dest, Operand *Src0, Operand *Imm) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Round>(Dest, Src0, Imm);
  }
  void _div(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Div>(Dest, Src0, Src1);
  }
  void _divps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Divps>(Dest, Src0);
  }
  void _divss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Divss>(Dest, Src0);
  }
  template <typename T = Traits>
  typename std::enable_if<T::UsesX87, void>::type _fld(Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Src0);
    Context.insert<typename Traits::Insts::template Fld<>>(Src0);
  }
  // TODO(jpp): when implementing the X8664 calling convention, make sure x8664
  // does not invoke this method, and remove it.
  template <typename T = Traits>
  typename std::enable_if<!T::UsesX87, void>::type _fld(Operand *) {
    llvm::report_fatal_error("fld is not available in x86-64");
  }
  template <typename T = Traits>
  typename std::enable_if<T::UsesX87, void>::type _fstp(Variable *Dest) {
    AutoMemorySandboxer<> _(this, &Dest);
    Context.insert<typename Traits::Insts::template Fstp<>>(Dest);
  }
  // TODO(jpp): when implementing the X8664 calling convention, make sure x8664
  // does not invoke this method, and remove it.
  template <typename T = Traits>
  typename std::enable_if<!T::UsesX87, void>::type _fstp(Variable *) {
    llvm::report_fatal_error("fstp is not available in x86-64");
  }
  void _idiv(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Idiv>(Dest, Src0, Src1);
  }
  void _imul(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Imul>(Dest, Src0);
  }
  void _imul_imm(Variable *Dest, Operand *Src0, Constant *Imm) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::ImulImm>(Dest, Src0, Imm);
  }
  void _insertps(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Insertps>(Dest, Src0, Src1);
  }
  void _int3() { Context.insert<typename Traits::Insts::Int3>(); }
  void _jmp(Operand *Target) {
    AutoMemorySandboxer<> _(this, &Target);
    Context.insert<typename Traits::Insts::Jmp>(Target);
  }
  void _lea(Variable *Dest, Operand *Src0) {
    Context.insert<typename Traits::Insts::Lea>(Dest, Src0);
  }
  void _link_bp() { dispatchToConcrete(&Traits::ConcreteTarget::_link_bp); }
  void _push_reg(RegNumT RegNum) {
    dispatchToConcrete(&Traits::ConcreteTarget::_push_reg, std::move(RegNum));
  }
  void _pop_reg(RegNumT RegNum) {
    dispatchToConcrete(&Traits::ConcreteTarget::_pop_reg, std::move(RegNum));
  }
  void _mfence() { Context.insert<typename Traits::Insts::Mfence>(); }
  /// Moves can be used to redefine registers, creating "partial kills" for
  /// liveness. Mark where moves are used in this way.
  void _redefined(Inst *MovInst, bool IsRedefinition = true) {
    if (IsRedefinition)
      MovInst->setDestRedefined();
  }
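  /// Typical usage (sketch):
  ///   _redefined(_mov(Dest, Src)); // Dest was already live; treat this mov
  ///                                // as a redefinition, not a fresh def.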
  /// If Dest=nullptr is passed in, then a new variable is created, marked as
  /// infinite register allocation weight, and returned through the in/out Dest
  /// argument.
  typename Traits::Insts::Mov *_mov(Variable *&Dest, Operand *Src0,
                                    RegNumT RegNum = RegNumT()) {
    if (Dest == nullptr)
      Dest = makeReg(Src0->getType(), RegNum);
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    return Context.insert<typename Traits::Insts::Mov>(Dest, Src0);
  }
  void _mov_sp(Operand *NewValue) {
    dispatchToConcrete(&Traits::ConcreteTarget::_mov_sp, std::move(NewValue));
  }
  typename Traits::Insts::Movp *_movp(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    return Context.insert<typename Traits::Insts::Movp>(Dest, Src0);
  }
  void _movd(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movd>(Dest, Src0);
  }
  void _movq(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movq>(Dest, Src0);
  }
  void _movss(Variable *Dest, Variable *Src0) {
    Context.insert<typename Traits::Insts::MovssRegs>(Dest, Src0);
  }
  void _movsx(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movsx>(Dest, Src0);
  }
  typename Traits::Insts::Movzx *_movzx(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    return Context.insert<typename Traits::Insts::Movzx>(Dest, Src0);
  }
  void _maxss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Maxss>(Dest, Src0);
  }
  void _minss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Minss>(Dest, Src0);
  }
  void _maxps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Maxps>(Dest, Src0);
  }
  void _minps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Minps>(Dest, Src0);
  }
  void _mul(Variable *Dest, Variable *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Mul>(Dest, Src0, Src1);
  }
  void _mulps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Mulps>(Dest, Src0);
  }
  void _mulss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Mulss>(Dest, Src0);
  }
  void _neg(Variable *SrcDest) {
    AutoMemorySandboxer<> _(this, &SrcDest);
    Context.insert<typename Traits::Insts::Neg>(SrcDest);
  }
  void _nop(SizeT Variant) {
    Context.insert<typename Traits::Insts::Nop>(Variant);
  }
  void _or(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Or>(Dest, Src0);
  }
  void _orps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Orps>(Dest, Src0);
  }
  void _or_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::OrRMW>(DestSrc0, Src1);
  }
  void _padd(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Padd>(Dest, Src0);
  }
  void _padds(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Padds>(Dest, Src0);
  }
  void _paddus(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Paddus>(Dest, Src0);
  }
  void _pand(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pand>(Dest, Src0);
  }
  void _pandn(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pandn>(Dest, Src0);
  }
  void _pblendvb(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pblendvb>(Dest, Src0, Src1);
  }
  void _pcmpeq(Variable *Dest, Operand *Src0,
               Type ArithmeticTypeOverride = IceType_void) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pcmpeq>(Dest, Src0,
                                                   ArithmeticTypeOverride);
  }
  void _pcmpgt(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pcmpgt>(Dest, Src0);
  }
  void _pextr(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pextr>(Dest, Src0, Src1);
  }
  void _pinsr(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pinsr>(Dest, Src0, Src1);
  }
  void _pmull(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmull>(Dest, Src0);
  }
  void _pmulhw(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmulhw>(Dest, Src0);
  }
  void _pmulhuw(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmulhuw>(Dest, Src0);
  }
  void _pmaddwd(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmaddwd>(Dest, Src0);
  }
  void _pmuludq(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pmuludq>(Dest, Src0);
  }
  void _pop(Variable *Dest) {
    Context.insert<typename Traits::Insts::Pop>(Dest);
  }
  void _por(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Por>(Dest, Src0);
  }
  void _punpckl(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Punpckl>(Dest, Src0);
  }
  void _punpckh(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Punpckh>(Dest, Src0);
  }
  void _packss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Packss>(Dest, Src0);
  }
  void _packus(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Packus>(Dest, Src0);
  }
  void _pshufb(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pshufb>(Dest, Src0);
  }
  void _pshufd(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Pshufd>(Dest, Src0, Src1);
  }
  void _psll(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psll>(Dest, Src0);
  }
  void _psra(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psra>(Dest, Src0);
  }
  void _psrl(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psrl>(Dest, Src0);
  }
  void _psub(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psub>(Dest, Src0);
  }
  void _psubs(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psubs>(Dest, Src0);
  }
  void _psubus(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Psubus>(Dest, Src0);
  }
  void _push(Operand *Src0) {
    Context.insert<typename Traits::Insts::Push>(Src0);
  }
  void _pxor(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Pxor>(Dest, Src0);
  }
  void _ret(Variable *Src0 = nullptr) {
    Context.insert<typename Traits::Insts::Ret>(Src0);
  }
  void _rol(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Rol>(Dest, Src0);
  }
  void _round(Variable *Dest, Operand *Src, Constant *Imm) {
    AutoMemorySandboxer<> _(this, &Dest, &Src);
    Context.insert<typename Traits::Insts::Round>(Dest, Src, Imm);
  }
  X86OperandMem *_sandbox_mem_reference(X86OperandMem *Mem) {
    return dispatchToConcrete(&Traits::ConcreteTarget::_sandbox_mem_reference,
                              std::move(Mem));
  }
  void _sar(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sar>(Dest, Src0);
  }
  void _sbb(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sbb>(Dest, Src0);
  }
  void _sbb_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::SbbRMW>(DestSrc0, Src1);
  }
  void _setcc(Variable *Dest, BrCond Condition) {
    Context.insert<typename Traits::Insts::Setcc>(Dest, Condition);
  }
  void _shl(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Shl>(Dest, Src0);
  }
  void _shld(Variable *Dest, Variable *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Shld>(Dest, Src0, Src1);
  }
  void _shr(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Shr>(Dest, Src0);
  }
  void _shrd(Variable *Dest, Variable *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Shrd>(Dest, Src0, Src1);
  }
  void _shufps(Variable *Dest, Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Shufps>(Dest, Src0, Src1);
  }
  void _movmsk(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Movmsk>(Dest, Src0);
  }
  void _sqrt(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sqrt>(Dest, Src0);
  }
  void _store(Operand *Value, X86Operand *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::Store>(Value, Mem);
  }
  void _storep(Variable *Value, X86OperandMem *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::StoreP>(Value, Mem);
  }
  void _storeq(Operand *Value, X86OperandMem *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::StoreQ>(Value, Mem);
  }
  void _stored(Operand *Value, X86OperandMem *Mem) {
    AutoMemorySandboxer<> _(this, &Value, &Mem);
    Context.insert<typename Traits::Insts::StoreD>(Value, Mem);
  }
  void _sub(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Sub>(Dest, Src0);
  }
  void _sub_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::SubRMW>(DestSrc0, Src1);
  }
  void _sub_sp(Operand *Adjustment) {
    dispatchToConcrete(&Traits::ConcreteTarget::_sub_sp, std::move(Adjustment));
  }
  void _subps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Subps>(Dest, Src0);
  }
  void _subss(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Subss>(Dest, Src0);
  }
  void _test(Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Test>(Src0, Src1);
  }
  void _ucomiss(Operand *Src0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &Src0, &Src1);
    Context.insert<typename Traits::Insts::Ucomiss>(Src0, Src1);
  }
  void _ud2() { Context.insert<typename Traits::Insts::UD2>(); }
  void _unlink_bp() { dispatchToConcrete(&Traits::ConcreteTarget::_unlink_bp); }
  void _xadd(Operand *Dest, Variable *Src, bool Locked) {
    AutoMemorySandboxer<> _(this, &Dest, &Src);
    Context.insert<typename Traits::Insts::Xadd>(Dest, Src, Locked);
    // The xadd exchanges Dest and Src (modifying Src). Model that update with
    // a FakeDef followed by a FakeUse.
    Context.insert<InstFakeDef>(Src, llvm::dyn_cast<Variable>(Dest));
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Src);
  }
  void _xchg(Operand *Dest, Variable *Src) {
    AutoMemorySandboxer<> _(this, &Dest, &Src);
    Context.insert<typename Traits::Insts::Xchg>(Dest, Src);
    // The xchg modifies Dest and Src -- model that update with a
    // FakeDef/FakeUse.
    Context.insert<InstFakeDef>(Src, llvm::dyn_cast<Variable>(Dest));
    _set_dest_redefined();
    Context.insert<InstFakeUse>(Src);
  }
  void _xor(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Xor>(Dest, Src0);
  }
  void _xorps(Variable *Dest, Operand *Src0) {
    AutoMemorySandboxer<> _(this, &Dest, &Src0);
    Context.insert<typename Traits::Insts::Xorps>(Dest, Src0);
  }
  void _xor_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    Context.insert<typename Traits::Insts::XorRMW>(DestSrc0, Src1);
  }

  void _iaca_start() {
    if (!BuildDefs::minimal())
      Context.insert<typename Traits::Insts::IacaStart>();
  }
  void _iaca_end() {
    if (!BuildDefs::minimal())
      Context.insert<typename Traits::Insts::IacaEnd>();
  }

  /// This class helps wrap IACA markers around the code generated by the
  /// current scope, so you don't need to insert an end marker before each
  /// return.
  class ScopedIacaMark {
    ScopedIacaMark(const ScopedIacaMark &) = delete;
    ScopedIacaMark &operator=(const ScopedIacaMark &) = delete;

  public:
    ScopedIacaMark(TargetX86Base *Lowering) : Lowering(Lowering) {
      Lowering->_iaca_start();
    }
    ~ScopedIacaMark() { end(); }
    void end() {
      if (!Lowering)
        return;
      Lowering->_iaca_end();
      Lowering = nullptr;
    }

  private:
    TargetX86Base *Lowering;
  };
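  /// Typical usage (sketch): construct at the top of a lowering sequence, and
  /// let the destructor (or an explicit end()) emit the matching end marker:
  ///   ScopedIacaMark _(this);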

  bool optimizeScalarMul(Variable *Dest, Operand *Src0, int32_t Src1);
  void findRMW();

  InstructionSetEnum InstructionSet = Traits::InstructionSet::Begin;
  bool IsEbpBasedFrame = false;
  size_t RequiredStackAlignment = sizeof(Traits::WordType);
  size_t SpillAreaSizeBytes = 0;
  size_t FixedAllocaSizeBytes = 0;
  size_t FixedAllocaAlignBytes = 0;
  bool PrologEmitsFixedAllocas = false;
  uint32_t MaxOutArgsSizeBytes = 0;
  static std::array<SmallBitVector, RCX86_NUM> TypeToRegisterSet;
  static std::array<SmallBitVector, RCX86_NUM> TypeToRegisterSetUnfiltered;
  static std::array<SmallBitVector, Traits::RegisterSet::Reg_NUM>
      RegisterAliases;
  SmallBitVector RegsUsed;
  std::array<VarList, IceType_NUM> PhysicalRegisters;
  // RebasePtr is a Variable that holds the Rebasing pointer (if any) for the
  // current sandboxing type.
  Variable *RebasePtr = nullptr;

private:
  /// dispatchToConcrete is the template voodoo that allows TargetX86Base to
  /// invoke methods in Machine (which inherits from TargetX86Base) without
  /// having to rely on virtual method calls. There are two overloads, one for
  /// non-void types, and one for void types. We need this because, for
  /// non-void types, we need to return the method result, whereas for void, we
  /// don't. While it is true that the code compiles without the void
  /// "version", there used to be a time when compilers would reject such code.
  ///
  /// This machinery is far from perfect. Note that, in particular, the
  /// arguments provided to dispatchToConcrete() need to match the arguments
  /// for Method **exactly** (i.e., no argument promotion is performed.)
1114   template <typename Ret, typename... Args>
1115   typename std::enable_if<!std::is_void<Ret>::value, Ret>::type
dispatchToConcrete(Ret (ConcreteTarget::* Method)(Args...),Args &&...args)1116   dispatchToConcrete(Ret (ConcreteTarget::*Method)(Args...), Args &&... args) {
1117     return (static_cast<ConcreteTarget *>(this)->*Method)(
1118         std::forward<Args>(args)...);
1119   }
1120 
1121   template <typename... Args>
dispatchToConcrete(void (ConcreteTarget::* Method)(Args...),Args &&...args)1122   void dispatchToConcrete(void (ConcreteTarget::*Method)(Args...),
1123                           Args &&... args) {
1124     (static_cast<ConcreteTarget *>(this)->*Method)(std::forward<Args>(args)...);
1125   }
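  // Illustrative call (emitSomething and Arg are hypothetical):
  //
  //   dispatchToConcrete(&ConcreteTarget::emitSomething, std::move(Arg));
  //
  // Args is deduced from the member-function pointer, not from the call site,
  // which is why the arguments have to match Method's parameters exactly.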

  void lowerShift64(InstArithmetic::OpKind Op, Operand *Src0Lo, Operand *Src0Hi,
                    Operand *Src1Lo, Variable *DestLo, Variable *DestHi);

  /// Emit the code for a combined operation and consumer instruction, or set
  /// the destination variable of the operation if Consumer == nullptr.
  void lowerIcmpAndConsumer(const InstIcmp *Icmp, const Inst *Consumer);
  void lowerFcmpAndConsumer(const InstFcmp *Fcmp, const Inst *Consumer);
  void lowerArithAndConsumer(const InstArithmetic *Arith, const Inst *Consumer);

  /// Emit a setcc instruction if Consumer == nullptr; otherwise emit a
  /// specialized version of Consumer.
  void setccOrConsumer(BrCond Condition, Variable *Dest, const Inst *Consumer);

  /// Emit a mov [1|0] instruction if Consumer == nullptr; otherwise emit a
  /// specialized version of Consumer.
  void movOrConsumer(bool IcmpResult, Variable *Dest, const Inst *Consumer);

  /// Emit the code for instructions with a vector type.
  void lowerIcmpVector(const InstIcmp *Icmp);
  void lowerFcmpVector(const InstFcmp *Fcmp);
  void lowerSelectVector(const InstSelect *Instr);

  /// Helpers for select lowering.
  void lowerSelectMove(Variable *Dest, BrCond Cond, Operand *SrcT,
                       Operand *SrcF);
  void lowerSelectIntMove(Variable *Dest, BrCond Cond, Operand *SrcT,
                          Operand *SrcF);
  /// Generic helper to move an arbitrary type from Src to Dest.
  void lowerMove(Variable *Dest, Operand *Src, bool IsRedefinition);

  /// Optimizations for idiom recognition.
  bool lowerOptimizeFcmpSelect(const InstFcmp *Fcmp, const InstSelect *Select);

  /// Complains loudly if invoked because the CPU can handle 64-bit types
  /// natively.
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, void>::type lowerIcmp64(const InstIcmp *,
                                                              const Inst *) {
    llvm::report_fatal_error(
        "Hey, yo! This is x86-64. Watcha doin'? (lowerIcmp64)");
  }
  /// lowerIcmp64 handles 64-bit icmp lowering.
  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, void>::type
  lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer);
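  // The overload pair above is a standard enable_if selection; a minimal
  // standalone sketch of the pattern:
  //
  //   template <typename T = Traits>
  //   typename std::enable_if<T::Is64Bit, void>::type f() { /* x86-64 */ }
  //
  //   template <typename T = Traits>
  //   typename std::enable_if<!T::Is64Bit, void>::type f() { /* x86-32 */ }
  //
  // Exactly one overload survives substitution for a given Traits, so calls
  // to f() statically resolve to the right implementation.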

  BoolFolding<Traits> FoldingInfo;

  /// Helpers for lowering ShuffleVector
  /// @{
  Variable *lowerShuffleVector_AllFromSameSrc(Operand *Src, SizeT Index0,
                                              SizeT Index1, SizeT Index2,
                                              SizeT Index3);
  static constexpr SizeT IGNORE_INDEX = 0x80000000u;
  Variable *lowerShuffleVector_TwoFromSameSrc(Operand *Src0, SizeT Index0,
                                              SizeT Index1, Operand *Src1,
                                              SizeT Index2, SizeT Index3);
  static constexpr SizeT UNIFIED_INDEX_0 = 0;
  static constexpr SizeT UNIFIED_INDEX_1 = 2;
  Variable *lowerShuffleVector_UnifyFromDifferentSrcs(Operand *Src0,
                                                      SizeT Index0,
                                                      Operand *Src1,
                                                      SizeT Index1);
  static constexpr SizeT CLEAR_ALL_BITS = 0x80;
  SizeT PshufbMaskCount = 0;
  GlobalString lowerShuffleVector_NewMaskName();
  ConstantRelocatable *lowerShuffleVector_CreatePshufbMask(
      int8_t Idx0, int8_t Idx1, int8_t Idx2, int8_t Idx3, int8_t Idx4,
      int8_t Idx5, int8_t Idx6, int8_t Idx7, int8_t Idx8, int8_t Idx9,
      int8_t Idx10, int8_t Idx11, int8_t Idx12, int8_t Idx13, int8_t Idx14,
      int8_t Idx15);
  void lowerShuffleVector_UsingPshufb(Variable *Dest, Operand *Src0,
                                      Operand *Src1, int8_t Idx0, int8_t Idx1,
                                      int8_t Idx2, int8_t Idx3, int8_t Idx4,
                                      int8_t Idx5, int8_t Idx6, int8_t Idx7,
                                      int8_t Idx8, int8_t Idx9, int8_t Idx10,
                                      int8_t Idx11, int8_t Idx12, int8_t Idx13,
                                      int8_t Idx14, int8_t Idx15);
  /// @}
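  // A worked example of the pshufb-based lowering (assembly is illustrative):
  // byte i of the mask selects byte (Idx & 0xf) of the 16-byte source, and a
  // mask byte with bit 7 set (CLEAR_ALL_BITS) zeroes that output byte:
  //
  //   movups xmm0, [Src]
  //   pshufb xmm0, [Mask]  ; out[i] = Mask[i] & 0x80 ? 0 : in[Mask[i] & 0xf]
  //
  // E.g., reversing a v16i8 uses the mask 15, 14, ..., 1, 0.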

  static FixupKind PcRelFixup;
  static FixupKind AbsFixup;
};

template <typename TraitsType>
class TargetDataX86 final : public TargetDataLowering {
  using Traits = TraitsType;
  TargetDataX86() = delete;
  TargetDataX86(const TargetDataX86 &) = delete;
  TargetDataX86 &operator=(const TargetDataX86 &) = delete;

public:
  ~TargetDataX86() override = default;

  static std::unique_ptr<TargetDataLowering> create(GlobalContext *Ctx) {
    return makeUnique<TargetDataX86>(Ctx);
  }

  void lowerGlobals(const VariableDeclarationList &Vars,
                    const std::string &SectionSuffix) override;
  void lowerConstants() override;
  void lowerJumpTables() override;

private:
  ENABLE_MAKE_UNIQUE;

  explicit TargetDataX86(GlobalContext *Ctx) : TargetDataLowering(Ctx) {}
  template <typename T> static void emitConstantPool(GlobalContext *Ctx);
};

class TargetHeaderX86 : public TargetHeaderLowering {
  TargetHeaderX86() = delete;
  TargetHeaderX86(const TargetHeaderX86 &) = delete;
  TargetHeaderX86 &operator=(const TargetHeaderX86 &) = delete;

public:
  ~TargetHeaderX86() override = default;

  static std::unique_ptr<TargetHeaderLowering> create(GlobalContext *Ctx) {
    return makeUnique<TargetHeaderX86>(Ctx);
  }

private:
  ENABLE_MAKE_UNIQUE;

  explicit TargetHeaderX86(GlobalContext *Ctx) : TargetHeaderLowering(Ctx) {}
};

} // end of namespace X86NAMESPACE
} // end of namespace Ice

#include "IceTargetLoweringX86BaseImpl.h"

#endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H