//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
//    transformation is performed pre-reg-alloc, it can help relieve
//    register pressure.
//
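// For example (an illustrative sketch, not taken from any particular test),
// a 32-bit call sequence such as:
//   movl   %ecx, (%esp)
//   movl   $42, 4(%esp)
//   movl   %eax, 8(%esp)
//   calll  foo
// becomes (pushes are emitted in reverse displacement order):
//   pushl  %eax
//   pushl  $42
//   pushl  %ecx
//   calll  foo
//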
//===----------------------------------------------------------------------===//

#include <algorithm>

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-cf-opt"

static cl::opt<bool>
    NoX86CFOpt("no-x86-call-frame-opt",
               cl::desc("Avoid optimizing x86 call frames for size"),
               cl::init(false), cl::Hidden);

namespace {
class X86CallFrameOptimization : public MachineFunctionPass {
public:
  X86CallFrameOptimization() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  // Information we know about a particular call site
  struct CallContext {
    CallContext()
        : FrameSetup(nullptr), Call(nullptr), SPCopy(nullptr), ExpectedDist(0),
          MovVector(4, nullptr), NoStackParams(false), UsePush(false) {}

    // Iterator referring to the frame setup instruction
    MachineBasicBlock::iterator FrameSetup;

    // Actual call instruction
    MachineInstr *Call;

    // A copy of the stack pointer
    MachineInstr *SPCopy;

    // The total displacement of all passed parameters
    int64_t ExpectedDist;

    // The sequence of movs used to pass the parameters
    SmallVector<MachineInstr *, 4> MovVector;

    // True if this call site has no stack parameters
    bool NoStackParams;

    // True if this call site can use push instructions
    bool UsePush;
  };

  typedef SmallVector<CallContext, 8> ContextVector;

  bool isLegal(MachineFunction &MF);

  bool isProfitable(MachineFunction &MF, ContextVector &CallSeqVector);

  void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, CallContext &Context);

  void adjustCallSequence(MachineFunction &MF, const CallContext &Context);

  MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
                                   unsigned Reg);

  enum InstClassification { Convert, Skip, Exit };

  InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         const X86RegisterInfo &RegInfo,
                                         DenseSet<unsigned int> &UsedRegs);

  const char *getPassName() const override { return "X86 Optimize Call Frame"; }

  const TargetInstrInfo *TII;
  const X86FrameLowering *TFL;
  const X86Subtarget *STI;
  MachineRegisterInfo *MRI;
  unsigned SlotSize;
  unsigned Log2SlotSize;
  static char ID;
};

char X86CallFrameOptimization::ID = 0;
} // end anonymous namespace

FunctionPass *llvm::createX86CallFrameOptimization() {
  return new X86CallFrameOptimization();
}

// This checks whether the transformation is legal.
// Also returns false in cases where it's potentially legal, but
// we don't even want to try.
bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
  if (NoX86CFOpt.getValue())
    return false;

  // We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
  // in the compact unwind encoding that Darwin uses. So, bail if there
  // is a danger of that being generated.
  if (STI->isTargetDarwin() &&
      (!MF.getMMI().getLandingPads().empty() ||
       (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
    return false;

  // It is not valid to change the stack pointer outside the prolog/epilog
  // on 64-bit Windows.
  if (STI->isTargetWin64())
    return false;

  // You would expect straight-line code between call-frame setup and
  // call-frame destroy. You would be wrong. There are circumstances (e.g.
  // CMOV_GR8 expansion of a select that feeds a function call!) where we can
  // end up with the setup and the destroy in different basic blocks.
  // This is bad, and breaks SP adjustment.
  // So, check that all of the frames in the function are closed inside
  // the same block, and, for good measure, that there are no nested frames.
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  for (MachineBasicBlock &BB : MF) {
    bool InsideFrameSequence = false;
    for (MachineInstr &MI : BB) {
      if (MI.getOpcode() == FrameSetupOpcode) {
        if (InsideFrameSequence)
          return false;
        InsideFrameSequence = true;
      } else if (MI.getOpcode() == FrameDestroyOpcode) {
        if (!InsideFrameSequence)
          return false;
        InsideFrameSequence = false;
      }
    }

    if (InsideFrameSequence)
      return false;
  }

  return true;
}

// Check whether this transformation is profitable for a particular
// function - in terms of code size.
bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
                                            ContextVector &CallSeqVector) {
  // This transformation is always a win when we do not expect to have
  // a reserved call frame. Under other circumstances, it may be either
  // a win or a loss, and requires a heuristic.
  bool CannotReserveFrame = MF.getFrameInfo()->hasVarSizedObjects();
  if (CannotReserveFrame)
    return true;

  unsigned StackAlign = TFL->getStackAlignment();

  int64_t Advantage = 0;
  for (auto CC : CallSeqVector) {
    // Call sites where no parameters are passed on the stack
    // do not affect the cost, since no stack adjustment is needed.
    if (CC.NoStackParams)
      continue;

    if (!CC.UsePush) {
      // If we don't use pushes for a particular call site,
      // we pay for not having a reserved call frame with an
      // additional sub/add esp pair. The cost is ~3 bytes per instruction,
      // depending on the size of the constant.
      // TODO: Callee-pop functions should have a smaller penalty, because
      // an add is needed even with a reserved call frame.
      Advantage -= 6;
    } else {
      // We can use pushes. First, account for the fixed costs.
      // We'll need an add after the call.
      Advantage -= 3;
      // If we have to realign the stack, we'll also need a sub before.
      if (CC.ExpectedDist % StackAlign)
        Advantage -= 3;
      // Now, for each push, we save ~3 bytes. For small constants, we
      // actually save more (up to 5 bytes), but 3 should be a good
      // approximation.
      Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
    }
  }

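  // As a worked example (hypothetical numbers): a single call site passing
  // four 4-byte stack arguments (ExpectedDist == 16) with no realignment
  // needed scores -3 (for the add) + 4 * 3 (for the pushes) = +9.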
  return Advantage >= 0;
}

bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  TFL = STI->getFrameLowering();
  MRI = &MF.getRegInfo();

  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  SlotSize = RegInfo.getSlotSize();
  assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
  Log2SlotSize = Log2_32(SlotSize);

  if (!isLegal(MF))
    return false;

  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  ContextVector CallSeqVector;

  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if (MI.getOpcode() == FrameSetupOpcode) {
        CallContext Context;
        collectCallInfo(MF, MBB, MI, Context);
        CallSeqVector.push_back(Context);
      }

  if (!isProfitable(MF, CallSeqVector))
    return false;

  for (auto CC : CallSeqVector) {
    if (CC.UsePush) {
      adjustCallSequence(MF, CC);
      Changed = true;
    }
  }

  return Changed;
}

X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
  if (MI == MBB.end())
    return Exit;

  // The instructions we actually care about are movs onto the stack
  int Opcode = MI->getOpcode();
  if (Opcode == X86::MOV32mi   || Opcode == X86::MOV32mr ||
      Opcode == X86::MOV64mi32 || Opcode == X86::MOV64mr)
    return Convert;

  // Not all calling conventions have only stack MOVs between the stack
  // adjust and the call.

  // We want to tolerate other instructions, to cover more cases.
  // In particular:
  // a) PCrel calls, where we expect an additional COPY of the basereg.
  // b) Passing frame-index addresses.
  // c) Calling conventions that have inreg parameters. These generate
  //    both copies and movs into registers.
  // To avoid creating lots of special cases, allow any instruction
  // that does not write into memory, does not def or use the stack
  // pointer, and does not def any register that was used by a preceding
  // push.
  // (Reading from memory is allowed, even if referenced through a
  // frame index, since these will get adjusted properly in PEI)

  // The reason for the last condition is that the pushes can't replace
  // the movs in place, because the order must be reversed.
  // So if we have a MOV32mr that uses EDX, then an instruction that defs
  // EDX, and then the call, after the transformation the push will use
  // the modified version of EDX, and not the original one.
  // Since we are still in SSA form at this point, we only need to
  // make sure we don't clobber any *physical* registers that were
  // used by an earlier mov that will become a push.

  if (MI->isCall() || MI->mayStore())
    return Exit;

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    unsigned int Reg = MO.getReg();
    if (!RegInfo.isPhysicalRegister(Reg))
      continue;
    if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
      return Exit;
    if (MO.isDef()) {
      for (unsigned int U : UsedRegs)
        if (RegInfo.regsOverlap(Reg, U))
          return Exit;
    }
  }

  return Skip;
}

void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               CallContext &Context) {
  // Check that this particular call sequence is amenable to the
  // transformation.
  const X86RegisterInfo &RegInfo =
      *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();

  // We expect to enter this at the beginning of a call sequence
  assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
  MachineBasicBlock::iterator FrameSetup = I++;
  Context.FrameSetup = FrameSetup;

  // How much do we adjust the stack? This puts an upper bound on
  // the number of parameters actually passed on it.
  unsigned int MaxAdjust =
      FrameSetup->getOperand(0).getImm() >> Log2SlotSize;

  // A zero adjustment means no stack parameters
  if (!MaxAdjust) {
    Context.NoStackParams = true;
    return;
  }

  // For globals in PIC mode, we can have some LEAs here.
  // Ignore them, they don't bother us.
  // TODO: Extend this to something that covers more cases.
  while (I->getOpcode() == X86::LEA32r)
    ++I;

  unsigned StackPtr = RegInfo.getStackRegister();
  // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
  // register here.  If it's there, use that virtual register as stack pointer
  // instead.
  if (I->isCopy() && I->getOperand(0).isReg() && I->getOperand(1).isReg() &&
      I->getOperand(1).getReg() == StackPtr) {
    Context.SPCopy = &*I++;
    StackPtr = Context.SPCopy->getOperand(0).getReg();
  }

  // Scan the call setup sequence for the pattern we're looking for.
  // We only handle a simple case - a sequence of store instructions that
  // push a sequence of stack-slot-aligned values onto the stack, with
  // no gaps between them.
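  // For instance (illustrative), a sequence that stores to (%esp) and
  // 8(%esp) but never to 4(%esp) leaves a gap, and is rejected by the
  // checks at the end of this function.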
  if (MaxAdjust > 4)
    Context.MovVector.resize(MaxAdjust, nullptr);

  InstClassification Classification;
  DenseSet<unsigned int> UsedRegs;

  while ((Classification = classifyInstruction(MBB, I, RegInfo, UsedRegs)) !=
         Exit) {
    if (Classification == Skip) {
      ++I;
      continue;
    }

    // We know the instruction has a supported store opcode.
    // We only want movs of the form:
    // mov imm/reg, k(%StackPtr)
    // If we run into something else, bail.
    // Note that AddrBaseReg may, counter to its name, not be a register,
    // but rather a frame index.
    // TODO: Support the fi case. This should probably work now that we
    // have the infrastructure to track the stack pointer within a call
    // sequence.
    if (!I->getOperand(X86::AddrBaseReg).isReg() ||
        (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
        !I->getOperand(X86::AddrScaleAmt).isImm() ||
        (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
        (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
        (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
        !I->getOperand(X86::AddrDisp).isImm())
      return;

    int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
    assert(StackDisp >= 0 &&
           "Negative stack displacement when passing parameters");

    // We really don't want to consider the unaligned case.
    if (StackDisp & (SlotSize - 1))
      return;
    StackDisp >>= Log2SlotSize;

    assert((size_t)StackDisp < Context.MovVector.size() &&
           "Function call has more parameters than the stack is adjusted for.");

    // If the same stack slot is being filled twice, something's fishy.
    if (Context.MovVector[StackDisp] != nullptr)
      return;
    Context.MovVector[StackDisp] = &*I;

    for (const MachineOperand &MO : I->uses()) {
      if (!MO.isReg())
        continue;
      unsigned int Reg = MO.getReg();
      if (RegInfo.isPhysicalRegister(Reg))
        UsedRegs.insert(Reg);
    }

    ++I;
  }

  // We now expect the end of the sequence. If we stopped early,
  // or reached the end of the block without finding a call, bail.
  if (I == MBB.end() || !I->isCall())
    return;

  Context.Call = &*I;
  if ((++I)->getOpcode() != FrameDestroyOpcode)
    return;

  // Now, go through the vector, and see that we don't have any gaps,
  // but only a series of MOVs.
  auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end();
  for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
    if (*MMI == nullptr)
      break;

  // If the call had no parameters, do nothing
  if (MMI == Context.MovVector.begin())
    return;

  // We are either at the last parameter, or a gap.
  // Make sure it's not a gap
  for (; MMI != MME; ++MMI)
    if (*MMI != nullptr)
      return;

  Context.UsePush = true;
}

void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
                                                  const CallContext &Context) {
  // Ok, we can in fact do the transformation for this call.
  // Do not remove the FrameSetup instruction, but adjust the parameters.
  // PEI will end up finalizing the handling of this.
  MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
  MachineBasicBlock &MBB = *(FrameSetup->getParent());
  FrameSetup->getOperand(1).setImm(Context.ExpectedDist);

  DebugLoc DL = FrameSetup->getDebugLoc();
  bool Is64Bit = STI->is64Bit();
  // Now, iterate through the vector in reverse order, and replace the movs
  // with pushes. MOVmi/MOVmr doesn't have any defs, so no need to
  // replace uses.
  for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
    MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
    MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
    MachineBasicBlock::iterator Push = nullptr;
    unsigned PushOpcode;
    switch (MOV->getOpcode()) {
    default:
      llvm_unreachable("Unexpected Opcode!");
    case X86::MOV32mi:
    case X86::MOV64mi32:
      PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
      // If the operand is a small (8-bit) immediate, we can use a
      // PUSH instruction with a shorter encoding.
      // Note that isImm() may fail even though this is a MOVmi, because
      // the operand can also be a symbol.
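      // (For example, "pushl $5" encodes in 2 bytes, against 5 bytes for
      // the same push with a 32-bit immediate.)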
      if (PushOp.isImm()) {
        int64_t Val = PushOp.getImm();
        if (isInt<8>(Val))
          PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
      }
      Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                 .addOperand(PushOp);
      break;
    case X86::MOV32mr:
    case X86::MOV64mr:
      unsigned int Reg = PushOp.getReg();

      // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
      // in preparation for the PUSH64. The upper 32 bits can be undef.
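      // The sequence emitted here looks like this (vreg names illustrative):
      //   %undef = IMPLICIT_DEF
      //   %wide  = INSERT_SUBREG %undef, %orig, sub_32bit
      // and %wide is then pushed below.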
      if (Is64Bit && MOV->getOpcode() == X86::MOV32mr) {
        unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
        Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
        BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
          .addReg(UndefReg)
          .addOperand(PushOp)
          .addImm(X86::sub_32bit);
      }

      // If PUSHrmm is not slow on this target, try to fold the source of the
      // push into the instruction.
      bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();

      // Check that this is legal to fold. Right now, we're extremely
      // conservative about that.
      MachineInstr *DefMov = nullptr;
      if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
        PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));

        unsigned NumOps = DefMov->getDesc().getNumOperands();
        for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
          Push->addOperand(DefMov->getOperand(i));

        DefMov->eraseFromParent();
      } else {
        PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
        Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
                   .addReg(Reg)
                   .getInstr();
      }
      break;
    }

    // For debugging, when using SP-based CFA, we need to adjust the CFA
    // offset after each push.
    // TODO: This is needed only if we require precise CFA.
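    // (In the assembly output this appears as a .cfi_adjust_cfa_offset
    // directive after each push.)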
    if (!TFL->hasFP(MF))
      TFL->BuildCFI(
          MBB, std::next(Push), DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));

    MBB.erase(MOV);
  }

  // The stack-pointer copy is no longer used in the call sequences.
  // There should not be any other users, but we can't commit to that, so:
  if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
    Context.SPCopy->eraseFromParent();

  // Once we've done this, we need to make sure PEI doesn't assume a reserved
  // frame.
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setHasPushSequences(true);
}

MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  // movl    4(%edi), %eax
  // movl    8(%edi), %ecx
  // movl    12(%edi), %edx
  // movl    %edx, 8(%esp)
  // movl    %ecx, 4(%esp)
  // movl    %eax, (%esp)
  // call
  // Get rid of those with prejudice.
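  // After folding (illustrative), the loads disappear into the pushes:
  //   pushl   12(%edi)
  //   pushl   8(%edi)
  //   pushl   4(%edi)
  //   call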
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineInstr &DefMI = *MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
  if ((DefMI.getOpcode() != X86::MOV32rm &&
       DefMI.getOpcode() != X86::MOV64rm) ||
      DefMI.getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return &DefMI;
}