1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Methods common to all machine instructions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/FoldingSet.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallString.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/Loads.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineInstrBundle.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/PseudoSourceValue.h"
37 #include "llvm/CodeGen/TargetInstrInfo.h"
38 #include "llvm/CodeGen/TargetRegisterInfo.h"
39 #include "llvm/CodeGen/TargetSubtargetInfo.h"
40 #include "llvm/Config/llvm-config.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Intrinsics.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/ModuleSlotTracker.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/MC/MCInstrDesc.h"
56 #include "llvm/MC/MCRegisterInfo.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/LowLevelTypeImpl.h"
64 #include "llvm/Support/MathExtras.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Target/TargetIntrinsicInfo.h"
67 #include "llvm/Target/TargetMachine.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <cstring>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 
78 static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
79   if (const MachineBasicBlock *MBB = MI.getParent())
80     if (const MachineFunction *MF = MBB->getParent())
81       return MF;
82   return nullptr;
83 }
84 
85 // Try to crawl up to the machine function and get TRI, MRI, IntrinsicInfo and
86 // TII from it.
87 static void tryToGetTargetInfo(const MachineInstr &MI,
88                                const TargetRegisterInfo *&TRI,
89                                const MachineRegisterInfo *&MRI,
90                                const TargetIntrinsicInfo *&IntrinsicInfo,
91                                const TargetInstrInfo *&TII) {
92 
93   if (const MachineFunction *MF = getMFIfAvailable(MI)) {
94     TRI = MF->getSubtarget().getRegisterInfo();
95     MRI = &MF->getRegInfo();
96     IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
97     TII = MF->getSubtarget().getInstrInfo();
98   }
99 }
100 
101 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
102   if (MCID->ImplicitDefs)
103     for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
104            ++ImpDefs)
105       addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
106   if (MCID->ImplicitUses)
107     for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
108            ++ImpUses)
109       addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
110 }
111 
112 /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
113 /// implicit operands. It reserves space for the number of operands specified by
114 /// the MCInstrDesc.
115 MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
116                            DebugLoc dl, bool NoImp)
117     : MCID(&tid), debugLoc(std::move(dl)) {
118   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
119 
120   // Reserve space for the expected number of operands.
121   if (unsigned NumOps = MCID->getNumOperands() +
122     MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
123     CapOperands = OperandCapacity::get(NumOps);
124     Operands = MF.allocateOperandArray(CapOperands);
125   }
126 
127   if (!NoImp)
128     addImplicitDefUseOperands(MF);
129 }
130 
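// Editor's note: an illustrative sketch, not part of the original file. Passes
// rarely invoke this constructor directly; they usually go through the BuildMI()
// helpers from MachineInstrBuilder.h (or MachineFunction::CreateMachineInstr()),
// which supply the MCInstrDesc so the constructor above can reserve space and
// attach the implicit defs/uses. MBB, InsertPt, DL, TII, DestReg and SrcReg are
// placeholders for values the calling pass would have in scope.
//
//   MachineInstrBuilder MIB =
//       BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DestReg)
//           .addReg(SrcReg);
//   // Any implicit operands listed in the opcode's MCInstrDesc are already
//   // present on the new instruction at this point.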
131 /// MachineInstr ctor - Copies MachineInstr arg exactly
132 ///
133 MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
134     : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
135       debugLoc(MI.getDebugLoc()) {
136   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
137 
138   CapOperands = OperandCapacity::get(MI.getNumOperands());
139   Operands = MF.allocateOperandArray(CapOperands);
140 
141   // Copy operands.
142   for (const MachineOperand &MO : MI.operands())
143     addOperand(MF, MO);
144 
145   // Copy all the sensible flags.
146   setFlags(MI.Flags);
147 }
148 
149 /// getRegInfo - If this instruction is embedded into a MachineFunction,
150 /// return the MachineRegisterInfo object for the current function, otherwise
151 /// return null.
152 MachineRegisterInfo *MachineInstr::getRegInfo() {
153   if (MachineBasicBlock *MBB = getParent())
154     return &MBB->getParent()->getRegInfo();
155   return nullptr;
156 }
157 
158 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
159 /// this instruction from their respective use lists.  This requires that the
160 /// operands already be on their use lists.
161 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
162   for (MachineOperand &MO : operands())
163     if (MO.isReg())
164       MRI.removeRegOperandFromUseList(&MO);
165 }
166 
167 /// AddRegOperandsToUseLists - Add all of the register operands in
168 /// this instruction to their respective use lists.  This requires that the
169 /// operands not be on their use lists yet.
170 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
171   for (MachineOperand &MO : operands())
172     if (MO.isReg())
173       MRI.addRegOperandToUseList(&MO);
174 }
175 
176 void MachineInstr::addOperand(const MachineOperand &Op) {
177   MachineBasicBlock *MBB = getParent();
178   assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
179   MachineFunction *MF = MBB->getParent();
180   assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
181   addOperand(*MF, Op);
182 }
183 
184 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
185 /// ranges. If MRI is non-null also update use-def chains.
186 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
187                          unsigned NumOps, MachineRegisterInfo *MRI) {
188   if (MRI)
189     return MRI->moveOperands(Dst, Src, NumOps);
190 
191   // MachineOperand is a trivially copyable type so we can just use memmove.
192   std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
193 }
194 
195 /// addOperand - Add the specified operand to the instruction.  If it is an
196 /// implicit operand, it is added to the end of the operand list.  If it is
197 /// an explicit operand it is added at the end of the explicit operand list
198 /// (before the first implicit operand).
199 void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
200   assert(MCID && "Cannot add operands before providing an instr descriptor");
201 
202   // Check if we're adding one of our existing operands.
203   if (&Op >= Operands && &Op < Operands + NumOperands) {
204     // This is unusual: MI->addOperand(MI->getOperand(i)).
205     // If adding Op requires reallocating or moving existing operands around,
206     // the Op reference could go stale. Support it by copying Op.
207     MachineOperand CopyOp(Op);
208     return addOperand(MF, CopyOp);
209   }
210 
211   // Find the insert location for the new operand.  Implicit registers go at
212   // the end, everything else goes before the implicit regs.
213   //
214   // FIXME: Allow mixed explicit and implicit operands on inline asm.
215   // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
216   // implicit-defs, but they must not be moved around.  See the FIXME in
217   // InstrEmitter.cpp.
218   unsigned OpNo = getNumOperands();
219   bool isImpReg = Op.isReg() && Op.isImplicit();
220   if (!isImpReg && !isInlineAsm()) {
221     while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
222       --OpNo;
223       assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
224     }
225   }
226 
227 #ifndef NDEBUG
228   bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
229   // OpNo now points to the desired insertion point.  Unless this is a variadic
230   // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
231   // RegMask operands go between the explicit and implicit operands.
232   assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
233           OpNo < MCID->getNumOperands() || isMetaDataOp) &&
234          "Trying to add an operand to a machine instr that is already done!");
235 #endif
236 
237   MachineRegisterInfo *MRI = getRegInfo();
238 
239   // Determine if the Operands array needs to be reallocated.
240   // Save the old capacity and operand array.
241   OperandCapacity OldCap = CapOperands;
242   MachineOperand *OldOperands = Operands;
243   if (!OldOperands || OldCap.getSize() == getNumOperands()) {
244     CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
245     Operands = MF.allocateOperandArray(CapOperands);
246     // Move the operands before the insertion point.
247     if (OpNo)
248       moveOperands(Operands, OldOperands, OpNo, MRI);
249   }
250 
251   // Move the operands following the insertion point.
252   if (OpNo != NumOperands)
253     moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
254                  MRI);
255   ++NumOperands;
256 
257   // Deallocate the old operand array.
258   if (OldOperands != Operands && OldOperands)
259     MF.deallocateOperandArray(OldCap, OldOperands);
260 
261   // Copy Op into place. It still needs to be inserted into the MRI use lists.
262   MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
263   NewMO->ParentMI = this;
264 
265   // When adding a register operand, tell MRI about it.
266   if (NewMO->isReg()) {
267     // Ensure isOnRegUseList() returns false, regardless of Op's status.
268     NewMO->Contents.Reg.Prev = nullptr;
269     // Ignore existing ties. This is not a property that can be copied.
270     NewMO->TiedTo = 0;
271     // Add the new operand to MRI, but only for instructions in an MBB.
272     if (MRI)
273       MRI->addRegOperandToUseList(NewMO);
274     // The MCID operand information isn't accurate until we start adding
275     // explicit operands. The implicit operands are added first, then the
276     // explicits are inserted before them.
277     if (!isImpReg) {
278       // Tie uses to defs as indicated in MCInstrDesc.
279       if (NewMO->isUse()) {
280         int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
281         if (DefIdx != -1)
282           tieOperands(DefIdx, OpNo);
283       }
284       // If the register operand is flagged as early, mark the operand as such.
285       if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
286         NewMO->setIsEarlyClobber(true);
287     }
288   }
289 }
290 
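// Editor's note: a small hedged example, not in the original source, of the
// ordering addOperand() maintains. MF, MI and PhysReg are assumed to exist in
// the caller.
//
//   MI->addOperand(MF, MachineOperand::CreateReg(PhysReg, /*isDef=*/false,
//                                                /*isImp=*/true)); // implicit use
//   MI->addOperand(MF, MachineOperand::CreateImm(42));
//   // The immediate is an explicit operand, so it is inserted *before* the
//   // implicit register use added first, keeping implicit operands at the end.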
291 /// RemoveOperand - Erase an operand from an instruction, leaving it with one
292 /// fewer operand than it started with.
293 ///
294 void MachineInstr::RemoveOperand(unsigned OpNo) {
295   assert(OpNo < getNumOperands() && "Invalid operand number");
296   untieRegOperand(OpNo);
297 
298 #ifndef NDEBUG
299   // Moving tied operands would break the ties.
300   for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
301     if (Operands[i].isReg())
302       assert(!Operands[i].isTied() && "Cannot move tied operands");
303 #endif
304 
305   MachineRegisterInfo *MRI = getRegInfo();
306   if (MRI && Operands[OpNo].isReg())
307     MRI->removeRegOperandFromUseList(Operands + OpNo);
308 
309   // Don't call the MachineOperand destructor. A lot of this code depends on
310   // MachineOperand having a trivial destructor anyway, and adding a call here
311   // wouldn't make it 'destructor-correct'.
312 
313   if (unsigned N = NumOperands - 1 - OpNo)
314     moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
315   --NumOperands;
316 }
317 
318 /// addMemOperand - Add a MachineMemOperand to the machine instruction.
319 /// This function should be used only occasionally. The setMemRefs function
320 /// is the primary method for setting up a MachineInstr's MemRefs list.
321 void MachineInstr::addMemOperand(MachineFunction &MF,
322                                  MachineMemOperand *MO) {
323   mmo_iterator OldMemRefs = MemRefs;
324   unsigned OldNumMemRefs = NumMemRefs;
325 
326   unsigned NewNum = NumMemRefs + 1;
327   mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
328 
329   std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
330   NewMemRefs[NewNum - 1] = MO;
331   setMemRefs(NewMemRefs, NewMemRefs + NewNum);
332 }
333 
334 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
335 /// identical.
336 static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
337   auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
338   auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
339   if ((E1 - I1) != (E2 - I2))
340     return false;
341   for (; I1 != E1; ++I1, ++I2) {
342     if (**I1 != **I2)
343       return false;
344   }
345   return true;
346 }
347 
348 std::pair<MachineInstr::mmo_iterator, unsigned>
349 MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {
350 
351   // If either of the incoming memrefs are empty, we must be conservative and
352   // treat this as if we've exhausted our space for memrefs and dropped them.
353   if (memoperands_empty() || Other.memoperands_empty())
354     return std::make_pair(nullptr, 0);
355 
356   // If both instructions have identical memrefs, we don't need to merge them.
357   // Since many instructions have a single memref, and we tend to merge things
358   // like pairs of loads from the same location, this catches a large number of
359   // cases in practice.
360   if (hasIdenticalMMOs(*this, Other))
361     return std::make_pair(MemRefs, NumMemRefs);
362 
363   // TODO: consider uniquing elements within the operand lists to reduce
364   // space usage and fall back to conservative information less often.
365   size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;
366 
367   // If we don't have enough room to store this many memrefs, be conservative
368   // and drop them.  Otherwise, we'd fail asserts when trying to add them to
369   // the new instruction.
370   if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
371     return std::make_pair(nullptr, 0);
372 
373   MachineFunction *MF = getMF();
374   mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
375   mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
376                                   MemBegin);
377   MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
378                      MemEnd);
379   assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
380          "missing memrefs");
381 
382   return std::make_pair(MemBegin, CombinedNumMemRefs);
383 }
384 
385 uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
386   // For now, just return the union of the flags. If the flags get more
387   // complicated over time, we might need more logic here.
388   return getFlags() | Other.getFlags();
389 }
390 
391 bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
392   assert(!isBundledWithPred() && "Must be called on bundle header");
393   for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
394     if (MII->getDesc().getFlags() & Mask) {
395       if (Type == AnyInBundle)
396         return true;
397     } else {
398       if (Type == AllInBundle && !MII->isBundle())
399         return false;
400     }
401     // This was the last instruction in the bundle.
402     if (!MII->isBundledWithSucc())
403       return Type == AllInBundle;
404   }
405 }
406 
407 bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
408                                  MICheckType Check) const {
409   // If opcodes or number of operands are not the same then the two
410   // instructions are obviously not identical.
411   if (Other.getOpcode() != getOpcode() ||
412       Other.getNumOperands() != getNumOperands())
413     return false;
414 
415   if (isBundle()) {
416     // We have passed the test above that both instructions have the same
417     // opcode, so we know that both instructions are bundles here. Let's compare
418     // MIs inside the bundle.
419     assert(Other.isBundle() && "Expected that both instructions are bundles.");
420     MachineBasicBlock::const_instr_iterator I1 = getIterator();
421     MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
422     // Loop until we have analyzed the last instruction inside at least one
423     // of the bundles.
424     while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
425       ++I1;
426       ++I2;
427       if (!I1->isIdenticalTo(*I2, Check))
428         return false;
429     }
430     // If we've reached the end of just one of the two bundles, but not both,
431     // the instructions are not identical.
432     if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
433       return false;
434   }
435 
436   // Check operands to make sure they match.
437   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
438     const MachineOperand &MO = getOperand(i);
439     const MachineOperand &OMO = Other.getOperand(i);
440     if (!MO.isReg()) {
441       if (!MO.isIdenticalTo(OMO))
442         return false;
443       continue;
444     }
445 
446     // Clients may or may not want to ignore defs when testing for equality.
447     // For example, machine CSE pass only cares about finding common
448     // subexpressions, so it's safe to ignore virtual register defs.
449     if (MO.isDef()) {
450       if (Check == IgnoreDefs)
451         continue;
452       else if (Check == IgnoreVRegDefs) {
453         if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
454             !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
455           if (!MO.isIdenticalTo(OMO))
456             return false;
457       } else {
458         if (!MO.isIdenticalTo(OMO))
459           return false;
460         if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
461           return false;
462       }
463     } else {
464       if (!MO.isIdenticalTo(OMO))
465         return false;
466       if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
467         return false;
468     }
469   }
470   // If DebugLoc does not match then two debug instructions are not identical.
471   if (isDebugInstr())
472     if (getDebugLoc() && Other.getDebugLoc() &&
473         getDebugLoc() != Other.getDebugLoc())
474       return false;
475   return true;
476 }
477 
478 const MachineFunction *MachineInstr::getMF() const {
479   return getParent()->getParent();
480 }
481 
482 MachineInstr *MachineInstr::removeFromParent() {
483   assert(getParent() && "Not embedded in a basic block!");
484   return getParent()->remove(this);
485 }
486 
487 MachineInstr *MachineInstr::removeFromBundle() {
488   assert(getParent() && "Not embedded in a basic block!");
489   return getParent()->remove_instr(this);
490 }
491 
492 void MachineInstr::eraseFromParent() {
493   assert(getParent() && "Not embedded in a basic block!");
494   getParent()->erase(this);
495 }
496 
497 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
498   assert(getParent() && "Not embedded in a basic block!");
499   MachineBasicBlock *MBB = getParent();
500   MachineFunction *MF = MBB->getParent();
501   assert(MF && "Not embedded in a function!");
502 
503   MachineInstr *MI = (MachineInstr *)this;
504   MachineRegisterInfo &MRI = MF->getRegInfo();
505 
506   for (const MachineOperand &MO : MI->operands()) {
507     if (!MO.isReg() || !MO.isDef())
508       continue;
509     unsigned Reg = MO.getReg();
510     if (!TargetRegisterInfo::isVirtualRegister(Reg))
511       continue;
512     MRI.markUsesInDebugValueAsUndef(Reg);
513   }
514   MI->eraseFromParent();
515 }
516 
517 void MachineInstr::eraseFromBundle() {
518   assert(getParent() && "Not embedded in a basic block!");
519   getParent()->erase_instr(this);
520 }
521 
522 unsigned MachineInstr::getNumExplicitOperands() const {
523   unsigned NumOperands = MCID->getNumOperands();
524   if (!MCID->isVariadic())
525     return NumOperands;
526 
527   for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
528     const MachineOperand &MO = getOperand(I);
529     // The operands must always be in the following order:
530     // - explicit reg defs,
531     // - other explicit operands (reg uses, immediates, etc.),
532     // - implicit reg defs
533     // - implicit reg uses
534     if (MO.isReg() && MO.isImplicit())
535       break;
536     ++NumOperands;
537   }
538   return NumOperands;
539 }
540 
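// Editor's note: illustrative only (added by the editor). For variadic
// instructions the explicit operand count is found by scanning, so the two
// counts below may differ; MI stands for any MachineInstr in scope.
//
//   unsigned AllOps      = MI.getNumOperands();          // explicit + implicit
//   unsigned ExplicitOps = MI.getNumExplicitOperands();  // stops at first implicit reg
//   assert(ExplicitOps <= AllOps);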
541 unsigned MachineInstr::getNumExplicitDefs() const {
542   unsigned NumDefs = MCID->getNumDefs();
543   if (!MCID->isVariadic())
544     return NumDefs;
545 
546   for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
547     const MachineOperand &MO = getOperand(I);
548     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
549       break;
550     ++NumDefs;
551   }
552   return NumDefs;
553 }
554 
555 void MachineInstr::bundleWithPred() {
556   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
557   setFlag(BundledPred);
558   MachineBasicBlock::instr_iterator Pred = getIterator();
559   --Pred;
560   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
561   Pred->setFlag(BundledSucc);
562 }
563 
564 void MachineInstr::bundleWithSucc() {
565   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
566   setFlag(BundledSucc);
567   MachineBasicBlock::instr_iterator Succ = getIterator();
568   ++Succ;
569   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
570   Succ->setFlag(BundledPred);
571 }
572 
573 void MachineInstr::unbundleFromPred() {
574   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
575   clearFlag(BundledPred);
576   MachineBasicBlock::instr_iterator Pred = getIterator();
577   --Pred;
578   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
579   Pred->clearFlag(BundledSucc);
580 }
581 
582 void MachineInstr::unbundleFromSucc() {
583   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
584   clearFlag(BundledSucc);
585   MachineBasicBlock::instr_iterator Succ = getIterator();
586   ++Succ;
587   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
588   Succ->clearFlag(BundledPred);
589 }
590 
591 bool MachineInstr::isStackAligningInlineAsm() const {
592   if (isInlineAsm()) {
593     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
594     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
595       return true;
596   }
597   return false;
598 }
599 
600 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
601   assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
602   unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
603   return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
604 }
605 
606 int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
607                                        unsigned *GroupNo) const {
608   assert(isInlineAsm() && "Expected an inline asm instruction");
609   assert(OpIdx < getNumOperands() && "OpIdx out of range");
610 
611   // Ignore queries about the initial operands.
612   if (OpIdx < InlineAsm::MIOp_FirstOperand)
613     return -1;
614 
615   unsigned Group = 0;
616   unsigned NumOps;
617   for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
618        i += NumOps) {
619     const MachineOperand &FlagMO = getOperand(i);
620     // If we reach the implicit register operands, stop looking.
621     if (!FlagMO.isImm())
622       return -1;
623     NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
624     if (i + NumOps > OpIdx) {
625       if (GroupNo)
626         *GroupNo = Group;
627       return i;
628     }
629     ++Group;
630   }
631   return -1;
632 }
633 
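// Editor's note: a hedged sketch, not part of the original file, of how the
// operand groups of an INLINEASM instruction can be walked with the same flag
// decoding findInlineAsmFlagIdx() uses; MI is assumed to be an inline asm
// MachineInstr.
//
//   unsigned NumOps = 0;
//   for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
//        I < E; I += NumOps) {
//     const MachineOperand &FlagMO = MI.getOperand(I);
//     if (!FlagMO.isImm())
//       break; // reached the trailing implicit register operands
//     unsigned Flag = FlagMO.getImm();
//     NumOps = 1 + InlineAsm::getNumOperandRegisters(Flag);
//     // InlineAsm::getKind(Flag) says whether this group is a register use,
//     // def, early-clobber def, memory operand or immediate.
//   }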
634 const DILabel *MachineInstr::getDebugLabel() const {
635   assert(isDebugLabel() && "not a DBG_LABEL");
636   return cast<DILabel>(getOperand(0).getMetadata());
637 }
638 
639 const DILocalVariable *MachineInstr::getDebugVariable() const {
640   assert(isDebugValue() && "not a DBG_VALUE");
641   return cast<DILocalVariable>(getOperand(2).getMetadata());
642 }
643 
644 const DIExpression *MachineInstr::getDebugExpression() const {
645   assert(isDebugValue() && "not a DBG_VALUE");
646   return cast<DIExpression>(getOperand(3).getMetadata());
647 }
648 
649 const TargetRegisterClass*
650 MachineInstr::getRegClassConstraint(unsigned OpIdx,
651                                     const TargetInstrInfo *TII,
652                                     const TargetRegisterInfo *TRI) const {
653   assert(getParent() && "Can't have an MBB reference here!");
654   assert(getMF() && "Can't have an MF reference here!");
655   const MachineFunction &MF = *getMF();
656 
657   // Most opcodes have fixed constraints in their MCInstrDesc.
658   if (!isInlineAsm())
659     return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
660 
661   if (!getOperand(OpIdx).isReg())
662     return nullptr;
663 
664   // For tied uses on inline asm, get the constraint from the def.
665   unsigned DefIdx;
666   if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
667     OpIdx = DefIdx;
668 
669   // Inline asm stores register class constraints in the flag word.
670   int FlagIdx = findInlineAsmFlagIdx(OpIdx);
671   if (FlagIdx < 0)
672     return nullptr;
673 
674   unsigned Flag = getOperand(FlagIdx).getImm();
675   unsigned RCID;
676   if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
677        InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
678        InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
679       InlineAsm::hasRegClassConstraint(Flag, RCID))
680     return TRI->getRegClass(RCID);
681 
682   // Assume that all registers in a memory operand are pointers.
683   if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
684     return TRI->getPointerRegClass(MF);
685 
686   return nullptr;
687 }
688 
689 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
690     unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
691     const TargetRegisterInfo *TRI, bool ExploreBundle) const {
692   // Check every operand inside the bundle if we have
693   // been asked to.
694   if (ExploreBundle)
695     for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
696          ++OpndIt)
697       CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
698           OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
699   else
700     // Otherwise, just check the current operands.
701     for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
702       CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
703   return CurRC;
704 }
705 
706 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
707     unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
708     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
709   assert(CurRC && "Invalid initial register class");
710   // Check if Reg is constrained by some of its use/def from MI.
711   const MachineOperand &MO = getOperand(OpIdx);
712   if (!MO.isReg() || MO.getReg() != Reg)
713     return CurRC;
714   // If yes, accumulate the constraints through the operand.
715   return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
716 }
717 
718 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
719     unsigned OpIdx, const TargetRegisterClass *CurRC,
720     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
721   const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
722   const MachineOperand &MO = getOperand(OpIdx);
723   assert(MO.isReg() &&
724          "Cannot get register constraints for non-register operand");
725   assert(CurRC && "Invalid initial register class");
726   if (unsigned SubIdx = MO.getSubReg()) {
727     if (OpRC)
728       CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
729     else
730       CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
731   } else if (OpRC)
732     CurRC = TRI->getCommonSubClass(CurRC, OpRC);
733   return CurRC;
734 }
735 
736 /// Return the number of instructions inside the MI bundle, not counting the
737 /// header instruction.
738 unsigned MachineInstr::getBundleSize() const {
739   MachineBasicBlock::const_instr_iterator I = getIterator();
740   unsigned Size = 0;
741   while (I->isBundledWithSucc()) {
742     ++Size;
743     ++I;
744   }
745   return Size;
746 }
747 
748 /// Returns true if the MachineInstr has an implicit-use operand of exactly
749 /// the given register (not considering sub/super-registers).
750 bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
751   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
752     const MachineOperand &MO = getOperand(i);
753     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
754       return true;
755   }
756   return false;
757 }
758 
759 /// findRegisterUseOperandIdx() - Returns the operand index that is a use of
760 /// the specified register or -1 if it is not found. It further tightens
761 /// the search criteria to a use that kills the register if isKill is true.
762 int MachineInstr::findRegisterUseOperandIdx(
763     unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
764   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
765     const MachineOperand &MO = getOperand(i);
766     if (!MO.isReg() || !MO.isUse())
767       continue;
768     unsigned MOReg = MO.getReg();
769     if (!MOReg)
770       continue;
771     if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
772                          TargetRegisterInfo::isPhysicalRegister(Reg) &&
773                          TRI->isSubRegister(MOReg, Reg)))
774       if (!isKill || MO.isKill())
775         return i;
776   }
777   return -1;
778 }
779 
780 /// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
781 /// indicating if this instruction reads or writes Reg. This also considers
782 /// partial defines.
783 std::pair<bool,bool>
784 MachineInstr::readsWritesVirtualRegister(unsigned Reg,
785                                          SmallVectorImpl<unsigned> *Ops) const {
786   bool PartDef = false; // Partial redefine.
787   bool FullDef = false; // Full define.
788   bool Use = false;
789 
790   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
791     const MachineOperand &MO = getOperand(i);
792     if (!MO.isReg() || MO.getReg() != Reg)
793       continue;
794     if (Ops)
795       Ops->push_back(i);
796     if (MO.isUse())
797       Use |= !MO.isUndef();
798     else if (MO.getSubReg() && !MO.isUndef())
799       // A partial def undef doesn't count as reading the register.
800       PartDef = true;
801     else
802       FullDef = true;
803   }
804   // A partial redefine uses Reg unless there is also a full define.
805   return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
806 }
807 
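// Editor's note: illustrative usage sketch, not from the original file. Reg and
// MI are placeholders; a pass might consult the (reads, writes) pair like this
// before rewriting a virtual register.
//
//   bool Reads, Writes;
//   std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg);
//   if (Writes && !Reads) {
//     // MI fully redefines Reg without reading it, so the incoming value of
//     // Reg is not live into this instruction.
//   }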
808 /// findRegisterDefOperandIdx() - Returns the operand index that is a def of
809 /// the specified register or -1 if it is not found. If isDead is true, defs
810 /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
811 /// also checks if there is a def of a super-register.
812 int
813 MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
814                                         const TargetRegisterInfo *TRI) const {
815   bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
816   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
817     const MachineOperand &MO = getOperand(i);
818     // Accept regmask operands when Overlap is set.
819     // Ignore them when looking for a specific def operand (Overlap == false).
820     if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
821       return i;
822     if (!MO.isReg() || !MO.isDef())
823       continue;
824     unsigned MOReg = MO.getReg();
825     bool Found = (MOReg == Reg);
826     if (!Found && TRI && isPhys &&
827         TargetRegisterInfo::isPhysicalRegister(MOReg)) {
828       if (Overlap)
829         Found = TRI->regsOverlap(MOReg, Reg);
830       else
831         Found = TRI->isSubRegister(MOReg, Reg);
832     }
833     if (Found && (!isDead || MO.isDead()))
834       return i;
835   }
836   return -1;
837 }
838 
839 /// findFirstPredOperandIdx() - Find the index of the first operand in the
840 /// operand list that is used to represent the predicate. It returns -1 if
841 /// none is found.
842 int MachineInstr::findFirstPredOperandIdx() const {
843   // Don't call MCID.findFirstPredOperandIdx() because this variant
844   // is sometimes called on an instruction that's not yet complete, and
845   // so the number of operands is less than the MCID indicates. In
846   // particular, the PTX target does this.
847   const MCInstrDesc &MCID = getDesc();
848   if (MCID.isPredicable()) {
849     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
850       if (MCID.OpInfo[i].isPredicate())
851         return i;
852   }
853 
854   return -1;
855 }
856 
857 // MachineOperand::TiedTo is 4 bits wide.
858 const unsigned TiedMax = 15;
859 
860 /// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
861 ///
862 /// Use and def operands can be tied together, indicated by a non-zero TiedTo
863 /// field. TiedTo can have these values:
864 ///
865 /// 0:              Operand is not tied to anything.
866 /// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
867 /// TiedMax:        Tied to an operand >= TiedMax-1.
868 ///
869 /// The tied def must be one of the first TiedMax operands on a normal
870 /// instruction. INLINEASM instructions allow more tied defs.
871 ///
872 void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
873   MachineOperand &DefMO = getOperand(DefIdx);
874   MachineOperand &UseMO = getOperand(UseIdx);
875   assert(DefMO.isDef() && "DefIdx must be a def operand");
876   assert(UseMO.isUse() && "UseIdx must be a use operand");
877   assert(!DefMO.isTied() && "Def is already tied to another use");
878   assert(!UseMO.isTied() && "Use is already tied to another def");
879 
880   if (DefIdx < TiedMax)
881     UseMO.TiedTo = DefIdx + 1;
882   else {
883     // Inline asm can use the group descriptors to find tied operands, but on
884     // a normal instruction, the tied def must be within the first TiedMax
885     // operands.
886     assert(isInlineAsm() && "DefIdx out of range");
887     UseMO.TiedTo = TiedMax;
888   }
889 
890   // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
891   DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
892 }
893 
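// Editor's note: a worked example of the TiedTo encoding described above, added
// for illustration only. For a typical two-address instruction where operand 0
// is the def and operand 1 the tied use:
//
//   MI.tieOperands(/*DefIdx=*/0, /*UseIdx=*/1);
//   // getOperand(1).TiedTo == 1  ->  tied to getOperand(1 - 1), i.e. operand 0
//   // getOperand(0).TiedTo == 2  ->  tied to getOperand(2 - 1), i.e. operand 1
//   // A value of TiedMax (15) would instead mean "tied to some operand >= 14",
//   // which findTiedOperandIdx() below resolves by searching.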
894 /// Given the index of a tied register operand, find the operand it is tied to.
895 /// Defs are tied to uses and vice versa. Returns the index of the tied operand
896 /// which must exist.
897 unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
898   const MachineOperand &MO = getOperand(OpIdx);
899   assert(MO.isTied() && "Operand isn't tied");
900 
901   // Normally TiedTo is in range.
902   if (MO.TiedTo < TiedMax)
903     return MO.TiedTo - 1;
904 
905   // Uses on normal instructions can be out of range.
906   if (!isInlineAsm()) {
907     // Normal tied defs must be in the 0..TiedMax-1 range.
908     if (MO.isUse())
909       return TiedMax - 1;
910     // MO is a def. Search for the tied use.
911     for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
912       const MachineOperand &UseMO = getOperand(i);
913       if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
914         return i;
915     }
916     llvm_unreachable("Can't find tied use");
917   }
918 
919   // Now deal with inline asm by parsing the operand group descriptor flags.
920   // Find the beginning of each operand group.
921   SmallVector<unsigned, 8> GroupIdx;
922   unsigned OpIdxGroup = ~0u;
923   unsigned NumOps;
924   for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
925        i += NumOps) {
926     const MachineOperand &FlagMO = getOperand(i);
927     assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
928     unsigned CurGroup = GroupIdx.size();
929     GroupIdx.push_back(i);
930     NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
931     // OpIdx belongs to this operand group.
932     if (OpIdx > i && OpIdx < i + NumOps)
933       OpIdxGroup = CurGroup;
934     unsigned TiedGroup;
935     if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
936       continue;
937     // Operands in this group are tied to operands in TiedGroup which must be
938     // earlier. Find the number of operands between the two groups.
939     unsigned Delta = i - GroupIdx[TiedGroup];
940 
941     // OpIdx is a use tied to TiedGroup.
942     if (OpIdxGroup == CurGroup)
943       return OpIdx - Delta;
944 
945     // OpIdx is a def tied to this use group.
946     if (OpIdxGroup == TiedGroup)
947       return OpIdx + Delta;
948   }
949   llvm_unreachable("Invalid tied operand on inline asm");
950 }
951 
952 /// clearKillInfo - Clears kill flags on all operands.
953 ///
954 void MachineInstr::clearKillInfo() {
955   for (MachineOperand &MO : operands()) {
956     if (MO.isReg() && MO.isUse())
957       MO.setIsKill(false);
958   }
959 }
960 
961 void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg,
962                                       unsigned SubIdx,
963                                       const TargetRegisterInfo &RegInfo) {
964   if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
965     if (SubIdx)
966       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
967     for (MachineOperand &MO : operands()) {
968       if (!MO.isReg() || MO.getReg() != FromReg)
969         continue;
970       MO.substPhysReg(ToReg, RegInfo);
971     }
972   } else {
973     for (MachineOperand &MO : operands()) {
974       if (!MO.isReg() || MO.getReg() != FromReg)
975         continue;
976       MO.substVirtReg(ToReg, SubIdx, RegInfo);
977     }
978   }
979 }
980 
981 /// isSafeToMove - Return true if it is safe to move this instruction. If
982 /// SawStore is set to true, it means that there is a store (or call) between
983 /// the instruction's location and its intended destination.
984 bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
985   // Ignore stuff that we obviously can't move.
986   //
987   // Treat volatile loads as stores. This is not strictly necessary for
988   // volatiles, but it is required for atomic loads. It is not allowed to move
989   // a load across an atomic load with Ordering > Monotonic.
990   if (mayStore() || isCall() || isPHI() ||
991       (mayLoad() && hasOrderedMemoryRef())) {
992     SawStore = true;
993     return false;
994   }
995 
996   if (isPosition() || isDebugInstr() || isTerminator() ||
997       hasUnmodeledSideEffects())
998     return false;
999 
1000   // See if this instruction does a load.  If so, we have to guarantee that the
1001   // loaded value doesn't change between the load and its intended
1002   // destination. The check for isDereferenceableInvariantLoad gives the target
1003   // the chance to classify the load as always returning a constant, e.g. a
1004   // constant pool load.
1005   if (mayLoad() && !isDereferenceableInvariantLoad(AA))
1006     // Otherwise, this is a real load.  If there is a store between the load and
1007     // end of block, we can't move it.
1008     return !SawStore;
1009 
1010   return true;
1011 }
1012 
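// Editor's note: a hedged sketch, not in the original file, of the usual calling
// pattern. SawStore is threaded through successive queries so that a store seen
// earlier in the scan blocks later loads from moving; From, To and AA are
// placeholders.
//
//   bool SawStore = false;
//   for (MachineBasicBlock::iterator I = From; I != To; ++I) {
//     if (!I->isSafeToMove(AA, SawStore)) {
//       // I (or something it must not cross) pins this range; give up.
//       break;
//     }
//   }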
1013 bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
1014                             bool UseTBAA) {
1015   const MachineFunction *MF = getMF();
1016   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1017   const MachineFrameInfo &MFI = MF->getFrameInfo();
1018 
1019   // If neither instruction stores to memory, they can't alias in any
1020   // meaningful way, even if they read from the same address.
1021   if (!mayStore() && !Other.mayStore())
1022     return false;
1023 
1024   // Let the target decide if memory accesses cannot possibly overlap.
1025   if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
1026     return false;
1027 
1028   // FIXME: Need to handle multiple memory operands to support all targets.
1029   if (!hasOneMemOperand() || !Other.hasOneMemOperand())
1030     return true;
1031 
1032   MachineMemOperand *MMOa = *memoperands_begin();
1033   MachineMemOperand *MMOb = *Other.memoperands_begin();
1034 
1035   // The following interface to AA is fashioned after DAGCombiner::isAlias
1036   // and operates with MachineMemOperand offset with some important
1037   // assumptions:
1038   //   - LLVM fundamentally assumes flat address spaces.
1039   //   - MachineMemOperand offset can *only* result from legalization and
1040   //     cannot affect queries other than the trivial case of overlap
1041   //     checking.
1042   //   - These offsets never wrap and never step outside
1043   //     of allocated objects.
1044   //   - There should never be any negative offsets here.
1045   //
1046   // FIXME: Modify API to hide this math from "user"
1047   // Even before we go to AA we can reason locally about some
1048   // memory objects. It can save compile time, and possibly catch some
1049   // corner cases not currently covered.
1050 
1051   int64_t OffsetA = MMOa->getOffset();
1052   int64_t OffsetB = MMOb->getOffset();
1053 
1054   int64_t MinOffset = std::min(OffsetA, OffsetB);
1055   int64_t WidthA = MMOa->getSize();
1056   int64_t WidthB = MMOb->getSize();
1057   const Value *ValA = MMOa->getValue();
1058   const Value *ValB = MMOb->getValue();
1059   bool SameVal = (ValA && ValB && (ValA == ValB));
1060   if (!SameVal) {
1061     const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1062     const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1063     if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1064       return false;
1065     if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1066       return false;
1067     if (PSVa && PSVb && (PSVa == PSVb))
1068       SameVal = true;
1069   }
1070 
1071   if (SameVal) {
1072     int64_t MaxOffset = std::max(OffsetA, OffsetB);
1073     int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
1074     return (MinOffset + LowWidth > MaxOffset);
1075   }
1076 
1077   if (!AA)
1078     return true;
1079 
1080   if (!ValA || !ValB)
1081     return true;
1082 
1083   assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1084   assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1085 
1086   int64_t Overlapa = WidthA + OffsetA - MinOffset;
1087   int64_t Overlapb = WidthB + OffsetB - MinOffset;
1088 
1089   AliasResult AAResult = AA->alias(
1090       MemoryLocation(ValA, Overlapa,
1091                      UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1092       MemoryLocation(ValB, Overlapb,
1093                      UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1094 
1095   return (AAResult != NoAlias);
1096 }
1097 
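// Editor's note: a small numeric illustration, added by the editor, of the
// same-value overlap test above. If both memory operands are based on the same
// IR value with OffsetA = 0, WidthA = 4 and OffsetB = 8, WidthB = 4, then
// MinOffset = 0, MaxOffset = 8 and LowWidth = WidthA = 4; since
// MinOffset + LowWidth = 4 is not greater than MaxOffset = 8, the accesses are
// disjoint and mayAlias() returns false. With OffsetB = 2 instead, 0 + 4 > 2
// holds and the accesses are conservatively reported as aliasing.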
1098 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1099 /// or volatile memory reference, or if the information describing the memory
1100 /// reference is not available. Return false if it is known to have no ordered
1101 /// memory references.
1102 bool MachineInstr::hasOrderedMemoryRef() const {
1103   // An instruction known never to access memory won't have a volatile access.
1104   if (!mayStore() &&
1105       !mayLoad() &&
1106       !isCall() &&
1107       !hasUnmodeledSideEffects())
1108     return false;
1109 
1110   // Otherwise, if the instruction has no memory reference information,
1111   // conservatively assume it wasn't preserved.
1112   if (memoperands_empty())
1113     return true;
1114 
1115   // Check if any of our memory operands are ordered.
1116   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1117     return !MMO->isUnordered();
1118   });
1119 }
1120 
1121 /// isDereferenceableInvariantLoad - Return true if this instruction will never
1122 /// trap and is loading from a location whose value is invariant across a run of
1123 /// this function.
1124 bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
1125   // If the instruction doesn't load at all, it isn't an invariant load.
1126   if (!mayLoad())
1127     return false;
1128 
1129   // If the instruction has lost its memoperands, conservatively assume that
1130   // it may not be an invariant load.
1131   if (memoperands_empty())
1132     return false;
1133 
1134   const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1135 
1136   for (MachineMemOperand *MMO : memoperands()) {
1137     if (MMO->isVolatile()) return false;
1138     if (MMO->isStore()) return false;
1139     if (MMO->isInvariant() && MMO->isDereferenceable())
1140       continue;
1141 
1142     // A load from a constant PseudoSourceValue is invariant.
1143     if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1144       if (PSV->isConstant(&MFI))
1145         continue;
1146 
1147     if (const Value *V = MMO->getValue()) {
1148       // If we have an AliasAnalysis, ask it whether the memory is constant.
1149       if (AA &&
1150           AA->pointsToConstantMemory(
1151               MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
1152         continue;
1153     }
1154 
1155     // Otherwise assume conservatively.
1156     return false;
1157   }
1158 
1159   // Everything checks out.
1160   return true;
1161 }
1162 
1163 /// isConstantValuePHI - If the specified instruction is a PHI that always
1164 /// merges together the same virtual register, return the register, otherwise
1165 /// return 0.
1166 unsigned MachineInstr::isConstantValuePHI() const {
1167   if (!isPHI())
1168     return 0;
1169   assert(getNumOperands() >= 3 &&
1170          "It's illegal to have a PHI without source operands");
1171 
1172   unsigned Reg = getOperand(1).getReg();
1173   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1174     if (getOperand(i).getReg() != Reg)
1175       return 0;
1176   return Reg;
1177 }
1178 
1179 bool MachineInstr::hasUnmodeledSideEffects() const {
1180   if (hasProperty(MCID::UnmodeledSideEffects))
1181     return true;
1182   if (isInlineAsm()) {
1183     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1184     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1185       return true;
1186   }
1187 
1188   return false;
1189 }
1190 
1191 bool MachineInstr::isLoadFoldBarrier() const {
1192   return mayStore() || isCall() || hasUnmodeledSideEffects();
1193 }
1194 
1195 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1196 ///
1197 bool MachineInstr::allDefsAreDead() const {
1198   for (const MachineOperand &MO : operands()) {
1199     if (!MO.isReg() || MO.isUse())
1200       continue;
1201     if (!MO.isDead())
1202       return false;
1203   }
1204   return true;
1205 }
1206 
1207 /// copyImplicitOps - Copy implicit register operands from specified
1208 /// instruction to this instruction.
1209 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1210                                    const MachineInstr &MI) {
1211   for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1212        i != e; ++i) {
1213     const MachineOperand &MO = MI.getOperand(i);
1214     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1215       addOperand(MF, MO);
1216   }
1217 }
1218 
1219 bool MachineInstr::hasComplexRegisterTies() const {
1220   const MCInstrDesc &MCID = getDesc();
1221   for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1222     const auto &Operand = getOperand(I);
1223     if (!Operand.isReg() || Operand.isDef())
1224       // Ignore the defined registers as MCID marks only the uses as tied.
1225       continue;
1226     int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1227     int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1228     if (ExpectedTiedIdx != TiedIdx)
1229       return true;
1230   }
1231   return false;
1232 }
1233 
1234 LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1235                                  const MachineRegisterInfo &MRI) const {
1236   const MachineOperand &Op = getOperand(OpIdx);
1237   if (!Op.isReg())
1238     return LLT{};
1239 
1240   if (isVariadic() || OpIdx >= getNumExplicitOperands())
1241     return MRI.getType(Op.getReg());
1242 
1243   auto &OpInfo = getDesc().OpInfo[OpIdx];
1244   if (!OpInfo.isGenericType())
1245     return MRI.getType(Op.getReg());
1246 
1247   if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1248     return LLT{};
1249 
1250   LLT TypeToPrint = MRI.getType(Op.getReg());
1251   // Don't mark the type index printed if it wasn't actually printed: maybe
1252   // another operand with the same type index has an actual type attached:
1253   if (TypeToPrint.isValid())
1254     PrintedTypes.set(OpInfo.getGenericTypeIndex());
1255   return TypeToPrint;
1256 }
1257 
1258 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1259 LLVM_DUMP_METHOD void MachineInstr::dump() const {
1260   dbgs() << "  ";
1261   print(dbgs());
1262 }
1263 #endif
1264 
1265 void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1266                          bool SkipDebugLoc, bool AddNewLine,
1267                          const TargetInstrInfo *TII) const {
1268   const Module *M = nullptr;
1269   const Function *F = nullptr;
1270   if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1271     F = &MF->getFunction();
1272     M = F->getParent();
1273     if (!TII)
1274       TII = MF->getSubtarget().getInstrInfo();
1275   }
1276 
1277   ModuleSlotTracker MST(M);
1278   if (F)
1279     MST.incorporateFunction(*F);
1280   print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, TII);
1281 }
1282 
1283 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1284                          bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1285                          bool AddNewLine, const TargetInstrInfo *TII) const {
1286   // We can be a bit tidier if we know the MachineFunction.
1287   const MachineFunction *MF = nullptr;
1288   const TargetRegisterInfo *TRI = nullptr;
1289   const MachineRegisterInfo *MRI = nullptr;
1290   const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1291   tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1292 
1293   if (isCFIInstruction())
1294     assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1295 
1296   SmallBitVector PrintedTypes(8);
1297   bool ShouldPrintRegisterTies = hasComplexRegisterTies();
1298   auto getTiedOperandIdx = [&](unsigned OpIdx) {
1299     if (!ShouldPrintRegisterTies)
1300       return 0U;
1301     const MachineOperand &MO = getOperand(OpIdx);
1302     if (MO.isReg() && MO.isTied() && !MO.isDef())
1303       return findTiedOperandIdx(OpIdx);
1304     return 0U;
1305   };
1306   unsigned StartOp = 0;
1307   unsigned e = getNumOperands();
1308 
1309   // Print explicitly defined operands on the left of an assignment syntax.
1310   while (StartOp < e) {
1311     const MachineOperand &MO = getOperand(StartOp);
1312     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1313       break;
1314 
1315     if (StartOp != 0)
1316       OS << ", ";
1317 
1318     LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1319     unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1320     MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
1321              ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1322     ++StartOp;
1323   }
1324 
1325   if (StartOp != 0)
1326     OS << " = ";
1327 
1328   if (getFlag(MachineInstr::FrameSetup))
1329     OS << "frame-setup ";
1330   if (getFlag(MachineInstr::FrameDestroy))
1331     OS << "frame-destroy ";
1332   if (getFlag(MachineInstr::FmNoNans))
1333     OS << "nnan ";
1334   if (getFlag(MachineInstr::FmNoInfs))
1335     OS << "ninf ";
1336   if (getFlag(MachineInstr::FmNsz))
1337     OS << "nsz ";
1338   if (getFlag(MachineInstr::FmArcp))
1339     OS << "arcp ";
1340   if (getFlag(MachineInstr::FmContract))
1341     OS << "contract ";
1342   if (getFlag(MachineInstr::FmAfn))
1343     OS << "afn ";
1344   if (getFlag(MachineInstr::FmReassoc))
1345     OS << "reassoc ";
1346 
1347   // Print the opcode name.
1348   if (TII)
1349     OS << TII->getName(getOpcode());
1350   else
1351     OS << "UNKNOWN";
1352 
1353   if (SkipOpers)
1354     return;
1355 
1356   // Print the rest of the operands.
1357   bool FirstOp = true;
1358   unsigned AsmDescOp = ~0u;
1359   unsigned AsmOpCount = 0;
1360 
1361   if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1362     // Print asm string.
1363     OS << " ";
1364     const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1365     LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1366     unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1367     getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1368                             ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1369                             IntrinsicInfo);
1370 
1371     // Print HasSideEffects, MayLoad, MayStore, IsConvergent, IsAlignStack.
1372     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1373     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1374       OS << " [sideeffect]";
1375     if (ExtraInfo & InlineAsm::Extra_MayLoad)
1376       OS << " [mayload]";
1377     if (ExtraInfo & InlineAsm::Extra_MayStore)
1378       OS << " [maystore]";
1379     if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1380       OS << " [isconvergent]";
1381     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1382       OS << " [alignstack]";
1383     if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1384       OS << " [attdialect]";
1385     if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1386       OS << " [inteldialect]";
1387 
1388     StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1389     FirstOp = false;
1390   }
1391 
1392   for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1393     const MachineOperand &MO = getOperand(i);
1394 
1395     if (FirstOp) FirstOp = false; else OS << ",";
1396     OS << " ";
1397 
1398     if (isDebugValue() && MO.isMetadata()) {
1399       // Pretty print DBG_VALUE instructions.
1400       auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1401       if (DIV && !DIV->getName().empty())
1402         OS << "!\"" << DIV->getName() << '\"';
1403       else {
1404         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1405         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1406         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1407                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1408       }
1409     } else if (isDebugLabel() && MO.isMetadata()) {
1410       // Pretty print DBG_LABEL instructions.
1411       auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1412       if (DIL && !DIL->getName().empty())
1413         OS << "\"" << DIL->getName() << '\"';
1414       else {
1415         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1416         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1417         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1418                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1419       }
1420     } else if (i == AsmDescOp && MO.isImm()) {
1421       // Pretty print the inline asm operand descriptor.
1422       OS << '$' << AsmOpCount++;
1423       unsigned Flag = MO.getImm();
1424       switch (InlineAsm::getKind(Flag)) {
1425       case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
1426       case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
1427       case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
1428       case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
1429       case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
1430       case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
1431       default: OS << ":[??" << InlineAsm::getKind(Flag); break;
1432       }
1433 
1434       unsigned RCID = 0;
1435       if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1436           InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1437         if (TRI) {
1438           OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1439         } else
1440           OS << ":RC" << RCID;
1441       }
1442 
1443       if (InlineAsm::isMemKind(Flag)) {
1444         unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1445         switch (MCID) {
1446         case InlineAsm::Constraint_es: OS << ":es"; break;
1447         case InlineAsm::Constraint_i:  OS << ":i"; break;
1448         case InlineAsm::Constraint_m:  OS << ":m"; break;
1449         case InlineAsm::Constraint_o:  OS << ":o"; break;
1450         case InlineAsm::Constraint_v:  OS << ":v"; break;
1451         case InlineAsm::Constraint_Q:  OS << ":Q"; break;
1452         case InlineAsm::Constraint_R:  OS << ":R"; break;
1453         case InlineAsm::Constraint_S:  OS << ":S"; break;
1454         case InlineAsm::Constraint_T:  OS << ":T"; break;
1455         case InlineAsm::Constraint_Um: OS << ":Um"; break;
1456         case InlineAsm::Constraint_Un: OS << ":Un"; break;
1457         case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
1458         case InlineAsm::Constraint_Us: OS << ":Us"; break;
1459         case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
1460         case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
1461         case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
1462         case InlineAsm::Constraint_X:  OS << ":X"; break;
1463         case InlineAsm::Constraint_Z:  OS << ":Z"; break;
1464         case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
1465         case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
1466         default: OS << ":?"; break;
1467         }
1468       }
1469 
1470       unsigned TiedTo = 0;
1471       if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1472         OS << " tiedto:$" << TiedTo;
1473 
1474       OS << ']';
1475 
1476       // Compute the index of the next operand descriptor.
1477       AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1478     } else {
1479       LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1480       unsigned TiedOperandIdx = getTiedOperandIdx(i);
1481       if (MO.isImm() && isOperandSubregIdx(i))
1482         MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1483       else
1484         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1485                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1486     }
1487   }
1488 
1489   if (!SkipDebugLoc) {
1490     if (const DebugLoc &DL = getDebugLoc()) {
1491       if (!FirstOp)
1492         OS << ',';
1493       OS << " debug-location ";
1494       DL->printAsOperand(OS, MST);
1495     }
1496   }
1497 
1498   if (!memoperands_empty()) {
1499     SmallVector<StringRef, 0> SSNs;
1500     const LLVMContext *Context = nullptr;
1501     std::unique_ptr<LLVMContext> CtxPtr;
1502     const MachineFrameInfo *MFI = nullptr;
1503     if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1504       MFI = &MF->getFrameInfo();
1505       Context = &MF->getFunction().getContext();
1506     } else {
1507       CtxPtr = llvm::make_unique<LLVMContext>();
1508       Context = CtxPtr.get();
1509     }
1510 
1511     OS << " :: ";
1512     bool NeedComma = false;
1513     for (const MachineMemOperand *Op : memoperands()) {
1514       if (NeedComma)
1515         OS << ", ";
1516       Op->print(OS, MST, SSNs, *Context, MFI, TII);
1517       NeedComma = true;
1518     }
1519   }
1520 
1521   if (SkipDebugLoc)
1522     return;
1523 
1524   bool HaveSemi = false;
1525 
1526   // Print debug location information.
1527   if (const DebugLoc &DL = getDebugLoc()) {
1528     if (!HaveSemi) {
1529       OS << ';';
1530       HaveSemi = true;
1531     }
1532     OS << ' ';
1533     DL.print(OS);
1534   }
1535 
1536   // Print extra comments for DEBUG_VALUE.
1537   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
1538     if (!HaveSemi) {
1539       OS << ";";
1540       HaveSemi = true;
1541     }
1542     auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
1543     OS << " line no:" << DV->getLine();
1544     if (auto *InlinedAt = debugLoc->getInlinedAt()) {
1545       DebugLoc InlinedAtDL(InlinedAt);
1546       if (InlinedAtDL && MF) {
1547         OS << " inlined @[ ";
1548         InlinedAtDL.print(OS);
1549         OS << " ]";
1550       }
1551     }
1552     if (isIndirectDebugValue())
1553       OS << " indirect";
1554   }
1555   // TODO: DBG_LABEL
1556 
1557   if (AddNewLine)
1558     OS << '\n';
1559 }
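
// Illustrative sketch of the textual form produced above (the opcode,
// registers, metadata ids and memory operand are hypothetical examples):
//
//   frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
//   %1:gr64 = MOV64rm %0, 1, $noreg, 0, $noreg, debug-location !17 :: (load 8 from %ir.p)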
1560 
1561 bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
1562                                      const TargetRegisterInfo *RegInfo,
1563                                      bool AddIfNotFound) {
1564   bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
1565   bool hasAliases = isPhysReg &&
1566     MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
1567   bool Found = false;
1568   SmallVector<unsigned,4> DeadOps;
1569   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1570     MachineOperand &MO = getOperand(i);
1571     if (!MO.isReg() || !MO.isUse() || MO.isUndef())
1572       continue;
1573 
1574     // DEBUG_VALUE nodes do not contribute to code generation and should
1575     // always be ignored. Failure to do so may result in trying to modify
1576     // KILL flags on DEBUG_VALUE nodes.
1577     if (MO.isDebug())
1578       continue;
1579 
1580     unsigned Reg = MO.getReg();
1581     if (!Reg)
1582       continue;
1583 
1584     if (Reg == IncomingReg) {
1585       if (!Found) {
1586         if (MO.isKill())
1587           // The register is already marked kill.
1588           return true;
1589         if (isPhysReg && isRegTiedToDefOperand(i))
1590           // Two-address uses of physregs must not be marked kill.
1591           return true;
1592         MO.setIsKill();
1593         Found = true;
1594       }
1595     } else if (hasAliases && MO.isKill() &&
1596                TargetRegisterInfo::isPhysicalRegister(Reg)) {
1597       // A super-register kill already exists.
1598       if (RegInfo->isSuperRegister(IncomingReg, Reg))
1599         return true;
1600       if (RegInfo->isSubRegister(IncomingReg, Reg))
1601         DeadOps.push_back(i);
1602     }
1603   }
1604 
1605   // Trim unneeded kill operands.
1606   while (!DeadOps.empty()) {
1607     unsigned OpIdx = DeadOps.back();
1608     if (getOperand(OpIdx).isImplicit())
1609       RemoveOperand(OpIdx);
1610     else
1611       getOperand(OpIdx).setIsKill(false);
1612     DeadOps.pop_back();
1613   }
1614 
1615   // If not found, this means an alias of one of the operands is killed. Add a
1616   // new implicit operand if required.
1617   if (!Found && AddIfNotFound) {
1618     addOperand(MachineOperand::CreateReg(IncomingReg,
1619                                          false /*IsDef*/,
1620                                          true  /*IsImp*/,
1621                                          true  /*IsKill*/));
1622     return true;
1623   }
1624   return Found;
1625 }
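
// Illustrative usage sketch (an assumption): a liveness update marking the
// last use of a physical register on an instruction as killed, appending an
// implicit use when no suitable operand exists. `LastUseMI`, `KilledReg` and
// `TRI` are hypothetical names for the instruction, register and register
// info involved.
//
//   LastUseMI->addRegisterKilled(KilledReg, TRI, /*AddIfNotFound=*/true);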
1626 
1627 void MachineInstr::clearRegisterKills(unsigned Reg,
1628                                       const TargetRegisterInfo *RegInfo) {
1629   if (!TargetRegisterInfo::isPhysicalRegister(Reg))
1630     RegInfo = nullptr;
1631   for (MachineOperand &MO : operands()) {
1632     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1633       continue;
1634     unsigned OpReg = MO.getReg();
1635     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1636       MO.setIsKill(false);
1637   }
1638 }
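
// Illustrative usage sketch (an assumption): after moving an instruction past
// other uses, conservatively drop kill flags that may no longer mark the last
// use; for a physical register this also clears kills of overlapping registers.
//
//   MovedMI->clearRegisterKills(Reg, TRI);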
1639 
1640 bool MachineInstr::addRegisterDead(unsigned Reg,
1641                                    const TargetRegisterInfo *RegInfo,
1642                                    bool AddIfNotFound) {
1643   bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
1644   bool hasAliases = isPhysReg &&
1645     MCRegAliasIterator(Reg, RegInfo, false).isValid();
1646   bool Found = false;
1647   SmallVector<unsigned,4> DeadOps;
1648   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1649     MachineOperand &MO = getOperand(i);
1650     if (!MO.isReg() || !MO.isDef())
1651       continue;
1652     unsigned MOReg = MO.getReg();
1653     if (!MOReg)
1654       continue;
1655 
1656     if (MOReg == Reg) {
1657       MO.setIsDead();
1658       Found = true;
1659     } else if (hasAliases && MO.isDead() &&
1660                TargetRegisterInfo::isPhysicalRegister(MOReg)) {
1661       // There exists a super-register that's marked dead.
1662       if (RegInfo->isSuperRegister(Reg, MOReg))
1663         return true;
1664       if (RegInfo->isSubRegister(Reg, MOReg))
1665         DeadOps.push_back(i);
1666     }
1667   }
1668 
1669   // Trim unneeded dead operands.
1670   while (!DeadOps.empty()) {
1671     unsigned OpIdx = DeadOps.back();
1672     if (getOperand(OpIdx).isImplicit())
1673       RemoveOperand(OpIdx);
1674     else
1675       getOperand(OpIdx).setIsDead(false);
1676     DeadOps.pop_back();
1677   }
1678 
1679   // If not found, this means an alias of one of the operands is dead. Add a
1680   // new implicit operand if required.
1681   if (Found || !AddIfNotFound)
1682     return Found;
1683 
1684   addOperand(MachineOperand::CreateReg(Reg,
1685                                        true  /*IsDef*/,
1686                                        true  /*IsImp*/,
1687                                        false /*IsKill*/,
1688                                        true  /*IsDead*/));
1689   return true;
1690 }
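
// Illustrative usage sketch (an assumption): marking an otherwise unused
// physical-register def as dead; if the register is only clobbered through an
// alias, an implicit dead def is appended instead.
//
//   DefMI->addRegisterDead(ScratchReg, TRI, /*AddIfNotFound=*/true);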
1691 
1692 void MachineInstr::clearRegisterDeads(unsigned Reg) {
1693   for (MachineOperand &MO : operands()) {
1694     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1695       continue;
1696     MO.setIsDead(false);
1697   }
1698 }
1699 
1700 void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
1701   for (MachineOperand &MO : operands()) {
1702     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1703       continue;
1704     MO.setIsUndef(IsUndef);
1705   }
1706 }
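
// Illustrative usage sketch (an assumption): when a subregister def no longer
// needs the previous value of the other lanes, mark its defs read-undef so
// the register allocator does not have to preserve them.
//
//   SubRegDefMI->setRegisterDefReadUndef(VReg, /*IsUndef=*/true);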
1707 
1708 void MachineInstr::addRegisterDefined(unsigned Reg,
1709                                       const TargetRegisterInfo *RegInfo) {
1710   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1711     MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
1712     if (MO)
1713       return;
1714   } else {
1715     for (const MachineOperand &MO : operands()) {
1716       if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1717           MO.getSubReg() == 0)
1718         return;
1719     }
1720   }
1721   addOperand(MachineOperand::CreateReg(Reg,
1722                                        true  /*IsDef*/,
1723                                        true  /*IsImp*/));
1724 }
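
// Illustrative usage sketch (an assumption): making sure a call-like
// instruction explicitly defines a register that its regmask operand would
// otherwise report as clobbered, e.g. a register holding a return value.
//
//   CallMI->addRegisterDefined(RetReg, TRI);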
1725 
1726 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1727                                          const TargetRegisterInfo &TRI) {
1728   bool HasRegMask = false;
1729   for (MachineOperand &MO : operands()) {
1730     if (MO.isRegMask()) {
1731       HasRegMask = true;
1732       continue;
1733     }
1734     if (!MO.isReg() || !MO.isDef()) continue;
1735     unsigned Reg = MO.getReg();
1736     if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
1737     // If there are no uses, including partial uses, the def is dead.
1738     if (llvm::none_of(UsedRegs,
1739                       [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
1740       MO.setIsDead();
1741   }
1742 
1743   // This is a call with a register mask operand.
1744   // Mask clobbers are always dead, so add defs for the non-dead defines.
1745   if (HasRegMask)
1746     for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1747          I != E; ++I)
1748       addRegisterDefined(*I, &TRI);
1749 }
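
// Illustrative usage sketch (an assumption): after lowering a call, mark all
// implicitly defined physical registers dead except the ones actually
// carrying return values. `RetReg` is a hypothetical return-value register.
//
//   SmallVector<unsigned, 4> UsedRegs;
//   UsedRegs.push_back(RetReg);
//   CallMI->setPhysRegsDeadExcept(UsedRegs, *TRI);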
1750 
1751 unsigned
1752 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1753   // Build up a buffer of hash code components.
1754   SmallVector<size_t, 8> HashComponents;
1755   HashComponents.reserve(MI->getNumOperands() + 1);
1756   HashComponents.push_back(MI->getOpcode());
1757   for (const MachineOperand &MO : MI->operands()) {
1758     if (MO.isReg() && MO.isDef() &&
1759         TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1760       continue;  // Skip virtual register defs.
1761 
1762     HashComponents.push_back(hash_value(MO));
1763   }
1764   return hash_combine_range(HashComponents.begin(), HashComponents.end());
1765 }
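
// Illustrative usage sketch (an assumption): MachineInstrExpressionTrait lets
// a DenseMap key instructions by their "expression" (opcode plus operands
// other than virtual register defs), which is how CSE-style passes recognize
// duplicate computations.
//
//   DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> Seen;
//   if (Seen.count(&MI))
//     ; // an equivalent instruction was already recorded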
1766 
1767 void MachineInstr::emitError(StringRef Msg) const {
1768   // Find the source location cookie.
1769   unsigned LocCookie = 0;
1770   const MDNode *LocMD = nullptr;
1771   for (unsigned i = getNumOperands(); i != 0; --i) {
1772     if (getOperand(i-1).isMetadata() &&
1773         (LocMD = getOperand(i-1).getMetadata()) &&
1774         LocMD->getNumOperands() != 0) {
1775       if (const ConstantInt *CI =
1776               mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1777         LocCookie = CI->getZExtValue();
1778         break;
1779       }
1780     }
1781   }
1782 
1783   if (const MachineBasicBlock *MBB = getParent())
1784     if (const MachineFunction *MF = MBB->getParent())
1785       return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
1786   report_fatal_error(Msg);
1787 }
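
// Illustrative usage sketch (an assumption): reporting a target-specific
// problem against the source location recorded on an INLINEASM instruction;
// the message text is hypothetical.
//
//   MI->emitError("scratch register is not available for inline asm");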
1788 
1789 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1790                                   const MCInstrDesc &MCID, bool IsIndirect,
1791                                   unsigned Reg, const MDNode *Variable,
1792                                   const MDNode *Expr) {
1793   assert(isa<DILocalVariable>(Variable) && "not a variable");
1794   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1795   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1796          "Expected inlined-at fields to agree");
1797   auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
1798   if (IsIndirect)
1799     MIB.addImm(0U);
1800   else
1801     MIB.addReg(0U, RegState::Debug);
1802   return MIB.addMetadata(Variable).addMetadata(Expr);
1803 }
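
// Illustrative usage sketch (an assumption): materializing a DBG_VALUE that
// describes a variable currently living in virtual register `VReg`. `DL`,
// `Variable` and `Expr` would come from the corresponding llvm.dbg.value
// intrinsic.
//
//   const MCInstrDesc &Desc = TII->get(TargetOpcode::DBG_VALUE);
//   BuildMI(MF, DL, Desc, /*IsIndirect=*/false, VReg, Variable, Expr);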
1804 
1805 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1806                                   const MCInstrDesc &MCID, bool IsIndirect,
1807                                   MachineOperand &MO, const MDNode *Variable,
1808                                   const MDNode *Expr) {
1809   assert(isa<DILocalVariable>(Variable) && "not a variable");
1810   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1811   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1812          "Expected inlined-at fields to agree");
1813   if (MO.isReg())
1814     return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);
1815 
1816   auto MIB = BuildMI(MF, DL, MCID).add(MO);
1817   if (IsIndirect)
1818     MIB.addImm(0U);
1819   else
1820     MIB.addReg(0U, RegState::Debug);
1821   return MIB.addMetadata(Variable).addMetadata(Expr);
1822 }
1823 
1824 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1825                                   MachineBasicBlock::iterator I,
1826                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1827                                   bool IsIndirect, unsigned Reg,
1828                                   const MDNode *Variable, const MDNode *Expr) {
1829   MachineFunction &MF = *BB.getParent();
1830   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
1831   BB.insert(I, MI);
1832   return MachineInstrBuilder(MF, MI);
1833 }
1834 
1835 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1836                                   MachineBasicBlock::iterator I,
1837                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1838                                   bool IsIndirect, MachineOperand &MO,
1839                                   const MDNode *Variable, const MDNode *Expr) {
1840   MachineFunction &MF = *BB.getParent();
1841   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
1842   BB.insert(I, MI);
1843   return MachineInstrBuilder(MF, *MI);
1844 }
1845 
1846 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
1847 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
1848 static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
1849   assert(MI.getOperand(0).isReg() && "can't spill non-register");
1850   assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
1851          "Expected inlined-at fields to agree");
1852 
1853   const DIExpression *Expr = MI.getDebugExpression();
1854   if (MI.isIndirectDebugValue()) {
1855     assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
1856     Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
1857   }
1858   return Expr;
1859 }
1860 
1861 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
1862                                           MachineBasicBlock::iterator I,
1863                                           const MachineInstr &Orig,
1864                                           int FrameIndex) {
1865   const DIExpression *Expr = computeExprForSpill(Orig);
1866   return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
1867       .addFrameIndex(FrameIndex)
1868       .addImm(0U)
1869       .addMetadata(Orig.getDebugVariable())
1870       .addMetadata(Expr);
1871 }
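
// Illustrative usage sketch (an assumption): when the register named by a
// DBG_VALUE is spilled, emit a frame-index-based DBG_VALUE right after the
// spill store so the variable stays described at its stack slot. `DbgMI`,
// `StoreIt` and `FI` are hypothetical names for the debug instruction, the
// store iterator and the spill slot.
//
//   buildDbgValueForSpill(*StoreIt->getParent(), std::next(StoreIt), *DbgMI, FI);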
1872 
1873 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
1874   const DIExpression *Expr = computeExprForSpill(Orig);
1875   Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
1876   Orig.getOperand(1).ChangeToImmediate(0U);
1877   Orig.getOperand(3).setMetadata(Expr);
1878 }
1879