1 //===-- lib/CodeGen/MachineInstr.cpp --------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Methods common to all machine instructions.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/ADT/FoldingSet.h"
16 #include "llvm/ADT/Hashing.h"
17 #include "llvm/Analysis/AliasAnalysis.h"
18 #include "llvm/CodeGen/MachineConstantPool.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineMemOperand.h"
21 #include "llvm/CodeGen/MachineModuleInfo.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/PseudoSourceValue.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DebugInfo.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/InlineAsm.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/IR/Metadata.h"
30 #include "llvm/IR/Module.h"
31 #include "llvm/IR/Type.h"
32 #include "llvm/IR/Value.h"
33 #include "llvm/MC/MCInstrDesc.h"
34 #include "llvm/MC/MCSymbol.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/MathExtras.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Target/TargetInstrInfo.h"
40 #include "llvm/Target/TargetMachine.h"
41 #include "llvm/Target/TargetRegisterInfo.h"
42 using namespace llvm;
43
44 //===----------------------------------------------------------------------===//
45 // MachineOperand Implementation
46 //===----------------------------------------------------------------------===//
47
48 void MachineOperand::setReg(unsigned Reg) {
49 if (getReg() == Reg) return; // No change.
50
51 // Otherwise, we have to change the register. If this operand is embedded
52 // into a machine function, we need to update the old and new register's
53 // use/def lists.
54 if (MachineInstr *MI = getParent())
55 if (MachineBasicBlock *MBB = MI->getParent())
56 if (MachineFunction *MF = MBB->getParent()) {
57 MachineRegisterInfo &MRI = MF->getRegInfo();
58 MRI.removeRegOperandFromUseList(this);
59 SmallContents.RegNo = Reg;
60 MRI.addRegOperandToUseList(this);
61 return;
62 }
63
64 // Otherwise, just change the register, no problem. :)
65 SmallContents.RegNo = Reg;
66 }
67
68 void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
69 const TargetRegisterInfo &TRI) {
70 assert(TargetRegisterInfo::isVirtualRegister(Reg));
71 if (SubIdx && getSubReg())
72 SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
73 setReg(Reg);
74 if (SubIdx)
75 setSubReg(SubIdx);
76 }
77
78 void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
79 assert(TargetRegisterInfo::isPhysicalRegister(Reg));
80 if (getSubReg()) {
81 Reg = TRI.getSubReg(Reg, getSubReg());
82 // Note that getSubReg() may return 0 if the sub-register doesn't exist.
83 // That won't happen in legal code.
84 setSubReg(0);
85 }
86 setReg(Reg);
87 }
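// Illustrative sketch, not part of the original source (MO, NewVReg, SubIdx,
// PhysReg and TRI are hypothetical names): how the two subst helpers behave.
// For a virtual register the sub-register index is kept (and composed if one
// was already present); for a physical register it is folded away:
//
//   MO.substVirtReg(NewVReg, SubIdx, TRI);  // MO becomes NewVReg:SubIdx
//   MO.substPhysReg(PhysReg, TRI);          // MO becomes the sub-register of
//                                           // PhysReg and getSubReg() == 0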
88
89 /// Change a def to a use, or a use to a def.
90 void MachineOperand::setIsDef(bool Val) {
91 assert(isReg() && "Wrong MachineOperand accessor");
92 assert((!Val || !isDebug()) && "Marking a debug operation as def");
93 if (IsDef == Val)
94 return;
95 // MRI may keep uses and defs in different list positions.
96 if (MachineInstr *MI = getParent())
97 if (MachineBasicBlock *MBB = MI->getParent())
98 if (MachineFunction *MF = MBB->getParent()) {
99 MachineRegisterInfo &MRI = MF->getRegInfo();
100 MRI.removeRegOperandFromUseList(this);
101 IsDef = Val;
102 MRI.addRegOperandToUseList(this);
103 return;
104 }
105 IsDef = Val;
106 }
107
108 /// ChangeToImmediate - Replace this operand with a new immediate operand of
109 /// the specified value. If an operand is known to be an immediate already,
110 /// the setImm method should be used.
111 void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
112 assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");
113 // If this operand is currently a register operand, and if this is in a
114 // function, deregister the operand from the register's use/def list.
115 if (isReg() && isOnRegUseList())
116 if (MachineInstr *MI = getParent())
117 if (MachineBasicBlock *MBB = MI->getParent())
118 if (MachineFunction *MF = MBB->getParent())
119 MF->getRegInfo().removeRegOperandFromUseList(this);
120
121 OpKind = MO_Immediate;
122 Contents.ImmVal = ImmVal;
123 }
124
125 /// ChangeToRegister - Replace this operand with a new register operand of
126 /// the specified value. If an operand is known to be a register already,
127 /// the setReg method should be used.
128 void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
129 bool isKill, bool isDead, bool isUndef,
130 bool isDebug) {
131 MachineRegisterInfo *RegInfo = nullptr;
132 if (MachineInstr *MI = getParent())
133 if (MachineBasicBlock *MBB = MI->getParent())
134 if (MachineFunction *MF = MBB->getParent())
135 RegInfo = &MF->getRegInfo();
136 // If this operand is already a register operand, remove it from the
137 // register's use/def lists.
138 bool WasReg = isReg();
139 if (RegInfo && WasReg)
140 RegInfo->removeRegOperandFromUseList(this);
141
142 // Change this to a register and set the reg#.
143 OpKind = MO_Register;
144 SmallContents.RegNo = Reg;
145 SubReg_TargetFlags = 0;
146 IsDef = isDef;
147 IsImp = isImp;
148 IsKill = isKill;
149 IsDead = isDead;
150 IsUndef = isUndef;
151 IsInternalRead = false;
152 IsEarlyClobber = false;
153 IsDebug = isDebug;
154 // Ensure isOnRegUseList() returns false.
155 Contents.Reg.Prev = nullptr;
156 // Preserve the tie when the operand was already a register.
157 if (!WasReg)
158 TiedTo = 0;
159
160 // If this operand is embedded in a function, add the operand to the
161 // register's use/def list.
162 if (RegInfo)
163 RegInfo->addRegOperandToUseList(this);
164 }
165
166 /// isIdenticalTo - Return true if this operand is identical to the specified
167 /// operand. Note that this should stay in sync with the hash_value overload
168 /// below.
169 bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
170 if (getType() != Other.getType() ||
171 getTargetFlags() != Other.getTargetFlags())
172 return false;
173
174 switch (getType()) {
175 case MachineOperand::MO_Register:
176 return getReg() == Other.getReg() && isDef() == Other.isDef() &&
177 getSubReg() == Other.getSubReg();
178 case MachineOperand::MO_Immediate:
179 return getImm() == Other.getImm();
180 case MachineOperand::MO_CImmediate:
181 return getCImm() == Other.getCImm();
182 case MachineOperand::MO_FPImmediate:
183 return getFPImm() == Other.getFPImm();
184 case MachineOperand::MO_MachineBasicBlock:
185 return getMBB() == Other.getMBB();
186 case MachineOperand::MO_FrameIndex:
187 return getIndex() == Other.getIndex();
188 case MachineOperand::MO_ConstantPoolIndex:
189 case MachineOperand::MO_TargetIndex:
190 return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
191 case MachineOperand::MO_JumpTableIndex:
192 return getIndex() == Other.getIndex();
193 case MachineOperand::MO_GlobalAddress:
194 return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
195 case MachineOperand::MO_ExternalSymbol:
196 return !strcmp(getSymbolName(), Other.getSymbolName()) &&
197 getOffset() == Other.getOffset();
198 case MachineOperand::MO_BlockAddress:
199 return getBlockAddress() == Other.getBlockAddress() &&
200 getOffset() == Other.getOffset();
201 case MachineOperand::MO_RegisterMask:
202 case MachineOperand::MO_RegisterLiveOut:
203 return getRegMask() == Other.getRegMask();
204 case MachineOperand::MO_MCSymbol:
205 return getMCSymbol() == Other.getMCSymbol();
206 case MachineOperand::MO_CFIIndex:
207 return getCFIIndex() == Other.getCFIIndex();
208 case MachineOperand::MO_Metadata:
209 return getMetadata() == Other.getMetadata();
210 }
211 llvm_unreachable("Invalid machine operand type");
212 }
213
214 // Note: this must stay exactly in sync with isIdenticalTo above.
215 hash_code llvm::hash_value(const MachineOperand &MO) {
216 switch (MO.getType()) {
217 case MachineOperand::MO_Register:
218 // Register operands don't have target flags.
219 return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
220 case MachineOperand::MO_Immediate:
221 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
222 case MachineOperand::MO_CImmediate:
223 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm());
224 case MachineOperand::MO_FPImmediate:
225 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm());
226 case MachineOperand::MO_MachineBasicBlock:
227 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB());
228 case MachineOperand::MO_FrameIndex:
229 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
230 case MachineOperand::MO_ConstantPoolIndex:
231 case MachineOperand::MO_TargetIndex:
232 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(),
233 MO.getOffset());
234 case MachineOperand::MO_JumpTableIndex:
235 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
236 case MachineOperand::MO_ExternalSymbol:
237 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(),
238 MO.getSymbolName());
239 case MachineOperand::MO_GlobalAddress:
240 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(),
241 MO.getOffset());
242 case MachineOperand::MO_BlockAddress:
243 return hash_combine(MO.getType(), MO.getTargetFlags(),
244 MO.getBlockAddress(), MO.getOffset());
245 case MachineOperand::MO_RegisterMask:
246 case MachineOperand::MO_RegisterLiveOut:
247 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
248 case MachineOperand::MO_Metadata:
249 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
250 case MachineOperand::MO_MCSymbol:
251 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
252 case MachineOperand::MO_CFIIndex:
253 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex());
254 }
255 llvm_unreachable("Invalid machine operand type");
256 }
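// Illustrative sketch, not part of the original source (MO1/MO2 are
// hypothetical operands): because hash_value() is kept in sync with
// isIdenticalTo(), the hash can serve as a cheap filter before the exact
// comparison:
//
//   if (hash_value(MO1) == hash_value(MO2) && MO1.isIdenticalTo(MO2)) {
//     // treat the operands as interchangeable
//   }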
257
258 /// print - Print the specified machine operand.
259 ///
260 void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
261 // If the instruction is embedded into a basic block, we can find the
262 // target info for the instruction.
263 if (!TM)
264 if (const MachineInstr *MI = getParent())
265 if (const MachineBasicBlock *MBB = MI->getParent())
266 if (const MachineFunction *MF = MBB->getParent())
267 TM = &MF->getTarget();
268 const TargetRegisterInfo *TRI = TM ? TM->getRegisterInfo() : nullptr;
269
270 switch (getType()) {
271 case MachineOperand::MO_Register:
272 OS << PrintReg(getReg(), TRI, getSubReg());
273
274 if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
275 isInternalRead() || isEarlyClobber() || isTied()) {
276 OS << '<';
277 bool NeedComma = false;
278 if (isDef()) {
279 if (NeedComma) OS << ',';
280 if (isEarlyClobber())
281 OS << "earlyclobber,";
282 if (isImplicit())
283 OS << "imp-";
284 OS << "def";
285 NeedComma = true;
286 // <def,read-undef> only makes sense when getSubReg() is set.
287 // Don't clutter the output otherwise.
288 if (isUndef() && getSubReg())
289 OS << ",read-undef";
290 } else if (isImplicit()) {
291 OS << "imp-use";
292 NeedComma = true;
293 }
294
295 if (isKill()) {
296 if (NeedComma) OS << ',';
297 OS << "kill";
298 NeedComma = true;
299 }
300 if (isDead()) {
301 if (NeedComma) OS << ',';
302 OS << "dead";
303 NeedComma = true;
304 }
305 if (isUndef() && isUse()) {
306 if (NeedComma) OS << ',';
307 OS << "undef";
308 NeedComma = true;
309 }
310 if (isInternalRead()) {
311 if (NeedComma) OS << ',';
312 OS << "internal";
313 NeedComma = true;
314 }
315 if (isTied()) {
316 if (NeedComma) OS << ',';
317 OS << "tied";
318 if (TiedTo != 15)
319 OS << unsigned(TiedTo - 1);
320 }
321 OS << '>';
322 }
323 break;
324 case MachineOperand::MO_Immediate:
325 OS << getImm();
326 break;
327 case MachineOperand::MO_CImmediate:
328 getCImm()->getValue().print(OS, false);
329 break;
330 case MachineOperand::MO_FPImmediate:
331 if (getFPImm()->getType()->isFloatTy())
332 OS << getFPImm()->getValueAPF().convertToFloat();
333 else
334 OS << getFPImm()->getValueAPF().convertToDouble();
335 break;
336 case MachineOperand::MO_MachineBasicBlock:
337 OS << "<BB#" << getMBB()->getNumber() << ">";
338 break;
339 case MachineOperand::MO_FrameIndex:
340 OS << "<fi#" << getIndex() << '>';
341 break;
342 case MachineOperand::MO_ConstantPoolIndex:
343 OS << "<cp#" << getIndex();
344 if (getOffset()) OS << "+" << getOffset();
345 OS << '>';
346 break;
347 case MachineOperand::MO_TargetIndex:
348 OS << "<ti#" << getIndex();
349 if (getOffset()) OS << "+" << getOffset();
350 OS << '>';
351 break;
352 case MachineOperand::MO_JumpTableIndex:
353 OS << "<jt#" << getIndex() << '>';
354 break;
355 case MachineOperand::MO_GlobalAddress:
356 OS << "<ga:";
357 getGlobal()->printAsOperand(OS, /*PrintType=*/false);
358 if (getOffset()) OS << "+" << getOffset();
359 OS << '>';
360 break;
361 case MachineOperand::MO_ExternalSymbol:
362 OS << "<es:" << getSymbolName();
363 if (getOffset()) OS << "+" << getOffset();
364 OS << '>';
365 break;
366 case MachineOperand::MO_BlockAddress:
367 OS << '<';
368 getBlockAddress()->printAsOperand(OS, /*PrintType=*/false);
369 if (getOffset()) OS << "+" << getOffset();
370 OS << '>';
371 break;
372 case MachineOperand::MO_RegisterMask:
373 OS << "<regmask>";
374 break;
375 case MachineOperand::MO_RegisterLiveOut:
376 OS << "<regliveout>";
377 break;
378 case MachineOperand::MO_Metadata:
379 OS << '<';
380 getMetadata()->printAsOperand(OS, /*PrintType=*/false);
381 OS << '>';
382 break;
383 case MachineOperand::MO_MCSymbol:
384 OS << "<MCSym=" << *getMCSymbol() << '>';
385 break;
386 case MachineOperand::MO_CFIIndex:
387 OS << "<call frame instruction>";
388 break;
389 }
390
391 if (unsigned TF = getTargetFlags())
392 OS << "[TF=" << TF << ']';
393 }
394
395 //===----------------------------------------------------------------------===//
396 // MachineMemOperand Implementation
397 //===----------------------------------------------------------------------===//
398
399 /// getAddrSpace - Return the LLVM IR address space number that this pointer
400 /// points into.
401 unsigned MachinePointerInfo::getAddrSpace() const {
402 if (V.isNull() || V.is<const PseudoSourceValue*>()) return 0;
403 return cast<PointerType>(V.get<const Value*>()->getType())->getAddressSpace();
404 }
405
406 /// getConstantPool - Return a MachinePointerInfo record that refers to the
407 /// constant pool.
408 MachinePointerInfo MachinePointerInfo::getConstantPool() {
409 return MachinePointerInfo(PseudoSourceValue::getConstantPool());
410 }
411
412 /// getFixedStack - Return a MachinePointerInfo record that refers to the
413 /// specified FrameIndex.
414 MachinePointerInfo MachinePointerInfo::getFixedStack(int FI, int64_t offset) {
415 return MachinePointerInfo(PseudoSourceValue::getFixedStack(FI), offset);
416 }
417
418 MachinePointerInfo MachinePointerInfo::getJumpTable() {
419 return MachinePointerInfo(PseudoSourceValue::getJumpTable());
420 }
421
422 MachinePointerInfo MachinePointerInfo::getGOT() {
423 return MachinePointerInfo(PseudoSourceValue::getGOT());
424 }
425
426 MachinePointerInfo MachinePointerInfo::getStack(int64_t Offset) {
427 return MachinePointerInfo(PseudoSourceValue::getStack(), Offset);
428 }
429
430 MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
431 uint64_t s, unsigned int a,
432 const MDNode *TBAAInfo,
433 const MDNode *Ranges)
434 : PtrInfo(ptrinfo), Size(s),
435 Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
436 TBAAInfo(TBAAInfo), Ranges(Ranges) {
437 assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue*>() ||
438 isa<PointerType>(PtrInfo.V.get<const Value*>()->getType())) &&
439 "invalid pointer value");
440 assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
441 assert((isLoad() || isStore()) && "Not a load/store!");
442 }
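// Illustrative sketch, not part of the original source: the base alignment is
// packed into the Flags bits above MOMaxBits as Log2(alignment) + 1, so a
// hypothetical 16-byte aligned operand stores Log2_32(16) + 1 == 5 there and
// getBaseAlignment() recovers 16 by shifting the field back out:
//
//   Flags |= (Log2_32(16) + 1) << MOMaxBits;   // encode
//   // getBaseAlignment() == 16                // decode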
443
444 /// Profile - Gather unique data for the object.
445 ///
446 void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
447 ID.AddInteger(getOffset());
448 ID.AddInteger(Size);
449 ID.AddPointer(getOpaqueValue());
450 ID.AddInteger(Flags);
451 }
452
453 void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
454 // The Value and Offset may differ due to CSE. But the flags and size
455 // should be the same.
456 assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
457 assert(MMO->getSize() == getSize() && "Size mismatch!");
458
459 if (MMO->getBaseAlignment() >= getBaseAlignment()) {
460 // Update the alignment value.
461 Flags = (Flags & ((1 << MOMaxBits) - 1)) |
462 ((Log2_32(MMO->getBaseAlignment()) + 1) << MOMaxBits);
463 // Also update the base and offset, because the new alignment may
464 // not be applicable with the old ones.
465 PtrInfo = MMO->PtrInfo;
466 }
467 }
468
469 /// getAlignment - Return the minimum known alignment in bytes of the
470 /// actual memory reference.
471 uint64_t MachineMemOperand::getAlignment() const {
472 return MinAlign(getBaseAlignment(), getOffset());
473 }
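// Illustrative sketch, not part of the original source: MinAlign() keeps the
// largest power of two that divides both the base alignment and the offset,
// e.g. for a 16-byte aligned base:
//
//   MinAlign(16, 0)  == 16
//   MinAlign(16, 4)  == 4
//   MinAlign(16, 24) == 8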
474
475 raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
476 assert((MMO.isLoad() || MMO.isStore()) &&
477 "SV has to be a load, store or both.");
478
479 if (MMO.isVolatile())
480 OS << "Volatile ";
481
482 if (MMO.isLoad())
483 OS << "LD";
484 if (MMO.isStore())
485 OS << "ST";
486 OS << MMO.getSize();
487
488 // Print the address information.
489 OS << "[";
490 if (const Value *V = MMO.getValue())
491 V->printAsOperand(OS, /*PrintType=*/false);
492 else if (const PseudoSourceValue *PSV = MMO.getPseudoValue())
493 PSV->printCustom(OS);
494 else
495 OS << "<unknown>";
496
497 unsigned AS = MMO.getAddrSpace();
498 if (AS != 0)
499 OS << "(addrspace=" << AS << ')';
500
501 // If the alignment of the memory reference itself differs from the alignment
502 // of the base pointer, print the base alignment explicitly, next to the base
503 // pointer.
504 if (MMO.getBaseAlignment() != MMO.getAlignment())
505 OS << "(align=" << MMO.getBaseAlignment() << ")";
506
507 if (MMO.getOffset() != 0)
508 OS << "+" << MMO.getOffset();
509 OS << "]";
510
511 // Print the alignment of the reference.
512 if (MMO.getBaseAlignment() != MMO.getAlignment() ||
513 MMO.getBaseAlignment() != MMO.getSize())
514 OS << "(align=" << MMO.getAlignment() << ")";
515
516 // Print TBAA info.
517 if (const MDNode *TBAAInfo = MMO.getTBAAInfo()) {
518 OS << "(tbaa=";
519 if (TBAAInfo->getNumOperands() > 0)
520 TBAAInfo->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
521 else
522 OS << "<unknown>";
523 OS << ")";
524 }
525
526 // Print nontemporal info.
527 if (MMO.isNonTemporal())
528 OS << "(nontemporal)";
529
530 return OS;
531 }
532
533 //===----------------------------------------------------------------------===//
534 // MachineInstr Implementation
535 //===----------------------------------------------------------------------===//
536
537 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
538 if (MCID->ImplicitDefs)
539 for (const uint16_t *ImpDefs = MCID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
540 addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
541 if (MCID->ImplicitUses)
542 for (const uint16_t *ImpUses = MCID->getImplicitUses(); *ImpUses; ++ImpUses)
543 addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
544 }
545
546 /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
547 /// implicit operands. It reserves space for the number of operands specified by
548 /// the MCInstrDesc.
549 MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
550 const DebugLoc dl, bool NoImp)
551 : MCID(&tid), Parent(nullptr), Operands(nullptr), NumOperands(0),
552 Flags(0), AsmPrinterFlags(0),
553 NumMemRefs(0), MemRefs(nullptr), debugLoc(dl) {
554 // Reserve space for the expected number of operands.
555 if (unsigned NumOps = MCID->getNumOperands() +
556 MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
557 CapOperands = OperandCapacity::get(NumOps);
558 Operands = MF.allocateOperandArray(CapOperands);
559 }
560
561 if (!NoImp)
562 addImplicitDefUseOperands(MF);
563 }
564
565 /// MachineInstr ctor - Copies MachineInstr arg exactly
566 ///
567 MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
568 : MCID(&MI.getDesc()), Parent(nullptr), Operands(nullptr), NumOperands(0),
569 Flags(0), AsmPrinterFlags(0),
570 NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
571 debugLoc(MI.getDebugLoc()) {
572 CapOperands = OperandCapacity::get(MI.getNumOperands());
573 Operands = MF.allocateOperandArray(CapOperands);
574
575 // Copy operands.
576 for (unsigned i = 0; i != MI.getNumOperands(); ++i)
577 addOperand(MF, MI.getOperand(i));
578
579 // Copy all the sensible flags.
580 setFlags(MI.Flags);
581 }
582
583 /// getRegInfo - If this instruction is embedded into a MachineFunction,
584 /// return the MachineRegisterInfo object for the current function, otherwise
585 /// return null.
586 MachineRegisterInfo *MachineInstr::getRegInfo() {
587 if (MachineBasicBlock *MBB = getParent())
588 return &MBB->getParent()->getRegInfo();
589 return nullptr;
590 }
591
592 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
593 /// this instruction from their respective use lists. This requires that the
594 /// operands already be on their use lists.
595 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
596 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
597 if (Operands[i].isReg())
598 MRI.removeRegOperandFromUseList(&Operands[i]);
599 }
600
601 /// AddRegOperandsToUseLists - Add all of the register operands in
602 /// this instruction to their respective use lists. This requires that the
603 /// operands not be on their use lists yet.
604 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
605 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
606 if (Operands[i].isReg())
607 MRI.addRegOperandToUseList(&Operands[i]);
608 }
609
610 void MachineInstr::addOperand(const MachineOperand &Op) {
611 MachineBasicBlock *MBB = getParent();
612 assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
613 MachineFunction *MF = MBB->getParent();
614 assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
615 addOperand(*MF, Op);
616 }
617
618 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
619 /// ranges. If MRI is non-null also update use-def chains.
620 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
621 unsigned NumOps, MachineRegisterInfo *MRI) {
622 if (MRI)
623 return MRI->moveOperands(Dst, Src, NumOps);
624
625 // Here it would be convenient to call memmove, but that isn't allowed because
626 // MachineOperand has a constructor and so isn't a POD type.
627 if (Dst < Src)
628 for (unsigned i = 0; i != NumOps; ++i)
629 new (Dst + i) MachineOperand(Src[i]);
630 else
631 for (unsigned i = NumOps; i ; --i)
632 new (Dst + i - 1) MachineOperand(Src[i - 1]);
633 }
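// Illustrative note, not part of the original source (Ops and N are
// hypothetical): the copy direction above is what makes overlapping moves
// safe. Shifting operands down (Dst < Src) copies front-to-back, shifting
// them up copies back-to-front, so no source operand is overwritten before it
// has been copied:
//
//   moveOperands(Ops, Ops + 1, N, MRI);      // close a gap (RemoveOperand)
//   moveOperands(Ops + 1, Ops, N, MRI);      // open a gap  (addOperand)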
634
635 /// addOperand - Add the specified operand to the instruction. If it is an
636 /// implicit operand, it is added to the end of the operand list. If it is
637 /// an explicit operand it is added at the end of the explicit operand list
638 /// (before the first implicit operand).
639 void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
640 assert(MCID && "Cannot add operands before providing an instr descriptor");
641
642 // Check if we're adding one of our existing operands.
643 if (&Op >= Operands && &Op < Operands + NumOperands) {
644 // This is unusual: MI->addOperand(MI->getOperand(i)).
645 // If adding Op requires reallocating or moving existing operands around,
646 // the Op reference could go stale. Support it by copying Op.
647 MachineOperand CopyOp(Op);
648 return addOperand(MF, CopyOp);
649 }
650
651 // Find the insert location for the new operand. Implicit registers go at
652 // the end, everything else goes before the implicit regs.
653 //
654 // FIXME: Allow mixed explicit and implicit operands on inline asm.
655 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
656 // implicit-defs, but they must not be moved around. See the FIXME in
657 // InstrEmitter.cpp.
658 unsigned OpNo = getNumOperands();
659 bool isImpReg = Op.isReg() && Op.isImplicit();
660 if (!isImpReg && !isInlineAsm()) {
661 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
662 --OpNo;
663 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
664 }
665 }
666
667 #ifndef NDEBUG
668 bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
669 // OpNo now points to the desired insertion point. Unless this is a variadic
670 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
671 // RegMask operands go between the explicit and implicit operands.
672 assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
673 OpNo < MCID->getNumOperands() || isMetaDataOp) &&
674 "Trying to add an operand to a machine instr that is already done!");
675 #endif
676
677 MachineRegisterInfo *MRI = getRegInfo();
678
679 // Determine if the Operands array needs to be reallocated.
680 // Save the old capacity and operand array.
681 OperandCapacity OldCap = CapOperands;
682 MachineOperand *OldOperands = Operands;
683 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
684 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
685 Operands = MF.allocateOperandArray(CapOperands);
686 // Move the operands before the insertion point.
687 if (OpNo)
688 moveOperands(Operands, OldOperands, OpNo, MRI);
689 }
690
691 // Move the operands following the insertion point.
692 if (OpNo != NumOperands)
693 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
694 MRI);
695 ++NumOperands;
696
697 // Deallocate the old operand array.
698 if (OldOperands != Operands && OldOperands)
699 MF.deallocateOperandArray(OldCap, OldOperands);
700
701 // Copy Op into place. It still needs to be inserted into the MRI use lists.
702 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
703 NewMO->ParentMI = this;
704
705 // When adding a register operand, tell MRI about it.
706 if (NewMO->isReg()) {
707 // Ensure isOnRegUseList() returns false, regardless of Op's status.
708 NewMO->Contents.Reg.Prev = nullptr;
709 // Ignore existing ties. This is not a property that can be copied.
710 NewMO->TiedTo = 0;
711 // Add the new operand to MRI, but only for instructions in an MBB.
712 if (MRI)
713 MRI->addRegOperandToUseList(NewMO);
714 // The MCID operand information isn't accurate until we start adding
715 // explicit operands. The implicit operands are added first, then the
716 // explicits are inserted before them.
717 if (!isImpReg) {
718 // Tie uses to defs as indicated in MCInstrDesc.
719 if (NewMO->isUse()) {
720 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
721 if (DefIdx != -1)
722 tieOperands(DefIdx, OpNo);
723 }
724 // If the register operand is flagged as early, mark the operand as such.
725 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
726 NewMO->setIsEarlyClobber(true);
727 }
728 }
729 }
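// Illustrative sketch, not part of the original source: explicit operands are
// inserted in front of any trailing implicit operands. For a hypothetical MI
// currently holding { %vreg1<def>, %EFLAGS<imp-def> }:
//
//   MI->addOperand(MF, MachineOperand::CreateImm(42));
//   // operand order afterwards: %vreg1<def>, 42, %EFLAGS<imp-def>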
730
731 /// RemoveOperand - Erase an operand from an instruction, leaving it with one
732 /// fewer operand than it started with.
733 ///
734 void MachineInstr::RemoveOperand(unsigned OpNo) {
735 assert(OpNo < getNumOperands() && "Invalid operand number");
736 untieRegOperand(OpNo);
737
738 #ifndef NDEBUG
739 // Moving tied operands would break the ties.
740 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
741 if (Operands[i].isReg())
742 assert(!Operands[i].isTied() && "Cannot move tied operands");
743 #endif
744
745 MachineRegisterInfo *MRI = getRegInfo();
746 if (MRI && Operands[OpNo].isReg())
747 MRI->removeRegOperandFromUseList(Operands + OpNo);
748
749 // Don't call the MachineOperand destructor. A lot of this code depends on
750 // MachineOperand having a trivial destructor anyway, and adding a call here
751 // wouldn't make it 'destructor-correct'.
752
753 if (unsigned N = NumOperands - 1 - OpNo)
754 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
755 --NumOperands;
756 }
757
758 /// addMemOperand - Add a MachineMemOperand to the machine instruction.
759 /// This function should be used only occasionally. The setMemRefs function
760 /// is the primary method for setting up a MachineInstr's MemRefs list.
761 void MachineInstr::addMemOperand(MachineFunction &MF,
762 MachineMemOperand *MO) {
763 mmo_iterator OldMemRefs = MemRefs;
764 unsigned OldNumMemRefs = NumMemRefs;
765
766 unsigned NewNum = NumMemRefs + 1;
767 mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
768
769 std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
770 NewMemRefs[NewNum - 1] = MO;
771 setMemRefs(NewMemRefs, NewMemRefs + NewNum);
772 }
773
774 bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
775 assert(!isBundledWithPred() && "Must be called on bundle header");
776 for (MachineBasicBlock::const_instr_iterator MII = this;; ++MII) {
777 if (MII->getDesc().getFlags() & Mask) {
778 if (Type == AnyInBundle)
779 return true;
780 } else {
781 if (Type == AllInBundle && !MII->isBundle())
782 return false;
783 }
784 // This was the last instruction in the bundle.
785 if (!MII->isBundledWithSucc())
786 return Type == AllInBundle;
787 }
788 }
789
790 bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
791 MICheckType Check) const {
792 // If opcodes or number of operands are not the same then the two
793 // instructions are obviously not identical.
794 if (Other->getOpcode() != getOpcode() ||
795 Other->getNumOperands() != getNumOperands())
796 return false;
797
798 if (isBundle()) {
799 // Both instructions are bundles, compare MIs inside the bundle.
800 MachineBasicBlock::const_instr_iterator I1 = *this;
801 MachineBasicBlock::const_instr_iterator E1 = getParent()->instr_end();
802 MachineBasicBlock::const_instr_iterator I2 = *Other;
803 MachineBasicBlock::const_instr_iterator E2= Other->getParent()->instr_end();
804 while (++I1 != E1 && I1->isInsideBundle()) {
805 ++I2;
806 if (I2 == E2 || !I2->isInsideBundle() || !I1->isIdenticalTo(I2, Check))
807 return false;
808 }
809 }
810
811 // Check operands to make sure they match.
812 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
813 const MachineOperand &MO = getOperand(i);
814 const MachineOperand &OMO = Other->getOperand(i);
815 if (!MO.isReg()) {
816 if (!MO.isIdenticalTo(OMO))
817 return false;
818 continue;
819 }
820
821 // Clients may or may not want to ignore defs when testing for equality.
822 // For example, machine CSE pass only cares about finding common
823 // subexpressions, so it's safe to ignore virtual register defs.
824 if (MO.isDef()) {
825 if (Check == IgnoreDefs)
826 continue;
827 else if (Check == IgnoreVRegDefs) {
828 if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
829 TargetRegisterInfo::isPhysicalRegister(OMO.getReg()))
830 if (MO.getReg() != OMO.getReg())
831 return false;
832 } else {
833 if (!MO.isIdenticalTo(OMO))
834 return false;
835 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
836 return false;
837 }
838 } else {
839 if (!MO.isIdenticalTo(OMO))
840 return false;
841 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
842 return false;
843 }
844 }
845 // If DebugLoc does not match then two dbg.values are not identical.
846 if (isDebugValue())
847 if (!getDebugLoc().isUnknown() && !Other->getDebugLoc().isUnknown()
848 && getDebugLoc() != Other->getDebugLoc())
849 return false;
850 return true;
851 }
852
853 MachineInstr *MachineInstr::removeFromParent() {
854 assert(getParent() && "Not embedded in a basic block!");
855 return getParent()->remove(this);
856 }
857
858 MachineInstr *MachineInstr::removeFromBundle() {
859 assert(getParent() && "Not embedded in a basic block!");
860 return getParent()->remove_instr(this);
861 }
862
863 void MachineInstr::eraseFromParent() {
864 assert(getParent() && "Not embedded in a basic block!");
865 getParent()->erase(this);
866 }
867
868 void MachineInstr::eraseFromBundle() {
869 assert(getParent() && "Not embedded in a basic block!");
870 getParent()->erase_instr(this);
871 }
872
873 /// getNumExplicitOperands - Returns the number of non-implicit operands.
874 ///
875 unsigned MachineInstr::getNumExplicitOperands() const {
876 unsigned NumOperands = MCID->getNumOperands();
877 if (!MCID->isVariadic())
878 return NumOperands;
879
880 for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
881 const MachineOperand &MO = getOperand(i);
882 if (!MO.isReg() || !MO.isImplicit())
883 NumOperands++;
884 }
885 return NumOperands;
886 }
887
888 void MachineInstr::bundleWithPred() {
889 assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
890 setFlag(BundledPred);
891 MachineBasicBlock::instr_iterator Pred = this;
892 --Pred;
893 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
894 Pred->setFlag(BundledSucc);
895 }
896
897 void MachineInstr::bundleWithSucc() {
898 assert(!isBundledWithSucc() && "MI is already bundled with its successor");
899 setFlag(BundledSucc);
900 MachineBasicBlock::instr_iterator Succ = this;
901 ++Succ;
902 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
903 Succ->setFlag(BundledPred);
904 }
905
906 void MachineInstr::unbundleFromPred() {
907 assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
908 clearFlag(BundledPred);
909 MachineBasicBlock::instr_iterator Pred = this;
910 --Pred;
911 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
912 Pred->clearFlag(BundledSucc);
913 }
914
915 void MachineInstr::unbundleFromSucc() {
916 assert(isBundledWithSucc() && "MI isn't bundled with its successor");
917 clearFlag(BundledSucc);
918 MachineBasicBlock::instr_iterator Succ = this;
919 ++Succ;
920 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
921 Succ->clearFlag(BundledPred);
922 }
923
924 bool MachineInstr::isStackAligningInlineAsm() const {
925 if (isInlineAsm()) {
926 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
927 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
928 return true;
929 }
930 return false;
931 }
932
933 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
934 assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
935 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
936 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
937 }
938
939 int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
940 unsigned *GroupNo) const {
941 assert(isInlineAsm() && "Expected an inline asm instruction");
942 assert(OpIdx < getNumOperands() && "OpIdx out of range");
943
944 // Ignore queries about the initial operands.
945 if (OpIdx < InlineAsm::MIOp_FirstOperand)
946 return -1;
947
948 unsigned Group = 0;
949 unsigned NumOps;
950 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
951 i += NumOps) {
952 const MachineOperand &FlagMO = getOperand(i);
953 // If we reach the implicit register operands, stop looking.
954 if (!FlagMO.isImm())
955 return -1;
956 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
957 if (i + NumOps > OpIdx) {
958 if (GroupNo)
959 *GroupNo = Group;
960 return i;
961 }
962 ++Group;
963 }
964 return -1;
965 }
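// Illustrative sketch, not part of the original source: INLINEASM operands
// are laid out as groups, each headed by an immediate flag word:
//
//   [asm string] [extra info] [flag0] [group 0 regs...] [flag1] [group 1 regs...]
//
// For a hypothetical OpIdx inside group 1, findInlineAsmFlagIdx(OpIdx, &G)
// returns the index of [flag1] and sets G to 1.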
966
967 const TargetRegisterClass*
968 MachineInstr::getRegClassConstraint(unsigned OpIdx,
969 const TargetInstrInfo *TII,
970 const TargetRegisterInfo *TRI) const {
971 assert(getParent() && "Can't have an MBB reference here!");
972 assert(getParent()->getParent() && "Can't have an MF reference here!");
973 const MachineFunction &MF = *getParent()->getParent();
974
975 // Most opcodes have fixed constraints in their MCInstrDesc.
976 if (!isInlineAsm())
977 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
978
979 if (!getOperand(OpIdx).isReg())
980 return nullptr;
981
982 // For tied uses on inline asm, get the constraint from the def.
983 unsigned DefIdx;
984 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
985 OpIdx = DefIdx;
986
987 // Inline asm stores register class constraints in the flag word.
988 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
989 if (FlagIdx < 0)
990 return nullptr;
991
992 unsigned Flag = getOperand(FlagIdx).getImm();
993 unsigned RCID;
994 if (InlineAsm::hasRegClassConstraint(Flag, RCID))
995 return TRI->getRegClass(RCID);
996
997 // Assume that all registers in a memory operand are pointers.
998 if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
999 return TRI->getPointerRegClass(MF);
1000
1001 return nullptr;
1002 }
1003
1004 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
1005 unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
1006 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
1007 // Check every operand inside the bundle if we have
1008 // been asked to.
1009 if (ExploreBundle)
1010 for (ConstMIBundleOperands OpndIt(this); OpndIt.isValid() && CurRC;
1011 ++OpndIt)
1012 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
1013 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
1014 else
1015 // Otherwise, just check the current operands.
1016 for (ConstMIOperands OpndIt(this); OpndIt.isValid() && CurRC; ++OpndIt)
1017 CurRC = getRegClassConstraintEffectForVRegImpl(OpndIt.getOperandNo(), Reg,
1018 CurRC, TII, TRI);
1019 return CurRC;
1020 }
1021
1022 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1023 unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
1024 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1025 assert(CurRC && "Invalid initial register class");
1026 // Check if Reg is constrained by some of its use/def from MI.
1027 const MachineOperand &MO = getOperand(OpIdx);
1028 if (!MO.isReg() || MO.getReg() != Reg)
1029 return CurRC;
1030 // If yes, accumulate the constraints through the operand.
1031 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1032 }
1033
1034 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
1035 unsigned OpIdx, const TargetRegisterClass *CurRC,
1036 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1037 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
1038 const MachineOperand &MO = getOperand(OpIdx);
1039 assert(MO.isReg() &&
1040 "Cannot get register constraints for non-register operand");
1041 assert(CurRC && "Invalid initial register class");
1042 if (unsigned SubIdx = MO.getSubReg()) {
1043 if (OpRC)
1044 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1045 else
1046 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1047 } else if (OpRC)
1048 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1049 return CurRC;
1050 }
1051
1052 /// Return the number of instructions inside the MI bundle, not counting the
1053 /// header instruction.
1054 unsigned MachineInstr::getBundleSize() const {
1055 MachineBasicBlock::const_instr_iterator I = this;
1056 unsigned Size = 0;
1057 while (I->isBundledWithSucc())
1058 ++Size, ++I;
1059 return Size;
1060 }
1061
1062 /// findRegisterUseOperandIdx() - Returns the index of the operand that is a use
1063 /// of the specified register or -1 if it is not found. It further tightens
1064 /// the search criteria to a use that kills the register if isKill is true.
1065 int MachineInstr::findRegisterUseOperandIdx(unsigned Reg, bool isKill,
1066 const TargetRegisterInfo *TRI) const {
1067 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1068 const MachineOperand &MO = getOperand(i);
1069 if (!MO.isReg() || !MO.isUse())
1070 continue;
1071 unsigned MOReg = MO.getReg();
1072 if (!MOReg)
1073 continue;
1074 if (MOReg == Reg ||
1075 (TRI &&
1076 TargetRegisterInfo::isPhysicalRegister(MOReg) &&
1077 TargetRegisterInfo::isPhysicalRegister(Reg) &&
1078 TRI->isSubRegister(MOReg, Reg)))
1079 if (!isKill || MO.isKill())
1080 return i;
1081 }
1082 return -1;
1083 }
1084
1085 /// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1086 /// indicating if this instruction reads or writes Reg. This also considers
1087 /// partial defines.
1088 std::pair<bool,bool>
1089 MachineInstr::readsWritesVirtualRegister(unsigned Reg,
1090 SmallVectorImpl<unsigned> *Ops) const {
1091 bool PartDef = false; // Partial redefine.
1092 bool FullDef = false; // Full define.
1093 bool Use = false;
1094
1095 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1096 const MachineOperand &MO = getOperand(i);
1097 if (!MO.isReg() || MO.getReg() != Reg)
1098 continue;
1099 if (Ops)
1100 Ops->push_back(i);
1101 if (MO.isUse())
1102 Use |= !MO.isUndef();
1103 else if (MO.getSubReg() && !MO.isUndef())
1104 // A partial <def,undef> doesn't count as reading the register.
1105 PartDef = true;
1106 else
1107 FullDef = true;
1108 }
1109 // A partial redefine uses Reg unless there is also a full define.
1110 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1111 }
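// Illustrative sketch, not part of the original source: for a hypothetical
// instruction that partially redefines and also reads %vreg2,
//
//   %vreg2:sub_hi<def> = ... %vreg2 ...
//
// readsWritesVirtualRegister(Reg2) yields {true, true}, while a plain full
// def with no use of the register yields {false, true}.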
1112
1113 /// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1114 /// the specified register or -1 if it is not found. If isDead is true, defs
1115 /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1116 /// also checks if there is a def of a super-register.
1117 int
1118 MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
1119 const TargetRegisterInfo *TRI) const {
1120 bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
1121 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1122 const MachineOperand &MO = getOperand(i);
1123 // Accept regmask operands when Overlap is set.
1124 // Ignore them when looking for a specific def operand (Overlap == false).
1125 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1126 return i;
1127 if (!MO.isReg() || !MO.isDef())
1128 continue;
1129 unsigned MOReg = MO.getReg();
1130 bool Found = (MOReg == Reg);
1131 if (!Found && TRI && isPhys &&
1132 TargetRegisterInfo::isPhysicalRegister(MOReg)) {
1133 if (Overlap)
1134 Found = TRI->regsOverlap(MOReg, Reg);
1135 else
1136 Found = TRI->isSubRegister(MOReg, Reg);
1137 }
1138 if (Found && (!isDead || MO.isDead()))
1139 return i;
1140 }
1141 return -1;
1142 }
1143
1144 /// findFirstPredOperandIdx() - Find the index of the first operand in the
1145 /// operand list that is used to represent the predicate. It returns -1 if
1146 /// none is found.
1147 int MachineInstr::findFirstPredOperandIdx() const {
1148 // Don't call MCID.findFirstPredOperandIdx() because this variant
1149 // is sometimes called on an instruction that's not yet complete, and
1150 // so the number of operands is less than the MCID indicates. In
1151 // particular, the PTX target does this.
1152 const MCInstrDesc &MCID = getDesc();
1153 if (MCID.isPredicable()) {
1154 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1155 if (MCID.OpInfo[i].isPredicate())
1156 return i;
1157 }
1158
1159 return -1;
1160 }
1161
1162 // MachineOperand::TiedTo is 4 bits wide.
1163 const unsigned TiedMax = 15;
1164
1165 /// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1166 ///
1167 /// Use and def operands can be tied together, indicated by a non-zero TiedTo
1168 /// field. TiedTo can have these values:
1169 ///
1170 /// 0: Operand is not tied to anything.
1171 /// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1172 /// TiedMax: Tied to an operand >= TiedMax-1.
1173 ///
1174 /// The tied def must be one of the first TiedMax operands on a normal
1175 /// instruction. INLINEASM instructions allow more tied defs.
1176 ///
1177 void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1178 MachineOperand &DefMO = getOperand(DefIdx);
1179 MachineOperand &UseMO = getOperand(UseIdx);
1180 assert(DefMO.isDef() && "DefIdx must be a def operand");
1181 assert(UseMO.isUse() && "UseIdx must be a use operand");
1182 assert(!DefMO.isTied() && "Def is already tied to another use");
1183 assert(!UseMO.isTied() && "Use is already tied to another def");
1184
1185 if (DefIdx < TiedMax)
1186 UseMO.TiedTo = DefIdx + 1;
1187 else {
1188 // Inline asm can use the group descriptors to find tied operands, but on
1189 // normal instructions, the tied def must be within the first TiedMax
1190 // operands.
1191 assert(isInlineAsm() && "DefIdx out of range");
1192 UseMO.TiedTo = TiedMax;
1193 }
1194
1195 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1196 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1197 }
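// Illustrative sketch, not part of the original source: tying the use at
// operand 1 to the def at operand 0 of a hypothetical two-address MI stores
// the link in both directions with the 1-based TiedTo encoding:
//
//   MI->tieOperands(/*DefIdx=*/0, /*UseIdx=*/1);
//   // MI->getOperand(1).TiedTo == 1   (use is tied to operand 0)
//   // MI->getOperand(0).TiedTo == 2   (def is tied to operand 1)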
1198
1199 /// Given the index of a tied register operand, find the operand it is tied to.
1200 /// Defs are tied to uses and vice versa. Returns the index of the tied operand
1201 /// which must exist.
1202 unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1203 const MachineOperand &MO = getOperand(OpIdx);
1204 assert(MO.isTied() && "Operand isn't tied");
1205
1206 // Normally TiedTo is in range.
1207 if (MO.TiedTo < TiedMax)
1208 return MO.TiedTo - 1;
1209
1210 // Uses on normal instructions can be out of range.
1211 if (!isInlineAsm()) {
1212 // Normal tied defs must be in the 0..TiedMax-1 range.
1213 if (MO.isUse())
1214 return TiedMax - 1;
1215 // MO is a def. Search for the tied use.
1216 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1217 const MachineOperand &UseMO = getOperand(i);
1218 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1219 return i;
1220 }
1221 llvm_unreachable("Can't find tied use");
1222 }
1223
1224 // Now deal with inline asm by parsing the operand group descriptor flags.
1225 // Find the beginning of each operand group.
1226 SmallVector<unsigned, 8> GroupIdx;
1227 unsigned OpIdxGroup = ~0u;
1228 unsigned NumOps;
1229 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1230 i += NumOps) {
1231 const MachineOperand &FlagMO = getOperand(i);
1232 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1233 unsigned CurGroup = GroupIdx.size();
1234 GroupIdx.push_back(i);
1235 NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
1236 // OpIdx belongs to this operand group.
1237 if (OpIdx > i && OpIdx < i + NumOps)
1238 OpIdxGroup = CurGroup;
1239 unsigned TiedGroup;
1240 if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
1241 continue;
1242 // Operands in this group are tied to operands in TiedGroup which must be
1243 // earlier. Find the number of operands between the two groups.
1244 unsigned Delta = i - GroupIdx[TiedGroup];
1245
1246 // OpIdx is a use tied to TiedGroup.
1247 if (OpIdxGroup == CurGroup)
1248 return OpIdx - Delta;
1249
1250 // OpIdx is a def tied to this use group.
1251 if (OpIdxGroup == TiedGroup)
1252 return OpIdx + Delta;
1253 }
1254 llvm_unreachable("Invalid tied operand on inline asm");
1255 }
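// Illustrative sketch, not part of the original source: for inline asm the
// tied operand is recovered from the distance between the two groups. If a
// hypothetical use group starts at index 7 and is tied to a def group starting
// at index 3, then Delta == 4 and the tied use at index 8 maps back to the
// def at index 8 - 4 == 4 (and vice versa for the def).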
1256
1257 /// clearKillInfo - Clears kill flags on all operands.
1258 ///
1259 void MachineInstr::clearKillInfo() {
1260 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1261 MachineOperand &MO = getOperand(i);
1262 if (MO.isReg() && MO.isUse())
1263 MO.setIsKill(false);
1264 }
1265 }
1266
1267 void MachineInstr::substituteRegister(unsigned FromReg,
1268 unsigned ToReg,
1269 unsigned SubIdx,
1270 const TargetRegisterInfo &RegInfo) {
1271 if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
1272 if (SubIdx)
1273 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1274 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1275 MachineOperand &MO = getOperand(i);
1276 if (!MO.isReg() || MO.getReg() != FromReg)
1277 continue;
1278 MO.substPhysReg(ToReg, RegInfo);
1279 }
1280 } else {
1281 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1282 MachineOperand &MO = getOperand(i);
1283 if (!MO.isReg() || MO.getReg() != FromReg)
1284 continue;
1285 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1286 }
1287 }
1288 }
1289
1290 /// isSafeToMove - Return true if it is safe to move this instruction. If
1291 /// SawStore is set to true, it means that there is a store (or call) between
1292 /// the instruction's location and its intended destination.
1293 bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
1294 AliasAnalysis *AA,
1295 bool &SawStore) const {
1296 // Ignore stuff that we obviously can't move.
1297 //
1298 // Treat volatile loads as stores. This is not strictly necessary for
1299 // volatiles, but it is required for atomic loads. It is not allowed to move
1300 // a load across an atomic load with Ordering > Monotonic.
1301 if (mayStore() || isCall() ||
1302 (mayLoad() && hasOrderedMemoryRef())) {
1303 SawStore = true;
1304 return false;
1305 }
1306
1307 if (isPosition() || isDebugValue() || isTerminator() ||
1308 hasUnmodeledSideEffects())
1309 return false;
1310
1311 // See if this instruction does a load. If so, we have to guarantee that the
1312 // loaded value doesn't change between the load and its intended
1313 // destination. The check for isInvariantLoad gives the target the chance to
1314 // classify the load as always returning a constant, e.g. a constant pool
1315 // load.
1316 if (mayLoad() && !isInvariantLoad(AA))
1317 // Otherwise, this is a real load. If there is a store between the load and
1318 // end of block, we can't move it.
1319 return !SawStore;
1320
1321 return true;
1322 }
1323
1324 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1325 /// or volatile memory reference, or if the information describing the memory
1326 /// reference is not available. Return false if it is known to have no ordered
1327 /// memory references.
1328 bool MachineInstr::hasOrderedMemoryRef() const {
1329 // An instruction known never to access memory won't have a volatile access.
1330 if (!mayStore() &&
1331 !mayLoad() &&
1332 !isCall() &&
1333 !hasUnmodeledSideEffects())
1334 return false;
1335
1336 // Otherwise, if the instruction has no memory reference information,
1337 // conservatively assume it wasn't preserved.
1338 if (memoperands_empty())
1339 return true;
1340
1341 // Check the memory reference information for ordered references.
1342 for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I)
1343 if (!(*I)->isUnordered())
1344 return true;
1345
1346 return false;
1347 }
1348
1349 /// isInvariantLoad - Return true if this instruction is loading from a
1350 /// location whose value is invariant across the function. For example,
1351 /// loading a value from the constant pool or from the argument area
1352 /// of a function if it does not change. This should only return true if
1353 /// *all* loads the instruction does are invariant (if it does multiple loads).
1354 bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
1355 // If the instruction doesn't load at all, it isn't an invariant load.
1356 if (!mayLoad())
1357 return false;
1358
1359 // If the instruction has lost its memoperands, conservatively assume that
1360 // it may not be an invariant load.
1361 if (memoperands_empty())
1362 return false;
1363
1364 const MachineFrameInfo *MFI = getParent()->getParent()->getFrameInfo();
1365
1366 for (mmo_iterator I = memoperands_begin(),
1367 E = memoperands_end(); I != E; ++I) {
1368 if ((*I)->isVolatile()) return false;
1369 if ((*I)->isStore()) return false;
1370 if ((*I)->isInvariant()) continue;
1371
1372
1373 // A load from a constant PseudoSourceValue is invariant.
1374 if (const PseudoSourceValue *PSV = (*I)->getPseudoValue())
1375 if (PSV->isConstant(MFI))
1376 continue;
1377
1378 if (const Value *V = (*I)->getValue()) {
1379 // If we have an AliasAnalysis, ask it whether the memory is constant.
1380 if (AA && AA->pointsToConstantMemory(
1381 AliasAnalysis::Location(V, (*I)->getSize(),
1382 (*I)->getTBAAInfo())))
1383 continue;
1384 }
1385
1386 // Otherwise assume conservatively.
1387 return false;
1388 }
1389
1390 // Everything checks out.
1391 return true;
1392 }
1393
1394 /// isConstantValuePHI - If the specified instruction is a PHI that always
1395 /// merges together the same virtual register, return the register, otherwise
1396 /// return 0.
1397 unsigned MachineInstr::isConstantValuePHI() const {
1398 if (!isPHI())
1399 return 0;
1400 assert(getNumOperands() >= 3 &&
1401 "It's illegal to have a PHI without source operands");
1402
1403 unsigned Reg = getOperand(1).getReg();
1404 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1405 if (getOperand(i).getReg() != Reg)
1406 return 0;
1407 return Reg;
1408 }
1409
1410 bool MachineInstr::hasUnmodeledSideEffects() const {
1411 if (hasProperty(MCID::UnmodeledSideEffects))
1412 return true;
1413 if (isInlineAsm()) {
1414 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1415 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1416 return true;
1417 }
1418
1419 return false;
1420 }
1421
1422 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1423 ///
1424 bool MachineInstr::allDefsAreDead() const {
1425 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
1426 const MachineOperand &MO = getOperand(i);
1427 if (!MO.isReg() || MO.isUse())
1428 continue;
1429 if (!MO.isDead())
1430 return false;
1431 }
1432 return true;
1433 }
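
// Illustrative sketch (not part of the original file): dead-instruction
// cleanup in the spirit of DeadMachineInstructionElim, assuming dead flags
// have already been computed by a liveness analysis.
//
//   if (MI->allDefsAreDead() && !MI->isInlineAsm() &&
//       !MI->hasUnmodeledSideEffects() && !MI->mayStore())
//     MI->eraseFromParent();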

/// copyImplicitOps - Copy implicit register operands from the specified
/// instruction to this instruction.
void MachineInstr::copyImplicitOps(MachineFunction &MF,
                                   const MachineInstr *MI) {
  for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      addOperand(MF, MO);
  }
}
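
// Illustrative sketch (not part of the original file): when a pseudo call is
// rewritten into a real call instruction, the implicit uses/defs and regmask
// are usually carried over. OldMI, TII, and Opc are assumed names from the
// calling pass.
//
//   MachineInstr *NewMI = BuildMI(MF, OldMI->getDebugLoc(), TII->get(Opc));
//   NewMI->copyImplicitOps(MF, OldMI);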

void MachineInstr::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << " " << *this;
#endif
}

static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
                          raw_ostream &CommentOS) {
  const LLVMContext &Ctx = MF->getFunction()->getContext();
  DL.print(Ctx, CommentOS);
}

void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM,
                         bool SkipOpers) const {
  // We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
  const MachineFunction *MF = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  if (const MachineBasicBlock *MBB = getParent()) {
    MF = MBB->getParent();
    if (!TM && MF)
      TM = &MF->getTarget();
    if (MF)
      MRI = &MF->getRegInfo();
  }

  // Save a list of virtual registers.
  SmallVector<unsigned, 8> VirtRegs;

  // Print explicitly defined operands on the left of an assignment syntax.
  unsigned StartOp = 0, e = getNumOperands();
  for (; StartOp < e && getOperand(StartOp).isReg() &&
         getOperand(StartOp).isDef() &&
         !getOperand(StartOp).isImplicit();
       ++StartOp) {
    if (StartOp != 0) OS << ", ";
    getOperand(StartOp).print(OS, TM);
    unsigned Reg = getOperand(StartOp).getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      VirtRegs.push_back(Reg);
  }

  if (StartOp != 0)
    OS << " = ";

  // Print the opcode name.
  if (TM && TM->getInstrInfo())
    OS << TM->getInstrInfo()->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool OmittedAnyCallClobbers = false;
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
    // Print asm string.
    OS << " ";
    getOperand(InlineAsm::MIOp_AsmString).print(OS, TM);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
      OS << " [attdialect]";
    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
      OS << " [inteldialect]";

    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      VirtRegs.push_back(MO.getReg());

    // Omit call-clobbered registers which aren't used anywhere. This makes
    // call instructions much less noisy on targets where calls clobber lots
    // of registers. Don't rely on MO.isDead() because we may be called before
    // LiveVariables is run, or we may be looking at a non-allocatable reg.
    if (MF && isCall() &&
        MO.isReg() && MO.isImplicit() && MO.isDef()) {
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        const MachineRegisterInfo &MRI = MF->getRegInfo();
        if (MRI.use_empty(Reg)) {
          bool HasAliasLive = false;
          for (MCRegAliasIterator AI(Reg, TM->getRegisterInfo(), true);
               AI.isValid(); ++AI) {
            unsigned AliasReg = *AI;
            if (!MRI.use_empty(AliasReg)) {
              HasAliasLive = true;
              break;
            }
          }
          if (!HasAliasLive) {
            OmittedAnyCallClobbers = true;
            continue;
          }
        }
      }
    }

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";
    if (i < getDesc().NumOperands) {
      const MCOperandInfo &MCOI = getDesc().OpInfo[i];
      if (MCOI.isPredicate())
        OS << "pred:";
      if (MCOI.isOptionalDef())
        OS << "opt:";
    }
    if (isDebugValue() && MO.isMetadata()) {
      // Pretty print DBG_VALUE instructions.
      const MDNode *MD = MO.getMetadata();
      if (const MDString *MDS = dyn_cast<MDString>(MD->getOperand(2)))
        OS << "!\"" << MDS->getString() << '\"';
      else
        MO.print(OS, TM);
    } else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
      OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      switch (InlineAsm::getKind(Flag)) {
      case InlineAsm::Kind_RegUse: OS << ":[reguse"; break;
      case InlineAsm::Kind_RegDef: OS << ":[regdef"; break;
      case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
      case InlineAsm::Kind_Clobber: OS << ":[clobber"; break;
      case InlineAsm::Kind_Imm: OS << ":[imm"; break;
      case InlineAsm::Kind_Mem: OS << ":[mem"; break;
      default: OS << ":[??" << InlineAsm::getKind(Flag); break;
      }

      unsigned RCID = 0;
      if (InlineAsm::hasRegClassConstraint(Flag, RCID)) {
        if (TM)
          OS << ':' << TM->getRegisterInfo()->getRegClass(RCID)->getName();
        else
          OS << ":RC" << RCID;
      }

      unsigned TiedTo = 0;
      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
        OS << " tiedto:$" << TiedTo;

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
    } else
      MO.print(OS, TM);
  }

  // Briefly indicate whether any call clobbers were omitted.
  if (OmittedAnyCallClobbers) {
    if (!FirstOp) OS << ",";
    OS << " ...";
  }

  bool HaveSemi = false;
  const unsigned PrintableFlags = FrameSetup;
  if (Flags & PrintableFlags) {
    if (!HaveSemi) { OS << ";"; HaveSemi = true; }
    OS << " flags: ";

    if (Flags & FrameSetup)
      OS << "FrameSetup";
  }

  if (!memoperands_empty()) {
    if (!HaveSemi) { OS << ";"; HaveSemi = true; }

    OS << " mem:";
    for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
         i != e; ++i) {
      OS << **i;
      if (std::next(i) != e)
        OS << " ";
    }
  }

  // Print the regclass of any virtual registers encountered.
  if (MRI && !VirtRegs.empty()) {
    if (!HaveSemi) { OS << ";"; HaveSemi = true; }
    for (unsigned i = 0; i != VirtRegs.size(); ++i) {
      const TargetRegisterClass *RC = MRI->getRegClass(VirtRegs[i]);
      OS << " " << RC->getName() << ':' << PrintReg(VirtRegs[i]);
      for (unsigned j = i+1; j != VirtRegs.size();) {
        if (MRI->getRegClass(VirtRegs[j]) != RC) {
          ++j;
          continue;
        }
        if (VirtRegs[i] != VirtRegs[j])
          OS << "," << PrintReg(VirtRegs[j]);
        VirtRegs.erase(VirtRegs.begin()+j);
      }
    }
  }

  // Print debug location information.
  if (isDebugValue() && getOperand(e - 1).isMetadata()) {
    if (!HaveSemi) OS << ";";
    DIVariable DV(getOperand(e - 1).getMetadata());
    OS << " line no:" << DV.getLineNumber();
    if (MDNode *InlinedAt = DV.getInlinedAt()) {
      DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt);
      if (!InlinedAtDL.isUnknown() && MF) {
        OS << " inlined @[ ";
        printDebugLoc(InlinedAtDL, MF, OS);
        OS << " ]";
      }
    }
  } else if (!debugLoc.isUnknown() && MF) {
    if (!HaveSemi) OS << ";";
    OS << " dbg:";
    printDebugLoc(debugLoc, MF, OS);
  }

  OS << '\n';
}
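
// Illustrative sketch (not part of the original file): print() writes to any
// raw_ostream, so an instruction can be rendered into a std::string for a
// diagnostic as well as dumped to dbgs().
//
//   std::string Str;
//   raw_string_ostream SS(Str);
//   MI->print(SS, &MF.getTarget());
//   errs() << "visiting: " << SS.str() << "\n";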

bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
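
// Illustrative sketch (not part of the original file): after sinking or
// copy-folding, the last remaining use of a register is often re-marked as a
// kill so later passes see accurate liveness. LastUseMI and TRI are assumed
// names from the calling pass.
//
//   LastUseMI->addRegisterKilled(Reg, TRI, /*AddIfNotFound=*/true);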

void MachineInstr::clearRegisterKills(unsigned Reg,
                                      const TargetRegisterInfo *RegInfo) {
  if (!TargetRegisterInfo::isPhysicalRegister(Reg))
    RegInfo = nullptr;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned OpReg = MO.getReg();
    if (OpReg == Reg || (RegInfo && RegInfo->isSuperRegister(Reg, OpReg)))
      MO.setIsKill(false);
  }
}
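
// Illustrative sketch (not part of the original file): when a live range is
// extended past an instruction that used to end it, the stale kill flags in
// the affected range must be cleared. Begin and End are assumed iterators
// from the calling pass.
//
//   for (MachineBasicBlock::iterator I = Begin; I != End; ++I)
//     I->clearRegisterKills(Reg, TRI);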

bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
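
// Illustrative sketch (not part of the original file): a pass that proves a
// def is never read (for example after rewriting all of its uses) records
// that fact directly on the defining instruction.
//
//   DefMI->addRegisterDead(Reg, TRI, /*AddIfNotFound=*/true);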

void MachineInstr::addRegisterDefined(unsigned Reg,
                                      const TargetRegisterInfo *RegInfo) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
    MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
    if (MO)
      return;
  } else {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = getOperand(i);
      if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
          MO.getSubReg() == 0)
        return;
    }
  }
  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/));
}
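
// Illustrative sketch (not part of the original file): setPhysRegsDeadExcept
// below uses this to materialize implicit defs for registers that are only
// clobbered via a regmask; it can also be called directly, e.g. to note that
// a call defines its return register. CallMI and RetReg are assumed names.
//
//   CallMI->addRegisterDefined(RetReg, TRI);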

void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
                                         const TargetRegisterInfo &TRI) {
  bool HasRegMask = false;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (MO.isRegMask()) {
      HasRegMask = true;
      continue;
    }
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    bool Dead = true;
    for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
         I != E; ++I)
      if (TRI.regsOverlap(*I, Reg)) {
        Dead = false;
        break;
      }
    // If there are no uses, including partial uses, the def is dead.
    if (Dead) MO.setIsDead();
  }

  // This is a call with a register mask operand.
  // Mask clobbers are always dead, so add defs for the non-dead defines.
  if (HasRegMask)
    for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
         I != E; ++I)
      addRegisterDefined(*I, &TRI);
}
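
// Illustrative sketch (not part of the original file): a fast register
// allocator typically calls this on a call instruction once it knows which
// physical registers actually carry values across the call.
//
//   SmallVector<unsigned, 8> UsedRegs;   // filled in by the allocator
//   CallMI->setPhysRegsDeadExcept(UsedRegs, *TRI);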

unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
  // Build up a buffer of hash code components.
  SmallVector<size_t, 8> HashComponents;
  HashComponents.reserve(MI->getNumOperands() + 1);
  HashComponents.push_back(MI->getOpcode());
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      continue;  // Skip virtual register defs.

    HashComponents.push_back(hash_value(MO));
  }
  return hash_combine_range(HashComponents.begin(), HashComponents.end());
}
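
// Illustrative sketch (not part of the original file): this trait is what
// lets MachineInstr* keys be hashed by expression rather than by pointer,
// e.g. in a DenseMap used for CSE-style lookups.
//
//   DenseMap<MachineInstr*, unsigned, MachineInstrExpressionTrait> Seen;
//   if (!Seen.insert(std::make_pair(MI, VN)).second)
//     ;  // MI duplicates an earlier expression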

void MachineInstr::emitError(StringRef Msg) const {
  // Find the source location cookie.
  unsigned LocCookie = 0;
  const MDNode *LocMD = nullptr;
  for (unsigned i = getNumOperands(); i != 0; --i) {
    if (getOperand(i-1).isMetadata() &&
        (LocMD = getOperand(i-1).getMetadata()) &&
        LocMD->getNumOperands() != 0) {
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
        LocCookie = CI->getZExtValue();
        break;
      }
    }
  }

  if (const MachineBasicBlock *MBB = getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
  report_fatal_error(Msg);
}
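
// Illustrative sketch (not part of the original file): backends use
// emitError() to report problems (commonly inline-asm constraint failures)
// against the original source location when one was recorded.
// isOperandLegal below is a hypothetical helper, not an API of this file.
//
//   if (!isOperandLegal(MI, OpIdx))
//     MI->emitError("invalid operand for inline asm constraint");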