//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <algorithm>
#include <cctype>
#include <cstring>
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
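
// Illustrative sketch (not part of the API; the opcode and operand index are
// hypothetical): a pass might use getRegClass to validate an operand before
// constraining a virtual register.
//
//   const TargetRegisterClass *RC =
//       TII->getRegClass(TII->get(HypotheticalOpc), /*OpNum=*/1, TRI, MF);
//   if (RC) // nullptr for variadic operands, e.g. INSERT_SUBREG's RegClass<0
//     MRI.constrainRegClass(Reg, RC);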

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for the comment string before counting so that comment-only
    // lines are not billed as instructions.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
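
// Worked example (illustrative; separator and comment strings are
// target-dependent): with a newline separator, a "#" comment string, and
// MaxInstLength == 4, the string
//
//   "mov r0, r1\n# just a comment\nadd r0, r0, r1"
//
// has two instruction starts (the comment line clears atInsnStart before
// anything is counted), so the estimate is 2 * 4 == 8 bytes.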

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    assert(MI->isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated to refer to the other source register.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
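
// Illustrative sketch of the tied-operand update above (register names are
// hypothetical). For a two-address instruction where the def is tied to the
// first source,
//
//   %v0<tied0> = OP %v0<tied1,kill>, %v1
//
// simply swapping the sources would break the tie, so the def is rewritten
// to the other source as well, yielding
//
//   %v1 = OP %v1, %v0<kill>
//
// and the kill flag moves with the register it applies to.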

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
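
// Illustrative sketch (operand layout is target-specific and hypothetical
// here): on an ARM-style target a predicable instruction carries two
// predicate operands, a condition-code immediate and a flags register, so an
// if-converter could predicate MI with something like
//
//   SmallVector<MachineOperand, 2> Pred;
//   Pred.push_back(MachineOperand::CreateImm(CondCode));
//   Pred.push_back(MachineOperand::CreateReg(FlagsReg, /*isDef=*/false));
//   bool Changed = TII->PredicateInstruction(MI, Pred);
//
// which rewrites each isPredicate() operand of MI in place from Pred.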

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const TargetMachine *TM) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  unsigned BitSize = TM->getRegisterInfo()->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TM->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!TM->getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
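
// Worked example (hypothetical register class and sub-register index): for a
// 16-byte register class with a sub-register covering bits [64, 128), the
// computation above gives Size = 64 / 8 = 8 and Offset = 64 / 8 = 8. On a
// big-endian target the byte offset is mirrored within the slot:
// Offset = 16 - (8 + 8) = 0, so the sub-register's bytes land at the start
// of the spill slot instead of the end.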

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveReg))
    return RC->contains(LiveReg) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

bool TargetInstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

static MachineInstr *foldPatchpoint(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
       I != E; ++I) {
    if (*I < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function
  // arguments; copy them through unchanged.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
                                         SpillOffset, &MF.getTarget());
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
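
// Illustrative sketch of the rewrite above (operand values are hypothetical).
// Folding the live value at operand index 3 of
//
//   STACKMAP <id>, <nShadowBytes>, %v0, %v1
//
// into frame index FI replaces the single register operand %v1 with the
// four-operand indirect form:
//
//   STACKMAP <id>, <nShadowBytes>, %v0,
//            IndirectMemRefOp, <SpillSize>, <FI>, <SpillOffset>
//
// so the stackmap emitter can later describe the value as a memory location.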

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    // Add a memory operand; foldMemoryOperandImpl doesn't do that.
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
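
// Usage sketch (illustrative only; the register-allocator spiller is the
// real client). Given an instruction whose operand 0 we would rather keep in
// a stack slot SlotFI than in a register:
//
//   SmallVector<unsigned, 1> Ops;
//   Ops.push_back(0);
//   if (MachineInstr *Folded = TII->foldMemoryOperand(MI, Ops, SlotFI))
//     MI->eraseFromParent(); // caller still removes the old instruction
//
// On success the returned instruction has already been inserted before MI
// and carries a fixed-stack memory operand describing SlotFI.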

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  }

  if (!NewMI) return nullptr;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
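
// Illustrative examples (x86-flavored, hypothetical operand details):
//
//   %v0 = MOV32ri 42        ; no reg uses, no side effects -> rematerializable
//   %v1 = ADD32rr %v2, %v3  ; virtual-register uses -> not "trivial"
//   %v4 = MOV32rm ...       ; a load: only if isInvariantLoad(AA) holds
//
// Re-executing the constant move at each use site can be cheaper than
// spilling and reloading it, which is why remat clients check this first.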

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel->HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required.
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute the latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() &&
         "computeDefOperandLatency failed");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
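
// Worked resolution order (values hypothetical): with a populated itinerary,
// computeDefOperandLatency returns -1, so the per-operand cycle is consulted
// next; if the itinerary has no entry for (DefClass, DefIdx) and
// getOperandCycle also returns -1, the result falls back to
// max(getInstrLatency(DefMI), defaultDefLatency(SchedModel, DefMI)), so a
// load, for example, reports at least SchedModel->LoadLatency cycles.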