//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class, which provides default
// implementations of various TargetInstrInfo methods.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                             MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert an unconditional branch
  // to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated as well.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
  }

  if (NewMI) {
    // Create a new instruction.
    bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
    MachineFunction &MF = *MI->getParent()->getParent();
    if (HasDef)
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
    else
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
  }

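  // Commute in place: swap the two source registers and their kill flags, and
  // update the tied def register computed above, if any.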
  if (HasDef)
    MI->getOperand(0).setReg(Reg0);
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}


bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isPredicable())
    return false;

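  // Rewrite each predicate operand of MI using the corresponding operand
  // supplied in Pred.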
  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfoImpl::hasLoadFromStackSlot(const MachineInstr *MI,
                                               const MachineMemOperand *&MMO,
                                               int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfoImpl::hasStoreToStackSlot(const MachineInstr *MI,
                                              const MachineMemOperand *&MMO,
                                              int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo &TRI) const {
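  // Clone the original instruction, retarget its def to DestReg (honoring
  // SubIdx), and insert the clone before I.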
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                      const MachineInstr *MI1,
                                      const MachineRegisterInfo *MRI) const {
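  // By default, be conservative: two instructions produce the same value only
  // if they are identical, ignoring which virtual registers they define.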
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->getDesc().isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

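  // If the live operand is a physical register, folding is only possible when
  // it already belongs to FoldReg's register class.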
  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}

bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
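  // Determine whether the fold reads from the stack slot, writes to it, or
  // both, based on whether each folded operand is a use or a def.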
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->getDesc().mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->getDesc().mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(
                MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
                Flags, MFI.getObjectSize(FI),
                MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

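  // Materialize the folded COPY as an actual spill or reload of the live
  // register, inserted immediately before MI.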
  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  const MCInstrDesc &MCID = MI->getDesc();

  // Avoid instructions obviously unsafe for remat.
  if (MCID.isNotDuplicable() || MCID.mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.def_empty(Reg))
          return false;
        BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
        if (AllocatableRegs.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI.def_empty(AliasReg))
            return false;
          if (AllocatableRegs.test(AliasReg))
            return false;
        }
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                               const MachineBasicBlock *MBB,
                                               const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
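  // Default to a scoreboard hazard recognizer driven by the target's
  // instruction itineraries.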
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}