//===-- PPCCTRLoops.cpp - Identify and generate CTR loops -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass identifies loops where we can generate the PPC branch instructions
// that decrement and test the count register (CTR) (bdnz and friends).
//
// The pattern that defines the induction variable can change depending on
// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
// normalizes induction variables, and the Loop Strength Reduction pass
// run by 'llc' may also make changes to the induction variable.
//
// Criteria for CTR loops:
//  - Countable loops (w/ ind. var for a trip count)
//  - Try inner-most loops first
//  - No nested CTR loops.
//  - No function calls in loops.
//
//===----------------------------------------------------------------------===//
25
#include "llvm/Transforms/Scalar.h"
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifndef NDEBUG
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#endif

#include <algorithm>
#include <vector>
59
60 using namespace llvm;
61
62 #define DEBUG_TYPE "ctrloops"
63
64 #ifndef NDEBUG
65 static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));
66 #endif
67
68 STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");
69
70 namespace llvm {
71 void initializePPCCTRLoopsPass(PassRegistry&);
72 #ifndef NDEBUG
73 void initializePPCCTRLoopsVerifyPass(PassRegistry&);
74 #endif
75 }
76
77 namespace {
78 struct PPCCTRLoops : public FunctionPass {
79
80 #ifndef NDEBUG
81 static int Counter;
82 #endif
83
84 public:
85 static char ID;
86
PPCCTRLoops__anon01205c500111::PPCCTRLoops87 PPCCTRLoops() : FunctionPass(ID), TM(nullptr) {
88 initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
89 }
PPCCTRLoops__anon01205c500111::PPCCTRLoops90 PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
91 initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
92 }
93
94 bool runOnFunction(Function &F) override;
95
getAnalysisUsage__anon01205c500111::PPCCTRLoops96 void getAnalysisUsage(AnalysisUsage &AU) const override {
97 AU.addRequired<LoopInfoWrapperPass>();
98 AU.addPreserved<LoopInfoWrapperPass>();
99 AU.addRequired<DominatorTreeWrapperPass>();
100 AU.addPreserved<DominatorTreeWrapperPass>();
101 AU.addRequired<ScalarEvolution>();
102 }
103
104 private:
105 bool mightUseCTR(const Triple &TT, BasicBlock *BB);
106 bool convertToCTRLoop(Loop *L);
107
108 private:
109 PPCTargetMachine *TM;
110 LoopInfo *LI;
111 ScalarEvolution *SE;
112 const DataLayout *DL;
113 DominatorTree *DT;
114 const TargetLibraryInfo *LibInfo;
115 };
116
117 char PPCCTRLoops::ID = 0;
118 #ifndef NDEBUG
119 int PPCCTRLoops::Counter = 0;
120 #endif
121
122 #ifndef NDEBUG
123 struct PPCCTRLoopsVerify : public MachineFunctionPass {
124 public:
125 static char ID;
126
PPCCTRLoopsVerify__anon01205c500111::PPCCTRLoopsVerify127 PPCCTRLoopsVerify() : MachineFunctionPass(ID) {
128 initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
129 }
130
getAnalysisUsage__anon01205c500111::PPCCTRLoopsVerify131 void getAnalysisUsage(AnalysisUsage &AU) const override {
132 AU.addRequired<MachineDominatorTree>();
133 MachineFunctionPass::getAnalysisUsage(AU);
134 }
135
136 bool runOnMachineFunction(MachineFunction &MF) override;
137
138 private:
139 MachineDominatorTree *MDT;
140 };
141
142 char PPCCTRLoopsVerify::ID = 0;
143 #endif // NDEBUG
144 } // end anonymous namespace
145
146 INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
147 false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)148 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
149 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
150 INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
151 INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
152 false, false)
153
154 FunctionPass *llvm::createPPCCTRLoops(PPCTargetMachine &TM) {
155 return new PPCCTRLoops(TM);
156 }
157
158 #ifndef NDEBUG
159 INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
160 "PowerPC CTR Loops Verify", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)161 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
162 INITIALIZE_PASS_END(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
163 "PowerPC CTR Loops Verify", false, false)
164
165 FunctionPass *llvm::createPPCCTRLoopsVerify() {
166 return new PPCCTRLoopsVerify();
167 }
168 #endif // NDEBUG
169
runOnFunction(Function & F)170 bool PPCCTRLoops::runOnFunction(Function &F) {
171 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
172 SE = &getAnalysis<ScalarEvolution>();
173 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
174 DL = &F.getParent()->getDataLayout();
175 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
176 LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
177
178 bool MadeChange = false;
179
180 for (LoopInfo::iterator I = LI->begin(), E = LI->end();
181 I != E; ++I) {
182 Loop *L = *I;
183 if (!L->getParentLoop())
184 MadeChange |= convertToCTRLoop(L);
185 }
186
187 return MadeChange;
188 }
189
isLargeIntegerTy(bool Is32Bit,Type * Ty)190 static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
191 if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
192 return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);
193
194 return false;
195 }
196
197 // Determining the address of a TLS variable results in a function call in
198 // certain TLS models.
memAddrUsesCTR(const PPCTargetMachine * TM,const llvm::Value * MemAddr)199 static bool memAddrUsesCTR(const PPCTargetMachine *TM,
200 const llvm::Value *MemAddr) {
201 const auto *GV = dyn_cast<GlobalValue>(MemAddr);
202 if (!GV)
203 return false;
204 if (!GV->isThreadLocal())
205 return false;
206 if (!TM)
207 return true;
208 TLSModel::Model Model = TM->getTLSModel(GV);
209 return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
210 }
211
mightUseCTR(const Triple & TT,BasicBlock * BB)212 bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
213 for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
214 J != JE; ++J) {
215 if (CallInst *CI = dyn_cast<CallInst>(J)) {
216 if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
217 // Inline ASM is okay, unless it clobbers the ctr register.
218 InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
219 for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
220 InlineAsm::ConstraintInfo &C = CIV[i];
221 if (C.Type != InlineAsm::isInput)
222 for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
223 if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
224 return true;
225 }
226
227 continue;
228 }
229
230 if (!TM)
231 return true;
232 const TargetLowering *TLI =
233 TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();
234
235 if (Function *F = CI->getCalledFunction()) {
236 // Most intrinsics don't become function calls, but some might.
237 // sin, cos, exp and log are always calls.
238 unsigned Opcode;
239 if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
240 switch (F->getIntrinsicID()) {
241 default: continue;
242
243 // VisualStudio defines setjmp as _setjmp
244 #if defined(_MSC_VER) && defined(setjmp) && \
245 !defined(setjmp_undefined_for_msvc)
246 # pragma push_macro("setjmp")
247 # undef setjmp
248 # define setjmp_undefined_for_msvc
249 #endif
250
251 case Intrinsic::setjmp:
252
253 #if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
254 // let's return it to _setjmp state
255 # pragma pop_macro("setjmp")
256 # undef setjmp_undefined_for_msvc
257 #endif
258
259 case Intrinsic::longjmp:
260
261 // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
262 // because, although it does clobber the counter register, the
263 // control can't then return to inside the loop unless there is also
264 // an eh_sjlj_setjmp.
265 case Intrinsic::eh_sjlj_setjmp:
266
267 case Intrinsic::memcpy:
268 case Intrinsic::memmove:
269 case Intrinsic::memset:
270 case Intrinsic::powi:
271 case Intrinsic::log:
272 case Intrinsic::log2:
273 case Intrinsic::log10:
274 case Intrinsic::exp:
275 case Intrinsic::exp2:
276 case Intrinsic::pow:
277 case Intrinsic::sin:
278 case Intrinsic::cos:
279 return true;
280 case Intrinsic::copysign:
281 if (CI->getArgOperand(0)->getType()->getScalarType()->
282 isPPC_FP128Ty())
283 return true;
284 else
285 continue; // ISD::FCOPYSIGN is never a library call.
286 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
287 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
288 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
289 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
290 case Intrinsic::rint: Opcode = ISD::FRINT; break;
291 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
292 case Intrinsic::round: Opcode = ISD::FROUND; break;
293 }
294 }
295
296 // PowerPC does not use [US]DIVREM or other library calls for
297 // operations on regular types which are not otherwise library calls
298 // (i.e. soft float or atomics). If adapting for targets that do,
299 // additional care is required here.
300
301 LibFunc::Func Func;
302 if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
303 LibInfo->getLibFunc(F->getName(), Func) &&
304 LibInfo->hasOptimizedCodeGen(Func)) {
305 // Non-read-only functions are never treated as intrinsics.
306 if (!CI->onlyReadsMemory())
307 return true;
308
309 // Conversion happens only for FP calls.
310 if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
311 return true;
312
313 switch (Func) {
314 default: return true;
315 case LibFunc::copysign:
316 case LibFunc::copysignf:
317 continue; // ISD::FCOPYSIGN is never a library call.
318 case LibFunc::copysignl:
319 return true;
320 case LibFunc::fabs:
321 case LibFunc::fabsf:
322 case LibFunc::fabsl:
323 continue; // ISD::FABS is never a library call.
324 case LibFunc::sqrt:
325 case LibFunc::sqrtf:
326 case LibFunc::sqrtl:
327 Opcode = ISD::FSQRT; break;
328 case LibFunc::floor:
329 case LibFunc::floorf:
330 case LibFunc::floorl:
331 Opcode = ISD::FFLOOR; break;
332 case LibFunc::nearbyint:
333 case LibFunc::nearbyintf:
334 case LibFunc::nearbyintl:
335 Opcode = ISD::FNEARBYINT; break;
336 case LibFunc::ceil:
337 case LibFunc::ceilf:
338 case LibFunc::ceill:
339 Opcode = ISD::FCEIL; break;
340 case LibFunc::rint:
341 case LibFunc::rintf:
342 case LibFunc::rintl:
343 Opcode = ISD::FRINT; break;
344 case LibFunc::round:
345 case LibFunc::roundf:
346 case LibFunc::roundl:
347 Opcode = ISD::FROUND; break;
348 case LibFunc::trunc:
349 case LibFunc::truncf:
350 case LibFunc::truncl:
351 Opcode = ISD::FTRUNC; break;
352 }
353
354 MVT VTy =
355 TLI->getSimpleValueType(CI->getArgOperand(0)->getType(), true);
356 if (VTy == MVT::Other)
357 return true;
358
359 if (TLI->isOperationLegalOrCustom(Opcode, VTy))
360 continue;
361 else if (VTy.isVector() &&
362 TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
363 continue;
364
365 return true;
366 }
367 }
368
369 return true;
370 } else if (isa<BinaryOperator>(J) &&
371 J->getType()->getScalarType()->isPPC_FP128Ty()) {
372 // Most operations on ppc_f128 values become calls.
373 return true;
374 } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
375 isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
376 CastInst *CI = cast<CastInst>(J);
377 if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
378 CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
379 isLargeIntegerTy(TT.isArch32Bit(), CI->getSrcTy()->getScalarType()) ||
380 isLargeIntegerTy(TT.isArch32Bit(), CI->getDestTy()->getScalarType()))
381 return true;
382 } else if (isLargeIntegerTy(TT.isArch32Bit(),
383 J->getType()->getScalarType()) &&
384 (J->getOpcode() == Instruction::UDiv ||
385 J->getOpcode() == Instruction::SDiv ||
386 J->getOpcode() == Instruction::URem ||
387 J->getOpcode() == Instruction::SRem)) {
388 return true;
389 } else if (TT.isArch32Bit() &&
390 isLargeIntegerTy(false, J->getType()->getScalarType()) &&
391 (J->getOpcode() == Instruction::Shl ||
392 J->getOpcode() == Instruction::AShr ||
393 J->getOpcode() == Instruction::LShr)) {
394 // Only on PPC32, for 128-bit integers (specifically not 64-bit
395 // integers), these might be runtime calls.
396 return true;
397 } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
398 // On PowerPC, indirect jumps use the counter register.
399 return true;
400 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
401 if (!TM)
402 return true;
403 const TargetLowering *TLI =
404 TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();
405
406 if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
407 return true;
408 }
409 for (Value *Operand : J->operands())
410 if (memAddrUsesCTR(TM, Operand))
411 return true;
412 }
413
414 return false;
415 }
416
convertToCTRLoop(Loop * L)417 bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
418 bool MadeChange = false;
419
420 Triple TT = Triple(L->getHeader()->getParent()->getParent()->
421 getTargetTriple());
422 if (!TT.isArch32Bit() && !TT.isArch64Bit())
423 return MadeChange; // Unknown arch. type.
424
425 // Process nested loops first.
426 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
427 MadeChange |= convertToCTRLoop(*I);
428 }
429
430 // If a nested loop has been converted, then we can't convert this loop.
431 if (MadeChange)
432 return MadeChange;
433
434 #ifndef NDEBUG
435 // Stop trying after reaching the limit (if any).
436 int Limit = CTRLoopLimit;
437 if (Limit >= 0) {
438 if (Counter >= CTRLoopLimit)
439 return false;
440 Counter++;
441 }
442 #endif
443
444 // We don't want to spill/restore the counter register, and so we don't
445 // want to use the counter register if the loop contains calls.
446 for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
447 I != IE; ++I)
448 if (mightUseCTR(TT, *I))
449 return MadeChange;
450
451 SmallVector<BasicBlock*, 4> ExitingBlocks;
452 L->getExitingBlocks(ExitingBlocks);
453
454 BasicBlock *CountedExitBlock = nullptr;
455 const SCEV *ExitCount = nullptr;
456 BranchInst *CountedExitBranch = nullptr;
457 for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
458 IE = ExitingBlocks.end(); I != IE; ++I) {
459 const SCEV *EC = SE->getExitCount(L, *I);
460 DEBUG(dbgs() << "Exit Count for " << *L << " from block " <<
461 (*I)->getName() << ": " << *EC << "\n");
462 if (isa<SCEVCouldNotCompute>(EC))
463 continue;
464 if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
465 if (ConstEC->getValue()->isZero())
466 continue;
467 } else if (!SE->isLoopInvariant(EC, L))
468 continue;
469
470 if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32))
471 continue;
472
473 // We now have a loop-invariant count of loop iterations (which is not the
474 // constant zero) for which we know that this loop will not exit via this
475 // exisiting block.
476
477 // We need to make sure that this block will run on every loop iteration.
478 // For this to be true, we must dominate all blocks with backedges. Such
479 // blocks are in-loop predecessors to the header block.
480 bool NotAlways = false;
481 for (pred_iterator PI = pred_begin(L->getHeader()),
482 PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
483 if (!L->contains(*PI))
484 continue;
485
486 if (!DT->dominates(*I, *PI)) {
487 NotAlways = true;
488 break;
489 }
490 }
491
492 if (NotAlways)
493 continue;
494
495 // Make sure this blocks ends with a conditional branch.
496 Instruction *TI = (*I)->getTerminator();
497 if (!TI)
498 continue;
499
500 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
501 if (!BI->isConditional())
502 continue;
503
504 CountedExitBranch = BI;
505 } else
506 continue;
507
508 // Note that this block may not be the loop latch block, even if the loop
509 // has a latch block.
510 CountedExitBlock = *I;
511 ExitCount = EC;
512 break;
513 }
514
515 if (!CountedExitBlock)
516 return MadeChange;
517
518 BasicBlock *Preheader = L->getLoopPreheader();
519
520 // If we don't have a preheader, then insert one. If we already have a
521 // preheader, then we can use it (except if the preheader contains a use of
522 // the CTR register because some such uses might be reordered by the
523 // selection DAG after the mtctr instruction).
524 if (!Preheader || mightUseCTR(TT, Preheader))
525 Preheader = InsertPreheaderForLoop(L, this);
526 if (!Preheader)
527 return MadeChange;
528
529 DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName() << "\n");
530
531 // Insert the count into the preheader and replace the condition used by the
532 // selected branch.
533 MadeChange = true;
534
535 SCEVExpander SCEVE(*SE, Preheader->getModule()->getDataLayout(), "loopcnt");
536 LLVMContext &C = SE->getContext();
537 Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
538 Type::getInt32Ty(C);
539 if (!ExitCount->getType()->isPointerTy() &&
540 ExitCount->getType() != CountType)
541 ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
542 ExitCount = SE->getAddExpr(ExitCount,
543 SE->getConstant(CountType, 1));
544 Value *ECValue = SCEVE.expandCodeFor(ExitCount, CountType,
545 Preheader->getTerminator());
546
547 IRBuilder<> CountBuilder(Preheader->getTerminator());
548 Module *M = Preheader->getParent()->getParent();
549 Value *MTCTRFunc = Intrinsic::getDeclaration(M, Intrinsic::ppc_mtctr,
550 CountType);
551 CountBuilder.CreateCall(MTCTRFunc, ECValue);
552
553 IRBuilder<> CondBuilder(CountedExitBranch);
554 Value *DecFunc =
555 Intrinsic::getDeclaration(M, Intrinsic::ppc_is_decremented_ctr_nonzero);
556 Value *NewCond = CondBuilder.CreateCall(DecFunc);
557 Value *OldCond = CountedExitBranch->getCondition();
558 CountedExitBranch->setCondition(NewCond);
559
560 // The false branch must exit the loop.
561 if (!L->contains(CountedExitBranch->getSuccessor(0)))
562 CountedExitBranch->swapSuccessors();
563
564 // The old condition may be dead now, and may have even created a dead PHI
565 // (the original induction variable).
566 RecursivelyDeleteTriviallyDeadInstructions(OldCond);
567 DeleteDeadPHIs(CountedExitBlock);
568
569 ++NumCTRLoops;
570 return MadeChange;
571 }
572
573 #ifndef NDEBUG
clobbersCTR(const MachineInstr * MI)574 static bool clobbersCTR(const MachineInstr *MI) {
575 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
576 const MachineOperand &MO = MI->getOperand(i);
577 if (MO.isReg()) {
578 if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8))
579 return true;
580 } else if (MO.isRegMask()) {
581 if (MO.clobbersPhysReg(PPC::CTR) || MO.clobbersPhysReg(PPC::CTR8))
582 return true;
583 }
584 }
585
586 return false;
587 }
588
verifyCTRBranch(MachineBasicBlock * MBB,MachineBasicBlock::iterator I)589 static bool verifyCTRBranch(MachineBasicBlock *MBB,
590 MachineBasicBlock::iterator I) {
591 MachineBasicBlock::iterator BI = I;
592 SmallSet<MachineBasicBlock *, 16> Visited;
593 SmallVector<MachineBasicBlock *, 8> Preds;
594 bool CheckPreds;
595
596 if (I == MBB->begin()) {
597 Visited.insert(MBB);
598 goto queue_preds;
599 } else
600 --I;
601
602 check_block:
603 Visited.insert(MBB);
604 if (I == MBB->end())
605 goto queue_preds;
606
607 CheckPreds = true;
608 for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
609 unsigned Opc = I->getOpcode();
610 if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
611 CheckPreds = false;
612 break;
613 }
614
615 if (I != BI && clobbersCTR(I)) {
616 DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
617 MBB->getFullName() << ") instruction " << *I <<
618 " clobbers CTR, invalidating " << "BB#" <<
619 BI->getParent()->getNumber() << " (" <<
620 BI->getParent()->getFullName() << ") instruction " <<
621 *BI << "\n");
622 return false;
623 }
624
625 if (I == IE)
626 break;
627 }
628
629 if (!CheckPreds && Preds.empty())
630 return true;
631
632 if (CheckPreds) {
633 queue_preds:
634 if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
635 DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
636 BI->getParent()->getNumber() << " (" <<
637 BI->getParent()->getFullName() << ") instruction " <<
638 *BI << "\n");
639 return false;
640 }
641
642 for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
643 PIE = MBB->pred_end(); PI != PIE; ++PI)
644 Preds.push_back(*PI);
645 }
646
647 do {
648 MBB = Preds.pop_back_val();
649 if (!Visited.count(MBB)) {
650 I = MBB->getLastNonDebugInstr();
651 goto check_block;
652 }
653 } while (!Preds.empty());
654
655 return true;
656 }
657
runOnMachineFunction(MachineFunction & MF)658 bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
659 MDT = &getAnalysis<MachineDominatorTree>();
660
661 // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
662 // any other instructions that might clobber the ctr register.
663 for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
664 I != IE; ++I) {
665 MachineBasicBlock *MBB = I;
666 if (!MDT->isReachableFromEntry(MBB))
667 continue;
668
669 for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
670 MIIE = MBB->end(); MII != MIIE; ++MII) {
671 unsigned Opc = MII->getOpcode();
672 if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
673 Opc == PPC::BDZ8 || Opc == PPC::BDZ)
674 if (!verifyCTRBranch(MBB, MII))
675 llvm_unreachable("Invalid PPC CTR loop!");
676 }
677 }
678
679 return false;
680 }
681 #endif // NDEBUG
682
683