//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyFunctionExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there
// is at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, so unifying those to the same return block as divergent
// branches inhibits use of scalar branching. It still can't deal with the case
// where one branch goes to a return and another to unreachable. Replace the
// unreachable in this case with a return.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"

namespace {

class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {
    initializeAMDGPUUnifyDivergentExitNodesPass(*PassRegistry::getPassRegistry());
  }

  // We can preserve non-critical-edgeness when we unify function exit nodes
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char AMDGPUUnifyDivergentExitNodes::ID = 0;

char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;

INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                      "Unify divergent function exit nodes", false, false)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                    "Unify divergent function exit nodes", false, false)

void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
  // TODO: Preserve dominator tree.
  AU.addRequired<PostDominatorTreeWrapperPass>();

  AU.addRequired<LegacyDivergenceAnalysis>();

  // No divergent values are changed, only blocks and branch edges.
  AU.addPreserved<LegacyDivergenceAnalysis>();

  // We preserve the non-critical-edgeness property
  AU.addPreservedID(BreakCriticalEdgesID);

  // This is a cluster of orthogonal Transforms
  AU.addPreservedID(LowerSwitchID);
  FunctionPass::getAnalysisUsage(AU);

  AU.addRequired<TargetTransformInfoWrapperPass>();
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
static bool isUniformlyReached(const LegacyDivergenceAnalysis &DA,
                               BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack;
  SmallPtrSet<BasicBlock *, 8> Visited;

  for (BasicBlock *Pred : predecessors(&BB))
    Stack.push_back(Pred);

  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!DA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}

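/// Clear the "done" bit on every export intrinsic (llvm.amdgcn.exp and
/// llvm.amdgcn.exp.compr) in \p F, so that a later null export can carry the
/// only "done" bit in the shader.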
static void removeDoneExport(Function &F) {
  ConstantInt *BoolFalse = ConstantInt::getFalse(F.getContext());
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (IntrinsicInst *Intrin = llvm::dyn_cast<IntrinsicInst>(&I)) {
        if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp) {
          Intrin->setArgOperand(6, BoolFalse); // done
        } else if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp_compr) {
          Intrin->setArgOperand(4, BoolFalse); // done
        }
      }
    }
  }
}

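/// Create a unified return block named \p Name, rewrite every block in
/// \p ReturningBlocks to branch to it, and merge return values through a PHI
/// node in the new block. If \p InsertExport is set, the new block also gets
/// a null export with the "done" bit set (and the "done" bit is cleared on
/// all other exports in \p F).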
static BasicBlock *unifyReturnBlockSet(Function &F,
                                       ArrayRef<BasicBlock *> ReturningBlocks,
                                       bool InsertExport,
                                       const TargetTransformInfo &TTI,
                                       StringRef Name) {
  // Insert a new basic block into the function, add a PHI node (if the
  // function returns a value), and convert all of the return instructions
  // into unconditional branches.
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);
  IRBuilder<> B(NewRetBlock);

  if (InsertExport) {
    // Ensure that there's only one "done" export in the shader by removing the
    // "done" bit set on the original final export. More than one "done" export
    // can lead to undefined behavior.
    removeDoneExport(F);

    Value *Undef = UndefValue::get(B.getFloatTy());
    B.CreateIntrinsic(Intrinsic::amdgcn_exp, { B.getFloatTy() },
                      {
                        B.getInt32(9), // target, SQ_EXP_NULL
                        B.getInt32(0), // enabled channels
                        Undef, Undef, Undef, Undef, // values
                        B.getTrue(), // done
                        B.getTrue(), // valid mask
                      });
  }

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    B.CreateRetVoid();
  } else {
    // If the function doesn't return void... add a PHI node to the block...
    PN = B.CreatePHI(F.getReturnType(), ReturningBlocks.size(),
                     "UnifiedRetVal");
    assert(!InsertExport);
    B.CreateRet(PN);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction that
    // is merging into this new block...
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    // Remove and delete the return inst.
    BB->getTerminator()->eraseFromParent();
    BranchInst::Create(NewRetBlock, BB);
  }

  for (BasicBlock *BB : ReturningBlocks) {
    // Cleanup possible branch to unconditional branch to the return.
    simplifyCFG(BB, TTI, {2});
  }

  return NewRetBlock;
}

bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
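  // If there is at most one exit, there is nothing to unify.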
  if (PDT.getRoots().size() <= 1)
    return false;

  LegacyDivergenceAnalysis &DA = getAnalysis<LegacyDivergenceAnalysis>();

  // Loop over all of the blocks in a function, tracking all of the blocks that
  // return.
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  // Dummy return block for infinite loop.
  BasicBlock *DummyReturnBB = nullptr;

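  // Whether the unified return block will need a null "done" export (pixel
  // shaders only; see below).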
  bool InsertExport = false;

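  // Walk the exit blocks (the roots of the post-dominator tree): collect
  // divergently reached returns and unreachables, and give any infinite loop
  // an artificial (never-taken) exit edge to a dummy return block.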
  for (BasicBlock *BB : PDT.getRoots()) {
    if (isa<ReturnInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        ReturningBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        UnreachableBlocks.push_back(BB);
    } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {

      ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext());
      if (DummyReturnBB == nullptr) {
        DummyReturnBB = BasicBlock::Create(F.getContext(),
                                           "DummyReturnBlock", &F);
        Type *RetTy = F.getReturnType();
        Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);

        // For pixel shaders, the producer guarantees that an export is
        // executed before each return instruction. However, if there is an
        // infinite loop and we insert a return ourselves, we need to uphold
        // that guarantee by inserting a null export. This can happen e.g. in
        // an infinite loop with kill instructions, which is supposed to
        // terminate. However, we don't need to do this if there is a non-void
        // return value, since then there is an epilog afterwards which will
        // still export.
        //
        // Note: In the case where only some threads enter the infinite loop,
        // this can result in the null export happening redundantly after the
        // original exports. However, the last "real" export happens after all
        // the threads that didn't enter an infinite loop converged, which
        // means that the only extra threads to execute the null export are
        // threads that entered the infinite loop, and they could only have
        // exited through being killed, which sets their exec bit to 0.
        // Therefore, unless there's an actual infinite loop (which can produce
        // invalid results anyway) or a kill after the last export (which we
        // assume the frontend won't emit), this export will have the same exec
        // mask as the last "real" export, and the valid mask will therefore be
        // overwritten with the same value and will still be correct. Also,
        // even though this forces an extra unnecessary export wait, we assume
        // that this happens rarely enough in practice that we don't have to
        // worry about performance.
        if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
            RetTy->isVoidTy()) {
          InsertExport = true;
        }

        ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB);
        ReturningBlocks.push_back(DummyReturnBB);
      }

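      // Give BB an edge to DummyReturnBB. The branch condition is a constant
      // true, so the dummy edge is never taken at run time; it only exists so
      // the CFG has a structurizable path out of the loop.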
      if (BI->isUnconditional()) {
        BasicBlock *LoopHeaderBB = BI->getSuccessor(0);
        BI->eraseFromParent(); // Delete the unconditional branch.
        // Add a new conditional branch with a dummy edge to the return block.
        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB);
      } else { // Conditional branch.
        // Create a new transition block to hold the conditional branch.
        BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock");

        // Create a branch that always branches to the transition block and
        // also references DummyReturnBB.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(TransitionBB, DummyReturnBB, BoolTrue, BB);
      }
    }
  }

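  // Unify the divergently reached unreachable blocks. If the function also has
  // returning blocks, the unified unreachable is itself turned into a return
  // (marked with llvm.amdgcn.unreachable), so the structurizer sees only one
  // kind of exit.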
  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      for (BasicBlock *BB : UnreachableBlocks) {
        // Remove and delete the unreachable inst.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(UnreachableBlock, BB);
      }
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle multiple exits.

      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
      // Remove and delete the unreachable inst.
      UnreachableBlock->getTerminator()->eraseFromParent();

      Function *UnreachableIntrin =
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code
      // was really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
    }
  }

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return false; // No blocks return

  if (ReturningBlocks.size() == 1)
    return false; // Already has a single return block

  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  unifyReturnBlockSet(F, ReturningBlocks, InsertExport, TTI,
                      "UnifiedReturnBlock");
  return true;
}