// Copyright (c) 2023 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "source/opt/invocation_interlock_placement_pass.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <deque>
#include <functional>
#include <optional>
#include <queue>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "source/enum_set.h"
#include "source/enum_string_mapping.h"
#include "source/opt/ir_context.h"
#include "source/opt/reflect.h"
#include "source/spirv_target_env.h"
#include "source/util/string_utils.h"

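// This pass adjusts the placement of OpBeginInvocationInterlockEXT and
// OpEndInvocationInterlockEXT in fragment shader entry points that use
// SPV_EXT_fragment_shader_interlock. Begin/end instructions found in called
// functions are replicated at their call sites and removed from the callees,
// redundant begin/end instructions are removed, and new instructions are
// inserted on control-flow edges that enter or leave the computed critical
// section.
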
namespace spvtools {
namespace opt {

namespace {
constexpr uint32_t kEntryPointExecutionModelInIdx = 0;
constexpr uint32_t kEntryPointFunctionIdInIdx = 1;
constexpr uint32_t kFunctionCallFunctionIdInIdx = 0;
}  // namespace

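// Returns true if the block has a single "next" block: a single successor
// when reverse_cfg is true (forward traversal), or a single predecessor when
// reverse_cfg is false (backward traversal).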
bool InvocationInterlockPlacementPass::hasSingleNextBlock(uint32_t block_id,
                                                          bool reverse_cfg) {
  if (reverse_cfg) {
    // We are traversing forward, so check whether there is a single successor.
    BasicBlock* block = cfg()->block(block_id);

    switch (block->tail()->opcode()) {
      case spv::Op::OpBranchConditional:
        return false;
      case spv::Op::OpSwitch:
        return block->tail()->NumInOperandWords() == 1;
      default:
        return !block->tail()->IsReturnOrAbort();
    }
  } else {
    // We are traversing backward, so check whether there is a single
    // predecessor.
    return cfg()->preds(block_id).size() == 1;
  }
}

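// Applies f to each "next" block: every successor of block_id when
// reverse_cfg is true, or every predecessor when reverse_cfg is false.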
void InvocationInterlockPlacementPass::forEachNext(
    uint32_t block_id, bool reverse_cfg, std::function<void(uint32_t)> f) {
  if (reverse_cfg) {
    BasicBlock* block = cfg()->block(block_id);

    block->ForEachSuccessorLabel([f](uint32_t succ_id) { f(succ_id); });
  } else {
    for (uint32_t pred_id : cfg()->preds(block_id)) {
      f(pred_id);
    }
  }
}

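// Creates a new instruction with the given opcode and inserts it either
// immediately before the block's terminator (at_end == true) or at the very
// beginning of the block (at_end == false).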
void InvocationInterlockPlacementPass::addInstructionAtBlockBoundary(
    BasicBlock* block, spv::Op opcode, bool at_end) {
  if (at_end) {
    assert(block->begin()->opcode() != spv::Op::OpPhi &&
           "addInstructionAtBlockBoundary expects to be called with at_end == "
           "true only if there is a single successor to block");
    // Insert a begin instruction at the end of the block.
    Instruction* begin_inst = new Instruction(context(), opcode);
    begin_inst->InsertAfter(&*--block->tail());
  } else {
    assert(block->begin()->opcode() != spv::Op::OpPhi &&
           "addInstructionAtBlockBoundary expects to be called with at_end == "
           "false only if there is a single predecessor to block");
    // Insert an end instruction at the beginning of the block.
    Instruction* end_inst = new Instruction(context(), opcode);
    end_inst->InsertBefore(&*block->begin());
  }
}

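// Removes every OpBeginInvocationInterlockEXT in the block except the first.
// Returns true if any instruction was removed.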
bool InvocationInterlockPlacementPass::killDuplicateBegin(BasicBlock* block) {
  bool found = false;

  return context()->KillInstructionIf(
      block->begin(), block->end(), [&found](Instruction* inst) {
        if (inst->opcode() == spv::Op::OpBeginInvocationInterlockEXT) {
          if (found) {
            return true;
          }
          found = true;
        }
        return false;
      });
}

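// Removes every OpEndInvocationInterlockEXT in the block except the last.
// Returns true if any instruction was removed.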
bool InvocationInterlockPlacementPass::killDuplicateEnd(BasicBlock* block) {
  std::vector<Instruction*> to_kill;
  block->ForEachInst([&to_kill](Instruction* inst) {
    if (inst->opcode() == spv::Op::OpEndInvocationInterlockEXT) {
      to_kill.push_back(inst);
    }
  });

  if (to_kill.size() <= 1) {
    return false;
  }

  to_kill.pop_back();

  for (Instruction* inst : to_kill) {
    context()->KillInst(inst);
  }

  return true;
}

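// Records in extracted_functions_ whether func, or any function it calls
// (transitively), contains an OpBeginInvocationInterlockEXT or
// OpEndInvocationInterlockEXT instruction.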
void InvocationInterlockPlacementPass::recordBeginOrEndInFunction(
    Function* func) {
  if (extracted_functions_.count(func)) {
    return;
  }

  bool had_begin = false;
  bool had_end = false;

  func->ForEachInst([this, &had_begin, &had_end](Instruction* inst) {
    switch (inst->opcode()) {
      case spv::Op::OpBeginInvocationInterlockEXT:
        had_begin = true;
        break;
      case spv::Op::OpEndInvocationInterlockEXT:
        had_end = true;
        break;
      case spv::Op::OpFunctionCall: {
        uint32_t function_id =
            inst->GetSingleWordInOperand(kFunctionCallFunctionIdInIdx);
        Function* inner_func = context()->GetFunction(function_id);
        recordBeginOrEndInFunction(inner_func);
        ExtractionResult result = extracted_functions_[inner_func];
        had_begin = had_begin || result.had_begin;
        had_end = had_end || result.had_end;
        break;
      }
      default:
        break;
    }
  });

  ExtractionResult result = {had_begin, had_end};
  extracted_functions_[func] = result;
}

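// Removes all begin and end instructions from func. Returns true if the
// function was modified.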
bool InvocationInterlockPlacementPass::
    removeBeginAndEndInstructionsFromFunction(Function* func) {
  bool modified = false;
  func->ForEachInst([this, &modified](Instruction* inst) {
    switch (inst->opcode()) {
      case spv::Op::OpBeginInvocationInterlockEXT:
        context()->KillInst(inst);
        modified = true;
        break;
      case spv::Op::OpEndInvocationInterlockEXT:
        context()->KillInst(inst);
        modified = true;
        break;
      default:
        break;
    }
  });
  return modified;
}

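// For each OpFunctionCall in the given blocks, inserts a begin instruction
// before the call and/or an end instruction after the call when the callee
// (transitively) contained one, as recorded in extracted_functions_. Returns
// true if any instruction was inserted.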
bool InvocationInterlockPlacementPass::extractInstructionsFromCalls(
    std::vector<BasicBlock*> blocks) {
  bool modified = false;

  for (BasicBlock* block : blocks) {
    block->ForEachInst([this, &modified](Instruction* inst) {
      if (inst->opcode() == spv::Op::OpFunctionCall) {
        uint32_t function_id =
            inst->GetSingleWordInOperand(kFunctionCallFunctionIdInIdx);
        Function* func = context()->GetFunction(function_id);
        ExtractionResult result = extracted_functions_[func];

        if (result.had_begin) {
          Instruction* new_inst = new Instruction(
              context(), spv::Op::OpBeginInvocationInterlockEXT);
          new_inst->InsertBefore(inst);
          modified = true;
        }
        if (result.had_end) {
          Instruction* new_inst =
              new Instruction(context(), spv::Op::OpEndInvocationInterlockEXT);
          new_inst->InsertAfter(inst);
          modified = true;
        }
      }
    });
  }
  return modified;
}

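// Records in begin_ and end_ the IDs of the blocks that contain a begin or an
// end instruction, respectively.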
void InvocationInterlockPlacementPass::recordExistingBeginAndEndBlock(
    std::vector<BasicBlock*> blocks) {
  for (BasicBlock* block : blocks) {
    block->ForEachInst([this, block](Instruction* inst) {
      switch (inst->opcode()) {
        case spv::Op::OpBeginInvocationInterlockEXT:
          begin_.insert(block->id());
          break;
        case spv::Op::OpEndInvocationInterlockEXT:
          end_.insert(block->id());
          break;
        default:
          break;
      }
    });
  }
}

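// Returns the set of blocks reachable from starting_nodes (including the
// starting nodes themselves), following successors when reverse_cfg is true
// and predecessors when reverse_cfg is false. Every block reached through an
// edge during the traversal is also added to previous_inside, so
// previous_inside ends up holding the blocks that have at least one
// "previous" block in the returned set.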
InvocationInterlockPlacementPass::BlockSet
InvocationInterlockPlacementPass::computeReachableBlocks(
    BlockSet& previous_inside, const BlockSet& starting_nodes,
    bool reverse_cfg) {
  BlockSet inside = starting_nodes;

  std::deque<uint32_t> worklist;
  worklist.insert(worklist.begin(), starting_nodes.begin(),
                  starting_nodes.end());

  while (!worklist.empty()) {
    uint32_t block_id = worklist.front();
    worklist.pop_front();

    forEachNext(block_id, reverse_cfg,
                [&inside, &previous_inside, &worklist](uint32_t next_id) {
                  previous_inside.insert(next_id);
                  if (inside.insert(next_id).second) {
                    worklist.push_back(next_id);
                  }
                });
  }

  return inside;
}

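// Removes begin and end instructions that are not needed in this block:
// duplicate begins (or ends) in a block that itself starts (or finishes) the
// critical section, and all begins in a block that already has a predecessor
// inside the section (respectively, all ends in a block that has a successor
// inside it). Returns true if anything was removed.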
bool InvocationInterlockPlacementPass::removeUnneededInstructions(
    BasicBlock* block) {
  bool modified = false;
  if (!predecessors_after_begin_.count(block->id()) &&
      after_begin_.count(block->id())) {
    // None of the previous blocks are in the critical section, but this block
    // is. This can only happen if this block already has at least one begin
    // instruction. Leave the first begin instruction, and remove any others.
    modified |= killDuplicateBegin(block);
  } else if (predecessors_after_begin_.count(block->id())) {
    // At least one previous block is in the critical section; remove all
    // begin instructions in this block.
    modified |= context()->KillInstructionIf(
        block->begin(), block->end(), [](Instruction* inst) {
          return inst->opcode() == spv::Op::OpBeginInvocationInterlockEXT;
        });
  }

  if (!successors_before_end_.count(block->id()) &&
      before_end_.count(block->id())) {
    // Same as above, but for end instructions: leave the last end
    // instruction, and remove any others.
    modified |= killDuplicateEnd(block);
  } else if (successors_before_end_.count(block->id())) {
    modified |= context()->KillInstructionIf(
        block->begin(), block->end(), [](Instruction* inst) {
          return inst->opcode() == spv::Op::OpEndInvocationInterlockEXT;
        });
  }
  return modified;
}

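// Splits the edge from block to its successor succ_id: creates a new block
// containing only an unconditional branch to succ_id, inserts it after block,
// and redirects the first matching target in block's terminator to the new
// block. Returns the new block.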
BasicBlock* InvocationInterlockPlacementPass::splitEdge(BasicBlock* block,
                                                        uint32_t succ_id) {
  // Create a new block to replace the critical edge.
  auto new_succ_temp = MakeUnique<BasicBlock>(
      MakeUnique<Instruction>(context(), spv::Op::OpLabel, 0, TakeNextId(),
                              std::initializer_list<Operand>{}));
  auto* new_succ = new_succ_temp.get();

  // Insert the new block into the function.
  block->GetParent()->InsertBasicBlockAfter(std::move(new_succ_temp), block);

  new_succ->AddInstruction(MakeUnique<Instruction>(
      context(), spv::Op::OpBranch, 0, 0,
      std::initializer_list<Operand>{
          Operand(spv_operand_type_t::SPV_OPERAND_TYPE_ID, {succ_id})}));

  assert(block->tail()->opcode() == spv::Op::OpBranchConditional ||
         block->tail()->opcode() == spv::Op::OpSwitch);

  // Update the first branch to successor to instead branch to
  // the new successor. If there are multiple edges, we arbitrarily choose the
  // first time it appears in the list. The other edges to `succ_id` will have
  // to be split by another call to `splitEdge`.
  block->tail()->WhileEachInId([new_succ, succ_id](uint32_t* branch_id) {
    if (*branch_id == succ_id) {
      *branch_id = new_succ->id();
      return false;
    }
    return true;
  });

  return new_succ;
}

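// If next_id already has a previous block inside the critical section while
// block is outside it, inserts an instruction with the given opcode on the
// edge between block and next_id: directly at the block boundary when block
// has a single next block, otherwise in a new block created by splitting the
// edge. "Previous" and "next" follow the traversal direction selected by
// reverse_cfg. Returns true if an instruction was inserted.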
bool InvocationInterlockPlacementPass::placeInstructionsForEdge(
    BasicBlock* block, uint32_t next_id, BlockSet& inside,
    BlockSet& previous_inside, spv::Op opcode, bool reverse_cfg) {
  bool modified = false;

  if (previous_inside.count(next_id) && !inside.count(block->id())) {
    // This block is not in the critical section, but the next block has at
    // least one other previous block that is, so this edge must enter the
    // critical section as well. We need to add a begin or end instruction to
    // the edge.

    modified = true;

    if (hasSingleNextBlock(block->id(), reverse_cfg)) {
      // This is the only next block.

      // Additionally, because `next_id` is in `previous_inside`, we know that
      // `next_id` has at least one previous block in `inside`. And because
      // `block` is not in `inside`, that means `next_id` has to have at
      // least one other previous block in `inside`.

      // This is solely for a debug assertion. It is essentially recomputing
      // the value of `previous_inside` to verify that it was computed
      // correctly, such that the above statement is true.
      bool next_has_previous_inside = false;
      // By passing !reverse_cfg to forEachNext, we are actually iterating over
      // the previous blocks.
      forEachNext(next_id, !reverse_cfg,
                  [&next_has_previous_inside, inside](uint32_t previous_id) {
                    if (inside.count(previous_id)) {
                      next_has_previous_inside = true;
                    }
                  });
      assert(next_has_previous_inside &&
             "`previous_inside` must be the set of blocks with at least one "
             "previous block in `inside`");

      addInstructionAtBlockBoundary(block, opcode, reverse_cfg);
    } else {
      // This block has multiple next blocks. Split the edge and insert the
      // instruction in the new next block.
      BasicBlock* new_branch;
      if (reverse_cfg) {
        new_branch = splitEdge(block, next_id);
      } else {
        new_branch = splitEdge(cfg()->block(next_id), block->id());
      }

      auto inst = new Instruction(context(), opcode);
      inst->InsertBefore(&*new_branch->tail());
    }
  }

  return modified;
}

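// Examines each outgoing edge of block and inserts any begin or end
// instructions needed so that entering or leaving the critical section along
// that edge executes the matching instruction. Returns true if anything was
// inserted.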
bool InvocationInterlockPlacementPass::placeInstructions(BasicBlock* block) {
  bool modified = false;

  block->ForEachSuccessorLabel([this, block, &modified](uint32_t succ_id) {
    modified |= placeInstructionsForEdge(
        block, succ_id, after_begin_, predecessors_after_begin_,
        spv::Op::OpBeginInvocationInterlockEXT, /* reverse_cfg= */ true);
    modified |= placeInstructionsForEdge(cfg()->block(succ_id), block->id(),
                                         before_end_, successors_before_end_,
                                         spv::Op::OpEndInvocationInterlockEXT,
                                         /* reverse_cfg= */ false);
  });

  return modified;
}

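// Rewrites a single fragment shader entry point: extracts begin/end
// instructions from function calls, records the existing begin and end
// blocks, computes the critical section, removes redundant instructions, and
// inserts the instructions needed on edges into and out of the section.
// Returns true if the function was modified.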
bool InvocationInterlockPlacementPass::processFragmentShaderEntry(
    Function* entry_func) {
  bool modified = false;

  // Save the original order of blocks in the function, so we don't iterate
  // over newly-added blocks.
  std::vector<BasicBlock*> original_blocks;
  for (auto bi = entry_func->begin(); bi != entry_func->end(); ++bi) {
    original_blocks.push_back(&*bi);
  }

  modified |= extractInstructionsFromCalls(original_blocks);
  recordExistingBeginAndEndBlock(original_blocks);

  after_begin_ = computeReachableBlocks(predecessors_after_begin_, begin_,
                                        /* reverse_cfg= */ true);
  before_end_ = computeReachableBlocks(successors_before_end_, end_,
                                       /* reverse_cfg= */ false);

  for (BasicBlock* block : original_blocks) {
    modified |= removeUnneededInstructions(block);
    modified |= placeInstructions(block);
  }
  return modified;
}

bool InvocationInterlockPlacementPass::isFragmentShaderInterlockEnabled() {
  if (!context()->get_feature_mgr()->HasExtension(
          kSPV_EXT_fragment_shader_interlock)) {
    return false;
  }

  if (context()->get_feature_mgr()->HasCapability(
          spv::Capability::FragmentShaderSampleInterlockEXT)) {
    return true;
  }

  if (context()->get_feature_mgr()->HasCapability(
          spv::Capability::FragmentShaderPixelInterlockEXT)) {
    return true;
  }

  if (context()->get_feature_mgr()->HasCapability(
          spv::Capability::FragmentShaderShadingRateInterlockEXT)) {
    return true;
  }

  return false;
}

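// Pass entry point: records begin/end usage for every function, strips the
// instructions from functions that are not entry points, and then fixes up
// each fragment shader entry point.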
Pass::Status InvocationInterlockPlacementPass::Process() {
  // Skip this pass if the necessary extension or capability is missing.
  if (!isFragmentShaderInterlockEnabled()) {
    return Status::SuccessWithoutChange;
  }

  bool modified = false;

  std::unordered_set<Function*> entry_points;
  for (Instruction& entry_inst : context()->module()->entry_points()) {
    uint32_t entry_id =
        entry_inst.GetSingleWordInOperand(kEntryPointFunctionIdInIdx);
    entry_points.insert(context()->GetFunction(entry_id));
  }

  for (auto fi = context()->module()->begin(); fi != context()->module()->end();
       ++fi) {
    Function* func = &*fi;
    recordBeginOrEndInFunction(func);
    if (!entry_points.count(func) && extracted_functions_.count(func)) {
      modified |= removeBeginAndEndInstructionsFromFunction(func);
    }
  }

  for (Instruction& entry_inst : context()->module()->entry_points()) {
    uint32_t entry_id =
        entry_inst.GetSingleWordInOperand(kEntryPointFunctionIdInIdx);
    Function* entry_func = context()->GetFunction(entry_id);

    auto execution_model = spv::ExecutionModel(
        entry_inst.GetSingleWordInOperand(kEntryPointExecutionModelInIdx));

    if (execution_model != spv::ExecutionModel::Fragment) {
      continue;
    }

    modified |= processFragmentShaderEntry(entry_func);
  }

  return modified ? Pass::Status::SuccessWithChange
                  : Pass::Status::SuccessWithoutChange;
}

}  // namespace opt
}  // namespace spvtools