1 // Copyright (c) 2017 The Khronos Group Inc.
2 // Copyright (c) 2017 Valve Corporation
3 // Copyright (c) 2017 LunarG Inc.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16
17 #include "source/opt/common_uniform_elim_pass.h"
18 #include "source/cfa.h"
19 #include "source/opt/ir_context.h"
20
21 namespace spvtools {
22 namespace opt {
23
namespace {

// Word indices of specific in-operands on the instructions this pass
// inspects, for use with GetSingleWordInOperand().
const uint32_t kAccessChainPtrIdInIdx = 0;
const uint32_t kTypePointerStorageClassInIdx = 0;
const uint32_t kTypePointerTypeIdInIdx = 1;
const uint32_t kConstantValueInIdx = 0;
const uint32_t kExtractCompositeIdInIdx = 0;
const uint32_t kExtractIdx0InIdx = 1;
const uint32_t kStorePtrIdInIdx = 0;
const uint32_t kLoadPtrIdInIdx = 0;
const uint32_t kCopyObjectOperandInIdx = 0;
const uint32_t kTypeIntWidthInIdx = 0;

}  // anonymous namespace
38
IsNonPtrAccessChain(const SpvOp opcode) const39 bool CommonUniformElimPass::IsNonPtrAccessChain(const SpvOp opcode) const {
40 return opcode == SpvOpAccessChain || opcode == SpvOpInBoundsAccessChain;
41 }
42
IsSamplerOrImageType(const Instruction * typeInst) const43 bool CommonUniformElimPass::IsSamplerOrImageType(
44 const Instruction* typeInst) const {
45 switch (typeInst->opcode()) {
46 case SpvOpTypeSampler:
47 case SpvOpTypeImage:
48 case SpvOpTypeSampledImage:
49 return true;
50 default:
51 break;
52 }
53 if (typeInst->opcode() != SpvOpTypeStruct) return false;
54 // Return true if any member is a sampler or image
55 return !typeInst->WhileEachInId([this](const uint32_t* tid) {
56 const Instruction* compTypeInst = get_def_use_mgr()->GetDef(*tid);
57 if (IsSamplerOrImageType(compTypeInst)) {
58 return false;
59 }
60 return true;
61 });
62 }
63
IsSamplerOrImageVar(uint32_t varId) const64 bool CommonUniformElimPass::IsSamplerOrImageVar(uint32_t varId) const {
65 const Instruction* varInst = get_def_use_mgr()->GetDef(varId);
66 assert(varInst->opcode() == SpvOpVariable);
67 const uint32_t varTypeId = varInst->type_id();
68 const Instruction* varTypeInst = get_def_use_mgr()->GetDef(varTypeId);
69 const uint32_t varPteTypeId =
70 varTypeInst->GetSingleWordInOperand(kTypePointerTypeIdInIdx);
71 Instruction* varPteTypeInst = get_def_use_mgr()->GetDef(varPteTypeId);
72 return IsSamplerOrImageType(varPteTypeInst);
73 }
74
GetPtr(Instruction * ip,uint32_t * objId)75 Instruction* CommonUniformElimPass::GetPtr(Instruction* ip, uint32_t* objId) {
76 const SpvOp op = ip->opcode();
77 assert(op == SpvOpStore || op == SpvOpLoad);
78 *objId = ip->GetSingleWordInOperand(op == SpvOpStore ? kStorePtrIdInIdx
79 : kLoadPtrIdInIdx);
80 Instruction* ptrInst = get_def_use_mgr()->GetDef(*objId);
81 while (ptrInst->opcode() == SpvOpCopyObject) {
82 *objId = ptrInst->GetSingleWordInOperand(kCopyObjectOperandInIdx);
83 ptrInst = get_def_use_mgr()->GetDef(*objId);
84 }
85 Instruction* objInst = ptrInst;
86 while (objInst->opcode() != SpvOpVariable &&
87 objInst->opcode() != SpvOpFunctionParameter) {
88 if (IsNonPtrAccessChain(objInst->opcode())) {
89 *objId = objInst->GetSingleWordInOperand(kAccessChainPtrIdInIdx);
90 } else {
91 assert(objInst->opcode() == SpvOpCopyObject);
92 *objId = objInst->GetSingleWordInOperand(kCopyObjectOperandInIdx);
93 }
94 objInst = get_def_use_mgr()->GetDef(*objId);
95 }
96 return ptrInst;
97 }
98
IsVolatileStruct(uint32_t type_id)99 bool CommonUniformElimPass::IsVolatileStruct(uint32_t type_id) {
100 assert(get_def_use_mgr()->GetDef(type_id)->opcode() == SpvOpTypeStruct);
101 return !get_decoration_mgr()->WhileEachDecoration(
102 type_id, SpvDecorationVolatile, [](const Instruction&) { return false; });
103 }
104
// Returns true if |AccessChainInst| traverses, at any level of its index
// list, a struct type decorated Volatile.
bool CommonUniformElimPass::IsAccessChainToVolatileStructType(
    const Instruction& AccessChainInst) {
  assert(AccessChainInst.opcode() == SpvOpAccessChain);

  uint32_t ptr_id = AccessChainInst.GetSingleWordInOperand(0);
  const Instruction* ptr_inst = get_def_use_mgr()->GetDef(ptr_id);
  uint32_t pointee_type_id = GetPointeeTypeId(ptr_inst);
  const uint32_t num_operands = AccessChainInst.NumOperands();

  // walk the type tree:
  // Absolute operand 3 is the first index operand (operands 0-2 are result
  // type, result id, and base pointer).
  for (uint32_t idx = 3; idx < num_operands; ++idx) {
    Instruction* pointee_type = get_def_use_mgr()->GetDef(pointee_type_id);

    switch (pointee_type->opcode()) {
      case SpvOpTypeMatrix:
      case SpvOpTypeVector:
      case SpvOpTypeArray:
      case SpvOpTypeRuntimeArray:
        // Homogeneous aggregates: the element type is operand 1, regardless
        // of the index value.
        pointee_type_id = pointee_type->GetSingleWordOperand(1);
        break;
      case SpvOpTypeStruct:
        // check for volatile decorations:
        if (IsVolatileStruct(pointee_type_id)) return true;

        // Descend into the selected member unless this is the final index.
        if (idx < num_operands - 1) {
          const uint32_t index_id = AccessChainInst.GetSingleWordOperand(idx);
          const Instruction* index_inst = get_def_use_mgr()->GetDef(index_id);
          // Word 2 of an OpConstant is its literal value.
          uint32_t index_value = index_inst->GetSingleWordOperand(
              2);  // TODO: replace with GetUintValueFromConstant()
          pointee_type_id = pointee_type->GetSingleWordInOperand(index_value);
        }
        break;
      default:
        assert(false && "Unhandled pointee type.");
    }
  }
  return false;
}
143
IsVolatileLoad(const Instruction & loadInst)144 bool CommonUniformElimPass::IsVolatileLoad(const Instruction& loadInst) {
145 assert(loadInst.opcode() == SpvOpLoad);
146 // Check if this Load instruction has Volatile Memory Access flag
147 if (loadInst.NumOperands() == 4) {
148 uint32_t memory_access_mask = loadInst.GetSingleWordOperand(3);
149 if (memory_access_mask & SpvMemoryAccessVolatileMask) return true;
150 }
151 // If we load a struct directly (result type is struct),
152 // check if the struct is decorated volatile
153 uint32_t type_id = loadInst.type_id();
154 if (get_def_use_mgr()->GetDef(type_id)->opcode() == SpvOpTypeStruct)
155 return IsVolatileStruct(type_id);
156 else
157 return false;
158 }
159
IsUniformVar(uint32_t varId)160 bool CommonUniformElimPass::IsUniformVar(uint32_t varId) {
161 const Instruction* varInst =
162 get_def_use_mgr()->id_to_defs().find(varId)->second;
163 if (varInst->opcode() != SpvOpVariable) return false;
164 const uint32_t varTypeId = varInst->type_id();
165 const Instruction* varTypeInst =
166 get_def_use_mgr()->id_to_defs().find(varTypeId)->second;
167 return varTypeInst->GetSingleWordInOperand(kTypePointerStorageClassInIdx) ==
168 SpvStorageClassUniform ||
169 varTypeInst->GetSingleWordInOperand(kTypePointerStorageClassInIdx) ==
170 SpvStorageClassUniformConstant;
171 }
172
HasUnsupportedDecorates(uint32_t id) const173 bool CommonUniformElimPass::HasUnsupportedDecorates(uint32_t id) const {
174 return !get_def_use_mgr()->WhileEachUser(id, [this](Instruction* user) {
175 if (IsNonTypeDecorate(user->opcode())) return false;
176 return true;
177 });
178 }
179
HasOnlyNamesAndDecorates(uint32_t id) const180 bool CommonUniformElimPass::HasOnlyNamesAndDecorates(uint32_t id) const {
181 return get_def_use_mgr()->WhileEachUser(id, [this](Instruction* user) {
182 SpvOp op = user->opcode();
183 if (op != SpvOpName && !IsNonTypeDecorate(op)) return false;
184 return true;
185 });
186 }
187
DeleteIfUseless(Instruction * inst)188 void CommonUniformElimPass::DeleteIfUseless(Instruction* inst) {
189 const uint32_t resId = inst->result_id();
190 assert(resId != 0);
191 if (HasOnlyNamesAndDecorates(resId)) {
192 context()->KillInst(inst);
193 }
194 }
195
ReplaceAndDeleteLoad(Instruction * loadInst,uint32_t replId,Instruction * ptrInst)196 Instruction* CommonUniformElimPass::ReplaceAndDeleteLoad(Instruction* loadInst,
197 uint32_t replId,
198 Instruction* ptrInst) {
199 const uint32_t loadId = loadInst->result_id();
200 context()->KillNamesAndDecorates(loadId);
201 (void)context()->ReplaceAllUsesWith(loadId, replId);
202 // remove load instruction
203 Instruction* next_instruction = context()->KillInst(loadInst);
204 // if access chain, see if it can be removed as well
205 if (IsNonPtrAccessChain(ptrInst->opcode())) DeleteIfUseless(ptrInst);
206 return next_instruction;
207 }
208
// Builds the two-instruction replacement for a load through constant-index
// uniform access chain |ptrInst|: (1) an OpLoad of the entire underlying
// uniform variable, and (2) an OpCompositeExtract selecting the originally
// addressed component using the chain's constant indices as literals. Both
// new instructions are appended to |newInsts|; the extract's result id is
// returned through |resultId|.
void CommonUniformElimPass::GenACLoadRepl(
    const Instruction* ptrInst,
    std::vector<std::unique_ptr<Instruction>>* newInsts, uint32_t* resultId) {
  // Build and append Load
  const uint32_t ldResultId = TakeNextId();
  const uint32_t varId =
      ptrInst->GetSingleWordInOperand(kAccessChainPtrIdInIdx);
  const Instruction* varInst = get_def_use_mgr()->GetDef(varId);
  assert(varInst->opcode() == SpvOpVariable);
  // The load's result type is the variable's pointee type.
  const uint32_t varPteTypeId = GetPointeeTypeId(varInst);
  std::vector<Operand> load_in_operands;
  load_in_operands.push_back(Operand(spv_operand_type_t::SPV_OPERAND_TYPE_ID,
                                     std::initializer_list<uint32_t>{varId}));
  std::unique_ptr<Instruction> newLoad(new Instruction(
      context(), SpvOpLoad, varPteTypeId, ldResultId, load_in_operands));
  get_def_use_mgr()->AnalyzeInstDefUse(&*newLoad);
  newInsts->emplace_back(std::move(newLoad));

  // Build and append Extract
  const uint32_t extResultId = TakeNextId();
  // The extract's result type is the access chain's pointee type.
  const uint32_t ptrPteTypeId = GetPointeeTypeId(ptrInst);
  std::vector<Operand> ext_in_opnds;
  ext_in_opnds.push_back(Operand(spv_operand_type_t::SPV_OPERAND_TYPE_ID,
                                 std::initializer_list<uint32_t>{ldResultId}));
  uint32_t iidIdx = 0;
  // Turn each index operand of the chain (every in-operand after the base
  // pointer at index 0) into a literal index on the extract. Callers have
  // already verified the indices are OpConstants.
  ptrInst->ForEachInId([&iidIdx, &ext_in_opnds, this](const uint32_t* iid) {
    if (iidIdx > 0) {
      const Instruction* cInst = get_def_use_mgr()->GetDef(*iid);
      uint32_t val = cInst->GetSingleWordInOperand(kConstantValueInIdx);
      ext_in_opnds.push_back(
          Operand(spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
                  std::initializer_list<uint32_t>{val}));
    }
    ++iidIdx;
  });
  std::unique_ptr<Instruction> newExt(
      new Instruction(context(), SpvOpCompositeExtract, ptrPteTypeId,
                      extResultId, ext_in_opnds));
  get_def_use_mgr()->AnalyzeInstDefUse(&*newExt);
  newInsts->emplace_back(std::move(newExt));
  *resultId = extResultId;
}
251
IsConstantIndexAccessChain(Instruction * acp)252 bool CommonUniformElimPass::IsConstantIndexAccessChain(Instruction* acp) {
253 uint32_t inIdx = 0;
254 return acp->WhileEachInId([&inIdx, this](uint32_t* tid) {
255 if (inIdx > 0) {
256 Instruction* opInst = get_def_use_mgr()->GetDef(*tid);
257 if (opInst->opcode() != SpvOpConstant) return false;
258 }
259 ++inIdx;
260 return true;
261 });
262 }
263
// Converts each load through a constant-index uniform access chain into a
// load of the whole uniform variable followed by an OpCompositeExtract
// (via GenACLoadRepl). Returns true if |func| was modified.
bool CommonUniformElimPass::UniformAccessChainConvert(Function* func) {
  bool modified = false;
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (Instruction* inst = &*bi->begin(); inst; inst = inst->NextNode()) {
      if (inst->opcode() != SpvOpLoad) continue;
      uint32_t varId;
      Instruction* ptrInst = GetPtr(inst, &varId);
      if (!IsNonPtrAccessChain(ptrInst->opcode())) continue;
      // Do not convert nested access chains
      if (ptrInst->GetSingleWordInOperand(kAccessChainPtrIdInIdx) != varId)
        continue;
      if (!IsUniformVar(varId)) continue;
      if (!IsConstantIndexAccessChain(ptrInst)) continue;
      if (HasUnsupportedDecorates(inst->result_id())) continue;
      if (HasUnsupportedDecorates(ptrInst->result_id())) continue;
      // Volatile accesses must be left untouched.
      if (IsVolatileLoad(*inst)) continue;
      if (IsAccessChainToVolatileStructType(*ptrInst)) continue;
      std::vector<std::unique_ptr<Instruction>> newInsts;
      uint32_t replId;
      GenACLoadRepl(ptrInst, &newInsts, &replId);
      // Kill the old load first; |inst| is re-pointed at the following
      // instruction, then the replacement pair is inserted before it so
      // iteration resumes after the new code.
      inst = ReplaceAndDeleteLoad(inst, replId, ptrInst);
      inst = inst->InsertBefore(std::move(newInsts));
      modified = true;
    }
  }
  return modified;
}
291
ComputeStructuredSuccessors(Function * func)292 void CommonUniformElimPass::ComputeStructuredSuccessors(Function* func) {
293 block2structured_succs_.clear();
294 for (auto& blk : *func) {
295 // If header, make merge block first successor.
296 uint32_t mbid = blk.MergeBlockIdIfAny();
297 if (mbid != 0) {
298 block2structured_succs_[&blk].push_back(cfg()->block(mbid));
299 uint32_t cbid = blk.ContinueBlockIdIfAny();
300 if (cbid != 0) {
301 block2structured_succs_[&blk].push_back(cfg()->block(mbid));
302 }
303 }
304 // add true successors
305 const auto& const_blk = blk;
306 const_blk.ForEachSuccessorLabel([&blk, this](const uint32_t sbid) {
307 block2structured_succs_[&blk].push_back(cfg()->block(sbid));
308 });
309 }
310 }
311
ComputeStructuredOrder(Function * func,std::list<BasicBlock * > * order)312 void CommonUniformElimPass::ComputeStructuredOrder(
313 Function* func, std::list<BasicBlock*>* order) {
314 // Compute structured successors and do DFS
315 ComputeStructuredSuccessors(func);
316 auto ignore_block = [](cbb_ptr) {};
317 auto ignore_edge = [](cbb_ptr, cbb_ptr) {};
318 auto get_structured_successors = [this](const BasicBlock* block) {
319 return &(block2structured_succs_[block]);
320 };
321 // TODO(greg-lunarg): Get rid of const_cast by making moving const
322 // out of the cfa.h prototypes and into the invoking code.
323 auto post_order = [&](cbb_ptr b) {
324 order->push_front(const_cast<BasicBlock*>(b));
325 };
326
327 order->clear();
328 CFA<BasicBlock>::DepthFirstTraversal(&*func->begin(),
329 get_structured_successors, ignore_block,
330 post_order, ignore_edge);
331 }
332
// Eliminates redundant loads of non-sampler/image uniform variables across
// the whole function: the first load in a dominating position is reused by
// all later loads; when the first load sits inside control flow, a copy is
// hoisted to the current dominating insertion point. Returns true if
// |func| was modified.
bool CommonUniformElimPass::CommonUniformLoadElimination(Function* func) {
  // Process all blocks in structured order. This is just one way (the
  // simplest?) to keep track of the most recent block outside of control
  // flow, used to copy common instructions, guaranteed to dominate all
  // following load sites.
  std::list<BasicBlock*> structuredOrder;
  ComputeStructuredOrder(func, &structuredOrder);
  uniform2load_id_.clear();
  bool modified = false;
  // Find insertion point in first block to copy non-dominating loads.
  auto insertItr = func->begin()->begin();
  while (insertItr->opcode() == SpvOpVariable ||
         insertItr->opcode() == SpvOpNop)
    ++insertItr;
  // Update insertItr until it will not be removed. Without this code,
  // ReplaceAndDeleteLoad() can set |insertItr| as a dangling pointer.
  while (IsUniformLoadToBeRemoved(&*insertItr)) ++insertItr;
  // |mergeBlockId| is nonzero while we are inside an outermost control
  // construct; it holds the id of that construct's merge block.
  uint32_t mergeBlockId = 0;
  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end(); ++bi) {
    BasicBlock* bp = *bi;
    // Check if we are exiting outermost control construct. If so, remember
    // new load insertion point. Trying to keep register pressure down.
    if (mergeBlockId == bp->id()) {
      mergeBlockId = 0;
      insertItr = bp->begin();
      // Update insertItr until it will not be removed. Without this code,
      // ReplaceAndDeleteLoad() can set |insertItr| as a dangling pointer.
      while (IsUniformLoadToBeRemoved(&*insertItr)) ++insertItr;
    }
    for (Instruction* inst = &*bp->begin(); inst; inst = inst->NextNode()) {
      if (inst->opcode() != SpvOpLoad) continue;
      uint32_t varId;
      Instruction* ptrInst = GetPtr(inst, &varId);
      // Only whole-variable loads are handled here; sampler/image uniforms
      // are handled per-block in CommonUniformLoadElimBlock().
      if (ptrInst->opcode() != SpvOpVariable) continue;
      if (!IsUniformVar(varId)) continue;
      if (IsSamplerOrImageVar(varId)) continue;
      if (HasUnsupportedDecorates(inst->result_id())) continue;
      if (IsVolatileLoad(*inst)) continue;
      uint32_t replId;
      const auto uItr = uniform2load_id_.find(varId);
      if (uItr != uniform2load_id_.end()) {
        replId = uItr->second;
      } else {
        if (mergeBlockId == 0) {
          // Load is in dominating block; just remember it
          uniform2load_id_[varId] = inst->result_id();
          continue;
        } else {
          // Copy load into most recent dominating block and remember it
          replId = TakeNextId();
          std::unique_ptr<Instruction> newLoad(new Instruction(
              context(), SpvOpLoad, inst->type_id(), replId,
              {{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {varId}}}));
          get_def_use_mgr()->AnalyzeInstDefUse(&*newLoad);
          insertItr = insertItr.InsertBefore(std::move(newLoad));
          ++insertItr;
          uniform2load_id_[varId] = replId;
        }
      }
      inst = ReplaceAndDeleteLoad(inst, replId, ptrInst);
      modified = true;
    }
    // If we are outside of any control construct and entering one, remember
    // the id of the merge block
    if (mergeBlockId == 0) {
      mergeBlockId = bp->MergeBlockIdIfAny();
    }
  }
  return modified;
}
403
// Eliminates redundant loads of sampler/image uniform variables within each
// single block. The cache (uniform2load_id_) is reset per block, so reuse
// never crosses block boundaries. Returns true if |func| was modified.
bool CommonUniformElimPass::CommonUniformLoadElimBlock(Function* func) {
  bool modified = false;
  for (auto& blk : *func) {
    uniform2load_id_.clear();
    for (Instruction* inst = &*blk.begin(); inst; inst = inst->NextNode()) {
      if (inst->opcode() != SpvOpLoad) continue;
      uint32_t varId;
      Instruction* ptrInst = GetPtr(inst, &varId);
      if (ptrInst->opcode() != SpvOpVariable) continue;
      if (!IsUniformVar(varId)) continue;
      // Only sampler/image uniforms here; other uniforms are handled
      // function-wide in CommonUniformLoadElimination().
      if (!IsSamplerOrImageVar(varId)) continue;
      if (HasUnsupportedDecorates(inst->result_id())) continue;
      if (IsVolatileLoad(*inst)) continue;
      uint32_t replId;
      const auto uItr = uniform2load_id_.find(varId);
      if (uItr != uniform2load_id_.end()) {
        replId = uItr->second;
      } else {
        // First load of this variable in the block: remember it for reuse.
        uniform2load_id_[varId] = inst->result_id();
        continue;
      }
      inst = ReplaceAndDeleteLoad(inst, replId, ptrInst);
      modified = true;
    }
  }
  return modified;
}
431
// Coalesces duplicate single-index OpCompositeExtracts of the same
// composite: a fresh extract is inserted immediately after the composite's
// defining instruction and all duplicates are replaced with it. Returns
// true if |func| was modified.
bool CommonUniformElimPass::CommonExtractElimination(Function* func) {
  // Find all composite ids with duplicate extracts.
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (auto ii = bi->begin(); ii != bi->end(); ++ii) {
      if (ii->opcode() != SpvOpCompositeExtract) continue;
      // TODO(greg-lunarg): Support multiple indices
      if (ii->NumInOperands() > 2) continue;
      if (HasUnsupportedDecorates(ii->result_id())) continue;
      uint32_t compId = ii->GetSingleWordInOperand(kExtractCompositeIdInIdx);
      uint32_t idx = ii->GetSingleWordInOperand(kExtractIdx0InIdx);
      comp2idx2inst_[compId][idx].push_back(&*ii);
    }
  }
  // For all defs of ids with duplicate extracts, insert new extracts
  // after def, and replace and delete old extracts
  bool modified = false;
  for (auto bi = func->begin(); bi != func->end(); ++bi) {
    for (auto ii = bi->begin(); ii != bi->end(); ++ii) {
      const auto cItr = comp2idx2inst_.find(ii->result_id());
      if (cItr == comp2idx2inst_.end()) continue;
      for (auto idxItr : cItr->second) {
        // Only coalesce a (composite, index) pair extracted more than once.
        if (idxItr.second.size() < 2) continue;
        uint32_t replId = TakeNextId();
        std::unique_ptr<Instruction> newExtract(
            idxItr.second.front()->Clone(context()));
        newExtract->SetResultId(replId);
        get_def_use_mgr()->AnalyzeInstDefUse(&*newExtract);
        // Step past the defining instruction, then insert the clone so it
        // lands immediately after the definition; |ii| ends up on the
        // inserted extract.
        ++ii;
        ii = ii.InsertBefore(std::move(newExtract));
        for (auto instItr : idxItr.second) {
          uint32_t resId = instItr->result_id();
          context()->KillNamesAndDecorates(resId);
          (void)context()->ReplaceAllUsesWith(resId, replId);
          context()->KillInst(instItr);
        }
        modified = true;
      }
    }
  }
  return modified;
}
473
EliminateCommonUniform(Function * func)474 bool CommonUniformElimPass::EliminateCommonUniform(Function* func) {
475 bool modified = false;
476 modified |= UniformAccessChainConvert(func);
477 modified |= CommonUniformLoadElimination(func);
478 modified |= CommonExtractElimination(func);
479
480 modified |= CommonUniformLoadElimBlock(func);
481 return modified;
482 }
483
Initialize()484 void CommonUniformElimPass::Initialize() {
485 // Clear collections.
486 comp2idx2inst_.clear();
487
488 // Initialize extension whitelist
489 InitExtensions();
490 }
491
AllExtensionsSupported() const492 bool CommonUniformElimPass::AllExtensionsSupported() const {
493 // If any extension not in whitelist, return false
494 for (auto& ei : get_module()->extensions()) {
495 const char* extName =
496 reinterpret_cast<const char*>(&ei.GetInOperand(0).words[0]);
497 if (extensions_whitelist_.find(extName) == extensions_whitelist_.end())
498 return false;
499 }
500 return true;
501 }
502
// Checks the pass's preconditions and, if they hold, runs the elimination
// over all entry-point functions. Any unsupported feature short-circuits
// with SuccessWithoutChange.
Pass::Status CommonUniformElimPass::ProcessImpl() {
  // Assumes all control flow structured.
  // TODO(greg-lunarg): Do SSA rewrite for non-structured control flow
  if (!context()->get_feature_mgr()->HasCapability(SpvCapabilityShader))
    return Status::SuccessWithoutChange;
  // Assumes logical addressing only
  // TODO(greg-lunarg): Add support for physical addressing
  if (context()->get_feature_mgr()->HasCapability(SpvCapabilityAddresses))
    return Status::SuccessWithoutChange;
  // Do not process if any disallowed extensions are enabled
  if (!AllExtensionsSupported()) return Status::SuccessWithoutChange;
  // Do not process if module contains OpGroupDecorate. Additional
  // support required in KillNamesAndDecorates().
  // TODO(greg-lunarg): Add support for OpGroupDecorate
  for (auto& ai : get_module()->annotations())
    if (ai.opcode() == SpvOpGroupDecorate) return Status::SuccessWithoutChange;
  // If non-32-bit integer type in module, terminate processing
  // TODO(): Handle non-32-bit integer constants in access chains
  for (const Instruction& inst : get_module()->types_values())
    if (inst.opcode() == SpvOpTypeInt &&
        inst.GetSingleWordInOperand(kTypeIntWidthInIdx) != 32)
      return Status::SuccessWithoutChange;
  // Process entry point functions
  ProcessFunction pfn = [this](Function* fp) {
    return EliminateCommonUniform(fp);
  };
  bool modified = context()->ProcessEntryPointCallTree(pfn);
  return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
}
532
// Default construction; per-run state is reset in Initialize().
CommonUniformElimPass::CommonUniformElimPass() = default;
534
Process()535 Pass::Status CommonUniformElimPass::Process() {
536 Initialize();
537 return ProcessImpl();
538 }
539
InitExtensions()540 void CommonUniformElimPass::InitExtensions() {
541 extensions_whitelist_.clear();
542 extensions_whitelist_.insert({
543 "SPV_AMD_shader_explicit_vertex_parameter",
544 "SPV_AMD_shader_trinary_minmax",
545 "SPV_AMD_gcn_shader",
546 "SPV_KHR_shader_ballot",
547 "SPV_AMD_shader_ballot",
548 "SPV_AMD_gpu_shader_half_float",
549 "SPV_KHR_shader_draw_parameters",
550 "SPV_KHR_subgroup_vote",
551 "SPV_KHR_16bit_storage",
552 "SPV_KHR_device_group",
553 "SPV_KHR_multiview",
554 "SPV_NVX_multiview_per_view_attributes",
555 "SPV_NV_viewport_array2",
556 "SPV_NV_stereo_view_rendering",
557 "SPV_NV_sample_mask_override_coverage",
558 "SPV_NV_geometry_shader_passthrough",
559 "SPV_AMD_texture_gather_bias_lod",
560 "SPV_KHR_storage_buffer_storage_class",
561 // SPV_KHR_variable_pointers
562 // Currently do not support extended pointer expressions
563 "SPV_AMD_gpu_shader_int16",
564 "SPV_KHR_post_depth_coverage",
565 "SPV_KHR_shader_atomic_counter_ops",
566 "SPV_EXT_shader_stencil_export",
567 "SPV_EXT_shader_viewport_index_layer",
568 "SPV_AMD_shader_image_load_store_lod",
569 "SPV_AMD_shader_fragment_mask",
570 "SPV_EXT_fragment_fully_covered",
571 "SPV_AMD_gpu_shader_half_float_fetch",
572 "SPV_GOOGLE_decorate_string",
573 "SPV_GOOGLE_hlsl_functionality1",
574 "SPV_NV_shader_subgroup_partitioned",
575 "SPV_EXT_descriptor_indexing",
576 "SPV_NV_fragment_shader_barycentric",
577 "SPV_NV_compute_shader_derivatives",
578 "SPV_NV_shader_image_footprint",
579 "SPV_NV_shading_rate",
580 "SPV_NV_mesh_shader",
581 "SPV_NV_ray_tracing",
582 "SPV_EXT_fragment_invocation_density",
583 });
584 }
585
586 } // namespace opt
587 } // namespace spvtools
588