// Copyright (c) 2019 The Khronos Group Inc.
// Copyright (c) 2019 Valve Corporation
// Copyright (c) 2019 LunarG Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "convert_to_half_pass.h"

#include "source/opt/ir_builder.h"

namespace spvtools {
namespace opt {
namespace {
// Indices of operands in SPIR-V instructions
constexpr int kImageSampleDrefIdInIdx = 2;
}  // namespace

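// Returns true if |inst| is a core instruction in the set targeted for
// conversion, or a GLSL.std.450 extended instruction in the targeted set.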
bool ConvertToHalfPass::IsArithmetic(Instruction* inst) {
  return target_ops_core_.count(inst->opcode()) != 0 ||
         (inst->opcode() == spv::Op::OpExtInst &&
          inst->GetSingleWordInOperand(0) ==
              context()->get_feature_mgr()->GetExtInstImportId_GLSLstd450() &&
          target_ops_450_.count(inst->GetSingleWordInOperand(1)) != 0);
}

bool ConvertToHalfPass::IsFloat(Instruction* inst, uint32_t width) {
  uint32_t ty_id = inst->type_id();
  if (ty_id == 0) return false;
  return Pass::IsFloat(ty_id, width);
}

bool ConvertToHalfPass::IsStruct(Instruction* inst) {
  uint32_t ty_id = inst->type_id();
  if (ty_id == 0) return false;
  Instruction* ty_inst = Pass::GetBaseType(ty_id);
  return (ty_inst->opcode() == spv::Op::OpTypeStruct);
}

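// Returns true if the result id of |inst| is explicitly decorated with
// RelaxedPrecision.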
bool ConvertToHalfPass::IsDecoratedRelaxed(Instruction* inst) {
  uint32_t r_id = inst->result_id();
  for (auto r_inst : get_decoration_mgr()->GetDecorationsFor(r_id, false))
    if (r_inst->opcode() == spv::Op::OpDecorate &&
        spv::Decoration(r_inst->GetSingleWordInOperand(1)) ==
            spv::Decoration::RelaxedPrecision) {
      return true;
    }
  return false;
}

bool ConvertToHalfPass::IsRelaxed(uint32_t id) {
  return relaxed_ids_set_.count(id) > 0;
}

void ConvertToHalfPass::AddRelaxed(uint32_t id) { relaxed_ids_set_.insert(id); }

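// Returns true if the operands of |inst| may be replaced with relaxed
// (half precision) values. Image instructions are excluded; their operands
// are not relaxed by this pass.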
bool ConvertToHalfPass::CanRelaxOpOperands(Instruction* inst) {
  return image_ops_.count(inst->opcode()) == 0;
}

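// The following helpers return the registered analysis::Type for a float
// scalar, vector or matrix with components of the given bit width.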
analysis::Type* ConvertToHalfPass::FloatScalarType(uint32_t width) {
  analysis::Float float_ty(width);
  return context()->get_type_mgr()->GetRegisteredType(&float_ty);
}

analysis::Type* ConvertToHalfPass::FloatVectorType(uint32_t v_len,
                                                   uint32_t width) {
  analysis::Type* reg_float_ty = FloatScalarType(width);
  analysis::Vector vec_ty(reg_float_ty, v_len);
  return context()->get_type_mgr()->GetRegisteredType(&vec_ty);
}

analysis::Type* ConvertToHalfPass::FloatMatrixType(uint32_t v_cnt,
                                                   uint32_t vty_id,
                                                   uint32_t width) {
  Instruction* vty_inst = get_def_use_mgr()->GetDef(vty_id);
  uint32_t v_len = vty_inst->GetSingleWordInOperand(1);
  analysis::Type* reg_vec_ty = FloatVectorType(v_len, width);
  analysis::Matrix mat_ty(reg_vec_ty, v_cnt);
  return context()->get_type_mgr()->GetRegisteredType(&mat_ty);
}

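// Returns the id of a type equivalent to |ty_id| but with float components
// of |width| bits. Handles float scalar, vector and matrix types.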
uint32_t ConvertToHalfPass::EquivFloatTypeId(uint32_t ty_id, uint32_t width) {
  analysis::Type* reg_equiv_ty;
  Instruction* ty_inst = get_def_use_mgr()->GetDef(ty_id);
  if (ty_inst->opcode() == spv::Op::OpTypeMatrix)
    reg_equiv_ty = FloatMatrixType(ty_inst->GetSingleWordInOperand(1),
                                   ty_inst->GetSingleWordInOperand(0), width);
  else if (ty_inst->opcode() == spv::Op::OpTypeVector)
    reg_equiv_ty = FloatVectorType(ty_inst->GetSingleWordInOperand(1), width);
  else  // spv::Op::OpTypeFloat
    reg_equiv_ty = FloatScalarType(width);
  return context()->get_type_mgr()->GetTypeInstruction(reg_equiv_ty);
}

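// Inserts before |inst| a conversion of |*val_idp| to the equivalent float
// type of |width| bits and updates |*val_idp| to the converted id. An OpUndef
// value gets an OpUndef of the new type rather than a convert.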
void ConvertToHalfPass::GenConvert(uint32_t* val_idp, uint32_t width,
                                   Instruction* inst) {
  Instruction* val_inst = get_def_use_mgr()->GetDef(*val_idp);
  uint32_t ty_id = val_inst->type_id();
  uint32_t nty_id = EquivFloatTypeId(ty_id, width);
  if (nty_id == ty_id) return;
  Instruction* cvt_inst;
  InstructionBuilder builder(
      context(), inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  if (val_inst->opcode() == spv::Op::OpUndef)
    cvt_inst = builder.AddNullaryOp(nty_id, spv::Op::OpUndef);
  else
    cvt_inst = builder.AddUnaryOp(nty_id, spv::Op::OpFConvert, *val_idp);
  *val_idp = cvt_inst->result_id();
}

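// OpFConvert of a matrix is not valid SPIR-V, so rewrite such a convert into
// per-column OpCompositeExtract/OpFConvert pairs followed by an
// OpCompositeConstruct, and turn the original instruction into an
// OpCopyObject. Returns true if |inst| was such a matrix convert.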
bool ConvertToHalfPass::MatConvertCleanup(Instruction* inst) {
  if (inst->opcode() != spv::Op::OpFConvert) return false;
  uint32_t mty_id = inst->type_id();
  Instruction* mty_inst = get_def_use_mgr()->GetDef(mty_id);
  if (mty_inst->opcode() != spv::Op::OpTypeMatrix) return false;
  uint32_t vty_id = mty_inst->GetSingleWordInOperand(0);
  uint32_t v_cnt = mty_inst->GetSingleWordInOperand(1);
  Instruction* vty_inst = get_def_use_mgr()->GetDef(vty_id);
  uint32_t cty_id = vty_inst->GetSingleWordInOperand(0);
  Instruction* cty_inst = get_def_use_mgr()->GetDef(cty_id);
  InstructionBuilder builder(
      context(), inst,
      IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
  // Convert each component vector, combine them with OpCompositeConstruct
  // and replace original instruction.
  uint32_t orig_width = (cty_inst->GetSingleWordInOperand(0) == 16) ? 32 : 16;
  uint32_t orig_mat_id = inst->GetSingleWordInOperand(0);
  uint32_t orig_vty_id = EquivFloatTypeId(vty_id, orig_width);
  std::vector<Operand> opnds = {};
  for (uint32_t vidx = 0; vidx < v_cnt; ++vidx) {
    Instruction* ext_inst = builder.AddIdLiteralOp(
        orig_vty_id, spv::Op::OpCompositeExtract, orig_mat_id, vidx);
    Instruction* cvt_inst =
        builder.AddUnaryOp(vty_id, spv::Op::OpFConvert, ext_inst->result_id());
    opnds.push_back({SPV_OPERAND_TYPE_ID, {cvt_inst->result_id()}});
  }
  uint32_t mat_id = TakeNextId();
  std::unique_ptr<Instruction> mat_inst(new Instruction(
      context(), spv::Op::OpCompositeConstruct, mty_id, mat_id, opnds));
  (void)builder.AddInstruction(std::move(mat_inst));
  context()->ReplaceAllUsesWith(inst->result_id(), mat_id);
  // Turn original instruction into copy so it is valid.
  inst->SetOpcode(spv::Op::OpCopyObject);
  inst->SetResultType(EquivFloatTypeId(mty_id, orig_width));
  get_def_use_mgr()->AnalyzeInstUse(inst);
  return true;
}

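// Removes any RelaxedPrecision decoration on |id|. Returns true if a
// decoration was removed.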
bool ConvertToHalfPass::RemoveRelaxedDecoration(uint32_t id) {
  return context()->get_decoration_mgr()->RemoveDecorationsFrom(
      id, [](const Instruction& dec) {
        if (dec.opcode() == spv::Op::OpDecorate &&
            spv::Decoration(dec.GetSingleWordInOperand(1u)) ==
                spv::Decoration::RelaxedPrecision) {
          return true;
        } else
          return false;
      });
}

bool ConvertToHalfPass::GenHalfArith(Instruction* inst) {
  bool modified = false;
  // Convert all float32 based operands to float16 equivalent and change
  // instruction type to float16 equivalent.
  inst->ForEachInId([&inst, &modified, this](uint32_t* idp) {
    Instruction* op_inst = get_def_use_mgr()->GetDef(*idp);
    if (!IsFloat(op_inst, 32)) return;
    GenConvert(idp, 16, inst);
    modified = true;
  });
  if (IsFloat(inst, 32)) {
    inst->SetResultType(EquivFloatTypeId(inst->type_id(), 16));
    converted_ids_.insert(inst->result_id());
    modified = true;
  }
  if (modified) get_def_use_mgr()->AnalyzeInstUse(inst);
  return modified;
}

bool ConvertToHalfPass::ProcessPhi(Instruction* inst, uint32_t from_width,
                                   uint32_t to_width) {
  // Add converts of any float operands to to_width if they are of from_width.
  // If converting to 16, change type of phi to float16 equivalent and remember
  // result id. Converts need to be added to preceding blocks.
  uint32_t ocnt = 0;
  uint32_t* prev_idp;
  bool modified = false;
  inst->ForEachInId([&ocnt, &prev_idp, &from_width, &to_width, &modified,
                     this](uint32_t* idp) {
    if (ocnt % 2 == 0) {
      prev_idp = idp;
    } else {
      Instruction* val_inst = get_def_use_mgr()->GetDef(*prev_idp);
      if (IsFloat(val_inst, from_width)) {
        BasicBlock* bp = context()->get_instr_block(*idp);
        auto insert_before = bp->tail();
        if (insert_before != bp->begin()) {
          --insert_before;
          if (insert_before->opcode() != spv::Op::OpSelectionMerge &&
              insert_before->opcode() != spv::Op::OpLoopMerge)
            ++insert_before;
        }
        GenConvert(prev_idp, to_width, &*insert_before);
        modified = true;
      }
    }
    ++ocnt;
  });
  if (to_width == 16u) {
    inst->SetResultType(EquivFloatTypeId(inst->type_id(), 16u));
    converted_ids_.insert(inst->result_id());
    modified = true;
  }
  if (modified) get_def_use_mgr()->AnalyzeInstUse(inst);
  return modified;
}

bool ConvertToHalfPass::ProcessConvert(Instruction* inst) {
  // If float32 and relaxed, change to float16 convert
  if (IsFloat(inst, 32) && IsRelaxed(inst->result_id())) {
    inst->SetResultType(EquivFloatTypeId(inst->type_id(), 16));
    get_def_use_mgr()->AnalyzeInstUse(inst);
    converted_ids_.insert(inst->result_id());
  }
  // If operand and result types are the same, change FConvert to CopyObject to
  // keep validator happy; simplification and DCE will clean it up
  // One way this can happen is if an FConvert generated during this pass
  // (likely by ProcessPhi) is later encountered here and its operand has been
  // changed to half.
  uint32_t val_id = inst->GetSingleWordInOperand(0);
  Instruction* val_inst = get_def_use_mgr()->GetDef(val_id);
  if (inst->type_id() == val_inst->type_id())
    inst->SetOpcode(spv::Op::OpCopyObject);
  return true;  // modified
}

bool ConvertToHalfPass::ProcessImageRef(Instruction* inst) {
  bool modified = false;
  // If image reference, only need to convert dref args back to float32
  if (dref_image_ops_.count(inst->opcode()) != 0) {
    uint32_t dref_id = inst->GetSingleWordInOperand(kImageSampleDrefIdInIdx);
    if (converted_ids_.count(dref_id) > 0) {
      GenConvert(&dref_id, 32, inst);
      inst->SetInOperand(kImageSampleDrefIdInIdx, {dref_id});
      get_def_use_mgr()->AnalyzeInstUse(inst);
      modified = true;
    }
  }
  return modified;
}

bool ConvertToHalfPass::ProcessDefault(Instruction* inst) {
  // If non-relaxed instruction has changed operands, need to convert
  // them back to float32
  if (inst->opcode() == spv::Op::OpPhi) return ProcessPhi(inst, 16u, 32u);
  bool modified = false;
  inst->ForEachInId([&inst, &modified, this](uint32_t* idp) {
    if (converted_ids_.count(*idp) == 0) return;
    uint32_t old_id = *idp;
    GenConvert(idp, 32, inst);
    if (*idp != old_id) modified = true;
  });
  if (modified) get_def_use_mgr()->AnalyzeInstUse(inst);
  return modified;
}

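// Converts |inst| to half precision where needed, dispatching on the kind of
// instruction: relaxed arithmetic, relaxed phi, FConvert, image reference, or
// any other (non-relaxed) instruction.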
bool ConvertToHalfPass::GenHalfInst(Instruction* inst) {
  bool modified = false;
  // Remember id for later deletion of RelaxedPrecision decoration
  bool inst_relaxed = IsRelaxed(inst->result_id());
  if (IsArithmetic(inst) && inst_relaxed)
    modified = GenHalfArith(inst);
  else if (inst->opcode() == spv::Op::OpPhi && inst_relaxed)
    modified = ProcessPhi(inst, 32u, 16u);
  else if (inst->opcode() == spv::Op::OpFConvert)
    modified = ProcessConvert(inst);
  else if (image_ops_.count(inst->opcode()) != 0)
    modified = ProcessImageRef(inst);
  else
    modified = ProcessDefault(inst);
  return modified;
}

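// Adds |inst| to the relaxed set if it is a float32 instruction that is
// explicitly decorated RelaxedPrecision, a closure op with no struct operands
// whose float32 operands are all relaxed, or an instruction whose uses are
// all relaxed float32 instructions that can accept relaxed operands.
// Returns true if the relaxed set changed.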
bool ConvertToHalfPass::CloseRelaxInst(Instruction* inst) {
  if (inst->result_id() == 0) return false;
  if (IsRelaxed(inst->result_id())) return false;
  if (!IsFloat(inst, 32)) return false;
  if (IsDecoratedRelaxed(inst)) {
    AddRelaxed(inst->result_id());
    return true;
  }
  if (closure_ops_.count(inst->opcode()) == 0) return false;
  // Can relax if all float operands are relaxed
  bool relax = true;
  inst->ForEachInId([&relax, this](uint32_t* idp) {
    Instruction* op_inst = get_def_use_mgr()->GetDef(*idp);
    if (IsStruct(op_inst)) relax = false;
    if (!IsFloat(op_inst, 32)) return;
    if (!IsRelaxed(*idp)) relax = false;
  });
  if (relax) {
    AddRelaxed(inst->result_id());
    return true;
  }
  // Can relax if all uses are relaxed
  relax = true;
  get_def_use_mgr()->ForEachUser(inst, [&relax, this](Instruction* uinst) {
    if (uinst->result_id() == 0 || !IsFloat(uinst, 32) ||
        (!IsDecoratedRelaxed(uinst) && !IsRelaxed(uinst->result_id())) ||
        !CanRelaxOpOperands(uinst)) {
      relax = false;
      return;
    }
  });
  if (relax) {
    AddRelaxed(inst->result_id());
    return true;
  }
  return false;
}

bool ConvertToHalfPass::ProcessFunction(Function* func) {
  // Do a closure of Relaxed on composite and phi instructions
  bool changed = true;
  while (changed) {
    changed = false;
    cfg()->ForEachBlockInReversePostOrder(
        func->entry().get(), [&changed, this](BasicBlock* bb) {
          for (auto ii = bb->begin(); ii != bb->end(); ++ii)
            changed |= CloseRelaxInst(&*ii);
        });
  }
  // Do convert of relaxed instructions to half precision
  bool modified = false;
  cfg()->ForEachBlockInReversePostOrder(
      func->entry().get(), [&modified, this](BasicBlock* bb) {
        for (auto ii = bb->begin(); ii != bb->end(); ++ii)
          modified |= GenHalfInst(&*ii);
      });
  // Replace invalid converts of matrix into equivalent vector extracts,
  // converts and finally a composite construct
  cfg()->ForEachBlockInReversePostOrder(
      func->entry().get(), [&modified, this](BasicBlock* bb) {
        for (auto ii = bb->begin(); ii != bb->end(); ++ii)
          modified |= MatConvertCleanup(&*ii);
      });
  return modified;
}

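// Processes all reachable functions, adds the Float16 capability if any
// instruction was converted, and strips RelaxedPrecision decorations from
// relaxed ids and module-level values.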
Pass::Status ConvertToHalfPass::ProcessImpl() {
  Pass::ProcessFunction pfn = [this](Function* fp) {
    return ProcessFunction(fp);
  };
  bool modified = context()->ProcessReachableCallTree(pfn);
  // If modified, make sure module has Float16 capability
  if (modified) context()->AddCapability(spv::Capability::Float16);
  // Remove all RelaxedPrecision decorations from instructions and globals
  for (auto c_id : relaxed_ids_set_) {
    modified |= RemoveRelaxedDecoration(c_id);
  }
  for (auto& val : get_module()->types_values()) {
    uint32_t v_id = val.result_id();
    if (v_id != 0) {
      modified |= RemoveRelaxedDecoration(v_id);
    }
  }
  return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
}

Pass::Status ConvertToHalfPass::Process() {
  Initialize();
  return ProcessImpl();
}

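// Populates the instruction sets used by the pass: core and GLSL.std.450 ops
// eligible for half conversion, image ops, dref image ops and the ops that
// participate in the relaxed-precision closure, and clears per-module state.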
void ConvertToHalfPass::Initialize() {
  target_ops_core_ = {
      spv::Op::OpVectorExtractDynamic,
      spv::Op::OpVectorInsertDynamic,
      spv::Op::OpVectorShuffle,
      spv::Op::OpCompositeConstruct,
      spv::Op::OpCompositeInsert,
      spv::Op::OpCompositeExtract,
      spv::Op::OpCopyObject,
      spv::Op::OpTranspose,
      spv::Op::OpConvertSToF,
      spv::Op::OpConvertUToF,
      // spv::Op::OpFConvert,
      // spv::Op::OpQuantizeToF16,
      spv::Op::OpFNegate,
      spv::Op::OpFAdd,
      spv::Op::OpFSub,
      spv::Op::OpFMul,
      spv::Op::OpFDiv,
      spv::Op::OpFMod,
      spv::Op::OpVectorTimesScalar,
      spv::Op::OpMatrixTimesScalar,
      spv::Op::OpVectorTimesMatrix,
      spv::Op::OpMatrixTimesVector,
      spv::Op::OpMatrixTimesMatrix,
      spv::Op::OpOuterProduct,
      spv::Op::OpDot,
      spv::Op::OpSelect,
      spv::Op::OpFOrdEqual,
      spv::Op::OpFUnordEqual,
      spv::Op::OpFOrdNotEqual,
      spv::Op::OpFUnordNotEqual,
      spv::Op::OpFOrdLessThan,
      spv::Op::OpFUnordLessThan,
      spv::Op::OpFOrdGreaterThan,
      spv::Op::OpFUnordGreaterThan,
      spv::Op::OpFOrdLessThanEqual,
      spv::Op::OpFUnordLessThanEqual,
      spv::Op::OpFOrdGreaterThanEqual,
      spv::Op::OpFUnordGreaterThanEqual,
  };
  target_ops_450_ = {
      GLSLstd450Round, GLSLstd450RoundEven, GLSLstd450Trunc, GLSLstd450FAbs,
      GLSLstd450FSign, GLSLstd450Floor, GLSLstd450Ceil, GLSLstd450Fract,
      GLSLstd450Radians, GLSLstd450Degrees, GLSLstd450Sin, GLSLstd450Cos,
      GLSLstd450Tan, GLSLstd450Asin, GLSLstd450Acos, GLSLstd450Atan,
      GLSLstd450Sinh, GLSLstd450Cosh, GLSLstd450Tanh, GLSLstd450Asinh,
      GLSLstd450Acosh, GLSLstd450Atanh, GLSLstd450Atan2, GLSLstd450Pow,
      GLSLstd450Exp, GLSLstd450Log, GLSLstd450Exp2, GLSLstd450Log2,
      GLSLstd450Sqrt, GLSLstd450InverseSqrt, GLSLstd450Determinant,
      GLSLstd450MatrixInverse,
      // TODO(greg-lunarg): GLSLstd450ModfStruct,
      GLSLstd450FMin, GLSLstd450FMax, GLSLstd450FClamp, GLSLstd450FMix,
      GLSLstd450Step, GLSLstd450SmoothStep, GLSLstd450Fma,
      // TODO(greg-lunarg): GLSLstd450FrexpStruct,
      GLSLstd450Ldexp, GLSLstd450Length, GLSLstd450Distance, GLSLstd450Cross,
      GLSLstd450Normalize, GLSLstd450FaceForward, GLSLstd450Reflect,
      GLSLstd450Refract, GLSLstd450NMin, GLSLstd450NMax, GLSLstd450NClamp};
  image_ops_ = {spv::Op::OpImageSampleImplicitLod,
                spv::Op::OpImageSampleExplicitLod,
                spv::Op::OpImageSampleDrefImplicitLod,
                spv::Op::OpImageSampleDrefExplicitLod,
                spv::Op::OpImageSampleProjImplicitLod,
                spv::Op::OpImageSampleProjExplicitLod,
                spv::Op::OpImageSampleProjDrefImplicitLod,
                spv::Op::OpImageSampleProjDrefExplicitLod,
                spv::Op::OpImageFetch,
                spv::Op::OpImageGather,
                spv::Op::OpImageDrefGather,
                spv::Op::OpImageRead,
                spv::Op::OpImageSparseSampleImplicitLod,
                spv::Op::OpImageSparseSampleExplicitLod,
                spv::Op::OpImageSparseSampleDrefImplicitLod,
                spv::Op::OpImageSparseSampleDrefExplicitLod,
                spv::Op::OpImageSparseSampleProjImplicitLod,
                spv::Op::OpImageSparseSampleProjExplicitLod,
                spv::Op::OpImageSparseSampleProjDrefImplicitLod,
                spv::Op::OpImageSparseSampleProjDrefExplicitLod,
                spv::Op::OpImageSparseFetch,
                spv::Op::OpImageSparseGather,
                spv::Op::OpImageSparseDrefGather,
                spv::Op::OpImageSparseTexelsResident,
                spv::Op::OpImageSparseRead};
  dref_image_ops_ = {
      spv::Op::OpImageSampleDrefImplicitLod,
      spv::Op::OpImageSampleDrefExplicitLod,
      spv::Op::OpImageSampleProjDrefImplicitLod,
      spv::Op::OpImageSampleProjDrefExplicitLod,
      spv::Op::OpImageDrefGather,
      spv::Op::OpImageSparseSampleDrefImplicitLod,
      spv::Op::OpImageSparseSampleDrefExplicitLod,
      spv::Op::OpImageSparseSampleProjDrefImplicitLod,
      spv::Op::OpImageSparseSampleProjDrefExplicitLod,
      spv::Op::OpImageSparseDrefGather,
  };
  closure_ops_ = {
      spv::Op::OpVectorExtractDynamic,
      spv::Op::OpVectorInsertDynamic,
      spv::Op::OpVectorShuffle,
      spv::Op::OpCompositeConstruct,
      spv::Op::OpCompositeInsert,
      spv::Op::OpCompositeExtract,
      spv::Op::OpCopyObject,
      spv::Op::OpTranspose,
      spv::Op::OpPhi,
  };
  relaxed_ids_set_.clear();
  converted_ids_.clear();
}

}  // namespace opt
}  // namespace spvtools