1 /*
2 * Copyright 2018-2021 Arm Limited
3 * SPDX-License-Identifier: Apache-2.0 OR MIT
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 /*
19 * At your option, you may choose to accept this material under either:
20 * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
21 * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
22 */
23
24 #include "spirv_parser.hpp"
25 #include <assert.h>
26
27 using namespace std;
28 using namespace spv;
29
30 namespace SPIRV_CROSS_NAMESPACE
31 {
Parser(vector<uint32_t> spirv)32 Parser::Parser(vector<uint32_t> spirv)
33 {
34 ir.spirv = move(spirv);
35 }
36
Parser(const uint32_t * spirv_data,size_t word_count)37 Parser::Parser(const uint32_t *spirv_data, size_t word_count)
38 {
39 ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
40 }
41
decoration_is_string(Decoration decoration)42 static bool decoration_is_string(Decoration decoration)
43 {
44 switch (decoration)
45 {
46 case DecorationHlslSemanticGOOGLE:
47 return true;
48
49 default:
50 return false;
51 }
52 }
53
// Reverses the byte order of a 32-bit word (used to flip SPIR-V endianness).
static inline uint32_t swap_endian(uint32_t v)
{
	uint32_t byte0 = (v >> 24) & 0x000000ffu;
	uint32_t byte1 = (v >> 8) & 0x0000ff00u;
	uint32_t byte2 = (v << 8) & 0x00ff0000u;
	uint32_t byte3 = (v << 24) & 0xff000000u;
	return byte0 | byte1 | byte2 | byte3;
}
58
// Checks the version word of the SPIR-V header against the set of
// versions this parser knows how to consume.
static bool is_valid_spirv_version(uint32_t version)
{
	static const uint32_t accepted_versions[] = {
		99,      // Allow v99 since it tends to just work.
		0x10000, // SPIR-V 1.0
		0x10100, // SPIR-V 1.1
		0x10200, // SPIR-V 1.2
		0x10300, // SPIR-V 1.3
		0x10400, // SPIR-V 1.4
		0x10500, // SPIR-V 1.5
	};

	for (auto accepted : accepted_versions)
		if (version == accepted)
			return true;
	return false;
}
77
// Parses the entire SPIR-V module stored in ir.spirv: validates the
// 5-word header, slices the word stream into Instruction records, then
// dispatches each instruction to parse(const Instruction &).
// Throws via SPIRV_CROSS_THROW on any malformed input.
void Parser::parse()
{
	auto &spirv = ir.spirv;

	// A valid module must contain at least the 5-word header.
	auto len = spirv.size();
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

	// Header word 3 is the ID bound; every ID in the module is < bound.
	uint32_t bound = s[3];

	// Sanity-cap the bound so a corrupt header cannot trigger a huge allocation.
	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

	ir.set_id_bounds(bound);

	// Instructions begin immediately after the 5-word header.
	uint32_t offset = 5;

	SmallVector<Instruction> instructions;
	while (offset < len)
	{
		Instruction instr = {};
		// First word of each instruction packs opcode (low 16 bits)
		// and total word count (high 16 bits).
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		// offset/length describe only the operand words, excluding the opcode word.
		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

	// Types derived from OpTypeForwardPointer could not be fully resolved
	// while streaming; copy the concrete pointee's info into them now.
	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();

	// A well-formed module leaves no function/block open and declares an entry point.
	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
	if (ir.default_entry_point == 0)
		SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
}
146
stream(const Instruction & instr) const147 const uint32_t *Parser::stream(const Instruction &instr) const
148 {
149 // If we're not going to use any arguments, just return nullptr.
150 // We want to avoid case where we return an out of range pointer
151 // that trips debug assertions on some platforms.
152 if (!instr.length)
153 return nullptr;
154
155 if (instr.offset + instr.length > ir.spirv.size())
156 SPIRV_CROSS_THROW("Compiler::stream() out of range.");
157 return &ir.spirv[instr.offset];
158 }
159
extract_string(const vector<uint32_t> & spirv,uint32_t offset)160 static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
161 {
162 string ret;
163 for (uint32_t i = offset; i < spirv.size(); i++)
164 {
165 uint32_t w = spirv[i];
166
167 for (uint32_t j = 0; j < 4; j++, w >>= 8)
168 {
169 char c = w & 0xff;
170 if (c == '\0')
171 return ret;
172 ret += c;
173 }
174 }
175
176 SPIRV_CROSS_THROW("String was not terminated before EOF");
177 }
178
// Parses one SPIR-V instruction and updates the IR: registers types,
// constants, variables, functions, blocks, names and decorations.
// Opcodes without dedicated handling fall through to the default case
// and are appended to the current block for the compiler stage to consume.
void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	switch (op)
	{
	// Purely informational opcodes; nothing to record.
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpModuleProcessed:
		break;

	case OpString:
	{
		set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemoryModel:
		ir.addressing_model = static_cast<AddressingModel>(ops[0]);
		ir.memory_model = static_cast<MemoryModel>(ops[1]);
		break;

	case OpSource:
	{
		// Record the source language/version so backends can pick dialects.
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		// OpUndef may appear inside a function body; keep it in stream order.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(move(ext));
		break;
	}

	case OpExtInstImport:
	{
		// Map the imported extended-instruction set name to a known enum.
		uint32_t id = ops[0];
		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			set<SPIRExtension>(id, SPIRExtension::GLSL);
		else if (ext == "DebugInfo")
			set<SPIRExtension>(id, SPIRExtension::SPV_debug_info);
		else if (ext == "SPV_AMD_shader_ballot")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot);
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter);
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax);
		else if (ext == "SPV_AMD_gcn_shader")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader);
		else
			set<SPIRExtension>(id, SPIRExtension::Unsupported);

		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpExtInst:
	{
		// The SPIR-V debug information extended instructions might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpEntryPoint:
	{
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need nul-terminator and consume the whole word.
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);

		// Remaining operands are the interface variable IDs.
		for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
			e.interface_variables.push_back(ops[i]);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		// Only a few modes carry extra operands we care about.
		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		// Operands come in (target, member-index) pairs.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
		// so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
			// Remember where in the word stream the argument lives so it can be patched later.
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2] != 0;
		auto &type = set<SPIRType>(id);
		type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
	// since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id);

		vecbase = base;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id);

		matrixbase = base;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		auto &arraybase = set<SPIRType>(id);

		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);

		arraybase = base;
		arraybase.parent_type = tid;

		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		// A size is only a literal when it comes from a non-specialization constant.
		bool literal = c && !c->specialization;

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, tid });

		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id);

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[1] });

		arraybase = base;
		// Runtime arrays use a 0 size marker with a literal flag.
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		// The access qualifier operand is optional.
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		// Very rarely, we might receive a FunctionPrototype here.
		// We won't be able to compile it, but we shouldn't crash when parsing.
		// We should be able to reflect.
		auto *base = maybe_get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id);

		if (base)
			ptrbase = *base;

		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		if (base && base->forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[2] });

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeForwardPointer:
	{
		uint32_t id = ops[0];
		auto &ptrbase = set<SPIRType>(id);
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);
		// Marked for later resolution in Parser::parse()'s fixup pass.
		ptrbase.forward_pointer = true;

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == TypeID(0))
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	case OpTypeAccelerationStructureKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::AccelerationStructure;
		break;
	}

	case OpTypeRayQueryKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::RayQuery;
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		// Optional fourth operand is the initializer ID.
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);
		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
	// variable to emulate SSA Phi.
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID instead.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		// Operands come in (variable, parent-block) pairs.
		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

	// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

		// 64-bit constants consume two operand words (low, high).
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		ir.make_constant_null(id, type, true);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constants operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else if (undef_op)
				{
					// Undefined, just pick 0.
					remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
					remapped_constant_ops[i].constant_type = undef_op->basetype;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;
		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

		// Remaining operands are (literal, label) pairs.
		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->cases.push_back({ ops[i], ops[i + 1] });

		// If we jump to next block, make it break instead since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpTerminateRayKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::TerminateRay;
		current_block = nullptr;
		break;

	case OpIgnoreIntersectionKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::IgnoreIntersection;
		current_block = nullptr;
		break;

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		// Optional selection-control operand carries flatten hints.
		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);

		// Don't add loop headers to continue blocks,
		// which would make it impossible branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != BlockID(current_block->self))
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		// Optional loop-control operand carries unroll hints.
		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	case OpLine:
	{
		// OpLine might come at global scope, but we don't care about those since they will not be declared in any
		// meaningful correct order.
		// Ignore all OpLine directives which live outside a function.
		if (current_block)
			current_block->ops.push_back(instruction);

		// Line directives may arrive before first OpLabel.
		// Treat this as the line of the function declaration,
		// so warnings for arguments can propagate properly.
		if (current_function)
		{
			// Store the first one we find and emit it before creating the function prototype.
			if (current_function->entry_line.file_id == 0)
			{
				current_function->entry_line.file_id = ops[0];
				current_function->entry_line.line_literal = ops[1];
			}
		}
		break;
	}

	case OpNoLine:
	{
		// OpNoLine might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}
1145
// Recursively compares two types for structural equivalence (base type,
// width, vector/matrix shape, array dimensions, image descriptor and
// member types). Used by OpTypeStruct handling to detect struct types
// which differ only by decorations.
bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	// Array dimensions are plain uint32_t values; compare them bitwise.
	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	// Members must be pairwise equivalent as well (recursive check).
	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}
1181
// Returns true if storage reached through this variable can alias other
// names: SSBOs, images and atomic counters alias unless decorated Restrict.
bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
	auto &type = get<SPIRType>(v.basetype);

	auto *type_meta = ir.find_meta(type.self);

	// A buffer is an SSBO either via the StorageBuffer storage class or
	// via the legacy BufferBlock decoration on the block type.
	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	// Restrict lives on the buffer block for SSBOs, on the variable otherwise.
	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}
1201 } // namespace SPIRV_CROSS_NAMESPACE
1202