1 /* Copyright (c) 2015-2017 The Khronos Group Inc.
2  * Copyright (c) 2015-2017 Valve Corporation
3  * Copyright (c) 2015-2017 LunarG, Inc.
4  * Copyright (C) 2015-2017 Google Inc.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * Author: Chris Forbes <chrisf@ijw.co.nz>
19  */
20 
21 #include <cinttypes>
22 #include <cassert>
23 #include <vector>
24 #include <unordered_map>
25 #include <string>
26 #include <sstream>
27 #include <SPIRV/spirv.hpp>
28 #include "vk_loader_platform.h"
29 #include "vk_enum_string_helper.h"
30 #include "vk_layer_table.h"
31 #include "vk_layer_data.h"
32 #include "vk_layer_extension_utils.h"
33 #include "vk_layer_utils.h"
34 #include "core_validation.h"
35 #include "core_validation_types.h"
36 #include "shader_validation.h"
37 #include "spirv-tools/libspirv.h"
38 #include "xxhash.h"
39 
40 enum FORMAT_TYPE {
41     FORMAT_TYPE_FLOAT = 1,  // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
42     FORMAT_TYPE_SINT = 2,
43     FORMAT_TYPE_UINT = 4,
44 };
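// Note: FORMAT_TYPE values are bit flags, so a single format can satisfy more than one shader-side type
// (see get_format_type() below, where depth/stencil formats report FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT).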
45 
46 typedef std::pair<unsigned, unsigned> location_t;
47 
48 struct interface_var {
49     uint32_t id;
50     uint32_t type_id;
51     uint32_t offset;
52     bool is_patch;
53     bool is_block_member;
54     bool is_relaxed_precision;
55     // TODO: collect the name, too? Isn't required to be present.
56 };
57 
58 struct shader_stage_attributes {
59     char const *const name;
60     bool arrayed_input;
61     bool arrayed_output;
62 };
63 
64 static shader_stage_attributes shader_stage_attribs[] = {
65     {"vertex shader", false, false},  {"tessellation control shader", true, true}, {"tessellation evaluation shader", true, false},
66     {"geometry shader", true, false}, {"fragment shader", false, false},
67 };
68 
69 // SPIRV utility functions
70 void shader_module::build_def_index() {
71     for (auto insn : *this) {
72         switch (insn.opcode()) {
73             // Types
74             case spv::OpTypeVoid:
75             case spv::OpTypeBool:
76             case spv::OpTypeInt:
77             case spv::OpTypeFloat:
78             case spv::OpTypeVector:
79             case spv::OpTypeMatrix:
80             case spv::OpTypeImage:
81             case spv::OpTypeSampler:
82             case spv::OpTypeSampledImage:
83             case spv::OpTypeArray:
84             case spv::OpTypeRuntimeArray:
85             case spv::OpTypeStruct:
86             case spv::OpTypeOpaque:
87             case spv::OpTypePointer:
88             case spv::OpTypeFunction:
89             case spv::OpTypeEvent:
90             case spv::OpTypeDeviceEvent:
91             case spv::OpTypeReserveId:
92             case spv::OpTypeQueue:
93             case spv::OpTypePipe:
94                 def_index[insn.word(1)] = insn.offset();
95                 break;
96 
97                 // Fixed constants
98             case spv::OpConstantTrue:
99             case spv::OpConstantFalse:
100             case spv::OpConstant:
101             case spv::OpConstantComposite:
102             case spv::OpConstantSampler:
103             case spv::OpConstantNull:
104                 def_index[insn.word(2)] = insn.offset();
105                 break;
106 
107                 // Specialization constants
108             case spv::OpSpecConstantTrue:
109             case spv::OpSpecConstantFalse:
110             case spv::OpSpecConstant:
111             case spv::OpSpecConstantComposite:
112             case spv::OpSpecConstantOp:
113                 def_index[insn.word(2)] = insn.offset();
114                 break;
115 
116                 // Variables
117             case spv::OpVariable:
118                 def_index[insn.word(2)] = insn.offset();
119                 break;
120 
121                 // Functions
122             case spv::OpFunction:
123                 def_index[insn.word(2)] = insn.offset();
124                 break;
125 
126             default:
127                 // We don't care about any other defs for now.
128                 break;
129         }
130     }
131 }
132 
133 static spirv_inst_iter find_entrypoint(shader_module const *src, char const *name, VkShaderStageFlagBits stageBits) {
134     for (auto insn : *src) {
135         if (insn.opcode() == spv::OpEntryPoint) {
136             auto entrypointName = (char const *)&insn.word(3);
137             auto entrypointStageBits = 1u << insn.word(1);
138 
139             if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
140                 return insn;
141             }
142         }
143     }
144 
145     return src->end();
146 }
147 
148 static char const *storage_class_name(unsigned sc) {
149     switch (sc) {
150         case spv::StorageClassInput:
151             return "input";
152         case spv::StorageClassOutput:
153             return "output";
154         case spv::StorageClassUniformConstant:
155             return "const uniform";
156         case spv::StorageClassUniform:
157             return "uniform";
158         case spv::StorageClassWorkgroup:
159             return "workgroup local";
160         case spv::StorageClassCrossWorkgroup:
161             return "workgroup global";
162         case spv::StorageClassPrivate:
163             return "private global";
164         case spv::StorageClassFunction:
165             return "function";
166         case spv::StorageClassGeneric:
167             return "generic";
168         case spv::StorageClassAtomicCounter:
169             return "atomic counter";
170         case spv::StorageClassImage:
171             return "image";
172         case spv::StorageClassPushConstant:
173             return "push constant";
174         default:
175             return "unknown";
176     }
177 }
178 
179 // Get the value of an integral constant
180 unsigned get_constant_value(shader_module const *src, unsigned id) {
181     auto value = src->get_def(id);
182     assert(value != src->end());
183 
184     if (value.opcode() != spv::OpConstant) {
185         // TODO: Either ensure that the specialization transform is already performed on a module we're
186         //       considering here, OR -- specialize on the fly now.
187         return 1;
188     }
189 
190     return value.word(3);
191 }
192 
193 static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
194     auto insn = src->get_def(type);
195     assert(insn != src->end());
196 
197     switch (insn.opcode()) {
198         case spv::OpTypeBool:
199             ss << "bool";
200             break;
201         case spv::OpTypeInt:
202             ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
203             break;
204         case spv::OpTypeFloat:
205             ss << "float" << insn.word(2);
206             break;
207         case spv::OpTypeVector:
208             ss << "vec" << insn.word(3) << " of ";
209             describe_type_inner(ss, src, insn.word(2));
210             break;
211         case spv::OpTypeMatrix:
212             ss << "mat" << insn.word(3) << " of ";
213             describe_type_inner(ss, src, insn.word(2));
214             break;
215         case spv::OpTypeArray:
216             ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
217             describe_type_inner(ss, src, insn.word(2));
218             break;
219         case spv::OpTypePointer:
220             ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
221             describe_type_inner(ss, src, insn.word(3));
222             break;
223         case spv::OpTypeStruct: {
224             ss << "struct of (";
225             for (unsigned i = 2; i < insn.len(); i++) {
226                 describe_type_inner(ss, src, insn.word(i));
227                 if (i == insn.len() - 1) {
228                     ss << ")";
229                 } else {
230                     ss << ", ";
231                 }
232             }
233             break;
234         }
235         case spv::OpTypeSampler:
236             ss << "sampler";
237             break;
238         case spv::OpTypeSampledImage:
239             ss << "sampler+";
240             describe_type_inner(ss, src, insn.word(2));
241             break;
242         case spv::OpTypeImage:
243             ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
244             break;
245         default:
246             ss << "oddtype";
247             break;
248     }
249 }
250 
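// Render a human-readable description of a SPIR-V type for use in the error messages below;
// e.g. a `vec4` vertex input is reported as "ptr to input vec4 of float32".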
251 static std::string describe_type(shader_module const *src, unsigned type) {
252     std::ostringstream ss;
253     describe_type_inner(ss, src, type);
254     return ss.str();
255 }
256 
257 static bool is_narrow_numeric_type(spirv_inst_iter type) {
258     if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) return false;
259     return type.word(2) < 64;
260 }
261 
262 static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
263                         bool b_arrayed, bool relaxed) {
264     // Walk two type trees together, and complain about differences
265     auto a_insn = a->get_def(a_type);
266     auto b_insn = b->get_def(b_type);
267     assert(a_insn != a->end());
268     assert(b_insn != b->end());
269 
270     if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
271         return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
272     }
273 
274     if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
275         // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
276         return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
277     }
278 
279     if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
280         return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
281     }
282 
283     if (a_insn.opcode() != b_insn.opcode()) {
284         return false;
285     }
286 
287     if (a_insn.opcode() == spv::OpTypePointer) {
288         // Match on pointee type. storage class is expected to differ
289         return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
290     }
291 
292     if (a_arrayed || b_arrayed) {
293         // If we haven't resolved array-of-verts by here, we're not going to.
294         return false;
295     }
296 
297     switch (a_insn.opcode()) {
298         case spv::OpTypeBool:
299             return true;
300         case spv::OpTypeInt:
301             // Match on width, signedness
302             return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
303         case spv::OpTypeFloat:
304             // Match on width
305             return a_insn.word(2) == b_insn.word(2);
306         case spv::OpTypeVector:
307             // Match on element type, count.
308             if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false)) return false;
309             if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
310                 return a_insn.word(3) >= b_insn.word(3);
311             } else {
312                 return a_insn.word(3) == b_insn.word(3);
313             }
314         case spv::OpTypeMatrix:
315             // Match on element type, count.
316             return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
317                    a_insn.word(3) == b_insn.word(3);
318         case spv::OpTypeArray:
319             // Match on element type, count. these all have the same layout. we don't get here if b_arrayed. This differs from
320             // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray
321             return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
322                    get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
323         case spv::OpTypeStruct:
324             // Match on all element types
325             {
326                 if (a_insn.len() != b_insn.len()) {
327                     return false;  // Structs cannot match if member counts differ
328                 }
329 
330                 for (unsigned i = 2; i < a_insn.len(); i++) {
331                     if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
332                         return false;
333                     }
334                 }
335 
336                 return true;
337             }
338         default:
339             // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
340             return false;
341     }
342 }
343 
344 static unsigned value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, unsigned def) {
345     auto it = map.find(id);
346     if (it == map.end())
347         return def;
348     else
349         return it->second;
350 }
351 
352 static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
353     auto insn = src->get_def(type);
354     assert(insn != src->end());
355 
356     switch (insn.opcode()) {
357         case spv::OpTypePointer:
358             // See through the ptr -- this is only ever at the toplevel for graphics shaders; we're never actually passing
359             // pointers around.
360             return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
361         case spv::OpTypeArray:
362             if (strip_array_level) {
363                 return get_locations_consumed_by_type(src, insn.word(2), false);
364             } else {
365                 return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
366             }
367         case spv::OpTypeMatrix:
368             // Num locations is the dimension * element size
369             return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
370         case spv::OpTypeVector: {
371             auto scalar_type = src->get_def(insn.word(2));
372             auto bit_width =
373                 (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ? scalar_type.word(2) : 32;
374 
375             // Locations are 128-bit wide; 3- and 4-component vectors of 64 bit types require two.
376             return (bit_width * insn.word(3) + 127) / 128;
377         }
378         default:
379             // Everything else is just 1.
380             return 1;
381 
382             // TODO: extend to handle 64bit scalar types, whose vectors may need multiple locations.
383     }
384 }
385 
386 static unsigned get_locations_consumed_by_format(VkFormat format) {
387     switch (format) {
388         case VK_FORMAT_R64G64B64A64_SFLOAT:
389         case VK_FORMAT_R64G64B64A64_SINT:
390         case VK_FORMAT_R64G64B64A64_UINT:
391         case VK_FORMAT_R64G64B64_SFLOAT:
392         case VK_FORMAT_R64G64B64_SINT:
393         case VK_FORMAT_R64G64B64_UINT:
394             return 2;
395         default:
396             return 1;
397     }
398 }
399 
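// Characterize a VkFormat's fundamental numeric type for comparison against the shader-side type;
// e.g. VK_FORMAT_R8G8B8A8_UNORM yields FORMAT_TYPE_FLOAT and VK_FORMAT_R32G32_UINT yields FORMAT_TYPE_UINT.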
400 static unsigned get_format_type(VkFormat fmt) {
401     if (FormatIsSInt(fmt)) return FORMAT_TYPE_SINT;
402     if (FormatIsUInt(fmt)) return FORMAT_TYPE_UINT;
403     if (FormatIsDepthAndStencil(fmt)) return FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT;
404     if (fmt == VK_FORMAT_UNDEFINED) return 0;
405     // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader.
406     return FORMAT_TYPE_FLOAT;
407 }
408 
409 // characterizes a SPIR-V type appearing in an interface to a FF stage, for comparison to a VkFormat's characterization above.
410 static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
411     auto insn = src->get_def(type);
412     assert(insn != src->end());
413 
414     switch (insn.opcode()) {
415         case spv::OpTypeInt:
416             return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
417         case spv::OpTypeFloat:
418             return FORMAT_TYPE_FLOAT;
419         case spv::OpTypeVector:
420             return get_fundamental_type(src, insn.word(2));
421         case spv::OpTypeMatrix:
422             return get_fundamental_type(src, insn.word(2));
423         case spv::OpTypeArray:
424             return get_fundamental_type(src, insn.word(2));
425         case spv::OpTypePointer:
426             return get_fundamental_type(src, insn.word(3));
427         case spv::OpTypeImage:
428             return get_fundamental_type(src, insn.word(2));
429 
430         default:
431             return 0;
432     }
433 }
434 
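// Map a single VkShaderStageFlagBits bit to a zero-based stage index;
// e.g. VK_SHADER_STAGE_FRAGMENT_BIT (1u << 4) maps to 4, matching "fragment shader" in shader_stage_attribs above.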
435 static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
436     uint32_t bit_pos = uint32_t(u_ffs(stage));
437     return bit_pos - 1;
438 }
439 
440 static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
441     while (true) {
442         if (def.opcode() == spv::OpTypePointer) {
443             def = src->get_def(def.word(3));
444         } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
445             def = src->get_def(def.word(2));
446             is_array_of_verts = false;
447         } else if (def.opcode() == spv::OpTypeStruct) {
448             return def;
449         } else {
450             return src->end();
451         }
452     }
453 }
454 
455 static bool collect_interface_block_members(shader_module const *src, std::map<location_t, interface_var> *out,
456                                             std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
457                                             uint32_t id, uint32_t type_id, bool is_patch, int /*first_location*/) {
458     // Walk down the type_id presented, trying to determine whether it's actually an interface block.
459     auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
460     if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
461         // This isn't an interface block.
462         return false;
463     }
464 
465     std::unordered_map<unsigned, unsigned> member_components;
466     std::unordered_map<unsigned, unsigned> member_relaxed_precision;
467     std::unordered_map<unsigned, unsigned> member_patch;
468 
469     // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
470     for (auto insn : *src) {
471         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
472             unsigned member_index = insn.word(2);
473 
474             if (insn.word(3) == spv::DecorationComponent) {
475                 unsigned component = insn.word(4);
476                 member_components[member_index] = component;
477             }
478 
479             if (insn.word(3) == spv::DecorationRelaxedPrecision) {
480                 member_relaxed_precision[member_index] = 1;
481             }
482 
483             if (insn.word(3) == spv::DecorationPatch) {
484                 member_patch[member_index] = 1;
485             }
486         }
487     }
488 
489     // TODO: correctly handle location assignment from outside
490 
491     // Second pass -- produce the output, from Location decorations
492     for (auto insn : *src) {
493         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
494             unsigned member_index = insn.word(2);
495             unsigned member_type_id = type.word(2 + member_index);
496 
497             if (insn.word(3) == spv::DecorationLocation) {
498                 unsigned location = insn.word(4);
499                 unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
500                 auto component_it = member_components.find(member_index);
501                 unsigned component = component_it == member_components.end() ? 0 : component_it->second;
502                 bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();
503                 bool member_is_patch = is_patch || member_patch.count(member_index) > 0;
504 
505                 for (unsigned int offset = 0; offset < num_locations; offset++) {
506                     interface_var v = {};
507                     v.id = id;
508                     // TODO: member index in interface_var too?
509                     v.type_id = member_type_id;
510                     v.offset = offset;
511                     v.is_patch = member_is_patch;
512                     v.is_block_member = true;
513                     v.is_relaxed_precision = is_relaxed_precision;
514                     (*out)[std::make_pair(location + offset, component)] = v;
515                 }
516             }
517         }
518     }
519 
520     return true;
521 }
522 
523 static std::map<location_t, interface_var> collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
524                                                                          spv::StorageClass sinterface, bool is_array_of_verts) {
525     std::unordered_map<unsigned, unsigned> var_locations;
526     std::unordered_map<unsigned, unsigned> var_builtins;
527     std::unordered_map<unsigned, unsigned> var_components;
528     std::unordered_map<unsigned, unsigned> blocks;
529     std::unordered_map<unsigned, unsigned> var_patch;
530     std::unordered_map<unsigned, unsigned> var_relaxed_precision;
531 
532     for (auto insn : *src) {
533         // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
534         // fits neither model.
535         if (insn.opcode() == spv::OpDecorate) {
536             if (insn.word(2) == spv::DecorationLocation) {
537                 var_locations[insn.word(1)] = insn.word(3);
538             }
539 
540             if (insn.word(2) == spv::DecorationBuiltIn) {
541                 var_builtins[insn.word(1)] = insn.word(3);
542             }
543 
544             if (insn.word(2) == spv::DecorationComponent) {
545                 var_components[insn.word(1)] = insn.word(3);
546             }
547 
548             if (insn.word(2) == spv::DecorationBlock) {
549                 blocks[insn.word(1)] = 1;
550             }
551 
552             if (insn.word(2) == spv::DecorationPatch) {
553                 var_patch[insn.word(1)] = 1;
554             }
555 
556             if (insn.word(2) == spv::DecorationRelaxedPrecision) {
557                 var_relaxed_precision[insn.word(1)] = 1;
558             }
559         }
560     }
561 
562     // TODO: handle grouped decorations
563     // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
564 
565     // Find the end of the entrypoint's name string. additional zero bytes follow the actual null terminator, to fill out the
566     // rest of the word - so we only need to look at the last byte in the word to determine which word contains the terminator.
567     uint32_t word = 3;
568     while (entrypoint.word(word) & 0xff000000u) {
569         ++word;
570     }
571     ++word;
572 
573     std::map<location_t, interface_var> out;
574 
575     for (; word < entrypoint.len(); word++) {
576         auto insn = src->get_def(entrypoint.word(word));
577         assert(insn != src->end());
578         assert(insn.opcode() == spv::OpVariable);
579 
580         if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
581             unsigned id = insn.word(2);
582             unsigned type = insn.word(1);
583 
584             int location = value_or_default(var_locations, id, static_cast<unsigned>(-1));
585             int builtin = value_or_default(var_builtins, id, static_cast<unsigned>(-1));
586             unsigned component = value_or_default(var_components, id, 0);  // Unspecified is OK, is 0
587             bool is_patch = var_patch.find(id) != var_patch.end();
588             bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
589 
590             if (builtin != -1)
591                 continue;
592             else if (!collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch, location)) {
593                 // A user-defined interface variable, with a location. Where a variable occupied multiple locations, emit
594                 // one result for each.
595                 unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
596                 for (unsigned int offset = 0; offset < num_locations; offset++) {
597                     interface_var v = {};
598                     v.id = id;
599                     v.type_id = type;
600                     v.offset = offset;
601                     v.is_patch = is_patch;
602                     v.is_relaxed_precision = is_relaxed_precision;
603                     out[std::make_pair(location + offset, component)] = v;
604                 }
605             }
606         }
607     }
608 
609     return out;
610 }
611 
612 static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
613     shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
614     std::vector<std::pair<uint32_t, interface_var>> out;
615 
616     for (auto insn : *src) {
617         if (insn.opcode() == spv::OpDecorate) {
618             if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
619                 auto attachment_index = insn.word(3);
620                 auto id = insn.word(1);
621 
622                 if (accessible_ids.count(id)) {
623                     auto def = src->get_def(id);
624                     assert(def != src->end());
625 
626                     if (def.opcode() == spv::OpVariable && insn.word(3) == spv::StorageClassUniformConstant) {
627                         auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
628                         for (unsigned int offset = 0; offset < num_locations; offset++) {
629                             interface_var v = {};
630                             v.id = id;
631                             v.type_id = def.word(1);
632                             v.offset = offset;
633                             out.emplace_back(attachment_index + offset, v);
634                         }
635                     }
636                 }
637             }
638         }
639     }
640 
641     return out;
642 }
643 
644 static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
645     debug_report_data const *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
646     std::unordered_map<unsigned, unsigned> var_sets;
647     std::unordered_map<unsigned, unsigned> var_bindings;
648 
649     for (auto insn : *src) {
650         // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
651         // DecorationDescriptorSet and DecorationBinding.
652         if (insn.opcode() == spv::OpDecorate) {
653             if (insn.word(2) == spv::DecorationDescriptorSet) {
654                 var_sets[insn.word(1)] = insn.word(3);
655             }
656 
657             if (insn.word(2) == spv::DecorationBinding) {
658                 var_bindings[insn.word(1)] = insn.word(3);
659             }
660         }
661     }
662 
663     std::vector<std::pair<descriptor_slot_t, interface_var>> out;
664 
665     for (auto id : accessible_ids) {
666         auto insn = src->get_def(id);
667         assert(insn != src->end());
668 
669         if (insn.opcode() == spv::OpVariable &&
670             (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
671             unsigned set = value_or_default(var_sets, insn.word(2), 0);
672             unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
673 
674             interface_var v = {};
675             v.id = insn.word(2);
676             v.type_id = insn.word(1);
677             out.emplace_back(std::make_pair(set, binding), v);
678         }
679     }
680 
681     return out;
682 }
683 
684 static bool validate_vi_consistency(debug_report_data const *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
685     // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
686     // be specified only once.
687     std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
688     bool skip = false;
689 
690     for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
691         auto desc = &vi->pVertexBindingDescriptions[i];
692         auto &binding = bindings[desc->binding];
693         if (binding) {
694             // TODO: VALIDATION_ERROR_096005cc perhaps?
695             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
696                             SHADER_CHECKER_INCONSISTENT_VI, "SC", "Duplicate vertex input binding descriptions for binding %d",
697                             desc->binding);
698         } else {
699             binding = desc;
700         }
701     }
702 
703     return skip;
704 }
705 
706 static bool validate_vi_against_vs_inputs(debug_report_data const *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
707                                           shader_module const *vs, spirv_inst_iter entrypoint) {
708     bool skip = false;
709 
710     auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
711 
712     // Build index by location
713     std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
714     if (vi) {
715         for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
716             auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
717             for (auto j = 0u; j < num_locations; j++) {
718                 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
719             }
720         }
721     }
722 
723     auto it_a = attribs.begin();
724     auto it_b = inputs.begin();
725     bool used = false;
726 
727     while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
728         bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
729         bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
730         auto a_first = a_at_end ? 0 : it_a->first;
731         auto b_first = b_at_end ? 0 : it_b->first.first;
732         if (!a_at_end && (b_at_end || a_first < b_first)) {
733             if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
734                                  0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
735                                  "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
736                 skip = true;
737             }
738             used = false;
739             it_a++;
740         } else if (!b_at_end && (a_at_end || b_first < a_first)) {
741             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
742                             SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d but not provided",
743                             b_first);
744             it_b++;
745         } else {
746             unsigned attrib_type = get_format_type(it_a->second->format);
747             unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
748 
749             // Type checking
750             if (!(attrib_type & input_type)) {
751                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
752                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
753                                 "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
754                                 string_VkFormat(it_a->second->format), a_first, describe_type(vs, it_b->second.type_id).c_str());
755             }
756 
757             // OK!
758             used = true;
759             it_b++;
760         }
761     }
762 
763     return skip;
764 }
765 
766 static bool validate_fs_outputs_against_render_pass(debug_report_data const *report_data, shader_module const *fs,
767                                                     spirv_inst_iter entrypoint, PIPELINE_STATE const *pipeline,
768                                                     uint32_t subpass_index) {
769     auto rpci = pipeline->rp_state->createInfo.ptr();
770 
771     std::map<uint32_t, VkFormat> color_attachments;
772     auto subpass = rpci->pSubpasses[subpass_index];
773     for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
774         uint32_t attachment = subpass.pColorAttachments[i].attachment;
775         if (attachment == VK_ATTACHMENT_UNUSED) continue;
776         if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
777             color_attachments[i] = rpci->pAttachments[attachment].format;
778         }
779     }
780 
781     bool skip = false;
782 
783     // TODO: dual source blend index (spv::DecIndex, zero if not provided)
784 
785     auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
786 
787     auto it_a = outputs.begin();
788     auto it_b = color_attachments.begin();
789 
790     // Walk attachment list and outputs together
791 
792     while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
793         bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
794         bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
795 
796         if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
797             skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
798                             SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
799                             "fragment shader writes to output location %d with no matching attachment", it_a->first.first);
800             it_a++;
801         } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
802             // Only complain if there are unmasked channels for this attachment. If the writemask is 0, it's acceptable for the
803             // shader to not produce a matching output.
804             if (pipeline->attachments[it_b->first].colorWriteMask != 0) {
805                 skip |=
806                     log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
807                             SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader", it_b->first);
808             }
809             it_b++;
810         } else {
811             unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
812             unsigned att_type = get_format_type(it_b->second);
813 
814             // Type checking
815             if (!(output_type & att_type)) {
816                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
817                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
818                                 "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
819                                 string_VkFormat(it_b->second), describe_type(fs, it_a->second.type_id).c_str());
820             }
821 
822             // OK!
823             it_a++;
824             it_b++;
825         }
826     }
827 
828     return skip;
829 }
830 
831 // For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
832 // important for identifying the set of shader resources actually used by an entrypoint, for example.
833 // Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
834 //  - NOT the shader input/output interfaces.
835 //
836 // TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
837 // converting parts of this to be generated from the machine-readable spec instead.
838 static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
839     std::unordered_set<uint32_t> ids;
840     std::unordered_set<uint32_t> worklist;
841     worklist.insert(entrypoint.word(2));
842 
843     while (!worklist.empty()) {
844         auto id_iter = worklist.begin();
845         auto id = *id_iter;
846         worklist.erase(id_iter);
847 
848         auto insn = src->get_def(id);
849         if (insn == src->end()) {
850             // ID is something we didn't collect in build_def_index. that's OK -- we'll stumble across all kinds of things here
851             // that we may not care about.
852             continue;
853         }
854 
855         // Try to add to the output set
856         if (!ids.insert(id).second) {
857             continue;  // If we already saw this id, we don't want to walk it again.
858         }
859 
860         switch (insn.opcode()) {
861             case spv::OpFunction:
862                 // Scan whole body of the function, enlisting anything interesting
863                 while (++insn, insn.opcode() != spv::OpFunctionEnd) {
864                     switch (insn.opcode()) {
865                         case spv::OpLoad:
866                         case spv::OpAtomicLoad:
867                         case spv::OpAtomicExchange:
868                         case spv::OpAtomicCompareExchange:
869                         case spv::OpAtomicCompareExchangeWeak:
870                         case spv::OpAtomicIIncrement:
871                         case spv::OpAtomicIDecrement:
872                         case spv::OpAtomicIAdd:
873                         case spv::OpAtomicISub:
874                         case spv::OpAtomicSMin:
875                         case spv::OpAtomicUMin:
876                         case spv::OpAtomicSMax:
877                         case spv::OpAtomicUMax:
878                         case spv::OpAtomicAnd:
879                         case spv::OpAtomicOr:
880                         case spv::OpAtomicXor:
881                             worklist.insert(insn.word(3));  // ptr
882                             break;
883                         case spv::OpStore:
884                         case spv::OpAtomicStore:
885                             worklist.insert(insn.word(1));  // ptr
886                             break;
887                         case spv::OpAccessChain:
888                         case spv::OpInBoundsAccessChain:
889                             worklist.insert(insn.word(3));  // base ptr
890                             break;
891                         case spv::OpSampledImage:
892                         case spv::OpImageSampleImplicitLod:
893                         case spv::OpImageSampleExplicitLod:
894                         case spv::OpImageSampleDrefImplicitLod:
895                         case spv::OpImageSampleDrefExplicitLod:
896                         case spv::OpImageSampleProjImplicitLod:
897                         case spv::OpImageSampleProjExplicitLod:
898                         case spv::OpImageSampleProjDrefImplicitLod:
899                         case spv::OpImageSampleProjDrefExplicitLod:
900                         case spv::OpImageFetch:
901                         case spv::OpImageGather:
902                         case spv::OpImageDrefGather:
903                         case spv::OpImageRead:
904                         case spv::OpImage:
905                         case spv::OpImageQueryFormat:
906                         case spv::OpImageQueryOrder:
907                         case spv::OpImageQuerySizeLod:
908                         case spv::OpImageQuerySize:
909                         case spv::OpImageQueryLod:
910                         case spv::OpImageQueryLevels:
911                         case spv::OpImageQuerySamples:
912                         case spv::OpImageSparseSampleImplicitLod:
913                         case spv::OpImageSparseSampleExplicitLod:
914                         case spv::OpImageSparseSampleDrefImplicitLod:
915                         case spv::OpImageSparseSampleDrefExplicitLod:
916                         case spv::OpImageSparseSampleProjImplicitLod:
917                         case spv::OpImageSparseSampleProjExplicitLod:
918                         case spv::OpImageSparseSampleProjDrefImplicitLod:
919                         case spv::OpImageSparseSampleProjDrefExplicitLod:
920                         case spv::OpImageSparseFetch:
921                         case spv::OpImageSparseGather:
922                         case spv::OpImageSparseDrefGather:
923                         case spv::OpImageTexelPointer:
924                             worklist.insert(insn.word(3));  // Image or sampled image
925                             break;
926                         case spv::OpImageWrite:
927                             worklist.insert(insn.word(1));  // Image -- different operand order to above
928                             break;
929                         case spv::OpFunctionCall:
930                             for (uint32_t i = 3; i < insn.len(); i++) {
931                                 worklist.insert(insn.word(i));  // fn itself, and all args
932                             }
933                             break;
934 
935                         case spv::OpExtInst:
936                             for (uint32_t i = 5; i < insn.len(); i++) {
937                                 worklist.insert(insn.word(i));  // Operands to ext inst
938                             }
939                             break;
940                     }
941                 }
942                 break;
943         }
944     }
945 
946     return ids;
947 }
948 
949 static bool validate_push_constant_block_against_pipeline(debug_report_data const *report_data,
950                                                           std::vector<VkPushConstantRange> const *push_constant_ranges,
951                                                           shader_module const *src, spirv_inst_iter type,
952                                                           VkShaderStageFlagBits stage) {
953     bool skip = false;
954 
955     // Strip off ptrs etc
956     type = get_struct_type(src, type, false);
957     assert(type != src->end());
958 
959     // Validate directly off the offsets. this isn't quite correct for arrays and matrices, but is a good first step.
960     // TODO: arrays, matrices, weird sizes
961     for (auto insn : *src) {
962         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
963             if (insn.word(3) == spv::DecorationOffset) {
964                 unsigned offset = insn.word(4);
965                 auto size = 4;  // Bytes; TODO: calculate this based on the type
966 
967                 bool found_range = false;
968                 for (auto const &range : *push_constant_ranges) {
969                     if (range.offset <= offset && range.offset + range.size >= offset + size) {
970                         found_range = true;
971 
972                         if ((range.stageFlags & stage) == 0) {
973                             skip |=
974                                 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
975                                         __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
976                                         "Push constant range covering variable starting at offset %u not accessible from stage %s",
977                                         offset, string_VkShaderStageFlagBits(stage));
978                         }
979 
980                         break;
981                     }
982                 }
983 
984                 if (!found_range) {
985                     skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
986                                     __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
987                                     "Push constant range covering variable starting at offset %u not declared in layout", offset);
988                 }
989             }
990         }
991     }
992 
993     return skip;
994 }
995 
996 static bool validate_push_constant_usage(debug_report_data const *report_data,
997                                          std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
998                                          std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
999     bool skip = false;
1000 
1001     for (auto id : accessible_ids) {
1002         auto def_insn = src->get_def(id);
1003         if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1004             skip |= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
1005                                                                   src->get_def(def_insn.word(1)), stage);
1006         }
1007     }
1008 
1009     return skip;
1010 }
1011 
1012 // Validate that data for each specialization entry is fully contained within the buffer.
1013 static bool validate_specialization_offsets(debug_report_data const *report_data, VkPipelineShaderStageCreateInfo const *info) {
1014     bool skip = false;
1015 
1016     VkSpecializationInfo const *spec = info->pSpecializationInfo;
1017 
1018     if (spec) {
1019         for (auto i = 0u; i < spec->mapEntryCount; i++) {
1020             // TODO: This is a good place for VALIDATION_ERROR_1360060a.
1021             if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
1022                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1023                                 VALIDATION_ERROR_1360060c, "SC",
1024                                 "Specialization entry %u (for constant id %u) references memory outside provided specialization "
1025                                 "data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided). %s.",
1026                                 i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
1027                                 spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
1028                                 validation_error_map[VALIDATION_ERROR_1360060c]);
1029             }
1030         }
1031     }
1032 
1033     return skip;
1034 }
1035 
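// Determine whether the SPIR-V type backing a resource variable is compatible with the descriptor type declared in the
// layout; e.g. an OpTypeSampledImage of a 2D image with Sampled=1 matches VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER.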
1036 static bool descriptor_type_match(shader_module const *module, uint32_t type_id, VkDescriptorType descriptor_type,
1037                                   unsigned &descriptor_count) {
1038     auto type = module->get_def(type_id);
1039 
1040     descriptor_count = 1;
1041 
1042     // Strip off any array or ptrs. Where we remove array levels, adjust the  descriptor count for each dimension.
1043     while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
1044         if (type.opcode() == spv::OpTypeArray) {
1045             descriptor_count *= get_constant_value(module, type.word(3));
1046             type = module->get_def(type.word(2));
1047         } else {
1048             type = module->get_def(type.word(3));
1049         }
1050     }
1051 
1052     switch (type.opcode()) {
1053         case spv::OpTypeStruct: {
1054             for (auto insn : *module) {
1055                 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
1056                     if (insn.word(2) == spv::DecorationBlock) {
1057                         return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1058                                descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1059                     } else if (insn.word(2) == spv::DecorationBufferBlock) {
1060                         return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1061                                descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
1062                     }
1063                 }
1064             }
1065 
1066             // Invalid
1067             return false;
1068         }
1069 
1070         case spv::OpTypeSampler:
1071             return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER || descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1072 
1073         case spv::OpTypeSampledImage:
1074             if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
1075                 // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
1076                 // buffer descriptor doesn't really provide one. Allow this slight mismatch.
1077                 auto image_type = module->get_def(type.word(2));
1078                 auto dim = image_type.word(3);
1079                 auto sampled = image_type.word(7);
1080                 return dim == spv::DimBuffer && sampled == 1;
1081             }
1082             return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1083 
1084         case spv::OpTypeImage: {
1085             // Many descriptor types backing image types-- depends on dimension and whether the image will be used with a sampler.
1086             // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
1087             auto dim = type.word(3);
1088             auto sampled = type.word(7);
1089 
1090             if (dim == spv::DimSubpassData) {
1091                 return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
1092             } else if (dim == spv::DimBuffer) {
1093                 if (sampled == 1) {
1094                     return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
1095                 } else {
1096                     return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1097                 }
1098             } else if (sampled == 1) {
1099                 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
1100                        descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1101             } else {
1102                 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
1103             }
1104         }
1105 
1106             // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
1107         default:
1108             return false;  // Mismatch
1109     }
1110 }
1111 
1112 static bool require_feature(debug_report_data const *report_data, VkBool32 feature, char const *feature_name) {
1113     if (!feature) {
1114         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1115                     SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
1116                     "Shader requires VkPhysicalDeviceFeatures::%s but is not enabled on the device", feature_name)) {
1117             return true;
1118         }
1119     }
1120 
1121     return false;
1122 }
1123 
1124 static bool require_extension(debug_report_data const *report_data, bool extension, char const *extension_name) {
1125     if (!extension) {
1126         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1127                     SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC", "Shader requires extension %s but is not enabled on the device",
1128                     extension_name)) {
1129             return true;
1130         }
1131     }
1132 
1133     return false;
1134 }
1135 
1136 static bool validate_shader_capabilities(layer_data *dev_data, shader_module const *src) {
1137     bool skip = false;
1138 
1139     auto report_data = GetReportData(dev_data);
1140     auto const &enabledFeatures = GetEnabledFeatures(dev_data);
1141     auto const &extensions = GetEnabledExtensions(dev_data);
1142 
1143     struct CapabilityInfo {
1144         char const *name;
1145         VkBool32 const VkPhysicalDeviceFeatures::*feature;
1146         bool const DeviceExtensions::*extension;
1147     };
1148 
1149     using F = VkPhysicalDeviceFeatures;
1150     using E = DeviceExtensions;
1151 
1152     // clang-format off
1153     static const std::unordered_multimap<uint32_t, CapabilityInfo> capabilities = {
1154         // Capabilities always supported by a Vulkan 1.0 implementation -- no
1155         // feature bits.
1156         {spv::CapabilityMatrix, {nullptr}},
1157         {spv::CapabilityShader, {nullptr}},
1158         {spv::CapabilityInputAttachment, {nullptr}},
1159         {spv::CapabilitySampled1D, {nullptr}},
1160         {spv::CapabilityImage1D, {nullptr}},
1161         {spv::CapabilitySampledBuffer, {nullptr}},
1162         {spv::CapabilityImageQuery, {nullptr}},
1163         {spv::CapabilityDerivativeControl, {nullptr}},
1164 
1165         // Capabilities that are optionally supported, but require a feature to
1166         // be enabled on the device
1167         {spv::CapabilityGeometry, {"geometryShader", &F::geometryShader}},
1168         {spv::CapabilityTessellation, {"tessellationShader", &F::tessellationShader}},
1169         {spv::CapabilityFloat64, {"shaderFloat64", &F::shaderFloat64}},
1170         {spv::CapabilityInt64, {"shaderInt64", &F::shaderInt64}},
1171         {spv::CapabilityTessellationPointSize, {"shaderTessellationAndGeometryPointSize", &F::shaderTessellationAndGeometryPointSize}},
1172         {spv::CapabilityGeometryPointSize, {"shaderTessellationAndGeometryPointSize", &F::shaderTessellationAndGeometryPointSize}},
1173         {spv::CapabilityImageGatherExtended, {"shaderImageGatherExtended", &F::shaderImageGatherExtended}},
1174         {spv::CapabilityStorageImageMultisample, {"shaderStorageImageMultisample", &F::shaderStorageImageMultisample}},
1175         {spv::CapabilityUniformBufferArrayDynamicIndexing, {"shaderUniformBufferArrayDynamicIndexing", &F::shaderUniformBufferArrayDynamicIndexing}},
1176         {spv::CapabilitySampledImageArrayDynamicIndexing, {"shaderSampledImageArrayDynamicIndexing", &F::shaderSampledImageArrayDynamicIndexing}},
1177         {spv::CapabilityStorageBufferArrayDynamicIndexing, {"shaderStorageBufferArrayDynamicIndexing", &F::shaderStorageBufferArrayDynamicIndexing}},
1178         {spv::CapabilityStorageImageArrayDynamicIndexing, {"shaderStorageImageArrayDynamicIndexing", &F::shaderStorageImageArrayDynamicIndexing}},
1179         {spv::CapabilityClipDistance, {"shaderClipDistance", &F::shaderClipDistance}},
1180         {spv::CapabilityCullDistance, {"shaderCullDistance", &F::shaderCullDistance}},
1181         {spv::CapabilityImageCubeArray, {"imageCubeArray", &F::imageCubeArray}},
1182         {spv::CapabilitySampleRateShading, {"sampleRateShading", &F::sampleRateShading}},
1183         {spv::CapabilitySparseResidency, {"shaderResourceResidency", &F::shaderResourceResidency}},
1184         {spv::CapabilityMinLod, {"shaderResourceMinLod", &F::shaderResourceMinLod}},
1185         {spv::CapabilitySampledCubeArray, {"imageCubeArray", &F::imageCubeArray}},
1186         {spv::CapabilityImageMSArray, {"shaderStorageImageMultisample", &F::shaderStorageImageMultisample}},
1187         {spv::CapabilityStorageImageExtendedFormats, {"shaderStorageImageExtendedFormats", &F::shaderStorageImageExtendedFormats}},
1188         {spv::CapabilityInterpolationFunction, {"sampleRateShading", &F::sampleRateShading}},
1189         {spv::CapabilityStorageImageReadWithoutFormat, {"shaderStorageImageReadWithoutFormat", &F::shaderStorageImageReadWithoutFormat}},
1190         {spv::CapabilityStorageImageWriteWithoutFormat, {"shaderStorageImageWriteWithoutFormat", &F::shaderStorageImageWriteWithoutFormat}},
1191         {spv::CapabilityMultiViewport, {"multiViewport", &F::multiViewport}},
1192 
1193         // Capabilities that require an extension
1194         {spv::CapabilityDrawParameters, {VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME, nullptr, &E::vk_khr_shader_draw_parameters}},
1195         {spv::CapabilityGeometryShaderPassthroughNV, {VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME, nullptr, &E::vk_nv_geometry_shader_passthrough}},
1196         {spv::CapabilitySampleMaskOverrideCoverageNV, {VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME, nullptr, &E::vk_nv_sample_mask_override_coverage}},
1197         {spv::CapabilityShaderViewportIndexLayerEXT, {VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, nullptr, &E::vk_ext_shader_viewport_index_layer}},
1198         {spv::CapabilityShaderViewportIndexLayerNV, {VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, nullptr, &E::vk_nv_viewport_array2}},
1199         {spv::CapabilityShaderViewportMaskNV, {VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, nullptr, &E::vk_nv_viewport_array2}},
1200         {spv::CapabilitySubgroupBallotKHR, {VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, nullptr, &E::vk_ext_shader_subgroup_ballot }},
1201         {spv::CapabilitySubgroupVoteKHR, {VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, nullptr, &E::vk_ext_shader_subgroup_vote }},
1202     };
1203     // clang-format on
1204 
1205     for (auto insn : *src) {
1206         if (insn.opcode() == spv::OpCapability) {
1207             size_t n = capabilities.count(insn.word(1));
1208             if (1 == n) {  // key occurs exactly once
1209                 auto it = capabilities.find(insn.word(1));
1210                 if (it != capabilities.end()) {
1211                     if (it->second.feature) {
1212                         skip |= require_feature(report_data, enabledFeatures->*(it->second.feature), it->second.name);
1213                     }
1214                     if (it->second.extension) {
1215                         skip |= require_extension(report_data, extensions->*(it->second.extension), it->second.name);
1216                     }
1217                 }
1218             } else if (1 < n) {  // key occurs multiple times, at least one must be enabled
1219                 bool needs_feature = false, has_feature = false;
1220                 bool needs_ext = false, has_ext = false;
1221                 std::string feature_names = "(one of) [ ";
1222                 std::string extension_names = feature_names;
1223                 auto caps = capabilities.equal_range(insn.word(1));
1224                 for (auto it = caps.first; it != caps.second; ++it) {
1225                     if (it->second.feature) {
1226                         needs_feature = true;
1227                         has_feature = has_feature || enabledFeatures->*(it->second.feature);
1228                         feature_names += it->second.name;
1229                         feature_names += " ";
1230                     }
1231                     if (it->second.extension) {
1232                         needs_ext = true;
1233                         has_ext = has_ext || extensions->*(it->second.extension);
1234                         extension_names += it->second.name;
1235                         extension_names += " ";
1236                     }
1237                 }
1238                 if (needs_feature) {
1239                     feature_names += "]";
1240                     skip |= require_feature(report_data, has_feature, feature_names.c_str());
1241                 }
1242                 if (needs_ext) {
1243                     extension_names += "]";
1244                     skip |= require_extension(report_data, has_ext, extension_names.c_str());
1245                 }
1246             }
1247         }
1248     }
1249 
1250     return skip;
1251 }
1252 
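// Illustrative sketch (application-side code, names hypothetical): validate_shader_capabilities above walks every
// OpCapability instruction in the module and checks it against the table. A module declaring `OpCapability Geometry`,
// for instance, is only legal if VkPhysicalDeviceFeatures::geometryShader was enabled when the device was created:
//
//     VkPhysicalDeviceFeatures enabled_features = {};
//     enabled_features.geometryShader = VK_TRUE;
//
//     VkDeviceCreateInfo device_ci = {};
//     device_ci.sType            = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     device_ci.pEnabledFeatures = &enabled_features;
//     // queues, layers, extensions omitted
//
// Capabilities in the extension column (e.g. SubgroupBallotKHR) instead require the named device extension
// (here VK_EXT_shader_subgroup_ballot) to appear in ppEnabledExtensionNames.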
1253 static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
1254     auto type = module->get_def(type_id);
1255 
1256     while (true) {
1257         switch (type.opcode()) {
1258             case spv::OpTypeArray:
1259             case spv::OpTypeSampledImage:
1260                 type = module->get_def(type.word(2));
1261                 break;
1262             case spv::OpTypePointer:
1263                 type = module->get_def(type.word(3));
1264                 break;
1265             case spv::OpTypeImage: {
1266                 auto dim = type.word(3);
1267                 auto arrayed = type.word(5);
1268                 auto msaa = type.word(6);
1269 
1270                 switch (dim) {
1271                     case spv::Dim1D:
1272                         return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
1273                     case spv::Dim2D:
1274                         return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
1275                                (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
1276                     case spv::Dim3D:
1277                         return DESCRIPTOR_REQ_VIEW_TYPE_3D;
1278                     case spv::DimCube:
1279                         return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
1280                     case spv::DimSubpassData:
1281                         return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
1282                     default:  // buffer, etc.
1283                         return 0;
1284                 }
1285             }
1286             default:
1287                 return 0;
1288         }
1289     }
1290 }
1291 
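// Illustrative sketch: descriptor_type_to_reqs above collapses the dimension, arrayed and MS bits of an OpTypeImage
// into the descriptor_req flags that draw-time validation later checks against the bound VkImageView. For example
// (hypothetical shader, not taken from a real module), a GLSL `sampler2DArray` becomes OpTypeImage Dim=2D Arrayed=1
// MS=0, so the function returns DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY, and the view bound
// to that slot must have been created accordingly:
//
//     VkImageViewCreateInfo view_ci = {};                       // application-side sketch
//     view_ci.sType    = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
//     view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;           // satisfies DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY
//     // image, format, subresourceRange omitted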
1292 // For the given pipelineLayout, verify that the set_layout_node at slot.first
1293 //  has the requested binding at slot.second and return a pointer to that binding
1294 static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout,
1295                                                                   descriptor_slot_t slot) {
1296     if (!pipelineLayout) return nullptr;
1297 
1298     if (slot.first >= pipelineLayout->set_layouts.size()) return nullptr;
1299 
1300     return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
1301 }
1302 
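// Note: descriptor_slot_t is a (set, binding) pair, so a slot of {1, 3} asks set layout 1 of the pipeline layout
// for binding 3; a null return means the shader references a slot the layout does not declare at all.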
1303 static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
1304                                            PIPELINE_STATE *pipeline, shader_module const **out_module,
1305                                            spirv_inst_iter *out_entrypoint) {
1306     bool skip = false;
1307     auto module = *out_module = GetShaderModuleState(dev_data, pStage->module);
1308     auto report_data = GetReportData(dev_data);
1309 
1310     if (!module->has_valid_spirv) return false;
1311 
1312     // Find the entrypoint
1313     auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
1314     if (entrypoint == module->end()) {
1315         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1316                     VALIDATION_ERROR_10600586, "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
1317                     string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_10600586])) {
1318             return true;  // no point continuing beyond here, any analysis is just going to be garbage.
1319         }
1320     }
1321 
1322     // Validate shader capabilities against enabled device features
1323     skip |= validate_shader_capabilities(dev_data, module);
1324 
1325     // Mark accessible ids
1326     auto accessible_ids = mark_accessible_ids(module, entrypoint);
1327 
1328     // Validate descriptor set layout against what the entrypoint actually uses
1329     auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
1330 
1331     skip |= validate_specialization_offsets(report_data, pStage);
1332     skip |= validate_push_constant_usage(report_data, &pipeline->pipeline_layout.push_constant_ranges, module, accessible_ids,
1333                                          pStage->stage);
1334 
1335     // Validate descriptor use
1336     for (auto use : descriptor_uses) {
1337         // While validating shaders, capture which slots are used by the pipeline
1338         auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
1339         reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
1340 
1341         // Verify given pipelineLayout has requested setLayout with requested binding
1342         const auto &binding = get_descriptor_binding(&pipeline->pipeline_layout, use.first);
1343         unsigned required_descriptor_count;
1344 
1345         if (!binding) {
1346             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1347                             SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
1348                             "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
1349                             use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str());
1350         } else if (~binding->stageFlags & pStage->stage) {
1351             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1352                             SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1353                             "Shader uses descriptor slot %u.%u (used as type `%s`) but descriptor not accessible from stage %s",
1354                             use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
1355                             string_VkShaderStageFlagBits(pStage->stage));
1356         } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType, required_descriptor_count)) {
1357             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1358                             SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
1359                             "Type mismatch on descriptor slot %u.%u (used as type `%s`) but descriptor of type %s", use.first.first,
1360                             use.first.second, describe_type(module, use.second.type_id).c_str(),
1361                             string_VkDescriptorType(binding->descriptorType));
1362         } else if (binding->descriptorCount < required_descriptor_count) {
1363             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1364                             SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
1365                             "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
1366                             required_descriptor_count, use.first.first, use.first.second,
1367                             describe_type(module, use.second.type_id).c_str(), binding->descriptorCount);
1368         }
1369     }
1370 
1371     // Validate use of input attachments against subpass structure
1372     if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
1373         auto input_attachment_uses = collect_interface_by_input_attachment_index(module, accessible_ids);
1374 
1375         auto rpci = pipeline->rp_state->createInfo.ptr();
1376         auto subpass = pipeline->graphicsPipelineCI.subpass;
1377 
1378         for (auto use : input_attachment_uses) {
1379             auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
1380             auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount)
1381                              ? input_attachments[use.first].attachment
1382                              : VK_ATTACHMENT_UNUSED;
1383 
1384             if (index == VK_ATTACHMENT_UNUSED) {
1385                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1386                                 SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
1387                                 "Shader consumes input attachment index %d but not provided in subpass", use.first);
1388             } else if (!(get_format_type(rpci->pAttachments[index].format) & get_fundamental_type(module, use.second.type_id))) {
1389                 skip |=
1390                     log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1391                             SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
1392                             "Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first,
1393                             string_VkFormat(rpci->pAttachments[index].format), describe_type(module, use.second.type_id).c_str());
1394             }
1395         }
1396     }
1397 
1398     return skip;
1399 }
1400 
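// Illustrative sketch of what the per-stage checks above catch (names and values hypothetical): if a fragment
// shader consumes a combined image sampler at set=0, binding=2 but the pipeline layout only marks that binding as
// visible to the vertex stage, the SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE path fires. The fix on the
// application side is to include the stage in stageFlags when building the set layout:
//
//     VkDescriptorSetLayoutBinding binding = {};
//     binding.binding         = 2;
//     binding.descriptorType  = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     binding.descriptorCount = 1;
//     binding.stageFlags      = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;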
1401 static bool validate_interface_between_stages(debug_report_data const *report_data, shader_module const *producer,
1402                                               spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1403                                               shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1404                                               shader_stage_attributes const *consumer_stage) {
1405     bool skip = false;
1406 
1407     auto outputs =
1408         collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1409     auto inputs =
1410         collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1411 
1412     auto a_it = outputs.begin();
1413     auto b_it = inputs.begin();
1414 
1415     // Maps sorted by key (location); walk them together to find mismatches
1416     while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1417         bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1418         bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1419         auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1420         auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1421 
1422         if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1423             skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1424                             __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1425                             "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1426                             a_first.second, consumer_stage->name);
1427             a_it++;
1428         } else if (a_at_end || a_first > b_first) {
1429             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1430                             SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "%s consumes input location %u.%u which is not written by %s",
1431                             consumer_stage->name, b_first.first, b_first.second, producer_stage->name);
1432             b_it++;
1433         } else {
1434             // subtleties of arrayed interfaces:
1435             // - if is_patch, then the member is not arrayed, even though the interface may be.
1436             // - if is_block_member, then the extra array level of an arrayed interface is not
1437             //   expressed in the member type -- it's expressed in the block type.
1438             if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1439                              producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1440                              consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, true)) {
1441                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1442                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1443                                 a_first.first, a_first.second, describe_type(producer, a_it->second.type_id).c_str(),
1444                                 describe_type(consumer, b_it->second.type_id).c_str());
1445             }
1446             if (a_it->second.is_patch != b_it->second.is_patch) {
1447                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1448                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1449                                 "Decoration mismatch on location %u.%u: is per-%s in %s stage but per-%s in %s stage",
1450                                 a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1451                                 b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name);
1452             }
1453             if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
1454                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
1455                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1456                                 "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", a_first.first,
1457                                 a_first.second, producer_stage->name, consumer_stage->name);
1458             }
1459             a_it++;
1460             b_it++;
1461         }
1462     }
1463 
1464     return skip;
1465 }
1466 
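// Illustrative note (hypothetical shaders): the walk above compares the producer's outputs with the consumer's
// inputs by (location, component). A vertex stage writing "layout(location = 0) out vec4 color" while the fragment
// stage declares "layout(location = 1) in vec4 color" yields SHADER_CHECKER_INPUT_NOT_PRODUCED for location 1.0 and
// a SHADER_CHECKER_OUTPUT_NOT_CONSUMED performance warning for location 0.0; matching locations whose types differ
// (e.g. vec4 written, ivec4 read) yield SHADER_CHECKER_INTERFACE_TYPE_MISMATCH.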
1467 // Validate the shaders used by the given pipeline and record the descriptor slots
1468 //  they actually use in pipeline->active_slots
1469 bool validate_and_capture_pipeline_shader_state(layer_data *dev_data, PIPELINE_STATE *pipeline) {
1470     auto pCreateInfo = pipeline->graphicsPipelineCI.ptr();
1471     int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
1472     int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
1473     auto report_data = GetReportData(dev_data);
1474 
1475     shader_module const *shaders[5];
1476     memset(shaders, 0, sizeof(shaders));
1477     spirv_inst_iter entrypoints[5];
1478     memset(entrypoints, 0, sizeof(entrypoints));
1479     bool skip = false;
1480 
1481     for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
1482         auto pStage = &pCreateInfo->pStages[i];
1483         auto stage_id = get_shader_stage_id(pStage->stage);
1484         skip |= validate_pipeline_shader_stage(dev_data, pStage, pipeline, &shaders[stage_id], &entrypoints[stage_id]);
1485     }
1486 
1487     // If the shader stages are invalid individually, cross-stage validation is pointless.
1488     if (skip) return true;
1489 
1490     auto vi = pCreateInfo->pVertexInputState;
1491 
1492     if (vi) {
1493         skip |= validate_vi_consistency(report_data, vi);
1494     }
1495 
1496     if (shaders[vertex_stage] && shaders[vertex_stage]->has_valid_spirv) {
1497         skip |= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
1498     }
1499 
1500     int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
1501     int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
1502 
1503     while (!shaders[producer] && producer != fragment_stage) {
1504         producer++;
1505         consumer++;
1506     }
1507 
1508     for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
1509         assert(shaders[producer]);
1510         if (shaders[consumer] && shaders[consumer]->has_valid_spirv && shaders[producer]->has_valid_spirv) {
1511             skip |= validate_interface_between_stages(report_data, shaders[producer], entrypoints[producer],
1512                                                       &shader_stage_attribs[producer], shaders[consumer], entrypoints[consumer],
1513                                                       &shader_stage_attribs[consumer]);
1514 
1515             producer = consumer;
1516         }
1517     }
1518 
1519     if (shaders[fragment_stage] && shaders[fragment_stage]->has_valid_spirv) {
1520         skip |= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage], pipeline,
1521                                                         pCreateInfo->subpass);
1522     }
1523 
1524     return skip;
1525 }
1526 
1527 bool validate_compute_pipeline(layer_data *dev_data, PIPELINE_STATE *pipeline) {
1528     auto pCreateInfo = pipeline->computePipelineCI.ptr();
1529 
1530     shader_module const *module;
1531     spirv_inst_iter entrypoint;
1532 
1533     return validate_pipeline_shader_stage(dev_data, &pCreateInfo->stage, pipeline, &module, &entrypoint);
1534 }
1535 
1536 uint32_t ValidationCache::MakeShaderHash(VkShaderModuleCreateInfo const *smci) { return XXH32(smci->pCode, smci->codeSize, 0); }
1537 
1538 static ValidationCache *GetValidationCacheInfo(VkShaderModuleCreateInfo const *pCreateInfo) {
1539     while ((pCreateInfo = (VkShaderModuleCreateInfo const *)pCreateInfo->pNext) != nullptr) {
1540         if (pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT)
1541             return (ValidationCache *)((VkShaderModuleValidationCacheCreateInfoEXT const *)pCreateInfo)->validationCache;
1542     }
1543 
1544     return nullptr;
1545 }
1546 
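// Illustrative sketch (application-side code, handle creation omitted, names hypothetical): GetValidationCacheInfo
// above walks the pNext chain of the shader module create info looking for a validation cache supplied by the app:
//
//     VkShaderModuleValidationCacheCreateInfoEXT cache_info = {};
//     cache_info.sType           = VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT;
//     cache_info.validationCache = my_validation_cache;   // VkValidationCacheEXT from vkCreateValidationCacheEXT
//
//     VkShaderModuleCreateInfo module_ci = {};
//     module_ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
//     module_ci.pNext = &cache_info;
//     // codeSize / pCode filled in as usual
//
// When a cache is present, PreCallValidateCreateShaderModule below can skip SPIRV-Tools validation for modules
// whose hash it has already seen.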
1547 bool PreCallValidateCreateShaderModule(layer_data *dev_data, VkShaderModuleCreateInfo const *pCreateInfo, bool *spirv_valid) {
1548     bool skip = false;
1549     spv_result_t spv_valid = SPV_SUCCESS;
1550     auto report_data = GetReportData(dev_data);
1551 
1552     if (GetDisables(dev_data)->shader_validation) {
1553         return false;
1554     }
1555 
1556     auto have_glsl_shader = GetEnabledExtensions(dev_data)->vk_nv_glsl_shader;
1557 
1558     if (!have_glsl_shader && (pCreateInfo->codeSize % 4)) {
1559         skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
1560                         VALIDATION_ERROR_12a00ac0, "SC",
1561                         "SPIR-V module not valid: codeSize must be a multiple of 4 but is " PRINTF_SIZE_T_SPECIFIER ". %s",
1562                         pCreateInfo->codeSize, validation_error_map[VALIDATION_ERROR_12a00ac0]);
1563     } else {
1564         auto cache = GetValidationCacheInfo(pCreateInfo);
1565         uint32_t hash = 0;
1566         if (cache) {
1567             hash = ValidationCache::MakeShaderHash(pCreateInfo);
1568             if (cache->Contains(hash)) return false;
1569         }
1570 
1571         // Use SPIRV-Tools validator to try and catch any issues with the module itself
1572         spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
1573         spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
1574         spv_diagnostic diag = nullptr;
1575 
1576         spv_valid = spvValidate(ctx, &binary, &diag);
1577         if (spv_valid != SPV_SUCCESS) {
1578             if (!have_glsl_shader || (pCreateInfo->pCode[0] == spv::MagicNumber)) {
1579                 skip |=
1580                     log_msg(report_data, spv_valid == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
1581                             VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1582                             "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
1583             }
1584         } else {
1585             if (cache) {
1586                 cache->Insert(hash);
1587             }
1588         }
1589 
1590         spvDiagnosticDestroy(diag);
1591         spvContextDestroy(ctx);
1592     }
1593 
1594     *spirv_valid = (spv_valid == SPV_SUCCESS);
1595     return skip;
1596 }
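// Illustrative note on the codeSize check above: VkShaderModuleCreateInfo::codeSize is a byte count, while the
// spv_const_binary_t handed to spvValidate wants a word count, hence the divide by sizeof(uint32_t). A correctly
// filled create info (application-side sketch, names hypothetical) looks like:
//
//     std::vector<uint32_t> spirv = /* loaded SPIR-V words */;
//     VkShaderModuleCreateInfo ci = {};
//     ci.sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
//     ci.codeSize = spirv.size() * sizeof(uint32_t);   // bytes, must be a multiple of 4
//     ci.pCode    = spirv.data();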
1597