1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Connor Abbott (cwabbott0@gmail.com)
25  *
26  */
27 
28 #include <assert.h>
29 #include "c11/threads.h"
30 #include "util/hash_table.h"
31 #include "util/simple_mtx.h"
32 #include "nir.h"
33 #include "nir_xfb_info.h"
34 
35 /*
36  * This file checks for invalid IR indicating a bug somewhere in the compiler.
37  */
38 
39 /* Since this file is just a pile of asserts, don't bother compiling it if
40  * we're not building a debug build.
41  */
42 #ifndef NDEBUG
43 
44 typedef struct {
45    void *mem_ctx;
46 
47    /* the current shader being validated */
48    nir_shader *shader;
49 
50    /* the current instruction being validated */
51    nir_instr *instr;
52 
53    /* the current variable being validated */
54    nir_variable *var;
55 
56    /* the current basic block being validated */
57    nir_block *block;
58 
59    /* the current if statement being validated */
60    nir_if *if_stmt;
61 
62    /* the current loop being visited */
63    nir_loop *loop;
64 
65    /* whether the loop continue construct is being visited */
66    bool in_loop_continue_construct;
67 
68    /* the parent of the current cf node being visited */
69    nir_cf_node *parent_node;
70 
71    /* the current function implementation being validated */
72    nir_function_impl *impl;
73 
74    /* Set of all blocks in the list */
75    struct set *blocks;
76 
77    /* Number of tagged nir_src's. This is implicitly the cardinality of the set
78     * of pending nir_src's.
79     */
80    uint32_t nr_tagged_srcs;
81 
82    /* bitset of ssa definitions we have found; used to check uniqueness */
83    BITSET_WORD *ssa_defs_found;
84 
85    /* map of variable -> function implementation where it is defined or NULL
86     * if it is a global variable
87     */
88    struct hash_table *var_defs;
89 
90    /* map of instruction/var/etc to failed assert string */
91    struct hash_table *errors;
92 } validate_state;
93 
94 static void
95 log_error(validate_state *state, const char *cond, const char *file, int line)
96 {
97    const void *obj;
98 
99    if (state->instr)
100       obj = state->instr;
101    else if (state->var)
102       obj = state->var;
103    else
104       obj = cond;
105 
106    char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
107                                cond, file, line);
108 
109    _mesa_hash_table_insert(state->errors, obj, msg);
110 }
111 
112 static bool
113 validate_assert_impl(validate_state *state, bool cond, const char *str,
114                      const char *file, unsigned line)
115 {
116    if (unlikely(!cond))
117       log_error(state, str, file, line);
118    return cond;
119 }
120 
121 #define validate_assert(state, cond) \
122    validate_assert_impl(state, (cond), #cond, __FILE__, __LINE__)
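
/* Illustrative use (hypothetical caller): validate_assert() records the
 * failure and returns the condition instead of aborting, so validators can
 * bail out of follow-up checks that would crash on malformed IR:
 *
 *    if (!validate_assert(state, deref != NULL))
 *       return;
 */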
123 
124 static void
125 validate_num_components(validate_state *state, unsigned num_components)
126 {
127    validate_assert(state, nir_num_components_valid(num_components));
128 }
129 
130 /* Tag used in nir_src::_parent to indicate that a source has been seen. */
131 #define SRC_TAG_SEEN (0x2)
132 
133 static_assert(SRC_TAG_SEEN == (~NIR_SRC_PARENT_MASK + 1),
134               "Parent pointer tags chosen not to collide");
135 
136 static void
137 tag_src(nir_src *src, validate_state *state)
138 {
139    /* Each nir_src must appear exactly once, in exactly one SSA def's use
140     * list; we check this by tagging the parent pointer as we go.
141     */
142    if (validate_assert(state, (src->_parent & SRC_TAG_SEEN) == 0)) {
143       src->_parent |= SRC_TAG_SEEN;
144       state->nr_tagged_srcs++;
145    }
146 }
147 
148 /* Due to tagging, it's not safe to use nir_src_parent_instr during the main
149  * validate loop. This is a tagging-aware version.
150  */
151 static nir_instr *
152 src_parent_instr_safe(nir_src *src)
153 {
154    uintptr_t untagged = (src->_parent & ~SRC_TAG_SEEN);
155    assert(!(untagged & NIR_SRC_PARENT_IS_IF) && "precondition");
156    return (nir_instr *)untagged;
157 }
158 
159 /*
160  * As we walk SSA defs, we mark every use as seen by tagging its parent
161  * pointer; the tag proves the use really sits in some def's use list.
162  *
163  * When we later reach the source itself, we check the tag and clear it.
164  * This lets us prove that every source has been seen exactly once.
165  */
166 static void
167 validate_src_tag(nir_src *src, validate_state *state)
168 {
169    if (validate_assert(state, src->_parent & SRC_TAG_SEEN)) {
170       src->_parent &= ~SRC_TAG_SEEN;
171       state->nr_tagged_srcs--;
172    }
173 }
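
/* A note on the bookkeeping above: tag_src() increments nr_tagged_srcs for
 * every use reached from a def, and validate_src_tag() decrements it for
 * every source reached from an instruction or if-statement. On a consistent
 * shader the counter therefore returns to zero once the whole impl has been
 * walked, proving the use lists and the sources agree without building an
 * explicit set.
 */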
174 
175 static void
176 validate_if_src(nir_src *src, validate_state *state)
177 {
178    validate_src_tag(src, state);
179    validate_assert(state, nir_src_parent_if(src) == state->if_stmt);
180    validate_assert(state, src->ssa != NULL);
181    validate_assert(state, src->ssa->num_components == 1);
182 }
183 
184 static void
185 validate_src(nir_src *src, validate_state *state)
186 {
187    /* Validate the tag first, so that nir_src_parent_instr is valid */
188    validate_src_tag(src, state);
189 
190    /* The source is assumed to belong to an instruction; for if conditions, use validate_if_src. */
191    validate_assert(state, nir_src_parent_instr(src) == state->instr);
192 
193    validate_assert(state, src->ssa != NULL);
194 }
195 
196 static void
197 validate_sized_src(nir_src *src, validate_state *state,
198                    unsigned bit_sizes, unsigned num_components)
199 {
200    validate_src(src, state);
201 
202    if (bit_sizes)
203       validate_assert(state, src->ssa->bit_size & bit_sizes);
204    if (num_components)
205       validate_assert(state, src->ssa->num_components == num_components);
206 }
207 
208 static void
209 validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
210 {
211    nir_alu_src *src = &instr->src[index];
212 
213    unsigned num_instr_channels = nir_ssa_alu_instr_src_components(instr, index);
214    unsigned num_components = nir_src_num_components(src->src);
215 
216    for (unsigned i = 0; i < num_instr_channels; i++) {
217       validate_assert(state, src->swizzle[i] < num_components);
218    }
219 
220    validate_src(&src->src, state);
221 }
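
/* Worked example of the swizzle check above (illustrative values): if a
 * 4-channel ALU op reads a vec2 source, num_components is 2, so the
 * swizzle .xyxy (indices 0,1,0,1) validates while .xyzw fails on the 'z'
 * channel (index 2 >= 2).
 */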
222 
223 static void
224 validate_def(nir_def *def, validate_state *state)
225 {
226    validate_assert(state, def->index < state->impl->ssa_alloc);
227    validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
228    BITSET_SET(state->ssa_defs_found, def->index);
229 
230    validate_assert(state, def->parent_instr == state->instr);
231    validate_num_components(state, def->num_components);
232 
233    list_validate(&def->uses);
234    nir_foreach_use_including_if(src, def) {
235       /* Check that the def matches. */
236       validate_assert(state, src->ssa == def);
237 
238       /* Check that nir_src's are unique */
239       tag_src(src, state);
240    }
241 }
242 
243 static void
244 validate_alu_instr(nir_alu_instr *instr, validate_state *state)
245 {
246    validate_assert(state, instr->op < nir_num_opcodes);
247 
248    unsigned instr_bit_size = 0;
249    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
250       nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
251       unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
252       if (nir_alu_type_get_type_size(src_type)) {
253          validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
254       } else if (instr_bit_size) {
255          validate_assert(state, src_bit_size == instr_bit_size);
256       } else {
257          instr_bit_size = src_bit_size;
258       }
259 
260       if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
261          /* 8-bit float isn't a thing */
262          validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
263                                    src_bit_size == 64);
264       }
265 
266       /* In nir_opcodes.py, these are defined to take general uint or int
267        * sources.  However, they're really only defined for 32-bit or 64-bit
268        * sources.  This seems to be the only place to enforce this
269        * restriction.
270        */
271       switch (instr->op) {
272       case nir_op_ufind_msb:
273       case nir_op_ufind_msb_rev:
274          validate_assert(state, src_bit_size == 32 || src_bit_size == 64);
275          break;
276 
277       default:
278          break;
279       }
280 
281       validate_alu_src(instr, i, state);
282    }
283 
284    nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
285    unsigned dest_bit_size = instr->def.bit_size;
286    if (nir_alu_type_get_type_size(dest_type)) {
287       validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
288    } else if (instr_bit_size) {
289       validate_assert(state, dest_bit_size == instr_bit_size);
290    } else {
291       /* The only unsized thing is the destination so it's vacuously valid */
292    }
293 
294    if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
295       /* 8-bit float isn't a thing */
296       validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
297                                 dest_bit_size == 64);
298    }
299 
300    validate_def(&instr->def, state);
301 }
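
/* Example of the bit-size inference above (illustrative): nir_op_iadd has
 * unsized integer inputs, so a 32-bit first source fixes instr_bit_size to
 * 32 and the remaining source plus the destination must also be 32-bit.
 * For an opcode whose output type is sized (e.g. a 1-bit boolean
 * comparison), the destination is checked against that fixed size instead.
 */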
302 
303 static void
304 validate_var_use(nir_variable *var, validate_state *state)
305 {
306    struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
307    validate_assert(state, entry);
308    if (entry && var->data.mode == nir_var_function_temp)
309       validate_assert(state, (nir_function_impl *)entry->data == state->impl);
310 }
311 
312 static void
313 validate_deref_instr(nir_deref_instr *instr, validate_state *state)
314 {
315    if (instr->deref_type == nir_deref_type_var) {
316       /* Variable dereferences are stupid simple. */
317       validate_assert(state, instr->modes == instr->var->data.mode);
318       validate_assert(state, instr->type == instr->var->type);
319       validate_var_use(instr->var, state);
320    } else if (instr->deref_type == nir_deref_type_cast) {
321       /* For cast, we simply have to trust the instruction.  It's up to
322        * lowering passes and front/back-ends to make them sane.
323        */
324       validate_src(&instr->parent, state);
325 
326       /* Most variable modes in NIR can only exist by themselves. */
327       if (instr->modes & ~nir_var_mem_generic)
328          validate_assert(state, util_bitcount(instr->modes) == 1);
329 
330       nir_deref_instr *parent = nir_src_as_deref(instr->parent);
331       if (parent) {
332          /* A cast can change the mode, but not completely: the new mode
333           * must have some bits in common with the old one.
334           */
335          validate_assert(state, instr->modes & parent->modes);
336       } else {
337          /* If our parent isn't a deref, just assert the mode is there */
338          validate_assert(state, instr->modes != 0);
339       }
340 
341       /* We just validate that the type is there */
342       validate_assert(state, instr->type);
343       if (instr->cast.align_mul > 0) {
344          validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));
345          validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);
346       } else {
347          validate_assert(state, instr->cast.align_offset == 0);
348       }
349    } else {
350       /* The parent pointer value must have the same number of components
351        * as the destination.
352        */
353       validate_sized_src(&instr->parent, state, instr->def.bit_size,
354                          instr->def.num_components);
355 
356       nir_instr *parent_instr = instr->parent.ssa->parent_instr;
357 
358       /* The parent must come from another deref instruction */
359       validate_assert(state, parent_instr->type == nir_instr_type_deref);
360 
361       nir_deref_instr *parent = nir_instr_as_deref(parent_instr);
362 
363       validate_assert(state, instr->modes == parent->modes);
364 
365       switch (instr->deref_type) {
366       case nir_deref_type_struct:
367          validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
368          validate_assert(state,
369                          instr->strct.index < glsl_get_length(parent->type));
370          validate_assert(state, instr->type ==
371                                    glsl_get_struct_field(parent->type, instr->strct.index));
372          break;
373 
374       case nir_deref_type_array:
375       case nir_deref_type_array_wildcard:
376          if (instr->modes & nir_var_vec_indexable_modes) {
377             /* Shared variables and UBO/SSBOs have a bit more relaxed rules
378              * because we need to be able to handle array derefs on vectors.
379              * Fortunately, nir_lower_io handles these just fine.
380              */
381             validate_assert(state, glsl_type_is_array(parent->type) ||
382                                       glsl_type_is_matrix(parent->type) ||
383                                       glsl_type_is_vector(parent->type));
384          } else {
385             /* Most of NIR cannot handle array derefs on vectors */
386             validate_assert(state, glsl_type_is_array(parent->type) ||
387                                       glsl_type_is_matrix(parent->type));
388          }
389          validate_assert(state,
390                          instr->type == glsl_get_array_element(parent->type));
391 
392          if (instr->deref_type == nir_deref_type_array) {
393             validate_sized_src(&instr->arr.index, state,
394                                instr->def.bit_size, 1);
395          }
396          break;
397 
398       case nir_deref_type_ptr_as_array:
399          /* ptr_as_array derefs must have a parent that is either an array,
400           * ptr_as_array, or cast.  If the parent is a cast, we get the stride
401           * information (if any) from the cast deref.
402           */
403          validate_assert(state,
404                          parent->deref_type == nir_deref_type_array ||
405                             parent->deref_type == nir_deref_type_ptr_as_array ||
406                             parent->deref_type == nir_deref_type_cast);
407          validate_sized_src(&instr->arr.index, state,
408                             instr->def.bit_size, 1);
409          break;
410 
411       default:
412          unreachable("Invalid deref instruction type");
413       }
414    }
415 
416    /* We intentionally don't validate the size of the destination because
417     * we want to let other compiler components, such as the SPIR-V
418     * front-end, decide how big pointers should be.
419     */
420    validate_def(&instr->def, state);
421 
422    /* Certain modes cannot be used as sources for phi instructions because
423     * way too many passes assume that they can always chase deref chains.
424     */
425    nir_foreach_use_including_if(use, &instr->def) {
426       /* Deref instructions as if conditions don't make sense because if
427        * conditions expect well-formed Booleans.  If you want to compare with
428        * NULL, an explicit comparison operation should be used.
429        */
430       if (!validate_assert(state, !nir_src_is_if(use)))
431          continue;
432 
433       if (src_parent_instr_safe(use)->type == nir_instr_type_phi) {
434          validate_assert(state, !(instr->modes &
435                                   (nir_var_shader_in |
436                                    nir_var_shader_out |
437                                    nir_var_uniform)));
438       }
439    }
440 }
441 
442 static bool
443 vectorized_intrinsic(nir_intrinsic_instr *intr)
444 {
445    const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
446 
447    if (info->dest_components == 0)
448       return true;
449 
450    for (unsigned i = 0; i < info->num_srcs; i++)
451       if (info->src_components[i] == 0)
452          return true;
453 
454    return false;
455 }
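
/* Convention note: an intrinsic counts as "vectorized" when its destination
 * or any source is declared with 0 components in nir_intrinsic_infos, i.e.
 * the width comes from instr->num_components. Fully-sized intrinsics must
 * leave num_components at 0, which validate_intrinsic_instr() asserts
 * below.
 */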
456 
457 /** Returns the image format or PIPE_FORMAT_COUNT for incomplete derefs
458  *
459  * We use PIPE_FORMAT_COUNT for incomplete derefs because PIPE_FORMAT_NONE
460  * indicates that we found the variable but it has no format specified.
461  */
462 static enum pipe_format
463 image_intrin_format(nir_intrinsic_instr *instr)
464 {
465    if (nir_intrinsic_format(instr) != PIPE_FORMAT_NONE)
466       return nir_intrinsic_format(instr);
467 
468    /* If this is not a deref intrinsic, PIPE_FORMAT_NONE is the best we can do */
469    if (nir_intrinsic_infos[instr->intrinsic].src_components[0] != -1)
470       return PIPE_FORMAT_NONE;
471 
472    nir_variable *var = nir_intrinsic_get_var(instr, 0);
473    if (var == NULL)
474       return PIPE_FORMAT_COUNT;
475 
476    return var->data.image.format;
477 }
478 
479 static void
480 validate_register_handle(nir_src handle_src,
481                          unsigned num_components,
482                          unsigned bit_size,
483                          validate_state *state)
484 {
485    nir_def *handle = handle_src.ssa;
486    nir_instr *parent = handle->parent_instr;
487 
488    if (!validate_assert(state, parent->type == nir_instr_type_intrinsic))
489       return;
490 
491    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(parent);
492    if (!validate_assert(state, intr->intrinsic == nir_intrinsic_decl_reg))
493       return;
494 
495    validate_assert(state, nir_intrinsic_num_components(intr) == num_components);
496    validate_assert(state, nir_intrinsic_bit_size(intr) == bit_size);
497 }
498 
499 static void
500 validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
501 {
502    unsigned dest_bit_size = 0;
503    unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = {
504       0,
505    };
506    switch (instr->intrinsic) {
507    case nir_intrinsic_decl_reg:
508       assert(state->block == nir_start_block(state->impl));
509       break;
510 
511    case nir_intrinsic_load_reg:
512    case nir_intrinsic_load_reg_indirect:
513       validate_register_handle(instr->src[0],
514                                instr->def.num_components,
515                                instr->def.bit_size, state);
516       break;
517 
518    case nir_intrinsic_store_reg:
519    case nir_intrinsic_store_reg_indirect:
520       validate_register_handle(instr->src[1],
521                                nir_src_num_components(instr->src[0]),
522                                nir_src_bit_size(instr->src[0]), state);
523       break;
524 
525    case nir_intrinsic_convert_alu_types: {
526       nir_alu_type src_type = nir_intrinsic_src_type(instr);
527       nir_alu_type dest_type = nir_intrinsic_dest_type(instr);
528       dest_bit_size = nir_alu_type_get_type_size(dest_type);
529       src_bit_sizes[0] = nir_alu_type_get_type_size(src_type);
530       validate_assert(state, dest_bit_size != 0);
531       validate_assert(state, src_bit_sizes[0] != 0);
532       break;
533    }
534 
535    case nir_intrinsic_load_param: {
536       unsigned param_idx = nir_intrinsic_param_idx(instr);
537       validate_assert(state, param_idx < state->impl->function->num_params);
538       nir_parameter *param = &state->impl->function->params[param_idx];
539       validate_assert(state, instr->num_components == param->num_components);
540       dest_bit_size = param->bit_size;
541       break;
542    }
543 
544    case nir_intrinsic_load_deref: {
545       nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
546       assert(src);
547       validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
548                                 (src->modes == nir_var_uniform &&
549                                  glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
550       validate_assert(state, instr->num_components ==
551                                 glsl_get_vector_elements(src->type));
552       dest_bit_size = glsl_get_bit_size(src->type);
553       /* Also allow 32-bit boolean load operations */
554       if (glsl_type_is_boolean(src->type))
555          dest_bit_size |= 32;
556       break;
557    }
558 
559    case nir_intrinsic_store_deref: {
560       nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
561       assert(dst);
562       validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
563       validate_assert(state, instr->num_components ==
564                                 glsl_get_vector_elements(dst->type));
565       src_bit_sizes[1] = glsl_get_bit_size(dst->type);
566       /* Also allow 32-bit boolean store operations */
567       if (glsl_type_is_boolean(dst->type))
568          src_bit_sizes[1] |= 32;
569       validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
570       break;
571    }
572 
573    case nir_intrinsic_copy_deref: {
574       nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
575       nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
576       validate_assert(state, glsl_get_bare_type(dst->type) ==
577                                 glsl_get_bare_type(src->type));
578       validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
579       /* FIXME: now that we track whether the var copies were lowered, it
580        * would be good to validate here that no new copy derefs were added.
581        * Right now we can't, as there are specific cases where copies are
582        * added even after lowering. One example is the Intel compiler, which
583        * calls nir_lower_io_to_temporaries when linking some shader stages.
584        */
585       break;
586    }
587 
588    case nir_intrinsic_load_ubo_vec4: {
589       int bit_size = instr->def.bit_size;
590       validate_assert(state, bit_size >= 8);
591       validate_assert(state, (nir_intrinsic_component(instr) +
592                               instr->num_components) *
593                                    (bit_size / 8) <=
594                                 16);
595       break;
596    }
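
   /* Worked example of the vec4 bound above (illustrative values): loading
    * 4 components of 32 bits starting at component 0 covers (0 + 4) * 4 =
    * 16 bytes and passes, while the same load starting at component 1
    * would cover (1 + 4) * 4 = 20 bytes and overflow the 16-byte slot.
    */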
597 
598    case nir_intrinsic_load_ubo:
599       /* Make sure that the creator didn't forget to set the range_base+range. */
600       validate_assert(state, nir_intrinsic_range(instr) != 0);
601       FALLTHROUGH;
602    case nir_intrinsic_load_ssbo:
603    case nir_intrinsic_load_shared:
604    case nir_intrinsic_load_global:
605    case nir_intrinsic_load_global_constant:
606    case nir_intrinsic_load_scratch:
607    case nir_intrinsic_load_constant:
608       /* These memory load operations must have alignments */
609       validate_assert(state,
610                       util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
611       validate_assert(state, nir_intrinsic_align_offset(instr) <
612                                 nir_intrinsic_align_mul(instr));
613       FALLTHROUGH;
614 
615    case nir_intrinsic_load_uniform:
616    case nir_intrinsic_load_input:
617    case nir_intrinsic_load_per_primitive_input:
618    case nir_intrinsic_load_per_vertex_input:
619    case nir_intrinsic_load_interpolated_input:
620    case nir_intrinsic_load_output:
621    case nir_intrinsic_load_per_vertex_output:
622    case nir_intrinsic_load_per_view_output:
623    case nir_intrinsic_load_per_primitive_output:
624    case nir_intrinsic_load_push_constant:
625    case nir_intrinsic_load_attribute_pan:
626       /* All memory load operations must load at least a byte */
627       validate_assert(state, instr->def.bit_size >= 8);
628       break;
629 
630    case nir_intrinsic_load_barycentric_pixel:
631    case nir_intrinsic_load_barycentric_centroid:
632    case nir_intrinsic_load_barycentric_sample:
633    case nir_intrinsic_load_barycentric_at_offset:
634    case nir_intrinsic_load_barycentric_at_sample: {
635       enum glsl_interp_mode mode = nir_intrinsic_interp_mode(instr);
636       validate_assert(state,
637                       mode == INTERP_MODE_NONE ||
638                       mode == INTERP_MODE_SMOOTH ||
639                       mode == INTERP_MODE_NOPERSPECTIVE);
640       break;
641    }
642 
643    case nir_intrinsic_store_ssbo:
644    case nir_intrinsic_store_shared:
645    case nir_intrinsic_store_global:
646    case nir_intrinsic_store_scratch:
647       /* These memory store operations must also have alignments */
648       validate_assert(state,
649                       util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
650       validate_assert(state, nir_intrinsic_align_offset(instr) <
651                                 nir_intrinsic_align_mul(instr));
652       /* All memory store operations must store at least a byte */
653       validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
654       break;
655 
656    case nir_intrinsic_store_output:
657    case nir_intrinsic_store_per_vertex_output:
658    case nir_intrinsic_store_per_view_output:
659       if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
660          validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
661       else
662          validate_assert(state, nir_src_bit_size(instr->src[0]) >= 16);
663       validate_assert(state,
664                       nir_src_bit_size(instr->src[0]) ==
665                       nir_alu_type_get_type_size(nir_intrinsic_src_type(instr)));
666       break;
667 
668    case nir_intrinsic_deref_mode_is:
669    case nir_intrinsic_addr_mode_is:
670       validate_assert(state,
671                       util_bitcount(nir_intrinsic_memory_modes(instr)) == 1);
672       break;
673 
674    case nir_intrinsic_image_deref_atomic:
675    case nir_intrinsic_image_deref_atomic_swap:
676    case nir_intrinsic_bindless_image_atomic:
677    case nir_intrinsic_bindless_image_atomic_swap:
678    case nir_intrinsic_image_atomic:
679    case nir_intrinsic_image_atomic_swap: {
680       nir_atomic_op op = nir_intrinsic_atomic_op(instr);
681 
682       enum pipe_format format = image_intrin_format(instr);
683       if (format != PIPE_FORMAT_COUNT) {
684          bool allowed = false;
685          bool is_float = (nir_atomic_op_type(op) == nir_type_float);
686 
687          switch (format) {
688          case PIPE_FORMAT_R32_FLOAT:
689             allowed = is_float || op == nir_atomic_op_xchg;
690             break;
691          case PIPE_FORMAT_R16_FLOAT:
692          case PIPE_FORMAT_R64_FLOAT:
693             allowed = op == nir_atomic_op_fmin || op == nir_atomic_op_fmax;
694             break;
695          case PIPE_FORMAT_R32_UINT:
696          case PIPE_FORMAT_R32_SINT:
697          case PIPE_FORMAT_R64_UINT:
698          case PIPE_FORMAT_R64_SINT:
699             allowed = !is_float;
700             break;
701          default:
702             break;
703          }
704 
705          validate_assert(state, allowed);
706          validate_assert(state, instr->def.bit_size ==
707                                    util_format_get_blocksizebits(format));
708       }
709       break;
710    }
711 
712    case nir_intrinsic_store_buffer_amd:
713       if (nir_intrinsic_access(instr) & ACCESS_USES_FORMAT_AMD) {
714          unsigned writemask = nir_intrinsic_write_mask(instr);
715 
716          /* Make sure the writemask is derived from the component count. */
717          validate_assert(state,
718                          writemask ==
719                             BITFIELD_MASK(nir_src_num_components(instr->src[0])));
720       }
721       break;
722 
723    default:
724       break;
725    }
726 
727    if (instr->num_components > 0)
728       validate_num_components(state, instr->num_components);
729 
730    const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
731    unsigned num_srcs = info->num_srcs;
732    for (unsigned i = 0; i < num_srcs; i++) {
733       unsigned components_read = nir_intrinsic_src_components(instr, i);
734 
735       validate_num_components(state, components_read);
736 
737       validate_sized_src(&instr->src[i], state, src_bit_sizes[i], components_read);
738    }
739 
740    if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
741       unsigned components_written = nir_intrinsic_dest_components(instr);
742       unsigned bit_sizes = info->dest_bit_sizes;
743       if (!bit_sizes && info->bit_size_src >= 0)
744          bit_sizes = nir_src_bit_size(instr->src[info->bit_size_src]);
745 
746       validate_num_components(state, components_written);
747       if (dest_bit_size && bit_sizes)
748          validate_assert(state, dest_bit_size & bit_sizes);
749       else
750          dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;
751 
752       validate_def(&instr->def, state);
753       validate_assert(state, instr->def.num_components == components_written);
754 
755       if (dest_bit_size)
756          validate_assert(state, instr->def.bit_size & dest_bit_size);
757    }
758 
759    if (!vectorized_intrinsic(instr))
760       validate_assert(state, instr->num_components == 0);
761 
762    if (nir_intrinsic_has_write_mask(instr)) {
763       unsigned component_mask = BITFIELD_MASK(instr->num_components);
764       validate_assert(state, (nir_intrinsic_write_mask(instr) & ~component_mask) == 0);
765    }
766 
767    if (nir_intrinsic_has_io_xfb(instr)) {
768       unsigned used_mask = 0;
769 
770       for (unsigned i = 0; i < 4; i++) {
771          nir_io_xfb xfb = i < 2 ? nir_intrinsic_io_xfb(instr) : nir_intrinsic_io_xfb2(instr);
772          unsigned xfb_mask = BITFIELD_RANGE(i, xfb.out[i % 2].num_components);
773 
774          /* Each component can be used only once by transform feedback info. */
775          validate_assert(state, (xfb_mask & used_mask) == 0);
776          used_mask |= xfb_mask;
777       }
778    }
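
   /* Illustrative numbers for the overlap check above: an entry with
    * out[0].num_components = 2 at i = 0 yields xfb_mask =
    * BITFIELD_RANGE(0, 2) = 0b0011; a second entry at i = 1 covering two
    * components would yield 0b0110, overlap in bit 1, and fail.
    */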
779 
780    if (nir_intrinsic_has_io_semantics(instr) &&
781        !nir_intrinsic_infos[instr->intrinsic].has_dest) {
782       nir_io_semantics sem = nir_intrinsic_io_semantics(instr);
783 
784       /* An output that has no effect shouldn't be present in the IR. */
785       validate_assert(state,
786                       (nir_slot_is_sysval_output(sem.location, MESA_SHADER_NONE) &&
787                        !sem.no_sysval_output) ||
788                       (nir_slot_is_varying(sem.location, MESA_SHADER_NONE) &&
789                        !sem.no_varying) ||
790                       nir_instr_xfb_write_mask(instr) ||
791                       /* TCS can set no_varying and no_sysval_output, meaning
792                        * that the output is only read by TCS and not TES.
793                        */
794                       state->shader->info.stage == MESA_SHADER_TESS_CTRL);
795       validate_assert(state,
796                       (!sem.dual_source_blend_index &&
797                        !sem.fb_fetch_output &&
798                        !sem.fb_fetch_output_coherent) ||
799                       state->shader->info.stage == MESA_SHADER_FRAGMENT);
800       validate_assert(state,
801                       !sem.gs_streams ||
802                       state->shader->info.stage == MESA_SHADER_GEOMETRY);
803       validate_assert(state,
804                       !sem.high_dvec2 ||
805                       (state->shader->info.stage == MESA_SHADER_VERTEX &&
806                        instr->intrinsic == nir_intrinsic_load_input));
807       validate_assert(state,
808                       !sem.interp_explicit_strict ||
809                       (state->shader->info.stage == MESA_SHADER_FRAGMENT &&
810                        instr->intrinsic == nir_intrinsic_load_input_vertex));
811    }
812 }
813 
814 static void
815 validate_tex_src_texture_deref(nir_tex_instr *instr, validate_state *state,
816                                nir_deref_instr *deref)
817 {
818    validate_assert(state, glsl_type_is_image(deref->type) ||
819                              glsl_type_is_texture(deref->type) ||
820                              glsl_type_is_sampler(deref->type));
821 
822    switch (instr->op) {
823    case nir_texop_descriptor_amd:
824    case nir_texop_sampler_descriptor_amd:
825    case nir_texop_custom_border_color_agx:
826       break;
827    case nir_texop_lod:
828    case nir_texop_lod_bias_agx:
829       validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_float);
830       break;
831    case nir_texop_samples_identical:
832    case nir_texop_has_custom_border_color_agx:
833       validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_bool);
834       break;
835    case nir_texop_txs:
836    case nir_texop_texture_samples:
837    case nir_texop_query_levels:
838    case nir_texop_fragment_mask_fetch_amd:
839    case nir_texop_txf_ms_mcs_intel:
840       validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_int ||
841                              nir_alu_type_get_base_type(instr->dest_type) == nir_type_uint);
842       break;
843    default:
844       validate_assert(state,
845                       glsl_get_sampler_result_type(deref->type) == GLSL_TYPE_VOID ||
846                       glsl_base_type_is_integer(glsl_get_sampler_result_type(deref->type)) ==
847                          (nir_alu_type_get_base_type(instr->dest_type) == nir_type_int ||
848                           nir_alu_type_get_base_type(instr->dest_type) == nir_type_uint));
849    }
850 }
851 
852 static void
853 validate_tex_instr(nir_tex_instr *instr, validate_state *state)
854 {
855    bool src_type_seen[nir_num_tex_src_types];
856    for (unsigned i = 0; i < nir_num_tex_src_types; i++)
857       src_type_seen[i] = false;
858 
859    for (unsigned i = 0; i < instr->num_srcs; i++) {
860       validate_assert(state, !src_type_seen[instr->src[i].src_type]);
861       src_type_seen[instr->src[i].src_type] = true;
862       validate_sized_src(&instr->src[i].src, state,
863                          0, nir_tex_instr_src_size(instr, i));
864 
865       switch (instr->src[i].src_type) {
866 
867       case nir_tex_src_comparator:
868          validate_assert(state, instr->is_shadow);
869          break;
870 
871       case nir_tex_src_bias:
872          validate_assert(state, instr->op == nir_texop_txb ||
873                                    instr->op == nir_texop_tg4 ||
874                                    instr->op == nir_texop_lod);
875          break;
876 
877       case nir_tex_src_lod:
878          validate_assert(state, instr->op != nir_texop_tex &&
879                                    instr->op != nir_texop_txb &&
880                                    instr->op != nir_texop_txd &&
881                                    instr->op != nir_texop_lod);
882          break;
883 
884       case nir_tex_src_ddx:
885       case nir_tex_src_ddy:
886          validate_assert(state, instr->op == nir_texop_txd);
887          break;
888 
889       case nir_tex_src_texture_deref: {
890          nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
891          if (!validate_assert(state, deref))
892             break;
893 
894          validate_tex_src_texture_deref(instr, state, deref);
895          break;
896       }
897 
898       case nir_tex_src_sampler_deref: {
899          nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
900          if (!validate_assert(state, deref))
901             break;
902 
903          validate_assert(state, glsl_type_is_sampler(deref->type));
904          break;
905       }
906 
907       case nir_tex_src_sampler_deref_intrinsic:
908       case nir_tex_src_texture_deref_intrinsic: {
909          nir_intrinsic_instr *intrin =
910             nir_instr_as_intrinsic(instr->src[i].src.ssa->parent_instr);
911          nir_deref_instr *deref =
912             nir_instr_as_deref(intrin->src[0].ssa->parent_instr);
913          if (!validate_assert(state, deref))
914             break;
915 
916          if (instr->src[i].src_type == nir_tex_src_sampler_deref_intrinsic)
917             validate_assert(state, glsl_type_is_sampler(deref->type));
918          else
919             validate_tex_src_texture_deref(instr, state, deref);
920 
921          break;
922       }
923 
924       case nir_tex_src_coord:
925       case nir_tex_src_projector:
926       case nir_tex_src_offset:
927       case nir_tex_src_min_lod:
928       case nir_tex_src_ms_index:
929       case nir_tex_src_texture_offset:
930       case nir_tex_src_sampler_offset:
931       case nir_tex_src_plane:
932       case nir_tex_src_texture_handle:
933       case nir_tex_src_sampler_handle:
934          break;
935 
936       default:
937          break;
938       }
939    }
940 
941    bool msaa = (instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
942                 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
943 
944    if (msaa)
945       validate_assert(state, instr->op != nir_texop_txf);
946    else
947       validate_assert(state, instr->op != nir_texop_txf_ms);
948 
949    if (instr->op != nir_texop_tg4)
950       validate_assert(state, instr->component == 0);
951 
952    if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
953       validate_assert(state, instr->op == nir_texop_tg4);
954       validate_assert(state, !src_type_seen[nir_tex_src_offset]);
955    }
956 
957    if (instr->is_gather_implicit_lod)
958       validate_assert(state, instr->op == nir_texop_tg4);
959 
960    validate_def(&instr->def, state);
961    validate_assert(state, instr->def.num_components ==
962                              nir_tex_instr_dest_size(instr));
963 
964    unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type);
965    validate_assert(state,
966                    (bit_size ? bit_size : 32) ==
967                       instr->def.bit_size);
968 }
969 
970 static void
971 validate_call_instr(nir_call_instr *instr, validate_state *state)
972 {
973    validate_assert(state, instr->num_params == instr->callee->num_params);
974 
975    if (instr->indirect_callee.ssa) {
976       validate_assert(state, !instr->callee->impl);
977       validate_src(&instr->indirect_callee, state);
978    }
979 
980    for (unsigned i = 0; i < instr->num_params; i++) {
981       validate_sized_src(&instr->params[i], state,
982                          instr->callee->params[i].bit_size,
983                          instr->callee->params[i].num_components);
984    }
985 }
986 
987 static void
988 validate_const_value(nir_const_value *val, unsigned bit_size,
989                      bool is_null_constant, validate_state *state)
990 {
991    /* In order for block copies to work properly for things like instruction
992     * comparisons and [de]serialization, we require the unused bits of the
993     * nir_const_value to be zero.
994     */
995    nir_const_value cmp_val;
996    memset(&cmp_val, 0, sizeof(cmp_val));
997    if (!is_null_constant) {
998       switch (bit_size) {
999       case 1:
1000          cmp_val.b = val->b;
1001          break;
1002       case 8:
1003          cmp_val.u8 = val->u8;
1004          break;
1005       case 16:
1006          cmp_val.u16 = val->u16;
1007          break;
1008       case 32:
1009          cmp_val.u32 = val->u32;
1010          break;
1011       case 64:
1012          cmp_val.u64 = val->u64;
1013          break;
1014       default:
1015          validate_assert(state, !"Invalid load_const bit size");
1016       }
1017    }
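
   /* Illustrative failure case for the check below: for bit_size == 16 only
    * cmp_val.u16 is copied, so bytes 2..7 of the 64-bit union must be zero.
    * A value whose high bits were left uninitialized, say u64 ==
    * 0xdeadbeef0000abcd for the 16-bit constant 0xabcd, fails the memcmp
    * even though its low 16 bits are correct.
    */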
1018    validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
1019 }
1020 
1021 static void
1022 validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
1023 {
1024    validate_def(&instr->def, state);
1025 
1026    for (unsigned i = 0; i < instr->def.num_components; i++)
1027       validate_const_value(&instr->value[i], instr->def.bit_size, false, state);
1028 }
1029 
1030 static void
1031 validate_ssa_undef_instr(nir_undef_instr *instr, validate_state *state)
1032 {
1033    validate_def(&instr->def, state);
1034 }
1035 
1036 static void
1037 validate_phi_instr(nir_phi_instr *instr, validate_state *state)
1038 {
1039    /*
1040     * don't validate the sources until we get to them from their predecessor
1041     * basic blocks, to avoid validating an SSA use before its definition.
1042     */
1043 
1044    validate_def(&instr->def, state);
1045 
1046    exec_list_validate(&instr->srcs);
1047    validate_assert(state, exec_list_length(&instr->srcs) ==
1048                              state->block->predecessors->entries);
1049 }
1050 
1051 static void
1052 validate_jump_instr(nir_jump_instr *instr, validate_state *state)
1053 {
1054    nir_block *block = state->block;
1055    validate_assert(state, &instr->instr == nir_block_last_instr(block));
1056 
1057    switch (instr->type) {
1058    case nir_jump_return:
1059    case nir_jump_halt:
1060       validate_assert(state, block->successors[0] == state->impl->end_block);
1061       validate_assert(state, block->successors[1] == NULL);
1062       validate_assert(state, instr->target == NULL);
1063       validate_assert(state, instr->else_target == NULL);
1064       validate_assert(state, !state->in_loop_continue_construct);
1065       break;
1066 
1067    case nir_jump_break:
1068       validate_assert(state, state->impl->structured);
1069       validate_assert(state, state->loop != NULL);
1070       if (state->loop) {
1071          nir_block *after =
1072             nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
1073          validate_assert(state, block->successors[0] == after);
1074       }
1075       validate_assert(state, block->successors[1] == NULL);
1076       validate_assert(state, instr->target == NULL);
1077       validate_assert(state, instr->else_target == NULL);
1078       break;
1079 
1080    case nir_jump_continue:
1081       validate_assert(state, state->impl->structured);
1082       validate_assert(state, state->loop != NULL);
1083       if (state->loop) {
1084          nir_block *cont_block = nir_loop_continue_target(state->loop);
1085          validate_assert(state, block->successors[0] == cont_block);
1086       }
1087       validate_assert(state, block->successors[1] == NULL);
1088       validate_assert(state, instr->target == NULL);
1089       validate_assert(state, instr->else_target == NULL);
1090       validate_assert(state, !state->in_loop_continue_construct);
1091       break;
1092 
1093    case nir_jump_goto:
1094       validate_assert(state, !state->impl->structured);
1095       validate_assert(state, instr->target == block->successors[0]);
1096       validate_assert(state, instr->target != NULL);
1097       validate_assert(state, instr->else_target == NULL);
1098       break;
1099 
1100    case nir_jump_goto_if:
1101       validate_assert(state, !state->impl->structured);
1102       validate_assert(state, instr->target == block->successors[1]);
1103       validate_assert(state, instr->else_target == block->successors[0]);
1104       validate_sized_src(&instr->condition, state, 0, 1);
1105       validate_assert(state, instr->target != NULL);
1106       validate_assert(state, instr->else_target != NULL);
1107       break;
1108 
1109    default:
1110       validate_assert(state, !"Invalid jump instruction type");
1111       break;
1112    }
1113 }
1114 
1115 static void
1116 validate_instr(nir_instr *instr, validate_state *state)
1117 {
1118    validate_assert(state, instr->block == state->block);
1119 
1120    state->instr = instr;
1121 
1122    switch (instr->type) {
1123    case nir_instr_type_alu:
1124       validate_alu_instr(nir_instr_as_alu(instr), state);
1125       break;
1126 
1127    case nir_instr_type_deref:
1128       validate_deref_instr(nir_instr_as_deref(instr), state);
1129       break;
1130 
1131    case nir_instr_type_call:
1132       validate_call_instr(nir_instr_as_call(instr), state);
1133       break;
1134 
1135    case nir_instr_type_intrinsic:
1136       validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
1137       break;
1138 
1139    case nir_instr_type_tex:
1140       validate_tex_instr(nir_instr_as_tex(instr), state);
1141       break;
1142 
1143    case nir_instr_type_load_const:
1144       validate_load_const_instr(nir_instr_as_load_const(instr), state);
1145       break;
1146 
1147    case nir_instr_type_phi:
1148       validate_phi_instr(nir_instr_as_phi(instr), state);
1149       break;
1150 
1151    case nir_instr_type_undef:
1152       validate_ssa_undef_instr(nir_instr_as_undef(instr), state);
1153       break;
1154 
1155    case nir_instr_type_jump:
1156       validate_jump_instr(nir_instr_as_jump(instr), state);
1157       break;
1158 
1159    case nir_instr_type_debug_info:
1160       break;
1161 
1162    default:
1163       validate_assert(state, !"Invalid ALU instruction type");
1164       break;
1165    }
1166 
1167    state->instr = NULL;
1168 }
1169 
1170 static void
1171 validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
1172 {
1173    state->instr = &instr->instr;
1174 
1175    exec_list_validate(&instr->srcs);
1176    nir_foreach_phi_src(src, instr) {
1177       if (src->pred == pred) {
1178          validate_sized_src(&src->src, state, instr->def.bit_size,
1179                             instr->def.num_components);
1180          state->instr = NULL;
1181          return;
1182       }
1183    }
1184    validate_assert(state, !"Phi does not have a source corresponding to one "
1185                            "of its predecessor blocks");
1186 }
1187 
1188 static void
1189 validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
1190 {
1191    nir_foreach_phi(phi, succ) {
1192       validate_phi_src(phi, block, state);
1193    }
1194 }
1195 
1196 static void
1197 collect_blocks(struct exec_list *cf_list, validate_state *state)
1198 {
1199    /* We walk the blocks manually here rather than using nir_foreach_block for
1200     * a few reasons:
1201     *
1202     *  1. We want to call exec_list_validate() on every linked list in the
1203     *     IR, which means we need to touch every list; just walking blocks
1204     *     with nir_foreach_block() would make that difficult.  In particular,
1205     *     we want to validate each list before the first time we walk it so
1206     *     that we catch broken lists in exec_list_validate() instead of
1207     *     getting stuck in a hard-to-debug infinite loop in the validator.
1208     *
1209     *  2. nir_foreach_block() depends on several invariants of the CF node
1210     *     hierarchy which nir_validate_shader() is responsible for verifying.
1211     *     If we used nir_foreach_block() in nir_validate_shader(), we could
1212     *     end up blowing up on a bad list walk instead of throwing the much
1213     *     easier to debug validation error.
1214     */
1215    exec_list_validate(cf_list);
1216    foreach_list_typed(nir_cf_node, node, node, cf_list) {
1217       switch (node->type) {
1218       case nir_cf_node_block:
1219          _mesa_set_add(state->blocks, nir_cf_node_as_block(node));
1220          break;
1221 
1222       case nir_cf_node_if:
1223          collect_blocks(&nir_cf_node_as_if(node)->then_list, state);
1224          collect_blocks(&nir_cf_node_as_if(node)->else_list, state);
1225          break;
1226 
1227       case nir_cf_node_loop:
1228          collect_blocks(&nir_cf_node_as_loop(node)->body, state);
1229          collect_blocks(&nir_cf_node_as_loop(node)->continue_list, state);
1230          break;
1231 
1232       default:
1233          unreachable("Invalid CF node type");
1234       }
1235    }
1236 }
1237 
1238 static void
1239 collect_blocks_pdfs(nir_function_impl *impl, nir_block *block,
1240                     uint32_t *count, validate_state *state)
1241 {
1242    if (block == impl->end_block)
1243       return;
1244 
1245    if (_mesa_set_search(state->blocks, block))
1246       return;
1247 
1248    _mesa_set_add(state->blocks, block);
1249 
1250    for (uint32_t i = 0; i < ARRAY_SIZE(block->successors); i++) {
1251       if (block->successors[i] != NULL)
1252          collect_blocks_pdfs(impl, block->successors[i], count, state);
1253    }
1254 
1255    /* Assert that the blocks are indexed in reverse PDFS order */
1256    validate_assert(state, block->index == --(*count));
1257 }
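
/* Illustrative walk of the invariant above: for a diamond CFG
 * A -> {B, C} -> D, the recursion reaches D twice but adds it to
 * state->blocks only once; *count is decremented as each call unwinds, so
 * blocks are numbered in reverse post-DFS order (D gets the highest body
 * index and, when every block is reachable, the start block A gets 0).
 */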
1258 
1259 static void
1260 collect_unstructured_blocks(nir_function_impl *impl, validate_state *state)
1261 {
1262    exec_list_validate(&impl->body);
1263 
1264    /* Assert that the blocks are properly indexed */
1265    uint32_t count = 0;
1266    foreach_list_typed(nir_cf_node, node, node, &impl->body) {
1267       nir_block *block = nir_cf_node_as_block(node);
1268       validate_assert(state, block->index == count++);
1269    }
1270    validate_assert(state, impl->end_block->index == count);
1271 
1272    collect_blocks_pdfs(impl, nir_start_block(impl), &count, state);
1273 }
1274 
1275 static void validate_cf_node(nir_cf_node *node, validate_state *state);
1276 
1277 static void
1278 validate_block_predecessors(nir_block *block, validate_state *state)
1279 {
1280    for (unsigned i = 0; i < 2; i++) {
1281       if (block->successors[i] == NULL)
1282          continue;
1283 
1284       /* The block has to exist in the nir_function_impl */
1285       validate_assert(state, _mesa_set_search(state->blocks,
1286                                               block->successors[i]));
1287 
1288       /* And we have to be in our successor's predecessors set */
1289       validate_assert(state,
1290                       _mesa_set_search(block->successors[i]->predecessors, block));
1291 
1292       validate_phi_srcs(block, block->successors[i], state);
1293    }
1294 
1295    /* The start block cannot have any predecessors */
1296    if (block == nir_start_block(state->impl))
1297       validate_assert(state, block->predecessors->entries == 0);
1298 
1299    set_foreach(block->predecessors, entry) {
1300       const nir_block *pred = entry->key;
1301       validate_assert(state, _mesa_set_search(state->blocks, pred));
1302       validate_assert(state, pred->successors[0] == block ||
1303                                 pred->successors[1] == block);
1304    }
1305 }
1306 
1307 static void
1308 validate_block(nir_block *block, validate_state *state)
1309 {
1310    validate_assert(state, block->cf_node.parent == state->parent_node);
1311 
1312    state->block = block;
1313 
1314    exec_list_validate(&block->instr_list);
1315    nir_foreach_instr(instr, block) {
1316       if (instr->type == nir_instr_type_phi) {
1317          validate_assert(state, instr == nir_block_first_instr(block) ||
1318                                    nir_instr_prev(instr)->type == nir_instr_type_phi);
1319       }
1320 
1321       validate_instr(instr, state);
1322    }
1323 
1324    validate_assert(state, block->successors[0] != NULL);
1325    validate_assert(state, block->successors[0] != block->successors[1]);
1326    validate_block_predecessors(block, state);
1327 
1328    if (!state->impl->structured) {
1329       validate_assert(state, nir_block_ends_in_jump(block));
1330    } else if (!nir_block_ends_in_jump(block)) {
1331       nir_cf_node *next = nir_cf_node_next(&block->cf_node);
1332       if (next == NULL) {
1333          switch (state->parent_node->type) {
1334          case nir_cf_node_loop: {
1335             if (block == nir_loop_last_block(state->loop)) {
1336                nir_block *cont = nir_loop_continue_target(state->loop);
1337                validate_assert(state, block->successors[0] == cont);
1338             } else {
1339                validate_assert(state, nir_loop_has_continue_construct(state->loop) &&
1340                                          block == nir_loop_last_continue_block(state->loop));
1341                nir_block *head = nir_loop_first_block(state->loop);
1342                validate_assert(state, block->successors[0] == head);
1343             }
1344             /* due to the hack for infinite loops, block->successors[1] may
1345              * point to the block after the loop.
1346              */
1347             break;
1348          }
1349 
1350          case nir_cf_node_if: {
1351             nir_block *after =
1352                nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
1353             validate_assert(state, block->successors[0] == after);
1354             validate_assert(state, block->successors[1] == NULL);
1355             break;
1356          }
1357 
1358          case nir_cf_node_function:
1359             validate_assert(state, block->successors[0] == state->impl->end_block);
1360             validate_assert(state, block->successors[1] == NULL);
1361             break;
1362 
1363          default:
1364             unreachable("unknown control flow node type");
1365          }
1366       } else {
1367          if (next->type == nir_cf_node_if) {
1368             nir_if *if_stmt = nir_cf_node_as_if(next);
1369             validate_assert(state, block->successors[0] ==
1370                                       nir_if_first_then_block(if_stmt));
1371             validate_assert(state, block->successors[1] ==
1372                                       nir_if_first_else_block(if_stmt));
1373          } else if (next->type == nir_cf_node_loop) {
1374             nir_loop *loop = nir_cf_node_as_loop(next);
1375             validate_assert(state, block->successors[0] ==
1376                                       nir_loop_first_block(loop));
1377             validate_assert(state, block->successors[1] == NULL);
1378          } else {
1379             validate_assert(state,
1380                             !"Structured NIR cannot have consecutive blocks");
1381          }
1382       }
1383    }
1384 }
1385 
1386 static void
1387 validate_end_block(nir_block *block, validate_state *state)
1388 {
1389    validate_assert(state, block->cf_node.parent == &state->impl->cf_node);
1390 
1391    exec_list_validate(&block->instr_list);
1392    validate_assert(state, exec_list_is_empty(&block->instr_list));
1393 
1394    validate_assert(state, block->successors[0] == NULL);
1395    validate_assert(state, block->successors[1] == NULL);
1396    validate_block_predecessors(block, state);
1397 }
1398 
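/* nir_if only exists in structured NIR, so it must be sandwiched between
 * plain blocks, its condition must be an if-use of a source, and both the
 * then and else lists must be non-empty (an empty branch still contains one
 * empty block).
 */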
1399 static void
1400 validate_if(nir_if *if_stmt, validate_state *state)
1401 {
1402    validate_assert(state, state->impl->structured);
1403 
1404    state->if_stmt = if_stmt;
1405 
1406    validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
1407    nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
1408    validate_assert(state, prev_node->type == nir_cf_node_block);
1409 
1410    validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
1411    nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
1412    validate_assert(state, next_node->type == nir_cf_node_block);
1413 
1414    validate_assert(state, nir_src_is_if(&if_stmt->condition));
1415    validate_if_src(&if_stmt->condition, state);
1416 
1417    validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
1418    validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));
1419 
1420    nir_cf_node *old_parent = state->parent_node;
1421    state->parent_node = &if_stmt->cf_node;
1422 
1423    foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
1424       validate_cf_node(cf_node, state);
1425    }
1426 
1427    foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
1428       validate_cf_node(cf_node, state);
1429    }
1430 
1431    state->parent_node = old_parent;
1432    state->if_stmt = NULL;
1433 }
1434 
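/* Loops follow the same sandwiching rule as ifs and must have a non-empty
 * body. The walk saves and restores state->loop and
 * state->in_loop_continue_construct so that nested loops validate correctly
 * and later checks can tell whether an instruction sits inside the continue
 * construct.
 */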
1435 static void
1436 validate_loop(nir_loop *loop, validate_state *state)
1437 {
1438    validate_assert(state, state->impl->structured);
1439 
1440    validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
1441    nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
1442    validate_assert(state, prev_node->type == nir_cf_node_block);
1443 
1444    validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
1445    nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
1446    validate_assert(state, next_node->type == nir_cf_node_block);
1447 
1448    validate_assert(state, !exec_list_is_empty(&loop->body));
1449 
1450    nir_cf_node *old_parent = state->parent_node;
1451    state->parent_node = &loop->cf_node;
1452    nir_loop *old_loop = state->loop;
1453    bool old_continue_construct = state->in_loop_continue_construct;
1454    state->loop = loop;
1455    state->in_loop_continue_construct = false;
1456 
1457    foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
1458       validate_cf_node(cf_node, state);
1459    }
1460    state->in_loop_continue_construct = true;
1461    foreach_list_typed(nir_cf_node, cf_node, node, &loop->continue_list) {
1462       validate_cf_node(cf_node, state);
1463    }
1464    state->in_loop_continue_construct = false;
1465    state->parent_node = old_parent;
1466    state->loop = old_loop;
1467    state->in_loop_continue_construct = old_continue_construct;
1468 }
1469 
1470 static void
1471 validate_cf_node(nir_cf_node *node, validate_state *state)
1472 {
1473    validate_assert(state, node->parent == state->parent_node);
1474 
1475    switch (node->type) {
1476    case nir_cf_node_block:
1477       validate_block(nir_cf_node_as_block(node), state);
1478       break;
1479 
1480    case nir_cf_node_if:
1481       validate_if(nir_cf_node_as_if(node), state);
1482       break;
1483 
1484    case nir_cf_node_loop:
1485       validate_loop(nir_cf_node_as_loop(node), state);
1486       break;
1487 
1488    default:
1489       unreachable("Invalid CF node type");
1490    }
1491 }
1492 
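/* Recursively checks that a nir_constant has the shape of its GLSL type:
 * vectors/scalars validate each component and require unused trailing
 * components to be zero, while structs and arrays/matrices recurse per
 * element and require that a null constant contains only null children.
 */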
1493 static void
1494 validate_constant(nir_constant *c, const struct glsl_type *type,
1495                   validate_state *state)
1496 {
1497    if (glsl_type_is_vector_or_scalar(type)) {
1498       unsigned num_components = glsl_get_vector_elements(type);
1499       unsigned bit_size = glsl_get_bit_size(type);
1500       for (unsigned i = 0; i < num_components; i++)
1501          validate_const_value(&c->values[i], bit_size, c->is_null_constant, state);
1502       for (unsigned i = num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
1503          validate_assert(state, c->values[i].u64 == 0);
1504    } else {
1505       validate_assert(state, c->num_elements == glsl_get_length(type));
1506       if (glsl_type_is_struct_or_ifc(type)) {
1507          for (unsigned i = 0; i < c->num_elements; i++) {
1508             const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
1509             validate_constant(c->elements[i], elem_type, state);
1510             validate_assert(state, !c->is_null_constant || c->elements[i]->is_null_constant);
1511          }
1512       } else if (glsl_type_is_array_or_matrix(type)) {
1513          const struct glsl_type *elem_type = glsl_get_array_element(type);
1514          for (unsigned i = 0; i < c->num_elements; i++) {
1515             validate_constant(c->elements[i], elem_type, state);
1516             validate_assert(state, !c->is_null_constant || c->elements[i]->is_null_constant);
1517          }
1518       } else {
1519          validate_assert(state, !"Invalid type for nir_constant");
1520       }
1521    }
1522 }
1523 
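/* Declaration-level variable checks: exactly one mode bit which is legal in
 * the current context, "compact" only on arrays of scalars (arrays of arrays
 * of scalars for arrayed I/O), member info consistent with the struct type,
 * and a constant initializer matching the variable's type. The variable is
 * also recorded in var_defs: function-temp declarations map to their impl,
 * everything else to NULL.
 */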
1524 static void
1525 validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
1526                   validate_state *state)
1527 {
1528    state->var = var;
1529 
1530    /* Must have exactly one mode set */
1531    validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
1532    validate_assert(state, var->data.mode & valid_modes);
1533 
1534    if (var->data.compact) {
1535       /* The "compact" flag is only valid on arrays of scalars. */
1536       assert(glsl_type_is_array(var->type));
1537 
1538       const struct glsl_type *type = glsl_get_array_element(var->type);
1539       if (nir_is_arrayed_io(var, state->shader->info.stage)) {
1540          assert(glsl_type_is_array(type));
1541          assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1542       } else {
1543          assert(glsl_type_is_scalar(type));
1544       }
1545    }
1546 
1547    if (var->num_members > 0) {
1548       const struct glsl_type *without_array = glsl_without_array(var->type);
1549       validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
1550       validate_assert(state, var->num_members == glsl_get_length(without_array));
1551       validate_assert(state, var->members != NULL);
1552    }
1553 
1554    if (var->data.per_view)
1555       validate_assert(state, glsl_type_is_array(var->type));
1556 
1557    if (var->constant_initializer)
1558       validate_constant(var->constant_initializer, var->type, state);
1559 
1560    if (var->data.mode == nir_var_image) {
1561       validate_assert(state, !var->data.bindless);
1562       validate_assert(state, glsl_type_is_image(glsl_without_array(var->type)));
1563    }
1564 
1565    if (var->data.per_vertex)
1566       validate_assert(state, state->shader->info.stage == MESA_SHADER_FRAGMENT);
1567 
1568    /*
1569     * TODO validate some things ir_validate.cpp does (requires more GLSL type
1570     * support)
1571     */
1572 
1573    _mesa_hash_table_insert(state->var_defs, var,
1574                            valid_modes == nir_var_function_temp ? state->impl : NULL);
1575 
1576    state->var = NULL;
1577 }
1578 
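/* Every def must have an index within the impl's ssa_alloc range and be seen
 * exactly once. The ssa_defs_found bitset enforces that uniqueness and,
 * during the dominance walk, doubles as the set of defs already visited.
 */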
1579 static bool
1580 validate_ssa_def_dominance(nir_def *def, void *_state)
1581 {
1582    validate_state *state = _state;
1583 
1584    validate_assert(state, def->index < state->impl->ssa_alloc);
1585    validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
1586    BITSET_SET(state->ssa_defs_found, def->index);
1587 
1588    return true;
1589 }
1590 
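/* A use in the same block as its def is only valid if the def was already
 * visited (i.e. it textually precedes the use); a use in another block
 * requires that the def's block dominates the use's block.
 */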
1591 static bool
1592 validate_src_dominance(nir_src *src, void *_state)
1593 {
1594    validate_state *state = _state;
1595 
1596    if (src->ssa->parent_instr->block == nir_src_parent_instr(src)->block) {
1597       validate_assert(state, src->ssa->index < state->impl->ssa_alloc);
1598       validate_assert(state, BITSET_TEST(state->ssa_defs_found,
1599                                          src->ssa->index));
1600    } else {
1601       validate_assert(state, nir_block_dominates(src->ssa->parent_instr->block,
1602                                                  nir_src_parent_instr(src)->block));
1603    }
1604    return true;
1605 }
1606 
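/* Walks blocks in index order, so by the time a use is reached every def
 * that precedes it has been marked in ssa_defs_found. Phi sources are
 * special: each source only has to dominate the corresponding predecessor
 * block, not the phi itself.
 */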
1607 static void
1608 validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
1609 {
1610    nir_metadata_require(impl, nir_metadata_dominance);
1611 
1612    nir_foreach_block_unstructured(block, impl) {
1613       state->block = block;
1614       nir_foreach_instr(instr, block) {
1615          state->instr = instr;
1616          if (instr->type == nir_instr_type_phi) {
1617             nir_phi_instr *phi = nir_instr_as_phi(instr);
1618             nir_foreach_phi_src(src, phi) {
1619                validate_assert(state,
1620                                nir_block_dominates(src->src.ssa->parent_instr->block,
1621                                                    src->pred));
1622             }
1623          } else {
1624             nir_foreach_src(instr, validate_src_dominance, state);
1625          }
1626          nir_foreach_def(instr, validate_ssa_def_dominance, state);
1627       }
1628    }
1629 }
1630 
1631 static void
1632 validate_block_index(nir_function_impl *impl, validate_state *state)
1633 {
1634    unsigned index = 0;
1635    nir_foreach_block_unstructured(block, impl) {
1636       state->block = block;
1637       validate_assert(state, block->index == index);
1638       index++;
1639    }
1640    state->block = NULL;
1641    validate_assert(state, impl->num_blocks == index);
1642    validate_assert(state, impl->end_block->index == impl->num_blocks);
1643 }
1644 
1645 static void
1646 validate_instr_index(nir_function_impl *impl, validate_state *state)
1647 {
1648    int index = -1;
1649    nir_foreach_block(block, impl) {
1650       state->block = block;
1651 
1652       validate_assert(state, (int)block->start_ip > index);
1653       index = block->start_ip;
1654 
1655       nir_foreach_instr(instr, block) {
1656          state->instr = instr;
1657          validate_assert(state, (int)instr->index > index);
1658          index = instr->index;
1659       }
1660       state->instr = NULL;
1661 
1662       validate_assert(state, (int)block->end_ip > index);
1663       index = block->end_ip;
1664    }
1665    state->block = NULL;
1666 }
1667 
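/* The metadata validators below all follow the same pattern: stash the
 * metadata currently stored on the IR, clear the corresponding valid bits,
 * ask nir_metadata_require() to recompute everything from scratch, assert
 * that the fresh results match the stashed copy, and finally restore the
 * originals so validation has no observable effect. Roughly (pseudocode,
 * with hypothetical stash/compare/restore helpers):
 *
 *    saved = stash(impl);              // copy per-block metadata aside
 *    impl->valid_metadata &= ~bits;    // pretend it was never computed
 *    nir_metadata_require(impl, bits); // recompute from scratch
 *    compare(saved, impl);             // must match exactly
 *    restore(impl, saved);             // leave the IR as we found it
 */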
1668 typedef struct {
1669    uint32_t index;
1670    unsigned num_dom_children;
1671    struct nir_block **dom_children;
1672    struct set *dom_frontier;
1673    uint32_t dom_pre_index, dom_post_index;
1674 } block_dom_metadata;
1675 
1676 static void
1677 validate_dominance(nir_function_impl *impl, validate_state *state)
1678 {
1679    nir_metadata valid_metadata = impl->valid_metadata;
1680 
1681    /* Preserve dominance */
1682    block_dom_metadata *blocks = ralloc_array(state->mem_ctx, block_dom_metadata,
1683                                              state->blocks->size);
1684    set_foreach(state->blocks, entry) {
1685       nir_block *block = (nir_block *)entry->key;
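      /* entry - state->blocks->table is this entry's slot in the set's
       * backing array, so it can parallel-index the scratch array and give
       * each block a stable place for its saved metadata.
       */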
1686       block_dom_metadata *md = &blocks[entry - state->blocks->table];
1687       md->index = block->index;
1688       md->num_dom_children = block->num_dom_children;
1689       md->dom_pre_index = block->dom_pre_index;
1690       md->dom_post_index = block->dom_post_index;
1691       md->dom_children = block->dom_children;
1692       md->dom_frontier = block->dom_frontier;
1693 
1694       block->dom_children = NULL;
1695       block->dom_frontier = _mesa_pointer_set_create(block);
1696    }
1697 
1698    /* Recompute the metadata, compare it against the preserved copy, and run
1699     * SSA dominance validation. Dominance requires block indices, but we
1700     * ignore the current ones since we don't trust them.
1701     */
1702    impl->valid_metadata &= ~(nir_metadata_block_index | nir_metadata_dominance);
1703    nir_metadata_require(impl, nir_metadata_block_index | nir_metadata_dominance);
1704    assert(impl->valid_metadata == (valid_metadata | nir_metadata_block_index | nir_metadata_dominance));
1705 
1706    if (valid_metadata & nir_metadata_dominance) {
1707       set_foreach(state->blocks, entry) {
1708          nir_block *block = (nir_block *)entry->key;
1709          block_dom_metadata *md = &blocks[entry - state->blocks->table];
1710          state->block = (nir_block *)block;
1711 
1712          validate_assert(state, block->num_dom_children == md->num_dom_children);
1713          validate_assert(state, block->dom_pre_index == md->dom_pre_index);
1714          validate_assert(state, block->dom_post_index == md->dom_post_index);
1715 
1716          if (block->num_dom_children == md->num_dom_children && block->num_dom_children) {
1717             validate_assert(state, !memcmp(block->dom_children, md->dom_children,
1718                                            block->num_dom_children * sizeof(md->dom_children[0])));
1719          }
1720 
1721          validate_assert(state, block->dom_frontier->entries == md->dom_frontier->entries);
1722          set_foreach(block->dom_frontier, entry) {
1723             validate_assert(state, _mesa_set_search_pre_hashed(md->dom_frontier,
1724                                                                entry->hash, entry->key));
1725          }
1726       }
1727       state->block = NULL;
1728    }
1729 
1730    memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));
1731    validate_ssa_dominance(impl, state);
1732 
1733    /* Restore the old dominance metadata */
1734    set_foreach(state->blocks, entry) {
1735       nir_block *block = (nir_block *)entry->key;
1736       block_dom_metadata *md = &blocks[entry - state->blocks->table];
1737 
1738       ralloc_free(block->dom_children);
1739       ralloc_free(block->dom_frontier);
1740 
1741       block->index = md->index;
1742       block->num_dom_children = md->num_dom_children;
1743       block->dom_pre_index = md->dom_pre_index;
1744       block->dom_post_index = md->dom_post_index;
1745       block->dom_children = md->dom_children;
1746       block->dom_frontier = md->dom_frontier;
1747    }
1748 
1749    ralloc_free(blocks);
1750    impl->valid_metadata = valid_metadata;
1751 }
1752 
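/* Live-defs validation: the same stash/recompute/compare/restore dance, but
 * for the per-block live_in/live_out bitsets. The end block is skipped in
 * the comparison.
 */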
1753 typedef struct {
1754    BITSET_WORD *live_in;
1755    BITSET_WORD *live_out;
1756 } block_liveness_metadata;
1757 
1758 static void
1759 validate_live_defs(nir_function_impl *impl, validate_state *state)
1760 {
1761    nir_metadata valid_metadata = impl->valid_metadata;
1762 
1763    /* Preserve live defs */
1764    block_liveness_metadata *blocks = ralloc_array(state->mem_ctx, block_liveness_metadata,
1765                                                   state->blocks->size);
1766    set_foreach(state->blocks, entry) {
1767       nir_block *block = (nir_block *)entry->key;
1768       block_liveness_metadata *md = &blocks[entry - state->blocks->table];
1769       md->live_in = block->live_in;
1770       md->live_out = block->live_out;
1771 
1772       block->live_in = NULL;
1773       block->live_out = NULL;
1774    }
1775 
1776    /* Recompute the live-defs metadata and compare it against the preserved copy */
1777    impl->valid_metadata &= ~nir_metadata_live_defs;
1778    nir_metadata_require(impl, nir_metadata_live_defs);
1779    assert(impl->valid_metadata == valid_metadata);
1780 
1781    set_foreach(state->blocks, entry) {
1782       nir_block *block = (nir_block *)entry->key;
1783       block_liveness_metadata *md = &blocks[entry - state->blocks->table];
1784       state->block = (nir_block *)block;
1785 
1786       if (block == impl->end_block)
1787          continue;
1788 
1789       size_t bitset_words = BITSET_WORDS(impl->ssa_alloc);
1790       if (bitset_words) {
1791          validate_assert(state, !memcmp(md->live_in, block->live_in,
1792                                         sizeof(BITSET_WORD) * bitset_words));
1793          validate_assert(state, !memcmp(md->live_out, block->live_out,
1794                                         sizeof(BITSET_WORD) * bitset_words));
1795       }
1796    }
1797    state->block = NULL;
1798 
1799    /* Restore the old live defs metadata */
1800    set_foreach(state->blocks, entry) {
1801       nir_block *block = (nir_block *)entry->key;
1802       block_liveness_metadata *md = &blocks[entry - state->blocks->table];
1803 
1804       ralloc_free(block->live_in);
1805       ralloc_free(block->live_out);
1806 
1807       block->live_in = md->live_in;
1808       block->live_out = md->live_out;
1809    }
1810 
1811    ralloc_free(blocks);
1812 }
1813 
1814 static bool
1815 are_loop_terminators_equal(const nir_loop_terminator *a, const nir_loop_terminator *b)
1816 {
1817    if (!a || !b)
1818       return !a && !b;
1819 
1820    return a->nif == b->nif &&
1821           a->conditional_instr == b->conditional_instr &&
1822           a->break_block == b->break_block &&
1823           a->continue_from_block == b->continue_from_block &&
1824           a->continue_from_then == b->continue_from_then &&
1825           a->induction_rhs == b->induction_rhs &&
1826           a->exact_trip_count_unknown == b->exact_trip_count_unknown;
1827 }
1828 
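/* Loop-analysis validation: stashes each loop's nir_loop_info (found via the
 * loop's first block), re-runs the analysis with the impl's recorded
 * parameters, and compares trip counts, flags, terminators and induction
 * variables field by field before restoring the original info.
 */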
1829 static void
1830 validate_loop_info(nir_function_impl *impl, validate_state *state)
1831 {
1832    nir_metadata valid_metadata = impl->valid_metadata;
1833 
1834    /* Preserve loop info */
1835    struct hash_table *loops = _mesa_pointer_hash_table_create(state->mem_ctx);
1836    set_foreach(state->blocks, entry) {
1837       nir_block *block = (nir_block *)entry->key;
1838       if (block->cf_node.parent->type == nir_cf_node_loop &&
1839           nir_cf_node_is_first(&block->cf_node)) {
1840          nir_loop *loop = nir_cf_node_as_loop(block->cf_node.parent);
1841          _mesa_hash_table_insert(loops, loop, loop->info);
1842          loop->info = NULL;
1843       }
1844    }
1845 
1846    /* Recompute the loop analysis and compare it against the preserved info */
1847    impl->valid_metadata &= ~nir_metadata_loop_analysis;
1848    nir_metadata_require(impl, nir_metadata_loop_analysis, impl->loop_analysis_indirect_mask,
1849                         impl->loop_analysis_force_unroll_sampler_indirect);
1850    assert(impl->valid_metadata == valid_metadata);
1851 
1852    hash_table_foreach(loops, entry) {
1853       const nir_loop *loop = entry->key;
1854       const nir_loop_info *md = entry->data;
1855 
1856       validate_assert(state, loop->info->instr_cost == md->instr_cost);
1857       validate_assert(state, loop->info->has_soft_fp64 == md->has_soft_fp64);
1858       validate_assert(state, loop->info->guessed_trip_count == md->guessed_trip_count);
1859       validate_assert(state, loop->info->max_trip_count == md->max_trip_count);
1860       validate_assert(state, loop->info->exact_trip_count_known == md->exact_trip_count_known);
1861       validate_assert(state, loop->info->force_unroll == md->force_unroll);
1862       validate_assert(state, loop->info->complex_loop == md->complex_loop);
1863 
1864       validate_assert(state, are_loop_terminators_equal(loop->info->limiting_terminator,
1865                                                         md->limiting_terminator));
1866 
1867       validate_assert(state, list_length(&loop->info->loop_terminator_list) ==
1868                                 list_length(&md->loop_terminator_list));
1869       list_pair_for_each_entry(nir_loop_terminator, a, b, &loop->info->loop_terminator_list,
1870                                &md->loop_terminator_list, loop_terminator_link) {
1871          validate_assert(state, are_loop_terminators_equal(a, b));
1872       }
1873 
1874       validate_assert(state, _mesa_hash_table_num_entries(loop->info->induction_vars) ==
1875                                 _mesa_hash_table_num_entries(md->induction_vars));
1876       hash_table_foreach(loop->info->induction_vars, var) {
1877          struct hash_entry *prev_var = _mesa_hash_table_search(md->induction_vars, var->key);
1878          validate_assert(state, prev_var != NULL);
1879          if (prev_var) {
1880             nir_loop_induction_variable *a = var->data;
1881             nir_loop_induction_variable *b = prev_var->data;
1882             validate_assert(state, a->basis == b->basis);
1883             validate_assert(state, a->def == b->def);
1884             validate_assert(state, a->init_src == b->init_src);
1885             validate_assert(state, a->update_src == b->update_src);
1886          }
1887       }
1888    }
1889 
1890    /* Restore the old loop info */
1891    hash_table_foreach(loops, entry) {
1892       nir_loop *loop = (nir_loop *)entry->key;
1893       nir_loop_info *md = entry->data;
1894 
1895       ralloc_free(loop->info);
1896       loop->info = md;
1897    }
1898 }
1899 
1900 static void
1901 validate_metadata_and_ssa_dominance(nir_function_impl *impl, validate_state *state)
1902 {
1903    /* We should preserve and restore metadata when necessary, so that passes do not accidentally
1904     * depend on nir_validate_shader().
1905     */
1906 
1907    if (impl->valid_metadata & nir_metadata_block_index)
1908       validate_block_index(impl, state);
1909 
1910    if (impl->valid_metadata & nir_metadata_instr_index)
1911       validate_instr_index(impl, state);
1912 
1913    /* This validates both metadata and SSA dominance. */
1914    validate_dominance(impl, state);
1915 
1916    if (impl->valid_metadata & nir_metadata_live_defs)
1917       validate_live_defs(impl, state);
1918 
1919    if (impl->valid_metadata & nir_metadata_loop_analysis)
1920       validate_loop_info(impl, state);
1921 }
1922 
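/* Validates a single function implementation: its local variables, the whole
 * CF tree, and the implicit end block. Metadata is only validated afterwards,
 * and only if no errors were recorded, because the metadata passes assume a
 * well-formed shader.
 */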
1923 static void
1924 validate_function_impl(nir_function_impl *impl, validate_state *state)
1925 {
1926    validate_assert(state, impl->function->impl == impl);
1927    validate_assert(state, impl->cf_node.parent == NULL);
1928 
1929    if (impl->preamble) {
1930       validate_assert(state, impl->function->is_entrypoint);
1931       validate_assert(state, impl->preamble->is_preamble);
1932    }
1933 
1934    validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
1935    validate_assert(state, impl->end_block->successors[0] == NULL);
1936    validate_assert(state, impl->end_block->successors[1] == NULL);
1937 
1938    state->impl = impl;
1939    state->parent_node = &impl->cf_node;
1940 
1941    exec_list_validate(&impl->locals);
1942    nir_foreach_function_temp_variable(var, impl) {
1943       validate_var_decl(var, nir_var_function_temp, state);
1944    }
1945 
1946    state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
1947                                     BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
1948    memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));
1949 
1950    _mesa_set_clear(state->blocks, NULL);
1951    _mesa_set_resize(state->blocks, impl->num_blocks);
1952    if (impl->structured)
1953       collect_blocks(&impl->body, state);
1954    else
1955       collect_unstructured_blocks(impl, state);
1956    _mesa_set_add(state->blocks, impl->end_block);
1957    validate_assert(state, !exec_list_is_empty(&impl->body));
1958    foreach_list_typed(nir_cf_node, node, node, &impl->body) {
1959       validate_cf_node(node, state);
1960    }
1961    validate_end_block(impl->end_block, state);
1962 
1963    /* We must have seen every source by now. This also means that we've untagged
1964     * every source, so we have valid (unaugmented) NIR once again.
1965     */
1966    validate_assert(state, state->nr_tagged_srcs == 0);
1967 
1968    /* Metadata validation assumes a valid NIR shader. */
1969    if (_mesa_hash_table_num_entries(state->errors) == 0)
1970       validate_metadata_and_ssa_dominance(impl, state);
1971 }
1972 
1973 static void
1974 validate_function(nir_function *func, validate_state *state)
1975 {
1976    if (func->impl != NULL) {
1977       validate_assert(state, func->impl->function == func);
1978       validate_function_impl(func->impl, state);
1979    }
1980 }
1981 
1982 static void
1983 init_validate_state(validate_state *state)
1984 {
1985    state->mem_ctx = ralloc_context(NULL);
1986    state->ssa_defs_found = NULL;
1987    state->blocks = _mesa_pointer_set_create(state->mem_ctx);
1988    state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
1989    state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);
1990    state->nr_tagged_srcs = 0;
1991 
1992    state->loop = NULL;
1993    state->in_loop_continue_construct = false;
1994    state->instr = NULL;
1995    state->var = NULL;
1996 }
1997 
1998 static void
1999 destroy_validate_state(validate_state *state)
2000 {
2001    ralloc_free(state->mem_ctx);
2002 }
2003 
2004 simple_mtx_t fail_dump_mutex = SIMPLE_MTX_INITIALIZER;
2005 
2006 static void
2007 dump_errors(validate_state *state, const char *when)
2008 {
2009    struct hash_table *errors = state->errors;
2010 
2011    /* Lock around dumping so that we get clean dumps in a multi-threaded
2012     * scenario
2013     */
2014    simple_mtx_lock(&fail_dump_mutex);
2015 
2016    if (when) {
2017       fprintf(stderr, "NIR validation failed %s\n", when);
2018       fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
2019    } else {
2020       fprintf(stderr, "NIR validation failed with %d errors:\n",
2021               _mesa_hash_table_num_entries(errors));
2022    }
2023 
2024    nir_print_shader_annotated(state->shader, stderr, errors);
2025 
2026    if (_mesa_hash_table_num_entries(errors) > 0) {
2027       fprintf(stderr, "%d additional errors:\n",
2028               _mesa_hash_table_num_entries(errors));
2029       hash_table_foreach(errors, entry) {
2030          fprintf(stderr, "%s\n", (char *)entry->data);
2031       }
2032    }
2033 
2034    simple_mtx_unlock(&fail_dump_mutex);
2035 
2036    abort();
2037 }
2038 
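/* Public entry point. Typical usage (illustrative) is to validate between
 * passes so a broken transformation is caught where it happened:
 *
 *    nir_validate_shader(shader, "after my_lowering_pass");
 *
 * The "when" string only appears in the failure message. On error this dumps
 * the annotated shader and aborts; the whole validator is compiled out in
 * release builds and can be skipped at runtime via NIR_DEBUG(NOVALIDATE).
 */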
2039 void
2040 nir_validate_shader(nir_shader *shader, const char *when)
2041 {
2042    if (NIR_DEBUG(NOVALIDATE))
2043       return;
2044 
2045    validate_state state;
2046    init_validate_state(&state);
2047 
2048    state.shader = shader;
2049 
2050    nir_variable_mode valid_modes =
2051       nir_var_shader_in |
2052       nir_var_shader_out |
2053       nir_var_shader_temp |
2054       nir_var_uniform |
2055       nir_var_mem_ubo |
2056       nir_var_system_value |
2057       nir_var_mem_ssbo |
2058       nir_var_mem_shared |
2059       nir_var_mem_global |
2060       nir_var_mem_push_const |
2061       nir_var_mem_constant |
2062       nir_var_image;
2063 
2064    if (gl_shader_stage_is_callable(shader->info.stage))
2065       valid_modes |= nir_var_shader_call_data;
2066 
2067    if (shader->info.stage == MESA_SHADER_ANY_HIT ||
2068        shader->info.stage == MESA_SHADER_CLOSEST_HIT ||
2069        shader->info.stage == MESA_SHADER_INTERSECTION)
2070       valid_modes |= nir_var_ray_hit_attrib;
2071 
2072    if (shader->info.stage == MESA_SHADER_TASK ||
2073        shader->info.stage == MESA_SHADER_MESH)
2074       valid_modes |= nir_var_mem_task_payload;
2075 
2076    if (shader->info.stage == MESA_SHADER_COMPUTE)
2077       valid_modes |= nir_var_mem_node_payload |
2078                      nir_var_mem_node_payload_in;
2079 
2080    exec_list_validate(&shader->variables);
2081    nir_foreach_variable_in_shader(var, shader)
2082       validate_var_decl(var, valid_modes, &state);
2083 
2084    exec_list_validate(&shader->functions);
2085    foreach_list_typed(nir_function, func, node, &shader->functions) {
2086       validate_function(func, &state);
2087    }
2088 
2089    if (shader->xfb_info != NULL) {
2090       /* At least validate that, if nir_shader::xfb_info exists, the shader
2091        * has real transform feedback going on.
2092        */
2093       validate_assert(&state, shader->info.stage == MESA_SHADER_VERTEX ||
2094                                  shader->info.stage == MESA_SHADER_TESS_EVAL ||
2095                                  shader->info.stage == MESA_SHADER_GEOMETRY);
2096       validate_assert(&state, shader->xfb_info->buffers_written != 0);
2097       validate_assert(&state, shader->xfb_info->streams_written != 0);
2098       validate_assert(&state, shader->xfb_info->output_count > 0);
2099    }
2100 
2101    if (_mesa_hash_table_num_entries(state.errors) > 0)
2102       dump_errors(&state, when);
2103 
2104    destroy_validate_state(&state);
2105 }
2106 
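/* Lighter-weight entry point that only re-checks SSA dominance per impl,
 * without validating blocks, control flow or variables.
 */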
2107 void
2108 nir_validate_ssa_dominance(nir_shader *shader, const char *when)
2109 {
2110    if (NIR_DEBUG(NOVALIDATE))
2111       return;
2112 
2113    validate_state state;
2114    init_validate_state(&state);
2115 
2116    state.shader = shader;
2117 
2118    nir_foreach_function_impl(impl, shader) {
2119       state.ssa_defs_found = reralloc(state.mem_ctx, state.ssa_defs_found,
2120                                       BITSET_WORD,
2121                                       BITSET_WORDS(impl->ssa_alloc));
2122       memset(state.ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));
2123 
2124       state.impl = impl;
2125       validate_ssa_dominance(impl, &state);
2126    }
2127 
2128    if (_mesa_hash_table_num_entries(state.errors) > 0)
2129       dump_errors(&state, when);
2130 
2131    destroy_validate_state(&state);
2132 }
2133 
2134 #endif /* NDEBUG */
2135