/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include <assert.h>
#include "c11/threads.h"
#include "util/simple_mtx.h"
#include "nir.h"
#include "nir_xfb_info.h"

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG
typedef struct {
   void *mem_ctx;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* whether the loop continue construct is being visited */
   bool in_loop_continue_construct;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of all blocks in the list */
   struct set *blocks;

   /* Number of tagged nir_src's. This is implicitly the cardinality of the set
    * of pending nir_src's.
    */
   uint32_t nr_tagged_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* map of variable -> function implementation where it is defined, or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;
} validate_state;

static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

static bool
validate_assert_impl(validate_state *state, bool cond, const char *str,
                     const char *file, unsigned line)
{
   if (unlikely(!cond))
      log_error(state, str, file, line);
   return cond;
}

#define validate_assert(state, cond) \
   validate_assert_impl(state, (cond), #cond, __FILE__, __LINE__)

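/* Because validate_assert_impl() returns the condition, a caller can both
 * record a failure and skip checks that depend on it, e.g. (taken from
 * validate_tex_instr() below):
 *
 *    nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
 *    if (!validate_assert(state, deref))
 *       break;
 *
 * On failure, the stringized condition plus file and line are attached to
 * the current instruction or variable via log_error().
 */
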
static void
validate_num_components(validate_state *state, unsigned num_components)
{
   validate_assert(state, nir_num_components_valid(num_components));
}

/* Tag used in nir_src::_parent to indicate that a source has been seen. */
#define SRC_TAG_SEEN (0x2)

static_assert(SRC_TAG_SEEN == (~NIR_SRC_PARENT_MASK + 1),
              "Parent pointer tags chosen not to collide");
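/* A sketch of the resulting low-bit layout of nir_src::_parent (assuming the
 * current NIR_SRC_PARENT_MASK, where only bit 0 is reserved by the core IR):
 *
 *    bit 0: NIR_SRC_PARENT_IS_IF - set when the parent is a nir_if
 *    bit 1: SRC_TAG_SEEN         - validator-only; must be clear outside
 *                                  the validator
 *
 * The static_assert above guarantees the two tags never alias.
 */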

static void
tag_src(nir_src *src, validate_state *state)
{
   /* A nir_src may appear only once, and in only one SSA def's use list; we
    * check this by tagging the parent pointer of each nir_src as we go.
    */
   if (validate_assert(state, (src->_parent & SRC_TAG_SEEN) == 0)) {
      src->_parent |= SRC_TAG_SEEN;
      state->nr_tagged_srcs++;
   }
}

/* Due to tagging, it's not safe to use nir_src_parent_instr during the main
 * validate loop. This is a tagging-aware version.
 */
static nir_instr *
src_parent_instr_safe(nir_src *src)
{
   uintptr_t untagged = (src->_parent & ~SRC_TAG_SEEN);
   assert(!(untagged & NIR_SRC_PARENT_IS_IF) && "precondition");
   return (nir_instr *)untagged;
}

/*
 * As we walk SSA defs, we mark every use as seen by tagging its parent
 * pointer; this makes sure each use actually appears in a use list.
 *
 * Then we untag each source as we reach it. This lets us prove that every
 * source has been seen and that no source is visited twice.
 */
static void
validate_src_tag(nir_src *src, validate_state *state)
{
   if (validate_assert(state, src->_parent & SRC_TAG_SEEN)) {
      src->_parent &= ~SRC_TAG_SEEN;
      state->nr_tagged_srcs--;
   }
}

static void
validate_if_src(nir_src *src, validate_state *state)
{
   validate_src_tag(src, state);
   validate_assert(state, nir_src_parent_if(src) == state->if_stmt);
   validate_assert(state, src->ssa != NULL);
   validate_assert(state, src->ssa->num_components == 1);
}

static void
validate_src(nir_src *src, validate_state *state)
{
   /* Validate the tag first, so that nir_src_parent_instr is valid */
   validate_src_tag(src, state);

   /* The source is assumed to belong to an instruction; use
    * validate_if_src() for if conditions.
    */
   validate_assert(state, nir_src_parent_instr(src) == state->instr);

   validate_assert(state, src->ssa != NULL);
}

static void
validate_sized_src(nir_src *src, validate_state *state,
                   unsigned bit_sizes, unsigned num_components)
{
   validate_src(src, state);

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);
}
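
/* Note that bit_sizes is a mask rather than a single size; since bit sizes
 * are powers of two, ORing them builds a set of acceptable sizes.  An
 * illustrative call:
 *
 *    validate_sized_src(&src, state, 1 | 32, 1);
 *
 * accepts a scalar boolean stored as either a 1-bit or a 32-bit value, the
 * same pattern the deref load/store cases below use via "|= 32".  Passing 0
 * skips the corresponding check.
 */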

static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_instr_channels = nir_ssa_alu_instr_src_components(instr, index);
   unsigned num_components = nir_src_num_components(src->src);

   for (unsigned i = 0; i < num_instr_channels; i++) {
      validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state);
}

static void
validate_def(nir_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);
   validate_num_components(state, def->num_components);

   list_validate(&def->uses);
   nir_foreach_use_including_if(src, def) {
      /* Check that the def matches. */
      validate_assert(state, src->ssa == def);

      /* Check that nir_src's are unique */
      tag_src(src, state);
   }
}

static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                   src_bit_size == 64);
      }

      /* In nir_opcodes.py, these are defined to take general uint or int
       * sources.  However, they're really only defined for 32-bit or 64-bit
       * sources.  This seems to be the only place to enforce this
       * restriction.
       */
      switch (instr->op) {
      case nir_op_ufind_msb:
      case nir_op_ufind_msb_rev:
         validate_assert(state, src_bit_size == 32 || src_bit_size == 64);
         break;

      default:
         break;
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = instr->def.bit_size;
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                                dest_bit_size == 64);
   }

   validate_def(&instr->def, state);
}
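
/* The bit-size inference above, by example: nir_op_iadd has unsized integer
 * inputs and output, so the first source fixes instr_bit_size and the second
 * source and the destination must match it.  nir_op_f2f16 has an explicitly
 * 16-bit float output, so the destination is checked against 16 regardless
 * of the source's size.  (These opcodes are illustrative, not a list this
 * function special-cases.)
 */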

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (entry && var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *)entry->data == state->impl);
}

static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->modes == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For casts, we simply have to trust the instruction.  It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state);

      /* Most variable modes in NIR can only exist by themselves. */
      if (instr->modes & ~nir_var_mem_generic)
         validate_assert(state, util_bitcount(instr->modes) == 1);

      nir_deref_instr *parent = nir_src_as_deref(instr->parent);
      if (parent) {
         /* Casts can change the mode, but they can't change it completely.
          * The new mode must have some bits in common with the old.
          */
         validate_assert(state, instr->modes & parent->modes);
      } else {
         /* If our parent isn't a deref, just assert the mode is there */
         validate_assert(state, instr->modes != 0);
      }

      /* We just validate that the type is there */
      validate_assert(state, instr->type);
      if (instr->cast.align_mul > 0) {
         validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));
         validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);
      } else {
         validate_assert(state, instr->cast.align_offset == 0);
      }
   } else {
      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_sized_src(&instr->parent, state, instr->def.bit_size,
                         instr->def.num_components);

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->modes == parent->modes);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                                   glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->modes & nir_var_vec_indexable_modes) {
            /* Shared variables and UBO/SSBOs have slightly more relaxed
             * rules because we need to be able to handle array derefs on
             * vectors.  Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                      glsl_type_is_matrix(parent->type) ||
                                      glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                      glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_sized_src(&instr->arr.index, state,
                               instr->def.bit_size, 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast.  If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                            parent->deref_type == nir_deref_type_ptr_as_array ||
                            parent->deref_type == nir_deref_type_cast);
         validate_sized_src(&instr->arr.index, state,
                            instr->def.bit_size, 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_def(&instr->def, state);

   /* Certain modes cannot be used as sources for phi instructions because
    * way too many passes assume that they can always chase deref chains.
    */
   nir_foreach_use_including_if(use, &instr->def) {
      /* Deref instructions as if conditions don't make sense because if
       * conditions expect well-formed Booleans.  If you want to compare with
       * NULL, an explicit comparison operation should be used.
       */
      if (!validate_assert(state, !nir_src_is_if(use)))
         continue;

      if (src_parent_instr_safe(use)->type == nir_instr_type_phi) {
         validate_assert(state, !(instr->modes & (nir_var_shader_in |
                                                  nir_var_shader_out |
                                                  nir_var_uniform)));
      }
   }
}
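
/* A sketch of the kind of chain this validates, for a hypothetical variable
 * "struct { vec4 v[2]; } s":
 *
 *    deref_var    &s       modes == s.data.mode, type == s.type
 *    deref_struct &s->v    parent must be a struct type; type == field type
 *    deref_array  &s->v[i] parent must be an array/matrix type; the index i
 *                          must be a scalar with the deref's own bit size
 */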

static bool
vectorized_intrinsic(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];

   if (info->dest_components == 0)
      return true;

   for (unsigned i = 0; i < info->num_srcs; i++)
      if (info->src_components[i] == 0)
         return true;

   return false;
}
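
/* "Vectorized" here means the intrinsic declares at least one destination or
 * source with 0 components, i.e. its width comes from num_components rather
 * than from the opcode table (load_ubo's destination is declared that way,
 * for example).  Non-vectorized intrinsics must leave num_components at 0,
 * which is checked in validate_intrinsic_instr() below.
 */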

/** Returns the image format or PIPE_FORMAT_COUNT for incomplete derefs
 *
 * We use PIPE_FORMAT_COUNT for incomplete derefs because PIPE_FORMAT_NONE
 * indicates that we found the variable but it has no format specified.
 */
static enum pipe_format
image_intrin_format(nir_intrinsic_instr *instr)
{
   if (nir_intrinsic_format(instr) != PIPE_FORMAT_NONE)
      return nir_intrinsic_format(instr);

   /* If this is not a deref intrinsic, PIPE_FORMAT_NONE is the best we can do */
   if (nir_intrinsic_infos[instr->intrinsic].src_components[0] != -1)
      return PIPE_FORMAT_NONE;

   nir_variable *var = nir_intrinsic_get_var(instr, 0);
   if (var == NULL)
      return PIPE_FORMAT_COUNT;

   return var->data.image.format;
}

static void
validate_register_handle(nir_src handle_src,
                         unsigned num_components,
                         unsigned bit_size,
                         validate_state *state)
{
   nir_def *handle = handle_src.ssa;
   nir_instr *parent = handle->parent_instr;

   if (!validate_assert(state, parent->type == nir_instr_type_intrinsic))
      return;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(parent);
   if (!validate_assert(state, intr->intrinsic == nir_intrinsic_decl_reg))
      return;

   validate_assert(state, nir_intrinsic_num_components(intr) == num_components);
   validate_assert(state, nir_intrinsic_bit_size(intr) == bit_size);
}
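
/* The register intrinsics checked here come in a triple; an illustrative
 * shape (not literal NIR print syntax):
 *
 *    %r = decl_reg (num_components=N, bit_size=B)   ; in the start block
 *    %v = load_reg (%r)                             ; def must be N x B
 *    store_reg (%v2, %r)                            ; src[0] must be N x B
 *
 * The load_reg/store_reg cases in validate_intrinsic_instr() below pass the
 * def or stored-value size into validate_register_handle() accordingly.
 */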

static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = {
      0,
   };
   switch (instr->intrinsic) {
   case nir_intrinsic_decl_reg:
      assert(state->block == nir_start_block(state->impl));
      break;

   case nir_intrinsic_load_reg:
   case nir_intrinsic_load_reg_indirect:
      validate_register_handle(instr->src[0],
                               instr->def.num_components,
                               instr->def.bit_size, state);
      break;

   case nir_intrinsic_store_reg:
   case nir_intrinsic_store_reg_indirect:
      validate_register_handle(instr->src[1],
                               nir_src_num_components(instr->src[0]),
                               nir_src_bit_size(instr->src[0]), state);
      break;

   case nir_intrinsic_convert_alu_types: {
      nir_alu_type src_type = nir_intrinsic_src_type(instr);
      nir_alu_type dest_type = nir_intrinsic_dest_type(instr);
      dest_bit_size = nir_alu_type_get_type_size(dest_type);
      src_bit_sizes[0] = nir_alu_type_get_type_size(src_type);
      validate_assert(state, dest_bit_size != 0);
      validate_assert(state, src_bit_sizes[0] != 0);
      break;
   }

   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      assert(src);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                                (src->modes == nir_var_uniform &&
                                 glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                                glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      assert(dst);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                                glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                                glsl_get_bare_type(src->type));
      validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
      /* FIXME: now that we track if the var copies were lowered, it would be
       * good to validate here that no new copy derefs were added.  Right now
       * we can't, as there are some specific cases where copies are added
       * even after the lowering.  One example is the Intel compiler, which
       * calls nir_lower_io_to_temporaries when linking some shader stages.
       */
      break;
   }

   case nir_intrinsic_load_ubo_vec4: {
      int bit_size = instr->def.bit_size;
      validate_assert(state, bit_size >= 8);
      validate_assert(state, (nir_intrinsic_component(instr) +
                              instr->num_components) *
                                   (bit_size / 8) <=
                                16);
      break;
   }

   case nir_intrinsic_load_ubo:
      /* Make sure that the creator didn't forget to set the range_base+range. */
      validate_assert(state, nir_intrinsic_range(instr) != 0);
      FALLTHROUGH;
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_constant:
      /* These memory load operations must have alignments */
      validate_assert(state,
                      util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                                nir_intrinsic_align_mul(instr));
      FALLTHROUGH;

   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_primitive_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_per_view_output:
   case nir_intrinsic_load_per_primitive_output:
   case nir_intrinsic_load_push_constant:
   case nir_intrinsic_load_attribute_pan:
      /* All memory load operations must load at least a byte */
      validate_assert(state, instr->def.bit_size >= 8);
      break;

   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_sample:
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample: {
      enum glsl_interp_mode mode = nir_intrinsic_interp_mode(instr);
      validate_assert(state,
                      mode == INTERP_MODE_NONE ||
                      mode == INTERP_MODE_SMOOTH ||
                      mode == INTERP_MODE_NOPERSPECTIVE);
      break;
   }

   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      /* These memory store operations must also have alignments */
      validate_assert(state,
                      util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                                nir_intrinsic_align_mul(instr));
      /* All memory store operations must store at least a byte */
      validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
      break;

   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
   case nir_intrinsic_store_per_view_output:
      if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
         validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
      else
         validate_assert(state, nir_src_bit_size(instr->src[0]) >= 16);
      validate_assert(state,
                      nir_src_bit_size(instr->src[0]) ==
                      nir_alu_type_get_type_size(nir_intrinsic_src_type(instr)));
      break;

   case nir_intrinsic_deref_mode_is:
   case nir_intrinsic_addr_mode_is:
      validate_assert(state,
                      util_bitcount(nir_intrinsic_memory_modes(instr)) == 1);
      break;

   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
   case nir_intrinsic_bindless_image_atomic:
   case nir_intrinsic_bindless_image_atomic_swap:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_image_atomic_swap: {
      nir_atomic_op op = nir_intrinsic_atomic_op(instr);

      enum pipe_format format = image_intrin_format(instr);
      if (format != PIPE_FORMAT_COUNT) {
         bool allowed = false;
         bool is_float = (nir_atomic_op_type(op) == nir_type_float);

         switch (format) {
         case PIPE_FORMAT_R32_FLOAT:
            allowed = is_float || op == nir_atomic_op_xchg;
            break;
         case PIPE_FORMAT_R16_FLOAT:
         case PIPE_FORMAT_R64_FLOAT:
            allowed = op == nir_atomic_op_fmin || op == nir_atomic_op_fmax;
            break;
         case PIPE_FORMAT_R32_UINT:
         case PIPE_FORMAT_R32_SINT:
         case PIPE_FORMAT_R64_UINT:
         case PIPE_FORMAT_R64_SINT:
            allowed = !is_float;
            break;
         default:
            break;
         }

         validate_assert(state, allowed);
         validate_assert(state, instr->def.bit_size ==
                                   util_format_get_blocksizebits(format));
      }
      break;
   }

   case nir_intrinsic_store_buffer_amd:
      if (nir_intrinsic_access(instr) & ACCESS_USES_FORMAT_AMD) {
         unsigned writemask = nir_intrinsic_write_mask(instr);

         /* Make sure the writemask is derived from the component count. */
         validate_assert(state,
                         writemask ==
                            BITFIELD_MASK(nir_src_num_components(instr->src[0])));
      }
      break;

   default:
      break;
   }

   if (instr->num_components > 0)
      validate_num_components(state, instr->num_components);

   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   unsigned num_srcs = info->num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_num_components(state, components_read);

      validate_sized_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = info->dest_bit_sizes;
      if (!bit_sizes && info->bit_size_src >= 0)
         bit_sizes = nir_src_bit_size(instr->src[info->bit_size_src]);

      validate_num_components(state, components_written);
      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_def(&instr->def, state);
      validate_assert(state, instr->def.num_components == components_written);

      if (dest_bit_size)
         validate_assert(state, instr->def.bit_size & dest_bit_size);
   }

   if (!vectorized_intrinsic(instr))
      validate_assert(state, instr->num_components == 0);

   if (nir_intrinsic_has_write_mask(instr)) {
      unsigned component_mask = BITFIELD_MASK(instr->num_components);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~component_mask) == 0);
   }

   if (nir_intrinsic_has_io_xfb(instr)) {
      unsigned used_mask = 0;

      for (unsigned i = 0; i < 4; i++) {
         nir_io_xfb xfb = i < 2 ? nir_intrinsic_io_xfb(instr) : nir_intrinsic_io_xfb2(instr);
         unsigned xfb_mask = BITFIELD_RANGE(i, xfb.out[i % 2].num_components);

         /* Each component can be used only once by transform feedback info. */
         validate_assert(state, (xfb_mask & used_mask) == 0);
         used_mask |= xfb_mask;
      }
   }

   if (nir_intrinsic_has_io_semantics(instr) &&
       !nir_intrinsic_infos[instr->intrinsic].has_dest) {
      nir_io_semantics sem = nir_intrinsic_io_semantics(instr);

      /* An output that has no effect shouldn't be present in the IR. */
      validate_assert(state,
                      (nir_slot_is_sysval_output(sem.location, MESA_SHADER_NONE) &&
                       !sem.no_sysval_output) ||
                      (nir_slot_is_varying(sem.location, MESA_SHADER_NONE) &&
                       !sem.no_varying) ||
                      nir_instr_xfb_write_mask(instr) ||
                      /* TCS can set no_varying and no_sysval_output, meaning
                       * that the output is only read by TCS and not TES.
                       */
                      state->shader->info.stage == MESA_SHADER_TESS_CTRL);
      validate_assert(state,
                      (!sem.dual_source_blend_index &&
                       !sem.fb_fetch_output &&
                       !sem.fb_fetch_output_coherent) ||
                      state->shader->info.stage == MESA_SHADER_FRAGMENT);
      validate_assert(state,
                      !sem.gs_streams ||
                      state->shader->info.stage == MESA_SHADER_GEOMETRY);
      validate_assert(state,
                      !sem.high_dvec2 ||
                      (state->shader->info.stage == MESA_SHADER_VERTEX &&
                       instr->intrinsic == nir_intrinsic_load_input));
      validate_assert(state,
                      !sem.interp_explicit_strict ||
                      (state->shader->info.stage == MESA_SHADER_FRAGMENT &&
                       instr->intrinsic == nir_intrinsic_load_input_vertex));
   }
}

static void
validate_tex_src_texture_deref(nir_tex_instr *instr, validate_state *state,
                               nir_deref_instr *deref)
{
   validate_assert(state, glsl_type_is_image(deref->type) ||
                             glsl_type_is_texture(deref->type) ||
                             glsl_type_is_sampler(deref->type));

   switch (instr->op) {
   case nir_texop_descriptor_amd:
   case nir_texop_sampler_descriptor_amd:
   case nir_texop_custom_border_color_agx:
      break;
   case nir_texop_lod:
   case nir_texop_lod_bias_agx:
      validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_float);
      break;
   case nir_texop_samples_identical:
   case nir_texop_has_custom_border_color_agx:
      validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_bool);
      break;
   case nir_texop_txs:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_fragment_mask_fetch_amd:
   case nir_texop_txf_ms_mcs_intel:
      validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_int ||
                             nir_alu_type_get_base_type(instr->dest_type) == nir_type_uint);
      break;
   default:
      validate_assert(state,
                      glsl_get_sampler_result_type(deref->type) == GLSL_TYPE_VOID ||
                      glsl_base_type_is_integer(glsl_get_sampler_result_type(deref->type)) ==
                         (nir_alu_type_get_base_type(instr->dest_type) == nir_type_int ||
                          nir_alu_type_get_base_type(instr->dest_type) == nir_type_uint));
   }
}

static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_sized_src(&instr->src[i].src, state,
                         0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {

      case nir_tex_src_comparator:
         validate_assert(state, instr->is_shadow);
         break;

      case nir_tex_src_bias:
         validate_assert(state, instr->op == nir_texop_txb ||
                                   instr->op == nir_texop_tg4 ||
                                   instr->op == nir_texop_lod);
         break;

      case nir_tex_src_lod:
         validate_assert(state, instr->op != nir_texop_tex &&
                                   instr->op != nir_texop_txb &&
                                   instr->op != nir_texop_txd &&
                                   instr->op != nir_texop_lod);
         break;

      case nir_tex_src_ddx:
      case nir_tex_src_ddy:
         validate_assert(state, instr->op == nir_texop_txd);
         break;

      case nir_tex_src_texture_deref: {
         nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
         if (!validate_assert(state, deref))
            break;

         validate_tex_src_texture_deref(instr, state, deref);
         break;
      }

      case nir_tex_src_sampler_deref: {
         nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
         if (!validate_assert(state, deref))
            break;

         validate_assert(state, glsl_type_is_sampler(deref->type));
         break;
      }

      case nir_tex_src_sampler_deref_intrinsic:
      case nir_tex_src_texture_deref_intrinsic: {
         nir_intrinsic_instr *intrin =
            nir_instr_as_intrinsic(instr->src[i].src.ssa->parent_instr);
         nir_deref_instr *deref =
            nir_instr_as_deref(intrin->src[0].ssa->parent_instr);
         if (!validate_assert(state, deref))
            break;

         if (instr->src[i].src_type == nir_tex_src_sampler_deref_intrinsic)
            validate_assert(state, glsl_type_is_sampler(deref->type));
         else
            validate_tex_src_texture_deref(instr, state, deref);

         break;
      }

      case nir_tex_src_coord:
      case nir_tex_src_projector:
      case nir_tex_src_offset:
      case nir_tex_src_min_lod:
      case nir_tex_src_ms_index:
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_plane:
      case nir_tex_src_texture_handle:
      case nir_tex_src_sampler_handle:
         break;

      default:
         break;
      }
   }

   bool msaa = (instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
                instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS);

   if (msaa)
      validate_assert(state, instr->op != nir_texop_txf);
   else
      validate_assert(state, instr->op != nir_texop_txf_ms);

   if (instr->op != nir_texop_tg4)
      validate_assert(state, instr->component == 0);

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   if (instr->is_gather_implicit_lod)
      validate_assert(state, instr->op == nir_texop_tg4);

   validate_def(&instr->def, state);
   validate_assert(state, instr->def.num_components ==
                             nir_tex_instr_dest_size(instr));

   unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type);
   validate_assert(state,
                   (bit_size ? bit_size : 32) ==
                      instr->def.bit_size);
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_sized_src(&instr->params[i], state,
                         instr->callee->params[i].bit_size,
                         instr->callee->params[i].num_components);
   }
}

static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     bool is_null_constant, validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   if (!is_null_constant) {
      switch (bit_size) {
      case 1:
         cmp_val.b = val->b;
         break;
      case 8:
         cmp_val.u8 = val->u8;
         break;
      case 16:
         cmp_val.u16 = val->u16;
         break;
      case 32:
         cmp_val.u32 = val->u32;
         break;
      case 64:
         cmp_val.u64 = val->u64;
         break;
      default:
         validate_assert(state, !"Invalid load_const bit size");
      }
   }
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}
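
/* For example, a 16-bit constant may only have its low two bytes non-zero,
 * exactly as cmp_val ends up after the copy above.  A value written with
 * stale high bits would memcmp() unequal here, and would likewise break
 * whole-union comparisons during instruction hashing and serialization
 * round trips.
 */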

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_def(&instr->def, state);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      validate_const_value(&instr->value[i], instr->def.bit_size, false, state);
}

static void
validate_ssa_undef_instr(nir_undef_instr *instr, validate_state *state)
{
   validate_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_def(&instr->def, state);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
                             state->block->predecessors->entries);
}
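
/* For example, for a phi like "c = phi(b1: x, b2: y)", x is validated when
 * we process b1's outgoing edges and y when we process b2's, via
 * validate_phi_srcs() further below; only the def and the source count are
 * checked here.
 */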

static void
validate_jump_instr(nir_jump_instr *instr, validate_state *state)
{
   nir_block *block = state->block;
   validate_assert(state, &instr->instr == nir_block_last_instr(block));

   switch (instr->type) {
   case nir_jump_return:
   case nir_jump_halt:
      validate_assert(state, block->successors[0] == state->impl->end_block);
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      validate_assert(state, !state->in_loop_continue_construct);
      break;

   case nir_jump_break:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_continue:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *cont_block = nir_loop_continue_target(state->loop);
         validate_assert(state, block->successors[0] == cont_block);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      validate_assert(state, !state->in_loop_continue_construct);
      break;

   case nir_jump_goto:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[0]);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto_if:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[1]);
      validate_assert(state, instr->else_target == block->successors[0]);
      validate_sized_src(&instr->condition, state, 0, 1);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target != NULL);
      break;

   default:
      validate_assert(state, !"Invalid jump instruction type");
      break;
   }
}
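
/* Summary of the successor shapes checked above:
 *
 *    return/halt : { end_block,            NULL }
 *    break       : { block after the loop, NULL }
 *    continue    : { continue target,      NULL }
 *    goto        : { target,               NULL }     (unstructured only)
 *    goto_if     : { else_target,          target }   (unstructured only)
 */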

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_undef:
      validate_ssa_undef_instr(nir_instr_as_undef(instr), state);
      break;

   case nir_instr_type_jump:
      validate_jump_instr(nir_instr_as_jump(instr), state);
      break;

   case nir_instr_type_debug_info:
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_sized_src(&src->src, state, instr->def.bit_size,
                            instr->def.num_components);
         state->instr = NULL;
         return;
      }
   }
   validate_assert(state, !"Phi does not have a source corresponding to one "
                           "of its predecessor blocks");
}

static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_phi(phi, succ) {
      validate_phi_src(phi, block, state);
   }
}

static void
collect_blocks(struct exec_list *cf_list, validate_state *state)
{
   /* We walk the blocks manually here rather than using nir_foreach_block for
    * a few reasons:
    *
    *  1. We want to call exec_list_validate() on every linked list in the IR,
    *     which means we need to touch every linked list, and just walking
    *     blocks with nir_foreach_block() would make that difficult.  In
    *     particular, we want to validate each list before the first time we
    *     walk it so that we catch broken lists in exec_list_validate()
    *     instead of getting stuck in a hard-to-debug infinite loop in the
    *     validator.
    *
    *  2. nir_foreach_block() depends on several invariants of the CF node
    *     hierarchy which nir_validate_shader() is responsible for verifying.
    *     If we used nir_foreach_block() in nir_validate_shader(), we could
    *     end up blowing up on a bad list walk instead of throwing the much
    *     easier to debug validation error.
    */
   exec_list_validate(cf_list);
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block:
         _mesa_set_add(state->blocks, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         collect_blocks(&nir_cf_node_as_if(node)->then_list, state);
         collect_blocks(&nir_cf_node_as_if(node)->else_list, state);
         break;

      case nir_cf_node_loop:
         collect_blocks(&nir_cf_node_as_loop(node)->body, state);
         collect_blocks(&nir_cf_node_as_loop(node)->continue_list, state);
         break;

      default:
         unreachable("Invalid CF node type");
      }
   }
}

static void
collect_blocks_pdfs(nir_function_impl *impl, nir_block *block,
                    uint32_t *count, validate_state *state)
{
   if (block == impl->end_block)
      return;

   if (_mesa_set_search(state->blocks, block))
      return;

   _mesa_set_add(state->blocks, block);

   for (uint32_t i = 0; i < ARRAY_SIZE(block->successors); i++) {
      if (block->successors[i] != NULL)
         collect_blocks_pdfs(impl, block->successors[i], count, state);
   }

   /* Assert that the blocks are indexed in reverse PDFS (post-order
    * depth-first search) order.
    */
   validate_assert(state, block->index == --(*count));
}

static void
collect_unstructured_blocks(nir_function_impl *impl, validate_state *state)
{
   exec_list_validate(&impl->body);

   /* Assert that the blocks are properly indexed */
   uint32_t count = 0;
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      nir_block *block = nir_cf_node_as_block(node);
      validate_assert(state, block->index == count++);
   }
   validate_assert(state, impl->end_block->index == count);

   collect_blocks_pdfs(impl, nir_start_block(impl), &count, state);
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

static void
validate_block_predecessors(nir_block *block, validate_state *state)
{
   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] == NULL)
         continue;

      /* The block has to exist in the nir_function_impl */
      validate_assert(state, _mesa_set_search(state->blocks,
                                              block->successors[i]));

      /* And we have to be in our successor's predecessors set */
      validate_assert(state,
                      _mesa_set_search(block->successors[i]->predecessors, block));

      validate_phi_srcs(block, block->successors[i], state);
   }

   /* The start block cannot have any predecessors */
   if (block == nir_start_block(state->impl))
      validate_assert(state, block->predecessors->entries == 0);

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, _mesa_set_search(state->blocks, pred));
      validate_assert(state, pred->successors[0] == block ||
                                pred->successors[1] == block);
   }
}

static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                                   nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      validate_instr(instr, state);
   }

   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);
   validate_block_predecessors(block, state);

   if (!state->impl->structured) {
      validate_assert(state, nir_block_ends_in_jump(block));
   } else if (!nir_block_ends_in_jump(block)) {
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            if (block == nir_loop_last_block(state->loop)) {
               nir_block *cont = nir_loop_continue_target(state->loop);
               validate_assert(state, block->successors[0] == cont);
            } else {
               validate_assert(state, nir_loop_has_continue_construct(state->loop) &&
                                         block == nir_loop_last_continue_block(state->loop));
               nir_block *head = nir_loop_first_block(state->loop);
               validate_assert(state, block->successors[0] == head);
            }
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                                      nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                                      nir_if_first_else_block(if_stmt));
         } else if (next->type == nir_cf_node_loop) {
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                                      nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         } else {
            validate_assert(state,
                            !"Structured NIR cannot have consecutive blocks");
         }
      }
   }
}
1379 
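/* The end block is special: it hangs directly off the function, holds no
 * instructions, and has no successors.
 */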
static void
validate_end_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == &state->impl->cf_node);

   exec_list_validate(&block->instr_list);
   validate_assert(state, exec_list_is_empty(&block->instr_list));

   validate_assert(state, block->successors[0] == NULL);
   validate_assert(state, block->successors[1] == NULL);
   validate_block_predecessors(block, state);
}

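/* An if node must be sandwiched between two blocks, its condition must be a
 * proper if-use, and both branches must be non-empty (an "empty" branch is
 * represented by a list containing a single empty block).
 */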
static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, nir_src_is_if(&if_stmt->condition));
   validate_if_src(&if_stmt->condition, state);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

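/* Like an if, a loop must sit between two blocks and have a non-empty body.
 * The loop and continue-construct state is saved and restored around the
 * walk so that nested loops validate correctly.
 */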
static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   bool old_continue_construct = state->in_loop_continue_construct;
   state->loop = loop;
   state->in_loop_continue_construct = false;

   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }
   state->in_loop_continue_construct = true;
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->continue_list) {
      validate_cf_node(cf_node, state);
   }
   state->in_loop_continue_construct = false;
   state->parent_node = old_parent;
   state->loop = old_loop;
   state->in_loop_continue_construct = old_continue_construct;
}

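/* Checks the parent pointer, then dispatches to the validator for the
 * node's concrete type.
 */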
static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

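/* Recursively checks a constant against its GLSL type: unused trailing
 * vector components must be zero, aggregate element counts must match the
 * type, and a null constant may only contain null children.
 */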
static void
validate_constant(nir_constant *c, const struct glsl_type *type,
                  validate_state *state)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(type);
      unsigned bit_size = glsl_get_bit_size(type);
      for (unsigned i = 0; i < num_components; i++)
         validate_const_value(&c->values[i], bit_size, c->is_null_constant, state);
      for (unsigned i = num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
         validate_assert(state, c->values[i].u64 == 0);
   } else {
      validate_assert(state, c->num_elements == glsl_get_length(type));
      if (glsl_type_is_struct_or_ifc(type)) {
         for (unsigned i = 0; i < c->num_elements; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            validate_constant(c->elements[i], elem_type, state);
            validate_assert(state, !c->is_null_constant || c->elements[i]->is_null_constant);
         }
      } else if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < c->num_elements; i++) {
            validate_constant(c->elements[i], elem_type, state);
            validate_assert(state, !c->is_null_constant || c->elements[i]->is_null_constant);
         }
      } else {
         validate_assert(state, !"Invalid type for nir_constant");
      }
   }
}

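/* Checks the invariants of a variable declaration: exactly one mode bit
 * that is permitted in this context, a sane layout for compact, per-view,
 * and image variables, and a type-correct constant initializer. The
 * variable's defining impl (NULL for shader-level variables) is recorded
 * in var_defs for later checks.
 */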
static void
validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
                  validate_state *state)
{
   state->var = var;

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
   validate_assert(state, var->data.mode & valid_modes);

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_arrayed_io(var, state->shader->info.stage)) {
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   if (var->data.per_view)
      validate_assert(state, glsl_type_is_array(var->type));

   if (var->constant_initializer)
      validate_constant(var->constant_initializer, var->type, state);

   if (var->data.mode == nir_var_image) {
      validate_assert(state, !var->data.bindless);
      validate_assert(state, glsl_type_is_image(glsl_without_array(var->type)));
   }

   if (var->data.per_vertex)
      validate_assert(state, state->shader->info.stage == MESA_SHADER_FRAGMENT);

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           valid_modes == nir_var_function_temp ? state->impl : NULL);

   state->var = NULL;
}

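/* In the dominance pass, every def must have an in-range, not-yet-seen
 * index; marking it in ssa_defs_found makes it visible to later uses in
 * the same block.
 */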
static bool
validate_ssa_def_dominance(nir_def *def, void *_state)
{
   validate_state *state = _state;

   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   return true;
}

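/* A use in the same block as its def must come after the def (i.e. the def
 * was already marked in ssa_defs_found); a use in any other block must be
 * dominated by the def's block.
 */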
static bool
validate_src_dominance(nir_src *src, void *_state)
{
   validate_state *state = _state;

   if (src->ssa->parent_instr->block == nir_src_parent_instr(src)->block) {
      validate_assert(state, src->ssa->index < state->impl->ssa_alloc);
      validate_assert(state, BITSET_TEST(state->ssa_defs_found,
                                         src->ssa->index));
   } else {
      validate_assert(state, nir_block_dominates(src->ssa->parent_instr->block,
                                                 nir_src_parent_instr(src)->block));
   }
   return true;
}

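/* Walks the whole impl checking SSA dominance for every source. Phi sources
 * are special: the def only needs to dominate the end of the corresponding
 * predecessor block, not the phi itself.
 */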
static void
validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
{
   nir_metadata_require(impl, nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      state->block = block;
      nir_foreach_instr(instr, block) {
         state->instr = instr;
         if (instr->type == nir_instr_type_phi) {
            nir_phi_instr *phi = nir_instr_as_phi(instr);
            nir_foreach_phi_src(src, phi) {
               validate_assert(state,
                               nir_block_dominates(src->src.ssa->parent_instr->block,
                                                   src->pred));
            }
         } else {
            nir_foreach_src(instr, validate_src_dominance, state);
         }
         nir_foreach_def(instr, validate_ssa_def_dominance, state);
      }
   }
}

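/* Validates one function implementation: its local variables, the set of
 * reachable blocks, the whole CF tree, and the end block. When the
 * VALIDATE_SSA_DOMINANCE debug flag is set (cached in a function-local
 * static), a second SSA-dominance pass runs as well.
 */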
static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   if (impl->preamble) {
      validate_assert(state, impl->function->is_entrypoint);
      validate_assert(state, impl->preamble->is_preamble);
   }

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_function_temp_variable(var, impl) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));

   _mesa_set_clear(state->blocks, NULL);
   _mesa_set_resize(state->blocks, impl->num_blocks);
   if (impl->structured)
      collect_blocks(&impl->body, state);
   else
      collect_unstructured_blocks(impl, state);
   _mesa_set_add(state->blocks, impl->end_block);
   validate_assert(state, !exec_list_is_empty(&impl->body));
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }
   validate_end_block(impl->end_block, state);

   /* We must have seen every source by now. This also means that we've untagged
    * every source, so we have valid (unaugmented) NIR once again.
    */
   validate_assert(state, state->nr_tagged_srcs == 0);

   static int validate_dominance = -1;
   if (validate_dominance < 0) {
      validate_dominance =
         NIR_DEBUG(VALIDATE_SSA_DOMINANCE);
   }
   if (validate_dominance) {
      memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));
      validate_ssa_dominance(impl, state);
   }
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->ssa_defs_found = NULL;
   state->blocks = _mesa_pointer_set_create(state->mem_ctx);
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->nr_tagged_srcs = 0;

   state->loop = NULL;
   state->in_loop_continue_construct = false;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

simple_mtx_t fail_dump_mutex = SIMPLE_MTX_INITIALIZER;

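/* Prints the shader annotated with the recorded errors, followed by any
 * errors that could not be attached to a specific object, then aborts.
 * fail_dump_mutex serializes the output when multiple threads fail
 * validation at once.
 */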
static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario.
    */
   simple_mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   simple_mtx_unlock(&fail_dump_mutex);

   abort();
}

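/* The public entry point. "when" is a free-form description of where in the
 * compile validation ran, typically the name of the pass that just executed;
 * a call site might look like (illustrative example):
 *
 *    nir_validate_shader(shader, "after nir_opt_dce");
 *
 * Validation is skipped entirely when the NIR_DEBUG novalidate option is
 * set.
 */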
void
nir_validate_shader(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_variable_mode valid_modes =
      nir_var_shader_in |
      nir_var_shader_out |
      nir_var_shader_temp |
      nir_var_uniform |
      nir_var_mem_ubo |
      nir_var_system_value |
      nir_var_mem_ssbo |
      nir_var_mem_shared |
      nir_var_mem_global |
      nir_var_mem_push_const |
      nir_var_mem_constant |
      nir_var_image;

   if (gl_shader_stage_is_callable(shader->info.stage))
      valid_modes |= nir_var_shader_call_data;

   if (shader->info.stage == MESA_SHADER_ANY_HIT ||
       shader->info.stage == MESA_SHADER_CLOSEST_HIT ||
       shader->info.stage == MESA_SHADER_INTERSECTION)
      valid_modes |= nir_var_ray_hit_attrib;

   if (shader->info.stage == MESA_SHADER_TASK ||
       shader->info.stage == MESA_SHADER_MESH)
      valid_modes |= nir_var_mem_task_payload;

   if (shader->info.stage == MESA_SHADER_COMPUTE)
      valid_modes |= nir_var_mem_node_payload |
                     nir_var_mem_node_payload_in;

   exec_list_validate(&shader->variables);
   nir_foreach_variable_in_shader(var, shader)
      validate_var_decl(var, valid_modes, &state);

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (shader->xfb_info != NULL) {
      /* At least validate that, if nir_shader::xfb_info exists, the shader
       * has real transform feedback going on.
       */
      validate_assert(&state, shader->info.stage == MESA_SHADER_VERTEX ||
                                 shader->info.stage == MESA_SHADER_TESS_EVAL ||
                                 shader->info.stage == MESA_SHADER_GEOMETRY);
      validate_assert(&state, shader->xfb_info->buffers_written != 0);
      validate_assert(&state, shader->xfb_info->streams_written != 0);
      validate_assert(&state, shader->xfb_info->output_count > 0);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

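/* A reduced entry point that re-checks only SSA dominance for every
 * function implementation, skipping the rest of the (more expensive) full
 * validation.
 */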
void
nir_validate_ssa_dominance(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_foreach_function_impl(impl, shader) {
      state.ssa_defs_found = reralloc(state.mem_ctx, state.ssa_defs_found,
                                      BITSET_WORD,
                                      BITSET_WORDS(impl->ssa_alloc));
      memset(state.ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));

      state.impl = impl;
      validate_ssa_dominance(impl, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */