/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"

#include "util/set.h"

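/* Returns true if the source resolves to a direct load_invocation_id, i.e.
 * the index provably refers to the current TCS invocation's own slot.
 */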
static bool
src_is_invocation_id(const nir_src *src)
{
   nir_scalar s = nir_scalar_resolved(src->ssa, 0);
   return nir_scalar_is_intrinsic(s) &&
          nir_scalar_intrinsic_op(s) == nir_intrinsic_load_invocation_id;
}

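/* Returns true if the source resolves to this mesh-shader invocation's own
 * local invocation index, so an array access indexed by it is not a
 * cross-invocation access.
 */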
static bool
src_is_local_invocation_index(nir_shader *shader, const nir_src *src)
{
   assert(shader->info.stage == MESA_SHADER_MESH && !shader->info.workgroup_size_variable);

   nir_scalar s = nir_scalar_resolved(src->ssa, 0);
   if (!nir_scalar_is_intrinsic(s))
      return false;

   const nir_intrinsic_op op = nir_scalar_intrinsic_op(s);
   if (op == nir_intrinsic_load_local_invocation_index)
      return true;
   if (op != nir_intrinsic_load_local_invocation_id)
      return false;

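   /* load_local_invocation_id behaves like load_local_invocation_index when
    * at most one workgroup dimension is larger than 1 and the component read
    * is that dimension (with a 1x1x1 workgroup every component is zero, so
    * any component matches the index).
    */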
   unsigned nz_ids = 0;
   for (unsigned i = 0; i < 3; i++)
      nz_ids |= (shader->info.workgroup_size[i] > 1) ? (1u << i) : 0;

   return nz_ids == 0 || (util_bitcount(nz_ids) == 1 && s.comp == ffs(nz_ids) - 1);
}

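/* Determine whether a deref of an arrayed I/O variable accesses another
 * invocation's slot (cross_invocation) and whether any array index along
 * the deref path is non-constant (indirect).
 */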
static void
get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
               bool *cross_invocation, bool *indirect)
{
   *cross_invocation = false;
   *indirect = false;

   const bool is_arrayed = nir_is_arrayed_io(var, shader->info.stage);

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);
   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* Vertex index is the outermost array index. */
   if (is_arrayed) {
      assert((*p)->deref_type == nir_deref_type_array);
      if (shader->info.stage == MESA_SHADER_TESS_CTRL)
         *cross_invocation = !src_is_invocation_id(&(*p)->arr.index);
      else if (shader->info.stage == MESA_SHADER_MESH)
         *cross_invocation = !src_is_local_invocation_index(shader, &(*p)->arr.index);
      p++;
   }

   /* We always lower indirect dereferences for "compact" array vars. */
   if (!path.path[0]->var->data.compact) {
      /* Non-compact array vars: find out if they are indirect. */
      for (; *p; p++) {
         if ((*p)->deref_type == nir_deref_type_array) {
            *indirect |= !nir_src_is_const((*p)->arr.index);
         } else if ((*p)->deref_type == nir_deref_type_struct) {
            /* Struct indices are always constant. */
         } else if ((*p)->deref_type == nir_deref_type_array_wildcard) {
            /* Wildcards ref the whole array dimension and should get lowered
             * to a direct deref at a later point.
             */
         } else {
            unreachable("Unsupported deref type");
         }
      }
   }

   nir_deref_path_finish(&path);
}

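/* Mark "len" consecutive slots of the variable, starting "offset" slots past
 * its base location, as read or written in shader_info.
 */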
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
            nir_deref_instr *deref, bool is_output_read)
{
   for (int i = 0; i < len; i++) {
      /* Varyings might not have been assigned values yet so abort. */
      if (var->data.location == -1)
         return;

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         /* Varyings might still have temp locations so abort */
         if (idx < VARYING_SLOT_PATCH0 || idx >= VARYING_SLOT_TESS_MAX)
            return;

         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         /* Varyings might still have temp locations so abort */
         if (idx >= VARYING_SLOT_MAX)
            return;

         bitfield = BITFIELD64_BIT(idx);
      }

      bool cross_invocation;
      bool indirect;
      get_deref_info(shader, var, deref, &cross_invocation, &indirect);

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic) {
            shader->info.patch_inputs_read |= bitfield;
            if (indirect)
               shader->info.patch_inputs_read_indirectly |= bitfield;
         } else {
            shader->info.inputs_read |= bitfield;
            if (indirect)
               shader->info.inputs_read_indirectly |= bitfield;
         }

         if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
            shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;

         if (shader->info.stage == MESA_SHADER_FRAGMENT) {
            shader->info.fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_output_read) {
            if (is_patch_generic) {
               shader->info.patch_outputs_read |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else {
               shader->info.outputs_read |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }

            if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
               shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
         } else {
            if (is_patch_generic) {
               shader->info.patch_outputs_written |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else if (!var->data.read_only) {
               shader->info.outputs_written |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }
         }

         if (cross_invocation && shader->info.stage == MESA_SHADER_MESH)
            shader->info.mesh.ms_cross_invocation_output_access |= bitfield;

         if (var->data.fb_fetch_output) {
            shader->info.outputs_read |= bitfield;
            if (shader->info.stage == MESA_SHADER_FRAGMENT) {
               shader->info.fs.uses_fbfetch_output = true;
               shader->info.fs.fbfetch_coherent = var->data.access & ACCESS_COHERENT;
            }
         }

         if (shader->info.stage == MESA_SHADER_FRAGMENT &&
             !is_output_read && var->data.index == 1)
            shader->info.fs.color_is_dual_source = true;
      }
   }
}

/**
 * Mark an entire variable as used. Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_arrayed_io(var, shader->info.stage) ||
       /* For NV_mesh_shader. */
       (shader->info.stage == MESA_SHADER_MESH &&
        var->data.location == VARYING_SLOT_PRIMITIVE_INDICES &&
        !var->data.per_primitive)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   if (var->data.per_view) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots = nir_variable_count_slots(var, type);
   set_io_mask(shader, var, 0, slots, deref, is_output_read);
}

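/* Compute the slot offset of the deref relative to the start of the
 * variable, skipping the outermost per-vertex/per-primitive index for
 * arrayed I/O. Returns -1 (as unsigned) when a relevant index is
 * non-constant.
 */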
static unsigned
get_io_offset(nir_deref_instr *deref, nir_variable *var, bool is_arrayed,
              bool skip_non_arrayed)
{
   if (var->data.compact) {
      if (deref->deref_type == nir_deref_type_var) {
         assert(glsl_type_is_array(var->type));
         return 0;
      }
      assert(deref->deref_type == nir_deref_type_array);
      return nir_src_is_const(deref->arr.index)
                ? (nir_src_as_uint(deref->arr.index) + var->data.location_frac) / 4u
                : (unsigned)-1;
   }

   unsigned offset = 0;

   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array) {
         if (is_arrayed && nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
            break;

         if (!is_arrayed && skip_non_arrayed)
            break;

         if (!nir_src_is_const(d->arr.index))
            return -1;

         offset += glsl_count_attribute_slots(d->type, false) *
                   nir_src_as_uint(d->arr.index);
      } else if (d->deref_type == nir_deref_type_struct) {
         const struct glsl_type *parent_type = nir_deref_instr_parent(d)->type;
         for (unsigned i = 0; i < d->strct.index; i++) {
            const struct glsl_type *field_type = glsl_get_struct_field(parent_type, i);
            offset += glsl_count_attribute_slots(field_type, false);
         }
      }
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used. Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;
   bool is_arrayed = nir_is_arrayed_io(var, shader->info.stage);
   bool skip_non_arrayed = shader->info.stage == MESA_SHADER_MESH;

   if (is_arrayed) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* Per-view variables will be considered as a whole. */
   if (var->data.per_view)
      return false;

   unsigned offset = get_io_offset(deref, var, is_arrayed, skip_non_arrayed);
   if (offset == -1)
      return false;

   const unsigned slots = nir_variable_count_slots(var, type);
   if (offset >= slots) {
      /* Constant index outside the bounds of the matrix/array. This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   unsigned len = nir_deref_count_slots(deref, var);
   set_io_mask(shader, var, offset, len, deref, is_output_read);
   return true;
}

/** Returns true if the given intrinsic writes external memory
 *
 * Only returns true for writes to globally visible memory, not scratch and
 * not shared.
 */
bool
nir_intrinsic_writes_external_memory(const nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_inc_deref:
   case nir_intrinsic_atomic_counter_add:
   case nir_intrinsic_atomic_counter_add_deref:
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_pre_dec_deref:
   case nir_intrinsic_atomic_counter_post_dec:
   case nir_intrinsic_atomic_counter_post_dec_deref:
   case nir_intrinsic_atomic_counter_min:
   case nir_intrinsic_atomic_counter_min_deref:
   case nir_intrinsic_atomic_counter_max:
   case nir_intrinsic_atomic_counter_max_deref:
   case nir_intrinsic_atomic_counter_and:
   case nir_intrinsic_atomic_counter_and_deref:
   case nir_intrinsic_atomic_counter_or:
   case nir_intrinsic_atomic_counter_or_deref:
   case nir_intrinsic_atomic_counter_xor:
   case nir_intrinsic_atomic_counter_xor_deref:
   case nir_intrinsic_atomic_counter_exchange:
   case nir_intrinsic_atomic_counter_exchange_deref:
   case nir_intrinsic_atomic_counter_comp_swap:
   case nir_intrinsic_atomic_counter_comp_swap_deref:
   case nir_intrinsic_bindless_image_atomic:
   case nir_intrinsic_bindless_image_atomic_swap:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_bindless_image_store_raw_intel:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap:
   case nir_intrinsic_global_atomic_ir3:
   case nir_intrinsic_global_atomic_swap_ir3:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_image_atomic_swap:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_store_raw_intel:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_store_raw_intel:
   case nir_intrinsic_ssbo_atomic:
   case nir_intrinsic_ssbo_atomic_swap:
   case nir_intrinsic_ssbo_atomic_ir3:
   case nir_intrinsic_ssbo_atomic_swap_ir3:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_global_ir3:
   case nir_intrinsic_store_global_amd:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_ssbo_ir3:
      return true;

   case nir_intrinsic_store_deref:
   case nir_intrinsic_deref_atomic:
   case nir_intrinsic_deref_atomic_swap:
      return nir_deref_mode_may_be(nir_src_as_deref(instr->src[0]),
                                   nir_var_mem_ssbo | nir_var_mem_global);

   default:
      return false;
   }
}

static bool
intrinsic_is_bindless(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_bindless_image_atomic:
   case nir_intrinsic_bindless_image_atomic_swap:
   case nir_intrinsic_bindless_image_descriptor_amd:
   case nir_intrinsic_bindless_image_format:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_bindless_image_load_raw_intel:
   case nir_intrinsic_bindless_image_order:
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_bindless_image_samples_identical:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_bindless_image_sparse_load:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_bindless_image_store_raw_intel:
   case nir_intrinsic_bindless_resource_ir3:
      return true;
   default:
      break;
   }
   return false;
}

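/* Per-intrinsic gathering: update the I/O masks, system-values-read bits and
 * miscellaneous per-stage flags for a single intrinsic instruction.
 */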
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   uint64_t slot_mask = 0;
   uint16_t slot_mask_16bit = 0;
   bool is_patch_special = false;

   if (nir_intrinsic_infos[instr->intrinsic].index_map[NIR_INTRINSIC_IO_SEMANTICS] > 0) {
      nir_io_semantics semantics = nir_intrinsic_io_semantics(instr);

      is_patch_special = semantics.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                         semantics.location == VARYING_SLOT_TESS_LEVEL_OUTER ||
                         semantics.location == VARYING_SLOT_BOUNDING_BOX0 ||
                         semantics.location == VARYING_SLOT_BOUNDING_BOX1;

      if (semantics.location >= VARYING_SLOT_PATCH0 &&
          semantics.location <= VARYING_SLOT_PATCH31) {
         /* Generic per-patch I/O. */
         assert((shader->info.stage == MESA_SHADER_TESS_EVAL &&
                 instr->intrinsic == nir_intrinsic_load_input) ||
                (shader->info.stage == MESA_SHADER_TESS_CTRL &&
                 (instr->intrinsic == nir_intrinsic_load_output ||
                  instr->intrinsic == nir_intrinsic_store_output)));

         semantics.location -= VARYING_SLOT_PATCH0;
      }

      if (semantics.location >= VARYING_SLOT_VAR0_16BIT &&
          semantics.location <= VARYING_SLOT_VAR15_16BIT) {
         /* Convert num_slots from the units of half vectors to full vectors. */
         unsigned num_slots = (semantics.num_slots + semantics.high_16bits + 1) / 2;
         slot_mask_16bit =
            BITFIELD_RANGE(semantics.location - VARYING_SLOT_VAR0_16BIT, num_slots);
      } else {
         slot_mask = BITFIELD64_RANGE(semantics.location, semantics.num_slots);
         assert(util_bitcount64(slot_mask) == semantics.num_slots);
      }
   }

   switch (instr->intrinsic) {
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      shader->info.fs.uses_demote = true;
      FALLTHROUGH; /* quads with helper lanes only might be discarded entirely */
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
   case nir_intrinsic_terminate:
   case nir_intrinsic_terminate_if:
      /* Freedreno uses discard_if() to end GS invocations that don't produce
       * a vertex and RADV uses terminate() to end ray-tracing shaders,
       * so only set uses_discard for fragment shaders.
       */
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref:
   case nir_intrinsic_copy_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      if (nir_deref_mode_is_one_of(deref, nir_var_shader_in |
                                             nir_var_shader_out)) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, deref, is_output_read);

         /* We need to track which inputs_read bits correspond to a
          * dvec3/dvec4 input attribute */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      if (nir_intrinsic_writes_external_memory(instr))
         shader->info.writes_memory = true;
      break;
   }
   case nir_intrinsic_image_deref_load: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      nir_variable *var = nir_deref_instr_get_variable(deref);
      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_without_array(var->type));
      if (dim != GLSL_SAMPLER_DIM_SUBPASS &&
          dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
         break;

      var->data.fb_fetch_output = true;
      shader->info.fs.uses_fbfetch_output = true;
      break;
   }

   case nir_intrinsic_bindless_image_load: {
      enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
      if (dim != GLSL_SAMPLER_DIM_SUBPASS &&
          dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
         break;
      shader->info.fs.uses_fbfetch_output = true;
      break;
   }

   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_interpolated_input:
      if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
          instr->intrinsic == nir_intrinsic_load_input &&
          !is_patch_special) {
         shader->info.patch_inputs_read |= slot_mask;
         if (!nir_src_is_const(*nir_get_io_offset_src(instr)))
            shader->info.patch_inputs_read_indirectly |= slot_mask;
      } else {
         shader->info.inputs_read |= slot_mask;
         if (nir_intrinsic_io_semantics(instr).high_dvec2)
            shader->info.dual_slot_inputs |= slot_mask;
         shader->info.inputs_read_16bit |= slot_mask_16bit;
         if (!nir_src_is_const(*nir_get_io_offset_src(instr))) {
            shader->info.inputs_read_indirectly |= slot_mask;
            shader->info.inputs_read_indirectly_16bit |= slot_mask_16bit;
         }
      }

      if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
          instr->intrinsic == nir_intrinsic_load_per_vertex_input &&
          !src_is_invocation_id(nir_get_io_arrayed_index_src(instr)))
         shader->info.tess.tcs_cross_invocation_inputs_read |= slot_mask;
      break;

   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_per_primitive_output:
      if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
          instr->intrinsic == nir_intrinsic_load_output &&
          !is_patch_special) {
         shader->info.patch_outputs_read |= slot_mask;
         if (!nir_src_is_const(*nir_get_io_offset_src(instr)))
            shader->info.patch_outputs_accessed_indirectly |= slot_mask;
      } else {
         shader->info.outputs_read |= slot_mask;
         shader->info.outputs_read_16bit |= slot_mask_16bit;
         if (!nir_src_is_const(*nir_get_io_offset_src(instr))) {
            shader->info.outputs_accessed_indirectly |= slot_mask;
            shader->info.outputs_accessed_indirectly_16bit |= slot_mask_16bit;
         }
      }

      if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
          instr->intrinsic == nir_intrinsic_load_per_vertex_output &&
          !src_is_invocation_id(nir_get_io_arrayed_index_src(instr)))
         shader->info.tess.tcs_cross_invocation_outputs_read |= slot_mask;

      /* NV_mesh_shader: mesh shaders can load their outputs. */
      if (shader->info.stage == MESA_SHADER_MESH &&
          (instr->intrinsic == nir_intrinsic_load_per_vertex_output ||
           instr->intrinsic == nir_intrinsic_load_per_primitive_output) &&
          !src_is_local_invocation_index(shader, nir_get_io_arrayed_index_src(instr)))
         shader->info.mesh.ms_cross_invocation_output_access |= slot_mask;

      if (shader->info.stage == MESA_SHADER_FRAGMENT &&
          nir_intrinsic_io_semantics(instr).fb_fetch_output)
         shader->info.fs.uses_fbfetch_output = true;
      break;

   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
   case nir_intrinsic_store_per_primitive_output:
      if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
          instr->intrinsic == nir_intrinsic_store_output &&
          !is_patch_special) {
         shader->info.patch_outputs_written |= slot_mask;
         if (!nir_src_is_const(*nir_get_io_offset_src(instr)))
            shader->info.patch_outputs_accessed_indirectly |= slot_mask;
      } else {
         shader->info.outputs_written |= slot_mask;
         shader->info.outputs_written_16bit |= slot_mask_16bit;
         if (!nir_src_is_const(*nir_get_io_offset_src(instr))) {
            shader->info.outputs_accessed_indirectly |= slot_mask;
            shader->info.outputs_accessed_indirectly_16bit |= slot_mask_16bit;
         }
      }

      if (shader->info.stage == MESA_SHADER_MESH &&
          (instr->intrinsic == nir_intrinsic_store_per_vertex_output ||
           instr->intrinsic == nir_intrinsic_store_per_primitive_output) &&
          !src_is_local_invocation_index(shader, nir_get_io_arrayed_index_src(instr)))
         shader->info.mesh.ms_cross_invocation_output_access |= slot_mask;

      if (shader->info.stage == MESA_SHADER_FRAGMENT &&
          nir_intrinsic_io_semantics(instr).dual_source_blend_index)
         shader->info.fs.color_is_dual_source = true;
      break;

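   /* load_color0/load_color1 also read the COL0/COL1 varyings, so mark the
    * matching inputs_read bit in addition to the system value below.
    * VARYING_SLOT_COL0 is 1, so shifting it left by one yields COL1.
    */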
   case nir_intrinsic_load_color0:
   case nir_intrinsic_load_color1:
      shader->info.inputs_read |=
         BITFIELD64_BIT(VARYING_SLOT_COL0 << (instr->intrinsic == nir_intrinsic_load_color1));
      FALLTHROUGH;
   case nir_intrinsic_load_subgroup_size:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask:
   case nir_intrinsic_load_num_subgroups:
   case nir_intrinsic_load_subgroup_id:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_frag_shading_rate:
   case nir_intrinsic_load_fully_covered:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_line_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_pos_or_center:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_coord_xy:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer_default:
   case nir_intrinsic_load_tess_level_inner_default:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_global_invocation_id:
   case nir_intrinsic_load_base_global_invocation_id:
   case nir_intrinsic_load_global_invocation_index:
   case nir_intrinsic_load_workgroup_id:
   case nir_intrinsic_load_workgroup_index:
   case nir_intrinsic_load_num_workgroups:
   case nir_intrinsic_load_workgroup_size:
   case nir_intrinsic_load_work_dim:
   case nir_intrinsic_load_user_data_amd:
   case nir_intrinsic_load_view_index:
   case nir_intrinsic_load_barycentric_model:
   case nir_intrinsic_load_ray_launch_id:
   case nir_intrinsic_load_ray_launch_size:
   case nir_intrinsic_load_ray_world_origin:
   case nir_intrinsic_load_ray_world_direction:
   case nir_intrinsic_load_ray_object_origin:
   case nir_intrinsic_load_ray_object_direction:
   case nir_intrinsic_load_ray_t_min:
   case nir_intrinsic_load_ray_t_max:
   case nir_intrinsic_load_ray_object_to_world:
   case nir_intrinsic_load_ray_world_to_object:
   case nir_intrinsic_load_ray_hit_kind:
   case nir_intrinsic_load_ray_flags:
   case nir_intrinsic_load_ray_geometry_index:
   case nir_intrinsic_load_ray_instance_custom_index:
   case nir_intrinsic_load_mesh_view_count:
   case nir_intrinsic_load_gs_header_ir3:
   case nir_intrinsic_load_tcs_header_ir3:
   case nir_intrinsic_load_ray_triangle_vertex_positions:
   case nir_intrinsic_load_layer_id:
      BITSET_SET(shader->info.system_values_read,
                 nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_load_barycentric_pixel:
      if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_SMOOTH ||
          nir_intrinsic_interp_mode(instr) == INTERP_MODE_NONE) {
         BITSET_SET(shader->info.system_values_read,
                    SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
      } else if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_NOPERSPECTIVE) {
         BITSET_SET(shader->info.system_values_read,
                    SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL);
      }
      break;

   case nir_intrinsic_load_barycentric_centroid:
      if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_SMOOTH ||
          nir_intrinsic_interp_mode(instr) == INTERP_MODE_NONE) {
         BITSET_SET(shader->info.system_values_read,
                    SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
      } else if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_NOPERSPECTIVE) {
         BITSET_SET(shader->info.system_values_read,
                    SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID);
      }
      break;

   case nir_intrinsic_load_barycentric_sample:
      if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_SMOOTH ||
          nir_intrinsic_interp_mode(instr) == INTERP_MODE_NONE) {
         BITSET_SET(shader->info.system_values_read,
                    SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
      } else if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_NOPERSPECTIVE) {
         BITSET_SET(shader->info.system_values_read,
                    SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE);
      }
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.uses_sample_qualifier = true;
      break;

   case nir_intrinsic_load_barycentric_coord_pixel:
   case nir_intrinsic_load_barycentric_coord_centroid:
   case nir_intrinsic_load_barycentric_coord_sample:
   case nir_intrinsic_load_barycentric_coord_at_offset:
   case nir_intrinsic_load_barycentric_coord_at_sample:
      if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_SMOOTH ||
          nir_intrinsic_interp_mode(instr) == INTERP_MODE_NONE) {
         BITSET_SET(shader->info.system_values_read, SYSTEM_VALUE_BARYCENTRIC_PERSP_COORD);
      } else if (nir_intrinsic_interp_mode(instr) == INTERP_MODE_NOPERSPECTIVE) {
         BITSET_SET(shader->info.system_values_read, SYSTEM_VALUE_BARYCENTRIC_LINEAR_COORD);
      }
      break;

   case nir_intrinsic_quad_vote_any:
   case nir_intrinsic_quad_vote_all:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_quad_helper_invocations = true;
      break;

   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
   case nir_intrinsic_ballot:
   case nir_intrinsic_first_invocation:
   case nir_intrinsic_last_invocation:
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_elect:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_rotate:
   case nir_intrinsic_masked_swizzle_amd:
      shader->info.uses_wide_subgroup_intrinsics = true;

      if (shader->info.stage == MESA_SHADER_FRAGMENT &&
          shader->info.fs.require_full_quads)
         shader->info.fs.needs_quad_helper_invocations = true;
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
   case nir_intrinsic_end_primitive_nv:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = 1;
      FALLTHROUGH;

   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_emit_vertex_with_counter:
   case nir_intrinsic_emit_vertex_nv:
      shader->info.gs.active_stream_mask |= 1 << nir_intrinsic_stream_id(instr);

      break;

   case nir_intrinsic_barrier:
      shader->info.uses_control_barrier |=
         nir_intrinsic_execution_scope(instr) != SCOPE_NONE;

      shader->info.uses_memory_barrier |=
         nir_intrinsic_memory_scope(instr) != SCOPE_NONE;
      break;

   case nir_intrinsic_store_zs_agx:
      shader->info.outputs_written |= BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
                                      BITFIELD64_BIT(FRAG_RESULT_STENCIL);
      break;

   case nir_intrinsic_sample_mask_agx:
      shader->info.outputs_written |= BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK);
      break;

   case nir_intrinsic_discard_agx:
      shader->info.fs.uses_discard = true;
      break;

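   /* Record task->mesh dispatch dimensions when they can be proven constant;
    * non-constant components leave ts_mesh_dispatch_dimensions[i] at 0.
    */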
   case nir_intrinsic_launch_mesh_workgroups:
   case nir_intrinsic_launch_mesh_workgroups_with_payload_deref: {
      for (unsigned i = 0; i < 3; ++i) {
         nir_scalar dim = nir_scalar_resolved(instr->src[0].ssa, i);
         if (nir_scalar_is_const(dim))
            shader->info.mesh.ts_mesh_dispatch_dimensions[i] =
               nir_scalar_as_uint(dim);
      }
      break;
   }

   default:
      shader->info.uses_bindless |= intrinsic_is_bindless(instr);
      if (nir_intrinsic_writes_external_memory(instr))
         shader->info.writes_memory = true;

      if (instr->intrinsic == nir_intrinsic_image_size ||
          instr->intrinsic == nir_intrinsic_image_samples ||
          instr->intrinsic == nir_intrinsic_image_deref_size ||
          instr->intrinsic == nir_intrinsic_image_deref_samples ||
          instr->intrinsic == nir_intrinsic_bindless_image_size ||
          instr->intrinsic == nir_intrinsic_bindless_image_samples)
         shader->info.uses_resource_info_query = true;
      break;
   }
}

static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       nir_tex_instr_has_implicit_derivative(instr))
      shader->info.fs.needs_quad_helper_invocations = true;

   if (nir_tex_instr_src_index(instr, nir_tex_src_texture_handle) != -1 ||
       nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle) != -1)
      shader->info.uses_bindless = true;

   switch (instr->op) {
   case nir_texop_tg4:
      shader->info.uses_texture_gather = true;
      break;
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
      shader->info.uses_resource_info_query = true;
      break;
   default:
      break;
   }
}

static void
gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
{
   if (nir_op_is_derivative(instr->op) &&
       shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.needs_quad_helper_invocations = true;
   }

   if (instr->op == nir_op_fddx || instr->op == nir_op_fddy)
      shader->info.uses_fddx_fddy = true;

   const nir_op_info *info = &nir_op_infos[instr->op];

   for (unsigned i = 0; i < info->num_inputs; i++) {
      if (nir_alu_type_get_base_type(info->input_types[i]) == nir_type_float)
         shader->info.bit_sizes_float |= nir_src_bit_size(instr->src[i].src);
      else
         shader->info.bit_sizes_int |= nir_src_bit_size(instr->src[i].src);
   }
   if (nir_alu_type_get_base_type(info->output_type) == nir_type_float)
      shader->info.bit_sizes_float |= instr->def.bit_size;
   else
      shader->info.bit_sizes_int |= instr->def.bit_size;
}

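/* Walk every instruction of a function (and, recursively, of the functions
 * it calls), feeding each one to the per-instruction gatherers above.
 * visited_funcs prevents gathering the same impl more than once.
 */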
static void
gather_func_info(nir_function_impl *func, nir_shader *shader,
                 struct set *visited_funcs, void *dead_ctx)
{
   if (_mesa_set_search(visited_funcs, func))
      return;

   _mesa_set_add(visited_funcs, func);

   nir_foreach_block(block, func) {
      nir_foreach_instr(instr, block) {
         switch (instr->type) {
         case nir_instr_type_alu:
            gather_alu_info(nir_instr_as_alu(instr), shader);
            break;
         case nir_instr_type_intrinsic:
            gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
            break;
         case nir_instr_type_tex:
            gather_tex_info(nir_instr_as_tex(instr), shader);
            break;
         case nir_instr_type_call: {
            nir_call_instr *call = nir_instr_as_call(instr);
            nir_function_impl *impl = call->callee->impl;

            assert(impl || !"nir_shader_gather_info only works with linked shaders");
            gather_func_info(impl, shader, visited_funcs, dead_ctx);
            break;
         }
         default:
            break;
         }
      }
   }
}

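/* Reset the derived bits of shader->info and re-gather them by walking the
 * shader's variable lists and all code reachable from the given entrypoint.
 *
 * Typical use after linking/lowering (illustrative only):
 *
 *    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 */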
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info.num_textures = 0;
   shader->info.num_images = 0;
   shader->info.bit_sizes_float = 0;
   shader->info.bit_sizes_int = 0;
   shader->info.uses_bindless = false;

   nir_foreach_variable_with_modes(var, shader, nir_var_image | nir_var_uniform) {
      if (var->data.bindless)
         shader->info.uses_bindless = true;
      /* Bindless textures and images don't use non-bindless slots.
       * Interface blocks imply inputs, outputs, UBO, or SSBO, which can only
       * mean bindless.
       */
      if (var->data.bindless || var->interface_type)
         continue;

      shader->info.num_textures += glsl_type_get_sampler_count(var->type) +
                                   glsl_type_get_texture_count(var->type);
      shader->info.num_images += glsl_type_get_image_count(var->type);
   }

   /* these types may not initially be marked bindless */
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_in | nir_var_shader_out) {
      const struct glsl_type *type = glsl_without_array(var->type);
      if (glsl_type_is_sampler(type) || glsl_type_is_image(type))
         shader->info.uses_bindless = true;
   }

   shader->info.inputs_read = 0;
   shader->info.dual_slot_inputs = 0;
   shader->info.outputs_written = 0;
   shader->info.outputs_read = 0;
   shader->info.inputs_read_16bit = 0;
   shader->info.outputs_written_16bit = 0;
   shader->info.outputs_read_16bit = 0;
   shader->info.inputs_read_indirectly_16bit = 0;
   shader->info.outputs_accessed_indirectly_16bit = 0;
   shader->info.patch_outputs_read = 0;
   shader->info.patch_inputs_read = 0;
   shader->info.patch_outputs_written = 0;
   BITSET_ZERO(shader->info.system_values_read);
   shader->info.inputs_read_indirectly = 0;
   shader->info.outputs_accessed_indirectly = 0;
   shader->info.patch_inputs_read_indirectly = 0;
   shader->info.patch_outputs_accessed_indirectly = 0;

   shader->info.uses_resource_info_query = false;

   if (shader->info.stage == MESA_SHADER_VERTEX) {
      shader->info.vs.double_inputs = 0;
   }
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.uses_sample_qualifier = false;
      shader->info.fs.uses_discard = false;
      shader->info.fs.uses_demote = false;
      shader->info.fs.color_is_dual_source = false;
      shader->info.fs.uses_fbfetch_output = false;
      shader->info.fs.needs_quad_helper_invocations = false;
   }
   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      shader->info.tess.tcs_cross_invocation_inputs_read = 0;
      shader->info.tess.tcs_cross_invocation_outputs_read = 0;
   }
   if (shader->info.stage == MESA_SHADER_MESH) {
      shader->info.mesh.ms_cross_invocation_output_access = 0;
   }
   if (shader->info.stage == MESA_SHADER_TASK) {
      shader->info.mesh.ts_mesh_dispatch_dimensions[0] = 0;
      shader->info.mesh.ts_mesh_dispatch_dimensions[1] = 0;
      shader->info.mesh.ts_mesh_dispatch_dimensions[2] = 0;
   }

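   /* Transform feedback stores vertex outputs to buffers, so treat it as a
    * memory write for any stage that can have XFB varyings.
    */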
   if (shader->info.stage != MESA_SHADER_FRAGMENT)
      shader->info.writes_memory = shader->info.has_transform_feedback_varyings;

   void *dead_ctx = ralloc_context(NULL);
   struct set *visited_funcs = _mesa_pointer_set_create(dead_ctx);
   gather_func_info(entrypoint, shader, visited_funcs, dead_ctx);
   ralloc_free(dead_ctx);

   shader->info.per_primitive_outputs = 0;
   shader->info.per_view_outputs = 0;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.per_primitive) {
         assert(shader->info.stage == MESA_SHADER_MESH);
         assert(nir_is_arrayed_io(var, shader->info.stage));
         const unsigned slots =
            glsl_count_attribute_slots(glsl_get_array_element(var->type), false);
         shader->info.per_primitive_outputs |= BITFIELD64_RANGE(var->data.location, slots);
      }
      if (var->data.per_view) {
         const unsigned slots =
            glsl_count_attribute_slots(glsl_get_array_element(var->type), false);
         shader->info.per_view_outputs |= BITFIELD64_RANGE(var->data.location, slots);
      }
   }

   shader->info.per_primitive_inputs = 0;
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      nir_foreach_shader_in_variable(var, shader) {
         if (var->data.per_primitive) {
            const unsigned slots =
               glsl_count_attribute_slots(var->type, false);
            shader->info.per_primitive_inputs |= BITFIELD64_RANGE(var->data.location, slots);
         }
      }
   }

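   /* Count ray queries declared anywhere in the shader; for arrays (and
    * arrays of arrays) of queries each element counts separately.
    */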
   shader->info.ray_queries = 0;
   nir_foreach_variable_in_shader(var, shader) {
      if (!var->data.ray_query)
         continue;

      shader->info.ray_queries += MAX2(glsl_get_aoa_size(var->type), 1);
   }
   nir_foreach_function_impl(impl, shader) {
      nir_foreach_function_temp_variable(var, impl) {
         if (!var->data.ray_query)
            continue;

         shader->info.ray_queries += MAX2(glsl_get_aoa_size(var->type), 1);
      }
   }
}