1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "nir_deref.h"
27
28 #include "util/bitscan.h"
29 #include "util/u_dynarray.h"
30
31 static const bool debug = false;
32
33 /**
34 * Variable-based copy propagation
35 *
36 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
37 * However, there are cases, especially when dealing with indirects, where SSA
38 * won't help you. This pass is for those times. Specifically, it handles
39 * the following things that the rest of NIR can't:
40 *
41 * 1) Copy-propagation on variables that have indirect access. This includes
42 * propagating from indirect stores into indirect loads.
43 *
44 * 2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
45 * to do this because it isn't aware of variable writes that may alias the
46 * value and make the former load invalid.
47 *
48 * This pass uses an intermediate solution between being local / "per-block"
49  * and a complete data-flow analysis.  It follows the control flow graph and
50  * propagates the available copy information forward, invalidating data at
51  * each cf_node.
52 *
53 * Removal of dead writes to variables is handled by another pass.
54 */
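/*
 * Illustrative sketch (hypothetical IR, not taken from a real shader): given
 *
 *    store_deref &arr[ssa_i], ssa_a
 *    vec1 32 ssa_b = load_deref &arr[ssa_i]
 *
 * the load can be replaced by ssa_a even though the index is indirect, and a
 * second identical load_deref can re-use the value of the first one instead
 * of reading the variable again.
 */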
55
56 struct vars_written {
57 nir_variable_mode modes;
58
59    /* Key is the deref and value is a uintptr_t holding the write mask. */
60 struct hash_table *derefs;
61 };
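/*
 * Sketch of the write-mask encoding, assumed from how gather_vars_written()
 * below fills this table: e.g. a store_deref to the .x and .z channels of
 * some deref &v inserts (or ORs in) the value (void *)(uintptr_t)0x5 for the
 * &v key, i.e. bit i set means component i was written.
 */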
62
63 struct value {
64 bool is_ssa;
65 union {
66 struct {
67 nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
68 uint8_t component[NIR_MAX_VEC_COMPONENTS];
69 } ssa;
70 nir_deref_and_path deref;
71 };
72 };
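/*
 * Rough illustration of the SSA form of struct value (assumed from how
 * value_set_ssa_components() and load_from_ssa_entry_value() use it):
 * component i of the tracked vector is channel ssa.component[i] of
 * ssa.def[i].  A vec3 whose .xy came from a hypothetical ssa_7 and whose .z
 * came from channel 0 of ssa_9 would be stored as
 *
 *    ssa.def       = { ssa_7, ssa_7, ssa_9 }
 *    ssa.component = {     0,     1,     0 }
 */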
73
74 static void
75 value_set_ssa_components(struct value *value, nir_ssa_def *def,
76 unsigned num_components)
77 {
78 if (!value->is_ssa)
79 memset(&value->ssa, 0, sizeof(value->ssa));
80 value->is_ssa = true;
81 for (unsigned i = 0; i < num_components; i++) {
82 value->ssa.def[i] = def;
83 value->ssa.component[i] = i;
84 }
85 }
86
87 struct copy_entry {
88 struct value src;
89
90 nir_deref_and_path dst;
91 };
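/*
 * A copy_entry records "dst currently holds src".  For example (sketch), a
 *
 *    copy_deref &a, &b
 *
 * produces an entry with dst = &a and a deref-valued src = &b, while a
 * store_deref produces an entry whose src is the stored SSA value.
 */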
92
93 struct copy_prop_var_state {
94 nir_function_impl *impl;
95
96 void *mem_ctx;
97 void *lin_ctx;
98
99 /* Maps nodes to vars_written. Used to invalidate copy entries when
100 * visiting each node.
101 */
102 struct hash_table *vars_written_map;
103
104 bool progress;
105 };
106
107 static bool
108 value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
109 {
110 assert(intrin->intrinsic == nir_intrinsic_store_deref);
111 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
112
113 for (unsigned i = 0; i < intrin->num_components; i++) {
114 if ((write_mask & (1 << i)) &&
115 (value->ssa.def[i] != intrin->src[1].ssa ||
116 value->ssa.component[i] != i))
117 return false;
118 }
119
120 return true;
121 }
122
123 static struct vars_written *
124 create_vars_written(struct copy_prop_var_state *state)
125 {
126 struct vars_written *written =
127 linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
128 written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
129 return written;
130 }
131
132 static void
133 gather_vars_written(struct copy_prop_var_state *state,
134 struct vars_written *written,
135 nir_cf_node *cf_node)
136 {
137 struct vars_written *new_written = NULL;
138
139 switch (cf_node->type) {
140 case nir_cf_node_function: {
141 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
142 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
143 gather_vars_written(state, NULL, cf_node);
144 break;
145 }
146
147 case nir_cf_node_block: {
148 if (!written)
149 break;
150
151 nir_block *block = nir_cf_node_as_block(cf_node);
152 nir_foreach_instr(instr, block) {
153 if (instr->type == nir_instr_type_call) {
154 written->modes |= nir_var_shader_out |
155 nir_var_shader_temp |
156 nir_var_function_temp |
157 nir_var_mem_ssbo |
158 nir_var_mem_shared |
159 nir_var_mem_global;
160 continue;
161 }
162
163 if (instr->type != nir_instr_type_intrinsic)
164 continue;
165
166 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
167 switch (intrin->intrinsic) {
168 case nir_intrinsic_control_barrier:
169 case nir_intrinsic_group_memory_barrier:
170 case nir_intrinsic_memory_barrier:
171 written->modes |= nir_var_shader_out |
172 nir_var_mem_ssbo |
173 nir_var_mem_shared |
174 nir_var_mem_global;
175 break;
176
177 case nir_intrinsic_scoped_barrier:
178 if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
179 written->modes |= nir_intrinsic_memory_modes(intrin);
180 break;
181
182 case nir_intrinsic_emit_vertex:
183 case nir_intrinsic_emit_vertex_with_counter:
184 written->modes = nir_var_shader_out;
185 break;
186
187 case nir_intrinsic_trace_ray:
188 case nir_intrinsic_execute_callable:
189 case nir_intrinsic_rt_trace_ray:
190 case nir_intrinsic_rt_execute_callable: {
191 nir_deref_instr *payload =
192 nir_src_as_deref(*nir_get_shader_call_payload_src(intrin));
193
194 nir_component_mask_t mask = (1 << glsl_get_vector_elements(payload->type)) - 1;
195
196 struct hash_entry *ht_entry =
197 _mesa_hash_table_search(written->derefs, payload);
198 if (ht_entry) {
199 ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
200 } else {
201 _mesa_hash_table_insert(written->derefs, payload,
202 (void *)(uintptr_t)mask);
203 }
204 break;
205 }
206
207 case nir_intrinsic_report_ray_intersection:
208 written->modes |= nir_var_mem_ssbo |
209 nir_var_mem_global |
210 nir_var_shader_call_data |
211 nir_var_ray_hit_attrib;
212 break;
213
214 case nir_intrinsic_ignore_ray_intersection:
215 case nir_intrinsic_terminate_ray:
216 written->modes |= nir_var_mem_ssbo |
217 nir_var_mem_global |
218 nir_var_shader_call_data;
219 break;
220
221 case nir_intrinsic_deref_atomic_add:
222 case nir_intrinsic_deref_atomic_fadd:
223 case nir_intrinsic_deref_atomic_imin:
224 case nir_intrinsic_deref_atomic_umin:
225 case nir_intrinsic_deref_atomic_fmin:
226 case nir_intrinsic_deref_atomic_imax:
227 case nir_intrinsic_deref_atomic_umax:
228 case nir_intrinsic_deref_atomic_fmax:
229 case nir_intrinsic_deref_atomic_and:
230 case nir_intrinsic_deref_atomic_or:
231 case nir_intrinsic_deref_atomic_xor:
232 case nir_intrinsic_deref_atomic_exchange:
233 case nir_intrinsic_deref_atomic_comp_swap:
234 case nir_intrinsic_deref_atomic_fcomp_swap:
235 case nir_intrinsic_store_deref:
236 case nir_intrinsic_copy_deref:
237 case nir_intrinsic_memcpy_deref: {
238 /* Destination in all of store_deref, copy_deref and the atomics is src[0]. */
239 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
240
241 uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
242 nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;
243
244 struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
245 if (ht_entry)
246 ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
247 else
248 _mesa_hash_table_insert(written->derefs, dst, (void *)mask);
249
250 break;
251 }
252
253 default:
254 break;
255 }
256 }
257
258 break;
259 }
260
261 case nir_cf_node_if: {
262 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
263
264 new_written = create_vars_written(state);
265
266 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
267 gather_vars_written(state, new_written, cf_node);
268
269 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
270 gather_vars_written(state, new_written, cf_node);
271
272 break;
273 }
274
275 case nir_cf_node_loop: {
276 nir_loop *loop = nir_cf_node_as_loop(cf_node);
277
278 new_written = create_vars_written(state);
279
280 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
281 gather_vars_written(state, new_written, cf_node);
282
283 break;
284 }
285
286 default:
287 unreachable("Invalid CF node type");
288 }
289
290 if (new_written) {
291       /* Merge new information into the parent control flow node. */
292 if (written) {
293 written->modes |= new_written->modes;
294 hash_table_foreach(new_written->derefs, new_entry) {
295 struct hash_entry *old_entry =
296 _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
297 new_entry->key);
298 if (old_entry) {
299 nir_component_mask_t merged = (uintptr_t) new_entry->data |
300 (uintptr_t) old_entry->data;
301 old_entry->data = (void *) ((uintptr_t) merged);
302 } else {
303 _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
304 new_entry->key, new_entry->data);
305 }
306 }
307 }
308 _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
309 }
310 }
311
312 static struct copy_entry *
313 copy_entry_create(struct util_dynarray *copies,
314 nir_deref_and_path *deref)
315 {
316 struct copy_entry new_entry = {
317 .dst = *deref,
318 };
319 util_dynarray_append(copies, struct copy_entry, new_entry);
320 return util_dynarray_top_ptr(copies, struct copy_entry);
321 }
322
323 /* Remove a copy entry by swapping it with the last element and reducing the
324 * size. If used inside an iteration on copies, it must be a reverse
325 * (backwards) iteration. It is safe to use in those cases because the swap
326 * will not affect the rest of the iteration.
327 */
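/* Typical usage, mirroring the loops later in this file (should_remove() is
 * a hypothetical predicate):
 *
 *    util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
 *       if (should_remove(iter))
 *          copy_entry_remove(copies, iter);
 *    }
 */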
328 static void
329 copy_entry_remove(struct util_dynarray *copies,
330 struct copy_entry *entry)
331 {
332 const struct copy_entry *src =
333 util_dynarray_pop_ptr(copies, struct copy_entry);
334 if (src != entry)
335 *entry = *src;
336 }
337
338 static bool
339 is_array_deref_of_vector(const nir_deref_and_path *deref)
340 {
341 if (deref->instr->deref_type != nir_deref_type_array)
342 return false;
343 nir_deref_instr *parent = nir_deref_instr_parent(deref->instr);
344 return glsl_type_is_vector(parent->type);
345 }
346
347 static struct copy_entry *
348 lookup_entry_for_deref(struct copy_prop_var_state *state,
349 struct util_dynarray *copies,
350 nir_deref_and_path *deref,
351 nir_deref_compare_result allowed_comparisons,
352 bool *equal)
353 {
354 struct copy_entry *entry = NULL;
355 util_dynarray_foreach(copies, struct copy_entry, iter) {
356 nir_deref_compare_result result =
357 nir_compare_derefs_and_paths(state->mem_ctx, &iter->dst, deref);
358 if (result & allowed_comparisons) {
359 entry = iter;
360 if (result & nir_derefs_equal_bit) {
361 if (equal != NULL)
362 *equal = true;
363 break;
364 }
365 /* Keep looking in case we have an equal match later in the array. */
366 }
367 }
368
369 return entry;
370 }
371
372 static struct copy_entry *
373 lookup_entry_and_kill_aliases(struct copy_prop_var_state *state,
374 struct util_dynarray *copies,
375 nir_deref_and_path *deref,
376 unsigned write_mask)
377 {
378 /* TODO: Take into account the write_mask. */
379
380 nir_deref_instr *dst_match = NULL;
381 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
382 if (!iter->src.is_ssa) {
383 /* If this write aliases the source of some entry, get rid of it */
384 nir_deref_compare_result result =
385 nir_compare_derefs_and_paths(state->mem_ctx, &iter->src.deref, deref);
386 if (result & nir_derefs_may_alias_bit) {
387 copy_entry_remove(copies, iter);
388 continue;
389 }
390 }
391
392 nir_deref_compare_result comp =
393 nir_compare_derefs_and_paths(state->mem_ctx, &iter->dst, deref);
394
395 if (comp & nir_derefs_equal_bit) {
396          /* Removing entries invalidates previous iter pointers, so we'll
397 * collect the matching entry later. Just make sure it is unique.
398 */
399 assert(!dst_match);
400 dst_match = iter->dst.instr;
401 } else if (comp & nir_derefs_may_alias_bit) {
402 copy_entry_remove(copies, iter);
403 }
404 }
405
406 struct copy_entry *entry = NULL;
407 if (dst_match) {
408 util_dynarray_foreach(copies, struct copy_entry, iter) {
409 if (iter->dst.instr == dst_match) {
410 entry = iter;
411 break;
412 }
413 }
414 assert(entry);
415 }
416 return entry;
417 }
418
419 static void
420 kill_aliases(struct copy_prop_var_state *state,
421 struct util_dynarray *copies,
422 nir_deref_and_path *deref,
423 unsigned write_mask)
424 {
425 /* TODO: Take into account the write_mask. */
426
427 struct copy_entry *entry =
428 lookup_entry_and_kill_aliases(state, copies, deref, write_mask);
429 if (entry)
430 copy_entry_remove(copies, entry);
431 }
432
433 static struct copy_entry *
434 get_entry_and_kill_aliases(struct copy_prop_var_state *state,
435 struct util_dynarray *copies,
436 nir_deref_and_path *deref,
437 unsigned write_mask)
438 {
439 /* TODO: Take into account the write_mask. */
440
441 struct copy_entry *entry =
442 lookup_entry_and_kill_aliases(state, copies, deref, write_mask);
443
444 if (entry == NULL)
445 entry = copy_entry_create(copies, deref);
446
447 return entry;
448 }
449
450 static void
451 apply_barrier_for_modes(struct util_dynarray *copies,
452 nir_variable_mode modes)
453 {
454 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
455 if (nir_deref_mode_may_be(iter->dst.instr, modes) ||
456 (!iter->src.is_ssa && nir_deref_mode_may_be(iter->src.deref.instr, modes)))
457 copy_entry_remove(copies, iter);
458 }
459 }
460
461 static void
462 value_set_from_value(struct value *value, const struct value *from,
463 unsigned base_index, unsigned write_mask)
464 {
465 /* We can't have non-zero indexes with non-trivial write masks */
466 assert(base_index == 0 || write_mask == 1);
467
468 if (from->is_ssa) {
469 /* Clear value if it was being used as non-SSA. */
470 if (!value->is_ssa)
471 memset(&value->ssa, 0, sizeof(value->ssa));
472 value->is_ssa = true;
473 /* Only overwrite the written components */
474 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
475 if (write_mask & (1 << i)) {
476 value->ssa.def[base_index + i] = from->ssa.def[i];
477 value->ssa.component[base_index + i] = from->ssa.component[i];
478 }
479 }
480 } else {
481 /* Non-ssa stores always write everything */
482 value->is_ssa = false;
483 value->deref = from->deref;
484 }
485 }
486
487 /* Try to load a single element of a vector from the copy_entry. If the data
488 * isn't available, just let the original intrinsic do the work.
489 */
490 static bool
491 load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
492 struct copy_entry *entry,
493 nir_builder *b, nir_intrinsic_instr *intrin,
494 struct value *value, unsigned index)
495 {
496 assert(index < glsl_get_vector_elements(entry->dst.instr->type));
497
498 /* We don't have the element available, so let the instruction do the work. */
499 if (!entry->src.ssa.def[index])
500 return false;
501
502 b->cursor = nir_instr_remove(&intrin->instr);
503 intrin->instr.block = NULL;
504
505 assert(entry->src.ssa.component[index] <
506 entry->src.ssa.def[index]->num_components);
507 nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
508 entry->src.ssa.component[index]);
509
510 *value = (struct value) {
511 .is_ssa = true,
512 {
513 .ssa = {
514 .def = { def },
515 .component = { 0 },
516 },
517 }
518 };
519
520 return true;
521 }
522
523 /* Do a "load" from an SSA-based entry return it in "value" as a value with a
524 * single SSA def. Because an entry could reference multiple different SSA
525 * defs, a vecN operation may be inserted to combine them into a single SSA
526 * def before handing it back to the caller. If the load instruction is no
527 * longer needed, it is removed and nir_instr::block is set to NULL. (It is
528 * possible, in some cases, for the load to be used in the vecN operation in
529 * which case it isn't deleted.)
530 */
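/*
 * Sketch of the vecN case with hypothetical SSA names: if the entry only
 * knows the .x and .y channels of a vec4 variable v, the load is rewritten
 * roughly as
 *
 *    vec4 32 ssa_5 = load_deref &v        <- kept to supply .z and .w
 *    vec4 32 ssa_6 = vec4 ssa_3, ssa_4, ssa_5.z, ssa_5.w
 *
 * and the users of the original load are pointed at ssa_6.
 */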
531 static bool
532 load_from_ssa_entry_value(struct copy_prop_var_state *state,
533 struct copy_entry *entry,
534 nir_builder *b, nir_intrinsic_instr *intrin,
535 nir_deref_and_path *src, struct value *value)
536 {
537 if (is_array_deref_of_vector(src)) {
538 if (nir_src_is_const(src->instr->arr.index)) {
539 unsigned index = nir_src_as_uint(src->instr->arr.index);
540 return load_element_from_ssa_entry_value(state, entry, b, intrin,
541 value, index);
542 }
543
544       /* An SSA copy_entry for the vector won't help an indirect load. */
545 if (glsl_type_is_vector(entry->dst.instr->type)) {
546 assert(entry->dst.instr->type == nir_deref_instr_parent(src->instr)->type);
547 /* TODO: If all SSA entries are there, try an if-ladder. */
548 return false;
549 }
550 }
551
552 *value = entry->src;
553 assert(value->is_ssa);
554
555 const struct glsl_type *type = entry->dst.instr->type;
556 unsigned num_components = glsl_get_vector_elements(type);
557
558 nir_component_mask_t available = 0;
559 bool all_same = true;
560 for (unsigned i = 0; i < num_components; i++) {
561 if (value->ssa.def[i])
562 available |= (1 << i);
563
564 if (value->ssa.def[i] != value->ssa.def[0])
565 all_same = false;
566
567 if (value->ssa.component[i] != i)
568 all_same = false;
569 }
570
571 if (all_same) {
572 /* Our work here is done */
573 b->cursor = nir_instr_remove(&intrin->instr);
574 intrin->instr.block = NULL;
575 return true;
576 }
577
578 if (available != (1 << num_components) - 1 &&
579 intrin->intrinsic == nir_intrinsic_load_deref &&
580 (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
581 /* If none of the components read are available as SSA values, then we
582 * should just bail. Otherwise, we would end up replacing the uses of
583        * the load_deref with a vecN() that just gathers up its components.
584 */
585 return false;
586 }
587
588 b->cursor = nir_after_instr(&intrin->instr);
589
590 nir_ssa_def *load_def =
591 intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
592
593 bool keep_intrin = false;
594 nir_ssa_scalar comps[NIR_MAX_VEC_COMPONENTS];
595 for (unsigned i = 0; i < num_components; i++) {
596 if (value->ssa.def[i]) {
597 comps[i] = nir_get_ssa_scalar(value->ssa.def[i], value->ssa.component[i]);
598 } else {
599 /* We don't have anything for this component in our
600 * list. Just re-use a channel from the load.
601 */
602 if (load_def == NULL)
603 load_def = nir_load_deref(b, entry->dst.instr);
604
605 if (load_def->parent_instr == &intrin->instr)
606 keep_intrin = true;
607
608 comps[i] = nir_get_ssa_scalar(load_def, i);
609 }
610 }
611
612 nir_ssa_def *vec = nir_vec_scalars(b, comps, num_components);
613 value_set_ssa_components(value, vec, num_components);
614
615 if (!keep_intrin) {
616 /* Removing this instruction should not touch the cursor because we
617 * created the cursor after the intrinsic and have added at least one
618 * instruction (the vec) since then.
619 */
620 assert(b->cursor.instr != &intrin->instr);
621 nir_instr_remove(&intrin->instr);
622 intrin->instr.block = NULL;
623 }
624
625 return true;
626 }
627
628 /**
629 * Specialize the wildcards in a deref chain
630 *
631 * This function returns a deref chain identical to \param deref except that
632 * some of its wildcards are replaced with indices from \param specific. The
633 * process is guided by \param guide which references the same type as \param
634 * specific but has the same wildcard array lengths as \param deref.
635 */
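/*
 * Hypothetical example: with
 *
 *    deref    = &entry_src[*].f[*]
 *    guide    = &entry_dst[*].g[*]
 *    specific = &load_src[i].g[j]
 *
 * each wildcard in deref is filled from the index found at the corresponding
 * wildcard position of guide within specific, yielding &entry_src[i].f[j].
 */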
636 static nir_deref_instr *
637 specialize_wildcards(nir_builder *b,
638 nir_deref_path *deref,
639 nir_deref_path *guide,
640 nir_deref_path *specific)
641 {
642 nir_deref_instr **deref_p = &deref->path[1];
643 nir_deref_instr **guide_p = &guide->path[1];
644 nir_deref_instr **spec_p = &specific->path[1];
645 nir_deref_instr *ret_tail = deref->path[0];
646 for (; *deref_p; deref_p++) {
647 if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
648 /* This is where things get tricky. We have to search through
649 * the entry deref to find its corresponding wildcard and fill
650 * this slot in with the value from the src.
651 */
652 while (*guide_p &&
653 (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
654 guide_p++;
655 spec_p++;
656 }
657 assert(*guide_p && *spec_p);
658
659 ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);
660
661 guide_p++;
662 spec_p++;
663 } else {
664 ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
665 }
666 }
667
668 return ret_tail;
669 }
670
671 /* Do a "load" from an deref-based entry return it in "value" as a value. The
672 * deref returned in "value" will always be a fresh copy so the caller can
673 * steal it and assign it to the instruction directly without copying it
674 * again.
675 */
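/*
 * Rough example with hypothetical variables: if the entry says "a[*] holds
 * b[*]" and the instruction loads a[i].f, the value handed back is a freshly
 * built deref for b[i].f: wildcards are specialized from the load's path and
 * the entry's source deref is then extended with the load's remaining path.
 */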
676 static bool
677 load_from_deref_entry_value(struct copy_prop_var_state *state,
678 struct copy_entry *entry,
679 nir_builder *b, nir_intrinsic_instr *intrin,
680 nir_deref_and_path *src, struct value *value)
681 {
682 *value = entry->src;
683
684 b->cursor = nir_instr_remove(&intrin->instr);
685
686 nir_deref_path *entry_dst_path = nir_get_deref_path(state->mem_ctx, &entry->dst);
687 nir_deref_path *src_path = nir_get_deref_path(state->mem_ctx, src);
688
689 bool need_to_specialize_wildcards = false;
690 nir_deref_instr **entry_p = &entry_dst_path->path[1];
691 nir_deref_instr **src_p = &src_path->path[1];
692 while (*entry_p && *src_p) {
693 nir_deref_instr *entry_tail = *entry_p++;
694 nir_deref_instr *src_tail = *src_p++;
695
696 if (src_tail->deref_type == nir_deref_type_array &&
697 entry_tail->deref_type == nir_deref_type_array_wildcard)
698 need_to_specialize_wildcards = true;
699 }
700
701 /* If the entry deref is longer than the source deref then it refers to a
702 * smaller type and we can't source from it.
703 */
704 assert(*entry_p == NULL);
705
706 value->deref._path = NULL;
707
708 if (need_to_specialize_wildcards) {
709 /* The entry has some wildcards that are not in src. This means we need
710 * to construct a new deref based on the entry but using the wildcards
711 * from the source and guided by the entry dst. Oof.
712 */
713 nir_deref_path *entry_src_path =
714 nir_get_deref_path(state->mem_ctx, &entry->src.deref);
715 value->deref.instr = specialize_wildcards(b, entry_src_path,
716 entry_dst_path, src_path);
717 }
718
719 /* If our source deref is longer than the entry deref, that's ok because
720 * it just means the entry deref needs to be extended a bit.
721 */
722 while (*src_p) {
723 nir_deref_instr *src_tail = *src_p++;
724 value->deref.instr = nir_build_deref_follower(b, value->deref.instr, src_tail);
725 }
726
727 return true;
728 }
729
730 static bool
731 try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
732 nir_builder *b, nir_intrinsic_instr *intrin,
733 nir_deref_and_path *src, struct value *value)
734 {
735 if (entry == NULL)
736 return false;
737
738 if (entry->src.is_ssa) {
739 return load_from_ssa_entry_value(state, entry, b, intrin, src, value);
740 } else {
741 return load_from_deref_entry_value(state, entry, b, intrin, src, value);
742 }
743 }
744
745 static void
746 invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
747 struct util_dynarray *copies,
748 nir_cf_node *cf_node)
749 {
750 struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
751 assert(ht_entry);
752
753 struct vars_written *written = ht_entry->data;
754 if (written->modes) {
755 util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
756 if (nir_deref_mode_may_be(entry->dst.instr, written->modes))
757 copy_entry_remove(copies, entry);
758 }
759 }
760
761 hash_table_foreach (written->derefs, entry) {
762 nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
763 nir_deref_and_path deref = {deref_written, NULL};
764 kill_aliases(state, copies, &deref, (uintptr_t)entry->data);
765 }
766 }
767
768 static void
769 print_value(struct value *value, unsigned num_components)
770 {
771 if (!value->is_ssa) {
772 printf(" %s ", glsl_get_type_name(value->deref.instr->type));
773 nir_print_deref(value->deref.instr, stdout);
774 return;
775 }
776
777 bool same_ssa = true;
778 for (unsigned i = 0; i < num_components; i++) {
779 if (value->ssa.component[i] != i ||
780 (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
781 same_ssa = false;
782 break;
783 }
784 }
785 if (same_ssa) {
786 printf(" ssa_%d", value->ssa.def[0]->index);
787 } else {
788 for (int i = 0; i < num_components; i++) {
789 if (value->ssa.def[i])
790 printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
791 else
792 printf(" _");
793 }
794 }
795 }
796
797 static void
798 print_copy_entry(struct copy_entry *entry)
799 {
800 printf(" %s ", glsl_get_type_name(entry->dst.instr->type));
801 nir_print_deref(entry->dst.instr, stdout);
802 printf(":\t");
803
804 unsigned num_components = glsl_get_vector_elements(entry->dst.instr->type);
805 print_value(&entry->src, num_components);
806 printf("\n");
807 }
808
809 static void
810 dump_instr(nir_instr *instr)
811 {
812 printf(" ");
813 nir_print_instr(instr, stdout);
814 printf("\n");
815 }
816
817 static void
818 dump_copy_entries(struct util_dynarray *copies)
819 {
820 util_dynarray_foreach(copies, struct copy_entry, iter)
821 print_copy_entry(iter);
822 printf("\n");
823 }
824
825 static void
826 copy_prop_vars_block(struct copy_prop_var_state *state,
827 nir_builder *b, nir_block *block,
828 struct util_dynarray *copies)
829 {
830 if (debug) {
831 printf("# block%d\n", block->index);
832 dump_copy_entries(copies);
833 }
834
835 nir_foreach_instr_safe(instr, block) {
836 if (debug && instr->type == nir_instr_type_deref)
837 dump_instr(instr);
838
839 if (instr->type == nir_instr_type_call) {
840 if (debug) dump_instr(instr);
841 apply_barrier_for_modes(copies, nir_var_shader_out |
842 nir_var_shader_temp |
843 nir_var_function_temp |
844 nir_var_mem_ssbo |
845 nir_var_mem_shared |
846 nir_var_mem_global);
847 if (debug) dump_copy_entries(copies);
848 continue;
849 }
850
851 if (instr->type != nir_instr_type_intrinsic)
852 continue;
853
854 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
855 switch (intrin->intrinsic) {
856 case nir_intrinsic_control_barrier:
857 case nir_intrinsic_memory_barrier:
858 if (debug) dump_instr(instr);
859
860 apply_barrier_for_modes(copies, nir_var_shader_out |
861 nir_var_mem_ssbo |
862 nir_var_mem_shared |
863 nir_var_mem_global);
864 break;
865
866 case nir_intrinsic_memory_barrier_buffer:
867 if (debug) dump_instr(instr);
868
869 apply_barrier_for_modes(copies, nir_var_mem_ssbo |
870 nir_var_mem_global);
871 break;
872
873 case nir_intrinsic_memory_barrier_shared:
874 if (debug) dump_instr(instr);
875
876 apply_barrier_for_modes(copies, nir_var_mem_shared);
877 break;
878
879 case nir_intrinsic_memory_barrier_tcs_patch:
880 if (debug) dump_instr(instr);
881
882 apply_barrier_for_modes(copies, nir_var_shader_out);
883 break;
884
885 case nir_intrinsic_scoped_barrier:
886 if (debug) dump_instr(instr);
887
888 if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
889 apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
890 break;
891
892 case nir_intrinsic_emit_vertex:
893 case nir_intrinsic_emit_vertex_with_counter:
894 if (debug) dump_instr(instr);
895
896 apply_barrier_for_modes(copies, nir_var_shader_out);
897 break;
898
899 case nir_intrinsic_report_ray_intersection:
900 apply_barrier_for_modes(copies, nir_var_mem_ssbo |
901 nir_var_mem_global |
902 nir_var_shader_call_data |
903 nir_var_ray_hit_attrib);
904 break;
905
906 case nir_intrinsic_ignore_ray_intersection:
907 case nir_intrinsic_terminate_ray:
908 apply_barrier_for_modes(copies, nir_var_mem_ssbo |
909 nir_var_mem_global |
910 nir_var_shader_call_data);
911 break;
912
913 case nir_intrinsic_load_deref: {
914 if (debug) dump_instr(instr);
915
916 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
917 break;
918
919 nir_deref_and_path src = {nir_src_as_deref(intrin->src[0]), NULL};
920
921 /* If this is a load from a read-only mode, then all this pass would
922        * do is combine redundant loads, and CSE should be more efficient for
923 * that.
924 */
925 nir_variable_mode ignore = nir_var_read_only_modes & ~nir_var_vec_indexable_modes;
926 if (nir_deref_mode_must_be(src.instr, ignore))
927 break;
928
929 /* Direct array_derefs of vectors operate on the vectors (the parent
930 * deref). Indirects will be handled like other derefs.
931 */
932 int vec_index = 0;
933 nir_deref_and_path vec_src = src;
934 if (is_array_deref_of_vector(&src) && nir_src_is_const(src.instr->arr.index)) {
935 vec_src.instr = nir_deref_instr_parent(src.instr);
936 unsigned vec_comps = glsl_get_vector_elements(vec_src.instr->type);
937 vec_index = nir_src_as_uint(src.instr->arr.index);
938
939 /* Loading from an invalid index yields an undef */
940 if (vec_index >= vec_comps) {
941 b->cursor = nir_instr_remove(instr);
942 nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
943 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, u);
944 state->progress = true;
945 break;
946 }
947 }
948
949 bool src_entry_equal = false;
950 struct copy_entry *src_entry =
951 lookup_entry_for_deref(state, copies, &src,
952 nir_derefs_a_contains_b_bit, &src_entry_equal);
953 struct value value = {0};
954 if (try_load_from_entry(state, src_entry, b, intrin, &src, &value)) {
955 if (value.is_ssa) {
956 /* lookup_load has already ensured that we get a single SSA
957 * value that has all of the channels. We just have to do the
958                 * rewrite operation.  Note that for array derefs of vectors,
959 * channel 0 is used.
960 */
961 if (intrin->instr.block) {
962 /* The lookup left our instruction in-place. This means it
963 * must have used it to vec up a bunch of different sources.
964 * We need to be careful when rewriting uses so we don't
965 * rewrite the vecN itself.
966 */
967 nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
968 value.ssa.def[0],
969 value.ssa.def[0]->parent_instr);
970 } else {
971 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
972 value.ssa.def[0]);
973 }
974 } else {
975 /* We're turning it into a load of a different variable */
976 intrin->src[0] = nir_src_for_ssa(&value.deref.instr->dest.ssa);
977
978 /* Put it back in again. */
979 nir_builder_instr_insert(b, instr);
980 value_set_ssa_components(&value, &intrin->dest.ssa,
981 intrin->num_components);
982 }
983 state->progress = true;
984 } else {
985 value_set_ssa_components(&value, &intrin->dest.ssa,
986 intrin->num_components);
987 }
988
989 /* Now that we have a value, we're going to store it back so that we
990 * have the right value next time we come looking for it. In order
991 * to do this, we need an exact match, not just something that
992 * contains what we're looking for.
993 *
994 * We avoid doing another lookup if src.instr == vec_src.instr.
995 */
996 struct copy_entry *entry = src_entry;
997 if (src.instr != vec_src.instr)
998 entry = lookup_entry_for_deref(state, copies, &vec_src,
999 nir_derefs_equal_bit, NULL);
1000 else if (!src_entry_equal)
1001 entry = NULL;
1002
1003 if (!entry)
1004 entry = copy_entry_create(copies, &vec_src);
1005
1006 /* Update the entry with the value of the load. This way
1007 * we can potentially remove subsequent loads.
1008 */
1009 value_set_from_value(&entry->src, &value, vec_index,
1010 (1 << intrin->num_components) - 1);
1011 break;
1012 }
1013
1014 case nir_intrinsic_store_deref: {
1015 if (debug) dump_instr(instr);
1016
1017 nir_deref_and_path dst = {nir_src_as_deref(intrin->src[0]), NULL};
1018 assert(glsl_type_is_vector_or_scalar(dst.instr->type));
1019
1020 /* Direct array_derefs of vectors operate on the vectors (the parent
1021 * deref). Indirects will be handled like other derefs.
1022 */
1023 int vec_index = 0;
1024 nir_deref_and_path vec_dst = dst;
1025 if (is_array_deref_of_vector(&dst) && nir_src_is_const(dst.instr->arr.index)) {
1026 vec_dst.instr = nir_deref_instr_parent(dst.instr);
1027 unsigned vec_comps = glsl_get_vector_elements(vec_dst.instr->type);
1028
1029 vec_index = nir_src_as_uint(dst.instr->arr.index);
1030
1031 /* Storing to an invalid index is a no-op. */
1032 if (vec_index >= vec_comps) {
1033 nir_instr_remove(instr);
1034 state->progress = true;
1035 break;
1036 }
1037 }
1038
1039 if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
1040 unsigned wrmask = nir_intrinsic_write_mask(intrin);
1041 kill_aliases(state, copies, &dst, wrmask);
1042 break;
1043 }
1044
1045 struct copy_entry *entry =
1046 lookup_entry_for_deref(state, copies, &dst, nir_derefs_equal_bit, NULL);
1047 if (entry && value_equals_store_src(&entry->src, intrin)) {
1048             /* If we are storing the value from a load of the same var, the
1049              * store is redundant, so remove it.
1050 */
1051 nir_instr_remove(instr);
1052 state->progress = true;
1053 } else {
1054 struct value value = {0};
1055 value_set_ssa_components(&value, intrin->src[1].ssa,
1056 intrin->num_components);
1057 unsigned wrmask = nir_intrinsic_write_mask(intrin);
1058 struct copy_entry *entry =
1059 get_entry_and_kill_aliases(state, copies, &vec_dst, wrmask);
1060 value_set_from_value(&entry->src, &value, vec_index, wrmask);
1061 }
1062
1063 break;
1064 }
1065
1066 case nir_intrinsic_copy_deref: {
1067 if (debug) dump_instr(instr);
1068
1069 nir_deref_and_path dst = {nir_src_as_deref(intrin->src[0]), NULL};
1070 nir_deref_and_path src = {nir_src_as_deref(intrin->src[1]), NULL};
1071
1072 /* The copy_deref intrinsic doesn't keep track of num_components, so
1073 * get it ourselves.
1074 */
1075 unsigned num_components = glsl_get_vector_elements(dst.instr->type);
1076 unsigned full_mask = (1 << num_components) - 1;
1077
1078 if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
1079 (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE)) {
1080 kill_aliases(state, copies, &dst, full_mask);
1081 break;
1082 }
1083
1084 nir_deref_compare_result comp =
1085 nir_compare_derefs_and_paths(state->mem_ctx, &src, &dst);
1086 if (comp & nir_derefs_equal_bit) {
1087 /* This is a no-op self-copy. Get rid of it */
1088 nir_instr_remove(instr);
1089 state->progress = true;
1090 continue;
1091 }
1092
1093          /* Copies of direct array derefs of vectors are not handled. Just
1094 * invalidate what's written and bail.
1095 */
1096 if ((is_array_deref_of_vector(&src) && nir_src_is_const(src.instr->arr.index)) ||
1097 (is_array_deref_of_vector(&dst) && nir_src_is_const(dst.instr->arr.index))) {
1098 kill_aliases(state, copies, &dst, full_mask);
1099 break;
1100 }
1101
1102 struct copy_entry *src_entry =
1103 lookup_entry_for_deref(state, copies, &src, nir_derefs_a_contains_b_bit, NULL);
1104 struct value value;
1105 if (try_load_from_entry(state, src_entry, b, intrin, &src, &value)) {
1106 /* If load works, intrin (the copy_deref) is removed. */
1107 if (value.is_ssa) {
1108 nir_store_deref(b, dst.instr, value.ssa.def[0], full_mask);
1109 } else {
1110 /* If this would be a no-op self-copy, don't bother. */
1111 comp = nir_compare_derefs_and_paths(state->mem_ctx, &value.deref, &dst);
1112 if (comp & nir_derefs_equal_bit)
1113 continue;
1114
1115 /* Just turn it into a copy of a different deref */
1116 intrin->src[1] = nir_src_for_ssa(&value.deref.instr->dest.ssa);
1117
1118 /* Put it back in again. */
1119 nir_builder_instr_insert(b, instr);
1120 }
1121
1122 state->progress = true;
1123 } else {
1124 value = (struct value) {
1125 .is_ssa = false,
1126 { .deref = src },
1127 };
1128 }
1129
1130 nir_variable *src_var = nir_deref_instr_get_variable(src.instr);
1131 if (src_var && src_var->data.cannot_coalesce) {
1132             /* The source cannot be coalesced, which means we can't propagate
1133 * this copy.
1134 */
1135 break;
1136 }
1137
1138 struct copy_entry *dst_entry =
1139 get_entry_and_kill_aliases(state, copies, &dst, full_mask);
1140 value_set_from_value(&dst_entry->src, &value, 0, full_mask);
1141 break;
1142 }
1143
1144 case nir_intrinsic_trace_ray:
1145 case nir_intrinsic_execute_callable:
1146 case nir_intrinsic_rt_trace_ray:
1147 case nir_intrinsic_rt_execute_callable: {
1148 if (debug) dump_instr(instr);
1149
1150 nir_deref_and_path payload = {
1151 nir_src_as_deref(*nir_get_shader_call_payload_src(intrin)), NULL};
1152 nir_component_mask_t full_mask = (1 << glsl_get_vector_elements(payload.instr->type)) - 1;
1153 kill_aliases(state, copies, &payload, full_mask);
1154 break;
1155 }
1156
1157 case nir_intrinsic_memcpy_deref:
1158 case nir_intrinsic_deref_atomic_add:
1159 case nir_intrinsic_deref_atomic_fadd:
1160 case nir_intrinsic_deref_atomic_imin:
1161 case nir_intrinsic_deref_atomic_umin:
1162 case nir_intrinsic_deref_atomic_fmin:
1163 case nir_intrinsic_deref_atomic_imax:
1164 case nir_intrinsic_deref_atomic_umax:
1165 case nir_intrinsic_deref_atomic_fmax:
1166 case nir_intrinsic_deref_atomic_and:
1167 case nir_intrinsic_deref_atomic_or:
1168 case nir_intrinsic_deref_atomic_xor:
1169 case nir_intrinsic_deref_atomic_exchange:
1170 case nir_intrinsic_deref_atomic_comp_swap:
1171 case nir_intrinsic_deref_atomic_fcomp_swap:
1172 if (debug) dump_instr(instr);
1173
1174 nir_deref_and_path dst = {nir_src_as_deref(intrin->src[0]), NULL};
1175 unsigned num_components = glsl_get_vector_elements(dst.instr->type);
1176 unsigned full_mask = (1 << num_components) - 1;
1177 kill_aliases(state, copies, &dst, full_mask);
1178 break;
1179
1180 case nir_intrinsic_store_deref_block_intel: {
1181 if (debug) dump_instr(instr);
1182
1183          /* Invalidate the whole variable (or cast) and anything that aliases
1184           * it.
1185 */
1186 nir_deref_and_path dst = {nir_src_as_deref(intrin->src[0]), NULL};
1187 while (nir_deref_instr_parent(dst.instr))
1188 dst.instr = nir_deref_instr_parent(dst.instr);
1189 assert(dst.instr->deref_type == nir_deref_type_var ||
1190 dst.instr->deref_type == nir_deref_type_cast);
1191
1192 unsigned num_components = glsl_get_vector_elements(dst.instr->type);
1193 unsigned full_mask = (1 << num_components) - 1;
1194 kill_aliases(state, copies, &dst, full_mask);
1195 break;
1196 }
1197
1198 default:
1199 continue; /* To skip the debug below. */
1200 }
1201
1202 if (debug) dump_copy_entries(copies);
1203 }
1204 }
1205
1206 static void
1207 copy_prop_vars_cf_node(struct copy_prop_var_state *state,
1208 struct util_dynarray *copies,
1209 nir_cf_node *cf_node)
1210 {
1211 switch (cf_node->type) {
1212 case nir_cf_node_function: {
1213 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
1214
1215 struct util_dynarray impl_copies;
1216 util_dynarray_init(&impl_copies, state->mem_ctx);
1217
1218 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
1219 copy_prop_vars_cf_node(state, &impl_copies, cf_node);
1220
1221 break;
1222 }
1223
1224 case nir_cf_node_block: {
1225 nir_block *block = nir_cf_node_as_block(cf_node);
1226 nir_builder b;
1227 nir_builder_init(&b, state->impl);
1228 copy_prop_vars_block(state, &b, block, copies);
1229 break;
1230 }
1231
1232 case nir_cf_node_if: {
1233 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
1234
1235 /* Clone the copies for each branch of the if statement. The idea is
1236 * that they both see the same state of available copies, but do not
1237       * interfere with each other.
1238 */
1239
1240 struct util_dynarray then_copies;
1241 util_dynarray_clone(&then_copies, state->mem_ctx, copies);
1242
1243 struct util_dynarray else_copies;
1244 util_dynarray_clone(&else_copies, state->mem_ctx, copies);
1245
1246 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
1247 copy_prop_vars_cf_node(state, &then_copies, cf_node);
1248
1249 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
1250 copy_prop_vars_cf_node(state, &else_copies, cf_node);
1251
1252       /* Both branches' copies can be ignored, since the effect of running both
1253 * branches was captured in the first pass that collects vars_written.
1254 */
1255
1256 invalidate_copies_for_cf_node(state, copies, cf_node);
1257
1258 break;
1259 }
1260
1261 case nir_cf_node_loop: {
1262 nir_loop *loop = nir_cf_node_as_loop(cf_node);
1263
1264 /* Invalidate before cloning the copies for the loop, since the loop
1265 * body can be executed more than once.
1266 */
1267
1268 invalidate_copies_for_cf_node(state, copies, cf_node);
1269
1270 struct util_dynarray loop_copies;
1271 util_dynarray_clone(&loop_copies, state->mem_ctx, copies);
1272
1273 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
1274 copy_prop_vars_cf_node(state, &loop_copies, cf_node);
1275
1276 break;
1277 }
1278
1279 default:
1280 unreachable("Invalid CF node type");
1281 }
1282 }
1283
1284 static bool
1285 nir_copy_prop_vars_impl(nir_function_impl *impl)
1286 {
1287 void *mem_ctx = ralloc_context(NULL);
1288
1289 if (debug) {
1290 nir_metadata_require(impl, nir_metadata_block_index);
1291 printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
1292 }
1293
1294 struct copy_prop_var_state state = {
1295 .impl = impl,
1296 .mem_ctx = mem_ctx,
1297 .lin_ctx = linear_zalloc_parent(mem_ctx, 0),
1298
1299 .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
1300 };
1301
1302 gather_vars_written(&state, NULL, &impl->cf_node);
1303
1304 copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);
1305
1306 if (state.progress) {
1307 nir_metadata_preserve(impl, nir_metadata_block_index |
1308 nir_metadata_dominance);
1309 } else {
1310 nir_metadata_preserve(impl, nir_metadata_all);
1311 }
1312
1313 ralloc_free(mem_ctx);
1314 return state.progress;
1315 }
1316
1317 bool
1318 nir_opt_copy_prop_vars(nir_shader *shader)
1319 {
1320 bool progress = false;
1321
1322 nir_foreach_function(function, shader) {
1323 if (!function->impl)
1324 continue;
1325 progress |= nir_copy_prop_vars_impl(function->impl);
1326 }
1327
1328 return progress;
1329 }
1330