/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_dynarray.h"

/**
 * Elimination of dead writes based on derefs.
 *
 * Dead writes are stores and copies that write to a deref which is then
 * written again before it is used (read or sourced by a copy).  Such writes
 * can be removed since they don't affect anything.
 *
 * For derefs that refer to memory that can be read after the program ends,
 * the last write is considered used.  The presence of certain instructions
 * may also cause writes to be considered used, e.g. a memory barrier (in
 * that case the value must be written, as another thread might use it).
 *
 * The write mask of store instructions is taken into account, so a store may
 * be removed because a combination of later stores overwrites its value.
 */
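
/*
 * As an illustrative example, in a sequence like
 *
 *    store_deref var, ssa_1, writemask=0xf
 *    store_deref var, ssa_2, writemask=0xf
 *    load_deref  var
 *
 * nothing reads the value of the first store before it is fully overwritten,
 * so that store can be removed.
 */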

/* Entry for unused_writes arrays. */
struct write_entry {
   /* If NULL, the entry is free to be reused. */
   nir_intrinsic_instr *intrin;
   nir_component_mask_t mask;
   nir_deref_instr *dst;
};

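/* Drop every tracked write whose destination may live in one of the given
 * modes: something (a barrier, a call, ...) may make those writes visible,
 * so they can no longer be treated as unused.
 */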
static void
clear_unused_for_modes(struct util_dynarray *unused_writes, nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      if (nir_deref_mode_may_be(entry->dst, modes))
         *entry = util_dynarray_pop(unused_writes, struct write_entry);
   }
}

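/* Drop every tracked write whose destination may alias the deref being read:
 * the read may consume their value, so they are not dead.
 */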
static void
clear_unused_for_read(struct util_dynarray *unused_writes, nir_deref_instr *src)
{
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      if (nir_compare_derefs(src, entry->dst) & nir_derefs_may_alias_bit)
         *entry = util_dynarray_pop(unused_writes, struct write_entry);
   }
}

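/* Record a new write and prune previously tracked writes to the same deref
 * whose components are now all overwritten.  Fully overwritten writes are
 * removed from the IR; returns true if any instruction was removed.
 */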
static bool
update_unused_writes(struct util_dynarray *unused_writes,
                     nir_intrinsic_instr *intrin,
                     nir_deref_instr *dst, nir_component_mask_t mask)
{
   bool progress = false;

   /* This pass assumes that the destinations of copies and stores are derefs
    * that end in a vector or scalar (it is OK to have wildcards or indirects
    * for arrays).
    */
   assert(glsl_type_is_vector_or_scalar(dst->type));

   /* Find writes that are unused and can be removed. */
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      nir_deref_compare_result comp = nir_compare_derefs(dst, entry->dst);
      if (comp & nir_derefs_a_contains_b_bit) {
         entry->mask &= ~mask;
         if (entry->mask == 0) {
            nir_instr_remove(&entry->intrin->instr);
            *entry = util_dynarray_pop(unused_writes, struct write_entry);
            progress = true;
         }
      }
   }

   /* Add the new write to the unused array. */
   struct write_entry new_entry = {
      .intrin = intrin,
      .mask = mask,
      .dst = dst,
   };

   util_dynarray_append(unused_writes, struct write_entry, new_entry);

   return progress;
}

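/* Eliminate dead writes within a single block.  Candidate writes are tracked
 * in a scratch array and forgotten whenever a read, barrier or call could
 * make them observable.
 */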
static bool
remove_dead_write_vars_local(void *mem_ctx, nir_shader *shader, nir_block *block)
{
   bool progress = false;

   struct util_dynarray unused_writes;
   util_dynarray_init(&unused_writes, mem_ctx);

   nir_foreach_instr_safe(instr, block) {
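      /* The callee may observe variables in any of these modes, so pending
       * writes to them can no longer be considered dead.
       */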
      if (instr->type == nir_instr_type_call) {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out |
                                                nir_var_shader_temp |
                                                nir_var_function_temp |
                                                nir_var_mem_ssbo |
                                                nir_var_mem_shared |
                                                nir_var_mem_global);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_control_barrier:
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier: {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out |
                                                nir_var_mem_ssbo |
                                                nir_var_mem_shared |
                                                nir_var_mem_global);
         break;
      }

      case nir_intrinsic_memory_barrier_buffer:
         clear_unused_for_modes(&unused_writes, nir_var_mem_ssbo |
                                                nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_shared:
         clear_unused_for_modes(&unused_writes, nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_tcs_patch:
         clear_unused_for_modes(&unused_writes, nir_var_shader_out);
         break;

      case nir_intrinsic_scoped_barrier: {
         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
            clear_unused_for_modes(&unused_writes,
                                   nir_intrinsic_memory_modes(intrin));
         }
         break;
      }

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter: {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out);
         break;
      }

      case nir_intrinsic_execute_callable:
      case nir_intrinsic_rt_execute_callable: {
         /* Treat the payload as read, since the callee may use it. */
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         clear_unused_for_read(&unused_writes, src);
         break;
      }

      case nir_intrinsic_trace_ray:
      case nir_intrinsic_rt_trace_ray: {
         /* Treat the payload as read, since the callees may use it. */
         nir_deref_instr *src = nir_src_as_deref(intrin->src[10]);
         clear_unused_for_read(&unused_writes, src);
         break;
      }

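      /* A load keeps alive any pending write that may alias its source.
       * Read-only modes can never be the target of a tracked write, so
       * loads from them can be skipped.
       */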
      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
         if (nir_deref_mode_must_be(src, nir_var_read_only_modes))
            break;
         clear_unused_for_read(&unused_writes, src);
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
            /* Consider a volatile write to also be a sort of read.  This
             * prevents us from deleting a non-volatile write just before a
             * volatile write thanks to a non-volatile write afterwards.  It's
             * quite the corner case, but this should be safer and more
             * predictable for the programmer than allowing two non-volatile
             * writes to be combined with a volatile write between them.
             */
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         nir_component_mask_t mask = nir_intrinsic_write_mask(intrin);
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
      }

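      /* A copy acts as a read of the whole source and a write of the whole
       * destination vector.
       */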
      case nir_intrinsic_copy_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE) {
            clear_unused_for_read(&unused_writes, src);
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         /* A copy from a deref onto itself is a no-op, so remove it. */
         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            nir_instr_remove(instr);
            progress = true;
            break;
         }

         clear_unused_for_read(&unused_writes, src);
         nir_component_mask_t mask = (1 << glsl_get_vector_elements(dst->type)) - 1;
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
      }

      default:
         break;
      }
   }

   /* All unused writes at the end of the block are kept, since with only
    * local analysis we can't tell whether they will be overwritten later.
    */

   return progress;
}

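/* Run the per-block elimination over every block of the function
 * implementation and update the metadata accordingly.
 */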
static bool
remove_dead_write_vars_impl(void *mem_ctx, nir_shader *shader, nir_function_impl *impl)
{
   bool progress = false;

   nir_metadata_require(impl, nir_metadata_block_index);

   nir_foreach_block(block, impl)
      progress |= remove_dead_write_vars_local(mem_ctx, shader, block);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

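/* Shader-level entry point: runs dead-write elimination on every function
 * that has an implementation.
 */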
bool
nir_opt_dead_write_vars(nir_shader *shader)
{
   void *mem_ctx = ralloc_context(NULL);
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= remove_dead_write_vars_impl(mem_ctx, shader, function->impl);
   }

   ralloc_free(mem_ctx);
   return progress;
}