/*
 * Copyright © 2020 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file
 *
 * Trims off the unused trailing components of SSA defs.
 *
 * Due to various optimization passes (or frontend implementations,
 * particularly prog_to_nir), we may have instructions generating vectors
 * whose components don't get read by any instruction.  While it can be tricky
 * to eliminate either unused low components of a writemask (you might need to
 * increment some offset from a load_uniform, for example) or channels in the
 * middle of a partially set writemask (you might need to reswizzle ALU ops
 * using the value), it is trivial to just drop the trailing components.
 *
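 * For example (an illustrative sketch -- the SSA names and exact textual IR
 * are hypothetical), given a vec4 add whose last two channels are never
 * read:
 *
 *    vec4 ssa_2 = fadd ssa_0, ssa_1
 *    vec2 ssa_3 = fmul ssa_2.xy, ssa_2.xy
 *
 * the pass rewrites ssa_2 in place to a vec2, trimming num_components and
 * the ALU write mask; the existing .xy reads stay valid as-is.
 *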
 * This pass is probably only of use to vector backends -- scalar backends
 * typically get unused def channel trimming by scalarizing and dead code
 * elimination.
 */

#include "nir.h"
#include "nir_builder.h"

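/* Shrinks a def to the last channel that any instruction actually reads.
 * Returns true on progress; defs with no readers at all are left for DCE.
 */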
static bool
shrink_dest_to_read_mask(nir_ssa_def *def)
{
   /* early out if there's nothing to do. */
   if (def->num_components == 1)
      return false;

   unsigned mask = nir_ssa_def_components_read(def);
   int last_bit = util_last_bit(mask);

   /* If nothing was read, leave it up to DCE. */
   if (!mask)
      return false;

   if (def->num_components > last_bit) {
      def->num_components = last_bit;
      return true;
   }

   return false;
}

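/* Shrinks ALU instruction results.  Per-component ops (output_size == 0)
 * just get their def and write mask trimmed; vec2/vec3/vec4 ops are rebuilt
 * as a smaller vecN whose uses replace the original, leaving the old
 * instruction for DCE to remove.
 */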
static bool
opt_shrink_vectors_alu(nir_builder *b, nir_alu_instr *instr)
{
   nir_ssa_def *def = &instr->dest.dest.ssa;

   if (nir_op_infos[instr->op].output_size == 0) {
      if (shrink_dest_to_read_mask(def)) {
         instr->dest.write_mask &=
            BITFIELD_MASK(def->num_components);

         return true;
      }
   } else {
      switch (instr->op) {
      case nir_op_vec4:
      case nir_op_vec3:
      case nir_op_vec2: {
         unsigned mask = nir_ssa_def_components_read(def);

         /* If nothing was read, leave it up to DCE. */
         if (mask == 0)
            return false;

         int last_bit = util_last_bit(mask);
         if (last_bit < def->num_components) {
            nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { 0 };
            for (int i = 0; i < last_bit; i++)
               srcs[i] = nir_ssa_for_alu_src(b, instr, i);

            nir_ssa_def *new_vec = nir_vec(b, srcs, last_bit);
            nir_ssa_def_rewrite_uses(def, nir_src_for_ssa(new_vec));
            return true;
         }
         break;
      }

      default:
         break;
      }
   }

   return false;
}

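/* For image stores with a known format, trims the stored data (src[3]) to
 * the number of components the format actually keeps; e.g. a vec4 store to
 * an r32f image only needs one channel.
 */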
static bool
opt_shrink_vectors_image_store(nir_builder *b, nir_intrinsic_instr *instr)
{
   enum pipe_format format;
   if (instr->intrinsic == nir_intrinsic_image_deref_store) {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      format = nir_deref_instr_get_variable(deref)->data.image.format;
   } else {
      format = nir_intrinsic_format(instr);
   }
   if (format == PIPE_FORMAT_NONE)
      return false;

   unsigned components = util_format_get_nr_components(format);
   if (components >= instr->num_components)
      return false;

   nir_ssa_def *data = nir_channels(b, instr->src[3].ssa, BITSET_MASK(components));
   nir_instr_rewrite_src(&instr->instr, &instr->src[3], nir_src_for_ssa(data));
   instr->num_components = components;

   return true;
}

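/* Handles vectorized load/store intrinsics: loads get their def trimmed to
 * the channels actually read, stores get trailing channels beyond the write
 * mask dropped, and image stores are trimmed to the image format (see
 * opt_shrink_vectors_image_store above).
 */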
static bool
opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_push_constant:
   case nir_intrinsic_load_constant:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_kernel_input:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      break;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_store:
      return opt_shrink_vectors_image_store(b, instr);
   default:
      return false;
   }

   /* Must be a vectorized intrinsic that we can resize. */
   assert(instr->num_components != 0);

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      /* Loads: trim the dest to the used channels. */

      if (shrink_dest_to_read_mask(&instr->dest.ssa)) {
         instr->num_components = instr->dest.ssa.num_components;
         return true;
      }
   } else {
      /* Stores: trim the num_components stored according to the write
       * mask.
       */
      unsigned write_mask = nir_intrinsic_write_mask(instr);
      unsigned last_bit = util_last_bit(write_mask);
      if (last_bit < instr->num_components && instr->src[0].is_ssa) {
         nir_ssa_def *def = nir_channels(b, instr->src[0].ssa,
                                         BITSET_MASK(last_bit));
         nir_instr_rewrite_src(&instr->instr,
                               &instr->src[0],
                               nir_src_for_ssa(def));
         instr->num_components = last_bit;

         return true;
      }
   }

   return false;
}

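/* load_const and ssa_undef defs have no side effects or sources, so they
 * can simply be truncated to the channels that are read.
 */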
static bool
opt_shrink_vectors_load_const(nir_load_const_instr *instr)
{
   return shrink_dest_to_read_mask(&instr->def);
}

static bool
opt_shrink_vectors_ssa_undef(nir_ssa_undef_instr *instr)
{
   return shrink_dest_to_read_mask(&instr->def);
}

static bool
opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr)
{
   b->cursor = nir_before_instr(instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      return opt_shrink_vectors_alu(b, nir_instr_as_alu(instr));

   case nir_instr_type_intrinsic:
      return opt_shrink_vectors_intrinsic(b, nir_instr_as_intrinsic(instr));

   case nir_instr_type_load_const:
      return opt_shrink_vectors_load_const(nir_instr_as_load_const(instr));

   case nir_instr_type_ssa_undef:
      return opt_shrink_vectors_ssa_undef(nir_instr_as_ssa_undef(instr));

   default:
      return false;
   }

   return true;
}

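/*
 * Typical usage is a sketch like the following (a hypothetical driver
 * optimization loop; exact pass ordering varies): alternate with DCE until
 * neither makes progress, since shrinking defs exposes dead instructions
 * and vice versa.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, shader, nir_opt_shrink_vectors);
 *       NIR_PASS(progress, shader, nir_opt_dce);
 *    } while (progress);
 */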
bool
nir_opt_shrink_vectors(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            progress |= opt_shrink_vectors_instr(&b, instr);
         }
      }

      if (progress) {
         nir_metadata_preserve(function->impl,
                               nir_metadata_block_index |
                               nir_metadata_dominance);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}