/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"

/*
 * Implements a pass that tries to move uses of vecN sources to the vecN
 * destination.  This is essentially an inverse copy-propagation pass.
 * For instance, if you have
 *
 * ssa_1 = vec4(a, b, c, d)
 * ssa_2 = fadd(a, b)
 *
 * this will be turned into
 *
 * ssa_1 = vec4(a, b, c, d)
 * ssa_2 = fadd(ssa_1.x, ssa_1.y)
 *
 * While this is "worse" in that it adds dependencies that were not otherwise
 * needed, it makes it much easier for vec4-based backends to coalesce the
 * MOVs that result from the vec4 operation because the coalescer doesn't
 * have to worry about quite as many reads of the sources.
 */
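
/* To make the benefit concrete (an illustrative sketch, assuming a backend
 * that lowers vecN operations to one MOV per channel): ssa_1 above becomes
 *
 *    ssa_1.x = MOV a
 *    ssa_1.y = MOV b
 *    ...
 *
 * Coalescing a MOV means giving its source and destination the same
 * register, and every independent read of a or b adds an interference
 * constraint that can block that.  Routing those reads through ssa_1 first
 * removes most of those constraints.
 */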

/* Returns true if the given SSA def dominates the instruction.  An SSA def is
 * considered to *not* dominate the instruction that defines it.
 */
static bool
ssa_def_dominates_instr(nir_ssa_def *def, nir_instr *instr)
{
   if (instr->index <= def->parent_instr->index) {
      /* Instruction indices follow program order, so nothing at or before
       * the def can be dominated by it.
       */
      return false;
   } else if (def->parent_instr->block == instr->block) {
      return def->parent_instr->index < instr->index;
   } else {
      return nir_block_dominates(def->parent_instr->block, instr->block);
   }
}
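
/* For example (assuming nir_index_instrs() has numbered instructions in
 * program order): if the def has index 3, an instruction with index <= 3 can
 * never be dominated by it; an instruction with index 7 in the same block is
 * dominated by it; and for an instruction in a different block the question
 * reduces to block dominance via nir_block_dominates().
 */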

static bool
move_vec_src_uses_to_dest_block(nir_block *block)
{
   bool progress = false;

   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *vec = nir_instr_as_alu(instr);

      switch (vec->op) {
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         break;
      default:
         continue; /* continue the surrounding instruction loop, not the switch */
      }

      /* Can't handle non-SSA vec operations */
      if (!vec->dest.dest.is_ssa)
         continue;

      /* Can't handle saturation */
      if (vec->dest.saturate)
         continue;

      /* First, mark all of the sources we are going to consider for rewriting
       * to the destination
       */
      int srcs_remaining = 0;
      for (unsigned i = 0; i < nir_op_infos[vec->op].num_inputs; i++) {
         /* We can't rewrite a source if it's not in SSA form */
         if (!vec->src[i].src.is_ssa)
            continue;

         /* We can't rewrite a source if it has modifiers */
         if (vec->src[i].abs || vec->src[i].negate)
            continue;

         srcs_remaining |= 1 << i;
      }
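
      /* For example: given vec4(a, b, -c, d), only source 2 carries a
       * negate modifier, so the loop above leaves srcs_remaining == 0xb
       * (binary 1011) and source 2 is never considered below.
       */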
      /* We can't actually do anything with this instruction */
      if (srcs_remaining == 0)
         continue;

      /* Process the candidates one SSA def at a time, starting with the
       * lowest remaining source; the inner loop below clears every source
       * bit that refers to the same def.
       */
      while (srcs_remaining) {
         unsigned i = ffs(srcs_remaining) - 1;

         /* swizzle[c] records which channel of the vec destination carries
          * component c of source i's SSA def, or -1 if no channel does.
          */
         int8_t swizzle[NIR_MAX_VEC_COMPONENTS];
         memset(swizzle, -1, sizeof(swizzle));

         for (unsigned j = i; j < nir_op_infos[vec->op].num_inputs; j++) {
            if (vec->src[j].src.ssa != vec->src[i].src.ssa)
               continue;

            /* Mark source j as having been handled */
            srcs_remaining &= ~(1 << j);

            /* Mark the appropriate channel as coming from src j */
            swizzle[vec->src[j].swizzle[0]] = j;
         }
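
         /* Worked example: for ssa_1 = vec4(ssa_0.x, ssa_0.z, ssa_5.y,
          * ssa_0.x) with i == 0 (ssa_0), the loop above yields
          *
          *    swizzle = { 3, -1, 1, -1 }
          *
          * component x of ssa_0 lives in channel 3 of ssa_1 (the last
          * writer wins) and component z lives in channel 1, so a use like
          * fadd(ssa_0.zx, ...) can be rewritten below as fadd(ssa_1.yw, ...).
          */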

         nir_foreach_use_safe(use, vec->src[i].src.ssa) {
            if (use->parent_instr == &vec->instr)
               continue;

            /* We need to dominate the use if we are going to rewrite it */
            if (!ssa_def_dominates_instr(&vec->dest.dest.ssa, use->parent_instr))
               continue;

            /* For now, we'll just rewrite ALU instructions */
            if (use->parent_instr->type != nir_instr_type_alu)
               continue;

            assert(use->is_ssa);

            nir_alu_instr *use_alu = nir_instr_as_alu(use->parent_instr);

            /* Figure out which source we're actually looking at */
            nir_alu_src *use_alu_src = exec_node_data(nir_alu_src, use, src);
            unsigned src_idx = use_alu_src - use_alu->src;
            assert(src_idx < nir_op_infos[use_alu->op].num_inputs);

            bool can_reswizzle = true;
            for (unsigned j = 0; j < 4; j++) {
               if (!nir_alu_instr_channel_used(use_alu, src_idx, j))
                  continue;

               if (swizzle[use_alu_src->swizzle[j]] == -1) {
                  can_reswizzle = false;
                  break;
               }
            }

            if (!can_reswizzle)
               continue;

            /* At this point, we have determined that the given use can be
             * reswizzled to actually use the destination of the vecN operation.
             * Go ahead and rewrite it as needed.
             */
            nir_instr_rewrite_src(use->parent_instr, use,
                                  nir_src_for_ssa(&vec->dest.dest.ssa));
            for (unsigned j = 0; j < 4; j++) {
               if (!nir_alu_instr_channel_used(use_alu, src_idx, j))
                  continue;

               use_alu_src->swizzle[j] = swizzle[use_alu_src->swizzle[j]];
               progress = true;
            }
         }
      }
   }

   return progress;
}

static bool
nir_move_vec_src_uses_to_dest_impl(nir_shader *shader, nir_function_impl *impl)
{
   bool progress = false;

   nir_metadata_require(impl, nir_metadata_dominance);

   /* ssa_def_dominates_instr() depends on up-to-date instruction indices */
   nir_index_instrs(impl);

   nir_foreach_block(block, impl) {
      progress |= move_vec_src_uses_to_dest_block(block);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

bool
nir_move_vec_src_uses_to_dest(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_move_vec_src_uses_to_dest_impl(shader,
                                                        function->impl);
   }

   return progress;
}
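
/* Usage sketch (illustrative; the surrounding pass order is an assumption,
 * not part of this file): because this pass is essentially inverse
 * copy-propagation, a backend would run it after its main optimization loop,
 * where a later nir_copy_prop() would largely undo it, and just before going
 * out of SSA:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_copy_prop(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 *
 *    nir_move_vec_src_uses_to_dest(shader);
 */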