/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"

/*
 * Implements a pass that tries to move uses of vecN sources to their
 * destinations. This is kind of like an inverse copy-propagation pass.
 * For instance, if you have
 *
 *    ssa_1 = vec4(a, b, c, d)
 *    ssa_2 = fadd(a, b)
 *
 * This will be turned into
 *
 *    ssa_1 = vec4(a, b, c, d)
 *    ssa_2 = fadd(ssa_1.x, ssa_1.y)
 *
 * While this is "worse" because it adds a bunch of unneeded dependencies, it
 * actually makes it much easier for vec4-based backends to coalesce the MOVs
 * that result from the vec4 operation because it doesn't have to worry about
 * quite as many reads.
 */

/* Returns true if the given SSA def dominates the instruction. An SSA def is
 * considered to *not* dominate the instruction that defines it.
 */
static bool
ssa_def_dominates_instr(nir_def *def, nir_instr *instr)
{
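   /* Instruction indices (assigned by nir_index_instrs) increase in block
    * order, so an instruction at or before the def in that order can never
    * be dominated by it.
    */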
   if (instr->index <= def->parent_instr->index) {
      return false;
   } else if (def->parent_instr->block == instr->block) {
      return def->parent_instr->index < instr->index;
   } else {
      return nir_block_dominates(def->parent_instr->block, instr->block);
   }
}

static bool
move_vec_src_uses_to_dest_block(nir_block *block, bool skip_const_srcs)
{
   bool progress = false;

   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *vec = nir_instr_as_alu(instr);

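      /* We're only interested in vecN instructions; those are the ones whose
       * destination channels other instructions can be redirected to read.
       */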
      switch (vec->op) {
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         break;
      default:
         continue; /* The loop */
      }

      /* If the vec is only used by a single output store, then by reusing it
       * we would lose the ability to write the value to the output directly.
       */
      if (list_is_singular(&vec->def.uses)) {
         nir_src *src = list_first_entry(&vec->def.uses, nir_src, use_link);
         nir_instr *use_instr = nir_src_parent_instr(src);
         if (use_instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(use_instr);
            if (intr->intrinsic == nir_intrinsic_store_output ||
                intr->intrinsic == nir_intrinsic_store_per_view_output)
               continue;
         }
      }

      /* First, build a bitmask of the sources we are going to consider for
       * rewriting to the destination; bit i stands for vec source i.
       */
      int srcs_remaining = 0;
      for (unsigned i = 0; i < nir_op_infos[vec->op].num_inputs; i++) {
         if (skip_const_srcs && nir_src_is_const(vec->src[i].src))
            continue;

         srcs_remaining |= 1 << i;
      }

      /* We can't actually do anything with this instruction */
      if (srcs_remaining == 0)
         continue;

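      /* Each iteration of this loop handles one distinct SSA value among the
       * sources: i is the lowest source index whose bit is still set, and the
       * loop runs until every bit in srcs_remaining has been cleared.
       */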
      for (unsigned i; i = ffs(srcs_remaining) - 1, srcs_remaining;) {
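         /* swizzle[c] will map component c of this source's SSA value to the
          * vec destination channel that holds it; -1 means no channel does.
          */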
         int8_t swizzle[NIR_MAX_VEC_COMPONENTS];
         memset(swizzle, -1, sizeof(swizzle));

         for (unsigned j = i; j < nir_op_infos[vec->op].num_inputs; j++) {
            if (vec->src[j].src.ssa != vec->src[i].src.ssa)
               continue;

            /* Mark the given source as having been handled */
            srcs_remaining &= ~(1 << j);

            /* Mark the appropriate channel as coming from src j */
            swizzle[vec->src[j].swizzle[0]] = j;
         }

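         /* Now try to rewrite every other use of this SSA value to read the
          * corresponding channels of the vec destination instead.
          */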
         nir_foreach_use_safe(use, vec->src[i].src.ssa) {
            if (nir_src_parent_instr(use) == &vec->instr)
               continue;

            /* We need to dominate the use if we are going to rewrite it */
            if (!ssa_def_dominates_instr(&vec->def, nir_src_parent_instr(use)))
               continue;

            /* For now, we'll just rewrite ALU instructions */
            if (nir_src_parent_instr(use)->type != nir_instr_type_alu)
               continue;

            nir_alu_instr *use_alu = nir_instr_as_alu(nir_src_parent_instr(use));

            /* Figure out which source we're actually looking at */
            nir_alu_src *use_alu_src = exec_node_data(nir_alu_src, use, src);
            unsigned src_idx = use_alu_src - use_alu->src;
            assert(src_idx < nir_op_infos[use_alu->op].num_inputs);

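            /* Check that every channel this use reads is provided by the
             * source we're currently processing; otherwise it can't be
             * reswizzled to read from the vec destination.
             */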
            bool can_reswizzle = true;
            for (unsigned j = 0; j < 4; j++) {
               if (!nir_alu_instr_channel_used(use_alu, src_idx, j))
                  continue;

               if (swizzle[use_alu_src->swizzle[j]] == -1) {
                  can_reswizzle = false;
                  break;
               }
            }

            if (!can_reswizzle)
               continue;

            /* At this point, we have determined that the given use can be
             * reswizzled to actually use the destination of the vecN operation.
             * Go ahead and rewrite it as needed.
             */
            nir_src_rewrite(use, &vec->def);
            for (unsigned j = 0; j < 4; j++) {
               if (!nir_alu_instr_channel_used(use_alu, src_idx, j))
                  continue;

               use_alu_src->swizzle[j] = swizzle[use_alu_src->swizzle[j]];
               progress = true;
            }
         }
      }
   }

   return progress;
}

static bool
nir_move_vec_src_uses_to_dest_impl(nir_shader *shader, nir_function_impl *impl,
                                   bool skip_const_srcs)
{
   bool progress = false;

   nir_metadata_require(impl, nir_metadata_dominance);

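   /* ssa_def_dominates_instr() compares the instruction indices assigned
    * here, so they have to be up to date before we walk the blocks.
    */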
   nir_index_instrs(impl);

   nir_foreach_block(block, impl) {
      progress |= move_vec_src_uses_to_dest_block(block, skip_const_srcs);
   }

   nir_metadata_preserve(impl, nir_metadata_control_flow);

   return progress;
}

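/* Entry point: runs the rewrite on every function in the shader. With
 * skip_const_srcs set, constant vec sources are left out of consideration,
 * so other uses of those constants keep reading the constant directly.
 */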
bool
nir_move_vec_src_uses_to_dest(nir_shader *shader, bool skip_const_srcs)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress |= nir_move_vec_src_uses_to_dest_impl(shader, impl, skip_const_srcs);
   }

   return progress;
}