/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"

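/* Callback state threaded through to try_coalesce: the backend-supplied
 * writemask filter and its user data, used to veto writemasks the backend
 * cannot handle.
 */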
struct vec_to_movs_data {
   nir_instr_writemask_filter_cb cb;
   const void *data;
};

/*
 * Implements a simple pass that lowers vecN instructions to a series of
 * moves with partial writes.
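 *
 * For example (illustrative NIR, hypothetical value names):
 *
 *    vec4 ssa_2 = vec4 ssa_0.x, ssa_0.y, ssa_1.x, ssa_1.y
 *
 * becomes, roughly, a pair of partial writes to a register:
 *
 *    r0.xy = mov ssa_0.xy
 *    r0.zw = mov ssa_1.xy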
 */

static bool
src_matches_dest_reg(nir_dest *dest, nir_src *src)
{
   if (dest->is_ssa || src->is_ssa)
      return false;

   return (dest->reg.reg == src->reg.reg &&
           dest->reg.base_offset == src->reg.base_offset &&
           !dest->reg.indirect &&
           !src->reg.indirect);
}

/**
 * For a given starting writemask channel and corresponding source index in
 * the vec instruction, insert a MOV to the vec instruction's dest of all the
 * writemask channels that get read from the same src reg.
 *
 * Returns the writemask of our MOV, so the parent loop calling this knows
 * which ones have been processed.
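 *
 * For example (hypothetical values): if channels x and z of the vec both
 * read from the same source with identical modifiers, calling this with
 * start_idx 0 emits a single MOV with writemask xz and returns 0x5.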
 */
static unsigned
insert_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* No sense generating a MOV from undef, we can just leave the dst channel
    * undef.
    */
   if (nir_src_is_undef(vec->src[start_idx].src))
      return 1 << start_idx;

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx]);
   nir_alu_dest_copy(&mov->dest, &vec->dest);

   mov->dest.write_mask = (1u << start_idx);
   mov->src[0].swizzle[start_idx] = vec->src[start_idx].swizzle[0];
   mov->src[0].negate = vec->src[start_idx].negate;
   mov->src[0].abs = vec->src[start_idx].abs;

   for (unsigned i = start_idx + 1; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          vec->src[i].negate == vec->src[start_idx].negate &&
          vec->src[i].abs == vec->src[start_idx].abs) {
         mov->dest.write_mask |= (1 << i);
         mov->src[0].swizzle[i] = vec->src[i].swizzle[0];
      }
   }

   unsigned channels_handled = mov->dest.write_mask;

   /* In some situations (if the vecN is involved in a phi-web), we can end
    * up with a mov from a register to itself.  Some of those channels may end
    * up doing nothing and there's no reason to have them as part of the mov.
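    *
    * For example (hypothetical), "mov r0.xy = r0.xy" does nothing and is
    * dropped entirely, while "mov r0.xy = r0.yx" must be kept.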
    */
   if (src_matches_dest_reg(&mov->dest.dest, &mov->src[0].src) &&
       !mov->src[0].abs && !mov->src[0].negate) {
      for (unsigned i = 0; i < 4; i++) {
         if (mov->src[0].swizzle[i] == i) {
            mov->dest.write_mask &= ~(1 << i);
         }
      }
   }

   /* Only emit the instruction if it actually does something */
   if (mov->dest.write_mask) {
      nir_instr_insert_before(&vec->instr, &mov->instr);
   } else {
      nir_instr_free(&mov->instr);
   }

   return channels_handled;
}

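/* The *_replicated dot-product opcodes broadcast their scalar result to
 * every channel of the destination.
 */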
static bool
has_replicated_dest(nir_alu_instr *alu)
{
   return alu->op == nir_op_fdot2_replicated ||
          alu->op == nir_op_fdot3_replicated ||
          alu->op == nir_op_fdot4_replicated ||
          alu->op == nir_op_fdph_replicated;
}

/* Attempts to coalesce the "move" from the given source of the vec to the
 * destination of the instruction generating the value.  If, for whatever
 * reason, we cannot coalesce the move, it does nothing and returns 0.  We
 * can then call insert_mov as normal.
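 *
 * For instance (hypothetical): if the vec's source comes from an fadd whose
 * only use is this vec, the fadd can be rewritten to write the vec's
 * destination directly, with its sources reswizzled to match.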
 */
static unsigned
try_coalesce(nir_alu_instr *vec, unsigned start_idx, void *_data)
{
   struct vec_to_movs_data *data = _data;

   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* We will only even try if the source is SSA */
   if (!vec->src[start_idx].src.is_ssa)
      return 0;

   assert(vec->src[start_idx].src.ssa);

   /* If we are going to do a reswizzle, then the vecN operation must be the
    * only use of the source value.  We also can't have any source modifiers.
    */
   nir_foreach_use(src, vec->src[start_idx].src.ssa) {
      if (src->parent_instr != &vec->instr)
         return 0;

      nir_alu_src *alu_src = exec_node_data(nir_alu_src, src, src);
      if (alu_src->abs || alu_src->negate)
         return 0;
   }

   if (!list_is_empty(&vec->src[start_idx].src.ssa->if_uses))
      return 0;

   if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
      return 0;

   nir_alu_instr *src_alu =
      nir_instr_as_alu(vec->src[start_idx].src.ssa->parent_instr);

   if (has_replicated_dest(src_alu)) {
      /* The fdot instruction is special: It replicates its result to all
       * components.  This means that we can always rewrite its destination
       * and we don't need to swizzle anything.
       */
   } else {
      /* We only care about being able to re-swizzle the instruction if it is
       * something that we can reswizzle.  It must be per-component.  The one
       * exception to this is the fdotN instructions which implicitly splat
       * their result out to all channels.
       */
      if (nir_op_infos[src_alu->op].output_size != 0)
         return 0;

      /* If we are going to reswizzle the instruction, we can't have any
       * non-per-component sources either.
       */
      for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
         if (nir_op_infos[src_alu->op].input_sizes[j] != 0)
            return 0;
   }

   /* Stash off all of the ALU instruction's swizzles. */
   uint8_t swizzles[4][4];
   for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
      for (unsigned i = 0; i < 4; i++)
         swizzles[j][i] = src_alu->src[j].swizzle[i];

   /* Generate the final write mask */
   unsigned write_mask = 0;
   for (unsigned i = start_idx; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (!vec->src[i].src.is_ssa ||
          vec->src[i].src.ssa != &src_alu->dest.dest.ssa)
         continue;

      write_mask |= 1 << i;
   }

   /* If the instruction would be vectorized but the backend doesn't support
    * vectorizing this op, abort.
    */
   if (data->cb && !data->cb(&src_alu->instr, write_mask, data->data))
      return 0;

   for (unsigned i = start_idx; i < 4; i++) {
      if (!(write_mask & (1 << i)))
         continue;

      /* At this point, the given vec source matches up with the ALU
       * instruction so we can re-swizzle that component to match.
       */
      if (has_replicated_dest(src_alu)) {
         /* Since the destination is a single replicated value, we don't need
          * to do any reswizzling
          */
      } else {
         for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
            src_alu->src[j].swizzle[i] = swizzles[j][vec->src[i].swizzle[0]];
      }

      /* Clear the no longer needed vec source */
      nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, NIR_SRC_INIT);
   }

   nir_instr_rewrite_dest(&src_alu->instr, &src_alu->dest.dest, vec->dest.dest);
   src_alu->dest.write_mask = write_mask;

   return write_mask;
}

static bool
nir_lower_vec_to_movs_instr(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *vec = nir_instr_as_alu(instr);

   switch (vec->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      break;
   default:
      return false;
   }

   bool vec_had_ssa_dest = vec->dest.dest.is_ssa;
   if (vec->dest.dest.is_ssa) {
      /* Since we insert multiple MOVs, we have a register destination. */
      nir_register *reg = nir_local_reg_create(b->impl);
      reg->num_components = vec->dest.dest.ssa.num_components;
      reg->bit_size = vec->dest.dest.ssa.bit_size;

      nir_ssa_def_rewrite_uses_src(&vec->dest.dest.ssa, nir_src_for_reg(reg));

      nir_instr_rewrite_dest(&vec->instr, &vec->dest.dest,
                             nir_dest_for_reg(reg));
   }

   unsigned finished_write_mask = 0;

   /* First, emit a MOV for all the src channels that are in the
    * destination reg, in case other values we're populating in the dest
    * might overwrite them.
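    *
    * For example (hypothetical), if the vec writes r0 and one of its
    * sources reads r0.x, that channel's MOV must be emitted before r0.x
    * can be clobbered by the other writes.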
    */
   for (unsigned i = 0; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (src_matches_dest_reg(&vec->dest.dest, &vec->src[i].src)) {
         finished_write_mask |= insert_mov(vec, i, b->shader);
         break;
      }
   }

   /* Now, emit MOVs for all the other src channels. */
   for (unsigned i = 0; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      /* Coalescing moves the register writes from the vec up to the ALU
       * instruction in the source.  We can only do this if the original
       * vecN had an SSA destination.
       */
      if (vec_had_ssa_dest && !(finished_write_mask & (1 << i)))
         finished_write_mask |= try_coalesce(vec, i, data);

      if (!(finished_write_mask & (1 << i)))
         finished_write_mask |= insert_mov(vec, i, b->shader);
   }

   nir_instr_remove(&vec->instr);
   nir_instr_free(&vec->instr);

   return true;
}

bool
nir_lower_vec_to_movs(nir_shader *shader, nir_instr_writemask_filter_cb cb,
                      const void *_data)
{
   struct vec_to_movs_data data = {
      .cb = cb,
      .data = _data,
   };

   return nir_shader_instructions_pass(shader,
                                       nir_lower_vec_to_movs_instr,
                                       nir_metadata_block_index |
                                          nir_metadata_dominance,
                                       &data);
}