/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_copy_propagation.cpp
 *
 * Support for global copy propagation in two passes: a local pass that does
 * intra-block copy (and constant) propagation, and a global pass that uses
 * dataflow analysis on the copies available at the end of each block to re-do
 * local copy propagation with more copies available.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 12.5 (p356).
 */
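
/* A sketch of the dataflow equations solved by the global pass (see
 * fs_copy_prop_dataflow::run() for the undef refinement): for each
 * basic block B,
 *
 *    LIVEIN(B)  = AND over parents P of (LIVEOUT(P) | UNDEF(P))
 *    LIVEOUT(B) = COPY(B) | (LIVEIN(B) & ~KILL(B))
 *
 * iterated until no LIVEOUT set changes.
 */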

#define ACP_HASH_SIZE 64

#include "util/bitset.h"
#include "util/u_math.h"
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"
#include "brw_eu.h"

using namespace brw;

namespace { /* avoid conflict with opt_copy_propagation_elements */
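/* A single available-copy record: the destination and source of a copy,
 * plus the metadata (sizes, opcode, saturate and partial-write flags)
 * needed to decide whether the copy can be folded into a later use of
 * the destination.
 */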
struct acp_entry : public exec_node {
   fs_reg dst;
   fs_reg src;
   unsigned global_idx;
   unsigned size_written;
   unsigned size_read;
   enum opcode opcode;
   bool saturate;
   bool is_partial_write;
};

struct block_data {
   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the
    * start of this block.  This is the useful output of the analysis, since
    * it lets us plug those into the local copy propagation on the second
    * pass.
    */
   BITSET_WORD *livein;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the end
    * of this block.  This is done in initial setup from the per-block acps
    * returned by the first local copy prop pass.
    */
   BITSET_WORD *liveout;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are generated by
    * instructions in this block which reach the end of the block without
    * being killed.
    */
   BITSET_WORD *copy;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are killed over the
    * course of this block.
    */
   BITSET_WORD *kill;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are guaranteed to
    * have a fully uninitialized destination at the end of this block.
    */
   BITSET_WORD *undef;
};

class fs_copy_prop_dataflow
{
public:
   fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                         const fs_live_variables &live,
                         exec_list *out_acp[ACP_HASH_SIZE]);

   void setup_initial_values();
   void run();

   void dump_block_data() const UNUSED;

   void *mem_ctx;
   cfg_t *cfg;
   const fs_live_variables &live;

   acp_entry **acp;
   int num_acp;
   int bitset_words;

   struct block_data *bd;
};
} /* anonymous namespace */

fs_copy_prop_dataflow::fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                                             const fs_live_variables &live,
                                             exec_list *out_acp[ACP_HASH_SIZE])
   : mem_ctx(mem_ctx), cfg(cfg), live(live)
{
   bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   num_acp = 0;
   foreach_block (block, cfg) {
      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         num_acp += out_acp[block->num][i].length();
      }
   }

   acp = rzalloc_array(mem_ctx, struct acp_entry *, num_acp);

   bitset_words = BITSET_WORDS(num_acp);

   int next_acp = 0;
   foreach_block (block, cfg) {
      bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].undef = rzalloc_array(bd, BITSET_WORD, bitset_words);

      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
            acp[next_acp] = entry;

            entry->global_idx = next_acp;

            /* opt_copy_propagation_local populates out_acp with copies created
             * in a block which are still live at the end of the block.  This
             * is exactly what we want in the COPY set.
             */
            BITSET_SET(bd[block->num].copy, next_acp);

            next_acp++;
         }
      }
   }

   assert(next_acp == num_acp);

   setup_initial_values();
   run();
}

/**
 * Set up initial values for each of the data flow sets, prior to running
 * the fixed-point algorithm.
 */
void
fs_copy_prop_dataflow::setup_initial_values()
{
   /* Initialize the COPY and KILL sets. */
   {
      /* Create a temporary table of ACP entries which we'll use for efficient
       * look-up.  Unfortunately, we have to do this in two steps because we
       * have to match both sources and destinations and an ACP entry can only
       * be in one list at a time.
       *
       * We choose to make the table size between num_acp/4 and num_acp/2 to
       * try and trade off between the time it takes to initialize the table
       * via exec_list constructors or make_empty() and the cost of
       * collisions.  In practice, it doesn't appear to matter too much what
       * size we make the table as long as it's roughly the same order of
       * magnitude as num_acp.  We get most of the benefit of the table
       * approach even if we use a table of size ACP_HASH_SIZE, though a
       * full-sized table is 1-2% faster in practice.
       */
      unsigned acp_table_size = util_next_power_of_two(num_acp) / 4;
      acp_table_size = MAX2(acp_table_size, ACP_HASH_SIZE);
      exec_list *acp_table = new exec_list[acp_table_size];

      /* First, get all the KILLs for instructions which overwrite ACP
       * destinations.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->dst) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->dst, entry->size_written))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      /* Clear the table for the second pass. */
      for (unsigned i = 0; i < acp_table_size; i++)
         acp_table[i].make_empty();

      /* Next, get all the KILLs for instructions which overwrite ACP
       * sources.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->src) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF &&
                inst->dst.file != FIXED_GRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->src, entry->size_read))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      delete [] acp_table;
   }

   /* Populate the initial values for the livein and liveout sets.  For the
    * block at the start of the program, livein = 0 and liveout = copy.
    * For the others, set liveout and livein to ~0 (the universal set).
    */
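   /* Starting the non-entry blocks from the universal set means the
    * fixed-point iteration in run() can only ever remove entries from
    * livein/liveout, so it converges on the maximal solution.
    */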
   foreach_block (block, cfg) {
      if (block->parents.is_empty()) {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].livein[i] = 0u;
            bd[block->num].liveout[i] = bd[block->num].copy[i];
         }
      } else {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].liveout[i] = ~0u;
            bd[block->num].livein[i] = ~0u;
         }
      }
   }

   /* Initialize the undef set. */
   foreach_block (block, cfg) {
      for (int i = 0; i < num_acp; i++) {
         BITSET_SET(bd[block->num].undef, i);
         for (unsigned off = 0; off < acp[i]->size_written; off += REG_SIZE) {
            if (BITSET_TEST(live.block_data[block->num].defout,
                            live.var_from_reg(byte_offset(acp[i]->dst, off))))
               BITSET_CLEAR(bd[block->num].undef, i);
         }
      }
   }
}

/**
 * Run the fixed-point dataflow algorithm, iteratively updating the livein
 * and liveout sets of each block until no liveout set changes.
 */
void
fs_copy_prop_dataflow::run()
{
   bool progress;

   do {
      progress = false;

      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_liveout = bd[block->num].liveout[i];
            BITSET_WORD livein_from_any_block = 0;

            /* Update livein for this block.  If a copy is live out of all
             * parent blocks, it's live coming in to this block.
             */
            bd[block->num].livein[i] = ~0u;
            foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
               bblock_t *parent = parent_link->block;
               /* Consider ACP entries with a known-undefined destination to
                * be available from the parent.  This is valid because we're
                * free to set the undefined variable equal to the source of
                * the ACP entry without breaking the application's
                * expectations, since the variable is undefined.
                */
               bd[block->num].livein[i] &= (bd[parent->num].liveout[i] |
                                            bd[parent->num].undef[i]);
               livein_from_any_block |= bd[parent->num].liveout[i];
            }

            /* Limit to the set of ACP entries that can possibly be available
             * at the start of the block, since propagating from a variable
             * which is guaranteed to be undefined (rather than potentially
             * undefined for some dynamic control-flow paths) doesn't seem
             * particularly useful.
             */
            bd[block->num].livein[i] &= livein_from_any_block;

            /* Update liveout for this block. */
            bd[block->num].liveout[i] =
               bd[block->num].copy[i] | (bd[block->num].livein[i] &
                                         ~bd[block->num].kill[i]);

            if (old_liveout != bd[block->num].liveout[i])
               progress = true;
         }
      }
   } while (progress);
}

void
fs_copy_prop_dataflow::dump_block_data() const
{
   foreach_block (block, cfg) {
      fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
              block->start_ip, block->end_ip);
      foreach_list_typed(bblock_link, link, link, &block->parents) {
         bblock_t *parent = link->block;
         fprintf(stderr, "%d ", parent->num);
      }
      fprintf(stderr, "):\n");
      fprintf(stderr, " livein = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].livein[i]);
      fprintf(stderr, ", liveout = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].liveout[i]);
      fprintf(stderr, ",\n copy = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].copy[i]);
      fprintf(stderr, ", kill = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].kill[i]);
      fprintf(stderr, "\n");
   }
}

static bool
is_logic_op(enum opcode opcode)
{
   return (opcode == BRW_OPCODE_AND ||
           opcode == BRW_OPCODE_OR ||
           opcode == BRW_OPCODE_XOR ||
           opcode == BRW_OPCODE_NOT);
}

static bool
can_take_stride(fs_inst *inst, brw_reg_type dst_type,
                unsigned arg, unsigned stride,
                const intel_device_info *devinfo)
{
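   /* The hardware can encode horizontal strides of at most 4, so larger
    * composed strides can never be expressed as a register region.
    */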
   if (stride > 4)
      return false;

   /* Bail if the channels of the source need to be aligned to the byte offset
    * of the corresponding channel of the destination, and the provided stride
    * would break this restriction.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       !(type_sz(inst->src[arg].type) * stride ==
         type_sz(dst_type) * inst->dst.stride ||
         stride == 0))
      return false;

   /* 3-source instructions can only be Align16, which restricts what strides
    * they can take.  They can only take a stride of 1 (the usual case), or 0
    * with a special "repctrl" bit.  But the repctrl bit doesn't work for
    * 64-bit datatypes, so if the source type is 64-bit then only a stride of
    * 1 is allowed.  From the Broadwell PRM, Volume 7 "3D Media GPGPU", page
    * 944:
    *
    *    This is applicable to 32b datatypes and 16b datatype.  64b datatypes
    *    cannot use the replicate control.
    */
   if (inst->is_3src(devinfo)) {
      if (type_sz(inst->src[arg].type) > 4)
         return stride == 1;
      else
         return stride == 1 || stride == 0;
   }

   /* From the Broadwell PRM, Volume 2a "Command Reference - Instructions",
    * page 391 ("Extended Math Function"):
    *
    *    The following restrictions apply for align1 mode: Scalar source is
    *    supported.  Source and destination horizontal stride must be the
    *    same.
    *
    * From the Haswell PRM Volume 2b "Command Reference - Instructions", page
    * 134 ("Extended Math Function"):
    *
    *    Scalar source is supported.  Source and destination horizontal stride
    *    must be 1.
    *
    * and similar language exists for IVB and SNB.  Pre-SNB, math instructions
    * are sends, so the sources are moved to MRF's and there are no
    * restrictions.
    */
   if (inst->is_math()) {
      if (devinfo->ver == 6 || devinfo->ver == 7) {
         assert(inst->dst.stride == 1);
         return stride == 1 || stride == 0;
      } else if (devinfo->ver >= 8) {
         return stride == inst->dst.stride || stride == 0;
      }
   }

   return true;
}

static bool
instruction_requires_packed_data(fs_inst *inst)
{
   switch (inst->opcode) {
   case FS_OPCODE_DDX_FINE:
   case FS_OPCODE_DDX_COARSE:
   case FS_OPCODE_DDY_FINE:
   case FS_OPCODE_DDY_COARSE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
      return true;
   default:
      return false;
   }
}

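/**
 * Try to fold the copy recorded in entry into the arg-th source of inst,
 * composing the regions, types and modifiers of both.  Rewrites
 * inst->src[arg] in place and returns true on success.
 */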
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
   if (inst->src[arg].file != VGRF)
      return false;

   if (entry->src.file == IMM)
      return false;
   assert(entry->src.file == VGRF || entry->src.file == UNIFORM ||
          entry->src.file == ATTR || entry->src.file == FIXED_GRF);

   /* Avoid propagating a LOAD_PAYLOAD instruction into another if there is a
    * good chance that we'll be able to eliminate the latter through register
    * coalescing.  If only part of the sources of the second LOAD_PAYLOAD can
    * be simplified through copy propagation we would be making register
    * coalescing impossible, ending up with unnecessary copies in the program.
    * This is also the case for is_multi_copy_payload() copies that can only
    * be coalesced when the instruction is lowered into a sequence of MOVs.
    *
    * Worse -- In cases where the ACP entry was the result of CSE combining
    * multiple LOAD_PAYLOAD subexpressions, propagating the first LOAD_PAYLOAD
    * into the second would undo the work of CSE, leading to an infinite
    * optimization loop.  Avoid this by detecting LOAD_PAYLOAD copies from CSE
    * temporaries which should match is_coalescing_payload().
    */
   if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
       (is_coalescing_payload(alloc, inst) || is_multi_copy_payload(inst)))
      return false;

   assert(entry->dst.file == VGRF);
   if (inst->src[arg].nr != entry->dst.nr)
      return false;

   /* Bail if inst is reading a range that isn't contained in the range
    * that entry is writing.
    */
   if (!region_contained_in(inst->src[arg], inst->size_read(arg),
                            entry->dst, entry->size_written))
      return false;

   /* Avoid propagating a FIXED_GRF register into an EOT instruction in order
    * for any register allocation restrictions to be applied.
    */
   if (entry->src.file == FIXED_GRF && inst->eot)
      return false;

   /* Avoid propagating odd-numbered FIXED_GRF registers into the first source
    * of a LINTERP instruction on platforms where the PLN instruction has
    * register alignment restrictions.
    */
   if (devinfo->has_pln && devinfo->ver <= 6 &&
       entry->src.file == FIXED_GRF && (entry->src.nr & 1) &&
       inst->opcode == FS_OPCODE_LINTERP && arg == 0)
      return false;

   /* We can't generally copy-propagate UD negations because we can end up
    * accessing the resulting values as signed integers instead.  See also
    * resolve_ud_negate() and the comment in fs_generator::generate_code.
    */
   if (entry->src.type == BRW_REGISTER_TYPE_UD &&
       entry->src.negate)
      return false;

   bool has_source_modifiers = entry->src.abs || entry->src.negate;

   if (has_source_modifiers && !inst->can_do_source_mods(devinfo))
      return false;

   /* Reject cases that would violate register regioning restrictions. */
   if ((entry->src.file == UNIFORM || !entry->src.is_contiguous()) &&
       ((devinfo->ver == 6 && inst->is_math()) ||
        inst->is_send_from_grf() ||
        inst->uses_indirect_addressing())) {
      return false;
   }

   if (has_source_modifiers &&
       inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_WRITE)
      return false;

   /* Some instructions implemented in the generator backend, such as
    * derivatives, assume that their operands are packed so we can't
    * generally propagate strided regions to them.
    */
   const unsigned entry_stride = (entry->src.file == FIXED_GRF ? 1 :
                                  entry->src.stride);
   if (instruction_requires_packed_data(inst) && entry_stride != 1)
      return false;

   const brw_reg_type dst_type = (has_source_modifiers &&
                                  entry->dst.type != inst->src[arg].type) ?
      entry->dst.type : inst->dst.type;

   /* Bail if the result of composing both strides would exceed the
    * hardware limit.
    */
   if (!can_take_stride(inst, dst_type, arg,
                        entry_stride * inst->src[arg].stride,
                        devinfo))
      return false;

   /* From the Cherry Trail/Braswell PRMs, Volume 7: 3D Media GPGPU,
    * EU Overview, Register Region Restrictions,
    * Special Requirements for Handling Double Precision Data Types:
    *
    *    "When source or destination datatype is 64b or operation is integer
    *     DWord multiply, regioning in Align1 must follow these rules:
    *
    *     1. Source and Destination horizontal stride must be aligned to the
    *        same qword.
    *     2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
    *     3. Source and Destination offset must be the same, except the case
    *        of scalar source."
    *
    * Most of this is already checked in can_take_stride(); we're only left
    * with checking 3.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       entry_stride != 0 &&
       (reg_offset(inst->dst) % REG_SIZE) != (reg_offset(entry->src) % REG_SIZE))
      return false;

   /* Bail if the source FIXED_GRF region of the copy cannot be trivially
    * composed with the source region of the instruction -- e.g. because the
    * copy uses some extended stride greater than 4 not supported natively by
    * the hardware as a horizontal stride, or because instruction compression
    * could require us to use a vertical stride shorter than a GRF.
    */
   if (entry->src.file == FIXED_GRF &&
       (inst->src[arg].stride > 4 ||
        inst->dst.component_size(inst->exec_size) >
        inst->src[arg].component_size(inst->exec_size)))
      return false;

   /* Bail if the instruction type is larger than the execution type of the
    * copy, which implies that each channel is reading multiple channels of
    * the destination of the copy, and simply replacing the sources would
    * give a program with different semantics.
    */
   if ((type_sz(entry->dst.type) < type_sz(inst->src[arg].type) ||
        entry->is_partial_write) &&
       inst->opcode != BRW_OPCODE_MOV) {
      return false;
   }

   /* Bail if the result of composing both strides cannot be expressed
    * as another stride.  This avoids, for example, trying to transform
    * this:
    *
    *    MOV (8) rX<1>UD rY<0;1,0>UD
    *    FOO (8) ...     rX<8;8,1>UW
    *
    * into this:
    *
    *    FOO (8) ...     rY<0;1,0>UW
    *
    * which would have different semantics.
    */
   if (entry_stride != 1 &&
       (inst->src[arg].stride *
        type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
      return false;

   /* Since semantics of source modifiers are type-dependent we need to
    * ensure that the meaning of the instruction remains the same if we
    * change the type.  If the sizes of the types are different the new
    * instruction will read a different amount of data than the original
    * and the semantics will always be different.
    */
   if (has_source_modifiers &&
       entry->dst.type != inst->src[arg].type &&
       (!inst->can_change_types() ||
        type_sz(entry->dst.type) != type_sz(inst->src[arg].type)))
      return false;

   if (devinfo->ver >= 8 && (entry->src.negate || entry->src.abs) &&
       is_logic_op(inst->opcode)) {
      return false;
   }

   if (entry->saturate) {
      switch (inst->opcode) {
      case BRW_OPCODE_SEL:
         if ((inst->conditional_mod != BRW_CONDITIONAL_GE &&
              inst->conditional_mod != BRW_CONDITIONAL_L) ||
             inst->src[1].file != IMM ||
             inst->src[1].f < 0.0 ||
             inst->src[1].f > 1.0) {
            return false;
         }
         break;
      default:
         return false;
      }
   }

   /* Save the offset of inst->src[arg] relative to entry->dst for it to be
    * applied later.
    */
   const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset;

   /* Fold the copy into the instruction consuming it. */
   inst->src[arg].file = entry->src.file;
   inst->src[arg].nr = entry->src.nr;
   inst->src[arg].subnr = entry->src.subnr;
   inst->src[arg].offset = entry->src.offset;

   /* Compose the strides of both regions. */
   if (entry->src.file == FIXED_GRF) {
      if (inst->src[arg].stride) {
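         /* The FIXED_GRF region fields are log2-encoded, so adding the
          * encoded hstride and width below yields vstride = width * hstride,
          * i.e. the rows of the composed region are packed back to back.
          */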
         const unsigned orig_width = 1 << entry->src.width;
         const unsigned reg_width = REG_SIZE / (type_sz(inst->src[arg].type) *
                                                inst->src[arg].stride);
         inst->src[arg].width = cvt(MIN2(orig_width, reg_width)) - 1;
         inst->src[arg].hstride = cvt(inst->src[arg].stride);
         inst->src[arg].vstride = inst->src[arg].hstride + inst->src[arg].width;
      } else {
         inst->src[arg].vstride = inst->src[arg].hstride =
            inst->src[arg].width = 0;
      }

      inst->src[arg].stride = 1;

      /* Hopefully no Align16 around here... */
      assert(entry->src.swizzle == BRW_SWIZZLE_XYZW);
      inst->src[arg].swizzle = entry->src.swizzle;
   } else {
      inst->src[arg].stride *= entry->src.stride;
   }

   /* Compose any saturate modifiers. */
   inst->saturate = inst->saturate || entry->saturate;

   /* Compute the first component of the copy that the instruction is
    * reading, and the base byte offset within that component.
    */
   assert((entry->dst.offset % REG_SIZE == 0 || inst->opcode == BRW_OPCODE_MOV) &&
          entry->dst.stride == 1);
   const unsigned component = rel_offset / type_sz(entry->dst.type);
   const unsigned suboffset = rel_offset % type_sz(entry->dst.type);

   /* Calculate the byte offset at the origin of the copy of the given
    * component and suboffset.
    */
   inst->src[arg] = byte_offset(inst->src[arg],
      component * entry_stride * type_sz(entry->src.type) + suboffset);

   if (has_source_modifiers) {
      if (entry->dst.type != inst->src[arg].type) {
         /* We are propagating source modifiers from a MOV with a different
          * type.  If we got here, then we can just change the source and
          * destination types of the instruction and keep going.
          */
         assert(inst->can_change_types());
         for (int i = 0; i < inst->sources; i++) {
            inst->src[i].type = entry->dst.type;
         }
         inst->dst.type = entry->dst.type;
      }

      if (!inst->src[arg].abs) {
         inst->src[arg].abs = entry->src.abs;
         inst->src[arg].negate ^= entry->src.negate;
      }
   }

   return true;
}


bool
fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
   bool progress = false;

   if (entry->src.file != IMM)
      return false;
   if (type_sz(entry->src.type) > 4)
      return false;
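   /* The copy recorded a saturating MOV, so folding the immediate in here
    * would require pre-saturating the constant, which this pass doesn't
    * attempt.
    */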
   if (entry->saturate)
      return false;

   for (int i = inst->sources - 1; i >= 0; i--) {
      if (inst->src[i].file != VGRF)
         continue;

      assert(entry->dst.file == VGRF);
      if (inst->src[i].nr != entry->dst.nr)
         continue;

      /* Bail if inst is reading a range that isn't contained in the range
       * that entry is writing.
       */
      if (!region_contained_in(inst->src[i], inst->size_read(i),
                               entry->dst, entry->size_written))
         continue;

      /* If the type sizes don't match, each channel of the instruction is
       * either extracting a portion of the constant (which could be handled
       * with some effort but the code below doesn't) or reading multiple
       * channels of the source at once.
       */
      if (type_sz(inst->src[i].type) != type_sz(entry->dst.type))
         continue;

      fs_reg val = entry->src;
      val.type = inst->src[i].type;

      if (inst->src[i].abs) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_abs_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      if (inst->src[i].negate) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_negate_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
      case SHADER_OPCODE_LOAD_PAYLOAD:
      case FS_OPCODE_PACK:
         inst->src[i] = val;
         progress = true;
         break;

      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         /* FINISHME: Promote non-float constants and remove this. */
         if (devinfo->ver < 8)
            break;
         FALLTHROUGH;
      case SHADER_OPCODE_POW:
         /* Allow constant propagation into src1 (except on Gen 6 which
          * doesn't support scalar source math), and let constant combining
          * promote the constant on Gen < 8.
          */
         if (devinfo->ver == 6)
            break;
         FALLTHROUGH;
      case BRW_OPCODE_BFI1:
      case BRW_OPCODE_ASR:
      case BRW_OPCODE_SHL:
      case BRW_OPCODE_SHR:
      case BRW_OPCODE_SUBB:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_MACH:
      case BRW_OPCODE_MUL:
      case SHADER_OPCODE_MULH:
      case BRW_OPCODE_ADD:
      case BRW_OPCODE_OR:
      case BRW_OPCODE_AND:
      case BRW_OPCODE_XOR:
      case BRW_OPCODE_ADDC:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            /* Fit this constant in by commuting the operands.
             * Exception: we can't do this for 32-bit integer MUL/MACH
             * because it's asymmetric.
             *
             * The BSpec says for Broadwell that
             *
             *    "When multiplying DW x DW, the dst cannot be accumulator."
             *
             * Integer MUL with a non-accumulator destination will be lowered
             * by lower_integer_multiplication(), so don't restrict it.
             */
            if (((inst->opcode == BRW_OPCODE_MUL &&
                  inst->dst.is_accumulator()) ||
                 inst->opcode == BRW_OPCODE_MACH) &&
                (inst->src[1].type == BRW_REGISTER_TYPE_D ||
                 inst->src[1].type == BRW_REGISTER_TYPE_UD))
               break;
            inst->src[0] = inst->src[1];
            inst->src[1] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
      case BRW_OPCODE_IF:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            enum brw_conditional_mod new_cmod;

            new_cmod = brw_swap_cmod(inst->conditional_mod);
            if (new_cmod != BRW_CONDITIONAL_NONE) {
               /* Fit this constant in by swapping the operands and
                * flipping the test.
                */
               inst->src[0] = inst->src[1];
               inst->src[1] = val;
               inst->conditional_mod = new_cmod;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_SEL:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM &&
                    (inst->conditional_mod == BRW_CONDITIONAL_NONE ||
                     /* Only GE and L are commutative. */
                     inst->conditional_mod == BRW_CONDITIONAL_GE ||
                     inst->conditional_mod == BRW_CONDITIONAL_L)) {
            inst->src[0] = inst->src[1];
            inst->src[1] = val;

            /* If this was predicated, flipping operands means
             * we also need to flip the predicate.
             */
            if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
               inst->predicate_inverse =
                  !inst->predicate_inverse;
            }
            progress = true;
         }
         break;

      case FS_OPCODE_FB_WRITE_LOGICAL:
         /* The stencil and omask sources of FS_OPCODE_FB_WRITE_LOGICAL are
          * bit-cast using a strided region so they cannot be immediates.
          */
         if (i != FB_WRITE_LOGICAL_SRC_SRC_STENCIL &&
             i != FB_WRITE_LOGICAL_SRC_OMASK) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case SHADER_OPCODE_TEX_LOGICAL:
      case SHADER_OPCODE_TXD_LOGICAL:
      case SHADER_OPCODE_TXF_LOGICAL:
      case SHADER_OPCODE_TXL_LOGICAL:
      case SHADER_OPCODE_TXS_LOGICAL:
      case FS_OPCODE_TXB_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      case SHADER_OPCODE_TXF_UMS_LOGICAL:
      case SHADER_OPCODE_TXF_MCS_LOGICAL:
      case SHADER_OPCODE_LOD_LOGICAL:
      case SHADER_OPCODE_TG4_LOGICAL:
      case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      case SHADER_OPCODE_BROADCAST:
         inst->src[i] = val;
         progress = true;
         break;

      case BRW_OPCODE_MAD:
      case BRW_OPCODE_LRP:
         inst->src[i] = val;
         progress = true;
         break;

      default:
         break;
      }
   }

   return progress;
}

static bool
can_propagate_from(fs_inst *inst)
{
   return (inst->opcode == BRW_OPCODE_MOV &&
           inst->dst.file == VGRF &&
           ((inst->src[0].file == VGRF &&
             !regions_overlap(inst->dst, inst->size_written,
                              inst->src[0], inst->size_read(0))) ||
            inst->src[0].file == ATTR ||
            inst->src[0].file == UNIFORM ||
            inst->src[0].file == IMM ||
            (inst->src[0].file == FIXED_GRF &&
             inst->src[0].is_contiguous())) &&
           inst->src[0].type == inst->dst.type &&
           /* Subset of !is_partial_write() conditions. */
           !((inst->predicate && inst->opcode != BRW_OPCODE_SEL) ||
             !inst->dst.is_contiguous())) ||
          is_identity_payload(FIXED_GRF, inst);
}

/* Walks a basic block and does copy propagation on it using the acp
 * list.
 */
bool
fs_visitor::opt_copy_propagation_local(void *copy_prop_ctx, bblock_t *block,
                                       exec_list *acp)
{
   bool progress = false;

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Try propagating into this instruction. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != VGRF)
            continue;

         foreach_in_list(acp_entry, entry, &acp[inst->src[i].nr % ACP_HASH_SIZE]) {
            if (try_constant_propagate(inst, entry))
               progress = true;
            else if (try_copy_propagate(inst, i, entry))
               progress = true;
         }
      }

      /* Kill the destination from the ACP. */
      if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
         foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.nr % ACP_HASH_SIZE]) {
            if (regions_overlap(entry->dst, entry->size_written,
                                inst->dst, inst->size_written))
               entry->remove();
         }

         /* Oops, we only have the chaining hash based on the destination, not
          * the source, so walk across the entire table.
          */
         for (int i = 0; i < ACP_HASH_SIZE; i++) {
            foreach_in_list_safe(acp_entry, entry, &acp[i]) {
               /* Make sure we kill the entry if this instruction overwrites
                * _any_ of the registers that it reads.
                */
               if (regions_overlap(entry->src, entry->size_read,
                                   inst->dst, inst->size_written))
                  entry->remove();
            }
         }
      }

      /* If this instruction's source could potentially be folded into the
       * operand of another instruction, add it to the ACP.
       */
      if (can_propagate_from(inst)) {
         acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
         entry->dst = inst->dst;
         entry->src = inst->src[0];
         entry->size_written = inst->size_written;
         for (unsigned i = 0; i < inst->sources; i++)
            entry->size_read += inst->size_read(i);
         entry->opcode = inst->opcode;
         entry->saturate = inst->saturate;
         entry->is_partial_write = inst->is_partial_write();
         acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
      } else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
                 inst->dst.file == VGRF) {
         int offset = 0;
         for (int i = 0; i < inst->sources; i++) {
            int effective_width = i < inst->header_size ? 8 : inst->exec_size;
            assert(effective_width * type_sz(inst->src[i].type) % REG_SIZE == 0);
            const unsigned size_written = effective_width *
                                          type_sz(inst->src[i].type);
            if (inst->src[i].file == VGRF ||
                (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].is_contiguous())) {
               acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
               entry->dst = byte_offset(inst->dst, offset);
               entry->src = inst->src[i];
               entry->size_written = size_written;
               entry->size_read = inst->size_read(i);
               entry->opcode = inst->opcode;
               if (!entry->dst.equals(inst->src[i])) {
                  acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
               } else {
                  ralloc_free(entry);
               }
            }
            offset += size_written;
         }
      }
   }

   return progress;
}

bool
fs_visitor::opt_copy_propagation()
{
   bool progress = false;
   void *copy_prop_ctx = ralloc_context(NULL);
   exec_list *out_acp[cfg->num_blocks];

   for (int i = 0; i < cfg->num_blocks; i++)
      out_acp[i] = new exec_list [ACP_HASH_SIZE];

   const fs_live_variables &live = live_analysis.require();

   /* First, walk through each block doing local copy propagation and getting
    * the set of copies available at the end of the block.
    */
   foreach_block (block, cfg) {
      progress = opt_copy_propagation_local(copy_prop_ctx, block,
                                            out_acp[block->num]) || progress;

      /* If the destination of an ACP entry exists only within this block,
       * then there's no need to keep it for dataflow analysis.  We can delete
       * it from the out_acp table and avoid growing the bitsets any bigger
       * than we absolutely have to.
       *
       * Because nothing in opt_copy_propagation_local touches the block
       * start/end IPs and opt_copy_propagation_local is incapable of
       * extending the live range of an ACP destination beyond the block,
       * it's safe to use the liveness information in this way.
       */
      for (unsigned a = 0; a < ACP_HASH_SIZE; a++) {
         foreach_in_list_safe(acp_entry, entry, &out_acp[block->num][a]) {
            assert(entry->dst.file == VGRF);
            if (block->start_ip <= live.vgrf_start[entry->dst.nr] &&
                live.vgrf_end[entry->dst.nr] <= block->end_ip)
               entry->remove();
         }
      }
   }

   /* Do dataflow analysis for those available copies. */
   fs_copy_prop_dataflow dataflow(copy_prop_ctx, cfg, live, out_acp);

   /* Next, re-run local copy propagation, this time with the set of copies
    * provided by the dataflow analysis available at the start of a block.
    */
   foreach_block (block, cfg) {
      exec_list in_acp[ACP_HASH_SIZE];

      for (int i = 0; i < dataflow.num_acp; i++) {
         if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
            struct acp_entry *entry = dataflow.acp[i];
            in_acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
         }
      }

      progress = opt_copy_propagation_local(copy_prop_ctx, block, in_acp) ||
                 progress;
   }

   for (int i = 0; i < cfg->num_blocks; i++)
      delete [] out_acp[i];
   ralloc_free(copy_prop_ctx);

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}