• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2011 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 /**
25  * @file brw_vec4_copy_propagation.cpp
26  *
27  * Implements tracking of values copied between registers, and
28  * optimizations based on that: copy propagation and constant
29  * propagation.
30  */
31 
32 #include "brw_vec4.h"
33 #include "brw_cfg.h"
34 #include "brw_eu.h"
35 
36 namespace brw {
37 
/* Per-GRF tracking record for the copy-propagation pass: for each of the
 * four vector components, a pointer to the source of the last direct copy
 * that wrote it (NULL when the value is unknown), plus a bitmask of which
 * components were written by a saturating copy.
 */
struct copy_entry {
   src_reg *value[4];
   int saturatemask;
};
42 
43 static bool
is_direct_copy(vec4_instruction * inst)44 is_direct_copy(vec4_instruction *inst)
45 {
46    return (inst->opcode == BRW_OPCODE_MOV &&
47 	   !inst->predicate &&
48 	   inst->dst.file == VGRF &&
49 	   inst->dst.offset % REG_SIZE == 0 &&
50 	   !inst->dst.reladdr &&
51 	   !inst->src[0].reladdr &&
52 	   (inst->dst.type == inst->src[0].type ||
53             (inst->dst.type == BRW_REGISTER_TYPE_F &&
54              inst->src[0].type == BRW_REGISTER_TYPE_VF)));
55 }
56 
57 static bool
is_dominated_by_previous_instruction(vec4_instruction * inst)58 is_dominated_by_previous_instruction(vec4_instruction *inst)
59 {
60    return (inst->opcode != BRW_OPCODE_DO &&
61 	   inst->opcode != BRW_OPCODE_WHILE &&
62 	   inst->opcode != BRW_OPCODE_ELSE &&
63 	   inst->opcode != BRW_OPCODE_ENDIF);
64 }
65 
66 static bool
is_channel_updated(vec4_instruction * inst,src_reg * values[4],int ch)67 is_channel_updated(vec4_instruction *inst, src_reg *values[4], int ch)
68 {
69    const src_reg *src = values[ch];
70 
71    /* consider GRF only */
72    assert(inst->dst.file == VGRF);
73    if (!src || src->file != VGRF)
74       return false;
75 
76    return regions_overlap(*src, REG_SIZE, inst->dst, inst->size_written) &&
77           (inst->dst.offset != src->offset ||
78            inst->dst.writemask & (1 << BRW_GET_SWZ(src->swizzle, ch)));
79 }
80 
/**
 * Get the origin of a copy as a single register if all components present in
 * the given readmask originate from the same register and have compatible
 * regions, otherwise return a BAD_FILE register.
 */
static src_reg
get_copy_value(const copy_entry &entry, unsigned readmask)
{
   unsigned swz[4] = {};
   src_reg value;

   for (unsigned i = 0; i < 4; i++) {
      if (readmask & (1 << i)) {
         if (entry.value[i]) {
            src_reg src = *entry.value[i];

            if (src.file == IMM) {
               /* Immediates have no meaningful swizzle; use the identity. */
               swz[i] = i;
            } else {
               swz[i] = BRW_GET_SWZ(src.swizzle, i);
               /* Overwrite the original swizzle so the src_reg::equals call
                * below doesn't care about it, the correct swizzle will be
                * calculated once the swizzles of all components are known.
                */
               src.swizzle = BRW_SWIZZLE_XYZW;
            }

            if (value.file == BAD_FILE) {
               /* First component seen: remember the candidate origin. */
               value = src;
            } else if (!value.equals(src)) {
               /* Components come from different registers/regions: give up. */
               return src_reg();
            }
         } else {
            /* A component in the readmask has no tracked copy source. */
            return src_reg();
         }
      }
   }

   /* Rebuild the swizzle from the per-component origins, composed with the
    * swizzle implied by the readmask itself.
    */
   return swizzle(value,
                  brw_compose_swizzle(brw_swizzle_for_mask(readmask),
                                      BRW_SWIZZLE4(swz[0], swz[1],
                                                   swz[2], swz[3])));
}
124 
/**
 * Try to replace source \p arg of \p inst with the immediate value tracked
 * in \p entry.  Returns true and rewrites the source on success, false
 * (leaving \p inst unchanged) otherwise.
 *
 * For commutative two-source opcodes the constant may be moved into src1 by
 * swapping the operands (adjusting the conditional mod or predicate where
 * needed), since the hardware only accepts an immediate in the last source.
 */
static bool
try_constant_propagate(vec4_instruction *inst,
                       int arg, const copy_entry *entry)
{
   /* For constant propagation, we only handle the same constant
    * across all 4 channels.  Some day, we should handle the 8-bit
    * float vector format, which would let us constant propagate
    * vectors better.
    * We could be more aggressive here -- some channels might not get used
    * based on the destination writemask.
    */
   src_reg value =
      get_copy_value(*entry,
                     brw_apply_inv_swizzle_to_mask(inst->src[arg].swizzle,
                                                   WRITEMASK_XYZW));

   if (value.file != IMM)
      return false;

   /* 64-bit types can't be used except for one-source instructions, which
    * higher levels should have constant folded away, so there's no point in
    * propagating immediates here.
    */
   if (type_sz(value.type) == 8 || type_sz(inst->src[arg].type) == 8)
      return false;

   if (value.type == BRW_REGISTER_TYPE_VF) {
      /* The result of bit-casting the component values of a vector float
       * cannot in general be represented as an immediate.
       */
      if (inst->src[arg].type != BRW_REGISTER_TYPE_F)
         return false;
   } else {
      value.type = inst->src[arg].type;
   }

   /* Fold the source modifiers into the immediate itself; bail if the
    * modifier cannot be applied to this immediate type.
    */
   if (inst->src[arg].abs) {
      if (!brw_abs_immediate(value.type, &value.as_brw_reg()))
         return false;
   }

   if (inst->src[arg].negate) {
      if (!brw_negate_immediate(value.type, &value.as_brw_reg()))
         return false;
   }

   value = swizzle(value, inst->src[arg].swizzle);

   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
   case SHADER_OPCODE_BROADCAST:
      inst->src[arg] = value;
      return true;

   case VEC4_OPCODE_UNTYPED_ATOMIC:
      /* Only the second source (the atomic operand) may be an immediate. */
      if (arg == 1) {
         inst->src[arg] = value;
         return true;
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      /* These opcodes never accept the propagated immediate. */
         break;
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SUBB:
      /* Non-commutative: the immediate is only allowed in src1. */
      if (arg == 1) {
         inst->src[arg] = value;
         return true;
      }
      break;

   case BRW_OPCODE_MACH:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADDC:
      if (arg == 1) {
	 inst->src[arg] = value;
	 return true;
      } else if (arg == 0 && inst->src[1].file != IMM) {
	 /* Fit this constant in by commuting the operands.  Exception: we
	  * can't do this for 32-bit integer MUL/MACH because it's asymmetric.
	  */
	 if ((inst->opcode == BRW_OPCODE_MUL ||
              inst->opcode == BRW_OPCODE_MACH) &&
	     (inst->src[1].type == BRW_REGISTER_TYPE_D ||
	      inst->src[1].type == BRW_REGISTER_TYPE_UD))
	    break;
	 inst->src[0] = inst->src[1];
	 inst->src[1] = value;
	 return true;
      }
      break;
   case GS_OPCODE_SET_WRITE_OFFSET:
      /* This is just a multiply by a constant with special strides.
       * The generator will handle immediates in both arguments (generating
       * a single MOV of the product).  So feel free to propagate in src0.
       */
      inst->src[arg] = value;
      return true;

   case BRW_OPCODE_CMP:
      if (arg == 1) {
	 inst->src[arg] = value;
	 return true;
      } else if (arg == 0 && inst->src[1].file != IMM) {
	 enum brw_conditional_mod new_cmod;

	 new_cmod = brw_swap_cmod(inst->conditional_mod);
	 if (new_cmod != BRW_CONDITIONAL_NONE) {
	    /* Fit this constant in by swapping the operands and
	     * flipping the test.
	     */
	    inst->src[0] = inst->src[1];
	    inst->src[1] = value;
	    inst->conditional_mod = new_cmod;
	    return true;
	 }
      }
      break;

   case BRW_OPCODE_SEL:
      if (arg == 1) {
	 inst->src[arg] = value;
	 return true;
      } else if (arg == 0 && inst->src[1].file != IMM) {
	 inst->src[0] = inst->src[1];
	 inst->src[1] = value;

	 /* If this was predicated, flipping operands means
	  * we also need to flip the predicate.
	  */
	 if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
	    inst->predicate_inverse = !inst->predicate_inverse;
	 }
	 return true;
      }
      break;

   default:
      break;
   }

   return false;
}
282 
283 static bool
is_align1_opcode(unsigned opcode)284 is_align1_opcode(unsigned opcode)
285 {
286    switch (opcode) {
287    case VEC4_OPCODE_DOUBLE_TO_F32:
288    case VEC4_OPCODE_DOUBLE_TO_D32:
289    case VEC4_OPCODE_DOUBLE_TO_U32:
290    case VEC4_OPCODE_TO_DOUBLE:
291    case VEC4_OPCODE_PICK_LOW_32BIT:
292    case VEC4_OPCODE_PICK_HIGH_32BIT:
293    case VEC4_OPCODE_SET_LOW_32BIT:
294    case VEC4_OPCODE_SET_HIGH_32BIT:
295       return true;
296    default:
297       return false;
298    }
299 }
300 
/**
 * Try to replace source \p arg of \p inst with the UNIFORM/VGRF/ATTR value
 * tracked in \p entry, composing swizzles and folding source modifiers.
 * Returns true and rewrites the source on success; returns false without
 * modifying \p inst when any hardware regioning, type, or modifier
 * restriction would be violated.
 */
static bool
try_copy_propagate(const struct brw_compiler *compiler,
                   vec4_instruction *inst, int arg,
                   const copy_entry *entry, int attributes_per_reg)
{
   const struct intel_device_info *devinfo = compiler->devinfo;

   /* Build up the value we are propagating as if it were the source of a
    * single MOV
    */
   src_reg value =
      get_copy_value(*entry,
                     brw_apply_inv_swizzle_to_mask(inst->src[arg].swizzle,
                                                   WRITEMASK_XYZW));

   /* Check that we can propagate that value */
   if (value.file != UNIFORM &&
       value.file != VGRF &&
       value.file != ATTR)
      return false;

   /* Instructions that write 2 registers also need to read 2 registers. Make
    * sure we don't break that restriction by copy propagating from a uniform.
    */
   if (inst->size_written > REG_SIZE && is_uniform(value))
      return false;

   /* There is a regioning restriction such that if execsize == width
    * and hstride != 0 then the vstride can't be 0. When we split instructions
    * that take a single-precision source (like F->DF conversions) we end up
    * with a 4-wide source on an instruction with an execution size of 4.
    * If we then copy-propagate the source from a uniform we also end up with a
    * vstride of 0 and we violate the restriction.
    */
   if (inst->exec_size == 4 && value.file == UNIFORM &&
       type_sz(value.type) == 4)
      return false;

   /* If the type of the copy value is different from the type of the
    * instruction then the swizzles and writemasks involved don't have the same
    * meaning and simply replacing the source would produce different semantics.
    */
   if (type_sz(value.type) != type_sz(inst->src[arg].type))
      return false;

   /* Only register-aligned regions are handled. */
   if (inst->src[arg].offset % REG_SIZE || value.offset % REG_SIZE)
      return false;

   bool has_source_modifiers = value.negate || value.abs;

   /* gfx6 math and gfx7+ SENDs from GRFs ignore source modifiers on
    * instructions.
    */
   if (has_source_modifiers && !inst->can_do_source_mods(devinfo))
      return false;

   /* Reject cases that would violate register regioning restrictions. */
   if ((value.file == UNIFORM || value.swizzle != BRW_SWIZZLE_XYZW) &&
       ((devinfo->ver == 6 && inst->is_math()) ||
        inst->is_send_from_grf() ||
        inst->uses_indirect_addressing())) {
      return false;
   }

   /* Changing the source type to carry the modifiers is only legal when the
    * instruction is known to tolerate a retype of all operands (see below).
    */
   if (has_source_modifiers &&
       value.type != inst->src[arg].type &&
       !inst->can_change_types())
      return false;

   if (has_source_modifiers &&
       (inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_WRITE ||
        inst->opcode == VEC4_OPCODE_PICK_HIGH_32BIT))
      return false;

   unsigned composed_swizzle = brw_compose_swizzle(inst->src[arg].swizzle,
                                                   value.swizzle);

   /* Instructions that operate on vectors in ALIGN1 mode will ignore swizzles
    * so copy-propagation won't be safe if the composed swizzle is anything
    * other than the identity.
    */
   if (is_align1_opcode(inst->opcode) && composed_swizzle != BRW_SWIZZLE_XYZW)
      return false;

   if (inst->is_3src(compiler) &&
       (value.file == UNIFORM ||
        (value.file == ATTR && attributes_per_reg != 1)) &&
       !brw_is_single_value_swizzle(composed_swizzle))
      return false;

   if (inst->is_send_from_grf())
      return false;

   /* we can't generally copy-propagate UD negations because we
    * end up accessing the resulting values as signed integers
    * instead. See also resolve_ud_negate().
    */
   if (value.negate &&
       value.type == BRW_REGISTER_TYPE_UD)
      return false;

   /* Don't report progress if this is a noop. */
   if (value.equals(inst->src[arg]))
      return false;

   const unsigned dst_saturate_mask = inst->dst.writemask &
      brw_apply_swizzle_to_mask(inst->src[arg].swizzle, entry->saturatemask);

   if (dst_saturate_mask) {
      /* We either saturate all or nothing. */
      if (dst_saturate_mask != inst->dst.writemask)
         return false;

      /* Limit saturate propagation only to SEL with src1 bounded within 0.0
       * and 1.0, otherwise skip copy propagate altogether.
       */
      switch(inst->opcode) {
      case BRW_OPCODE_SEL:
         if (arg != 0 ||
             inst->src[0].type != BRW_REGISTER_TYPE_F ||
             inst->src[1].file != IMM ||
             inst->src[1].type != BRW_REGISTER_TYPE_F ||
             inst->src[1].f < 0.0 ||
             inst->src[1].f > 1.0) {
            return false;
         }
         /* Move the saturate from the copy onto the SEL itself. */
         if (!inst->saturate)
            inst->saturate = true;
         break;
      default:
         return false;
      }
   }

   /* Build the final value */
   if (inst->src[arg].abs) {
      value.negate = false;
      value.abs = true;
   }
   if (inst->src[arg].negate)
      value.negate = !value.negate;

   value.swizzle = composed_swizzle;
   if (has_source_modifiers &&
       value.type != inst->src[arg].type) {
      assert(inst->can_change_types());
      /* Retype every source and the destination so the instruction stays
       * internally consistent with the propagated value's type.
       */
      for (int i = 0; i < 3; i++) {
         inst->src[i].type = value.type;
      }
      inst->dst.type = value.type;
   } else {
      value.type = inst->src[arg].type;
   }

   inst->src[arg] = value;
   return true;
}
458 
/**
 * Track values copied between registers within each basic block and rewrite
 * instruction sources to read the original value directly (copy
 * propagation); when \p do_constant_prop is set, tracked immediates are
 * folded into sources first (constant propagation).
 *
 * Returns true if any instruction was modified.
 */
bool
vec4_visitor::opt_copy_propagation(bool do_constant_prop)
{
   /* If we are in dual instanced or single mode, then attributes are going
    * to be interleaved, so one register contains two attribute slots.
    */
   const int attributes_per_reg =
      prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT ? 1 : 2;
   bool progress = false;
   /* One tracking entry per allocated GRF.  NOTE(review): this is a
    * variable-length array (a GCC/Clang extension in C++); large shaders
    * place the whole table on the stack.
    */
   struct copy_entry entries[alloc.total_size];

   memset(&entries, 0, sizeof(entries));

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      /* This pass only works on basic blocks.  If there's flow
       * control, throw out all our information and start from
       * scratch.
       *
       * This should really be fixed by using a structure like in
       * src/glsl/opt_copy_propagation.cpp to track available copies.
       */
      if (!is_dominated_by_previous_instruction(inst)) {
	 memset(&entries, 0, sizeof(entries));
	 continue;
      }

      /* For each source arg, see if each component comes from a copy
       * from the same type file (IMM, VGRF, UNIFORM), and try
       * optimizing out access to the copy result
       */
      for (int i = 2; i >= 0; i--) {
	 /* Copied values end up in GRFs, and we don't track reladdr
	  * accesses.
	  */
	 if (inst->src[i].file != VGRF ||
	     inst->src[i].reladdr)
	    continue;

         /* We only handle register-aligned single GRF copies. */
         if (inst->size_read(i) != REG_SIZE ||
             inst->src[i].offset % REG_SIZE)
            continue;

         const unsigned reg = (alloc.offsets[inst->src[i].nr] +
                               inst->src[i].offset / REG_SIZE);
         const copy_entry &entry = entries[reg];

         /* Constant propagation takes priority over copy propagation. */
         if (do_constant_prop && try_constant_propagate(inst, i, &entry))
            progress = true;
         else if (try_copy_propagate(compiler, inst, i, &entry, attributes_per_reg))
	    progress = true;
      }

      /* Track available source registers. */
      if (inst->dst.file == VGRF) {
	 const int reg =
            alloc.offsets[inst->dst.nr] + inst->dst.offset / REG_SIZE;

	 /* Update our destination's current channel values.  For a direct copy,
	  * the value is the newly propagated source.  Otherwise, we don't know
	  * the new value, so clear it.
	  */
	 bool direct_copy = is_direct_copy(inst);
         entries[reg].saturatemask &= ~inst->dst.writemask;
	 for (int i = 0; i < 4; i++) {
	    if (inst->dst.writemask & (1 << i)) {
               entries[reg].value[i] = direct_copy ? &inst->src[0] : NULL;
               entries[reg].saturatemask |=
                  inst->saturate && direct_copy ? 1 << i : 0;
            }
	 }

	 /* Clear the records for any registers whose current value came from
	  * our destination's updated channels, as the two are no longer equal.
	  */
	 if (inst->dst.reladdr)
	    memset(&entries, 0, sizeof(entries));
	 else {
	    for (unsigned i = 0; i < alloc.total_size; i++) {
	       for (int j = 0; j < 4; j++) {
		  if (is_channel_updated(inst, entries[i].value, j)) {
		     entries[i].value[j] = NULL;
		     entries[i].saturatemask &= ~(1 << j);
                  }
	       }
	    }
	 }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}
555 
556 } /* namespace brw */
557