/* -*- c++ -*- */
/*
 * Copyright © 2010-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include "brw_ir.h"
#include "brw_ir_allocator.h"

struct fs_inst : public exec_node {
private:
   fs_inst &operator=(const fs_inst &);

   void init(enum opcode opcode, uint8_t exec_width, const brw_reg &dst,
             const brw_reg *src, unsigned sources);

public:
   DECLARE_RALLOC_CXX_OPERATORS(fs_inst)

   fs_inst();
   fs_inst(enum opcode opcode, uint8_t exec_size);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg &src0);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg &src0, const brw_reg &src1);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg &src0, const brw_reg &src1, const brw_reg &src2);
   fs_inst(enum opcode opcode, uint8_t exec_size, const brw_reg &dst,
           const brw_reg src[], unsigned sources);
   fs_inst(const fs_inst &that);
   ~fs_inst();

   void resize_sources(uint8_t num_sources);

   bool is_send_from_grf() const;
   bool is_payload(unsigned arg) const;
   bool is_partial_write() const;
   unsigned components_read(unsigned i) const;
   unsigned size_read(const struct intel_device_info *devinfo, int arg) const;
   bool can_do_source_mods(const struct intel_device_info *devinfo) const;
   bool can_do_cmod() const;
   bool can_change_types() const;
   bool has_source_and_destination_hazard() const;

   bool is_3src(const struct brw_compiler *compiler) const;
   bool is_math() const;
   bool is_control_flow_begin() const;
   bool is_control_flow_end() const;
   bool is_control_flow() const;
   bool is_commutative() const;
   bool is_raw_move() const;
   bool can_do_saturate() const;
   bool reads_accumulator_implicitly() const;
   bool writes_accumulator_implicitly(const struct intel_device_info *devinfo) const;

   /**
    * Instructions that use indirect addressing have additional register
    * regioning restrictions.
    */
   bool uses_indirect_addressing() const;

   void remove(bblock_t *block, bool defer_later_block_ip_updates = false);
   void insert_after(bblock_t *block, fs_inst *inst);
   void insert_before(bblock_t *block, fs_inst *inst);

   /**
    * True if the instruction has side effects other than writing to
    * its destination registers.  You are expected not to reorder or
    * optimize these out unless you know what you are doing.
    */
   bool has_side_effects() const;

   /**
    * True if the instruction might be affected by side effects of other
    * instructions.
    */
   bool is_volatile() const;

   /**
    * Return whether \p arg is a control source of a virtual instruction which
    * shouldn't contribute to the execution type and usual regioning
    * restriction calculations of arithmetic instructions.
    */
   bool is_control_source(unsigned arg) const;

   /**
    * Return the subset of flag registers read by the instruction as a bitset
    * with byte granularity.
    */
   unsigned flags_read(const intel_device_info *devinfo) const;

   /**
    * Return the subset of flag registers updated by the instruction (either
    * partially or fully) as a bitset with byte granularity.
    */
   unsigned flags_written(const intel_device_info *devinfo) const;

   /**
    * Return true if this instruction is a sampler message gathering residency
    * data.
    */
   bool has_sampler_residency() const;

   /**
    * Return true if this instruction is using the address register
    * implicitly.
    */
   bool uses_address_register_implicitly() const;

   uint8_t sources; /**< Number of brw_reg sources. */

   /**
    * Execution size of the instruction.  This is used by the generator to
    * generate the correct binary for the given instruction.  Current valid
    * values are 1, 4, 8, 16, 32.
    */
   uint8_t exec_size;

   /**
    * Channel group from the hardware execution and predication mask that
    * should be applied to the instruction.  The subset of channel enable
    * signals (calculated from the EU control flow and predication state)
    * given by [group, group + exec_size) will be used to mask GRF writes and
    * any other side effects of the instruction.
    */
   uint8_t group;

   uint8_t mlen; /**< SEND message length */
   uint8_t ex_mlen; /**< SENDS extended message length */
   uint8_t sfid; /**< SFID for SEND instructions */
   /** The number of hardware registers used for a message header. */
   uint8_t header_size;
   uint8_t target; /**< MRT target. */
   uint32_t desc; /**< SEND[S] message descriptor immediate */
   uint32_t ex_desc; /**< SEND[S] extended message descriptor immediate */

   uint32_t offset; /**< spill/unspill offset or texture offset bitfield */
   unsigned size_written; /**< Data written to the destination register in bytes. */

   enum opcode opcode; /* BRW_OPCODE_* or FS_OPCODE_* */
   enum brw_conditional_mod conditional_mod; /**< BRW_CONDITIONAL_* */
   enum brw_predicate predicate;

   tgl_swsb sched; /**< Scheduling info. */

   union {
      struct {
         /* Chooses which flag subregister (f0.0 to f3.1) is used for
          * conditional mod and predication.
          */
         unsigned flag_subreg:3;

         /**
          * Systolic depth used by DPAS instruction.
          */
         unsigned sdepth:4;

         /**
          * Repeat count used by DPAS instruction.
          */
         unsigned rcount:4;

         unsigned pad:2;

         bool predicate_inverse:1;
         bool writes_accumulator:1; /**< instruction implicitly writes accumulator */
         bool force_writemask_all:1;
         bool no_dd_clear:1;
         bool no_dd_check:1;
         bool saturate:1;
         bool shadow_compare:1;
         bool check_tdr:1; /**< Only valid for SEND; turns it into a SENDC */
         bool send_has_side_effects:1; /**< Only valid for SHADER_OPCODE_SEND */
         bool send_is_volatile:1; /**< Only valid for SHADER_OPCODE_SEND */
         bool send_ex_bso:1; /**< Only for SHADER_OPCODE_SEND, use extended
                              *   bindless surface offset (26 bits instead of
                              *   20 bits)
                              */
         /**
          * The predication mask applied to this instruction is guaranteed to
          * be uniform and a superset of the execution mask of the present block.
          * No currently enabled channel will be disabled by the predicate.
          */
         bool predicate_trivial:1;
         bool eot:1;
         bool last_rt:1;
         bool pi_noperspective:1;   /**< Pixel interpolator noperspective flag */
         bool keep_payload_trailing_zeros:1;
         /**
          * Whether the parameters of the SEND instructions are built with
          * NoMask (for A32 messages this covers only the surface handle, for
          * A64 messages this covers the load address).
          */
         bool has_no_mask_send_params:1;
      };
      uint32_t bits;
   };

   brw_reg dst;
   brw_reg *src;
   brw_reg builtin_src[4];

#ifndef NDEBUG
   /** @{
    * Annotation for the generated IR.
    */
   const char *annotation;
   /** @} */
#endif
};
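
/* Construction sketch (not part of the original header): instances are
 * ralloc'ed through the placement new provided by
 * DECLARE_RALLOC_CXX_OPERATORS; `mem_ctx`, `dst`, `src0` and `src1` are
 * assumed to come from the surrounding compilation context:
 *
 *    fs_inst *add =
 *       new (mem_ctx) fs_inst(BRW_OPCODE_ADD, 16, dst, src0, src1);
 *    add->conditional_mod = BRW_CONDITIONAL_NZ;
 */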

/**
 * Make the execution of \p inst dependent on the evaluation of a possibly
 * inverted predicate.
 */
static inline fs_inst *
set_predicate_inv(enum brw_predicate pred, bool inverse,
                  fs_inst *inst)
{
   inst->predicate = pred;
   inst->predicate_inverse = inverse;
   return inst;
}
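
/* Usage sketch (assumes an fs_builder `bld` plus `dst`/`src` registers from
 * the surrounding pass): execute the MOV only in channels where the selected
 * flag bits are *clear*:
 *
 *    set_predicate_inv(BRW_PREDICATE_NORMAL, true, bld.MOV(dst, src));
 */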

/**
 * Make the execution of \p inst dependent on the evaluation of a predicate.
 */
static inline fs_inst *
set_predicate(enum brw_predicate pred, fs_inst *inst)
{
   return set_predicate_inv(pred, false, inst);
}

/**
 * Write the result of evaluating the condition given by \p mod to a flag
 * register.
 */
static inline fs_inst *
set_condmod(enum brw_conditional_mod mod, fs_inst *inst)
{
   inst->conditional_mod = mod;
   return inst;
}

/**
 * Clamp the result of \p inst to the saturation range of its destination
 * datatype.
 */
static inline fs_inst *
set_saturate(bool saturate, fs_inst *inst)
{
   inst->saturate = saturate;
   return inst;
}
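
/* Usage sketch (again assuming a builder `bld` and operand registers from
 * the caller): set a flag from an ALU result, and saturate a copy:
 *
 *    set_condmod(BRW_CONDITIONAL_NZ, bld.AND(bld.null_reg_d(), a, b));
 *    set_saturate(true, bld.MOV(dst, src));
 */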

/**
 * Return the number of dataflow registers written by the instruction (either
 * fully or partially) counted from 'floor(reg_offset(inst->dst) /
 * register_size)'.  The somewhat arbitrary register size unit is 4B for the
 * UNIFORM and IMM files and 32B for all other files.
 */
inline unsigned
regs_written(const fs_inst *inst)
{
   assert(inst->dst.file != UNIFORM && inst->dst.file != IMM);
   return DIV_ROUND_UP(reg_offset(inst->dst) % REG_SIZE +
                       inst->size_written -
                       MIN2(inst->size_written, reg_padding(inst->dst)),
                       REG_SIZE);
}
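
/* Worked example (illustrative numbers): a SIMD16 float write has
 * size_written = 16 * 4 = 64B.  Starting at a 32B register boundary with no
 * padding that is DIV_ROUND_UP(0 + 64, 32) = 2 registers; starting 16B into
 * a register it straddles one more: DIV_ROUND_UP(16 + 64, 32) = 3.
 */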

/**
 * Return the number of dataflow registers read by the instruction (either
 * fully or partially) counted from 'floor(reg_offset(inst->src[i]) /
 * register_size)'.  The somewhat arbitrary register size unit is 4B for the
 * UNIFORM files and 32B for all other files.
 */
inline unsigned
regs_read(const struct intel_device_info *devinfo, const fs_inst *inst, unsigned i)
{
   if (inst->src[i].file == IMM)
      return 1;

   const unsigned reg_size = inst->src[i].file == UNIFORM ? 4 : REG_SIZE;
   return DIV_ROUND_UP(reg_offset(inst->src[i]) % reg_size +
                       inst->size_read(devinfo, i) -
                       MIN2(inst->size_read(devinfo, i), reg_padding(inst->src[i])),
                       reg_size);
}
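
/* Worked example (illustrative numbers): a SIMD8 float VGRF source reads
 * 8 * 4 = 32B, i.e. DIV_ROUND_UP(0 + 32, 32) = 1 register, while a single
 * float pulled from the UNIFORM file uses the 4B unit:
 * DIV_ROUND_UP(0 + 4, 4) = 1.
 */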

static inline enum brw_reg_type
get_exec_type(const fs_inst *inst)
{
   brw_reg_type exec_type = BRW_TYPE_B;

   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file != BAD_FILE &&
          !inst->is_control_source(i)) {
         const brw_reg_type t = get_exec_type(inst->src[i].type);
         if (brw_type_size_bytes(t) > brw_type_size_bytes(exec_type))
            exec_type = t;
         else if (brw_type_size_bytes(t) == brw_type_size_bytes(exec_type) &&
                  brw_type_is_float(t))
            exec_type = t;
      }
   }

   if (exec_type == BRW_TYPE_B)
      exec_type = inst->dst.type;

   assert(exec_type != BRW_TYPE_B);

   /* Promotion of the execution type to 32-bit for conversions from or to
    * half-float seems to be consistent with the following text from the
    * Cherryview PRM Vol. 7, "Execution Data Type":
    *
    * "When single precision and half precision floats are mixed between
    *  source operands or between source and destination operand [..] single
    *  precision float is the execution datatype."
    *
    * and from "Register Region Restrictions":
    *
    * "Conversion between Integer and HF (Half Float) must be DWord aligned
    *  and strided by a DWord on the destination."
    */
   if (brw_type_size_bytes(exec_type) == 2 &&
       inst->dst.type != exec_type) {
      if (exec_type == BRW_TYPE_HF)
         exec_type = BRW_TYPE_F;
      else if (inst->dst.type == BRW_TYPE_HF)
         exec_type = BRW_TYPE_D;
   }

   return exec_type;
}
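
/* Illustration (reading the rules above off the code): for add(8) dst:f,
 * src0:f, src1:w the sources yield F (widest type wins, float breaks ties),
 * while for mov(8) dst:f, src:hf the 2B HF execution type is promoted to F
 * because the conversion crosses a half-float boundary.
 */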

static inline unsigned
get_exec_type_size(const fs_inst *inst)
{
   return brw_type_size_bytes(get_exec_type(inst));
}

static inline bool
is_send(const fs_inst *inst)
{
   return inst->mlen || inst->is_send_from_grf();
}

/**
 * Return whether the instruction isn't an ALU instruction and cannot be
 * assumed to complete in-order.
 */
static inline bool
is_unordered(const intel_device_info *devinfo, const fs_inst *inst)
{
   return is_send(inst) || (devinfo->ver < 20 && inst->is_math()) ||
          inst->opcode == BRW_OPCODE_DPAS ||
          (devinfo->has_64bit_float_via_math_pipe &&
           (get_exec_type(inst) == BRW_TYPE_DF ||
            inst->dst.type == BRW_TYPE_DF));
}

/**
 * Return whether the following regioning restriction applies to the specified
 * instruction.  From the Cherryview PRM Vol 7. "Register Region
 * Restrictions":
 *
 * "When source or destination datatype is 64b or operation is integer DWord
 *  multiply, regioning in Align1 must follow these rules:
 *
 *  1. Source and Destination horizontal stride must be aligned to the same qword.
 *  2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
 *  3. Source and Destination offset must be the same, except the case of
 *     scalar source."
 */
static inline bool
has_dst_aligned_region_restriction(const intel_device_info *devinfo,
                                   const fs_inst *inst,
                                   brw_reg_type dst_type)
{
   const brw_reg_type exec_type = get_exec_type(inst);
   /* Even though the hardware spec claims that "integer DWord multiply"
    * operations are restricted, empirical evidence and the behavior of the
    * simulator suggest that only 32x32-bit integer multiplication is
    * restricted.
    */
   const bool is_dword_multiply = !brw_type_is_float(exec_type) &&
      ((inst->opcode == BRW_OPCODE_MUL &&
        MIN2(brw_type_size_bytes(inst->src[0].type), brw_type_size_bytes(inst->src[1].type)) >= 4) ||
       (inst->opcode == BRW_OPCODE_MAD &&
        MIN2(brw_type_size_bytes(inst->src[1].type), brw_type_size_bytes(inst->src[2].type)) >= 4));

   if (brw_type_size_bytes(dst_type) > 4 || brw_type_size_bytes(exec_type) > 4 ||
       (brw_type_size_bytes(exec_type) == 4 && is_dword_multiply))
      return intel_device_info_is_9lp(devinfo) || devinfo->verx10 >= 125;

   else if (brw_type_is_float(dst_type))
      return devinfo->verx10 >= 125;

   else
      return false;
}
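
/* Reading the cases above off the code: a 64-bit operation (e.g. a :df
 * move) or a 32x32-bit integer multiply is restricted on 9LP parts and on
 * Gfx12.5+, while an instruction with a float destination of 4B or less is
 * restricted only on Gfx12.5+.
 */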

static inline bool
has_dst_aligned_region_restriction(const intel_device_info *devinfo,
                                   const fs_inst *inst)
{
   return has_dst_aligned_region_restriction(devinfo, inst, inst->dst.type);
}

/**
 * Return true if the instruction can be potentially affected by the Xe2+
 * regioning restrictions that apply to integer types smaller than a dword.
 * The restriction isn't quoted here due to its length, see BSpec #56640 for
 * details.
 */
static inline bool
has_subdword_integer_region_restriction(const intel_device_info *devinfo,
                                        const fs_inst *inst,
                                        const brw_reg *srcs, unsigned num_srcs)
{
   if (devinfo->ver >= 20 &&
       brw_type_is_int(inst->dst.type) &&
       MAX2(byte_stride(inst->dst),
            brw_type_size_bytes(inst->dst.type)) < 4) {
      for (unsigned i = 0; i < num_srcs; i++) {
         if (brw_type_is_int(srcs[i].type) &&
             ((brw_type_size_bytes(srcs[i].type) < 4 &&
               byte_stride(srcs[i]) >= 4) ||
              (MAX2(byte_stride(inst->dst),
                    brw_type_size_bytes(inst->dst.type)) == 1 &&
               brw_type_size_bytes(srcs[i].type) == 1 &&
               byte_stride(srcs[i]) >= 2)))
            return true;
      }
   }

   return false;
}

static inline bool
has_subdword_integer_region_restriction(const intel_device_info *devinfo,
                                        const fs_inst *inst)
{
   return has_subdword_integer_region_restriction(devinfo, inst,
                                                  inst->src, inst->sources);
}

/**
 * Return whether the LOAD_PAYLOAD instruction is a plain copy of bits from
 * the specified register file into a VGRF.
 *
 * This implies identity register regions without any source-destination
 * overlap, but otherwise has no implications on the location of sources and
 * destination in the register file: Gathering any number of portions from
 * multiple virtual registers in any order is allowed.
 */
inline bool
is_copy_payload(const struct intel_device_info *devinfo,
                brw_reg_file file, const fs_inst *inst)
{
   if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD ||
       inst->is_partial_write() || inst->saturate ||
       inst->dst.file != VGRF)
      return false;

   for (unsigned i = 0; i < inst->sources; i++) {
      if (inst->src[i].file != file ||
          inst->src[i].abs || inst->src[i].negate)
         return false;

      if (!inst->src[i].is_contiguous())
         return false;

      if (regions_overlap(inst->dst, inst->size_written,
                          inst->src[i], inst->size_read(devinfo, i)))
         return false;
   }

   return true;
}

/**
 * Like is_copy_payload(), but the instruction is required to copy a single
 * contiguous block of registers from the given register file into the
 * destination without any reordering.
 */
inline bool
is_identity_payload(const struct intel_device_info *devinfo,
                    brw_reg_file file, const fs_inst *inst)
{
   if (is_copy_payload(devinfo, file, inst)) {
      brw_reg reg = inst->src[0];

      for (unsigned i = 0; i < inst->sources; i++) {
         reg.type = inst->src[i].type;
         if (!inst->src[i].equals(reg))
            return false;

         reg = byte_offset(reg, inst->size_read(devinfo, i));
      }

      return true;
   } else {
      return false;
   }
}

/**
 * Like is_copy_payload(), but the instruction is required to source data from
 * at least two disjoint VGRFs.
 *
 * This doesn't necessarily rule out the elimination of this instruction
 * through register coalescing, but due to limitations of the register
 * coalesce pass it might be impossible to do so directly until a later stage,
 * when the LOAD_PAYLOAD instruction is unrolled into a sequence of MOV
 * instructions.
 */
inline bool
is_multi_copy_payload(const struct intel_device_info *devinfo,
                      const fs_inst *inst)
{
   if (is_copy_payload(devinfo, VGRF, inst)) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].nr != inst->src[0].nr)
            return true;
      }
   }

   return false;
}

/**
 * Like is_identity_payload(), but the instruction is required to copy the
 * whole contents of a single VGRF into the destination.
 *
 * This means that there is a good chance that the instruction will be
 * eliminated through register coalescing, but it's neither a necessary nor a
 * sufficient condition for that to happen -- E.g. consider the case where
 * source and destination registers diverge due to other instructions in the
 * program overwriting part of their contents, which isn't something we can
 * predict up front based on a cheap strictly local test of the copy
 * instruction.
 */
inline bool
is_coalescing_payload(const struct intel_device_info *devinfo,
                      const brw::simple_allocator &alloc, const fs_inst *inst)
{
   return is_identity_payload(devinfo, VGRF, inst) &&
          inst->src[0].offset == 0 &&
          alloc.sizes[inst->src[0].nr] * REG_SIZE == inst->size_written;
}

bool
has_bank_conflict(const struct brw_isa_info *isa, const fs_inst *inst);

/* Return the subset of flag registers that an instruction could
 * potentially read or write based on the execution controls and flag
 * subregister number of the instruction.
 */
static inline unsigned
brw_fs_flag_mask(const fs_inst *inst, unsigned width)
{
   assert(util_is_power_of_two_nonzero(width));
   const unsigned start = (inst->flag_subreg * 16 + inst->group) &
                          ~(width - 1);
   const unsigned end = start + ALIGN(inst->exec_size, width);
   return ((1 << DIV_ROUND_UP(end, 8)) - 1) & ~((1 << (start / 8)) - 1);
}
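
/* Worked example (illustrative numbers): with flag_subreg = 0, group = 0,
 * exec_size = 16 and width = 16, start = 0 and end = 16, so the mask is
 * ((1 << 2) - 1) & ~0 = 0x3, i.e. the first two bytes of flag bits (f0.0).
 */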

static inline unsigned
brw_fs_bit_mask(unsigned n)
{
   return (n >= CHAR_BIT * sizeof(brw_fs_bit_mask(n)) ? ~0u : (1u << n) - 1);
}
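
/* E.g. brw_fs_bit_mask(3) == 0x7, while brw_fs_bit_mask(32) takes the
 * explicit ~0u path to avoid an undefined full-width shift.
 */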

static inline unsigned
brw_fs_flag_mask(const brw_reg &r, unsigned sz)
{
   if (r.file == ARF) {
      const unsigned start = (r.nr - BRW_ARF_FLAG) * 4 + r.subnr;
      const unsigned end = start + sz;
      return brw_fs_bit_mask(end) & ~brw_fs_bit_mask(start);
   } else {
      return 0;
   }
}
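
/* Worked example (illustrative numbers): for flag register f1.0,
 * r.nr - BRW_ARF_FLAG == 1 and r.subnr == 0, so with sz = 2 the range is
 * bytes [4, 6) and the mask is 0x3f & ~0xf = 0x30.
 */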