• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #pragma once
29 
30 struct bblock_t;
31 
32 #ifdef __cplusplus
33 
34 #include "brw_ir.h"
35 #include "brw_ir_analysis.h"
36 #include "brw_ir_fs.h"
37 
38 struct bblock_t;
39 
/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges but not
 * the other way around -- I.e. the logical CFG is a subset of the physical
 * one.
 */
enum bblock_link_kind {
   /* Edge present in the original scalar program.  Must stay 0 so the
    * 'l.kind <= k' membership test (see bblock_link::kind) works.
    */
   bblock_link_logical = 0,
   /* Edge introduced by SIMD vectorization only. */
   bblock_link_physical
};
57 
/**
 * A single CFG edge: an intrusive list node plus the block at the other
 * end of the edge and the edge's kind.
 */
struct bblock_link {
   DECLARE_RALLOC_CXX_OPERATORS(bblock_link)

   bblock_link(bblock_t *block, enum bblock_link_kind kind)
      : block(block), kind(kind)
   {
   }

   /* Intrusive list node; presumably threaded onto a block's parents or
    * children list (see bblock_t) -- confirm at the construction sites.
    */
   struct exec_node link;
   /* The block at the other end of this edge. */
   struct bblock_t *block;

   /* Type of this CFG edge.  Because bblock_link_logical also implies
    * bblock_link_physical, the proper way to test for membership of edge 'l'
    * in CFG kind 'k' is 'l.kind <= k'.
    */
   enum bblock_link_kind kind;
};
75 
76 struct fs_visitor;
77 struct cfg_t;
78 
/**
 * A basic block of the control flow graph: a run of instructions plus the
 * logical/physical edges connecting it to its predecessor and successor
 * blocks.
 */
struct bblock_t {
   DECLARE_RALLOC_CXX_OPERATORS(bblock_t)

   explicit bblock_t(cfg_t *cfg);

   /* Create an edge of the given kind from this block to \p successor,
    * allocated out of \p mem_ctx.
    */
   void add_successor(void *mem_ctx, bblock_t *successor,
                      enum bblock_link_kind kind);
   /* Edge-membership queries restricted to edges of at most \p kind. */
   bool is_predecessor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const;
   bool is_successor_of(const bblock_t *block,
                        enum bblock_link_kind kind) const;
   /* Whether this block and \p that can be merged into a single block. */
   bool can_combine_with(const bblock_t *that) const;
   void combine_with(bblock_t *that);
   void dump(FILE *file = stderr) const;

   /* First/last instruction of the block (const and non-const forms). */
   fs_inst *start();
   const fs_inst *start() const;
   fs_inst *end();
   const fs_inst *end() const;

   /* Neighboring blocks in list order, or NULL at either end. */
   bblock_t *next();
   const bblock_t *next() const;
   bblock_t *prev();
   const bblock_t *prev() const;

   /* Whether the block begins with DO/ENDIF or ends with
    * IF/ELSE/WHILE/BREAK/CONTINUE, respectively.
    */
   bool starts_with_control_flow() const;
   bool ends_with_control_flow() const;

   /* start()/end() with a single leading/trailing control-flow
    * instruction skipped.
    */
   fs_inst *first_non_control_flow_inst();
   fs_inst *last_non_control_flow_inst();

private:
   /**
    * \sa unlink_parents, unlink_children
    */
   void unlink_list(exec_list *);

public:
   /* Detach all incoming edges. */
   void unlink_parents()
   {
      unlink_list(&parents);
   }

   /* Detach all outgoing edges. */
   void unlink_children()
   {
      unlink_list(&children);
   }

   /* Node in cfg_t::block_list. */
   struct exec_node link;
   /* Owning CFG. */
   struct cfg_t *cfg;

   /* Instruction indices (IPs) delimiting this block; kept current by
    * cfg_t::adjust_block_ips().
    */
   int start_ip;
   int end_ip;

   /**
    * Change in end_ip since the last time IPs of later blocks were updated.
    */
   int end_ip_delta;

   /* Instructions belonging to this block. */
   struct exec_list instructions;
   /* Incoming and outgoing edges, as lists of bblock_link. */
   struct exec_list parents;
   struct exec_list children;
   /* Block index; used to subscript cfg_t::blocks and
    * brw::idom_tree::parents.
    */
   int num;
};
143 
144 static inline fs_inst *
bblock_start(struct bblock_t * block)145 bblock_start(struct bblock_t *block)
146 {
147    return (fs_inst *)exec_list_get_head(&block->instructions);
148 }
149 
150 static inline const fs_inst *
bblock_start_const(const struct bblock_t * block)151 bblock_start_const(const struct bblock_t *block)
152 {
153    return (const fs_inst *)exec_list_get_head_const(&block->instructions);
154 }
155 
156 static inline fs_inst *
bblock_end(struct bblock_t * block)157 bblock_end(struct bblock_t *block)
158 {
159    return (fs_inst *)exec_list_get_tail(&block->instructions);
160 }
161 
162 static inline const fs_inst *
bblock_end_const(const struct bblock_t * block)163 bblock_end_const(const struct bblock_t *block)
164 {
165    return (const fs_inst *)exec_list_get_tail_const(&block->instructions);
166 }
167 
168 static inline struct bblock_t *
bblock_next(struct bblock_t * block)169 bblock_next(struct bblock_t *block)
170 {
171    if (exec_node_is_tail_sentinel(block->link.next))
172       return NULL;
173 
174    return (struct bblock_t *)block->link.next;
175 }
176 
177 static inline const struct bblock_t *
bblock_next_const(const struct bblock_t * block)178 bblock_next_const(const struct bblock_t *block)
179 {
180    if (exec_node_is_tail_sentinel(block->link.next))
181       return NULL;
182 
183    return (const struct bblock_t *)block->link.next;
184 }
185 
186 static inline struct bblock_t *
bblock_prev(struct bblock_t * block)187 bblock_prev(struct bblock_t *block)
188 {
189    if (exec_node_is_head_sentinel(block->link.prev))
190       return NULL;
191 
192    return (struct bblock_t *)block->link.prev;
193 }
194 
195 static inline const struct bblock_t *
bblock_prev_const(const struct bblock_t * block)196 bblock_prev_const(const struct bblock_t *block)
197 {
198    if (exec_node_is_head_sentinel(block->link.prev))
199       return NULL;
200 
201    return (const struct bblock_t *)block->link.prev;
202 }
203 
204 static inline bool
bblock_starts_with_control_flow(const struct bblock_t * block)205 bblock_starts_with_control_flow(const struct bblock_t *block)
206 {
207    enum opcode op = bblock_start_const(block)->opcode;
208    return op == BRW_OPCODE_DO || op == BRW_OPCODE_ENDIF;
209 }
210 
211 static inline bool
bblock_ends_with_control_flow(const struct bblock_t * block)212 bblock_ends_with_control_flow(const struct bblock_t *block)
213 {
214    enum opcode op = bblock_end_const(block)->opcode;
215    return op == BRW_OPCODE_IF ||
216           op == BRW_OPCODE_ELSE ||
217           op == BRW_OPCODE_WHILE ||
218           op == BRW_OPCODE_BREAK ||
219           op == BRW_OPCODE_CONTINUE;
220 }
221 
/* First instruction of \p block, skipping a single leading DO/ENDIF
 * control-flow instruction if present.
 */
static inline fs_inst *
bblock_first_non_control_flow_inst(struct bblock_t *block)
{
   fs_inst *inst = bblock_start(block);
   if (bblock_starts_with_control_flow(block))
#ifdef __cplusplus
      inst = (fs_inst *)inst->next;
#else
      /* NOTE(review): this region is already inside an outer
       * #ifdef __cplusplus, so this C branch looks unreachable here --
       * presumably retained from when this header was C-compatible.
       */
      inst = (fs_inst *)inst->link.next;
#endif
   return inst;
}
234 
/* Last instruction of \p block, skipping a single trailing
 * IF/ELSE/WHILE/BREAK/CONTINUE control-flow instruction if present.
 */
static inline fs_inst *
bblock_last_non_control_flow_inst(struct bblock_t *block)
{
   fs_inst *inst = bblock_end(block);
   if (bblock_ends_with_control_flow(block))
#ifdef __cplusplus
      inst = (fs_inst *)inst->prev;
#else
      /* NOTE(review): unreachable here for the same reason as in
       * bblock_first_non_control_flow_inst -- the enclosing region is
       * C++-only.
       */
      inst = (fs_inst *)inst->link.prev;
#endif
   return inst;
}
247 
/* Out-of-class definitions of the bblock_t accessors declared above; each
 * simply forwards to the corresponding free function.
 */
inline fs_inst *
bblock_t::start()
{
   return bblock_start(this);
}

inline const fs_inst *
bblock_t::start() const
{
   return bblock_start_const(this);
}

inline fs_inst *
bblock_t::end()
{
   return bblock_end(this);
}

inline const fs_inst *
bblock_t::end() const
{
   return bblock_end_const(this);
}

inline bblock_t *
bblock_t::next()
{
   return bblock_next(this);
}

inline const bblock_t *
bblock_t::next() const
{
   return bblock_next_const(this);
}

inline bblock_t *
bblock_t::prev()
{
   return bblock_prev(this);
}

inline const bblock_t *
bblock_t::prev() const
{
   return bblock_prev_const(this);
}

inline bool
bblock_t::starts_with_control_flow() const
{
   return bblock_starts_with_control_flow(this);
}

inline bool
bblock_t::ends_with_control_flow() const
{
   return bblock_ends_with_control_flow(this);
}

inline fs_inst *
bblock_t::first_non_control_flow_inst()
{
   return bblock_first_non_control_flow_inst(this);
}

inline fs_inst *
bblock_t::last_non_control_flow_inst()
{
   return bblock_last_non_control_flow_inst(this);
}
319 
/**
 * Control flow graph of a shader: an ordered list of basic blocks plus an
 * array view of them built by make_block_array().
 */
struct cfg_t {
   DECLARE_RALLOC_CXX_OPERATORS(cfg_t)

   /* Build the CFG for the given instruction stream. */
   cfg_t(const fs_visitor *s, exec_list *instructions);
   ~cfg_t();

   void remove_block(bblock_t *block);

   /* First/last block in list order (const and non-const forms). */
   bblock_t *first_block();
   const bblock_t *first_block() const;
   bblock_t *last_block();
   const bblock_t *last_block() const;

   bblock_t *new_block();
   void set_next_block(bblock_t **cur, bblock_t *block, int ip);
   /* (Re)build the blocks array from block_list. */
   void make_block_array();

   void dump(FILE *file = stderr);
   void dump_cfg();

#ifdef NDEBUG
   /* Validation is compiled out in release (NDEBUG) builds. */
   void validate(UNUSED const char *stage_abbrev) { }
#else
   void validate(const char *stage_abbrev);
#endif

   /**
    * Propagate bblock_t::end_ip_delta data through the CFG.
    */
   inline void adjust_block_ips();

   const struct fs_visitor *s;
   void *mem_ctx;

   /** Ordered list (by ip) of basic blocks */
   struct exec_list block_list;
   /* Array view of block_list -- presumably indexed by bblock_t::num;
    * confirm against make_block_array().
    */
   struct bblock_t **blocks;
   int num_blocks;
};
359 
360 static inline struct bblock_t *
cfg_first_block(struct cfg_t * cfg)361 cfg_first_block(struct cfg_t *cfg)
362 {
363    return (struct bblock_t *)exec_list_get_head(&cfg->block_list);
364 }
365 
366 static inline const struct bblock_t *
cfg_first_block_const(const struct cfg_t * cfg)367 cfg_first_block_const(const struct cfg_t *cfg)
368 {
369    return (const struct bblock_t *)exec_list_get_head_const(&cfg->block_list);
370 }
371 
372 static inline struct bblock_t *
cfg_last_block(struct cfg_t * cfg)373 cfg_last_block(struct cfg_t *cfg)
374 {
375    return (struct bblock_t *)exec_list_get_tail(&cfg->block_list);
376 }
377 
378 static inline const struct bblock_t *
cfg_last_block_const(const struct cfg_t * cfg)379 cfg_last_block_const(const struct cfg_t *cfg)
380 {
381    return (const struct bblock_t *)exec_list_get_tail_const(&cfg->block_list);
382 }
383 
/* Out-of-class definitions of the cfg_t block accessors; each forwards to
 * the corresponding free function.  (Note the unusual but legal
 * 'const inline' ordering on the const overloads.)
 */
inline bblock_t *
cfg_t::first_block()
{
   return cfg_first_block(this);
}

const inline bblock_t *
cfg_t::first_block() const
{
   return cfg_first_block_const(this);
}

inline bblock_t *
cfg_t::last_block()
{
   return cfg_last_block(this);
}

const inline bblock_t *
cfg_t::last_block() const
{
   return cfg_last_block_const(this);
}
407 
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)

/* Iterate over the basic blocks of __cfg in list (ip) order. */
#define foreach_block(__block, __cfg)                          \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

/* Same, in reverse order. */
#define foreach_block_reverse(__block, __cfg)                  \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

/* Safe variants tolerate removal of the current block during iteration. */
#define foreach_block_safe(__block, __cfg)                     \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg)             \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)
433 
/* Iterate over the instructions of a basic block. */
#define foreach_inst_in_block(__type, __inst, __block)         \
   foreach_in_list(__type, __inst, &(__block)->instructions)

/* Safe variant: caches the next pointer so the current instruction may be
 * removed during iteration.  Note: __block and __inst are parenthesized
 * below so that expression arguments (e.g. '*blkp') parse correctly, for
 * consistency with the other macros in this file.
 */
#define foreach_inst_in_block_safe(__type, __inst, __block)    \
   for (__type *__inst = (__type *)(__block)->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;               \
        __next != NULL;                                        \
        __inst = __next,                                       \
        __next = (__type *)__next->next)

/* Reverse-order iteration, plus a removal-safe variant. */
#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

/* Iterate over the instructions strictly after/before __inst, up to the
 * end/start of its block.
 */
#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)(__inst)->next;        \
        !__scan_inst->is_tail_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)(__inst)->prev;        \
        !__scan_inst->is_head_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->prev)
459 
/* Apply each block's pending end_ip_delta to the IPs of all later blocks,
 * then clear the deltas.  A single forward pass suffices because
 * block_list is ordered by ip.
 */
inline void
cfg_t::adjust_block_ips()
{
   int delta = 0;

   foreach_block(block, this) {
      /* Shift this block by the accumulated change from earlier blocks. */
      block->start_ip += delta;
      block->end_ip += delta;

      /* This block's own end_ip already reflects its delta; it only
       * affects the blocks after it.
       */
      delta += block->end_ip_delta;

      block->end_ip_delta = 0;
   }
}
474 
namespace brw {
   /**
    * Immediate dominator tree analysis of a shader.
    *
    * For each basic block (indexed by bblock_t::num), parents[] holds its
    * immediate dominator.
    */
   struct idom_tree {
      idom_tree(const fs_visitor *s);
      ~idom_tree();

      /* Analysis-framework hook; no real validation implemented yet. */
      bool
      validate(const fs_visitor *) const
      {
         /* FINISHME */
         return true;
      }

      /* This analysis only depends on the block structure of the CFG. */
      analysis_dependency_class
      dependency_class() const
      {
         return DEPENDENCY_BLOCKS;
      }

      /** Immediate dominator of \p b. */
      const bblock_t *
      parent(const bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      /** Immediate dominator of \p b (non-const overload). */
      bblock_t *
      parent(bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      /* Presumably the nearest common dominator of b1 and b2 (cf. the
       * Cooper-Harvey-Kennedy "intersect" step) -- confirm in the
       * implementation file.
       */
      bblock_t *
      intersect(bblock_t *b1, bblock_t *b2) const;

      /**
       * Returns true if block `a` dominates block `b`.
       */
      bool
      dominates(const bblock_t *a, const bblock_t *b) const
      {
         /* Walk up b's dominator chain until we hit a, or give up at the
          * entry block (num == 0).
          */
         while (a != b) {
            if (b->num == 0)
               return false;

            b = parent(b);
         }
         return true;
      }

      void dump(FILE *file = stderr) const;

   private:
      /* Number of entries in parents, i.e. the block count at analysis
       * time.
       */
      unsigned num_parents;
      /* parents[n] is the immediate dominator of the block with num == n. */
      bblock_t **parents;
   };
}
535 
536 #endif
537