/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
27 
#ifndef BRW_CFG_H
#define BRW_CFG_H

#include <stdio.h>

#include "brw_ir.h"
#ifdef __cplusplus
#include "brw_ir_analysis.h"
#endif

struct bblock_t;

/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges but not
 * the other way around -- I.e. the logical CFG is a subset of the physical
 * one.
 */
enum bblock_link_kind {
   /* Explicit zero so that "l.kind <= k" membership tests (see bblock_link)
    * are well-defined: logical edges are a subset of physical ones.
    */
   bblock_link_logical = 0,
   bblock_link_physical
};
55 
56 struct bblock_link {
57 #ifdef __cplusplus
58    DECLARE_RALLOC_CXX_OPERATORS(bblock_link)
59 
bblock_linkbblock_link60    bblock_link(bblock_t *block, enum bblock_link_kind kind)
61       : block(block), kind(kind)
62    {
63    }
64 #endif
65 
66    struct exec_node link;
67    struct bblock_t *block;
68 
69    /* Type of this CFG edge.  Because bblock_link_logical also implies
70     * bblock_link_physical, the proper way to test for membership of edge 'l'
71     * in CFG kind 'k' is 'l.kind <= k'.
72     */
73    enum bblock_link_kind kind;
74 };
75 
76 struct backend_shader;
77 struct cfg_t;
78 
79 struct bblock_t {
80 #ifdef __cplusplus
81    DECLARE_RALLOC_CXX_OPERATORS(bblock_t)
82 
83    explicit bblock_t(cfg_t *cfg);
84 
85    void add_successor(void *mem_ctx, bblock_t *successor,
86                       enum bblock_link_kind kind);
87    bool is_predecessor_of(const bblock_t *block,
88                           enum bblock_link_kind kind) const;
89    bool is_successor_of(const bblock_t *block,
90                         enum bblock_link_kind kind) const;
91    bool can_combine_with(const bblock_t *that) const;
92    void combine_with(bblock_t *that);
93    void dump(FILE *file = stderr) const;
94 
95    backend_instruction *start();
96    const backend_instruction *start() const;
97    backend_instruction *end();
98    const backend_instruction *end() const;
99 
100    bblock_t *next();
101    const bblock_t *next() const;
102    bblock_t *prev();
103    const bblock_t *prev() const;
104 
105    bool starts_with_control_flow() const;
106    bool ends_with_control_flow() const;
107 
108    backend_instruction *first_non_control_flow_inst();
109    backend_instruction *last_non_control_flow_inst();
110 
111 private:
112    /**
113     * \sa unlink_parents, unlink_children
114     */
115    void unlink_list(exec_list *);
116 
117 public:
unlink_parentsbblock_t118    void unlink_parents()
119    {
120       unlink_list(&parents);
121    }
122 
unlink_childrenbblock_t123    void unlink_children()
124    {
125       unlink_list(&children);
126    }
127 #endif
128 
129    struct exec_node link;
130    struct cfg_t *cfg;
131 
132    int start_ip;
133    int end_ip;
134 
135    /**
136     * Change in end_ip since the last time IPs of later blocks were updated.
137     */
138    int end_ip_delta;
139 
140    struct exec_list instructions;
141    struct exec_list parents;
142    struct exec_list children;
143    int num;
144 };
145 
146 static inline struct backend_instruction *
bblock_start(struct bblock_t * block)147 bblock_start(struct bblock_t *block)
148 {
149    return (struct backend_instruction *)exec_list_get_head(&block->instructions);
150 }
151 
152 static inline const struct backend_instruction *
bblock_start_const(const struct bblock_t * block)153 bblock_start_const(const struct bblock_t *block)
154 {
155    return (const struct backend_instruction *)exec_list_get_head_const(&block->instructions);
156 }
157 
158 static inline struct backend_instruction *
bblock_end(struct bblock_t * block)159 bblock_end(struct bblock_t *block)
160 {
161    return (struct backend_instruction *)exec_list_get_tail(&block->instructions);
162 }
163 
164 static inline const struct backend_instruction *
bblock_end_const(const struct bblock_t * block)165 bblock_end_const(const struct bblock_t *block)
166 {
167    return (const struct backend_instruction *)exec_list_get_tail_const(&block->instructions);
168 }
169 
170 static inline struct bblock_t *
bblock_next(struct bblock_t * block)171 bblock_next(struct bblock_t *block)
172 {
173    if (exec_node_is_tail_sentinel(block->link.next))
174       return NULL;
175 
176    return (struct bblock_t *)block->link.next;
177 }
178 
179 static inline const struct bblock_t *
bblock_next_const(const struct bblock_t * block)180 bblock_next_const(const struct bblock_t *block)
181 {
182    if (exec_node_is_tail_sentinel(block->link.next))
183       return NULL;
184 
185    return (const struct bblock_t *)block->link.next;
186 }
187 
188 static inline struct bblock_t *
bblock_prev(struct bblock_t * block)189 bblock_prev(struct bblock_t *block)
190 {
191    if (exec_node_is_head_sentinel(block->link.prev))
192       return NULL;
193 
194    return (struct bblock_t *)block->link.prev;
195 }
196 
197 static inline const struct bblock_t *
bblock_prev_const(const struct bblock_t * block)198 bblock_prev_const(const struct bblock_t *block)
199 {
200    if (exec_node_is_head_sentinel(block->link.prev))
201       return NULL;
202 
203    return (const struct bblock_t *)block->link.prev;
204 }
205 
206 static inline bool
bblock_starts_with_control_flow(const struct bblock_t * block)207 bblock_starts_with_control_flow(const struct bblock_t *block)
208 {
209    enum opcode op = bblock_start_const(block)->opcode;
210    return op == BRW_OPCODE_DO || op == BRW_OPCODE_ENDIF;
211 }
212 
213 static inline bool
bblock_ends_with_control_flow(const struct bblock_t * block)214 bblock_ends_with_control_flow(const struct bblock_t *block)
215 {
216    enum opcode op = bblock_end_const(block)->opcode;
217    return op == BRW_OPCODE_IF ||
218           op == BRW_OPCODE_ELSE ||
219           op == BRW_OPCODE_WHILE ||
220           op == BRW_OPCODE_BREAK ||
221           op == BRW_OPCODE_CONTINUE;
222 }
223 
224 static inline struct backend_instruction *
bblock_first_non_control_flow_inst(struct bblock_t * block)225 bblock_first_non_control_flow_inst(struct bblock_t *block)
226 {
227    struct backend_instruction *inst = bblock_start(block);
228    if (bblock_starts_with_control_flow(block))
229 #ifdef __cplusplus
230       inst = (struct backend_instruction *)inst->next;
231 #else
232       inst = (struct backend_instruction *)inst->link.next;
233 #endif
234    return inst;
235 }
236 
237 static inline struct backend_instruction *
bblock_last_non_control_flow_inst(struct bblock_t * block)238 bblock_last_non_control_flow_inst(struct bblock_t *block)
239 {
240    struct backend_instruction *inst = bblock_end(block);
241    if (bblock_ends_with_control_flow(block))
242 #ifdef __cplusplus
243       inst = (struct backend_instruction *)inst->prev;
244 #else
245       inst = (struct backend_instruction *)inst->link.prev;
246 #endif
247    return inst;
248 }
249 
250 #ifdef __cplusplus
251 inline backend_instruction *
start()252 bblock_t::start()
253 {
254    return bblock_start(this);
255 }
256 
257 inline const backend_instruction *
start()258 bblock_t::start() const
259 {
260    return bblock_start_const(this);
261 }
262 
263 inline backend_instruction *
end()264 bblock_t::end()
265 {
266    return bblock_end(this);
267 }
268 
269 inline const backend_instruction *
end()270 bblock_t::end() const
271 {
272    return bblock_end_const(this);
273 }
274 
275 inline bblock_t *
next()276 bblock_t::next()
277 {
278    return bblock_next(this);
279 }
280 
281 inline const bblock_t *
next()282 bblock_t::next() const
283 {
284    return bblock_next_const(this);
285 }
286 
287 inline bblock_t *
prev()288 bblock_t::prev()
289 {
290    return bblock_prev(this);
291 }
292 
293 inline const bblock_t *
prev()294 bblock_t::prev() const
295 {
296    return bblock_prev_const(this);
297 }
298 
299 inline bool
starts_with_control_flow()300 bblock_t::starts_with_control_flow() const
301 {
302    return bblock_starts_with_control_flow(this);
303 }
304 
305 inline bool
ends_with_control_flow()306 bblock_t::ends_with_control_flow() const
307 {
308    return bblock_ends_with_control_flow(this);
309 }
310 
311 inline backend_instruction *
first_non_control_flow_inst()312 bblock_t::first_non_control_flow_inst()
313 {
314    return bblock_first_non_control_flow_inst(this);
315 }
316 
317 inline backend_instruction *
last_non_control_flow_inst()318 bblock_t::last_non_control_flow_inst()
319 {
320    return bblock_last_non_control_flow_inst(this);
321 }
322 #endif
323 
324 struct cfg_t {
325 #ifdef __cplusplus
326    DECLARE_RALLOC_CXX_OPERATORS(cfg_t)
327 
328    cfg_t(const backend_shader *s, exec_list *instructions);
329    ~cfg_t();
330 
331    void remove_block(bblock_t *block);
332 
333    bblock_t *first_block();
334    const bblock_t *first_block() const;
335    bblock_t *last_block();
336    const bblock_t *last_block() const;
337 
338    bblock_t *new_block();
339    void set_next_block(bblock_t **cur, bblock_t *block, int ip);
340    void make_block_array();
341 
342    void dump(FILE *file = stderr);
343    void dump_cfg();
344 
345 #ifdef NDEBUG
validatecfg_t346    void validate(UNUSED const char *stage_abbrev) { }
347 #else
348    void validate(const char *stage_abbrev);
349 #endif
350 
351    /**
352     * Propagate bblock_t::end_ip_delta data through the CFG.
353     */
354    inline void adjust_block_ips();
355 
356 #endif
357    const struct backend_shader *s;
358    void *mem_ctx;
359 
360    /** Ordered list (by ip) of basic blocks */
361    struct exec_list block_list;
362    struct bblock_t **blocks;
363    int num_blocks;
364 };
365 
366 static inline struct bblock_t *
cfg_first_block(struct cfg_t * cfg)367 cfg_first_block(struct cfg_t *cfg)
368 {
369    return (struct bblock_t *)exec_list_get_head(&cfg->block_list);
370 }
371 
372 static inline const struct bblock_t *
cfg_first_block_const(const struct cfg_t * cfg)373 cfg_first_block_const(const struct cfg_t *cfg)
374 {
375    return (const struct bblock_t *)exec_list_get_head_const(&cfg->block_list);
376 }
377 
378 static inline struct bblock_t *
cfg_last_block(struct cfg_t * cfg)379 cfg_last_block(struct cfg_t *cfg)
380 {
381    return (struct bblock_t *)exec_list_get_tail(&cfg->block_list);
382 }
383 
384 static inline const struct bblock_t *
cfg_last_block_const(const struct cfg_t * cfg)385 cfg_last_block_const(const struct cfg_t *cfg)
386 {
387    return (const struct bblock_t *)exec_list_get_tail_const(&cfg->block_list);
388 }
389 
390 #ifdef __cplusplus
391 inline bblock_t *
first_block()392 cfg_t::first_block()
393 {
394    return cfg_first_block(this);
395 }
396 
397 const inline bblock_t *
first_block()398 cfg_t::first_block() const
399 {
400    return cfg_first_block_const(this);
401 }
402 
403 inline bblock_t *
last_block()404 cfg_t::last_block()
405 {
406    return cfg_last_block(this);
407 }
408 
409 const inline bblock_t *
last_block()410 cfg_t::last_block() const
411 {
412    return cfg_last_block_const(this);
413 }
414 #endif
415 
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)

#define foreach_block(__block, __cfg)                          \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg)                  \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

/* "_safe" variants pre-fetch the next node so the current one may be
 * removed from the list during iteration.
 */
#define foreach_block_safe(__block, __cfg)                     \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg)             \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block)         \
   foreach_in_list(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_safe(__type, __inst, __block)    \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;               \
        __next != NULL;                                        \
        __inst = __next,                                       \
        __next = (__type *)__next->next)

#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

/* Iterate the instructions strictly after/before __inst within its block. */
#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;          \
        !__scan_inst->is_tail_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;          \
        !__scan_inst->is_head_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->prev)
467 
468 #ifdef __cplusplus
469 inline void
adjust_block_ips()470 cfg_t::adjust_block_ips()
471 {
472    int delta = 0;
473 
474    foreach_block(block, this) {
475       block->start_ip += delta;
476       block->end_ip += delta;
477 
478       delta += block->end_ip_delta;
479 
480       block->end_ip_delta = 0;
481    }
482 }
483 
484 namespace brw {
485    /**
486     * Immediate dominator tree analysis of a shader.
487     */
488    struct idom_tree {
489       idom_tree(const backend_shader *s);
490       ~idom_tree();
491 
492       bool
validateidom_tree493       validate(const backend_shader *) const
494       {
495          /* FINISHME */
496          return true;
497       }
498 
499       analysis_dependency_class
dependency_classidom_tree500       dependency_class() const
501       {
502          return DEPENDENCY_BLOCKS;
503       }
504 
505       const bblock_t *
parentidom_tree506       parent(const bblock_t *b) const
507       {
508          assert(unsigned(b->num) < num_parents);
509          return parents[b->num];
510       }
511 
512       bblock_t *
parentidom_tree513       parent(bblock_t *b) const
514       {
515          assert(unsigned(b->num) < num_parents);
516          return parents[b->num];
517       }
518 
519       bblock_t *
520       intersect(bblock_t *b1, bblock_t *b2) const;
521 
522       void
523       dump() const;
524 
525    private:
526       unsigned num_parents;
527       bblock_t **parents;
528    };
529 }
530 #endif
531 
532 #endif /* BRW_CFG_H */
533