/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#ifndef BRW_CFG_H
#define BRW_CFG_H

#include "brw_ir.h"
#ifdef __cplusplus
#include "brw_ir_analysis.h"
#endif

struct bblock_t;

/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges, but not
 * the other way around -- i.e. the logical CFG is a subset of the physical
 * one.
 */
enum bblock_link_kind {
   bblock_link_logical = 0,
   bblock_link_physical
};

struct bblock_link {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(bblock_link)

   bblock_link(bblock_t *block, enum bblock_link_kind kind)
      : block(block), kind(kind)
   {
   }
#endif

   struct exec_node link;
   struct bblock_t *block;

   /* Type of this CFG edge.  Because bblock_link_logical also implies
    * bblock_link_physical, the proper way to test for membership of edge 'l'
    * in CFG kind 'k' is 'l.kind <= k'.
    */
   enum bblock_link_kind kind;
};
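
/* Example (illustrative sketch, not part of this interface): given the
 * ordering invariant above, a pass that only cares about the logical CFG can
 * filter a block's predecessor edges like so, using the 'parents' list of
 * bblock_t declared below ('visit_logical_predecessor' is a hypothetical
 * callback):
 *
 *    foreach_list_typed (struct bblock_link, pred_link, link, &block->parents) {
 *       if (pred_link->kind <= bblock_link_logical)
 *          visit_logical_predecessor(pred_link->block);
 *    }
 *
 * Testing against bblock_link_physical instead would accept every edge,
 * since logical edges are also physical.
 */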

struct backend_shader;
struct cfg_t;

struct bblock_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(bblock_t)

   explicit bblock_t(cfg_t *cfg);

   void add_successor(void *mem_ctx, bblock_t *successor,
                      enum bblock_link_kind kind);
   bool is_predecessor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const;
   bool is_successor_of(const bblock_t *block,
                        enum bblock_link_kind kind) const;
   bool can_combine_with(const bblock_t *that) const;
   void combine_with(bblock_t *that);
   void dump() const;

   backend_instruction *start();
   const backend_instruction *start() const;
   backend_instruction *end();
   const backend_instruction *end() const;

   bblock_t *next();
   const bblock_t *next() const;
   bblock_t *prev();
   const bblock_t *prev() const;

   bool starts_with_control_flow() const;
   bool ends_with_control_flow() const;

   backend_instruction *first_non_control_flow_inst();
   backend_instruction *last_non_control_flow_inst();
#endif

   struct exec_node link;
   struct cfg_t *cfg;

   int start_ip;
   int end_ip;

   /**
    * Change in end_ip since the last time IPs of later blocks were updated.
    */
   int end_ip_delta;

   struct exec_list instructions;
   struct exec_list parents;
   struct exec_list children;
   int num;
};

static inline struct backend_instruction *
bblock_start(struct bblock_t *block)
{
   return (struct backend_instruction *)exec_list_get_head(&block->instructions);
}

static inline const struct backend_instruction *
bblock_start_const(const struct bblock_t *block)
{
   return (const struct backend_instruction *)exec_list_get_head_const(&block->instructions);
}

static inline struct backend_instruction *
bblock_end(struct bblock_t *block)
{
   return (struct backend_instruction *)exec_list_get_tail(&block->instructions);
}

static inline const struct backend_instruction *
bblock_end_const(const struct bblock_t *block)
{
   return (const struct backend_instruction *)exec_list_get_tail_const(&block->instructions);
}

static inline struct bblock_t *
bblock_next(struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (struct bblock_t *)block->link.next;
}

static inline const struct bblock_t *
bblock_next_const(const struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (const struct bblock_t *)block->link.next;
}

static inline struct bblock_t *
bblock_prev(struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (struct bblock_t *)block->link.prev;
}

static inline const struct bblock_t *
bblock_prev_const(const struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (const struct bblock_t *)block->link.prev;
}

static inline bool
bblock_starts_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_start_const(block)->opcode;
   return op == BRW_OPCODE_DO || op == BRW_OPCODE_ENDIF;
}

static inline bool
bblock_ends_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_end_const(block)->opcode;
   return op == BRW_OPCODE_IF ||
          op == BRW_OPCODE_ELSE ||
          op == BRW_OPCODE_WHILE ||
          op == BRW_OPCODE_BREAK ||
          op == BRW_OPCODE_CONTINUE;
}

static inline struct backend_instruction *
bblock_first_non_control_flow_inst(struct bblock_t *block)
{
   struct backend_instruction *inst = bblock_start(block);
   if (bblock_starts_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct backend_instruction *)inst->next;
#else
      inst = (struct backend_instruction *)inst->link.next;
#endif
   return inst;
}

static inline struct backend_instruction *
bblock_last_non_control_flow_inst(struct bblock_t *block)
{
   struct backend_instruction *inst = bblock_end(block);
   if (bblock_ends_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct backend_instruction *)inst->prev;
#else
      inst = (struct backend_instruction *)inst->link.prev;
#endif
   return inst;
}

#ifdef __cplusplus
inline backend_instruction *
bblock_t::start()
{
   return bblock_start(this);
}

inline const backend_instruction *
bblock_t::start() const
{
   return bblock_start_const(this);
}

inline backend_instruction *
bblock_t::end()
{
   return bblock_end(this);
}

inline const backend_instruction *
bblock_t::end() const
{
   return bblock_end_const(this);
}

inline bblock_t *
bblock_t::next()
{
   return bblock_next(this);
}

inline const bblock_t *
bblock_t::next() const
{
   return bblock_next_const(this);
}

inline bblock_t *
bblock_t::prev()
{
   return bblock_prev(this);
}

inline const bblock_t *
bblock_t::prev() const
{
   return bblock_prev_const(this);
}

inline bool
bblock_t::starts_with_control_flow() const
{
   return bblock_starts_with_control_flow(this);
}

inline bool
bblock_t::ends_with_control_flow() const
{
   return bblock_ends_with_control_flow(this);
}

inline backend_instruction *
bblock_t::first_non_control_flow_inst()
{
   return bblock_first_non_control_flow_inst(this);
}

inline backend_instruction *
bblock_t::last_non_control_flow_inst()
{
   return bblock_last_non_control_flow_inst(this);
}
#endif

struct cfg_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(cfg_t)

   cfg_t(const backend_shader *s, exec_list *instructions);
   ~cfg_t();

   void remove_block(bblock_t *block);

   bblock_t *first_block();
   const bblock_t *first_block() const;
   bblock_t *last_block();
   const bblock_t *last_block() const;

   bblock_t *new_block();
   void set_next_block(bblock_t **cur, bblock_t *block, int ip);
   void make_block_array();

   void dump();
   void dump_cfg();

   /**
    * Propagate bblock_t::end_ip_delta data through the CFG.
    */
   inline void adjust_block_ips();

#endif
   const struct backend_shader *s;
   void *mem_ctx;

   /** Ordered list (by ip) of basic blocks */
   struct exec_list block_list;
   struct bblock_t **blocks;
   int num_blocks;
};
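
/* Example (illustrative sketch): after make_block_array() has run, 'blocks'
 * gives O(1) access to blocks by index, which is convenient for passes that
 * keep per-block side data keyed on bblock_t::num:
 *
 *    for (int i = 0; i < cfg->num_blocks; i++) {
 *       struct bblock_t *block = cfg->blocks[i];
 *       ... per-block bookkeeping, e.g. bitsets indexed by block->num ...
 *    }
 *
 * List-order traversal is also available through the foreach_block() macro
 * defined below.
 */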

static inline struct bblock_t *
cfg_first_block(struct cfg_t *cfg)
{
   return (struct bblock_t *)exec_list_get_head(&cfg->block_list);
}

static inline const struct bblock_t *
cfg_first_block_const(const struct cfg_t *cfg)
{
   return (const struct bblock_t *)exec_list_get_head_const(&cfg->block_list);
}

static inline struct bblock_t *
cfg_last_block(struct cfg_t *cfg)
{
   return (struct bblock_t *)exec_list_get_tail(&cfg->block_list);
}

static inline const struct bblock_t *
cfg_last_block_const(const struct cfg_t *cfg)
{
   return (const struct bblock_t *)exec_list_get_tail_const(&cfg->block_list);
}

#ifdef __cplusplus
inline bblock_t *
cfg_t::first_block()
{
   return cfg_first_block(this);
}

inline const bblock_t *
cfg_t::first_block() const
{
   return cfg_first_block_const(this);
}

inline bblock_t *
cfg_t::last_block()
{
   return cfg_last_block(this);
}

inline const bblock_t *
cfg_t::last_block() const
{
   return cfg_last_block_const(this);
}
#endif

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)
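
/* Example (illustrative sketch): because the combined macros above expand to
 * two nested loops, 'break' only exits the per-block inner loop and the
 * traversal continues with the next block.  One way to abort the whole walk
 * early is to use the separate block/instruction macros below with a goto:
 *
 *    foreach_block (block, cfg) {
 *       foreach_inst_in_block (backend_instruction, inst, block) {
 *          if (inst->opcode == BRW_OPCODE_WHILE)
 *             goto done;
 *       }
 *    }
 * done:
 *    ...
 */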

#define foreach_block(__block, __cfg)                          \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg)                  \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg)                     \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg)             \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block)         \
   foreach_in_list(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_safe(__type, __inst, __block)    \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;               \
        __next != NULL;                                        \
        __inst = __next,                                       \
        __next = (__type *)__next->next)

#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;          \
        !__scan_inst->is_tail_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;          \
        !__scan_inst->is_head_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->prev)

#ifdef __cplusplus
inline void
cfg_t::adjust_block_ips()
{
   int delta = 0;

   foreach_block(block, this) {
      block->start_ip += delta;
      block->end_ip += delta;

      delta += block->end_ip_delta;

      block->end_ip_delta = 0;
   }
}
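
/* Example (illustrative sketch): a pattern consistent with the definition
 * above is for a pass that inserts an instruction into 'block' to update
 * that block's own IPs immediately and defer fixing up later blocks:
 *
 *    block->end_ip += 1;
 *    block->end_ip_delta += 1;
 *    ... more insertions elsewhere ...
 *    cfg->adjust_block_ips();
 *
 * adjust_block_ips() then shifts the start_ip/end_ip of every subsequent
 * block by the accumulated deltas in a single linear walk.
 */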

namespace brw {
   /**
    * Immediate dominator tree analysis of a shader.
    */
   struct idom_tree {
      idom_tree(const backend_shader *s);
      ~idom_tree();

      bool
      validate(const backend_shader *) const
      {
         /* FINISHME */
         return true;
      }

      analysis_dependency_class
      dependency_class() const
      {
         return DEPENDENCY_BLOCKS;
      }

      const bblock_t *
      parent(const bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      bblock_t *
      parent(bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      bblock_t *
      intersect(bblock_t *b1, bblock_t *b2) const;

      void
      dump() const;

   private:
      unsigned num_parents;
      bblock_t **parents;
   };
}
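
/* Example (illustrative sketch): one use of the idom_tree analysis above is
 * finding the nearest block that dominates two others, e.g. to locate a
 * point where divergent control flow is known to have re-converged
 * ('shader', 'block_a' and 'block_b' are hypothetical placeholders):
 *
 *    const brw::idom_tree idom(shader);
 *    bblock_t *dom = idom.intersect(block_a, block_b);
 *
 * intersect() walks the two immediate-dominator chains until they meet, so
 * the result dominates both of its arguments.
 */
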
#endif

#endif /* BRW_CFG_H */