/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#pragma once

#include "elk_ir.h"
#ifdef __cplusplus
#include "elk_ir_analysis.h"
#endif

struct elk_bblock_t;
/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges, but not
 * the other way around -- i.e. the logical CFG is a subset of the physical
 * one.
 */
enum bblock_link_kind {
   bblock_link_logical = 0,
   bblock_link_physical
};

struct elk_bblock_link {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(elk_bblock_link)

   elk_bblock_link(elk_bblock_t *block, enum bblock_link_kind kind)
      : block(block), kind(kind)
   {
   }
#endif

   struct exec_node link;
   struct elk_bblock_t *block;

   /* Type of this CFG edge.  Because bblock_link_logical also implies
    * bblock_link_physical, the proper way to test for membership of edge 'l'
    * in CFG kind 'k' is 'l.kind <= k'.
    */
   enum bblock_link_kind kind;
};
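
/* Usage sketch (illustrative, not part of the API): because logical edges
 * form a subset of the physical ones, filtering a block's successor list
 * down to the logical CFG is a single comparison against the edge kind:
 *
 *    foreach_list_typed(struct elk_bblock_link, child, link, &block->children) {
 *       if (child->kind <= bblock_link_logical)
 *          visit(child->block);   // 'visit' is a hypothetical callback
 *    }
 */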

struct elk_backend_shader;
struct elk_cfg_t;

struct elk_bblock_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(elk_bblock_t)

   explicit elk_bblock_t(elk_cfg_t *cfg);

   void add_successor(void *mem_ctx, elk_bblock_t *successor,
                      enum bblock_link_kind kind);
   bool is_predecessor_of(const elk_bblock_t *block,
                          enum bblock_link_kind kind) const;
   bool is_successor_of(const elk_bblock_t *block,
                        enum bblock_link_kind kind) const;
   bool can_combine_with(const elk_bblock_t *that) const;
   void combine_with(elk_bblock_t *that);
   void dump(FILE *file = stderr) const;

   elk_backend_instruction *start();
   const elk_backend_instruction *start() const;
   elk_backend_instruction *end();
   const elk_backend_instruction *end() const;

   elk_bblock_t *next();
   const elk_bblock_t *next() const;
   elk_bblock_t *prev();
   const elk_bblock_t *prev() const;

   bool starts_with_control_flow() const;
   bool ends_with_control_flow() const;

   elk_backend_instruction *first_non_control_flow_inst();
   elk_backend_instruction *last_non_control_flow_inst();

private:
   /**
    * \sa unlink_parents, unlink_children
    */
   void unlink_list(exec_list *);

public:
   void unlink_parents()
   {
      unlink_list(&parents);
   }

   void unlink_children()
   {
      unlink_list(&children);
   }
#endif

   struct exec_node link;
   struct elk_cfg_t *cfg;

   int start_ip;
   int end_ip;

   /**
    * Change in end_ip since the last time IPs of later blocks were updated.
    */
   int end_ip_delta;

   struct exec_list instructions;
   struct exec_list parents;
   struct exec_list children;
   int num;
};

static inline struct elk_backend_instruction *
bblock_start(struct elk_bblock_t *block)
{
   return (struct elk_backend_instruction *)exec_list_get_head(&block->instructions);
}

static inline const struct elk_backend_instruction *
bblock_start_const(const struct elk_bblock_t *block)
{
   return (const struct elk_backend_instruction *)exec_list_get_head_const(&block->instructions);
}

static inline struct elk_backend_instruction *
bblock_end(struct elk_bblock_t *block)
{
   return (struct elk_backend_instruction *)exec_list_get_tail(&block->instructions);
}

static inline const struct elk_backend_instruction *
bblock_end_const(const struct elk_bblock_t *block)
{
   return (const struct elk_backend_instruction *)exec_list_get_tail_const(&block->instructions);
}

static inline struct elk_bblock_t *
bblock_next(struct elk_bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (struct elk_bblock_t *)block->link.next;
}

static inline const struct elk_bblock_t *
bblock_next_const(const struct elk_bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (const struct elk_bblock_t *)block->link.next;
}

static inline struct elk_bblock_t *
bblock_prev(struct elk_bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (struct elk_bblock_t *)block->link.prev;
}

static inline const struct elk_bblock_t *
bblock_prev_const(const struct elk_bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (const struct elk_bblock_t *)block->link.prev;
}

static inline bool
bblock_starts_with_control_flow(const struct elk_bblock_t *block)
{
   enum elk_opcode op = bblock_start_const(block)->opcode;
   return op == ELK_OPCODE_DO || op == ELK_OPCODE_ENDIF;
}

static inline bool
bblock_ends_with_control_flow(const struct elk_bblock_t *block)
{
   enum elk_opcode op = bblock_end_const(block)->opcode;
   return op == ELK_OPCODE_IF ||
          op == ELK_OPCODE_ELSE ||
          op == ELK_OPCODE_WHILE ||
          op == ELK_OPCODE_BREAK ||
          op == ELK_OPCODE_CONTINUE;
}

static inline struct elk_backend_instruction *
bblock_first_non_control_flow_inst(struct elk_bblock_t *block)
{
   struct elk_backend_instruction *inst = bblock_start(block);
   if (bblock_starts_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct elk_backend_instruction *)inst->next;
#else
      inst = (struct elk_backend_instruction *)inst->link.next;
#endif
   return inst;
}

static inline struct elk_backend_instruction *
bblock_last_non_control_flow_inst(struct elk_bblock_t *block)
{
   struct elk_backend_instruction *inst = bblock_end(block);
   if (bblock_ends_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct elk_backend_instruction *)inst->prev;
#else
      inst = (struct elk_backend_instruction *)inst->link.prev;
#endif
   return inst;
}

#ifdef __cplusplus
inline elk_backend_instruction *
elk_bblock_t::start()
{
   return bblock_start(this);
}

inline const elk_backend_instruction *
elk_bblock_t::start() const
{
   return bblock_start_const(this);
}

inline elk_backend_instruction *
elk_bblock_t::end()
{
   return bblock_end(this);
}

inline const elk_backend_instruction *
elk_bblock_t::end() const
{
   return bblock_end_const(this);
}

inline elk_bblock_t *
elk_bblock_t::next()
{
   return bblock_next(this);
}

inline const elk_bblock_t *
elk_bblock_t::next() const
{
   return bblock_next_const(this);
}

inline elk_bblock_t *
elk_bblock_t::prev()
{
   return bblock_prev(this);
}

inline const elk_bblock_t *
elk_bblock_t::prev() const
{
   return bblock_prev_const(this);
}

inline bool
elk_bblock_t::starts_with_control_flow() const
{
   return bblock_starts_with_control_flow(this);
}

inline bool
elk_bblock_t::ends_with_control_flow() const
{
   return bblock_ends_with_control_flow(this);
}

inline elk_backend_instruction *
elk_bblock_t::first_non_control_flow_inst()
{
   return bblock_first_non_control_flow_inst(this);
}

inline elk_backend_instruction *
elk_bblock_t::last_non_control_flow_inst()
{
   return bblock_last_non_control_flow_inst(this);
}
#endif

struct elk_cfg_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(elk_cfg_t)

   elk_cfg_t(const elk_backend_shader *s, exec_list *instructions);
   elk_cfg_t(const elk_cfg_t &) = delete;
   ~elk_cfg_t();

   elk_cfg_t & operator=(const elk_cfg_t &) = delete;

   void remove_block(elk_bblock_t *block);

   elk_bblock_t *first_block();
   const elk_bblock_t *first_block() const;
   elk_bblock_t *last_block();
   const elk_bblock_t *last_block() const;

   elk_bblock_t *new_block();
   void set_next_block(elk_bblock_t **cur, elk_bblock_t *block, int ip);
   void make_block_array();

   void dump(FILE *file = stderr);
   void dump_cfg();

#ifdef NDEBUG
   void validate(UNUSED const char *stage_abbrev) { }
#else
   void validate(const char *stage_abbrev);
#endif

   /**
    * Propagate elk_bblock_t::end_ip_delta data through the CFG.
    */
   inline void adjust_block_ips();

#endif
   const struct elk_backend_shader *s;
   void *mem_ctx;

   /** Ordered list (by ip) of basic blocks */
   struct exec_list block_list;
   struct elk_bblock_t **blocks;
   int num_blocks;
};

static inline struct elk_bblock_t *
cfg_first_block(struct elk_cfg_t *cfg)
{
   return (struct elk_bblock_t *)exec_list_get_head(&cfg->block_list);
}

static inline const struct elk_bblock_t *
cfg_first_block_const(const struct elk_cfg_t *cfg)
{
   return (const struct elk_bblock_t *)exec_list_get_head_const(&cfg->block_list);
}

static inline struct elk_bblock_t *
cfg_last_block(struct elk_cfg_t *cfg)
{
   return (struct elk_bblock_t *)exec_list_get_tail(&cfg->block_list);
}

static inline const struct elk_bblock_t *
cfg_last_block_const(const struct elk_cfg_t *cfg)
{
   return (const struct elk_bblock_t *)exec_list_get_tail_const(&cfg->block_list);
}

#ifdef __cplusplus
inline elk_bblock_t *
elk_cfg_t::first_block()
{
   return cfg_first_block(this);
}

const inline elk_bblock_t *
elk_cfg_t::first_block() const
{
   return cfg_first_block_const(this);
}

inline elk_bblock_t *
elk_cfg_t::last_block()
{
   return cfg_last_block(this);
}

const inline elk_bblock_t *
elk_cfg_t::last_block() const
{
   return cfg_last_block_const(this);
}
#endif

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!  See the sketch after these macros.
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)
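
/* Illustrative sketch of the caveat above (not part of the header): a
 * 'break' only leaves the inner per-instruction loop, so iteration resumes
 * at the next block rather than terminating the whole traversal:
 *
 *    foreach_block_and_inst(block, elk_backend_instruction, inst, cfg) {
 *       if (is_interesting(inst))   // 'is_interesting' is a hypothetical
 *          break;                   // predicate; this skips to the next
 *    }                              // block, it does NOT end the walk
 */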

#define foreach_block(__block, __cfg) \
   foreach_list_typed (elk_bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg) \
   foreach_list_typed_reverse (elk_bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg) \
   foreach_list_typed_safe (elk_bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg) \
   foreach_list_typed_reverse_safe (elk_bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block) \
   foreach_in_list(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_safe(__type, __inst, __block) \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;  \
        __next != NULL;                           \
        __inst = __next,                          \
        __next = (__type *)__next->next)
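
/* Usage sketch (illustrative): the _safe variant caches the next pointer
 * before the loop body runs, so the current instruction may be unlinked
 * from the block without derailing the iteration:
 *
 *    foreach_inst_in_block_safe(elk_backend_instruction, inst, block) {
 *       if (inst->opcode == ELK_OPCODE_NOP)   // hypothetical filter;
 *          inst->remove(block);               // assumes the IR provides a
 *    }                                        // remove() helper like this
 */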

#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;   \
        !__scan_inst->is_tail_sentinel();               \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;   \
        !__scan_inst->is_head_sentinel();               \
        __scan_inst = (__type *)__scan_inst->prev)

#ifdef __cplusplus
inline void
elk_cfg_t::adjust_block_ips()
{
   int delta = 0;

   foreach_block(block, this) {
      block->start_ip += delta;
      block->end_ip += delta;

      delta += block->end_ip_delta;

      block->end_ip_delta = 0;
   }
}
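
/* Usage sketch (illustrative): after deleting instructions from a block, a
 * pass can record the change in that block's end_ip_delta and defer the IP
 * renumbering of all later blocks to a single adjust_block_ips() call:
 *
 *    block->end_ip -= removed_count;         // 'removed_count' is a
 *    block->end_ip_delta -= removed_count;   // hypothetical tally
 *    ...
 *    cfg->adjust_block_ips();                // propagates deltas forward
 */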

namespace elk {
   /**
    * Immediate dominator tree analysis of a shader.
    */
   struct idom_tree {
      idom_tree(const elk_backend_shader *s);
      idom_tree(const idom_tree &) = delete;
      ~idom_tree();
      idom_tree & operator=(const idom_tree &) = delete;

      bool
      validate(const elk_backend_shader *) const
      {
         /* FINISHME */
         return true;
      }

      analysis_dependency_class
      dependency_class() const
      {
         return DEPENDENCY_BLOCKS;
      }

      const elk_bblock_t *
      parent(const elk_bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      elk_bblock_t *
      parent(elk_bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      elk_bblock_t *
      intersect(elk_bblock_t *b1, elk_bblock_t *b2) const;

      void
      dump() const;

   private:
      unsigned num_parents;
      elk_bblock_t **parents;
   };
}
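
/* Usage sketch (illustrative, assuming intersect() returns the closest
 * common dominator of its two arguments): a pass can use the analysis to
 * find a single block that dominates both arms of an if/else region:
 *
 *    elk::idom_tree idom(shader);   // 'shader' is a hypothetical
 *                                   // elk_backend_shader pointer
 *    elk_bblock_t *dom = idom.intersect(then_block, else_block);
 */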
#endif