/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_cfg.h"
#include "brw_shader.h"

/** @file brw_cfg.cpp
 *
 * Walks the shader instructions generated and creates a set of basic
 * blocks with successor/predecessor edges connecting them.
 */

using namespace brw;

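/* Pop the most recently pushed block off of a stack built from bblock_link
 * nodes (see push_stack() below).
 */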
static bblock_t *
pop_stack(exec_list *list)
{
   bblock_link *link = (bblock_link *)list->get_tail();
   bblock_t *block = link->block;
   link->link.remove();

   return block;
}

static exec_node *
link(void *mem_ctx, bblock_t *block, enum bblock_link_kind kind)
{
   bblock_link *l = new(mem_ctx) bblock_link(block, kind);
   return &l->link;
}

void
push_stack(exec_list *list, void *mem_ctx, bblock_t *block)
{
   /* The kind of the link is immaterial, but we need to provide one since
    * this is (ab)using the edge data structure in order to implement a stack.
    */
   list->push_tail(link(mem_ctx, block, bblock_link_logical));
}

bblock_t::bblock_t(cfg_t *cfg) :
   cfg(cfg), start_ip(0), end_ip(0), end_ip_delta(0), num(0)
{
   instructions.make_empty();
   parents.make_empty();
   children.make_empty();
}

void
bblock_t::add_successor(void *mem_ctx, bblock_t *successor,
                        enum bblock_link_kind kind)
{
   successor->parents.push_tail(::link(mem_ctx, this, kind));
   children.push_tail(::link(mem_ctx, successor, kind));
}

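/* Returns true if "this" appears in "block"'s predecessor (or, below,
 * successor) list through a link at least as strong as "kind": a query for
 * bblock_link_physical matches both logical and physical links, while a query
 * for bblock_link_logical matches only logical links.  The "<=" comparison
 * assumes bblock_link_logical is ordered before bblock_link_physical in the
 * enum.
 */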
bool
bblock_t::is_predecessor_of(const bblock_t *block,
                            enum bblock_link_kind kind) const
{
   foreach_list_typed_safe (bblock_link, parent, link, &block->parents) {
      if (parent->block == this && parent->kind <= kind) {
         return true;
      }
   }

   return false;
}

bool
bblock_t::is_successor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const
{
   foreach_list_typed_safe (bblock_link, child, link, &block->children) {
      if (child->block == this && child->kind <= kind) {
         return true;
      }
   }

   return false;
}

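/* ends_block() and starts_block() classify the control-flow opcodes that must
 * terminate the current basic block or begin a new one; can_combine_with()
 * uses them to avoid merging blocks across structured control flow.
 */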
static bool
ends_block(const backend_instruction *inst)
{
   enum opcode op = inst->opcode;

   return op == BRW_OPCODE_IF ||
          op == BRW_OPCODE_ELSE ||
          op == BRW_OPCODE_CONTINUE ||
          op == BRW_OPCODE_BREAK ||
          op == BRW_OPCODE_DO ||
          op == BRW_OPCODE_WHILE;
}

static bool
starts_block(const backend_instruction *inst)
{
   enum opcode op = inst->opcode;

   return op == BRW_OPCODE_DO ||
          op == BRW_OPCODE_ENDIF;
}

bool
bblock_t::can_combine_with(const bblock_t *that) const
{
   if ((const bblock_t *)this->link.next != that)
      return false;

   if (ends_block(this->end()) ||
       starts_block(that->start()))
      return false;

   return true;
}

void
bblock_t::combine_with(bblock_t *that)
{
   assert(this->can_combine_with(that));
   foreach_list_typed (bblock_link, link, link, &that->parents) {
      assert(link->block == this);
   }

   this->end_ip = that->end_ip;
   this->instructions.append_list(&that->instructions);

   this->cfg->remove_block(that);
}

void
bblock_t::dump() const
{
   const backend_shader *s = this->cfg->s;

   int ip = this->start_ip;
   foreach_inst_in_block(backend_instruction, inst, this) {
      fprintf(stderr, "%5d: ", ip);
      s->dump_instruction(inst);
      ip++;
   }
}

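/* Build the CFG in a single pass over the instruction list: structured
 * control-flow instructions (IF/ELSE/ENDIF, DO/WHILE, BREAK/CONTINUE) open
 * and close basic blocks, while per-construct stacks remember the enclosing
 * if and loop so that nested control flow is wired up correctly.
 */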
cfg_t::cfg_t(const backend_shader *s, exec_list *instructions) :
   s(s)
{
   mem_ctx = ralloc_context(NULL);
   block_list.make_empty();
   blocks = NULL;
   num_blocks = 0;

   bblock_t *cur = NULL;
   int ip = 0;

   bblock_t *entry = new_block();
   bblock_t *cur_if = NULL;    /**< BB ending with IF. */
   bblock_t *cur_else = NULL;  /**< BB ending with ELSE. */
   bblock_t *cur_endif = NULL; /**< BB starting with ENDIF. */
   bblock_t *cur_do = NULL;    /**< BB starting with DO. */
   bblock_t *cur_while = NULL; /**< BB immediately following WHILE. */
   exec_list if_stack, else_stack, do_stack, while_stack;
   bblock_t *next;

   set_next_block(&cur, entry, ip);

   foreach_in_list_safe(backend_instruction, inst, instructions) {
      /* set_next_block wants the post-incremented ip */
      ip++;

      inst->exec_node::remove();

      switch (inst->opcode) {
      case BRW_OPCODE_IF:
         cur->instructions.push_tail(inst);

         /* Push our information onto a stack so we can recover from
          * nested ifs.
          */
         push_stack(&if_stack, mem_ctx, cur_if);
         push_stack(&else_stack, mem_ctx, cur_else);

         cur_if = cur;
         cur_else = NULL;
         cur_endif = NULL;

         /* Set up our immediately following block, full of "then"
          * instructions.
          */
         next = new_block();
         cur_if->add_successor(mem_ctx, next, bblock_link_logical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_ELSE:
         cur->instructions.push_tail(inst);

         cur_else = cur;

         next = new_block();
         assert(cur_if != NULL);
         cur_if->add_successor(mem_ctx, next, bblock_link_logical);
         cur_else->add_successor(mem_ctx, next, bblock_link_physical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_ENDIF: {
         if (cur->instructions.is_empty()) {
            /* New block was just created; use it. */
            cur_endif = cur;
         } else {
            cur_endif = new_block();

            cur->add_successor(mem_ctx, cur_endif, bblock_link_logical);

            set_next_block(&cur, cur_endif, ip - 1);
         }

         cur->instructions.push_tail(inst);

         if (cur_else) {
            cur_else->add_successor(mem_ctx, cur_endif, bblock_link_logical);
         } else {
            assert(cur_if != NULL);
            cur_if->add_successor(mem_ctx, cur_endif, bblock_link_logical);
         }

         assert(cur_if->end()->opcode == BRW_OPCODE_IF);
         assert(!cur_else || cur_else->end()->opcode == BRW_OPCODE_ELSE);

         /* Pop the stack so we're in the previous if/else/endif */
         cur_if = pop_stack(&if_stack);
         cur_else = pop_stack(&else_stack);
         break;
      }
      case BRW_OPCODE_DO:
         /* Push our information onto a stack so we can recover from
          * nested loops.
          */
         push_stack(&do_stack, mem_ctx, cur_do);
         push_stack(&while_stack, mem_ctx, cur_while);

         /* Set up the block just after the while.  Don't know when exactly
          * it will start, yet.
          */
         cur_while = new_block();

         if (cur->instructions.is_empty()) {
            /* New block was just created; use it. */
            cur_do = cur;
         } else {
            cur_do = new_block();

            cur->add_successor(mem_ctx, cur_do, bblock_link_logical);

            set_next_block(&cur, cur_do, ip - 1);
         }

         cur->instructions.push_tail(inst);

         /* Represent divergent execution of the loop as a pair of alternative
          * edges coming out of the DO instruction: For any physical iteration
          * of the loop a given logical thread can either start off enabled
          * (which is represented as the "next" successor), or disabled (if it
          * has reached a non-uniform exit of the loop during a previous
          * iteration, which is represented as the "cur_while" successor).
          *
          * The disabled edge will be taken by the logical thread anytime we
          * arrive at the DO instruction through a back-edge coming from a
          * conditional exit of the loop where divergent control flow started.
          *
          * This guarantees that there is a control-flow path from any
          * divergence point of the loop into the convergence point
          * (immediately past the WHILE instruction) such that it overlaps the
          * whole IP region of divergent control flow (potentially the whole
          * loop) *and* doesn't imply the execution of any instructions part
          * of the loop (since the corresponding execution mask bit will be
          * disabled for a diverging thread).
          *
          * This way we make sure that any variables that are live throughout
          * the region of divergence for an inactive logical thread are also
          * considered to interfere with any other variables assigned by
          * active logical threads within the same physical region of the
          * program, since otherwise we would risk cross-channel data
          * corruption.
          */
         next = new_block();
         cur->add_successor(mem_ctx, next, bblock_link_logical);
         cur->add_successor(mem_ctx, cur_while, bblock_link_physical);
         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_CONTINUE:
         cur->instructions.push_tail(inst);

         /* A conditional CONTINUE may start a region of divergent control
          * flow until the start of the next loop iteration (*not* until the
          * end of the loop which is why the successor is not the top-level
          * divergence point at cur_do).  The live interval of any variable
          * extending through a CONTINUE edge is guaranteed to overlap the
          * whole region of divergent execution, because any variable live-out
          * at the CONTINUE instruction will also be live-in at the top of the
          * loop, and therefore also live-out at the bottom-most point of the
          * loop which is reachable from the top (since a control flow path
          * exists from a definition of the variable through this CONTINUE
          * instruction, the top of the loop, the (reachable) bottom of the
          * loop, the top of the loop again, into a use of the variable).
          */
         assert(cur_do != NULL);
         cur->add_successor(mem_ctx, cur_do->next(), bblock_link_logical);

         next = new_block();
         if (inst->predicate)
            cur->add_successor(mem_ctx, next, bblock_link_logical);
         else
            cur->add_successor(mem_ctx, next, bblock_link_physical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_BREAK:
         cur->instructions.push_tail(inst);

         /* A conditional BREAK instruction may start a region of divergent
          * control flow until the end of the loop if the condition is
          * non-uniform, in which case the loop will execute additional
          * iterations with the present channel disabled.  We model this as a
          * control flow path from the divergence point to the convergence
          * point that overlaps the whole IP range of the loop and skips over
          * the execution of any other instructions part of the loop.
          *
          * See the DO case for additional explanation.
          */
         assert(cur_do != NULL);
         cur->add_successor(mem_ctx, cur_do, bblock_link_physical);
         cur->add_successor(mem_ctx, cur_while, bblock_link_logical);

         next = new_block();
         if (inst->predicate)
            cur->add_successor(mem_ctx, next, bblock_link_logical);
         else
            cur->add_successor(mem_ctx, next, bblock_link_physical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_WHILE:
         cur->instructions.push_tail(inst);

         assert(cur_do != NULL && cur_while != NULL);

         /* A conditional WHILE instruction may start a region of divergent
          * control flow until the end of the loop, just like the BREAK
          * instruction.  See the BREAK case for more details.  OTOH an
          * unconditional WHILE instruction is non-divergent (just like an
          * unconditional CONTINUE), and will necessarily lead to the
          * execution of an additional iteration of the loop for all enabled
          * channels, so we may skip over the divergence point at the top of
          * the loop to keep the CFG as unambiguous as possible.
          */
         if (inst->predicate) {
            cur->add_successor(mem_ctx, cur_do, bblock_link_logical);
         } else {
            cur->add_successor(mem_ctx, cur_do->next(), bblock_link_logical);
         }

         set_next_block(&cur, cur_while, ip);

         /* Pop the stack so we're in the previous loop */
         cur_do = pop_stack(&do_stack);
         cur_while = pop_stack(&while_stack);
         break;

      default:
         cur->instructions.push_tail(inst);
         break;
      }
   }

   cur->end_ip = ip - 1;

   make_block_array();
}

cfg_t::~cfg_t()
{
   ralloc_free(mem_ctx);
}

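/* Remove "block" from the CFG, splicing it out of the graph: each of its
 * predecessors is connected directly to each of its successors (without
 * duplicating existing edges), and the block array and numbering are
 * compacted.
 */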
void
cfg_t::remove_block(bblock_t *block)
{
   foreach_list_typed_safe (bblock_link, predecessor, link, &block->parents) {
      /* Remove block from all of its predecessors' successor lists. */
      foreach_list_typed_safe (bblock_link, successor, link,
                               &predecessor->block->children) {
         if (block == successor->block) {
            successor->link.remove();
            ralloc_free(successor);
         }
      }

      /* Add removed-block's successors to its predecessors' successor lists. */
      foreach_list_typed (bblock_link, successor, link, &block->children) {
         if (!successor->block->is_successor_of(predecessor->block,
                                                successor->kind)) {
            predecessor->block->children.push_tail(link(mem_ctx,
                                                        successor->block,
                                                        successor->kind));
         }
      }
   }

   foreach_list_typed_safe (bblock_link, successor, link, &block->children) {
      /* Remove block from all of its children's parents lists. */
      foreach_list_typed_safe (bblock_link, predecessor, link,
                               &successor->block->parents) {
         if (block == predecessor->block) {
            predecessor->link.remove();
            ralloc_free(predecessor);
         }
      }

      /* Add removed-block's predecessors to its successors' predecessor lists. */
      foreach_list_typed (bblock_link, predecessor, link, &block->parents) {
         if (!predecessor->block->is_predecessor_of(successor->block,
                                                    predecessor->kind)) {
            successor->block->parents.push_tail(link(mem_ctx,
                                                     predecessor->block,
                                                     predecessor->kind));
         }
      }
   }

   block->link.remove();

   for (int b = block->num; b < this->num_blocks - 1; b++) {
      this->blocks[b] = this->blocks[b + 1];
      this->blocks[b]->num = b;
   }

   this->blocks[this->num_blocks - 1]->num = this->num_blocks - 2;
   this->num_blocks--;
}

bblock_t *
cfg_t::new_block()
{
   bblock_t *block = new(mem_ctx) bblock_t(this);

   return block;
}

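/* Finish the block currently under construction (if any), recording its last
 * instruction as "ip - 1", and make "block" the current block starting at
 * "ip".
 */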
void
cfg_t::set_next_block(bblock_t **cur, bblock_t *block, int ip)
{
   if (*cur) {
      (*cur)->end_ip = ip - 1;
   }

   block->start_ip = ip;
   block->num = num_blocks++;
   block_list.push_tail(&block->link);
   *cur = block;
}

void
cfg_t::make_block_array()
{
   blocks = ralloc_array(mem_ctx, bblock_t *, num_blocks);

   int i = 0;
   foreach_block (block, this) {
      blocks[i++] = block;
   }
   assert(i == num_blocks);
}

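/* Print the CFG to stderr.  Each block is listed with its immediate dominator
 * (when the shader's dominance analysis is available), its predecessor edges
 * ("<-B" for logical links, "<~B" for physical links), its instructions, and
 * its successor edges ("->B" / "~>B").
 */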
void
cfg_t::dump()
{
   const idom_tree *idom = (s ? &s->idom_analysis.require() : NULL);

   foreach_block (block, this) {
      if (idom && idom->parent(block))
         fprintf(stderr, "START B%d IDOM(B%d)", block->num,
                 idom->parent(block)->num);
      else
         fprintf(stderr, "START B%d IDOM(none)", block->num);

      foreach_list_typed(bblock_link, link, link, &block->parents) {
         fprintf(stderr, " <%cB%d",
                 link->kind == bblock_link_logical ? '-' : '~',
                 link->block->num);
      }
      fprintf(stderr, "\n");
      if (s != NULL)
         block->dump();
      fprintf(stderr, "END B%d", block->num);
      foreach_list_typed(bblock_link, link, link, &block->children) {
         fprintf(stderr, " %c>B%d",
                 link->kind == bblock_link_logical ? '-' : '~',
                 link->block->num);
      }
      fprintf(stderr, "\n");
   }
}

/* Calculates the immediate dominator of each block, according to "A Simple,
 * Fast Dominance Algorithm" by Keith D. Cooper, Timothy J. Harvey, and Ken
 * Kennedy.
 *
 * The authors claim that for control flow graphs of sizes normally
 * encountered (less than 1000 nodes) this algorithm is significantly faster
 * than others like Lengauer-Tarjan.
 */
idom_tree::idom_tree(const backend_shader *s) :
   num_parents(s->cfg->num_blocks),
   parents(new bblock_t *[num_parents]())
{
   bool changed;

   parents[0] = s->cfg->blocks[0];

   do {
      changed = false;

      foreach_block(block, s->cfg) {
         if (block->num == 0)
            continue;

         bblock_t *new_idom = NULL;
         foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
            if (parent(parent_link->block)) {
               new_idom = (new_idom ? intersect(new_idom, parent_link->block) :
                           parent_link->block);
            }
         }

         if (parent(block) != new_idom) {
            parents[block->num] = new_idom;
            changed = true;
         }
      }
   } while (changed);
}

idom_tree::~idom_tree()
{
   delete[] parents;
}

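/* Find the nearest common dominator of two blocks by walking up the
 * (partially built) dominator tree until both paths meet.
 */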
bblock_t *
idom_tree::intersect(bblock_t *b1, bblock_t *b2) const
{
   /* Note, the comparisons here are the opposite of what the paper says
    * because we index blocks from beginning -> end (i.e. reverse post-order)
    * instead of post-order like they assume.
    */
   while (b1->num != b2->num) {
      while (b1->num > b2->num)
         b1 = parent(b1);
      while (b2->num > b1->num)
         b2 = parent(b2);
   }
   assert(b1);
   return b1;
}

void
idom_tree::dump() const
{
   printf("digraph DominanceTree {\n");
   for (unsigned i = 0; i < num_parents; i++)
      printf("\t%d -> %d\n", parents[i]->num, i);
   printf("}\n");
}

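/* Emit the CFG as a Graphviz "dot" digraph on stdout, one line per successor
 * edge, so it can be piped into dot for visualization.
 */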
void
cfg_t::dump_cfg()
{
   printf("digraph CFG {\n");
   for (int b = 0; b < num_blocks; b++) {
      bblock_t *block = this->blocks[b];

      foreach_list_typed_safe (bblock_link, child, link, &block->children) {
         printf("\t%d -> %d\n", b, child->block->num);
      }
   }
   printf("}\n");
}