• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2004 Linus Torvalds
3  */
4 
5 ///
6 // Flow simplification
7 // -------------------
8 
9 #include <string.h>
10 #include <stdarg.h>
11 #include <stdlib.h>
12 #include <stdio.h>
13 #include <stddef.h>
14 #include <assert.h>
15 
16 #include "parse.h"
17 #include "expression.h"
18 #include "linearize.h"
19 #include "simplify.h"
20 #include "flow.h"
21 #include "target.h"
22 
23 unsigned long bb_generation;
24 
25 ///
26 // remove phi-sources from a removed edge
27 //
28 // :note: It's possible to have several edges between the same BBs.
29 //	  It's common with switches but it's also possible with branches.
30 //	  This function will only remove a single phi-source per edge.
remove_phisources(struct basic_block * par,struct basic_block * old)31 int remove_phisources(struct basic_block *par, struct basic_block *old)
32 {
33 	struct instruction *insn;
34 	int changed = 0;
35 
36 	FOR_EACH_PTR(old->insns, insn) {
37 		pseudo_t phi;
38 
39 		if (!insn->bb)
40 			continue;
41 		if (insn->opcode != OP_PHI)
42 			return changed;
43 
44 		// found a phi-node in the target BB,
45 		// now look after its phi-sources.
46 		FOR_EACH_PTR(insn->phi_list, phi) {
47 			struct instruction *phisrc = phi->def;
48 
49 			if (phi == VOID)
50 				continue;
51 			assert(phisrc->phi_node == insn);
52 			if (phisrc->bb != par)
53 				continue;
54 			// found a phi-source corresponding to this edge:
55 			// remove it but avoid the recursive killing.
56 			REPLACE_CURRENT_PTR(phi, VOID);
57 			remove_use(&phisrc->src);
58 			phisrc->bb = NULL;
59 			changed |= REPEAT_CSE;
60 			// Only the first one must be removed.
61 			goto next;
62 		} END_FOR_EACH_PTR(phi);
63 next: ;
64 	} END_FOR_EACH_PTR(insn);
65 	return changed;
66 }
67 
68 ///
69 // remove all phisources but the one corresponding to the given target
remove_other_phisources(struct basic_block * bb,struct multijmp_list * list,struct basic_block * target)70 static int remove_other_phisources(struct basic_block *bb, struct multijmp_list *list, struct basic_block *target)
71 {
72 	struct multijmp *jmp;
73 	int changed = 0;
74 
75 	FOR_EACH_PTR(list, jmp) {
76 		if (jmp->target == target) {
77 			target = NULL;
78 			continue;
79 		}
80 		changed |= remove_phisources(bb, jmp->target);
81 	} END_FOR_EACH_PTR(jmp);
82 	return changed;
83 }
84 
85 /*
86  * Dammit, if we have a phi-node followed by a conditional
87  * branch on that phi-node, we should damn well be able to
88  * do something about the source. Maybe.
89  */
rewrite_branch(struct basic_block * bb,struct basic_block ** ptr,struct basic_block * old,struct basic_block * new)90 static int rewrite_branch(struct basic_block *bb,
91 	struct basic_block **ptr,
92 	struct basic_block *old,
93 	struct basic_block *new)
94 {
95 	if (*ptr != old || new == old || !bb->ep)
96 		return 0;
97 
98 	/* We might find new if-conversions or non-dominating CSEs */
99 	/* we may also create new dead cycles */
100 	repeat_phase |= REPEAT_CSE | REPEAT_CFG_CLEANUP;
101 	*ptr = new;
102 	replace_bb_in_list(&bb->children, old, new, 1);
103 	remove_bb_from_list(&old->parents, bb, 1);
104 	add_bb(&new->parents, bb);
105 	return 1;
106 }
107 
108 /*
109  * Return the known truth value of a pseudo, or -1 if
110  * it's not known.
111  */
/*
 * Return the known truth value of a pseudo, or -1 if
 * it's not known.
 */
static int pseudo_truth_value(pseudo_t pseudo)
{
	if (pseudo->type == PSEUDO_VAL)
		return pseudo->value != 0;

	if (pseudo->type == PSEUDO_REG) {
		struct instruction *def = pseudo->def;

		/* A symbol address is always considered true.. */
		if (def->opcode == OP_SYMADDR && def->target == pseudo)
			return 1;
	}
	return -1;
}
130 
131 /*
132  * Does a basic block depend on the pseudos that "src" defines?
133  */
bb_depends_on(struct basic_block * target,struct basic_block * src)134 static int bb_depends_on(struct basic_block *target, struct basic_block *src)
135 {
136 	pseudo_t pseudo;
137 
138 	FOR_EACH_PTR(src->defines, pseudo) {
139 		if (pseudo_in_list(target->needs, pseudo))
140 			return 1;
141 	} END_FOR_EACH_PTR(pseudo);
142 	return 0;
143 }
144 
145 /*
146  * This really should be handled by bb_depends_on()
147  * which efficiently check the dependence using the
148  * defines - needs liveness info. Problem is that
149  * there is no liveness done on OP_PHI & OP_PHISRC.
150  *
151  * This function add the missing dependency checks.
152  */
bb_depends_on_phi(struct basic_block * target,struct basic_block * src)153 static int bb_depends_on_phi(struct basic_block *target, struct basic_block *src)
154 {
155 	struct instruction *insn;
156 	FOR_EACH_PTR(src->insns, insn) {
157 		if (!insn->bb)
158 			continue;
159 		if (insn->opcode != OP_PHI)
160 			continue;
161 		if (pseudo_in_list(target->needs, insn->target))
162 			return 1;
163 	} END_FOR_EACH_PTR(insn);
164 	return 0;
165 }
166 
167 ///
168 // does the BB contains ignorable instructions but a final branch?
169 // :note: something could be done for phi-sources but ... we'll see.
bb_is_forwarder(struct basic_block * bb)170 static bool bb_is_forwarder(struct basic_block *bb)
171 {
172 	struct instruction *insn;
173 
174 	FOR_EACH_PTR(bb->insns, insn) {
175 		if (!insn->bb)
176 			continue;
177 		switch (insn->opcode) {
178 		case OP_NOP:
179 		case OP_INLINED_CALL:
180 			continue;
181 		case OP_CBR:
182 		case OP_BR:
183 			return true;
184 		default:
185 			goto out;
186 		}
187 	} END_FOR_EACH_PTR(insn);
188 out:
189 	return false;
190 }
191 
192 ///
193 // check if the sources of a phi-node match with the parent BBs
///
// check if the sources of a phi-node match with the parent BBs
//
// Walks the parent list and the phi-source list in lock-step:
// every live phi-source must come from the corresponding parent BB,
// and both lists must have the same length.
static bool phi_check(struct instruction *node)
{
	struct basic_block *bb;
	pseudo_t phi;

	PREPARE_PTR_LIST(node->bb->parents, bb);
	FOR_EACH_PTR(node->phi_list, phi) {
		// VOID / defless entries don't consume a parent slot
		if (phi == VOID || !phi->def)
			continue;
		if (phi->def->bb != bb)
			return false;
		NEXT_PTR_LIST(bb);
	} END_FOR_EACH_PTR(phi);
	// leftover parents mean the lists are out of sync
	if (bb)
		return false;
	FINISH_PTR_LIST(bb);
	return true;
}
212 
213 /*
214  * When we reach here, we have:
215  *  - a basic block that ends in a conditional branch and
216  *    that has no side effects apart from the pseudos it
217  *    may change.
218  *  - the phi-node that the conditional branch depends on
219  *  - full pseudo liveness information
220  *
221  * We need to check if any of the _sources_ of the phi-node
222  * may be constant, and not actually need this block at all.
223  */
/*
 * When we reach here, we have:
 *  - a basic block that ends in a conditional branch and
 *    that has no side effects apart from the pseudos it
 *    may change.
 *  - the phi-node that the conditional branch depends on
 *  - full pseudo liveness information
 *
 * We need to check if any of the _sources_ of the phi-node
 * may be constant, and not actually need this block at all.
 *
 * @bb: the BB ending in the conditional branch
 * @first: the OP_PHI the branch condition depends on
 * @second: the OP_CBR itself
 * Returns non-zero (REPEAT_* flags via rewrite_branch) when a
 * parent branch was redirected past this BB.
 */
static int try_to_simplify_bb(struct basic_block *bb, struct instruction *first, struct instruction *second)
{
	int changed = 0;
	pseudo_t phi;
	int bogus;

	/*
	 * This a due to improper dominance tracking during
	 * simplify_symbol_usage()/conversion to SSA form.
	 * No sane simplification can be done when we have this.
	 */
	bogus = !phi_check(first);

	FOR_EACH_PTR(first->phi_list, phi) {
		struct instruction *def = phi->def;
		struct basic_block *source, *target;
		pseudo_t pseudo;
		struct instruction *br;
		int cond;

		if (!def)
			continue;
		source = def->bb;	// BB providing this phi-source
		pseudo = def->src1;	// value fed into the phi
		if (!pseudo || !source)
			continue;
		br = last_instruction(source->insns);
		if (!br)
			continue;
		if (br->opcode != OP_CBR && br->opcode != OP_BR)
			continue;
		// if the source value has a known truth value, the
		// conditional branch outcome is known for this edge
		cond = pseudo_truth_value(pseudo);
		if (cond < 0)
			continue;
		target = cond ? second->bb_true : second->bb_false;
		// don't skip 'bb' if 'target' consumes anything it defines
		if (bb_depends_on(target, bb))
			continue;
		if (bb_depends_on_phi(target, bb))
			continue;
		// redirect the parent's branch straight to the final target
		changed |= rewrite_branch(source, &br->bb_true, bb, target);
		changed |= rewrite_branch(source, &br->bb_false, bb, target);
		if (changed && !bogus)
			kill_use(THIS_ADDRESS(phi));
	} END_FOR_EACH_PTR(phi);
	return changed;
}
270 
bb_has_side_effects(struct basic_block * bb)271 static int bb_has_side_effects(struct basic_block *bb)
272 {
273 	struct instruction *insn;
274 	FOR_EACH_PTR(bb->insns, insn) {
275 		if (!insn->bb)
276 			continue;
277 		switch (insn->opcode) {
278 		case OP_CALL:
279 			/* FIXME! This should take "const" etc into account */
280 			return 1;
281 
282 		case OP_LOAD:
283 			if (!insn->type)
284 				return 1;
285 			if (insn->is_volatile)
286 				return 1;
287 			continue;
288 
289 		case OP_STORE:
290 		case OP_CONTEXT:
291 			return 1;
292 
293 		case OP_ASM:
294 			/* FIXME! This should take "volatile" etc into account */
295 			return 1;
296 
297 		default:
298 			continue;
299 		}
300 	} END_FOR_EACH_PTR(insn);
301 	return 0;
302 }
303 
simplify_phi_branch(struct basic_block * bb,struct instruction * br)304 static int simplify_phi_branch(struct basic_block *bb, struct instruction *br)
305 {
306 	pseudo_t cond = br->cond;
307 	struct instruction *def;
308 
309 	if (cond->type != PSEUDO_REG)
310 		return 0;
311 	def = cond->def;
312 	if (def->bb != bb || def->opcode != OP_PHI)
313 		return 0;
314 	if (bb_has_side_effects(bb))
315 		return 0;
316 	return try_to_simplify_bb(bb, def, br);
317 }
318 
/*
 * Simplify a branch to a BB that itself ends in a conditional
 * branch on the *same* condition: the second branch's outcome is
 * already known on this edge.
 *
 * @bb: the branching BB
 * @br: its OP_CBR
 * @target_p: address of the branch slot (bb_true or bb_false)
 * @bb_true: 1 if the slot is the true edge, 0 for the false edge
 */
static int simplify_branch_branch(struct basic_block *bb, struct instruction *br,
	struct basic_block **target_p, int bb_true)
{
	struct basic_block *target = *target_p, *final;
	struct instruction *insn;
	int retval;

	if (target == bb)
		return 0;	// self-loop: leave it alone
	insn = last_instruction(target->insns);
	if (!insn || insn->opcode != OP_CBR || insn->cond != br->cond)
		return 0;
	/*
	 * Ahhah! We've found a branch to a branch on the same conditional!
	 * Now we just need to see if we can rewrite the branch..
	 */
	retval = 0;
	final = bb_true ? insn->bb_true : insn->bb_false;
	if (bb_has_side_effects(target))
		goto try_to_rewrite_target;
	if (bb_depends_on(final, target))
		goto try_to_rewrite_target;
	if (bb_depends_on_phi(final, target))
		return 0;
	// skip 'target' entirely: branch straight to 'final'
	return rewrite_branch(bb, target_p, target, final);

try_to_rewrite_target:
	/*
	 * If we're the only parent, at least we can rewrite the
	 * now-known second branch.
	 */
	if (bb_list_size(target->parents) != 1)
		return retval;	// retval is still 0 here
	convert_to_jump(insn, final);
	return 1;
}
355 
simplify_one_branch(struct basic_block * bb,struct instruction * br)356 static int simplify_one_branch(struct basic_block *bb, struct instruction *br)
357 {
358 	if (simplify_phi_branch(bb, br))
359 		return 1;
360 	return simplify_branch_branch(bb, br, &br->bb_true, 1) |
361 	       simplify_branch_branch(bb, br, &br->bb_false, 0);
362 }
363 
simplify_branch_nodes(struct entrypoint * ep)364 static int simplify_branch_nodes(struct entrypoint *ep)
365 {
366 	int changed = 0;
367 	struct basic_block *bb;
368 
369 	FOR_EACH_PTR(ep->bbs, bb) {
370 		struct instruction *br = last_instruction(bb->insns);
371 
372 		if (!br || br->opcode != OP_CBR)
373 			continue;
374 		changed |= simplify_one_branch(bb, br);
375 	} END_FOR_EACH_PTR(bb);
376 	return changed;
377 }
378 
379 /*
380  * This is called late - when we have intra-bb liveness information..
381  */
/*
 * This is called late - when we have intra-bb liveness information..
 *
 * Entry point of the flow simplification pass; returns the
 * REPEAT_* change mask from simplify_branch_nodes().
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}
386 
/* Append all users from 'src' onto the '*dst' user list. */
static inline void concat_user_list(struct pseudo_user_list *src, struct pseudo_user_list **dst)
{
	copy_ptr_list((struct ptr_list **)dst, (struct ptr_list *)src);
}
391 
/*
 * Replace every use of insn's target pseudo with 'src',
 * and transfer the user list over to 'src'.
 */
void convert_instruction_target(struct instruction *insn, pseudo_t src)
{
	pseudo_t target = insn->target;
	struct pseudo_user *pu;

	if (target == src)
		return;
	/*
	 * Go through the "insn->users" list and replace them all..
	 */
	FOR_EACH_PTR(target->users, pu) {
		if (*pu->userp == VOID)
			continue;
		assert(*pu->userp == target);
		*pu->userp = src;
	} END_FOR_EACH_PTR(pu);
	if (has_use_list(src))
		concat_user_list(target->users, &src->users);
	target->users = NULL;
}
412 
overlapping_memop(struct instruction * a,struct instruction * b)413 static int overlapping_memop(struct instruction *a, struct instruction *b)
414 {
415 	unsigned int a_start = bytes_to_bits(a->offset);
416 	unsigned int b_start = bytes_to_bits(b->offset);
417 	unsigned int a_size = a->size;
418 	unsigned int b_size = b->size;
419 
420 	if (a_size + a_start <= b_start)
421 		return 0;
422 	if (b_size + b_start <= a_start)
423 		return 0;
424 	return 1;
425 }
426 
same_memop(struct instruction * a,struct instruction * b)427 static inline int same_memop(struct instruction *a, struct instruction *b)
428 {
429 	return	a->offset == b->offset && a->size == b->size;
430 }
431 
/*
 * Two accesses through two *different* named symbols can never alias.
 * Only meaningful when both pseudos are symbols.
 */
static inline int distinct_symbols(pseudo_t a, pseudo_t b)
{
	if (a->type == PSEUDO_SYM && b->type == PSEUDO_SYM)
		return a->sym != b->sym;
	return 0;
}
440 
441 /*
442  * Return 1 if "dom" dominates the access to "pseudo"
443  * in "insn".
444  *
445  * Return 0 if it doesn't, and -1 if you don't know.
446  */
/*
 * Return 1 if "dom" dominates the access to "pseudo"
 * in "insn".
 *
 * Return 0 if it doesn't, and -1 if you don't know.
 *
 * @local: non-zero when the accessed variable is known to be
 *	   function-local (its address never escapes).
 */
int dominates(struct instruction *insn, struct instruction *dom, int local)
{
	switch (dom->opcode) {
	case OP_CALL: case OP_ENTRY:
		// a call can't touch a local, but may touch anything else
		return local ? 0 : -1;
	case OP_LOAD: case OP_STORE:
		break;		// the interesting cases, analyzed below
	case OP_ASM:
		// asm that clobbers or writes memory may alias anything
		if (dom->clobber_memory)
			return -1;
		if (dom->output_memory)
			return -1;
		return 0;
	default:
		return 0;	// non-memory instructions never dominate
	}

	if (dom->src != insn->src) {
		if (local)
			return 0;
		/* We don't think two explicitly different symbols ever alias */
		if (distinct_symbols(insn->src, dom->src))
			return 0;
		/* We could try to do some alias analysis here */
		return -1;
	}
	// same base: exact match dominates; partial overlap is unknown
	if (!same_memop(insn, dom)) {
		if (!overlapping_memop(insn, dom))
			return 0;
		return -1;
	}
	return 1;
}
480 
481 /* Kill a pseudo that is dead on exit from the bb */
482 // The context is:
483 // * the variable is not global but may have its address used (local/non-local)
484 // * the stores are only needed by others functions which would do some
485 //   loads via the escaped address
486 // We start by the terminating BB (normal exit BB + no-return/unreachable)
487 // We walkup the BB' intruction backward
488 // * we're only concerned by loads, stores & calls
489 // * if we reach a call			-> we have to stop if var is non-local
490 // * if we reach a load of our var	-> we have to stop
491 // * if we reach a store of our var	-> we can kill it, it's dead
492 // * we can ignore other stores & loads if the var is local
493 // * if we reach another store or load done via non-symbol access
494 //   (so done via some address calculation) -> we have to stop
495 // If we reach the top of the BB we can recurse into the parents BBs.
/* Kill a pseudo that is dead on exit from the bb */
// The context is:
// * the variable is not global but may have its address used (local/non-local)
// * the stores are only needed by others functions which would do some
//   loads via the escaped address
// We start by the terminating BB (normal exit BB + no-return/unreachable)
// We walkup the BB' intruction backward
// * we're only concerned by loads, stores & calls
// * if we reach a call			-> we have to stop if var is non-local
// * if we reach a load of our var	-> we have to stop
// * if we reach a store of our var	-> we can kill it, it's dead
// * we can ignore other stores & loads if the var is local
// * if we reach another store or load done via non-symbol access
//   (so done via some address calculation) -> we have to stop
// If we reach the top of the BB we can recurse into the parents BBs.
static void kill_dead_stores_bb(pseudo_t pseudo, unsigned long generation, struct basic_block *bb, int local)
{
	struct instruction *insn;
	struct basic_block *parent;

	// generation marking prevents re-visiting BBs in cyclic CFGs
	if (bb->generation == generation)
		return;
	bb->generation = generation;
	FOR_EACH_PTR_REVERSE(bb->insns, insn) {
		if (!insn->bb)
			continue;
		switch (insn->opcode) {
		case OP_LOAD:
			if (insn->src == pseudo)
				return;		// value still read: stores above are live
			break;
		case OP_STORE:
			if (insn->src == pseudo) {
				kill_instruction_force(insn);	// dead store
				continue;
			}
			break;
		case OP_CALL:
			if (!local)
				return;		// escaped address: the call may load it
			/* fallthrough: calls can't touch a local var */
		default:
			continue;
		}
		// non-symbol access might alias our escaped variable
		if (!local && insn->src->type != PSEUDO_SYM)
			return;
	} END_FOR_EACH_PTR_REVERSE(insn);

	// reached the top: continue in parents that have no other path out
	FOR_EACH_PTR(bb->parents, parent) {
		if (bb_list_size(parent->children) > 1)
			continue;
		kill_dead_stores_bb(pseudo, generation, parent, local);
	} END_FOR_EACH_PTR(parent);
}
534 
check_access(struct instruction * insn)535 void check_access(struct instruction *insn)
536 {
537 	pseudo_t pseudo = insn->src;
538 
539 	if (insn->bb && pseudo->type == PSEUDO_SYM) {
540 		int offset = insn->offset, bit = bytes_to_bits(offset) + insn->size;
541 		struct symbol *sym = pseudo->sym;
542 
543 		if (sym->bit_size > 0 && (offset < 0 || bit > sym->bit_size)) {
544 			if (insn->tainted)
545 				return;
546 			warning(insn->pos, "invalid access %s '%s' (%d %d)",
547 				offset < 0 ? "below" : "past the end of",
548 				show_ident(sym->ident), offset,
549 				bits_to_bytes(sym->bit_size));
550 			insn->tainted = 1;
551 		}
552 	}
553 }
554 
/* Return the first non-NULL entry on a pseudo's user list, if any. */
static struct pseudo_user *first_user(pseudo_t p)
{
	struct pseudo_user *user;

	FOR_EACH_PTR(p->users, user) {
		if (user)
			return user;
	} END_FOR_EACH_PTR(user);
	return NULL;
}
565 
/*
 * Kill stores to 'addr' that are provably dead.
 *
 * @addr: the symbol pseudo being stored to
 * @local: non-zero if the variable's address never escapes
 */
void kill_dead_stores(struct entrypoint *ep, pseudo_t addr, int local)
{
	unsigned long generation;
	struct basic_block *bb;

	switch (pseudo_user_list_size(addr->users)) {
	case 0:
		return;		// no users at all: nothing to kill
	case 1:
		// single user that is a store to a local: trivially dead
		if (local) {
			struct pseudo_user *pu = first_user(addr);
			struct instruction *insn = pu->insn;
			if (insn->opcode == OP_STORE) {
				kill_instruction_force(insn);
				return;
			}
		}
		/* fallthrough: do the full backward walk */
	default:
		break;
	}

	generation = ++bb_generation;
	// start from every terminating BB (no children) and walk backwards
	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb->children)
			continue;
		kill_dead_stores_bb(addr, generation, bb, local);
	} END_FOR_EACH_PTR(bb);
}
594 
mark_bb_reachable(struct basic_block * bb,unsigned long generation)595 static void mark_bb_reachable(struct basic_block *bb, unsigned long generation)
596 {
597 	struct basic_block *child;
598 
599 	if (bb->generation == generation)
600 		return;
601 	bb->generation = generation;
602 	FOR_EACH_PTR(bb->children, child) {
603 		mark_bb_reachable(child, generation);
604 	} END_FOR_EACH_PTR(child);
605 }
606 
kill_defs(struct instruction * insn)607 static void kill_defs(struct instruction *insn)
608 {
609 	pseudo_t target = insn->target;
610 
611 	if (!has_use_list(target))
612 		return;
613 	if (target->def != insn)
614 		return;
615 
616 	convert_instruction_target(insn, VOID);
617 }
618 
/*
 * Kill a whole BB: force-kill all its instructions and unlink it
 * from both its children and its parents.
 */
void kill_bb(struct basic_block *bb)
{
	struct instruction *insn;
	struct basic_block *child, *parent;

	FOR_EACH_PTR(bb->insns, insn) {
		if (!insn->bb)
			continue;
		kill_instruction_force(insn);
		kill_defs(insn);
		/*
		 * We kill unreachable instructions even if they
		 * otherwise aren't "killable" (e.g. volatile loads)
		 */
	} END_FOR_EACH_PTR(insn);
	bb->insns = NULL;

	// detach from the CFG in both directions
	FOR_EACH_PTR(bb->children, child) {
		remove_bb_from_list(&child->parents, bb, 0);
	} END_FOR_EACH_PTR(child);
	bb->children = NULL;

	FOR_EACH_PTR(bb->parents, parent) {
		remove_bb_from_list(&parent->children, bb, 0);
	} END_FOR_EACH_PTR(parent);
	bb->parents = NULL;
}
646 
/*
 * Remove every BB that is not reachable from the entrypoint.
 */
void kill_unreachable_bbs(struct entrypoint *ep)
{
	struct basic_block *bb;
	unsigned long generation = ++bb_generation;

	mark_bb_reachable(ep->entry->bb, generation);
	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb->generation == generation)
			continue;
		/* Mark it as being dead */
		kill_bb(bb);
		bb->ep = NULL;
		// safe: DELETE_CURRENT_PTR only nulls the current slot
		DELETE_CURRENT_PTR(bb);
	} END_FOR_EACH_PTR(bb);
	PACK_PTR_LIST(&ep->bbs);	// compact the nulled-out entries
}
663 
/*
 * Redirect the terminating branch of 'bb' from 'old' to 'new'.
 * Returns non-zero when an edge was actually rewritten.
 */
static int rewrite_parent_branch(struct basic_block *bb, struct basic_block *old, struct basic_block *new)
{
	int changed = 0;
	struct instruction *insn = last_instruction(bb->insns);

	if (!insn)
		return 0;

	/* Infinite loops: let's not "optimize" them.. */
	if (old == new)
		return 0;

	switch (insn->opcode) {
	case OP_CBR:
		changed |= rewrite_branch(bb, &insn->bb_false, old, new);
		/* fall through */
	case OP_BR:
		changed |= rewrite_branch(bb, &insn->bb_true, old, new);
		// the caller promised 'bb' branches to 'old'
		assert(changed);
		return changed;
	case OP_SWITCH: {
		struct multijmp *jmp;
		FOR_EACH_PTR(insn->multijmp_list, jmp) {
			changed |= rewrite_branch(bb, &jmp->target, old, new);
		} END_FOR_EACH_PTR(jmp);
		assert(changed);
		return changed;
	}
	default:
		return 0;	// not a branch we know how to rewrite
	}
}
696 
rewrite_branch_bb(struct basic_block * bb,struct instruction * br)697 static struct basic_block * rewrite_branch_bb(struct basic_block *bb, struct instruction *br)
698 {
699 	struct basic_block *parent;
700 	struct basic_block *target = br->bb_true;
701 
702 	if (br->opcode == OP_CBR) {
703 		pseudo_t cond = br->cond;
704 		if (cond->type != PSEUDO_VAL)
705 			return NULL;
706 		target = cond->value ? target : br->bb_false;
707 	}
708 
709 	/*
710 	 * We can't do FOR_EACH_PTR() here, because the parent list
711 	 * may change when we rewrite the parent.
712 	 */
713 	while ((parent = first_basic_block(bb->parents)) != NULL) {
714 		if (!rewrite_parent_branch(parent, bb, target))
715 			return NULL;
716 	}
717 	return target;
718 }
719 
vrfy_bb_in_list(struct basic_block * bb,struct basic_block_list * list)720 static void vrfy_bb_in_list(struct basic_block *bb, struct basic_block_list *list)
721 {
722 	if (bb) {
723 		struct basic_block *tmp;
724 		int no_bb_in_list = 0;
725 
726 		FOR_EACH_PTR(list, tmp) {
727 			if (bb == tmp)
728 				return;
729 		} END_FOR_EACH_PTR(tmp);
730 		assert(no_bb_in_list);
731 	}
732 }
733 
vrfy_parents(struct basic_block * bb)734 static void vrfy_parents(struct basic_block *bb)
735 {
736 	struct basic_block *tmp;
737 	FOR_EACH_PTR(bb->parents, tmp) {
738 		vrfy_bb_in_list(bb, tmp->children);
739 	} END_FOR_EACH_PTR(tmp);
740 }
741 
/*
 * Verify that every branch target of 'bb' is on its child list,
 * and that every child lists 'bb' as a parent.
 */
static void vrfy_children(struct basic_block *bb)
{
	struct basic_block *tmp;
	struct instruction *br = last_instruction(bb->insns);

	if (!br) {
		// an empty BB must not claim any children
		assert(!bb->children);
		return;
	}
	switch (br->opcode) {
		struct multijmp *jmp;
	case OP_CBR:
		vrfy_bb_in_list(br->bb_false, bb->children);
		/* fall through */
	case OP_BR:
		vrfy_bb_in_list(br->bb_true, bb->children);
		break;
	case OP_SWITCH:
	case OP_COMPUTEDGOTO:
		FOR_EACH_PTR(br->multijmp_list, jmp) {
			vrfy_bb_in_list(jmp->target, bb->children);
		} END_FOR_EACH_PTR(jmp);
		break;
	default:
		break;
	}

	// and the reverse direction: children must know their parent
	FOR_EACH_PTR(bb->children, tmp) {
		vrfy_bb_in_list(bb, tmp->parents);
	} END_FOR_EACH_PTR(tmp);
}
773 
/* Verify both directions of a BB's CFG linkage. */
static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}
779 
vrfy_flow(struct entrypoint * ep)780 void vrfy_flow(struct entrypoint *ep)
781 {
782 	struct basic_block *bb;
783 	struct basic_block *entry = ep->entry->bb;
784 
785 	FOR_EACH_PTR(ep->bbs, bb) {
786 		if (bb == entry)
787 			entry = NULL;
788 		vrfy_bb_flow(bb);
789 	} END_FOR_EACH_PTR(bb);
790 	assert(!entry);
791 }
792 
793 ///
794 // change a switch or a conditional branch into a branch
///
// change a switch or a conditional branch into a branch
//
// @insn: the OP_CBR or OP_SWITCH to convert
// @target: the single destination to keep
// :return: the REPEAT_* change mask (also OR'ed into repeat_phase)
int convert_to_jump(struct instruction *insn, struct basic_block *target)
{
	struct basic_block *bb = insn->bb;
	struct basic_block *child;
	int changed = REPEAT_CSE;

	// first drop the phi-sources feeding the edges we're removing
	switch (insn->opcode) {
	case OP_CBR:
		changed |= remove_phisources(insn->bb, insn->bb_true == target ? insn->bb_false : insn->bb_true);
		break;
	case OP_SWITCH:
		changed |= remove_other_phisources(insn->bb, insn->multijmp_list, target);
		break;
	}
	// rewrite the instruction itself into an unconditional OP_BR
	kill_use(&insn->cond);
	insn->bb_true = target;
	insn->bb_false = NULL;
	insn->cond = NULL;
	insn->size = 0;
	insn->opcode = OP_BR;

	// prune all child edges except the first one going to 'target'
	FOR_EACH_PTR(bb->children, child) {
		if (child == target) {
			target = NULL;	// leave first occurence
			continue;
		}
		DELETE_CURRENT_PTR(child);
		remove_bb_from_list(&child->parents, bb, 1);
		changed |= REPEAT_CFG_CLEANUP;
	} END_FOR_EACH_PTR(child);
	PACK_PTR_LIST(&bb->children);
	repeat_phase |= changed;
	return changed;
}
829 
retarget_parents(struct basic_block * bb,struct basic_block * target)830 static int retarget_parents(struct basic_block *bb, struct basic_block *target)
831 {
832 	struct basic_block *parent;
833 
834 	/*
835 	 * We can't do FOR_EACH_PTR() here, because the parent list
836 	 * may change when we rewrite the parent.
837 	 */
838 	while ((parent = first_basic_block(bb->parents))) {
839 		if (!rewrite_parent_branch(parent, bb, target))
840 			return 0;
841 	}
842 	kill_bb(bb);
843 	return REPEAT_CFG_CLEANUP;
844 }
845 
/*
 * Drop a phi-source in 'top' that would conflict once 'top' and
 * 'bot' are merged: if its phi-node also receives a source from
 * 'bot', the 'top' source becomes redundant.
 */
static void remove_merging_phisrc(struct instruction *insn, struct basic_block *bot)
{
	struct instruction *node = insn->phi_node;
	pseudo_t phi;

	// orphan phi-source: its phi-node is already gone
	if (!node) {
		kill_instruction(insn);
		return;
	}

	FOR_EACH_PTR(node->phi_list, phi) {
		struct instruction *phisrc;

		if (phi == VOID)
			continue;
		phisrc = phi->def;
		if (phisrc->bb == bot) {
			// 'bot' provides a source too: the one in 'top' loses
			kill_instruction(insn);
			return;
		}
	} END_FOR_EACH_PTR(phi);
}
868 
/*
 * When merging 'top' into a phi-containing BB, a phi-node whose
 * source comes from 'top' becomes trivial: replace its users by the
 * source value and kill both the source and the phi-node.
 */
static void remove_merging_phi(struct basic_block *top, struct instruction *insn)
{
	pseudo_t phi;

	FOR_EACH_PTR(insn->phi_list, phi) {
		struct instruction *def;

		if (phi == VOID)
			continue;

		def = phi->def;
		if (def->bb != top)
			continue;

		// forward the phi-source value directly to all users
		convert_instruction_target(insn, def->src);
		kill_instruction(def);
		kill_instruction(insn);
	} END_FOR_EACH_PTR(phi);
}
888 
889 ///
890 // merge two BBs
891 // @top: the first BB to be merged
892 // @bot: the second BB to be merged
///
// merge two BBs
// @top: the first BB to be merged
// @bot: the second BB to be merged
// :return: REPEAT_CFG_CLEANUP on success, 0 when top == bot
static int merge_bb(struct basic_block *top, struct basic_block *bot)
{
	struct instruction *insn;
	struct basic_block *bb;

	if (top == bot)
		return 0;

	// 'top' inherits 'bot's children; 'bot' is fully detached
	top->children = bot->children;
	bot->children = NULL;
	bot->parents = NULL;

	FOR_EACH_PTR(top->children, bb) {
		replace_bb_in_list(&bb->parents, bot, top, 1);
	} END_FOR_EACH_PTR(bb);

	// phi-sources in 'top' that conflict with ones in 'bot' must go
	FOR_EACH_PTR(top->insns, insn) {
		if (!insn->bb)
			continue;
		if (insn->opcode != OP_PHISOURCE)
			continue;
		remove_merging_phisrc(insn, bot);
	} END_FOR_EACH_PTR(insn);

	// drop 'top's terminating branch, then append 'bot's instructions
	kill_instruction(delete_last_instruction(&top->insns));
	FOR_EACH_PTR(bot->insns, insn) {
		if (!insn->bb)
			continue;
		assert(insn->bb == bot);
		switch (insn->opcode) {
		case OP_PHI:
			// phis fed from 'top' become trivial after the merge
			remove_merging_phi(top, insn);
			continue;
		}
		insn->bb = top;
		add_instruction(&top->insns, insn);
	} END_FOR_EACH_PTR(insn);
	bot->insns = NULL;
	bot->ep = NULL;
	return REPEAT_CFG_CLEANUP;
}
934 
935 ///
936 // early simplification of the CFG
937 // Three things are done here:
938 //    # inactive BB are removed
939 //    # branches to a 'forwarder' BB are redirected to the forwardee.
940 //    # merge single-child/single-parent BBs.
///
// early simplification of the CFG
// Three things are done here:
//    # inactive BB are removed
//    # branches to a 'forwarder' BB are redirected to the forwardee.
//    # merge single-child/single-parent BBs.
int simplify_cfg_early(struct entrypoint *ep)
{
	struct basic_block *bb;
	int changed = 0;

	// reverse order so a chain of forwarders collapses in one pass
	FOR_EACH_PTR_REVERSE(ep->bbs, bb) {
		struct instruction *insn;
		struct basic_block *tgt;

		// inactive BB: already detached from the entrypoint
		if (!bb->ep) {
			DELETE_CURRENT_PTR(bb);
			changed = REPEAT_CFG_CLEANUP;
			continue;
		}

		insn = last_instruction(bb->insns);
		if (!insn)
			continue;
		switch (insn->opcode) {
		case OP_BR:
			tgt = insn->bb_true;
			if (bb_is_forwarder(bb))
				changed |= retarget_parents(bb, tgt);
			else if (bb_list_size(tgt->parents) == 1)
				// we are the target's only parent: merge
				changed |= merge_bb(bb, tgt);
			break;
		}
	} END_FOR_EACH_PTR_REVERSE(bb);
	return changed;
}
971 
/*
 * Late CFG packing: bypass trivial branch-only BBs and merge BBs
 * that form a single-parent/single-child pair.
 */
void pack_basic_blocks(struct entrypoint *ep)
{
	struct basic_block *bb;

	/* See if we can merge a bb into another one.. */
	FOR_EACH_PTR(ep->bbs, bb) {
		struct instruction *first;
		struct basic_block *parent, *child, *last;

		if (!bb_reachable(bb))
			continue;

		/*
		 * Just a branch?
		 */
		FOR_EACH_PTR(bb->insns, first) {
			if (!first->bb)
				continue;
			switch (first->opcode) {
			case OP_NOP:
			case OP_INLINED_CALL:
				continue;	// ignorable padding
			case OP_CBR:
			case OP_BR: {
				// branch-only BB: route the parents around it
				struct basic_block *replace;
				replace = rewrite_branch_bb(bb, first);
				if (replace) {
					kill_bb(bb);
					goto no_merge;
				}
			}
			/* fallthrough */
			default:
				goto out;	// real work: stop scanning
			}
		} END_FOR_EACH_PTR(first);

out:
		/*
		 * See if we only have one parent..
		 */
		last = NULL;
		FOR_EACH_PTR(bb->parents, parent) {
			if (last) {
				if (last != parent)
					goto no_merge;
				continue;	// duplicate edge to same parent is fine
			}
			last = parent;
		} END_FOR_EACH_PTR(parent);

		parent = last;
		if (!parent || parent == bb)
			continue;

		/*
		 * Goodie. See if the parent can merge..
		 */
		FOR_EACH_PTR(parent->children, child) {
			if (child != bb)
				goto no_merge;	// parent has other children
		} END_FOR_EACH_PTR(child);

		repeat_phase |= merge_bb(parent, bb);

	no_merge:
		/* nothing to do */;
	} END_FOR_EACH_PTR(bb);
}
1041 
1042 
1043