1 /*
2  * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that: (1) source code distributions
7  * retain the above copyright notice and this paragraph in its entirety, (2)
8  * distributions including binary code include the above copyright notice and
9  * this paragraph in its entirety in the documentation or other materials
10  * provided with the distribution, and (3) all advertising materials mentioning
11  * features or use of this software display the following acknowledgement:
12  * ``This product includes software developed by the University of California,
13  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14  * the University nor the names of its contributors may be used to endorse
15  * or promote products derived from this software without specific prior
16  * written permission.
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20  *
21  *  Optimization module for tcpdump intermediate representation.
22  */
23 #ifndef lint
24 static const char rcsid[] _U_ =
25     "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.85.2.3 2007/09/12 21:29:45 guy Exp $ (LBL)";
26 #endif
27 
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31 
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <memory.h>
35 #include <string.h>
36 
37 #include <errno.h>
38 
39 #include "pcap-int.h"
40 
41 #include "gencode.h"
42 
43 #ifdef HAVE_OS_PROTO_H
44 #include "os-proto.h"
45 #endif
46 
47 #ifdef BDEBUG
48 extern int dflag;
49 #endif
50 
51 #if defined(MSDOS) && !defined(__DJGPP__)
52 extern int _w32_ffs (int mask);
53 #define ffs _w32_ffs
54 #endif
55 
56 /*
57  * Represents a deleted instruction.
58  */
59 #define NOP -1
60 
61 /*
62  * Register numbers for use-def values.
63  * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
64  * location.  A_ATOM is the accumulator and X_ATOM is the index
65  * register.
66  */
67 #define A_ATOM BPF_MEMWORDS
68 #define X_ATOM (BPF_MEMWORDS+1)
69 
70 /*
71  * This define is used to represent *both* the accumulator and
72  * x register in use-def computations.
73  * Currently, the use-def code assumes only one definition per instruction.
74  */
75 #define AX_ATOM N_ATOMS
76 
77 /*
78  * A flag to indicate that further optimization is needed.
79  * Iterative passes are continued until a given pass yields no
80  * branch movement.
81  */
82 static int done;
83 
84 /*
85  * A block is marked if and only if its mark equals the current mark.
86  * Rather than traverse the code array, marking each item, 'cur_mark' is
87  * incremented.  This automatically makes each element unmarked.
88  */
89 static int cur_mark;
90 #define isMarked(p) ((p)->mark == cur_mark)
91 #define unMarkAll() cur_mark += 1
92 #define Mark(p) ((p)->mark = cur_mark)
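/*
 * Illustrative sketch of how the marking macros are typically used (not
 * called from anywhere; the variable 'b' is hypothetical):
 *
 *	unMarkAll();	suppose cur_mark becomes 5
 *	Mark(b);	b->mark = 5, so isMarked(b) is now true
 *	unMarkAll();	cur_mark becomes 6; isMarked(b) is false again
 *
 * Bumping cur_mark therefore "clears" every mark in constant time,
 * without walking the block array.
 */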
93 
94 static void opt_init(struct block *);
95 static void opt_cleanup(void);
96 
97 static void make_marks(struct block *);
98 static void mark_code(struct block *);
99 
100 static void intern_blocks(struct block *);
101 
102 static int eq_slist(struct slist *, struct slist *);
103 
104 static void find_levels_r(struct block *);
105 
106 static void find_levels(struct block *);
107 static void find_dom(struct block *);
108 static void propedom(struct edge *);
109 static void find_edom(struct block *);
110 static void find_closure(struct block *);
111 static int atomuse(struct stmt *);
112 static int atomdef(struct stmt *);
113 static void compute_local_ud(struct block *);
114 static void find_ud(struct block *);
115 static void init_val(void);
116 static int F(int, int, int);
117 static inline void vstore(struct stmt *, int *, int, int);
118 static void opt_blk(struct block *, int);
119 static int use_conflict(struct block *, struct block *);
120 static void opt_j(struct edge *);
121 static void or_pullup(struct block *);
122 static void and_pullup(struct block *);
123 static void opt_blks(struct block *, int);
124 static inline void link_inedge(struct edge *, struct block *);
125 static void find_inedges(struct block *);
126 static void opt_root(struct block **);
127 static void opt_loop(struct block *, int);
128 static void fold_op(struct stmt *, int, int);
129 static inline struct slist *this_op(struct slist *);
130 static void opt_not(struct block *);
131 static void opt_peep(struct block *);
132 static void opt_stmt(struct stmt *, int[], int);
133 static void deadstmt(struct stmt *, struct stmt *[]);
134 static void opt_deadstores(struct block *);
135 static struct block *fold_edge(struct block *, struct edge *);
136 static inline int eq_blk(struct block *, struct block *);
137 static int slength(struct slist *);
138 static int count_blocks(struct block *);
139 static void number_blks_r(struct block *);
140 static int count_stmts(struct block *);
141 static int convert_code_r(struct block *);
142 #ifdef BDEBUG
143 static void opt_dump(struct block *);
144 #endif
145 
146 static int n_blocks;
147 struct block **blocks;
148 static int n_edges;
149 struct edge **edges;
150 
151 /*
152  * A bit vector set representation of the dominators.
153  * We round up the set size to the next power of two.
154  */
155 static int nodewords;
156 static int edgewords;
157 struct block **levels;
158 bpf_u_int32 *space;
159 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
160 /*
161  * True if a is in uset {p}
162  */
163 #define SET_MEMBER(p, a) \
164 ((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))
165 
166 /*
167  * Add 'a' to uset p.
168  */
169 #define SET_INSERT(p, a) \
170 (p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))
171 
172 /*
173  * Delete 'a' from uset p.
174  */
175 #define SET_DELETE(p, a) \
176 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
177 
178 /*
179  * a := a intersect b
180  */
181 #define SET_INTERSECT(a, b, n)\
182 {\
183 	register bpf_u_int32 *_x = a, *_y = b;\
184 	register int _n = n;\
185 	while (--_n >= 0) *_x++ &= *_y++;\
186 }
187 
188 /*
189  * a := a - b
190  */
191 #define SET_SUBTRACT(a, b, n)\
192 {\
193 	register bpf_u_int32 *_x = a, *_y = b;\
194 	register int _n = n;\
195 	while (--_n >= 0) *_x++ &=~ *_y++;\
196 }
197 
198 /*
199  * a := a union b
200  */
201 #define SET_UNION(a, b, n)\
202 {\
203 	register bpf_u_int32 *_x = a, *_y = b;\
204 	register int _n = n;\
205 	while (--_n >= 0) *_x++ |= *_y++;\
206 }
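/*
 * Example of the uset macros (illustrative only; 'b' and 'p' are
 * hypothetical blocks).  With BITS_PER_WORD == 32, block id 37 lives in
 * word 1, bit 5 of a set, so
 *
 *	SET_INSERT(b->dom, 37);
 *	if (SET_MEMBER(b->dom, 37))
 *		SET_INTERSECT(b->dom, p->dom, nodewords);
 *
 * sets bit 5 of b->dom[1], tests it, and then ANDs the whole vector,
 * word by word, with another dominator set.
 */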
207 
208 static uset all_dom_sets;
209 static uset all_closure_sets;
210 static uset all_edge_sets;
211 
212 #ifndef MAX
213 #define MAX(a,b) ((a)>(b)?(a):(b))
214 #endif
215 
216 static void
217 find_levels_r(b)
218 	struct block *b;
219 {
220 	int level;
221 
222 	if (isMarked(b))
223 		return;
224 
225 	Mark(b);
226 	b->link = 0;
227 
228 	if (JT(b)) {
229 		find_levels_r(JT(b));
230 		find_levels_r(JF(b));
231 		level = MAX(JT(b)->level, JF(b)->level) + 1;
232 	} else
233 		level = 0;
234 	b->level = level;
235 	b->link = levels[level];
236 	levels[level] = b;
237 }
238 
239 /*
240  * Level graph.  The levels go from 0 at the leaves to
241  * N_LEVELS at the root.  The levels[] array points to the
242  * first node of the level list, whose elements are linked
243  * with the 'link' field of the struct block.
244  */
245 static void
246 find_levels(root)
247 	struct block *root;
248 {
249 	memset((char *)levels, 0, n_blocks * sizeof(*levels));
250 	unMarkAll();
251 	find_levels_r(root);
252 }
253 
254 /*
255  * Find dominator relationships.
256  * Assumes graph has been leveled.
257  */
258 static void
259 find_dom(root)
260 	struct block *root;
261 {
262 	int i;
263 	struct block *b;
264 	bpf_u_int32 *x;
265 
266 	/*
267 	 * Initialize sets to contain all nodes.
268 	 */
269 	x = all_dom_sets;
270 	i = n_blocks * nodewords;
271 	while (--i >= 0)
272 		*x++ = ~0;
273 	/* Root starts off empty. */
274 	for (i = nodewords; --i >= 0;)
275 		root->dom[i] = 0;
276 
277 	/* root->level is the highest level number found. */
278 	for (i = root->level; i >= 0; --i) {
279 		for (b = levels[i]; b; b = b->link) {
280 			SET_INSERT(b->dom, b->id);
281 			if (JT(b) == 0)
282 				continue;
283 			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
284 			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
285 		}
286 	}
287 }
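/*
 * Worked example (a hypothetical four-block graph, for illustration):
 * if the root R branches to A and B, and both A and B branch to the same
 * return block X, then after find_dom()
 *
 *	R->dom = { R }		A->dom = { R, A }
 *	B->dom = { R, B }	X->dom = { R, X }
 *
 * X is not dominated by A or B because the intersection over X's two
 * predecessors removes them.
 */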
288 
289 static void
290 propedom(ep)
291 	struct edge *ep;
292 {
293 	SET_INSERT(ep->edom, ep->id);
294 	if (ep->succ) {
295 		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
296 		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
297 	}
298 }
299 
300 /*
301  * Compute edge dominators.
302  * Assumes graph has been leveled and predecessors established.
303  */
304 static void
305 find_edom(root)
306 	struct block *root;
307 {
308 	int i;
309 	uset x;
310 	struct block *b;
311 
312 	x = all_edge_sets;
313 	for (i = n_edges * edgewords; --i >= 0; )
314 		x[i] = ~0;
315 
316 	/* root->level is the highest level number found. */
317 	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
318 	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
319 	for (i = root->level; i >= 0; --i) {
320 		for (b = levels[i]; b != 0; b = b->link) {
321 			propedom(&b->et);
322 			propedom(&b->ef);
323 		}
324 	}
325 }
326 
327 /*
328  * Find the backwards transitive closure of the flow graph.  These sets
329  * are backwards in the sense that we find the set of nodes that reach
330  * a given node, not the set of nodes that can be reached by a node.
331  *
332  * Assumes graph has been leveled.
333  */
334 static void
335 find_closure(root)
336 	struct block *root;
337 {
338 	int i;
339 	struct block *b;
340 
341 	/*
342 	 * Initialize sets to contain no nodes.
343 	 */
344 	memset((char *)all_closure_sets, 0,
345 	      n_blocks * nodewords * sizeof(*all_closure_sets));
346 
347 	/* root->level is the highest level number found. */
348 	for (i = root->level; i >= 0; --i) {
349 		for (b = levels[i]; b; b = b->link) {
350 			SET_INSERT(b->closure, b->id);
351 			if (JT(b) == 0)
352 				continue;
353 			SET_UNION(JT(b)->closure, b->closure, nodewords);
354 			SET_UNION(JF(b)->closure, b->closure, nodewords);
355 		}
356 	}
357 }
358 
359 /*
360  * Return the register number that is used by s.  If A and X are both
361  * used, return AX_ATOM.  If no register is used, return -1.
362  *
363  * The implementation should probably change to an array access.
364  */
365 static int
366 atomuse(s)
367 	struct stmt *s;
368 {
369 	register int c = s->code;
370 
371 	if (c == NOP)
372 		return -1;
373 
374 	switch (BPF_CLASS(c)) {
375 
376 	case BPF_RET:
377 		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
378 			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
379 
380 	case BPF_LD:
381 	case BPF_LDX:
382 		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
383 			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;
384 
385 	case BPF_ST:
386 		return A_ATOM;
387 
388 	case BPF_STX:
389 		return X_ATOM;
390 
391 	case BPF_JMP:
392 	case BPF_ALU:
393 		if (BPF_SRC(c) == BPF_X)
394 			return AX_ATOM;
395 		return A_ATOM;
396 
397 	case BPF_MISC:
398 		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
399 	}
400 	abort();
401 	/* NOTREACHED */
402 }
403 
404 /*
405  * Return the register number that is defined by 's'.  We assume that
406  * a single stmt cannot define more than one register.  If no register
407  * is defined, return -1.
408  *
409  * The implementation should probably change to an array access.
410  */
411 static int
412 atomdef(s)
413 	struct stmt *s;
414 {
415 	if (s->code == NOP)
416 		return -1;
417 
418 	switch (BPF_CLASS(s->code)) {
419 
420 	case BPF_LD:
421 	case BPF_ALU:
422 		return A_ATOM;
423 
424 	case BPF_LDX:
425 		return X_ATOM;
426 
427 	case BPF_ST:
428 	case BPF_STX:
429 		return s->k;
430 
431 	case BPF_MISC:
432 		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
433 	}
434 	return -1;
435 }
436 
437 /*
438  * Compute the sets of registers used, defined, and killed by 'b'.
439  *
440  * "Used" means that a statement in 'b' uses the register before any
441  * statement in 'b' defines it, i.e. it uses the value left in
442  * that register by a predecessor block of this block.
443  * "Defined" means that a statement in 'b' defines it.
444  * "Killed" means that a statement in 'b' defines it before any
445  * statement in 'b' uses it, i.e. it kills the value left in that
446  * register by a predecessor block of this block.
447  */
448 static void
449 compute_local_ud(b)
450 	struct block *b;
451 {
452 	struct slist *s;
453 	atomset def = 0, use = 0, kill = 0;
454 	int atom;
455 
456 	for (s = b->stmts; s; s = s->next) {
457 		if (s->s.code == NOP)
458 			continue;
459 		atom = atomuse(&s->s);
460 		if (atom >= 0) {
461 			if (atom == AX_ATOM) {
462 				if (!ATOMELEM(def, X_ATOM))
463 					use |= ATOMMASK(X_ATOM);
464 				if (!ATOMELEM(def, A_ATOM))
465 					use |= ATOMMASK(A_ATOM);
466 			}
467 			else if (atom < N_ATOMS) {
468 				if (!ATOMELEM(def, atom))
469 					use |= ATOMMASK(atom);
470 			}
471 			else
472 				abort();
473 		}
474 		atom = atomdef(&s->s);
475 		if (atom >= 0) {
476 			if (!ATOMELEM(use, atom))
477 				kill |= ATOMMASK(atom);
478 			def |= ATOMMASK(atom);
479 		}
480 	}
481 	if (BPF_CLASS(b->s.code) == BPF_JMP) {
482 		/*
483 		 * XXX - what about RET?
484 		 */
485 		atom = atomuse(&b->s);
486 		if (atom >= 0) {
487 			if (atom == AX_ATOM) {
488 				if (!ATOMELEM(def, X_ATOM))
489 					use |= ATOMMASK(X_ATOM);
490 				if (!ATOMELEM(def, A_ATOM))
491 					use |= ATOMMASK(A_ATOM);
492 			}
493 			else if (atom < N_ATOMS) {
494 				if (!ATOMELEM(def, atom))
495 					use |= ATOMMASK(atom);
496 			}
497 			else
498 				abort();
499 		}
500 	}
501 
502 	b->def = def;
503 	b->kill = kill;
504 	b->in_use = use;
505 }
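/*
 * Small example (a hypothetical block, for illustration): for the
 * statement list
 *
 *	ldx M[1]	uses scratch slot 1, defines X
 *	txa		uses X, defines A
 *	st  M[2]	uses A, defines scratch slot 2
 *
 * the block's use set is { 1 } (the only atom read before the block
 * defines it), and both its def and kill sets are { X, A, 2 }, since each
 * of those atoms is written before any statement in the block reads it.
 */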
506 
507 /*
508  * Assume graph is already leveled.
509  */
510 static void
511 find_ud(root)
512 	struct block *root;
513 {
514 	int i, maxlevel;
515 	struct block *p;
516 
517 	/*
518 	 * root->level is the highest level number found;
519 	 * count down from there.
520 	 */
521 	maxlevel = root->level;
522 	for (i = maxlevel; i >= 0; --i)
523 		for (p = levels[i]; p; p = p->link) {
524 			compute_local_ud(p);
525 			p->out_use = 0;
526 		}
527 
528 	for (i = 1; i <= maxlevel; ++i) {
529 		for (p = levels[i]; p; p = p->link) {
530 			p->out_use |= JT(p)->in_use | JF(p)->in_use;
531 			p->in_use |= p->out_use &~ p->kill;
532 		}
533 	}
534 }
535 
536 /*
537  * These data structures are used in a Cocke and Schwartz style
538  * value numbering scheme.  Since the flowgraph is acyclic,
539  * exit values can be propagated from a node's predecessors
540  * provided it is uniquely defined.
541  */
542 struct valnode {
543 	int code;
544 	int v0, v1;
545 	int val;
546 	struct valnode *next;
547 };
548 
549 #define MODULUS 213
550 static struct valnode *hashtbl[MODULUS];
551 static int curval;
552 static int maxval;
553 
554 /* Integer constants mapped with the load immediate opcode. */
555 #define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
556 
557 struct vmapinfo {
558 	int is_const;
559 	bpf_int32 const_val;
560 };
561 
562 struct vmapinfo *vmap;
563 struct valnode *vnode_base;
564 struct valnode *next_vnode;
565 
566 static void
567 init_val()
568 {
569 	curval = 0;
570 	next_vnode = vnode_base;
571 	memset((char *)vmap, 0, maxval * sizeof(*vmap));
572 	memset((char *)hashtbl, 0, sizeof hashtbl);
573 }
574 
575 /* Because we really don't have an IR, this stuff is a little messy. */
576 static int
577 F(code, v0, v1)
578 	int code;
579 	int v0, v1;
580 {
581 	u_int hash;
582 	int val;
583 	struct valnode *p;
584 
585 	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
586 	hash %= MODULUS;
587 
588 	for (p = hashtbl[hash]; p; p = p->next)
589 		if (p->code == code && p->v0 == v0 && p->v1 == v1)
590 			return p->val;
591 
592 	val = ++curval;
593 	if (BPF_MODE(code) == BPF_IMM &&
594 	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
595 		vmap[val].const_val = v0;
596 		vmap[val].is_const = 1;
597 	}
598 	p = next_vnode++;
599 	p->val = val;
600 	p->code = code;
601 	p->v0 = v0;
602 	p->v1 = v1;
603 	p->next = hashtbl[hash];
604 	hashtbl[hash] = p;
605 
606 	return val;
607 }
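/*
 * Illustration (hypothetical value numbers): two separate "ld #4"
 * statements hash to the same valnode, so
 *
 *	v1 = F(BPF_LD|BPF_IMM|BPF_W, 4, 0L);
 *	v2 = F(BPF_LD|BPF_IMM|BPF_W, 4, 0L);
 *
 * yields v1 == v2, with vmap[v1].is_const set and const_val == 4.
 * Identical expressions sharing a value number is what lets opt_stmt()
 * and opt_peep() recognize redundant loads and known constants.
 */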
608 
609 static inline void
610 vstore(s, valp, newval, alter)
611 	struct stmt *s;
612 	int *valp;
613 	int newval;
614 	int alter;
615 {
616 	if (alter && *valp == newval)
617 		s->code = NOP;
618 	else
619 		*valp = newval;
620 }
621 
622 static void
623 fold_op(s, v0, v1)
624 	struct stmt *s;
625 	int v0, v1;
626 {
627 	bpf_u_int32 a, b;
628 
629 	a = vmap[v0].const_val;
630 	b = vmap[v1].const_val;
631 
632 	switch (BPF_OP(s->code)) {
633 	case BPF_ADD:
634 		a += b;
635 		break;
636 
637 	case BPF_SUB:
638 		a -= b;
639 		break;
640 
641 	case BPF_MUL:
642 		a *= b;
643 		break;
644 
645 	case BPF_DIV:
646 		if (b == 0)
647 			bpf_error("division by zero");
648 		a /= b;
649 		break;
650 
651 	case BPF_AND:
652 		a &= b;
653 		break;
654 
655 	case BPF_OR:
656 		a |= b;
657 		break;
658 
659 	case BPF_LSH:
660 		a <<= b;
661 		break;
662 
663 	case BPF_RSH:
664 		a >>= b;
665 		break;
666 
667 	case BPF_NEG:
668 		a = -a;
669 		break;
670 
671 	default:
672 		abort();
673 	}
674 	s->k = a;
675 	s->code = BPF_LD|BPF_IMM;
676 	done = 0;
677 }
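/*
 * Example of the effect (illustrative): if the accumulator is known to
 * hold the constant 6 and the current statement is "and #3", fold_op()
 * computes 6 & 3 and rewrites the statement in place as "ld #2"; later
 * passes can then delete the now-redundant earlier load.
 */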
678 
679 static inline struct slist *
680 this_op(s)
681 	struct slist *s;
682 {
683 	while (s != 0 && s->s.code == NOP)
684 		s = s->next;
685 	return s;
686 }
687 
688 static void
689 opt_not(b)
690 	struct block *b;
691 {
692 	struct block *tmp = JT(b);
693 
694 	JT(b) = JF(b);
695 	JF(b) = tmp;
696 }
697 
698 static void
699 opt_peep(b)
700 	struct block *b;
701 {
702 	struct slist *s;
703 	struct slist *next, *last;
704 	int val;
705 
706 	s = b->stmts;
707 	if (s == 0)
708 		return;
709 
710 	last = s;
711 	for (/*empty*/; /*empty*/; s = next) {
712 		/*
713 		 * Skip over nops.
714 		 */
715 		s = this_op(s);
716 		if (s == 0)
717 			break;	/* nothing left in the block */
718 
719 		/*
720 		 * Find the next real instruction after that one
721 		 * (skipping nops).
722 		 */
723 		next = this_op(s->next);
724 		if (next == 0)
725 			break;	/* no next instruction */
726 		last = next;
727 
728 		/*
729 		 * st  M[k]	-->	st  M[k]
730 		 * ldx M[k]		tax
731 		 */
732 		if (s->s.code == BPF_ST &&
733 		    next->s.code == (BPF_LDX|BPF_MEM) &&
734 		    s->s.k == next->s.k) {
735 			done = 0;
736 			next->s.code = BPF_MISC|BPF_TAX;
737 		}
738 		/*
739 		 * ld  #k	-->	ldx  #k
740 		 * tax			txa
741 		 */
742 		if (s->s.code == (BPF_LD|BPF_IMM) &&
743 		    next->s.code == (BPF_MISC|BPF_TAX)) {
744 			s->s.code = BPF_LDX|BPF_IMM;
745 			next->s.code = BPF_MISC|BPF_TXA;
746 			done = 0;
747 		}
748 		/*
749 		 * This is an ugly special case, but it happens
750 		 * when you say tcp[k] or udp[k] where k is a constant.
751 		 */
752 		if (s->s.code == (BPF_LD|BPF_IMM)) {
753 			struct slist *add, *tax, *ild;
754 
755 			/*
756 			 * Check that X isn't used on exit from this
757 			 * block (which the optimizer might cause).
758 			 * We know the code generator won't generate
759 			 * any local dependencies.
760 			 */
761 			if (ATOMELEM(b->out_use, X_ATOM))
762 				continue;
763 
764 			/*
765 			 * Check that the instruction following the ldi
766 			 * is an addx, or it's an ldxms with an addx
767 			 * following it (with 0 or more nops between the
768 			 * ldxms and addx).
769 			 */
770 			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
771 				add = next;
772 			else
773 				add = this_op(next->next);
774 			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
775 				continue;
776 
777 			/*
778 			 * Check that a tax follows that (with 0 or more
779 			 * nops between them).
780 			 */
781 			tax = this_op(add->next);
782 			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
783 				continue;
784 
785 			/*
786 			 * Check that an ild follows that (with 0 or more
787 			 * nops between them).
788 			 */
789 			ild = this_op(tax->next);
790 			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
791 			    BPF_MODE(ild->s.code) != BPF_IND)
792 				continue;
793 			/*
794 			 * We want to turn this sequence:
795 			 *
796 			 * (004) ldi     #0x2		{s}
797 			 * (005) ldxms   [14]		{next}  -- optional
798 			 * (006) addx			{add}
799 			 * (007) tax			{tax}
800 			 * (008) ild     [x+0]		{ild}
801 			 *
802 			 * into this sequence:
803 			 *
804 			 * (004) nop
805 			 * (005) ldxms   [14]
806 			 * (006) nop
807 			 * (007) nop
808 			 * (008) ild     [x+2]
809 			 *
810 			 * XXX We need to check that X is not
811 			 * subsequently used, because we want to change
812 			 * what'll be in it after this sequence.
813 			 *
814 			 * We know we can eliminate the accumulator
815 			 * modifications earlier in the sequence since
816 			 * it is defined by the last stmt of this sequence
817 			 * (i.e., the last statement of the sequence loads
818 			 * a value into the accumulator, so we can eliminate
819 			 * earlier operations on the accumulator).
820 			 */
821 			ild->s.k += s->s.k;
822 			s->s.code = NOP;
823 			add->s.code = NOP;
824 			tax->s.code = NOP;
825 			done = 0;
826 		}
827 	}
828 	/*
829 	 * If the comparison at the end of a block is an equality
830 	 * comparison against a constant, and nobody uses the value
831 	 * we leave in the A register at the end of a block, and
832 	 * the operation preceding the comparison is an arithmetic
833 	 * operation, we can sometimes optimize it away.
834 	 */
835 	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
836 	    !ATOMELEM(b->out_use, A_ATOM)) {
837 	    	/*
838 	    	 * We can optimize away certain subtractions of the
839 	    	 * X register.
840 	    	 */
841 		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
842 			val = b->val[X_ATOM];
843 			if (vmap[val].is_const) {
844 				/*
845 				 * If we have a subtract to do a comparison,
846 				 * and the X register is a known constant,
847 				 * we can merge this value into the
848 				 * comparison:
849 				 *
850 				 * sub x  ->	nop
851 				 * jeq #y	jeq #(x+y)
852 				 */
853 				b->s.k += vmap[val].const_val;
854 				last->s.code = NOP;
855 				done = 0;
856 			} else if (b->s.k == 0) {
857 				/*
858 				 * If the X register isn't a constant,
859 				 * and the comparison in the test is
860 				 * against 0, we can compare with the
861 				 * X register, instead:
862 				 *
863 				 * sub x  ->	nop
864 				 * jeq #0	jeq x
865 				 */
866 				last->s.code = NOP;
867 				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
868 				done = 0;
869 			}
870 		}
871 		/*
872 		 * Likewise, a constant subtract can be simplified:
873 		 *
874 		 * sub #x ->	nop
875 		 * jeq #y ->	jeq #(x+y)
876 		 */
877 		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
878 			last->s.code = NOP;
879 			b->s.k += last->s.k;
880 			done = 0;
881 		}
882 		/*
883 		 * And, similarly, a constant AND can be simplified
884 		 * if we're testing against 0, i.e.:
885 		 *
886 		 * and #k	nop
887 		 * jeq #0  ->	jset #k
888 		 */
889 		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
890 		    b->s.k == 0) {
891 			b->s.k = last->s.k;
892 			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
893 			last->s.code = NOP;
894 			done = 0;
895 			opt_not(b);
896 		}
897 	}
898 	/*
899 	 * jset #0        ->   never
900 	 * jset #ffffffff ->   always
901 	 */
902 	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
903 		if (b->s.k == 0)
904 			JT(b) = JF(b);
905 		if (b->s.k == 0xffffffff)
906 			JF(b) = JT(b);
907 	}
908 	/*
909 	 * If the accumulator is a known constant, we can compute the
910 	 * comparison result.
911 	 */
912 	val = b->val[A_ATOM];
913 	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
914 		bpf_int32 v = vmap[val].const_val;
915 		switch (BPF_OP(b->s.code)) {
916 
917 		case BPF_JEQ:
918 			v = v == b->s.k;
919 			break;
920 
921 		case BPF_JGT:
922 			v = (unsigned)v > b->s.k;
923 			break;
924 
925 		case BPF_JGE:
926 			v = (unsigned)v >= b->s.k;
927 			break;
928 
929 		case BPF_JSET:
930 			v &= b->s.k;
931 			break;
932 
933 		default:
934 			abort();
935 		}
936 		if (JF(b) != JT(b))
937 			done = 0;
938 		if (v)
939 			JF(b) = JT(b);
940 		else
941 			JT(b) = JF(b);
942 	}
943 }
944 
945 /*
946  * Compute the symbolic value of the expression of 's', and update
947  * anything it defines in the value table 'val'.  If 'alter' is true,
948  * do various optimizations.  This code would be cleaner if symbolic
949  * evaluation and code transformations weren't folded together.
950  */
951 static void
952 opt_stmt(s, val, alter)
953 	struct stmt *s;
954 	int val[];
955 	int alter;
956 {
957 	int op;
958 	int v;
959 
960 	switch (s->code) {
961 
962 	case BPF_LD|BPF_ABS|BPF_W:
963 	case BPF_LD|BPF_ABS|BPF_H:
964 	case BPF_LD|BPF_ABS|BPF_B:
965 		v = F(s->code, s->k, 0L);
966 		vstore(s, &val[A_ATOM], v, alter);
967 		break;
968 
969 	case BPF_LD|BPF_IND|BPF_W:
970 	case BPF_LD|BPF_IND|BPF_H:
971 	case BPF_LD|BPF_IND|BPF_B:
972 		v = val[X_ATOM];
973 		if (alter && vmap[v].is_const) {
974 			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
975 			s->k += vmap[v].const_val;
976 			v = F(s->code, s->k, 0L);
977 			done = 0;
978 		}
979 		else
980 			v = F(s->code, s->k, v);
981 		vstore(s, &val[A_ATOM], v, alter);
982 		break;
983 
984 	case BPF_LD|BPF_LEN:
985 		v = F(s->code, 0L, 0L);
986 		vstore(s, &val[A_ATOM], v, alter);
987 		break;
988 
989 	case BPF_LD|BPF_IMM:
990 		v = K(s->k);
991 		vstore(s, &val[A_ATOM], v, alter);
992 		break;
993 
994 	case BPF_LDX|BPF_IMM:
995 		v = K(s->k);
996 		vstore(s, &val[X_ATOM], v, alter);
997 		break;
998 
999 	case BPF_LDX|BPF_MSH|BPF_B:
1000 		v = F(s->code, s->k, 0L);
1001 		vstore(s, &val[X_ATOM], v, alter);
1002 		break;
1003 
1004 	case BPF_ALU|BPF_NEG:
1005 		if (alter && vmap[val[A_ATOM]].is_const) {
1006 			s->code = BPF_LD|BPF_IMM;
1007 			s->k = -vmap[val[A_ATOM]].const_val;
1008 			val[A_ATOM] = K(s->k);
1009 		}
1010 		else
1011 			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
1012 		break;
1013 
1014 	case BPF_ALU|BPF_ADD|BPF_K:
1015 	case BPF_ALU|BPF_SUB|BPF_K:
1016 	case BPF_ALU|BPF_MUL|BPF_K:
1017 	case BPF_ALU|BPF_DIV|BPF_K:
1018 	case BPF_ALU|BPF_AND|BPF_K:
1019 	case BPF_ALU|BPF_OR|BPF_K:
1020 	case BPF_ALU|BPF_LSH|BPF_K:
1021 	case BPF_ALU|BPF_RSH|BPF_K:
1022 		op = BPF_OP(s->code);
1023 		if (alter) {
1024 			if (s->k == 0) {
1025 				/* don't optimize away "sub #0"
1026 				 * as it may be needed later to
1027 				 * fixup the generated math code */
1028 				if (op == BPF_ADD ||
1029 				    op == BPF_LSH || op == BPF_RSH ||
1030 				    op == BPF_OR) {
1031 					s->code = NOP;
1032 					break;
1033 				}
1034 				if (op == BPF_MUL || op == BPF_AND) {
1035 					s->code = BPF_LD|BPF_IMM;
1036 					val[A_ATOM] = K(s->k);
1037 					break;
1038 				}
1039 			}
1040 			if (vmap[val[A_ATOM]].is_const) {
1041 				fold_op(s, val[A_ATOM], K(s->k));
1042 				val[A_ATOM] = K(s->k);
1043 				break;
1044 			}
1045 		}
1046 		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
1047 		break;
1048 
1049 	case BPF_ALU|BPF_ADD|BPF_X:
1050 	case BPF_ALU|BPF_SUB|BPF_X:
1051 	case BPF_ALU|BPF_MUL|BPF_X:
1052 	case BPF_ALU|BPF_DIV|BPF_X:
1053 	case BPF_ALU|BPF_AND|BPF_X:
1054 	case BPF_ALU|BPF_OR|BPF_X:
1055 	case BPF_ALU|BPF_LSH|BPF_X:
1056 	case BPF_ALU|BPF_RSH|BPF_X:
1057 		op = BPF_OP(s->code);
1058 		if (alter && vmap[val[X_ATOM]].is_const) {
1059 			if (vmap[val[A_ATOM]].is_const) {
1060 				fold_op(s, val[A_ATOM], val[X_ATOM]);
1061 				val[A_ATOM] = K(s->k);
1062 			}
1063 			else {
1064 				s->code = BPF_ALU|BPF_K|op;
1065 				s->k = vmap[val[X_ATOM]].const_val;
1066 				done = 0;
1067 				val[A_ATOM] =
1068 					F(s->code, val[A_ATOM], K(s->k));
1069 			}
1070 			break;
1071 		}
1072 		/*
1073 		 * Check if we're doing something to an accumulator
1074 		 * that is 0, and simplify.  This may not seem like
1075 		 * much of a simplification but it could open up further
1076 		 * optimizations.
1077 		 * XXX We could also check for mul by 1, etc.
1078 		 */
1079 		if (alter && vmap[val[A_ATOM]].is_const
1080 		    && vmap[val[A_ATOM]].const_val == 0) {
1081 			if (op == BPF_ADD || op == BPF_OR) {
1082 				s->code = BPF_MISC|BPF_TXA;
1083 				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1084 				break;
1085 			}
1086 			else if (op == BPF_MUL || op == BPF_DIV ||
1087 				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1088 				s->code = BPF_LD|BPF_IMM;
1089 				s->k = 0;
1090 				vstore(s, &val[A_ATOM], K(s->k), alter);
1091 				break;
1092 			}
1093 			else if (op == BPF_NEG) {
1094 				s->code = NOP;
1095 				break;
1096 			}
1097 		}
1098 		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
1099 		break;
1100 
1101 	case BPF_MISC|BPF_TXA:
1102 		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1103 		break;
1104 
1105 	case BPF_LD|BPF_MEM:
1106 		v = val[s->k];
1107 		if (alter && vmap[v].is_const) {
1108 			s->code = BPF_LD|BPF_IMM;
1109 			s->k = vmap[v].const_val;
1110 			done = 0;
1111 		}
1112 		vstore(s, &val[A_ATOM], v, alter);
1113 		break;
1114 
1115 	case BPF_MISC|BPF_TAX:
1116 		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1117 		break;
1118 
1119 	case BPF_LDX|BPF_MEM:
1120 		v = val[s->k];
1121 		if (alter && vmap[v].is_const) {
1122 			s->code = BPF_LDX|BPF_IMM;
1123 			s->k = vmap[v].const_val;
1124 			done = 0;
1125 		}
1126 		vstore(s, &val[X_ATOM], v, alter);
1127 		break;
1128 
1129 	case BPF_ST:
1130 		vstore(s, &val[s->k], val[A_ATOM], alter);
1131 		break;
1132 
1133 	case BPF_STX:
1134 		vstore(s, &val[s->k], val[X_ATOM], alter);
1135 		break;
1136 	}
1137 }
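/*
 * Illustration of one of the transformations above (a hypothetical
 * sequence): if X is known to hold the constant 14, then
 *
 *	ldx #14
 *	ld  [x + 2]
 *
 * has its indirect load rewritten as the absolute load "ld [16]", since
 * the constant index value can be folded into the offset.
 */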
1138 
1139 static void
1140 deadstmt(s, last)
1141 	register struct stmt *s;
1142 	register struct stmt *last[];
1143 {
1144 	register int atom;
1145 
1146 	atom = atomuse(s);
1147 	if (atom >= 0) {
1148 		if (atom == AX_ATOM) {
1149 			last[X_ATOM] = 0;
1150 			last[A_ATOM] = 0;
1151 		}
1152 		else
1153 			last[atom] = 0;
1154 	}
1155 	atom = atomdef(s);
1156 	if (atom >= 0) {
1157 		if (last[atom]) {
1158 			done = 0;
1159 			last[atom]->code = NOP;
1160 		}
1161 		last[atom] = s;
1162 	}
1163 }
1164 
1165 static void
1166 opt_deadstores(b)
1167 	register struct block *b;
1168 {
1169 	register struct slist *s;
1170 	register int atom;
1171 	struct stmt *last[N_ATOMS];
1172 
1173 	memset((char *)last, 0, sizeof last);
1174 
1175 	for (s = b->stmts; s != 0; s = s->next)
1176 		deadstmt(&s->s, last);
1177 	deadstmt(&b->s, last);
1178 
1179 	for (atom = 0; atom < N_ATOMS; ++atom)
1180 		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1181 			last[atom]->code = NOP;
1182 			done = 0;
1183 		}
1184 }
1185 
1186 static void
1187 opt_blk(b, do_stmts)
1188 	struct block *b;
1189 	int do_stmts;
1190 {
1191 	struct slist *s;
1192 	struct edge *p;
1193 	int i;
1194 	bpf_int32 aval, xval;
1195 
1196 #if 0
1197 	for (s = b->stmts; s && s->next; s = s->next)
1198 		if (BPF_CLASS(s->s.code) == BPF_JMP) {
1199 			do_stmts = 0;
1200 			break;
1201 		}
1202 #endif
1203 
1204 	/*
1205 	 * Initialize the atom values.
1206 	 */
1207 	p = b->in_edges;
1208 	if (p == 0) {
1209 		/*
1210 		 * We have no predecessors, so everything is undefined
1211 		 * upon entry to this block.
1212 		 */
1213 		memset((char *)b->val, 0, sizeof(b->val));
1214 	} else {
1215 		/*
1216 		 * Inherit values from our predecessors.
1217 		 *
1218 		 * First, get the values from the predecessor along the
1219 		 * first edge leading to this node.
1220 		 */
1221 		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1222 		/*
1223 		 * Now look at all the other nodes leading to this node.
1224 		 * If, for the predecessor along that edge, a register
1225 		 * has a different value from the one we have (i.e.,
1226 		 * control paths are merging, and the merging paths
1227 		 * assign different values to that register), give the
1228 		 * register the undefined value of 0.
1229 		 */
1230 		while ((p = p->next) != NULL) {
1231 			for (i = 0; i < N_ATOMS; ++i)
1232 				if (b->val[i] != p->pred->val[i])
1233 					b->val[i] = 0;
1234 		}
1235 	}
1236 	aval = b->val[A_ATOM];
1237 	xval = b->val[X_ATOM];
1238 	for (s = b->stmts; s; s = s->next)
1239 		opt_stmt(&s->s, b->val, do_stmts);
1240 
1241 	/*
1242 	 * This is a special case: if we don't use anything from this
1243 	 * block, and we load the accumulator or index register with a
1244 	 * value that is already there, or if this block is a return,
1245 	 * eliminate all the statements.
1246 	 *
1247 	 * XXX - what if it does a store?
1248 	 *
1249 	 * XXX - why does it matter whether we use anything from this
1250 	 * block?  If the accumulator or index register doesn't change
1251 	 * its value, isn't that OK even if we use that value?
1252 	 *
1253 	 * XXX - if we load the accumulator with a different value,
1254 	 * and the block ends with a conditional branch, we obviously
1255 	 * can't eliminate it, as the branch depends on that value.
1256 	 * For the index register, the conditional branch only depends
1257 	 * on the index register value if the test is against the index
1258 	 * register value rather than a constant; if nothing uses the
1259 	 * value we put into the index register, and we're not testing
1260 	 * against the index register's value, and there aren't any
1261 	 * other problems that would keep us from eliminating this
1262 	 * block, can we eliminate it?
1263 	 */
1264 	if (do_stmts &&
1265 	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
1266 	      xval != 0 && b->val[X_ATOM] == xval) ||
1267 	     BPF_CLASS(b->s.code) == BPF_RET)) {
1268 		if (b->stmts != 0) {
1269 			b->stmts = 0;
1270 			done = 0;
1271 		}
1272 	} else {
1273 		opt_peep(b);
1274 		opt_deadstores(b);
1275 	}
1276 	/*
1277 	 * Set up values for branch optimizer.
1278 	 */
1279 	if (BPF_SRC(b->s.code) == BPF_K)
1280 		b->oval = K(b->s.k);
1281 	else
1282 		b->oval = b->val[X_ATOM];
1283 	b->et.code = b->s.code;
1284 	b->ef.code = -b->s.code;
1285 }
1286 
1287 /*
1288  * Return true if any register that is used on exit from 'succ', has
1289  * an exit value that is different from the corresponding exit value
1290  * from 'b'.
1291  */
1292 static int
1293 use_conflict(b, succ)
1294 	struct block *b, *succ;
1295 {
1296 	int atom;
1297 	atomset use = succ->out_use;
1298 
1299 	if (use == 0)
1300 		return 0;
1301 
1302 	for (atom = 0; atom < N_ATOMS; ++atom)
1303 		if (ATOMELEM(use, atom))
1304 			if (b->val[atom] != succ->val[atom])
1305 				return 1;
1306 	return 0;
1307 }
1308 
1309 static struct block *
1310 fold_edge(child, ep)
1311 	struct block *child;
1312 	struct edge *ep;
1313 {
1314 	int sense;
1315 	int aval0, aval1, oval0, oval1;
1316 	int code = ep->code;
1317 
1318 	if (code < 0) {
1319 		code = -code;
1320 		sense = 0;
1321 	} else
1322 		sense = 1;
1323 
1324 	if (child->s.code != code)
1325 		return 0;
1326 
1327 	aval0 = child->val[A_ATOM];
1328 	oval0 = child->oval;
1329 	aval1 = ep->pred->val[A_ATOM];
1330 	oval1 = ep->pred->oval;
1331 
1332 	if (aval0 != aval1)
1333 		return 0;
1334 
1335 	if (oval0 == oval1)
1336 		/*
1337 		 * The operands of the branch instructions are
1338 		 * identical, so the result is true if a true
1339 		 * branch was taken to get here, otherwise false.
1340 		 */
1341 		return sense ? JT(child) : JF(child);
1342 
1343 	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1344 		/*
1345 		 * At this point, we only know the comparison if we
1346 		 * came down the true branch, and it was an equality
1347 		 * comparison with a constant.
1348 		 *
1349 		 * I.e., if we came down the true branch, and the branch
1350 		 * was an equality comparison with a constant, we know the
1351 		 * accumulator contains that constant.  If we came down
1352 		 * the false branch, or the comparison wasn't with a
1353 		 * constant, we don't know what was in the accumulator.
1354 		 *
1355 		 * We rely on the fact that distinct constants have distinct
1356 		 * value numbers.
1357 		 */
1358 		return JF(child);
1359 
1360 	return 0;
1361 }
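/*
 * Example (illustrative): if a dominating edge tested "jeq #0x800" and we
 * reached the current edge along its true branch, a successor block that
 * performs the same "jeq #0x800" on the same accumulator value must also
 * take its true branch, so opt_j() can retarget the edge directly to
 * JT(child) and skip the redundant test.
 */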
1362 
1363 #include "ffs.h"
1364 static void
1365 opt_j(ep)
1366 	struct edge *ep;
1367 {
1368 	register int i, k;
1369 	register struct block *target;
1370 
1371 	if (JT(ep->succ) == 0)
1372 		return;
1373 
1374 	if (JT(ep->succ) == JF(ep->succ)) {
1375 		/*
1376 		 * Common branch targets can be eliminated, provided
1377 		 * there is no data dependency.
1378 		 */
1379 		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
1380 			done = 0;
1381 			ep->succ = JT(ep->succ);
1382 		}
1383 	}
1384 	/*
1385 	 * For each edge dominator that matches the successor of this
1386 	 * edge, promote the edge successor to its grandchild.
1387 	 *
1388 	 * XXX We violate the set abstraction here in favor of a reasonably
1389 	 * efficient loop.
1390 	 */
1391  top:
1392 	for (i = 0; i < edgewords; ++i) {
1393 		register bpf_u_int32 x = ep->edom[i];
1394 
1395 		while (x != 0) {
1396 			k = ffs(x) - 1;
1397 			x &=~ (1 << k);
1398 			k += i * BITS_PER_WORD;
1399 
1400 			target = fold_edge(ep->succ, edges[k]);
1401 			/*
1402 			 * Check that there is no data dependency between
1403 			 * nodes that will be violated if we move the edge.
1404 			 */
1405 			if (target != 0 && !use_conflict(ep->pred, target)) {
1406 				done = 0;
1407 				ep->succ = target;
1408 				if (JT(target) != 0)
1409 					/*
1410 					 * Start over unless we hit a leaf.
1411 					 */
1412 					goto top;
1413 				return;
1414 			}
1415 		}
1416 	}
1417 }
1418 
1419 
1420 static void
1421 or_pullup(b)
1422 	struct block *b;
1423 {
1424 	int val, at_top;
1425 	struct block *pull;
1426 	struct block **diffp, **samep;
1427 	struct edge *ep;
1428 
1429 	ep = b->in_edges;
1430 	if (ep == 0)
1431 		return;
1432 
1433 	/*
1434 	 * Make sure each predecessor loads the same value.
1435 	 * XXX why?
1436 	 */
1437 	val = ep->pred->val[A_ATOM];
1438 	for (ep = ep->next; ep != 0; ep = ep->next)
1439 		if (val != ep->pred->val[A_ATOM])
1440 			return;
1441 
1442 	if (JT(b->in_edges->pred) == b)
1443 		diffp = &JT(b->in_edges->pred);
1444 	else
1445 		diffp = &JF(b->in_edges->pred);
1446 
1447 	at_top = 1;
1448 	while (1) {
1449 		if (*diffp == 0)
1450 			return;
1451 
1452 		if (JT(*diffp) != JT(b))
1453 			return;
1454 
1455 		if (!SET_MEMBER((*diffp)->dom, b->id))
1456 			return;
1457 
1458 		if ((*diffp)->val[A_ATOM] != val)
1459 			break;
1460 
1461 		diffp = &JF(*diffp);
1462 		at_top = 0;
1463 	}
1464 	samep = &JF(*diffp);
1465 	while (1) {
1466 		if (*samep == 0)
1467 			return;
1468 
1469 		if (JT(*samep) != JT(b))
1470 			return;
1471 
1472 		if (!SET_MEMBER((*samep)->dom, b->id))
1473 			return;
1474 
1475 		if ((*samep)->val[A_ATOM] == val)
1476 			break;
1477 
1478 		/* XXX Need to check that there are no data dependencies
1479 		   between dp0 and dp1.  Currently, the code generator
1480 		   will not produce such dependencies. */
1481 		samep = &JF(*samep);
1482 	}
1483 #ifdef notdef
1484 	/* XXX This doesn't cover everything. */
1485 	for (i = 0; i < N_ATOMS; ++i)
1486 		if ((*samep)->val[i] != pred->val[i])
1487 			return;
1488 #endif
1489 	/* Pull up the node. */
1490 	pull = *samep;
1491 	*samep = JF(pull);
1492 	JF(pull) = *diffp;
1493 
1494 	/*
1495 	 * At the top of the chain, each predecessor needs to point at the
1496 	 * pulled up node.  Inside the chain, there is only one predecessor
1497 	 * to worry about.
1498 	 */
1499 	if (at_top) {
1500 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1501 			if (JT(ep->pred) == b)
1502 				JT(ep->pred) = pull;
1503 			else
1504 				JF(ep->pred) = pull;
1505 		}
1506 	}
1507 	else
1508 		*diffp = pull;
1509 
1510 	done = 0;
1511 }
1512 
1513 static void
1514 and_pullup(b)
1515 	struct block *b;
1516 {
1517 	int val, at_top;
1518 	struct block *pull;
1519 	struct block **diffp, **samep;
1520 	struct edge *ep;
1521 
1522 	ep = b->in_edges;
1523 	if (ep == 0)
1524 		return;
1525 
1526 	/*
1527 	 * Make sure each predecessor loads the same value.
1528 	 */
1529 	val = ep->pred->val[A_ATOM];
1530 	for (ep = ep->next; ep != 0; ep = ep->next)
1531 		if (val != ep->pred->val[A_ATOM])
1532 			return;
1533 
1534 	if (JT(b->in_edges->pred) == b)
1535 		diffp = &JT(b->in_edges->pred);
1536 	else
1537 		diffp = &JF(b->in_edges->pred);
1538 
1539 	at_top = 1;
1540 	while (1) {
1541 		if (*diffp == 0)
1542 			return;
1543 
1544 		if (JF(*diffp) != JF(b))
1545 			return;
1546 
1547 		if (!SET_MEMBER((*diffp)->dom, b->id))
1548 			return;
1549 
1550 		if ((*diffp)->val[A_ATOM] != val)
1551 			break;
1552 
1553 		diffp = &JT(*diffp);
1554 		at_top = 0;
1555 	}
1556 	samep = &JT(*diffp);
1557 	while (1) {
1558 		if (*samep == 0)
1559 			return;
1560 
1561 		if (JF(*samep) != JF(b))
1562 			return;
1563 
1564 		if (!SET_MEMBER((*samep)->dom, b->id))
1565 			return;
1566 
1567 		if ((*samep)->val[A_ATOM] == val)
1568 			break;
1569 
1570 		/* XXX Need to check that there are no data dependencies
1571 		   between diffp and samep.  Currently, the code generator
1572 		   will not produce such dependencies. */
1573 		samep = &JT(*samep);
1574 	}
1575 #ifdef notdef
1576 	/* XXX This doesn't cover everything. */
1577 	for (i = 0; i < N_ATOMS; ++i)
1578 		if ((*samep)->val[i] != pred->val[i])
1579 			return;
1580 #endif
1581 	/* Pull up the node. */
1582 	pull = *samep;
1583 	*samep = JT(pull);
1584 	JT(pull) = *diffp;
1585 
1586 	/*
1587 	 * At the top of the chain, each predecessor needs to point at the
1588 	 * pulled up node.  Inside the chain, there is only one predecessor
1589 	 * to worry about.
1590 	 */
1591 	if (at_top) {
1592 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1593 			if (JT(ep->pred) == b)
1594 				JT(ep->pred) = pull;
1595 			else
1596 				JF(ep->pred) = pull;
1597 		}
1598 	}
1599 	else
1600 		*diffp = pull;
1601 
1602 	done = 0;
1603 }
1604 
1605 static void
1606 opt_blks(root, do_stmts)
1607 	struct block *root;
1608 	int do_stmts;
1609 {
1610 	int i, maxlevel;
1611 	struct block *p;
1612 
1613 	init_val();
1614 	maxlevel = root->level;
1615 
1616 	find_inedges(root);
1617 	for (i = maxlevel; i >= 0; --i)
1618 		for (p = levels[i]; p; p = p->link)
1619 			opt_blk(p, do_stmts);
1620 
1621 	if (do_stmts)
1622 		/*
1623 		 * No point trying to move branches; it can't possibly
1624 		 * make a difference at this point.
1625 		 */
1626 		return;
1627 
1628 	for (i = 1; i <= maxlevel; ++i) {
1629 		for (p = levels[i]; p; p = p->link) {
1630 			opt_j(&p->et);
1631 			opt_j(&p->ef);
1632 		}
1633 	}
1634 
1635 	find_inedges(root);
1636 	for (i = 1; i <= maxlevel; ++i) {
1637 		for (p = levels[i]; p; p = p->link) {
1638 			or_pullup(p);
1639 			and_pullup(p);
1640 		}
1641 	}
1642 }
1643 
1644 static inline void
1645 link_inedge(parent, child)
1646 	struct edge *parent;
1647 	struct block *child;
1648 {
1649 	parent->next = child->in_edges;
1650 	child->in_edges = parent;
1651 }
1652 
1653 static void
1654 find_inedges(root)
1655 	struct block *root;
1656 {
1657 	int i;
1658 	struct block *b;
1659 
1660 	for (i = 0; i < n_blocks; ++i)
1661 		blocks[i]->in_edges = 0;
1662 
1663 	/*
1664 	 * Traverse the graph, adding each edge to the predecessor
1665 	 * list of its successors.  Skip the leaves (i.e. level 0).
1666 	 */
1667 	for (i = root->level; i > 0; --i) {
1668 		for (b = levels[i]; b != 0; b = b->link) {
1669 			link_inedge(&b->et, JT(b));
1670 			link_inedge(&b->ef, JF(b));
1671 		}
1672 	}
1673 }
1674 
1675 static void
1676 opt_root(b)
1677 	struct block **b;
1678 {
1679 	struct slist *tmp, *s;
1680 
1681 	s = (*b)->stmts;
1682 	(*b)->stmts = 0;
1683 	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
1684 		*b = JT(*b);
1685 
1686 	tmp = (*b)->stmts;
1687 	if (tmp != 0)
1688 		sappend(s, tmp);
1689 	(*b)->stmts = s;
1690 
1691 	/*
1692 	 * If the root node is a return, then there is no
1693 	 * point executing any statements (since the bpf machine
1694 	 * has no side effects).
1695 	 */
1696 	if (BPF_CLASS((*b)->s.code) == BPF_RET)
1697 		(*b)->stmts = 0;
1698 }
1699 
1700 static void
1701 opt_loop(root, do_stmts)
1702 	struct block *root;
1703 	int do_stmts;
1704 {
1705 
1706 #ifdef BDEBUG
1707 	if (dflag > 1) {
1708 		printf("opt_loop(root, %d) begin\n", do_stmts);
1709 		opt_dump(root);
1710 	}
1711 #endif
1712 	do {
1713 		done = 1;
1714 		find_levels(root);
1715 		find_dom(root);
1716 		find_closure(root);
1717 		find_ud(root);
1718 		find_edom(root);
1719 		opt_blks(root, do_stmts);
1720 #ifdef BDEBUG
1721 		if (dflag > 1) {
1722 			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
1723 			opt_dump(root);
1724 		}
1725 #endif
1726 	} while (!done);
1727 }
1728 
1729 /*
1730  * Optimize the filter code in its dag representation.
1731  */
1732 void
1733 bpf_optimize(rootp)
1734 	struct block **rootp;
1735 {
1736 	struct block *root;
1737 
1738 	root = *rootp;
1739 
1740 	opt_init(root);
1741 	opt_loop(root, 0);
1742 	opt_loop(root, 1);
1743 	intern_blocks(root);
1744 #ifdef BDEBUG
1745 	if (dflag > 1) {
1746 		printf("after intern_blocks()\n");
1747 		opt_dump(root);
1748 	}
1749 #endif
1750 	opt_root(rootp);
1751 #ifdef BDEBUG
1752 	if (dflag > 1) {
1753 		printf("after opt_root()\n");
1754 		opt_dump(root);
1755 	}
1756 #endif
1757 	opt_cleanup();
1758 }
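/*
 * Typical use (a sketch; pcap_compile() in gencode.c is the real caller):
 * the code generator hands the root of the block DAG to bpf_optimize(),
 * which rewrites it in place, and icode_to_fcode() then flattens the
 * result into the array form:
 *
 *	bpf_optimize(&root);
 *	fp->bf_insns = icode_to_fcode(root, &fp->bf_len);
 *
 * where 'fp' points to the caller's struct bpf_program.
 */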
1759 
1760 static void
1761 make_marks(p)
1762 	struct block *p;
1763 {
1764 	if (!isMarked(p)) {
1765 		Mark(p);
1766 		if (BPF_CLASS(p->s.code) != BPF_RET) {
1767 			make_marks(JT(p));
1768 			make_marks(JF(p));
1769 		}
1770 	}
1771 }
1772 
1773 /*
1774  * Mark code array such that isMarked(i) is true
1775  * only for nodes that are alive.
1776  */
1777 static void
1778 mark_code(p)
1779 	struct block *p;
1780 {
1781 	cur_mark += 1;
1782 	make_marks(p);
1783 }
1784 
1785 /*
1786  * True iff the two stmt lists load the same value from the packet into
1787  * the accumulator.
1788  */
1789 static int
1790 eq_slist(x, y)
1791 	struct slist *x, *y;
1792 {
1793 	while (1) {
1794 		while (x && x->s.code == NOP)
1795 			x = x->next;
1796 		while (y && y->s.code == NOP)
1797 			y = y->next;
1798 		if (x == 0)
1799 			return y == 0;
1800 		if (y == 0)
1801 			return x == 0;
1802 		if (x->s.code != y->s.code || x->s.k != y->s.k)
1803 			return 0;
1804 		x = x->next;
1805 		y = y->next;
1806 	}
1807 }
1808 
1809 static inline int
1810 eq_blk(b0, b1)
1811 	struct block *b0, *b1;
1812 {
1813 	if (b0->s.code == b1->s.code &&
1814 	    b0->s.k == b1->s.k &&
1815 	    b0->et.succ == b1->et.succ &&
1816 	    b0->ef.succ == b1->ef.succ)
1817 		return eq_slist(b0->stmts, b1->stmts);
1818 	return 0;
1819 }
1820 
1821 static void
1822 intern_blocks(root)
1823 	struct block *root;
1824 {
1825 	struct block *p;
1826 	int i, j;
1827 	int done1; /* don't shadow global */
1828  top:
1829 	done1 = 1;
1830 	for (i = 0; i < n_blocks; ++i)
1831 		blocks[i]->link = 0;
1832 
1833 	mark_code(root);
1834 
1835 	for (i = n_blocks - 1; --i >= 0; ) {
1836 		if (!isMarked(blocks[i]))
1837 			continue;
1838 		for (j = i + 1; j < n_blocks; ++j) {
1839 			if (!isMarked(blocks[j]))
1840 				continue;
1841 			if (eq_blk(blocks[i], blocks[j])) {
1842 				blocks[i]->link = blocks[j]->link ?
1843 					blocks[j]->link : blocks[j];
1844 				break;
1845 			}
1846 		}
1847 	}
1848 	for (i = 0; i < n_blocks; ++i) {
1849 		p = blocks[i];
1850 		if (JT(p) == 0)
1851 			continue;
1852 		if (JT(p)->link) {
1853 			done1 = 0;
1854 			JT(p) = JT(p)->link;
1855 		}
1856 		if (JF(p)->link) {
1857 			done1 = 0;
1858 			JF(p) = JF(p)->link;
1859 		}
1860 	}
1861 	if (!done1)
1862 		goto top;
1863 }
1864 
1865 static void
1866 opt_cleanup()
1867 {
1868 	free((void *)vnode_base);
1869 	free((void *)vmap);
1870 	free((void *)edges);
1871 	free((void *)space);
1872 	free((void *)levels);
1873 	free((void *)blocks);
1874 }
1875 
1876 /*
1877  * Return the number of stmts in 's'.
1878  */
1879 static int
1880 slength(s)
1881 	struct slist *s;
1882 {
1883 	int n = 0;
1884 
1885 	for (; s; s = s->next)
1886 		if (s->s.code != NOP)
1887 			++n;
1888 	return n;
1889 }
1890 
1891 /*
1892  * Return the number of nodes reachable by 'p'.
1893  * All nodes should be initially unmarked.
1894  */
1895 static int
1896 count_blocks(p)
1897 	struct block *p;
1898 {
1899 	if (p == 0 || isMarked(p))
1900 		return 0;
1901 	Mark(p);
1902 	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
1903 }
1904 
1905 /*
1906  * Do a depth first search on the flow graph, numbering the
1907  * basic blocks, and entering them into the 'blocks' array.
1908  */
1909 static void
1910 number_blks_r(p)
1911 	struct block *p;
1912 {
1913 	int n;
1914 
1915 	if (p == 0 || isMarked(p))
1916 		return;
1917 
1918 	Mark(p);
1919 	n = n_blocks++;
1920 	p->id = n;
1921 	blocks[n] = p;
1922 
1923 	number_blks_r(JT(p));
1924 	number_blks_r(JF(p));
1925 }
1926 
1927 /*
1928  * Return the number of stmts in the flowgraph reachable by 'p'.
1929  * The nodes should be unmarked before calling.
1930  *
1931  * Note that "stmts" means "instructions", and that this includes
1932  *
1933  *	side-effect statements in 'p' (slength(p->stmts));
1934  *
1935  *	statements in the true branch from 'p' (count_stmts(JT(p)));
1936  *
1937  *	statements in the false branch from 'p' (count_stmts(JF(p)));
1938  *
1939  *	the conditional jump itself (1);
1940  *
1941  *	an extra long jump if the true branch requires it (p->longjt);
1942  *
1943  *	an extra long jump if the false branch requires it (p->longjf).
1944  */
1945 static int
1946 count_stmts(p)
1947 	struct block *p;
1948 {
1949 	int n;
1950 
1951 	if (p == 0 || isMarked(p))
1952 		return 0;
1953 	Mark(p);
1954 	n = count_stmts(JT(p)) + count_stmts(JF(p));
1955 	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
1956 }
1957 
1958 /*
1959  * Allocate memory.  All allocation is done before optimization
1960  * is begun.  A linear bound on the size of all data structures is computed
1961  * from the total number of blocks and/or statements.
1962  */
1963 static void
1964 opt_init(root)
1965 	struct block *root;
1966 {
1967 	bpf_u_int32 *p;
1968 	int i, n, max_stmts;
1969 
1970 	/*
1971 	 * First, count the blocks, so we can malloc an array to map
1972 	 * block number to block.  Then, put the blocks into the array.
1973 	 */
1974 	unMarkAll();
1975 	n = count_blocks(root);
1976 	blocks = (struct block **)calloc(n, sizeof(*blocks));
1977 	if (blocks == NULL)
1978 		bpf_error("malloc");
1979 	unMarkAll();
1980 	n_blocks = 0;
1981 	number_blks_r(root);
1982 
1983 	n_edges = 2 * n_blocks;
1984 	edges = (struct edge **)calloc(n_edges, sizeof(*edges));
1985 	if (edges == NULL)
1986 		bpf_error("malloc");
1987 
1988 	/*
1989 	 * The number of levels is bounded by the number of nodes.
1990 	 */
1991 	levels = (struct block **)calloc(n_blocks, sizeof(*levels));
1992 	if (levels == NULL)
1993 		bpf_error("malloc");
1994 
1995 	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
1996 	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;
1997 
1998 	/* XXX */
1999 	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
2000 				 + n_edges * edgewords * sizeof(*space));
2001 	if (space == NULL)
2002 		bpf_error("malloc");
2003 	p = space;
2004 	all_dom_sets = p;
2005 	for (i = 0; i < n; ++i) {
2006 		blocks[i]->dom = p;
2007 		p += nodewords;
2008 	}
2009 	all_closure_sets = p;
2010 	for (i = 0; i < n; ++i) {
2011 		blocks[i]->closure = p;
2012 		p += nodewords;
2013 	}
2014 	all_edge_sets = p;
2015 	for (i = 0; i < n; ++i) {
2016 		register struct block *b = blocks[i];
2017 
2018 		b->et.edom = p;
2019 		p += edgewords;
2020 		b->ef.edom = p;
2021 		p += edgewords;
2022 		b->et.id = i;
2023 		edges[i] = &b->et;
2024 		b->ef.id = n_blocks + i;
2025 		edges[n_blocks + i] = &b->ef;
2026 		b->et.pred = b;
2027 		b->ef.pred = b;
2028 	}
2029 	max_stmts = 0;
2030 	for (i = 0; i < n; ++i)
2031 		max_stmts += slength(blocks[i]->stmts) + 1;
2032 	/*
2033 	 * We allocate at most 3 value numbers per statement,
2034 	 * so this is an upper bound on the number of valnodes
2035 	 * we'll need.
2036 	 */
2037 	maxval = 3 * max_stmts;
2038 	vmap = (struct vmapinfo *)calloc(maxval, sizeof(*vmap));
2039 	vnode_base = (struct valnode *)calloc(maxval, sizeof(*vnode_base));
2040 	if (vmap == NULL || vnode_base == NULL)
2041 		bpf_error("malloc");
2042 }
2043 
2044 /*
2045  * Some pointers used to convert the basic block form of the code,
2046  * into the array form that BPF requires.  'fstart' will point to
2047  * the malloc'd array while 'ftail' is used during the recursive traversal.
2048  */
2049 static struct bpf_insn *fstart;
2050 static struct bpf_insn *ftail;
2051 
2052 #ifdef BDEBUG
2053 int bids[1000];
2054 #endif
2055 
2056 /*
2057  * Returns true if successful.  Returns false if a branch has
2058  * an offset that is too large.  If so, we have marked that
2059  * branch so that on a subsequent iteration, it will be treated
2060  * properly.
2061  */
2062 static int
2063 convert_code_r(p)
2064 	struct block *p;
2065 {
2066 	struct bpf_insn *dst;
2067 	struct slist *src;
2068 	int slen;
2069 	u_int off;
2070 	int extrajmps;		/* number of extra jumps inserted */
2071 	struct slist **offset = NULL;
2072 
2073 	if (p == 0 || isMarked(p))
2074 		return (1);
2075 	Mark(p);
2076 
2077 	if (convert_code_r(JF(p)) == 0)
2078 		return (0);
2079 	if (convert_code_r(JT(p)) == 0)
2080 		return (0);
2081 
2082 	slen = slength(p->stmts);
2083 	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
2084 		/* inflate length by any extra jumps */
2085 
2086 	p->offset = dst - fstart;
2087 
2088 	/* generate offset[] for convenience  */
2089 	if (slen) {
2090 		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2091 		if (!offset) {
2092 			bpf_error("not enough core");
2093 			/*NOTREACHED*/
2094 		}
2095 	}
2096 	src = p->stmts;
2097 	for (off = 0; off < slen && src; off++) {
2098 #if 0
2099 		printf("off=%d src=%x\n", off, src);
2100 #endif
2101 		offset[off] = src;
2102 		src = src->next;
2103 	}
2104 
2105 	off = 0;
2106 	for (src = p->stmts; src; src = src->next) {
2107 		if (src->s.code == NOP)
2108 			continue;
2109 		dst->code = (u_short)src->s.code;
2110 		dst->k = src->s.k;
2111 
2112 		/* fill block-local relative jump */
2113 		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2114 #if 0
2115 			if (src->s.jt || src->s.jf) {
2116 				bpf_error("illegal jmp destination");
2117 				/*NOTREACHED*/
2118 			}
2119 #endif
2120 			goto filled;
2121 		}
2122 		if (off == slen - 2)	/*???*/
2123 			goto filled;
2124 
2125 	    {
2126 		int i;
2127 		int jt, jf;
2128 		const char *ljerr = "%s for block-local relative jump: off=%d";
2129 
2130 #if 0
2131 		printf("code=%x off=%d %x %x\n", src->s.code,
2132 			off, src->s.jt, src->s.jf);
2133 #endif
2134 
2135 		if (!src->s.jt || !src->s.jf) {
2136 			bpf_error(ljerr, "no jmp destination", off);
2137 			/*NOTREACHED*/
2138 		}
2139 
2140 		jt = jf = 0;
2141 		for (i = 0; i < slen; i++) {
2142 			if (offset[i] == src->s.jt) {
2143 				if (jt) {
2144 					bpf_error(ljerr, "multiple matches", off);
2145 					/*NOTREACHED*/
2146 				}
2147 
2148 				dst->jt = i - off - 1;
2149 				jt++;
2150 			}
2151 			if (offset[i] == src->s.jf) {
2152 				if (jf) {
2153 					bpf_error(ljerr, "multiple matches", off);
2154 					/*NOTREACHED*/
2155 				}
2156 				dst->jf = i - off - 1;
2157 				jf++;
2158 			}
2159 		}
2160 		if (!jt || !jf) {
2161 			bpf_error(ljerr, "no destination found", off);
2162 			/*NOTREACHED*/
2163 		}
2164 	    }
2165 filled:
2166 		++dst;
2167 		++off;
2168 	}
2169 	if (offset)
2170 		free(offset);
2171 
2172 #ifdef BDEBUG
2173 	bids[dst - fstart] = p->id + 1;
2174 #endif
2175 	dst->code = (u_short)p->s.code;
2176 	dst->k = p->s.k;
2177 	if (JT(p)) {
2178 		extrajmps = 0;
2179 		off = JT(p)->offset - (p->offset + slen) - 1;
2180 		if (off >= 256) {
2181 		    /* offset too large for branch, must add a jump */
2182 		    if (p->longjt == 0) {
2183 		    	/* mark this instruction and retry */
2184 			p->longjt++;
2185 			return(0);
2186 		    }
2187 		    /* branch if T to following jump */
2188 		    dst->jt = extrajmps;
2189 		    extrajmps++;
2190 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
2191 		    dst[extrajmps].k = off - extrajmps;
2192 		}
2193 		else
2194 		    dst->jt = off;
2195 		off = JF(p)->offset - (p->offset + slen) - 1;
2196 		if (off >= 256) {
2197 		    /* offset too large for branch, must add a jump */
2198 		    if (p->longjf == 0) {
2199 		    	/* mark this instruction and retry */
2200 			p->longjf++;
2201 			return(0);
2202 		    }
2203 		    /* branch if F to following jump */
2204 		    /* if two jumps are inserted, F goes to second one */
2205 		    dst->jf = extrajmps;
2206 		    extrajmps++;
2207 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
2208 		    dst[extrajmps].k = off - extrajmps;
2209 		}
2210 		else
2211 		    dst->jf = off;
2212 	}
2213 	return (1);
2214 }
2215 
2216 
2217 /*
2218  * Convert flowgraph intermediate representation to the
2219  * BPF array representation.  Set *lenp to the number of instructions.
2220  *
2221  * This routine does *NOT* leak the memory pointed to by fp.  It *must
2222  * not* do free(fp) before returning fp; doing so would make no sense,
2223  * as the BPF array pointed to by the return value of icode_to_fcode()
2224  * must be valid - it's being returned for use in a bpf_program structure.
2225  *
2226  * If it appears that icode_to_fcode() is leaking, the problem is that
2227  * the program using pcap_compile() is failing to free the memory in
2228  * the BPF program when it's done - the leak is in the program, not in
2229  * the routine that happens to be allocating the memory.  (By analogy, if
2230  * a program calls fopen() without ever calling fclose() on the FILE *,
2231  * it will leak the FILE structure; the leak is not in fopen(), it's in
2232  * the program.)  Change the program to use pcap_freecode() when it's
2233  * done with the filter program.  See the pcap man page.
2234  */
2235 struct bpf_insn *
2236 icode_to_fcode(root, lenp)
2237 	struct block *root;
2238 	int *lenp;
2239 {
2240 	int n;
2241 	struct bpf_insn *fp;
2242 
2243 	/*
2244 	 * Loop doing convert_code_r() until no branches remain
2245 	 * with too-large offsets.
2246 	 */
2247 	while (1) {
2248 	    unMarkAll();
2249 	    n = *lenp = count_stmts(root);
2250 
2251 	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2252 	    if (fp == NULL)
2253 		    bpf_error("malloc");
2254 	    memset((char *)fp, 0, sizeof(*fp) * n);
2255 	    fstart = fp;
2256 	    ftail = fp + n;
2257 
2258 	    unMarkAll();
2259 	    if (convert_code_r(root))
2260 		break;
2261 	    free(fp);
2262 	}
2263 
2264 	return fp;
2265 }
2266 
2267 /*
2268  * Make a copy of a BPF program and put it in the "fcode" member of
2269  * a "pcap_t".
2270  *
2271  * If we fail to allocate memory for the copy, fill in the "errbuf"
2272  * member of the "pcap_t" with an error message, and return -1;
2273  * otherwise, return 0.
2274  */
2275 int
2276 install_bpf_program(pcap_t *p, struct bpf_program *fp)
2277 {
2278 	size_t prog_size;
2279 
2280 	/*
2281 	 * Free up any already installed program.
2282 	 */
2283 	pcap_freecode(&p->fcode);
2284 
2285 	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2286 	p->fcode.bf_len = fp->bf_len;
2287 	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2288 	if (p->fcode.bf_insns == NULL) {
2289 		snprintf(p->errbuf, sizeof(p->errbuf),
2290 			 "malloc: %s", pcap_strerror(errno));
2291 		return (-1);
2292 	}
2293 	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2294 	return (0);
2295 }
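/*
 * Usage sketch (illustrative; 'my_setfilter' is a hypothetical name): a
 * pcap_setfilter() implementation for a device that filters in userland
 * can simply do
 *
 *	static int
 *	my_setfilter(pcap_t *p, struct bpf_program *fp)
 *	{
 *		return (install_bpf_program(p, fp));
 *	}
 *
 * after which the copied program in p->fcode can be run against each
 * packet with bpf_filter().
 */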
2296 
2297 #ifdef BDEBUG
2298 static void
2299 opt_dump(root)
2300 	struct block *root;
2301 {
2302 	struct bpf_program f;
2303 
2304 	memset(bids, 0, sizeof bids);
2305 	f.bf_insns = icode_to_fcode(root, &f.bf_len);
2306 	bpf_dump(&f, 1);
2307 	putchar('\n');
2308 	free((char *)f.bf_insns);
2309 }
2310 #endif
2311