1 /*
2  * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that: (1) source code distributions
7  * retain the above copyright notice and this paragraph in its entirety, (2)
8  * distributions including binary code include the above copyright notice and
9  * this paragraph in its entirety in the documentation or other materials
10  * provided with the distribution, and (3) all advertising materials mentioning
11  * features or use of this software display the following acknowledgement:
12  * ``This product includes software developed by the University of California,
13  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14  * the University nor the names of its contributors may be used to endorse
15  * or promote products derived from this software without specific prior
16  * written permission.
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20  *
21  *  Optimization module for tcpdump intermediate representation.
22  */
23 #ifndef lint
24 static const char rcsid[] _U_ =
25     "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.91 2008-01-02 04:16:46 guy Exp $ (LBL)";
26 #endif
27 
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31 
32 #ifdef WIN32
33 #include <pcap-stdinc.h>
34 #else /* WIN32 */
35 #if HAVE_INTTYPES_H
36 #include <inttypes.h>
37 #elif HAVE_STDINT_H
38 #include <stdint.h>
39 #endif
40 #ifdef HAVE_SYS_BITYPES_H
41 #include <sys/bitypes.h>
42 #endif
43 #include <sys/types.h>
44 #endif /* WIN32 */
45 
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <memory.h>
49 #include <string.h>
50 
51 #include <errno.h>
52 
53 #include "pcap-int.h"
54 
55 #include "gencode.h"
56 
57 #ifdef HAVE_OS_PROTO_H
58 #include "os-proto.h"
59 #endif
60 
61 #ifdef BDEBUG
62 extern int dflag;
63 #endif
64 
65 #if defined(MSDOS) && !defined(__DJGPP__)
66 extern int _w32_ffs (int mask);
67 #define ffs _w32_ffs
68 #endif
69 
70 #if defined(WIN32) && defined (_MSC_VER)
71 int ffs(int mask);
72 #endif
73 
74 /*
75  * Represents a deleted instruction.
76  */
77 #define NOP -1
78 
79 /*
80  * Register numbers for use-def values.
81  * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
82  * location.  A_ATOM is the accumulator and X_ATOM is the index
83  * register.
84  */
85 #define A_ATOM BPF_MEMWORDS
86 #define X_ATOM (BPF_MEMWORDS+1)
87 
88 /*
89  * This define is used to represent *both* the accumulator and
90  * x register in use-def computations.
91  * Currently, the use-def code assumes only one definition per instruction.
92  */
93 #define AX_ATOM N_ATOMS
94 
95 /*
96  * A flag to indicate that further optimization is needed.
97  * Iterative passes are continued until a given pass yields no
98  * branch movement.
99  */
100 static int done;
101 
102 /*
103  * A block is marked if and only if its mark equals the current mark.
104  * Rather than traverse the code array, marking each item, 'cur_mark' is
105  * incremented.  This automatically makes each element unmarked.
106  */
107 static int cur_mark;
108 #define isMarked(p) ((p)->mark == cur_mark)
109 #define unMarkAll() cur_mark += 1
110 #define Mark(p) ((p)->mark = cur_mark)
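
/*
 * Editorial sketch (not part of the original source): the point of the
 * counter-based scheme above is that clearing every mark costs O(1),
 * since bumping 'cur_mark' invalidates all stored marks at once.
 * 'OPT_EXAMPLES' is a hypothetical guard keeping this out of builds.
 */
#ifdef OPT_EXAMPLES
static void
mark_example(struct block *b)
{
	Mark(b);		/* b->mark = cur_mark, so isMarked(b) is true */
	unMarkAll();		/* bumps cur_mark; isMarked(b) is false again, */
				/* without touching any block at all */
}
#endif /* OPT_EXAMPLES */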
111 
112 static void opt_init(struct block *);
113 static void opt_cleanup(void);
114 
115 static void intern_blocks(struct block *);
116 
117 static void find_inedges(struct block *);
118 #ifdef BDEBUG
119 static void opt_dump(struct block *);
120 #endif
121 
122 static int n_blocks;
123 struct block **blocks;
124 static int n_edges;
125 struct edge **edges;
126 
127 /*
128  * A bit vector set representation of the dominators.
129  * We round up the set size to the next power of two.
130  */
131 static int nodewords;
132 static int edgewords;
133 struct block **levels;
134 bpf_u_int32 *space;
135 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
136 /*
137  * True if 'a' is in uset p.
138  */
139 #define SET_MEMBER(p, a) \
140 ((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))
141 
142 /*
143  * Add 'a' to uset p.
144  */
145 #define SET_INSERT(p, a) \
146 (p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))
147 
148 /*
149  * Delete 'a' from uset p.
150  */
151 #define SET_DELETE(p, a) \
152 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
153 
154 /*
155  * a := a intersect b
156  */
157 #define SET_INTERSECT(a, b, n)\
158 {\
159 	register bpf_u_int32 *_x = a, *_y = b;\
160 	register int _n = n;\
161 	while (--_n >= 0) *_x++ &= *_y++;\
162 }
163 
164 /*
165  * a := a - b
166  */
167 #define SET_SUBTRACT(a, b, n)\
168 {\
169 	register bpf_u_int32 *_x = a, *_y = b;\
170 	register int _n = n;\
171 	while (--_n >= 0) *_x++ &=~ *_y++;\
172 }
173 
174 /*
175  * a := a union b
176  */
177 #define SET_UNION(a, b, n)\
178 {\
179 	register bpf_u_int32 *_x = a, *_y = b;\
180 	register int _n = n;\
181 	while (--_n >= 0) *_x++ |= *_y++;\
182 }
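
/*
 * Editorial sketch (not part of the original source): how the uset
 * macros pack members into 32-bit words.  Member 33 lands in word 1,
 * bit 1.  'OPT_EXAMPLES' is a hypothetical guard; illustration only.
 */
#ifdef OPT_EXAMPLES
static void
uset_example(void)
{
	bpf_u_int32 s1[2] = { 0, 0 };	/* room for 64 members */
	bpf_u_int32 s2[2] = { 0, 0 };

	SET_INSERT(s1, 5);
	SET_INSERT(s1, 33);
	SET_INSERT(s2, 33);
	SET_INTERSECT(s1, s2, 2);	/* s1 becomes {33} */
	if (!SET_MEMBER(s1, 5) && SET_MEMBER(s1, 33))
		printf("intersection kept only member 33\n");
}
#endif /* OPT_EXAMPLES */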
183 
184 static uset all_dom_sets;
185 static uset all_closure_sets;
186 static uset all_edge_sets;
187 
188 #ifndef MAX
189 #define MAX(a,b) ((a)>(b)?(a):(b))
190 #endif
191 
192 static void
193 find_levels_r(struct block *b)
194 {
195 	int level;
196 
197 	if (isMarked(b))
198 		return;
199 
200 	Mark(b);
201 	b->link = 0;
202 
203 	if (JT(b)) {
204 		find_levels_r(JT(b));
205 		find_levels_r(JF(b));
206 		level = MAX(JT(b)->level, JF(b)->level) + 1;
207 	} else
208 		level = 0;
209 	b->level = level;
210 	b->link = levels[level];
211 	levels[level] = b;
212 }
213 
214 /*
215  * Level graph.  The levels go from 0 at the leaves to
216  * N_LEVELS at the root.  The levels[] array points to the
217  * first node of the level list, whose elements are linked
218  * with the 'link' field of the struct block.
219  */
220 static void
221 find_levels(struct block *root)
222 {
223 	memset((char *)levels, 0, n_blocks * sizeof(*levels));
224 	unMarkAll();
225 	find_levels_r(root);
226 }
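
/*
 * Editorial sketch (not part of the original source): after
 * find_levels(), each levels[i] is a linked list of the blocks at
 * height i, and the passes below walk it exactly like this.  Assumes
 * opt_init() has allocated 'levels'.  Hypothetical guard again.
 */
#ifdef OPT_EXAMPLES
static void
levels_example(struct block *root)
{
	int i;
	struct block *b;

	find_levels(root);
	for (i = root->level; i >= 0; --i)	/* root first, leaves last */
		for (b = levels[i]; b != 0; b = b->link)
			printf("block %d is at level %d\n", b->id, b->level);
}
#endif /* OPT_EXAMPLES */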
227 
228 /*
229  * Find dominator relationships.
230  * Assumes graph has been leveled.
231  */
232 static void
233 find_dom(struct block *root)
234 {
235 	int i;
236 	struct block *b;
237 	bpf_u_int32 *x;
238 
239 	/*
240 	 * Initialize sets to contain all nodes.
241 	 */
242 	x = all_dom_sets;
243 	i = n_blocks * nodewords;
244 	while (--i >= 0)
245 		*x++ = ~0;
246 	/* Root starts off empty. */
247 	for (i = nodewords; --i >= 0;)
248 		root->dom[i] = 0;
249 
250 	/* root->level is the highest level number found. */
251 	for (i = root->level; i >= 0; --i) {
252 		for (b = levels[i]; b; b = b->link) {
253 			SET_INSERT(b->dom, b->id);
254 			if (JT(b) == 0)
255 				continue;
256 			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
257 			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
258 		}
259 	}
260 }
261 
262 static void
263 propedom(struct edge *ep)
264 {
265 	SET_INSERT(ep->edom, ep->id);
266 	if (ep->succ) {
267 		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
268 		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
269 	}
270 }
271 
272 /*
273  * Compute edge dominators.
274  * Assumes graph has been leveled and predecessors established.
275  */
276 static void
277 find_edom(struct block *root)
278 {
279 	int i;
280 	uset x;
281 	struct block *b;
282 
283 	x = all_edge_sets;
284 	for (i = n_edges * edgewords; --i >= 0; )
285 		x[i] = ~0;
286 
287 	/* root->level is the highest level number found. */
288 	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
289 	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
290 	for (i = root->level; i >= 0; --i) {
291 		for (b = levels[i]; b != 0; b = b->link) {
292 			propedom(&b->et);
293 			propedom(&b->ef);
294 		}
295 	}
296 }
297 
298 /*
299  * Find the backwards transitive closure of the flow graph.  These sets
300  * are backwards in the sense that we find the set of nodes that reach
301  * a given node, not the set of nodes that can be reached by a node.
302  *
303  * Assumes graph has been leveled.
304  */
305 static void
306 find_closure(struct block *root)
307 {
308 	int i;
309 	struct block *b;
310 
311 	/*
312 	 * Initialize sets to contain no nodes.
313 	 */
314 	memset((char *)all_closure_sets, 0,
315 	      n_blocks * nodewords * sizeof(*all_closure_sets));
316 
317 	/* root->level is the highest level number found. */
318 	for (i = root->level; i >= 0; --i) {
319 		for (b = levels[i]; b; b = b->link) {
320 			SET_INSERT(b->closure, b->id);
321 			if (JT(b) == 0)
322 				continue;
323 			SET_UNION(JT(b)->closure, b->closure, nodewords);
324 			SET_UNION(JF(b)->closure, b->closure, nodewords);
325 		}
326 	}
327 }
328 
329 /*
330  * Return the register number that is used by s.  If A and X are both
331  * used, return AX_ATOM.  If no register is used, return -1.
332  *
333  * The implementation should probably change to an array access.
334  */
335 static int
336 atomuse(struct stmt *s)
337 {
338 	register int c = s->code;
339 
340 	if (c == NOP)
341 		return -1;
342 
343 	switch (BPF_CLASS(c)) {
344 
345 	case BPF_RET:
346 		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
347 			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
348 
349 	case BPF_LD:
350 	case BPF_LDX:
351 		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
352 			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;
353 
354 	case BPF_ST:
355 		return A_ATOM;
356 
357 	case BPF_STX:
358 		return X_ATOM;
359 
360 	case BPF_JMP:
361 	case BPF_ALU:
362 		if (BPF_SRC(c) == BPF_X)
363 			return AX_ATOM;
364 		return A_ATOM;
365 
366 	case BPF_MISC:
367 		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
368 	}
369 	abort();
370 	/* NOTREACHED */
371 }
372 
373 /*
374  * Return the register number that is defined by 's'.  We assume that
375  * a single stmt cannot define more than one register.  If no register
376  * is defined, return -1.
377  *
378  * The implementation should probably change to an array access.
379  */
380 static int
381 atomdef(struct stmt *s)
382 {
383 	if (s->code == NOP)
384 		return -1;
385 
386 	switch (BPF_CLASS(s->code)) {
387 
388 	case BPF_LD:
389 	case BPF_ALU:
390 		return A_ATOM;
391 
392 	case BPF_LDX:
393 		return X_ATOM;
394 
395 	case BPF_ST:
396 	case BPF_STX:
397 		return s->k;
398 
399 	case BPF_MISC:
400 		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
401 	}
402 	return -1;
403 }
404 
405 /*
406  * Compute the sets of registers used, defined, and killed by 'b'.
407  *
408  * "Used" means that a statement in 'b' uses the register before any
409  * statement in 'b' defines it, i.e. it uses the value left in
410  * that register by a predecessor block of this block.
411  * "Defined" means that a statement in 'b' defines it.
412  * "Killed" means that a statement in 'b' defines it before any
413  * statement in 'b' uses it, i.e. it kills the value left in that
414  * register by a predecessor block of this block.
415  */
416 static void
417 compute_local_ud(struct block *b)
418 {
419 	struct slist *s;
420 	atomset def = 0, use = 0, kill = 0;
421 	int atom;
422 
423 	for (s = b->stmts; s; s = s->next) {
424 		if (s->s.code == NOP)
425 			continue;
426 		atom = atomuse(&s->s);
427 		if (atom >= 0) {
428 			if (atom == AX_ATOM) {
429 				if (!ATOMELEM(def, X_ATOM))
430 					use |= ATOMMASK(X_ATOM);
431 				if (!ATOMELEM(def, A_ATOM))
432 					use |= ATOMMASK(A_ATOM);
433 			}
434 			else if (atom < N_ATOMS) {
435 				if (!ATOMELEM(def, atom))
436 					use |= ATOMMASK(atom);
437 			}
438 			else
439 				abort();
440 		}
441 		atom = atomdef(&s->s);
442 		if (atom >= 0) {
443 			if (!ATOMELEM(use, atom))
444 				kill |= ATOMMASK(atom);
445 			def |= ATOMMASK(atom);
446 		}
447 	}
448 	if (BPF_CLASS(b->s.code) == BPF_JMP) {
449 		/*
450 		 * XXX - what about RET?
451 		 */
452 		atom = atomuse(&b->s);
453 		if (atom >= 0) {
454 			if (atom == AX_ATOM) {
455 				if (!ATOMELEM(def, X_ATOM))
456 					use |= ATOMMASK(X_ATOM);
457 				if (!ATOMELEM(def, A_ATOM))
458 					use |= ATOMMASK(A_ATOM);
459 			}
460 			else if (atom < N_ATOMS) {
461 				if (!ATOMELEM(def, atom))
462 					use |= ATOMMASK(atom);
463 			}
464 			else
465 				abort();
466 		}
467 	}
468 
469 	b->def = def;
470 	b->kill = kill;
471 	b->in_use = use;
472 }
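
/*
 * Editorial sketch (not part of the original source): feeding a
 * two-statement block through compute_local_ud().  The block below
 * computes "A = M[0]; M[1] = A", so it uses scratch slot 0 from its
 * predecessors and defines/kills A and slot 1.  Hypothetical guard.
 */
#ifdef OPT_EXAMPLES
static void
local_ud_example(void)
{
	struct slist s0, s1;
	struct block b;

	memset(&b, 0, sizeof(b));
	memset(&s0, 0, sizeof(s0));
	memset(&s1, 0, sizeof(s1));
	s0.s.code = BPF_LD|BPF_MEM;	/* A = M[0]: uses atom 0 */
	s0.s.k = 0;
	s0.next = &s1;
	s1.s.code = BPF_ST;		/* M[1] = A: uses A, defines atom 1 */
	s1.s.k = 1;
	b.stmts = &s0;
	b.s.code = BPF_RET|BPF_K;	/* not a BPF_JMP, so ignored here */
	compute_local_ud(&b);
	/* expect: b.in_use = {0}, b.def = b.kill = {A_ATOM, 1} */
}
#endif /* OPT_EXAMPLES */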
473 
474 /*
475  * Assume graph is already leveled.
476  */
477 static void
478 find_ud(struct block *root)
479 {
480 	int i, maxlevel;
481 	struct block *p;
482 
483 	/*
484 	 * root->level is the highest level number found;
485 	 * count down from there.
486 	 */
487 	maxlevel = root->level;
488 	for (i = maxlevel; i >= 0; --i)
489 		for (p = levels[i]; p; p = p->link) {
490 			compute_local_ud(p);
491 			p->out_use = 0;
492 		}
493 
494 	for (i = 1; i <= maxlevel; ++i) {
495 		for (p = levels[i]; p; p = p->link) {
496 			p->out_use |= JT(p)->in_use | JF(p)->in_use;
497 			p->in_use |= p->out_use &~ p->kill;
498 		}
499 	}
500 }
501 
502 /*
503  * These data structures are used in a Cocke and Schwartz style
504  * value numbering scheme.  Since the flowgraph is acyclic,
505  * exit values can be propagated from a node's predecessors
506  * provided it is uniquely defined.
507  */
508 struct valnode {
509 	int code;
510 	int v0, v1;
511 	int val;
512 	struct valnode *next;
513 };
514 
515 #define MODULUS 213
516 static struct valnode *hashtbl[MODULUS];
517 static int curval;
518 static int maxval;
519 
520 /* Integer constants mapped with the load immediate opcode. */
521 #define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
522 
523 struct vmapinfo {
524 	int is_const;
525 	bpf_int32 const_val;
526 };
527 
528 struct vmapinfo *vmap;
529 struct valnode *vnode_base;
530 struct valnode *next_vnode;
531 
532 static void
533 init_val(void)
534 {
535 	curval = 0;
536 	next_vnode = vnode_base;
537 	memset((char *)vmap, 0, maxval * sizeof(*vmap));
538 	memset((char *)hashtbl, 0, sizeof hashtbl);
539 }
540 
541 /* Because we really don't have an IR, this stuff is a little messy. */
542 static int
543 F(int code, int v0, int v1)
544 {
545 	u_int hash;
546 	int val;
547 	struct valnode *p;
548 
549 	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
550 	hash %= MODULUS;
551 
552 	for (p = hashtbl[hash]; p; p = p->next)
553 		if (p->code == code && p->v0 == v0 && p->v1 == v1)
554 			return p->val;
555 
556 	val = ++curval;
557 	if (BPF_MODE(code) == BPF_IMM &&
558 	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
559 		vmap[val].const_val = v0;
560 		vmap[val].is_const = 1;
561 	}
562 	p = next_vnode++;
563 	p->val = val;
564 	p->code = code;
565 	p->v0 = v0;
566 	p->v1 = v1;
567 	p->next = hashtbl[hash];
568 	hashtbl[hash] = p;
569 
570 	return val;
571 }
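
/*
 * Editorial sketch (not part of the original source): two
 * syntactically identical computations intern to the same value
 * number, and K() additionally records the constant in vmap[].
 * Assumes opt_init() and init_val() have run.  Hypothetical guard.
 */
#ifdef OPT_EXAMPLES
static void
valnum_example(void)
{
	int v1 = K(2);
	int v2 = K(2);	/* same (code, v0, v1) triple as above... */

	/* ...so v1 == v2: identical computations share a value number */
	if (v1 == v2 && vmap[v1].is_const)
		printf("constant %d has value number %d\n",
		    vmap[v1].const_val, v1);
}
#endif /* OPT_EXAMPLES */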
572 
573 static inline void
574 vstore(struct stmt *s, int *valp, int newval, int alter)
575 {
576 	if (alter && *valp == newval)
577 		s->code = NOP;
578 	else
579 		*valp = newval;
580 }
581 
582 /*
583  * Do constant-folding on binary operators.
584  * (Unary operators are handled elsewhere.)
585  */
586 static void
587 fold_op(struct stmt *s, int v0, int v1)
588 {
589 	bpf_u_int32 a, b;
590 
591 	a = vmap[v0].const_val;
592 	b = vmap[v1].const_val;
593 
594 	switch (BPF_OP(s->code)) {
595 	case BPF_ADD:
596 		a += b;
597 		break;
598 
599 	case BPF_SUB:
600 		a -= b;
601 		break;
602 
603 	case BPF_MUL:
604 		a *= b;
605 		break;
606 
607 	case BPF_DIV:
608 		if (b == 0)
609 			bpf_error("division by zero");
610 		a /= b;
611 		break;
612 
613 	case BPF_AND:
614 		a &= b;
615 		break;
616 
617 	case BPF_OR:
618 		a |= b;
619 		break;
620 
621 	case BPF_LSH:
622 		a <<= b;
623 		break;
624 
625 	case BPF_RSH:
626 		a >>= b;
627 		break;
628 
629 	default:
630 		abort();
631 	}
632 	s->k = a;
633 	s->code = BPF_LD|BPF_IMM;
634 	done = 0;
635 }
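
/*
 * Editorial sketch (not part of the original source): folding
 * "A = 2 + 3" into a single load immediate.  Assumes init_val() has
 * run so K() can intern constants.  Hypothetical guard.
 */
#ifdef OPT_EXAMPLES
static void
fold_example(void)
{
	struct stmt s;

	memset(&s, 0, sizeof(s));
	s.code = BPF_ALU|BPF_ADD|BPF_K;
	fold_op(&s, K(2), K(3));
	/* now s.code == (BPF_LD|BPF_IMM) and s.k == 5 */
}
#endif /* OPT_EXAMPLES */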
636 
637 static inline struct slist *
638 this_op(struct slist *s)
639 {
640 	while (s != 0 && s->s.code == NOP)
641 		s = s->next;
642 	return s;
643 }
644 
645 static void
646 opt_not(struct block *b)
647 {
648 	struct block *tmp = JT(b);
649 
650 	JT(b) = JF(b);
651 	JF(b) = tmp;
652 }
653 
654 static void
655 opt_peep(struct block *b)
656 {
657 	struct slist *s;
658 	struct slist *next, *last;
659 	int val;
660 
661 	s = b->stmts;
662 	if (s == 0)
663 		return;
664 
665 	last = s;
666 	for (/*empty*/; /*empty*/; s = next) {
667 		/*
668 		 * Skip over nops.
669 		 */
670 		s = this_op(s);
671 		if (s == 0)
672 			break;	/* nothing left in the block */
673 
674 		/*
675 		 * Find the next real instruction after that one
676 		 * (skipping nops).
677 		 */
678 		next = this_op(s->next);
679 		if (next == 0)
680 			break;	/* no next instruction */
681 		last = next;
682 
683 		/*
684 		 * st  M[k]	-->	st  M[k]
685 		 * ldx M[k]		tax
686 		 */
687 		if (s->s.code == BPF_ST &&
688 		    next->s.code == (BPF_LDX|BPF_MEM) &&
689 		    s->s.k == next->s.k) {
690 			done = 0;
691 			next->s.code = BPF_MISC|BPF_TAX;
692 		}
693 		/*
694 		 * ld  #k	-->	ldx  #k
695 		 * tax			txa
696 		 */
697 		if (s->s.code == (BPF_LD|BPF_IMM) &&
698 		    next->s.code == (BPF_MISC|BPF_TAX)) {
699 			s->s.code = BPF_LDX|BPF_IMM;
700 			next->s.code = BPF_MISC|BPF_TXA;
701 			done = 0;
702 		}
703 		/*
704 		 * This is an ugly special case, but it happens
705 		 * when you say tcp[k] or udp[k] where k is a constant.
706 		 */
707 		if (s->s.code == (BPF_LD|BPF_IMM)) {
708 			struct slist *add, *tax, *ild;
709 
710 			/*
711 			 * Check that X isn't used on exit from this
712 			 * block (which the optimizer might cause).
713 			 * We know the code generator won't generate
714 			 * any local dependencies.
715 			 */
716 			if (ATOMELEM(b->out_use, X_ATOM))
717 				continue;
718 
719 			/*
720 			 * Check that the instruction following the ldi
721 			 * is an addx, or it's an ldxms with an addx
722 			 * following it (with 0 or more nops between the
723 			 * ldxms and addx).
724 			 */
725 			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
726 				add = next;
727 			else
728 				add = this_op(next->next);
729 			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
730 				continue;
731 
732 			/*
733 			 * Check that a tax follows that (with 0 or more
734 			 * nops between them).
735 			 */
736 			tax = this_op(add->next);
737 			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
738 				continue;
739 
740 			/*
741 			 * Check that an ild follows that (with 0 or more
742 			 * nops between them).
743 			 */
744 			ild = this_op(tax->next);
745 			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
746 			    BPF_MODE(ild->s.code) != BPF_IND)
747 				continue;
748 			/*
749 			 * We want to turn this sequence:
750 			 *
751 			 * (004) ldi     #0x2		{s}
752 			 * (005) ldxms   [14]		{next}  -- optional
753 			 * (006) addx			{add}
754 			 * (007) tax			{tax}
755 			 * (008) ild     [x+0]		{ild}
756 			 *
757 			 * into this sequence:
758 			 *
759 			 * (004) nop
760 			 * (005) ldxms   [14]
761 			 * (006) nop
762 			 * (007) nop
763 			 * (008) ild     [x+2]
764 			 *
765 			 * XXX We need to check that X is not
766 			 * subsequently used, because we want to change
767 			 * what'll be in it after this sequence.
768 			 *
769 			 * We know we can eliminate the accumulator
770 			 * modifications earlier in the sequence since
771 			 * it is defined by the last stmt of this sequence
772 			 * (i.e., the last statement of the sequence loads
773 			 * a value into the accumulator, so we can eliminate
774 			 * earlier operations on the accumulator).
775 			 */
776 			ild->s.k += s->s.k;
777 			s->s.code = NOP;
778 			add->s.code = NOP;
779 			tax->s.code = NOP;
780 			done = 0;
781 		}
782 	}
783 	/*
784 	 * If the comparison at the end of a block is an equality
785 	 * comparison against a constant, and nobody uses the value
786 	 * we leave in the A register at the end of a block, and
787 	 * the operation preceding the comparison is an arithmetic
788  * operation, we can sometimes optimize it away.
789 	 */
790 	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
791 	    !ATOMELEM(b->out_use, A_ATOM)) {
792 	    	/*
793 	    	 * We can optimize away certain subtractions of the
794 	    	 * X register.
795 	    	 */
796 		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
797 			val = b->val[X_ATOM];
798 			if (vmap[val].is_const) {
799 				/*
800 				 * If we have a subtract to do a comparison,
801 				 * and the X register is a known constant,
802 				 * we can merge this value into the
803 				 * comparison:
804 				 *
805 				 * sub x  ->	nop
806 				 * jeq #y	jeq #(x+y)
807 				 */
808 				b->s.k += vmap[val].const_val;
809 				last->s.code = NOP;
810 				done = 0;
811 			} else if (b->s.k == 0) {
812 				/*
813 				 * If the X register isn't a constant,
814 				 * and the comparison in the test is
815 				 * against 0, we can compare with the
816 				 * X register, instead:
817 				 *
818 				 * sub x  ->	nop
819 				 * jeq #0	jeq x
820 				 */
821 				last->s.code = NOP;
822 				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
823 				done = 0;
824 			}
825 		}
826 		/*
827 		 * Likewise, a constant subtract can be simplified:
828 		 *
829 		 * sub #x ->	nop
830 		 * jeq #y ->	jeq #(x+y)
831 		 */
832 		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
833 			last->s.code = NOP;
834 			b->s.k += last->s.k;
835 			done = 0;
836 		}
837 		/*
838 		 * And, similarly, a constant AND can be simplified
839 		 * if we're testing against 0, i.e.:
840 		 *
841 		 * and #k	nop
842 		 * jeq #0  ->	jset #k
843 		 */
844 		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
845 		    b->s.k == 0) {
846 			b->s.k = last->s.k;
847 			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
848 			last->s.code = NOP;
849 			done = 0;
850 			opt_not(b);
851 		}
852 	}
853 	/*
854 	 * jset #0        ->   never
855 	 * jset #ffffffff ->   always
856 	 */
857 	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
858 		if (b->s.k == 0)
859 			JT(b) = JF(b);
860 		if (b->s.k == 0xffffffff)
861 			JF(b) = JT(b);
862 	}
863 	/*
864 	 * If we're comparing against the index register, and the index
865 	 * register is a known constant, we can just compare against that
866 	 * constant.
867 	 */
868 	val = b->val[X_ATOM];
869 	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
870 		bpf_int32 v = vmap[val].const_val;
871 		b->s.code &= ~BPF_X;
872 		b->s.k = v;
873 	}
874 	/*
875 	 * If the accumulator is a known constant, we can compute the
876 	 * comparison result.
877 	 */
878 	val = b->val[A_ATOM];
879 	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
880 		bpf_int32 v = vmap[val].const_val;
881 		switch (BPF_OP(b->s.code)) {
882 
883 		case BPF_JEQ:
884 			v = v == b->s.k;
885 			break;
886 
887 		case BPF_JGT:
888 			v = (unsigned)v > b->s.k;
889 			break;
890 
891 		case BPF_JGE:
892 			v = (unsigned)v >= b->s.k;
893 			break;
894 
895 		case BPF_JSET:
896 			v &= b->s.k;
897 			break;
898 
899 		default:
900 			abort();
901 		}
902 		if (JF(b) != JT(b))
903 			done = 0;
904 		if (v)
905 			JF(b) = JT(b);
906 		else
907 			JT(b) = JF(b);
908 	}
909 }
910 
911 /*
912  * Compute the symbolic value of the expression of 's', and update
913  * anything it defines in the value table 'val'.  If 'alter' is true,
914  * do various optimizations.  This code would be cleaner if symbolic
915  * evaluation and code transformations weren't folded together.
916  */
917 static void
918 opt_stmt(struct stmt *s, int val[], int alter)
919 {
920 	int op;
921 	int v;
922 
923 	switch (s->code) {
924 
925 	case BPF_LD|BPF_ABS|BPF_W:
926 	case BPF_LD|BPF_ABS|BPF_H:
927 	case BPF_LD|BPF_ABS|BPF_B:
928 		v = F(s->code, s->k, 0L);
929 		vstore(s, &val[A_ATOM], v, alter);
930 		break;
931 
932 	case BPF_LD|BPF_IND|BPF_W:
933 	case BPF_LD|BPF_IND|BPF_H:
934 	case BPF_LD|BPF_IND|BPF_B:
935 		v = val[X_ATOM];
936 		if (alter && vmap[v].is_const) {
937 			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
938 			s->k += vmap[v].const_val;
939 			v = F(s->code, s->k, 0L);
940 			done = 0;
941 		}
942 		else
943 			v = F(s->code, s->k, v);
944 		vstore(s, &val[A_ATOM], v, alter);
945 		break;
946 
947 	case BPF_LD|BPF_LEN:
948 		v = F(s->code, 0L, 0L);
949 		vstore(s, &val[A_ATOM], v, alter);
950 		break;
951 
952 	case BPF_LD|BPF_IMM:
953 		v = K(s->k);
954 		vstore(s, &val[A_ATOM], v, alter);
955 		break;
956 
957 	case BPF_LDX|BPF_IMM:
958 		v = K(s->k);
959 		vstore(s, &val[X_ATOM], v, alter);
960 		break;
961 
962 	case BPF_LDX|BPF_MSH|BPF_B:
963 		v = F(s->code, s->k, 0L);
964 		vstore(s, &val[X_ATOM], v, alter);
965 		break;
966 
967 	case BPF_ALU|BPF_NEG:
968 		if (alter && vmap[val[A_ATOM]].is_const) {
969 			s->code = BPF_LD|BPF_IMM;
970 			s->k = -vmap[val[A_ATOM]].const_val;
971 			val[A_ATOM] = K(s->k);
972 		}
973 		else
974 			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
975 		break;
976 
977 	case BPF_ALU|BPF_ADD|BPF_K:
978 	case BPF_ALU|BPF_SUB|BPF_K:
979 	case BPF_ALU|BPF_MUL|BPF_K:
980 	case BPF_ALU|BPF_DIV|BPF_K:
981 	case BPF_ALU|BPF_AND|BPF_K:
982 	case BPF_ALU|BPF_OR|BPF_K:
983 	case BPF_ALU|BPF_LSH|BPF_K:
984 	case BPF_ALU|BPF_RSH|BPF_K:
985 		op = BPF_OP(s->code);
986 		if (alter) {
987 			if (s->k == 0) {
988 				/* don't optimize away "sub #0"
989 				 * as it may be needed later to
990 				 * fixup the generated math code */
991 				if (op == BPF_ADD ||
992 				    op == BPF_LSH || op == BPF_RSH ||
993 				    op == BPF_OR) {
994 					s->code = NOP;
995 					break;
996 				}
997 				if (op == BPF_MUL || op == BPF_AND) {
998 					s->code = BPF_LD|BPF_IMM;
999 					val[A_ATOM] = K(s->k);
1000 					break;
1001 				}
1002 			}
1003 			if (vmap[val[A_ATOM]].is_const) {
1004 				fold_op(s, val[A_ATOM], K(s->k));
1005 				val[A_ATOM] = K(s->k);
1006 				break;
1007 			}
1008 		}
1009 		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
1010 		break;
1011 
1012 	case BPF_ALU|BPF_ADD|BPF_X:
1013 	case BPF_ALU|BPF_SUB|BPF_X:
1014 	case BPF_ALU|BPF_MUL|BPF_X:
1015 	case BPF_ALU|BPF_DIV|BPF_X:
1016 	case BPF_ALU|BPF_AND|BPF_X:
1017 	case BPF_ALU|BPF_OR|BPF_X:
1018 	case BPF_ALU|BPF_LSH|BPF_X:
1019 	case BPF_ALU|BPF_RSH|BPF_X:
1020 		op = BPF_OP(s->code);
1021 		if (alter && vmap[val[X_ATOM]].is_const) {
1022 			if (vmap[val[A_ATOM]].is_const) {
1023 				fold_op(s, val[A_ATOM], val[X_ATOM]);
1024 				val[A_ATOM] = K(s->k);
1025 			}
1026 			else {
1027 				s->code = BPF_ALU|BPF_K|op;
1028 				s->k = vmap[val[X_ATOM]].const_val;
1029 				done = 0;
1030 				val[A_ATOM] =
1031 					F(s->code, val[A_ATOM], K(s->k));
1032 			}
1033 			break;
1034 		}
1035 		/*
1036 		 * Check if we're doing something to an accumulator
1037 		 * that is 0, and simplify.  This may not seem like
1038 		 * much of a simplification but it could open up further
1039 		 * optimizations.
1040 		 * XXX We could also check for mul by 1, etc.
1041 		 */
1042 		if (alter && vmap[val[A_ATOM]].is_const
1043 		    && vmap[val[A_ATOM]].const_val == 0) {
1044 			if (op == BPF_ADD || op == BPF_OR) {
1045 				s->code = BPF_MISC|BPF_TXA;
1046 				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1047 				break;
1048 			}
1049 			else if (op == BPF_MUL || op == BPF_DIV ||
1050 				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
1051 				s->code = BPF_LD|BPF_IMM;
1052 				s->k = 0;
1053 				vstore(s, &val[A_ATOM], K(s->k), alter);
1054 				break;
1055 			}
1056 			else if (op == BPF_NEG) {
1057 				s->code = NOP;
1058 				break;
1059 			}
1060 		}
1061 		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
1062 		break;
1063 
1064 	case BPF_MISC|BPF_TXA:
1065 		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1066 		break;
1067 
1068 	case BPF_LD|BPF_MEM:
1069 		v = val[s->k];
1070 		if (alter && vmap[v].is_const) {
1071 			s->code = BPF_LD|BPF_IMM;
1072 			s->k = vmap[v].const_val;
1073 			done = 0;
1074 		}
1075 		vstore(s, &val[A_ATOM], v, alter);
1076 		break;
1077 
1078 	case BPF_MISC|BPF_TAX:
1079 		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1080 		break;
1081 
1082 	case BPF_LDX|BPF_MEM:
1083 		v = val[s->k];
1084 		if (alter && vmap[v].is_const) {
1085 			s->code = BPF_LDX|BPF_IMM;
1086 			s->k = vmap[v].const_val;
1087 			done = 0;
1088 		}
1089 		vstore(s, &val[X_ATOM], v, alter);
1090 		break;
1091 
1092 	case BPF_ST:
1093 		vstore(s, &val[s->k], val[A_ATOM], alter);
1094 		break;
1095 
1096 	case BPF_STX:
1097 		vstore(s, &val[s->k], val[X_ATOM], alter);
1098 		break;
1099 	}
1100 }
1101 
1102 static void
1103 deadstmt(register struct stmt *s, register struct stmt *last[])
1104 {
1105 	register int atom;
1106 
1107 	atom = atomuse(s);
1108 	if (atom >= 0) {
1109 		if (atom == AX_ATOM) {
1110 			last[X_ATOM] = 0;
1111 			last[A_ATOM] = 0;
1112 		}
1113 		else
1114 			last[atom] = 0;
1115 	}
1116 	atom = atomdef(s);
1117 	if (atom >= 0) {
1118 		if (last[atom]) {
1119 			done = 0;
1120 			last[atom]->code = NOP;
1121 		}
1122 		last[atom] = s;
1123 	}
1124 }
1125 
1126 static void
1127 opt_deadstores(register struct block *b)
1128 {
1129 	register struct slist *s;
1130 	register int atom;
1131 	struct stmt *last[N_ATOMS];
1132 
1133 	memset((char *)last, 0, sizeof last);
1134 
1135 	for (s = b->stmts; s != 0; s = s->next)
1136 		deadstmt(&s->s, last);
1137 	deadstmt(&b->s, last);
1138 
1139 	for (atom = 0; atom < N_ATOMS; ++atom)
1140 		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1141 			last[atom]->code = NOP;
1142 			done = 0;
1143 		}
1144 }
1145 
1146 static void
1147 opt_blk(struct block *b, int do_stmts)
1148 {
1149 	struct slist *s;
1150 	struct edge *p;
1151 	int i;
1152 	bpf_int32 aval, xval;
1153 
1154 #if 0
1155 	for (s = b->stmts; s && s->next; s = s->next)
1156 		if (BPF_CLASS(s->s.code) == BPF_JMP) {
1157 			do_stmts = 0;
1158 			break;
1159 		}
1160 #endif
1161 
1162 	/*
1163 	 * Initialize the atom values.
1164 	 */
1165 	p = b->in_edges;
1166 	if (p == 0) {
1167 		/*
1168 		 * We have no predecessors, so everything is undefined
1169 		 * upon entry to this block.
1170 		 */
1171 		memset((char *)b->val, 0, sizeof(b->val));
1172 	} else {
1173 		/*
1174 		 * Inherit values from our predecessors.
1175 		 *
1176 		 * First, get the values from the predecessor along the
1177 		 * first edge leading to this node.
1178 		 */
1179 		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1180 		/*
1181 		 * Now look at all the other nodes leading to this node.
1182 		 * If, for the predecessor along that edge, a register
1183 		 * has a different value from the one we have (i.e.,
1184 		 * control paths are merging, and the merging paths
1185 		 * assign different values to that register), give the
1186 		 * register the undefined value of 0.
1187 		 */
1188 		while ((p = p->next) != NULL) {
1189 			for (i = 0; i < N_ATOMS; ++i)
1190 				if (b->val[i] != p->pred->val[i])
1191 					b->val[i] = 0;
1192 		}
1193 	}
1194 	aval = b->val[A_ATOM];
1195 	xval = b->val[X_ATOM];
1196 	for (s = b->stmts; s; s = s->next)
1197 		opt_stmt(&s->s, b->val, do_stmts);
1198 
1199 	/*
1200 	 * This is a special case: if we don't use anything from this
1201 	 * block, and we load the accumulator or index register with a
1202 	 * value that is already there, or if this block is a return,
1203 	 * eliminate all the statements.
1204 	 *
1205 	 * XXX - what if it does a store?
1206 	 *
1207 	 * XXX - why does it matter whether we use anything from this
1208 	 * block?  If the accumulator or index register doesn't change
1209 	 * its value, isn't that OK even if we use that value?
1210 	 *
1211 	 * XXX - if we load the accumulator with a different value,
1212 	 * and the block ends with a conditional branch, we obviously
1213 	 * can't eliminate it, as the branch depends on that value.
1214 	 * For the index register, the conditional branch only depends
1215 	 * on the index register value if the test is against the index
1216 	 * register value rather than a constant; if nothing uses the
1217 	 * value we put into the index register, and we're not testing
1218 	 * against the index register's value, and there aren't any
1219 	 * other problems that would keep us from eliminating this
1220 	 * block, can we eliminate it?
1221 	 */
1222 	if (do_stmts &&
1223 	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
1224 	      xval != 0 && b->val[X_ATOM] == xval) ||
1225 	     BPF_CLASS(b->s.code) == BPF_RET)) {
1226 		if (b->stmts != 0) {
1227 			b->stmts = 0;
1228 			done = 0;
1229 		}
1230 	} else {
1231 		opt_peep(b);
1232 		opt_deadstores(b);
1233 	}
1234 	/*
1235 	 * Set up values for branch optimizer.
1236 	 */
1237 	if (BPF_SRC(b->s.code) == BPF_K)
1238 		b->oval = K(b->s.k);
1239 	else
1240 		b->oval = b->val[X_ATOM];
1241 	b->et.code = b->s.code;
1242 	b->ef.code = -b->s.code;
1243 }
1244 
1245 /*
1246  * Return true if any register that is used on exit from 'succ', has
1247  * an exit value that is different from the corresponding exit value
1248  * from 'b'.
1249  */
1250 static int
1251 use_conflict(struct block *b, struct block *succ)
1252 {
1253 	int atom;
1254 	atomset use = succ->out_use;
1255 
1256 	if (use == 0)
1257 		return 0;
1258 
1259 	for (atom = 0; atom < N_ATOMS; ++atom)
1260 		if (ATOMELEM(use, atom))
1261 			if (b->val[atom] != succ->val[atom])
1262 				return 1;
1263 	return 0;
1264 }
1265 
1266 static struct block *
1267 fold_edge(struct block *child, struct edge *ep)
1268 {
1269 	int sense;
1270 	int aval0, aval1, oval0, oval1;
1271 	int code = ep->code;
1272 
1273 	if (code < 0) {
1274 		code = -code;
1275 		sense = 0;
1276 	} else
1277 		sense = 1;
1278 
1279 	if (child->s.code != code)
1280 		return 0;
1281 
1282 	aval0 = child->val[A_ATOM];
1283 	oval0 = child->oval;
1284 	aval1 = ep->pred->val[A_ATOM];
1285 	oval1 = ep->pred->oval;
1286 
1287 	if (aval0 != aval1)
1288 		return 0;
1289 
1290 	if (oval0 == oval1)
1291 		/*
1292 		 * The operands of the branch instructions are
1293 		 * identical, so the result is true if a true
1294 		 * branch was taken to get here, otherwise false.
1295 		 */
1296 		return sense ? JT(child) : JF(child);
1297 
1298 	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1299 		/*
1300 		 * At this point, we only know the comparison if we
1301 		 * came down the true branch, and it was an equality
1302 		 * comparison with a constant.
1303 		 *
1304 		 * I.e., if we came down the true branch, and the branch
1305 		 * was an equality comparison with a constant, we know the
1306 		 * accumulator contains that constant.  If we came down
1307 		 * the false branch, or the comparison wasn't with a
1308 		 * constant, we don't know what was in the accumulator.
1309 		 *
1310 		 * We rely on the fact that distinct constants have distinct
1311 		 * value numbers.
1312 		 */
1313 		return JF(child);
1314 
1315 	return 0;
1316 }
1317 
1318 static void
1319 opt_j(struct edge *ep)
1320 {
1321 	register int i, k;
1322 	register struct block *target;
1323 
1324 	if (JT(ep->succ) == 0)
1325 		return;
1326 
1327 	if (JT(ep->succ) == JF(ep->succ)) {
1328 		/*
1329 		 * Common branch targets can be eliminated, provided
1330 		 * there is no data dependency.
1331 		 */
1332 		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
1333 			done = 0;
1334 			ep->succ = JT(ep->succ);
1335 		}
1336 	}
1337 	/*
1338 	 * For each edge dominator that matches the successor of this
1339 	 * edge, promote the edge successor to its grandchild.
1340 	 *
1341 	 * XXX We violate the set abstraction here in favor of a reasonably
1342 	 * efficient loop.
1343 	 */
1344  top:
1345 	for (i = 0; i < edgewords; ++i) {
1346 		register bpf_u_int32 x = ep->edom[i];
1347 
1348 		while (x != 0) {
1349 			k = ffs(x) - 1;
1350 			x &=~ (1 << k);
1351 			k += i * BITS_PER_WORD;
1352 
1353 			target = fold_edge(ep->succ, edges[k]);
1354 			/*
1355 			 * Check that there is no data dependency between
1356 			 * nodes that will be violated if we move the edge.
1357 			 */
1358 			if (target != 0 && !use_conflict(ep->pred, target)) {
1359 				done = 0;
1360 				ep->succ = target;
1361 				if (JT(target) != 0)
1362 					/*
1363 					 * Start over unless we hit a leaf.
1364 					 */
1365 					goto top;
1366 				return;
1367 			}
1368 		}
1369 	}
1370 }
1371 
1372 
1373 static void
1374 or_pullup(struct block *b)
1375 {
1376 	int val, at_top;
1377 	struct block *pull;
1378 	struct block **diffp, **samep;
1379 	struct edge *ep;
1380 
1381 	ep = b->in_edges;
1382 	if (ep == 0)
1383 		return;
1384 
1385 	/*
1386 	 * Make sure each predecessor loads the same value.
1387 	 * XXX why?
1388 	 */
1389 	val = ep->pred->val[A_ATOM];
1390 	for (ep = ep->next; ep != 0; ep = ep->next)
1391 		if (val != ep->pred->val[A_ATOM])
1392 			return;
1393 
1394 	if (JT(b->in_edges->pred) == b)
1395 		diffp = &JT(b->in_edges->pred);
1396 	else
1397 		diffp = &JF(b->in_edges->pred);
1398 
1399 	at_top = 1;
1400 	while (1) {
1401 		if (*diffp == 0)
1402 			return;
1403 
1404 		if (JT(*diffp) != JT(b))
1405 			return;
1406 
1407 		if (!SET_MEMBER((*diffp)->dom, b->id))
1408 			return;
1409 
1410 		if ((*diffp)->val[A_ATOM] != val)
1411 			break;
1412 
1413 		diffp = &JF(*diffp);
1414 		at_top = 0;
1415 	}
1416 	samep = &JF(*diffp);
1417 	while (1) {
1418 		if (*samep == 0)
1419 			return;
1420 
1421 		if (JT(*samep) != JT(b))
1422 			return;
1423 
1424 		if (!SET_MEMBER((*samep)->dom, b->id))
1425 			return;
1426 
1427 		if ((*samep)->val[A_ATOM] == val)
1428 			break;
1429 
1430 		/* XXX Need to check that there are no data dependencies
1431 		   between diffp and samep.  Currently, the code generator
1432 		   will not produce such dependencies. */
1433 		samep = &JF(*samep);
1434 	}
1435 #ifdef notdef
1436 	/* XXX This doesn't cover everything. */
1437 	for (i = 0; i < N_ATOMS; ++i)
1438 		if ((*samep)->val[i] != pred->val[i])
1439 			return;
1440 #endif
1441 	/* Pull up the node. */
1442 	pull = *samep;
1443 	*samep = JF(pull);
1444 	JF(pull) = *diffp;
1445 
1446 	/*
1447 	 * At the top of the chain, each predecessor needs to point at the
1448 	 * pulled up node.  Inside the chain, there is only one predecessor
1449 	 * to worry about.
1450 	 */
1451 	if (at_top) {
1452 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1453 			if (JT(ep->pred) == b)
1454 				JT(ep->pred) = pull;
1455 			else
1456 				JF(ep->pred) = pull;
1457 		}
1458 	}
1459 	else
1460 		*diffp = pull;
1461 
1462 	done = 0;
1463 }
1464 
1465 static void
1466 and_pullup(struct block *b)
1467 {
1468 	int val, at_top;
1469 	struct block *pull;
1470 	struct block **diffp, **samep;
1471 	struct edge *ep;
1472 
1473 	ep = b->in_edges;
1474 	if (ep == 0)
1475 		return;
1476 
1477 	/*
1478 	 * Make sure each predecessor loads the same value.
1479 	 */
1480 	val = ep->pred->val[A_ATOM];
1481 	for (ep = ep->next; ep != 0; ep = ep->next)
1482 		if (val != ep->pred->val[A_ATOM])
1483 			return;
1484 
1485 	if (JT(b->in_edges->pred) == b)
1486 		diffp = &JT(b->in_edges->pred);
1487 	else
1488 		diffp = &JF(b->in_edges->pred);
1489 
1490 	at_top = 1;
1491 	while (1) {
1492 		if (*diffp == 0)
1493 			return;
1494 
1495 		if (JF(*diffp) != JF(b))
1496 			return;
1497 
1498 		if (!SET_MEMBER((*diffp)->dom, b->id))
1499 			return;
1500 
1501 		if ((*diffp)->val[A_ATOM] != val)
1502 			break;
1503 
1504 		diffp = &JT(*diffp);
1505 		at_top = 0;
1506 	}
1507 	samep = &JT(*diffp);
1508 	while (1) {
1509 		if (*samep == 0)
1510 			return;
1511 
1512 		if (JF(*samep) != JF(b))
1513 			return;
1514 
1515 		if (!SET_MEMBER((*samep)->dom, b->id))
1516 			return;
1517 
1518 		if ((*samep)->val[A_ATOM] == val)
1519 			break;
1520 
1521 		/* XXX Need to check that there are no data dependencies
1522 		   between diffp and samep.  Currently, the code generator
1523 		   will not produce such dependencies. */
1524 		samep = &JT(*samep);
1525 	}
1526 #ifdef notdef
1527 	/* XXX This doesn't cover everything. */
1528 	for (i = 0; i < N_ATOMS; ++i)
1529 		if ((*samep)->val[i] != pred->val[i])
1530 			return;
1531 #endif
1532 	/* Pull up the node. */
1533 	pull = *samep;
1534 	*samep = JT(pull);
1535 	JT(pull) = *diffp;
1536 
1537 	/*
1538 	 * At the top of the chain, each predecessor needs to point at the
1539 	 * pulled up node.  Inside the chain, there is only one predecessor
1540 	 * to worry about.
1541 	 */
1542 	if (at_top) {
1543 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1544 			if (JT(ep->pred) == b)
1545 				JT(ep->pred) = pull;
1546 			else
1547 				JF(ep->pred) = pull;
1548 		}
1549 	}
1550 	else
1551 		*diffp = pull;
1552 
1553 	done = 0;
1554 }
1555 
1556 static void
1557 opt_blks(struct block *root, int do_stmts)
1558 {
1559 	int i, maxlevel;
1560 	struct block *p;
1561 
1562 	init_val();
1563 	maxlevel = root->level;
1564 
1565 	find_inedges(root);
1566 	for (i = maxlevel; i >= 0; --i)
1567 		for (p = levels[i]; p; p = p->link)
1568 			opt_blk(p, do_stmts);
1569 
1570 	if (do_stmts)
1571 		/*
1572 		 * No point trying to move branches; it can't possibly
1573 		 * make a difference at this point.
1574 		 */
1575 		return;
1576 
1577 	for (i = 1; i <= maxlevel; ++i) {
1578 		for (p = levels[i]; p; p = p->link) {
1579 			opt_j(&p->et);
1580 			opt_j(&p->ef);
1581 		}
1582 	}
1583 
1584 	find_inedges(root);
1585 	for (i = 1; i <= maxlevel; ++i) {
1586 		for (p = levels[i]; p; p = p->link) {
1587 			or_pullup(p);
1588 			and_pullup(p);
1589 		}
1590 	}
1591 }
1592 
1593 static inline void
1594 link_inedge(struct edge *parent, struct block *child)
1595 {
1596 	parent->next = child->in_edges;
1597 	child->in_edges = parent;
1598 }
1599 
1600 static void
1601 find_inedges(struct block *root)
1602 {
1603 	int i;
1604 	struct block *b;
1605 
1606 	for (i = 0; i < n_blocks; ++i)
1607 		blocks[i]->in_edges = 0;
1608 
1609 	/*
1610 	 * Traverse the graph, adding each edge to the predecessor
1611 	 * list of its successors.  Skip the leaves (i.e. level 0).
1612 	 */
1613 	for (i = root->level; i > 0; --i) {
1614 		for (b = levels[i]; b != 0; b = b->link) {
1615 			link_inedge(&b->et, JT(b));
1616 			link_inedge(&b->ef, JF(b));
1617 		}
1618 	}
1619 }
1620 
1621 static void
1622 opt_root(struct block **b)
1623 {
1624 	struct slist *tmp, *s;
1625 
1626 	s = (*b)->stmts;
1627 	(*b)->stmts = 0;
1628 	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
1629 		*b = JT(*b);
1630 
1631 	tmp = (*b)->stmts;
1632 	if (tmp != 0)
1633 		sappend(s, tmp);
1634 	(*b)->stmts = s;
1635 
1636 	/*
1637 	 * If the root node is a return, then there is no
1638 	 * point executing any statements (since the bpf machine
1639 	 * has no side effects).
1640 	 */
1641 	if (BPF_CLASS((*b)->s.code) == BPF_RET)
1642 		(*b)->stmts = 0;
1643 }
1644 
1645 static void
1646 opt_loop(struct block *root, int do_stmts)
1647 {
1648 
1649 #ifdef BDEBUG
1650 	if (dflag > 1) {
1651 		printf("opt_loop(root, %d) begin\n", do_stmts);
1652 		opt_dump(root);
1653 	}
1654 #endif
1655 	do {
1656 		done = 1;
1657 		find_levels(root);
1658 		find_dom(root);
1659 		find_closure(root);
1660 		find_ud(root);
1661 		find_edom(root);
1662 		opt_blks(root, do_stmts);
1663 #ifdef BDEBUG
1664 		if (dflag > 1) {
1665 			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
1666 			opt_dump(root);
1667 		}
1668 #endif
1669 	} while (!done);
1670 }
1671 
1672 /*
1673  * Optimize the filter code in its dag representation.
1674  */
1675 void
1676 bpf_optimize(struct block **rootp)
1677 {
1678 	struct block *root;
1679 
1680 	root = *rootp;
1681 
1682 	opt_init(root);
1683 	opt_loop(root, 0);
1684 	opt_loop(root, 1);
1685 	intern_blocks(root);
1686 #ifdef BDEBUG
1687 	if (dflag > 1) {
1688 		printf("after intern_blocks()\n");
1689 		opt_dump(root);
1690 	}
1691 #endif
1692 	opt_root(rootp);
1693 #ifdef BDEBUG
1694 	if (dflag > 1) {
1695 		printf("after opt_root()\n");
1696 		opt_dump(root);
1697 	}
1698 #endif
1699 	opt_cleanup();
1700 }
1701 
1702 static void
1703 make_marks(struct block *p)
1704 {
1705 	if (!isMarked(p)) {
1706 		Mark(p);
1707 		if (BPF_CLASS(p->s.code) != BPF_RET) {
1708 			make_marks(JT(p));
1709 			make_marks(JF(p));
1710 		}
1711 	}
1712 }
1713 
1714 /*
1715  * Mark code array such that isMarked(i) is true
1716  * only for nodes that are alive.
1717  */
1718 static void
1719 mark_code(struct block *p)
1720 {
1721 	cur_mark += 1;
1722 	make_marks(p);
1723 }
1724 
1725 /*
1726  * True iff the two stmt lists load the same value from the packet into
1727  * the accumulator.
1728  */
1729 static int
1730 eq_slist(struct slist *x, struct slist *y)
1731 {
1732 	while (1) {
1733 		while (x && x->s.code == NOP)
1734 			x = x->next;
1735 		while (y && y->s.code == NOP)
1736 			y = y->next;
1737 		if (x == 0)
1738 			return y == 0;
1739 		if (y == 0)
1740 			return x == 0;
1741 		if (x->s.code != y->s.code || x->s.k != y->s.k)
1742 			return 0;
1743 		x = x->next;
1744 		y = y->next;
1745 	}
1746 }
1747 
1748 static inline int
1749 eq_blk(struct block *b0, struct block *b1)
1750 {
1751 	if (b0->s.code == b1->s.code &&
1752 	    b0->s.k == b1->s.k &&
1753 	    b0->et.succ == b1->et.succ &&
1754 	    b0->ef.succ == b1->ef.succ)
1755 		return eq_slist(b0->stmts, b1->stmts);
1756 	return 0;
1757 }
1758 
1759 static void
1760 intern_blocks(struct block *root)
1761 {
1762 	struct block *p;
1763 	int i, j;
1764 	int done1; /* don't shadow global */
1765  top:
1766 	done1 = 1;
1767 	for (i = 0; i < n_blocks; ++i)
1768 		blocks[i]->link = 0;
1769 
1770 	mark_code(root);
1771 
1772 	for (i = n_blocks - 1; --i >= 0; ) {
1773 		if (!isMarked(blocks[i]))
1774 			continue;
1775 		for (j = i + 1; j < n_blocks; ++j) {
1776 			if (!isMarked(blocks[j]))
1777 				continue;
1778 			if (eq_blk(blocks[i], blocks[j])) {
1779 				blocks[i]->link = blocks[j]->link ?
1780 					blocks[j]->link : blocks[j];
1781 				break;
1782 			}
1783 		}
1784 	}
1785 	for (i = 0; i < n_blocks; ++i) {
1786 		p = blocks[i];
1787 		if (JT(p) == 0)
1788 			continue;
1789 		if (JT(p)->link) {
1790 			done1 = 0;
1791 			JT(p) = JT(p)->link;
1792 		}
1793 		if (JF(p)->link) {
1794 			done1 = 0;
1795 			JF(p) = JF(p)->link;
1796 		}
1797 	}
1798 	if (!done1)
1799 		goto top;
1800 }
1801 
1802 static void
1803 opt_cleanup(void)
1804 {
1805 	free((void *)vnode_base);
1806 	free((void *)vmap);
1807 	free((void *)edges);
1808 	free((void *)space);
1809 	free((void *)levels);
1810 	free((void *)blocks);
1811 }
1812 
1813 /*
1814  * Return the number of stmts in 's'.
1815  */
1816 static u_int
1817 slength(struct slist *s)
1818 {
1819 	u_int n = 0;
1820 
1821 	for (; s; s = s->next)
1822 		if (s->s.code != NOP)
1823 			++n;
1824 	return n;
1825 }
1826 
1827 /*
1828  * Return the number of nodes reachable by 'p'.
1829  * All nodes should be initially unmarked.
1830  */
1831 static int
1832 count_blocks(struct block *p)
1833 {
1834 	if (p == 0 || isMarked(p))
1835 		return 0;
1836 	Mark(p);
1837 	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
1838 }
1839 
1840 /*
1841  * Do a depth first search on the flow graph, numbering the
1842  * basic blocks, and entering them into the 'blocks' array.
1843  */
1844 static void
1845 number_blks_r(struct block *p)
1846 {
1847 	int n;
1848 
1849 	if (p == 0 || isMarked(p))
1850 		return;
1851 
1852 	Mark(p);
1853 	n = n_blocks++;
1854 	p->id = n;
1855 	blocks[n] = p;
1856 
1857 	number_blks_r(JT(p));
1858 	number_blks_r(JF(p));
1859 }
1860 
1861 /*
1862  * Return the number of stmts in the flowgraph reachable by 'p'.
1863  * The nodes should be unmarked before calling.
1864  *
1865  * Note that "stmts" means "instructions", and that this includes
1866  *
1867  *	side-effect statements in 'p' (slength(p->stmts));
1868  *
1869  *	statements in the true branch from 'p' (count_stmts(JT(p)));
1870  *
1871  *	statements in the false branch from 'p' (count_stmts(JF(p)));
1872  *
1873  *	the conditional jump itself (1);
1874  *
1875  *	an extra long jump if the true branch requires it (p->longjt);
1876  *
1877  *	an extra long jump if the false branch requires it (p->longjf).
1878  */
1879 static u_int
1880 count_stmts(struct block *p)
1881 {
1882 	u_int n;
1883 
1884 	if (p == 0 || isMarked(p))
1885 		return 0;
1886 	Mark(p);
1887 	n = count_stmts(JT(p)) + count_stmts(JF(p));
1888 	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
1889 }
1890 
1891 /*
1892  * Allocate memory.  All allocation is done before optimization
1893  * is begun.  A linear bound on the size of all data structures is computed
1894  * from the total number of blocks and/or statements.
1895  */
1896 static void
1897 opt_init(struct block *root)
1898 {
1899 	bpf_u_int32 *p;
1900 	int i, n, max_stmts;
1901 
1902 	/*
1903 	 * First, count the blocks, so we can malloc an array to map
1904 	 * block number to block.  Then, put the blocks into the array.
1905 	 */
1906 	unMarkAll();
1907 	n = count_blocks(root);
1908 	blocks = (struct block **)calloc(n, sizeof(*blocks));
1909 	if (blocks == NULL)
1910 		bpf_error("malloc");
1911 	unMarkAll();
1912 	n_blocks = 0;
1913 	number_blks_r(root);
1914 
1915 	n_edges = 2 * n_blocks;
1916 	edges = (struct edge **)calloc(n_edges, sizeof(*edges));
1917 	if (edges == NULL)
1918 		bpf_error("malloc");
1919 
1920 	/*
1921 	 * The number of levels is bounded by the number of nodes.
1922 	 */
1923 	levels = (struct block **)calloc(n_blocks, sizeof(*levels));
1924 	if (levels == NULL)
1925 		bpf_error("malloc");
1926 
1927 	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
1928 	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;
1929 
1930 	/* XXX */
1931 	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
1932 				 + n_edges * edgewords * sizeof(*space));
1933 	if (space == NULL)
1934 		bpf_error("malloc");
1935 	p = space;
1936 	all_dom_sets = p;
1937 	for (i = 0; i < n; ++i) {
1938 		blocks[i]->dom = p;
1939 		p += nodewords;
1940 	}
1941 	all_closure_sets = p;
1942 	for (i = 0; i < n; ++i) {
1943 		blocks[i]->closure = p;
1944 		p += nodewords;
1945 	}
1946 	all_edge_sets = p;
1947 	for (i = 0; i < n; ++i) {
1948 		register struct block *b = blocks[i];
1949 
1950 		b->et.edom = p;
1951 		p += edgewords;
1952 		b->ef.edom = p;
1953 		p += edgewords;
1954 		b->et.id = i;
1955 		edges[i] = &b->et;
1956 		b->ef.id = n_blocks + i;
1957 		edges[n_blocks + i] = &b->ef;
1958 		b->et.pred = b;
1959 		b->ef.pred = b;
1960 	}
1961 	max_stmts = 0;
1962 	for (i = 0; i < n; ++i)
1963 		max_stmts += slength(blocks[i]->stmts) + 1;
1964 	/*
1965 	 * We allocate at most 3 value numbers per statement,
1966 	 * so this is an upper bound on the number of valnodes
1967 	 * we'll need.
1968 	 */
1969 	maxval = 3 * max_stmts;
1970 	vmap = (struct vmapinfo *)calloc(maxval, sizeof(*vmap));
1971 	vnode_base = (struct valnode *)calloc(maxval, sizeof(*vnode_base));
1972 	if (vmap == NULL || vnode_base == NULL)
1973 		bpf_error("malloc");
1974 }
1975 
1976 /*
1977  * Some pointers used to convert the basic block form of the code,
1978  * into the array form that BPF requires.  'fstart' will point to
1979  * the malloc'd array while 'ftail' is used during the recursive traversal.
1980  */
1981 static struct bpf_insn *fstart;
1982 static struct bpf_insn *ftail;
1983 
1984 #ifdef BDEBUG
1985 int bids[1000];
1986 #endif
1987 
1988 /*
1989  * Returns true if successful.  Returns false if a branch has
1990  * an offset that is too large.  If so, we have marked that
1991  * branch so that on a subsequent iteration, it will be treated
1992  * properly.
1993  */
1994 static int
1995 convert_code_r(struct block *p)
1996 {
1997 	struct bpf_insn *dst;
1998 	struct slist *src;
1999 	int slen;
2000 	u_int off;
2001 	int extrajmps;		/* number of extra jumps inserted */
2002 	struct slist **offset = NULL;
2003 
2004 	if (p == 0 || isMarked(p))
2005 		return (1);
2006 	Mark(p);
2007 
2008 	if (convert_code_r(JF(p)) == 0)
2009 		return (0);
2010 	if (convert_code_r(JT(p)) == 0)
2011 		return (0);
2012 
2013 	slen = slength(p->stmts);
2014 	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
2015 		/* inflate length by any extra jumps */
2016 
2017 	p->offset = dst - fstart;
2018 
2019 	/* generate offset[] for convenience  */
2020 	if (slen) {
2021 		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
2022 		if (!offset) {
2023 			bpf_error("not enough core");
2024 			/*NOTREACHED*/
2025 		}
2026 	}
2027 	src = p->stmts;
2028 	for (off = 0; off < slen && src; off++) {
2029 #if 0
2030 		printf("off=%d src=%x\n", off, src);
2031 #endif
2032 		offset[off] = src;
2033 		src = src->next;
2034 	}
2035 
2036 	off = 0;
2037 	for (src = p->stmts; src; src = src->next) {
2038 		if (src->s.code == NOP)
2039 			continue;
2040 		dst->code = (u_short)src->s.code;
2041 		dst->k = src->s.k;
2042 
2043 		/* fill block-local relative jump */
2044 		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
2045 #if 0
2046 			if (src->s.jt || src->s.jf) {
2047 				bpf_error("illegal jmp destination");
2048 				/*NOTREACHED*/
2049 			}
2050 #endif
2051 			goto filled;
2052 		}
2053 		if (off == slen - 2)	/*???*/
2054 			goto filled;
2055 
2056 	    {
2057 		int i;
2058 		int jt, jf;
2059 		const char *ljerr = "%s for block-local relative jump: off=%d";
2060 
2061 #if 0
2062 		printf("code=%x off=%d %x %x\n", src->s.code,
2063 			off, src->s.jt, src->s.jf);
2064 #endif
2065 
2066 		if (!src->s.jt || !src->s.jf) {
2067 			bpf_error(ljerr, "no jmp destination", off);
2068 			/*NOTREACHED*/
2069 		}
2070 
2071 		jt = jf = 0;
2072 		for (i = 0; i < slen; i++) {
2073 			if (offset[i] == src->s.jt) {
2074 				if (jt) {
2075 					bpf_error(ljerr, "multiple matches", off);
2076 					/*NOTREACHED*/
2077 				}
2078 
2079 				dst->jt = i - off - 1;
2080 				jt++;
2081 			}
2082 			if (offset[i] == src->s.jf) {
2083 				if (jf) {
2084 					bpf_error(ljerr, "multiple matches", off);
2085 					/*NOTREACHED*/
2086 				}
2087 				dst->jf = i - off - 1;
2088 				jf++;
2089 			}
2090 		}
2091 		if (!jt || !jf) {
2092 			bpf_error(ljerr, "no destination found", off);
2093 			/*NOTREACHED*/
2094 		}
2095 	    }
2096 filled:
2097 		++dst;
2098 		++off;
2099 	}
2100 	if (offset)
2101 		free(offset);
2102 
2103 #ifdef BDEBUG
2104 	bids[dst - fstart] = p->id + 1;
2105 #endif
2106 	dst->code = (u_short)p->s.code;
2107 	dst->k = p->s.k;
2108 	if (JT(p)) {
2109 		extrajmps = 0;
2110 		off = JT(p)->offset - (p->offset + slen) - 1;
2111 		if (off >= 256) {
2112 		    /* offset too large for branch, must add a jump */
2113 		    if (p->longjt == 0) {
2114 		    	/* mark this instruction and retry */
2115 			p->longjt++;
2116 			return(0);
2117 		    }
2118 		    /* branch if T to following jump */
2119 		    dst->jt = extrajmps;
2120 		    extrajmps++;
2121 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
2122 		    dst[extrajmps].k = off - extrajmps;
2123 		}
2124 		else
2125 		    dst->jt = off;
2126 		off = JF(p)->offset - (p->offset + slen) - 1;
2127 		if (off >= 256) {
2128 		    /* offset too large for branch, must add a jump */
2129 		    if (p->longjf == 0) {
2130 		    	/* mark this instruction and retry */
2131 			p->longjf++;
2132 			return(0);
2133 		    }
2134 		    /* branch if F to following jump */
2135 		    /* if two jumps are inserted, F goes to second one */
2136 		    dst->jf = extrajmps;
2137 		    extrajmps++;
2138 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
2139 		    dst[extrajmps].k = off - extrajmps;
2140 		}
2141 		else
2142 		    dst->jf = off;
2143 	}
2144 	return (1);
2145 }
2146 
2147 
2148 /*
2149  * Convert flowgraph intermediate representation to the
2150  * BPF array representation.  Set *lenp to the number of instructions.
2151  *
2152  * This routine does *NOT* leak the memory pointed to by fp.  It *must
2153  * not* do free(fp) before returning fp; doing so would make no sense,
2154  * as the BPF array pointed to by the return value of icode_to_fcode()
2155  * must be valid - it's being returned for use in a bpf_program structure.
2156  *
2157  * If it appears that icode_to_fcode() is leaking, the problem is that
2158  * the program using pcap_compile() is failing to free the memory in
2159  * the BPF program when it's done - the leak is in the program, not in
2160  * the routine that happens to be allocating the memory.  (By analogy, if
2161  * a program calls fopen() without ever calling fclose() on the FILE *,
2162  * it will leak the FILE structure; the leak is not in fopen(), it's in
2163  * the program.)  Change the program to use pcap_freecode() when it's
2164  * done with the filter program.  See the pcap man page.
2165  */
2166 struct bpf_insn *
2167 icode_to_fcode(struct block *root, u_int *lenp)
2168 {
2169 	u_int n;
2170 	struct bpf_insn *fp;
2171 
2172 	/*
2173 	 * Loop doing convert_code_r() until no branches remain
2174 	 * with too-large offsets.
2175 	 */
2176 	while (1) {
2177 	    unMarkAll();
2178 	    n = *lenp = count_stmts(root);
2179 
2180 	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2181 	    if (fp == NULL)
2182 		    bpf_error("malloc");
2183 	    memset((char *)fp, 0, sizeof(*fp) * n);
2184 	    fstart = fp;
2185 	    ftail = fp + n;
2186 
2187 	    unMarkAll();
2188 	    if (convert_code_r(root))
2189 		break;
2190 	    free(fp);
2191 	}
2192 
2193 	return fp;
2194 }
2195 
2196 /*
2197  * Make a copy of a BPF program and put it in the "fcode" member of
2198  * a "pcap_t".
2199  *
2200  * If we fail to allocate memory for the copy, fill in the "errbuf"
2201  * member of the "pcap_t" with an error message, and return -1;
2202  * otherwise, return 0.
2203  */
2204 int
2205 install_bpf_program(pcap_t *p, struct bpf_program *fp)
2206 {
2207 	size_t prog_size;
2208 
2209 	/*
2210 	 * Validate the program.
2211 	 */
2212 	if (!bpf_validate(fp->bf_insns, fp->bf_len)) {
2213 		snprintf(p->errbuf, sizeof(p->errbuf),
2214 			"BPF program is not valid");
2215 		return (-1);
2216 	}
2217 
2218 	/*
2219 	 * Free up any already installed program.
2220 	 */
2221 	pcap_freecode(&p->fcode);
2222 
2223 	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2224 	p->fcode.bf_len = fp->bf_len;
2225 	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2226 	if (p->fcode.bf_insns == NULL) {
2227 		snprintf(p->errbuf, sizeof(p->errbuf),
2228 			 "malloc: %s", pcap_strerror(errno));
2229 		return (-1);
2230 	}
2231 	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2232 	return (0);
2233 }
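
/*
 * Editorial sketch (not part of the original source): the usual
 * caller-side sequence around install_bpf_program().  Since it copies
 * the program into p->fcode, the caller still owns, and must free,
 * its own copy.  'set_filter_example' is a hypothetical name, and
 * 'OPT_EXAMPLES' a hypothetical guard.
 */
#ifdef OPT_EXAMPLES
static int
set_filter_example(pcap_t *p, char *expr)
{
	struct bpf_program prog;

	if (pcap_compile(p, &prog, expr, 1, 0) < 0)
		return (-1);	/* error text is in pcap_geterr(p) */
	if (install_bpf_program(p, &prog) < 0) {
		pcap_freecode(&prog);
		return (-1);
	}
	pcap_freecode(&prog);	/* install_bpf_program() made its own copy */
	return (0);
}
#endif /* OPT_EXAMPLES */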
2234 
2235 #ifdef BDEBUG
2236 static void
2237 opt_dump(struct block *root)
2238 {
2239 	struct bpf_program f;
2240 
2241 	memset(bids, 0, sizeof bids);
2242 	f.bf_insns = icode_to_fcode(root, &f.bf_len);
2243 	bpf_dump(&f, 1);
2244 	putchar('\n');
2245 	free((char *)f.bf_insns);
2246 }
2247 #endif
2248