/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	7.5 (Berkeley) 7/15/91
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <pcap/pcap-inttypes.h>
#include "pcap-types.h"

#ifndef _WIN32
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#endif /* _WIN32 */

#include <pcap/bpf.h>

#include <stdlib.h>

#define int32 bpf_int32
#define u_int32 bpf_u_int32

#ifndef LBL_ALIGN
/*
 * XXX - IA-64?  If not, this probably won't work on Win64 IA-64
 * systems, unless LBL_ALIGN is defined elsewhere for them.
 * XXX - SuperH?  If not, this probably won't work on WinCE SuperH
 * systems, unless LBL_ALIGN is defined elsewhere for them.
 */
#if defined(sparc) || defined(__sparc__) || defined(mips) || \
    defined(ibm032) || defined(__alpha) || defined(__hpux) || \
    defined(__arm__)
#define LBL_ALIGN
#endif
#endif

#ifndef LBL_ALIGN
#ifndef _WIN32
#include <netinet/in.h>
#endif

#define EXTRACT_SHORT(p)	((u_short)ntohs(*(u_short *)p))
#define EXTRACT_LONG(p)		(ntohl(*(u_int32 *)p))
#else
#define EXTRACT_SHORT(p)\
	((u_short)\
		((u_short)*((u_char *)p+0)<<8|\
		 (u_short)*((u_char *)p+1)<<0))
#define EXTRACT_LONG(p)\
		((u_int32)*((u_char *)p+0)<<24|\
		 (u_int32)*((u_char *)p+1)<<16|\
		 (u_int32)*((u_char *)p+2)<<8|\
		 (u_int32)*((u_char *)p+3)<<0)
#endif
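
/*
 * A quick worked example of the extraction macros above (both variants
 * compute the same thing; the byte-at-a-time form is for machines that
 * trap on unaligned loads): values are read in network byte order
 * (big-endian), so for the bytes { 0x08, 0x00 } EXTRACT_SHORT() yields
 * 0x0800, and for { 0x01, 0x02, 0x03, 0x04 } EXTRACT_LONG() yields
 * 0x01020304.
 */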

#ifdef __linux__
#include <linux/types.h>
#include <linux/if_packet.h>
#include <linux/filter.h>
#endif

enum {
	BPF_S_ANC_NONE,
	BPF_S_ANC_VLAN_TAG,
	BPF_S_ANC_VLAN_TAG_PRESENT,
};

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount
 * of data present in the buffer.
 * aux_data is auxiliary data, currently used only when interpreting
 * filters intended for the Linux kernel in cases where the kernel
 * rejects the filter; it contains VLAN tag information.
 * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0;
 * in all other cases, p is a pointer to a buffer and buflen is its size.
 *
 * Thanks to Ani Sinha <ani@arista.com> for providing the initial
 * implementation.
 */
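/*
 * Illustrative call (a sketch only; "prog", "pkt", "caplen" and "len"
 * are hypothetical names, e.g. a program built by pcap_compile() and a
 * captured packet):
 *
 *	u_int ret;
 *
 *	ret = bpf_filter_with_aux_data(prog.bf_insns, pkt, len, caplen,
 *	    NULL);
 *	// non-zero means the packet matched; 0 means it was rejected
 */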
u_int
bpf_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct bpf_aux_data *aux_data)
{
	register u_int32 A, X;
	register bpf_u_int32 k;
	u_int32 mem[BPF_MEMWORDS];

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
			abort();
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			switch (pc->k) {

#if defined(SKF_AD_VLAN_TAG_PRESENT)
			case SKF_AD_OFF + SKF_AD_VLAN_TAG:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag;
				break;

			case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag_present;
				break;
#endif
			default:
				k = pc->k;
				if (k >= buflen) {
					return 0;
				}
				A = p[k];
				break;
			}
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
				return 0;
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
				return 0;
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			/*
			 * XXX - we currently implement "ip6 protochain"
			 * with backward jumps, so sign-extend pc->k.
			 */
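			/*
			 * For example, a k of 0xFFFFFFFE is sign-extended
			 * to -2; combined with the ++pc at the top of the
			 * loop, the jump target is then the instruction
			 * immediately preceding this one.
			 */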
			pc += (bpf_int32)pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			/*
			 * Most BPF arithmetic is unsigned, but negation
			 * can't be unsigned; throw some casts to
			 * specify what we're trying to do.
			 */
			A = (u_int32)(-(int32)A);
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return bpf_filter_with_aux_data(pc, p, wirelen, buflen, NULL);
}

/*
 * Return true if 'f' is a valid filter program.
 * The constraints are that each jump be forward and land on a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
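/*
 * Illustrative use (a sketch; "prog", "pkt", "caplen" and "len" are
 * hypothetical names, e.g. a program from pcap_compile() and a captured
 * packet):
 *
 *	if (!bpf_validate(prog.bf_insns, (int)prog.bf_len))
 *		return;		// refuse to run an invalid program
 *	accepted = bpf_filter(prog.bf_insns, pkt, len, caplen);
 */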
int
bpf_validate(const struct bpf_insn *f, int len)
{
	u_int i, from;
	const struct bpf_insn *p;

	if (len < 1)
		return 0;

	for (i = 0; i < (u_int)len; ++i) {
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_XOR:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/*
				 * Check for constant division or modulus
				 * by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
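			/*
			 * A concrete instance of the checks below: with
			 * len == 3, an unconditional jump at i == 0 (so
			 * from == 1) may have a k of at most 1, since
			 * from + k must stay strictly below len.
			 */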
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= (u_int)len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= (u_int)len ||
				    from + p->jf >= (u_int)len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}
	return BPF_CLASS(f[len - 1].code) == BPF_RET;
}