/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c       7.5 (Berkeley) 7/15/91
 */
40
41 #ifdef HAVE_CONFIG_H
42 #include <config.h>
43 #endif
44
45 #include <pcap/pcap-inttypes.h>
46 #include "pcap-types.h"
47 #include "extract.h"
48 #include "diag-control.h"
49
50 #define EXTRACT_SHORT EXTRACT_BE_U_2
51 #define EXTRACT_LONG EXTRACT_BE_U_4
52
53 #ifndef _WIN32
54 #include <sys/param.h>
55 #include <sys/types.h>
56 #include <sys/time.h>
57 #endif /* _WIN32 */
58
59 #include <pcap-int.h>
60
61 #include <stdlib.h>
62
63 #ifdef __linux__
64 #include <linux/types.h>
65 #include <linux/if_packet.h>
66 #include <linux/filter.h>
67 #endif
68
/*
 * Markers for loads of Linux kernel "ancillary data" (removed VLAN tag
 * information).  BPF_S_ANC_NONE means "not an ancillary-data load".
 * NOTE(review): not referenced in the code visible in this file —
 * presumably used by other translation units; confirm before removing.
 */
enum {
	BPF_S_ANC_NONE,
	BPF_S_ANC_VLAN_TAG,
	BPF_S_ANC_VLAN_TAG_PRESENT,
};
74
/*
 * Execute the filter program starting at pc on the packet p.
 *
 *   pc       - first instruction of the BPF program; NULL means
 *              "no filter", which accepts every packet
 *   p        - pointer to the packet data
 *   wirelen  - length of the original packet
 *   buflen   - amount of packet data actually present at p
 *   aux_data - auxiliary data, currently used only when interpreting
 *              filters intended for the Linux kernel in cases where the
 *              kernel rejects the filter; it contains VLAN tag information
 *
 * Returns the filter program's return value: 0 means "reject the packet",
 * non-zero is the number of bytes to accept.
 *
 * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0,
 * in all other cases, p is a pointer to a buffer and buflen is its size.
 *
 * Thanks to Ani Sinha <ani@arista.com> for providing initial implementation
 */
#if defined(SKF_AD_VLAN_TAG_PRESENT)
u_int
pcap_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct bpf_aux_data *aux_data)
#else
u_int
pcap_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct bpf_aux_data *aux_data _U_)
#endif
{
	register uint32_t A, X;		/* accumulator and index register */
	register bpf_u_int32 k;		/* current packet offset / operand */
	uint32_t mem[BPF_MEMWORDS];	/* scratch memory store (M[]) */

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
			/*
			 * Invalid instruction; programs should be run
			 * through pcap_validate_filter() first.
			 */
			abort();
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		/*
		 * Absolute loads of 32-bit, 16-bit, and 8-bit packet data.
		 * The two-part comparisons below are written so that
		 * "buflen - k" can't underflow; they reject any access
		 * that would run past the end of the buffer.
		 */
		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			/*
			 * Yes, we know, this switch doesn't do
			 * anything unless we're building for
			 * a Linux kernel with removed VLAN
			 * tags available as meta-data.
			 */
			DIAG_OFF_DEFAULT_ONLY_SWITCH
			switch (pc->k) {

#if defined(SKF_AD_VLAN_TAG_PRESENT)
			case SKF_AD_OFF + SKF_AD_VLAN_TAG:
				/* VLAN tag comes from aux data, not the packet */
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag;
				break;

			case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag_present;
				break;
#endif
			default:
				k = pc->k;
				if (k >= buflen) {
					return 0;
				}
				A = p[k];
				break;
			}
			DIAG_ON_DEFAULT_ONLY_SWITCH
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		/*
		 * Indirect loads: offset is X + k.  Each bounds check is
		 * again split so no intermediate subtraction can underflow
		 * and no addition can wrap past buflen undetected.
		 */
		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
				return 0;
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			/* X = 4 * (low nibble of p[k]); classic IP-header-length idiom */
			k = pc->k;
			if (k >= buflen) {
				return 0;
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		/*
		 * Scratch-memory loads/stores; pcap_validate_filter()
		 * guarantees k < BPF_MEMWORDS.
		 */
		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			/*
			 * XXX - we currently implement "ip6 protochain"
			 * with backward jumps, so sign-extend pc->k.
			 */
			pc += (bpf_int32)pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			/* division by zero rejects the packet rather than trapping */
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			/* shifting a 32-bit value by >= 32 is undefined in C,
			 * so define it here as yielding 0 */
			if (X < 32)
				A <<= X;
			else
				A = 0;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			if (X < 32)
				A >>= X;
			else
				A = 0;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			/* k == 0 is rejected by pcap_validate_filter() */
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			/*
			 * Most BPF arithmetic is unsigned, but negation
			 * can't be unsigned; respecify it as subtracting
			 * the accumulator from 0U, so that 1) we don't
			 * get compiler warnings about negating an unsigned
			 * value and 2) don't get UBSan warnings about
			 * the result of negating 0x80000000 being undefined.
			 */
			A = (0U - A);
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}
389
/*
 * Execute the filter program pc on packet p with no auxiliary data.
 * wirelen is the original packet length, buflen the amount of data
 * present; see pcap_filter_with_aux_data() for details.
 */
u_int
pcap_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return pcap_filter_with_aux_data(pc, p, wirelen, buflen, NULL);
}
396
397 /*
398 * Return true if the 'fcode' is a valid filter program.
399 * The constraints are that each jump be forward and to a valid
400 * code, that memory accesses are within valid ranges (to the
401 * extent that this can be checked statically; loads of packet
402 * data have to be, and are, also checked at run time), and that
403 * the code terminates with either an accept or reject.
404 *
405 * The kernel needs to be able to verify an application's filter code.
406 * Otherwise, a bogus program could easily crash the system.
407 */
408 int
pcap_validate_filter(const struct bpf_insn * f,int len)409 pcap_validate_filter(const struct bpf_insn *f, int len)
410 {
411 u_int i, from;
412 const struct bpf_insn *p;
413
414 if (len < 1)
415 return 0;
416
417 for (i = 0; i < (u_int)len; ++i) {
418 p = &f[i];
419 switch (BPF_CLASS(p->code)) {
420 /*
421 * Check that memory operations use valid addresses.
422 */
423 case BPF_LD:
424 case BPF_LDX:
425 switch (BPF_MODE(p->code)) {
426 case BPF_IMM:
427 break;
428 case BPF_ABS:
429 case BPF_IND:
430 case BPF_MSH:
431 /*
432 * There's no maximum packet data size
433 * in userland. The runtime packet length
434 * check suffices.
435 */
436 break;
437 case BPF_MEM:
438 if (p->k >= BPF_MEMWORDS)
439 return 0;
440 break;
441 case BPF_LEN:
442 break;
443 default:
444 return 0;
445 }
446 break;
447 case BPF_ST:
448 case BPF_STX:
449 if (p->k >= BPF_MEMWORDS)
450 return 0;
451 break;
452 case BPF_ALU:
453 switch (BPF_OP(p->code)) {
454 case BPF_ADD:
455 case BPF_SUB:
456 case BPF_MUL:
457 case BPF_OR:
458 case BPF_AND:
459 case BPF_XOR:
460 case BPF_LSH:
461 case BPF_RSH:
462 case BPF_NEG:
463 break;
464 case BPF_DIV:
465 case BPF_MOD:
466 /*
467 * Check for constant division or modulus
468 * by 0.
469 */
470 if (BPF_SRC(p->code) == BPF_K && p->k == 0)
471 return 0;
472 break;
473 default:
474 return 0;
475 }
476 break;
477 case BPF_JMP:
478 /*
479 * Check that jumps are within the code block,
480 * and that unconditional branches don't go
481 * backwards as a result of an overflow.
482 * Unconditional branches have a 32-bit offset,
483 * so they could overflow; we check to make
484 * sure they don't. Conditional branches have
485 * an 8-bit offset, and the from address is <=
486 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
487 * is sufficiently small that adding 255 to it
488 * won't overflow.
489 *
490 * We know that len is <= BPF_MAXINSNS, and we
491 * assume that BPF_MAXINSNS is < the maximum size
492 * of a u_int, so that i + 1 doesn't overflow.
493 *
494 * For userland, we don't know that the from
495 * or len are <= BPF_MAXINSNS, but we know that
496 * from <= len, and, except on a 64-bit system,
497 * it's unlikely that len, if it truly reflects
498 * the size of the program we've been handed,
499 * will be anywhere near the maximum size of
500 * a u_int. We also don't check for backward
501 * branches, as we currently support them in
502 * userland for the protochain operation.
503 */
504 from = i + 1;
505 switch (BPF_OP(p->code)) {
506 case BPF_JA:
507 if (from + p->k >= (u_int)len)
508 return 0;
509 break;
510 case BPF_JEQ:
511 case BPF_JGT:
512 case BPF_JGE:
513 case BPF_JSET:
514 if (from + p->jt >= (u_int)len || from + p->jf >= (u_int)len)
515 return 0;
516 break;
517 default:
518 return 0;
519 }
520 break;
521 case BPF_RET:
522 break;
523 case BPF_MISC:
524 break;
525 default:
526 return 0;
527 }
528 }
529 return BPF_CLASS(f[len - 1].code) == BPF_RET;
530 }
531
/*
 * Exported because older versions of libpcap exported them.
 * Backward-compatibility wrapper; simply delegates to pcap_filter().
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return pcap_filter(pc, p, wirelen, buflen);
}
541
/*
 * Backward-compatibility wrapper for pcap_validate_filter(); exported
 * because older versions of libpcap exported it.
 */
int
bpf_validate(const struct bpf_insn *f, int len)
{
	return pcap_validate_filter(f, len);
}
547