1 /*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10
11 #include <asm/types.h>
12 #include <linux/types.h>
13 #include <stdint.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <errno.h>
18 #include <string.h>
19 #include <stddef.h>
20 #include <stdbool.h>
21 #include <sched.h>
22
23 #include <sys/capability.h>
24 #include <sys/resource.h>
25
26 #include <linux/unistd.h>
27 #include <linux/filter.h>
28 #include <linux/bpf_perf_event.h>
29 #include <linux/bpf.h>
30
31 #include <bpf/bpf.h>
32
33 #ifdef HAVE_GENHDR
34 # include "autoconf.h"
35 #else
36 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38 # endif
39 #endif
40
41 #include "../../../include/linux/filter.h"
42
43 #ifndef ARRAY_SIZE
44 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
45 #endif
46
47 #define MAX_INSNS 512
48 #define MAX_FIXUPS 8
49
50 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
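/* Tests that set F_NEEDS_EFFICIENT_UNALIGNED_ACCESS in .flags only expect
 * their ACCEPT verdict to hold when the kernel provides
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; the fallback above assumes that
 * config on the listed architectures whenever no generated autoconf.h is
 * available. The test harness is presumably what adjusts the expected
 * outcome accordingly.
 */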
51
52 struct bpf_test {
53 const char *descr;
54 struct bpf_insn insns[MAX_INSNS];
55 int fixup_map1[MAX_FIXUPS];
56 int fixup_map2[MAX_FIXUPS];
57 int fixup_prog[MAX_FIXUPS];
58 const char *errstr;
59 const char *errstr_unpriv;
60 enum {
61 UNDEF,
62 ACCEPT,
63 REJECT
64 } result, result_unpriv;
65 enum bpf_prog_type prog_type;
66 uint8_t flags;
67 };
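/* Sketch of how the fields above are consumed by the test harness defined
 * further down in this file:
 * - descr:                test name printed as the test runs;
 * - insns:                the program submitted to the verifier;
 * - fixup_map1/map2/prog: instruction indices whose map-fd immediate (e.g.
 *                         from BPF_LD_MAP_FD) is patched with the fd of a
 *                         map or prog array created by the harness;
 * - errstr/errstr_unpriv: substring expected in the verifier log for the
 *                         privileged/unprivileged run;
 * - result/result_unpriv: expected verdict for each run, with UNDEF
 *                         presumably falling back to the privileged value;
 * - prog_type:            program type to load as (0 presumably meaning a
 *                         plain socket filter);
 * - flags:                F_* modifiers such as the alignment flag above.
 */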
68
69 /* Note we want struct test_val below to be 64-bit aligned so that the end
70  * of its foo[] array is actually the end of the structure.
71  */
72 #define MAX_ENTRIES 11
73
74 struct test_val {
75 unsigned int index;
76 int foo[MAX_ENTRIES];
77 };
78
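/* The table below is loosely grouped by theme: basic ALU and jump handling,
 * the ld_imm64 encoding, uninitialized registers and stack, spill/fill of
 * pointers, __sk_buff context access (skb fields and cb[] at every access
 * width), "unpriv:" restrictions that only bite unprivileged loaders, and
 * "raw_stack:" interactions between bpf_skb_load_bytes() and stack memory.
 * Each entry is a self-contained program plus its expected verdict(s).
 */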
79 static struct bpf_test tests[] = {
80 {
81 "add+sub+mul",
82 .insns = {
83 BPF_MOV64_IMM(BPF_REG_1, 1),
84 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
85 BPF_MOV64_IMM(BPF_REG_2, 3),
86 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
87 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
88 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
89 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
90 BPF_EXIT_INSN(),
91 },
92 .result = ACCEPT,
93 },
94 {
95 "unreachable",
96 .insns = {
97 BPF_EXIT_INSN(),
98 BPF_EXIT_INSN(),
99 },
100 .errstr = "unreachable",
101 .result = REJECT,
102 },
103 {
104 "unreachable2",
105 .insns = {
106 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
107 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
108 BPF_EXIT_INSN(),
109 },
110 .errstr = "unreachable",
111 .result = REJECT,
112 },
113 {
114 "out of range jump",
115 .insns = {
116 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
117 BPF_EXIT_INSN(),
118 },
119 .errstr = "jump out of range",
120 .result = REJECT,
121 },
122 {
123 "out of range jump2",
124 .insns = {
125 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
126 BPF_EXIT_INSN(),
127 },
128 .errstr = "jump out of range",
129 .result = REJECT,
130 },
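/* The ld_imm64 tests below exercise the two-slot BPF_LD_IMM64 encoding:
 * branching into the second half of the 16-byte instruction, or leaving it
 * truncated, should be rejected as an invalid instruction rather than
 * decoded.
 */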
131 {
132 "test1 ld_imm64",
133 .insns = {
134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
135 BPF_LD_IMM64(BPF_REG_0, 0),
136 BPF_LD_IMM64(BPF_REG_0, 0),
137 BPF_LD_IMM64(BPF_REG_0, 1),
138 BPF_LD_IMM64(BPF_REG_0, 1),
139 BPF_MOV64_IMM(BPF_REG_0, 2),
140 BPF_EXIT_INSN(),
141 },
142 .errstr = "invalid BPF_LD_IMM insn",
143 .errstr_unpriv = "R1 pointer comparison",
144 .result = REJECT,
145 },
146 {
147 "test2 ld_imm64",
148 .insns = {
149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
150 BPF_LD_IMM64(BPF_REG_0, 0),
151 BPF_LD_IMM64(BPF_REG_0, 0),
152 BPF_LD_IMM64(BPF_REG_0, 1),
153 BPF_LD_IMM64(BPF_REG_0, 1),
154 BPF_EXIT_INSN(),
155 },
156 .errstr = "invalid BPF_LD_IMM insn",
157 .errstr_unpriv = "R1 pointer comparison",
158 .result = REJECT,
159 },
160 {
161 "test3 ld_imm64",
162 .insns = {
163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
164 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
165 BPF_LD_IMM64(BPF_REG_0, 0),
166 BPF_LD_IMM64(BPF_REG_0, 0),
167 BPF_LD_IMM64(BPF_REG_0, 1),
168 BPF_LD_IMM64(BPF_REG_0, 1),
169 BPF_EXIT_INSN(),
170 },
171 .errstr = "invalid bpf_ld_imm64 insn",
172 .result = REJECT,
173 },
174 {
175 "test4 ld_imm64",
176 .insns = {
177 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
178 BPF_EXIT_INSN(),
179 },
180 .errstr = "invalid bpf_ld_imm64 insn",
181 .result = REJECT,
182 },
183 {
184 "test5 ld_imm64",
185 .insns = {
186 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
187 },
188 .errstr = "invalid bpf_ld_imm64 insn",
189 .result = REJECT,
190 },
191 {
192 "no bpf_exit",
193 .insns = {
194 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
195 },
196 .errstr = "jump out of range",
197 .result = REJECT,
198 },
199 {
200 "loop (back-edge)",
201 .insns = {
202 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
203 BPF_EXIT_INSN(),
204 },
205 .errstr = "back-edge",
206 .result = REJECT,
207 },
208 {
209 "loop2 (back-edge)",
210 .insns = {
211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
212 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
213 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
214 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
215 BPF_EXIT_INSN(),
216 },
217 .errstr = "back-edge",
218 .result = REJECT,
219 },
220 {
221 "conditional loop",
222 .insns = {
223 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
225 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
227 BPF_EXIT_INSN(),
228 },
229 .errstr = "back-edge",
230 .result = REJECT,
231 },
232 {
233 "read uninitialized register",
234 .insns = {
235 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
236 BPF_EXIT_INSN(),
237 },
238 .errstr = "R2 !read_ok",
239 .result = REJECT,
240 },
241 {
242 "read invalid register",
243 .insns = {
244 BPF_MOV64_REG(BPF_REG_0, -1),
245 BPF_EXIT_INSN(),
246 },
247 .errstr = "R15 is invalid",
248 .result = REJECT,
249 },
250 {
251 "program doesn't init R0 before exit",
252 .insns = {
253 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
254 BPF_EXIT_INSN(),
255 },
256 .errstr = "R0 !read_ok",
257 .result = REJECT,
258 },
259 {
260 "program doesn't init R0 before exit in all branches",
261 .insns = {
262 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
263 BPF_MOV64_IMM(BPF_REG_0, 1),
264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
265 BPF_EXIT_INSN(),
266 },
267 .errstr = "R0 !read_ok",
268 .errstr_unpriv = "R1 pointer comparison",
269 .result = REJECT,
270 },
271 {
272 "stack out of bounds",
273 .insns = {
274 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
275 BPF_EXIT_INSN(),
276 },
277 .errstr = "invalid stack",
278 .result = REJECT,
279 },
280 {
281 "invalid call insn1",
282 .insns = {
283 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
284 BPF_EXIT_INSN(),
285 },
286 .errstr = "BPF_CALL uses reserved",
287 .result = REJECT,
288 },
289 {
290 "invalid call insn2",
291 .insns = {
292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
293 BPF_EXIT_INSN(),
294 },
295 .errstr = "BPF_CALL uses reserved",
296 .result = REJECT,
297 },
298 {
299 "invalid function call",
300 .insns = {
301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
302 BPF_EXIT_INSN(),
303 },
304 .errstr = "invalid func unknown#1234567",
305 .result = REJECT,
306 },
307 {
308 "uninitialized stack1",
309 .insns = {
310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
312 BPF_LD_MAP_FD(BPF_REG_1, 0),
313 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
314 BPF_FUNC_map_lookup_elem),
315 BPF_EXIT_INSN(),
316 },
317 .fixup_map1 = { 2 },
318 .errstr = "invalid indirect read from stack",
319 .result = REJECT,
320 },
321 {
322 "uninitialized stack2",
323 .insns = {
324 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
325 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
326 BPF_EXIT_INSN(),
327 },
328 .errstr = "invalid read from stack",
329 .result = REJECT,
330 },
331 {
332 "invalid argument register",
333 .insns = {
334 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
335 BPF_FUNC_get_cgroup_classid),
336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
337 BPF_FUNC_get_cgroup_classid),
338 BPF_EXIT_INSN(),
339 },
340 .errstr = "R1 !read_ok",
341 .result = REJECT,
342 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
343 },
344 {
345 "non-invalid argument register",
346 .insns = {
347 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
349 BPF_FUNC_get_cgroup_classid),
350 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
351 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
352 BPF_FUNC_get_cgroup_classid),
353 BPF_EXIT_INSN(),
354 },
355 .result = ACCEPT,
356 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
357 },
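/* Spill/fill tests: a register stored to the stack and loaded back must
 * keep its type; partially overwriting a spilled pointer counts as
 * corrupting it, and filling it back after that must be rejected.
 */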
358 {
359 "check valid spill/fill",
360 .insns = {
361 /* spill R1(ctx) into stack */
362 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
363 /* fill it back into R2 */
364 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
365 /* should be able to access R0 = *(R2 + 8) */
366 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
367 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
368 BPF_EXIT_INSN(),
369 },
370 .errstr_unpriv = "R0 leaks addr",
371 .result = ACCEPT,
372 .result_unpriv = REJECT,
373 },
374 {
375 "check valid spill/fill, skb mark",
376 .insns = {
377 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
378 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
379 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
380 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
381 offsetof(struct __sk_buff, mark)),
382 BPF_EXIT_INSN(),
383 },
384 .result = ACCEPT,
385 .result_unpriv = ACCEPT,
386 },
387 {
388 "check corrupted spill/fill",
389 .insns = {
390 /* spill R1(ctx) into stack */
391 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
392 /* mess up with R1 pointer on stack */
393 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
394 /* fill back into R0 should fail */
395 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
396 BPF_EXIT_INSN(),
397 },
398 .errstr_unpriv = "attempt to corrupt spilled",
399 .errstr = "corrupted spill",
400 .result = REJECT,
401 },
402 {
403 "invalid src register in STX",
404 .insns = {
405 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
406 BPF_EXIT_INSN(),
407 },
408 .errstr = "R15 is invalid",
409 .result = REJECT,
410 },
411 {
412 "invalid dst register in STX",
413 .insns = {
414 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
415 BPF_EXIT_INSN(),
416 },
417 .errstr = "R14 is invalid",
418 .result = REJECT,
419 },
420 {
421 "invalid dst register in ST",
422 .insns = {
423 BPF_ST_MEM(BPF_B, 14, -1, -1),
424 BPF_EXIT_INSN(),
425 },
426 .errstr = "R14 is invalid",
427 .result = REJECT,
428 },
429 {
430 "invalid src register in LDX",
431 .insns = {
432 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
433 BPF_EXIT_INSN(),
434 },
435 .errstr = "R12 is invalid",
436 .result = REJECT,
437 },
438 {
439 "invalid dst register in LDX",
440 .insns = {
441 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
442 BPF_EXIT_INSN(),
443 },
444 .errstr = "R11 is invalid",
445 .result = REJECT,
446 },
447 {
448 "junk insn",
449 .insns = {
450 BPF_RAW_INSN(0, 0, 0, 0, 0),
451 BPF_EXIT_INSN(),
452 },
453 .errstr = "invalid BPF_LD_IMM",
454 .result = REJECT,
455 },
456 {
457 "junk insn2",
458 .insns = {
459 BPF_RAW_INSN(1, 0, 0, 0, 0),
460 BPF_EXIT_INSN(),
461 },
462 .errstr = "BPF_LDX uses reserved fields",
463 .result = REJECT,
464 },
465 {
466 "junk insn3",
467 .insns = {
468 BPF_RAW_INSN(-1, 0, 0, 0, 0),
469 BPF_EXIT_INSN(),
470 },
471 .errstr = "invalid BPF_ALU opcode f0",
472 .result = REJECT,
473 },
474 {
475 "junk insn4",
476 .insns = {
477 BPF_RAW_INSN(-1, -1, -1, -1, -1),
478 BPF_EXIT_INSN(),
479 },
480 .errstr = "invalid BPF_ALU opcode f0",
481 .result = REJECT,
482 },
483 {
484 "junk insn5",
485 .insns = {
486 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
487 BPF_EXIT_INSN(),
488 },
489 .errstr = "BPF_ALU uses reserved fields",
490 .result = REJECT,
491 },
492 {
493 "misaligned read from stack",
494 .insns = {
495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
496 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
497 BPF_EXIT_INSN(),
498 },
499 .errstr = "misaligned access",
500 .result = REJECT,
501 },
502 {
503 "invalid map_fd for function call",
504 .insns = {
505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
506 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
508 BPF_LD_MAP_FD(BPF_REG_1, 0),
509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
510 BPF_FUNC_map_delete_elem),
511 BPF_EXIT_INSN(),
512 },
513 .errstr = "fd 0 is not pointing to valid bpf_map",
514 .result = REJECT,
515 },
516 {
517 "don't check return value before access",
518 .insns = {
519 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
522 BPF_LD_MAP_FD(BPF_REG_1, 0),
523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
524 BPF_FUNC_map_lookup_elem),
525 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
526 BPF_EXIT_INSN(),
527 },
528 .fixup_map1 = { 3 },
529 .errstr = "R0 invalid mem access 'map_value_or_null'",
530 .result = REJECT,
531 },
532 {
533 "access memory with incorrect alignment",
534 .insns = {
535 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
536 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
538 BPF_LD_MAP_FD(BPF_REG_1, 0),
539 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
540 BPF_FUNC_map_lookup_elem),
541 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
542 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
543 BPF_EXIT_INSN(),
544 },
545 .fixup_map1 = { 3 },
546 .errstr = "misaligned access",
547 .result = REJECT,
548 },
549 {
550 "sometimes access memory with incorrect alignment",
551 .insns = {
552 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
553 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
555 BPF_LD_MAP_FD(BPF_REG_1, 0),
556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
557 BPF_FUNC_map_lookup_elem),
558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
559 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
560 BPF_EXIT_INSN(),
561 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
562 BPF_EXIT_INSN(),
563 },
564 .fixup_map1 = { 3 },
565 .errstr = "R0 invalid mem access",
566 .errstr_unpriv = "R0 leaks addr",
567 .result = REJECT,
568 },
569 {
570 "jump test 1",
571 .insns = {
572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
573 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
574 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
575 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
576 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
577 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
578 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
579 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
580 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
581 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
583 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
585 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
586 BPF_MOV64_IMM(BPF_REG_0, 0),
587 BPF_EXIT_INSN(),
588 },
589 .errstr_unpriv = "R1 pointer comparison",
590 .result_unpriv = REJECT,
591 .result = ACCEPT,
592 },
593 {
594 "jump test 2",
595 .insns = {
596 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
598 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
599 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
601 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
602 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
604 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
605 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
606 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
607 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
608 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
610 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
611 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
612 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
613 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
614 BPF_MOV64_IMM(BPF_REG_0, 0),
615 BPF_EXIT_INSN(),
616 },
617 .errstr_unpriv = "R1 pointer comparison",
618 .result_unpriv = REJECT,
619 .result = ACCEPT,
620 },
621 {
622 "jump test 3",
623 .insns = {
624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
625 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
626 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
628 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
630 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
632 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
633 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
634 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
636 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
637 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
638 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
640 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
641 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
642 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
644 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
645 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
646 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
648 BPF_LD_MAP_FD(BPF_REG_1, 0),
649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
650 BPF_FUNC_map_delete_elem),
651 BPF_EXIT_INSN(),
652 },
653 .fixup_map1 = { 24 },
654 .errstr_unpriv = "R1 pointer comparison",
655 .result_unpriv = REJECT,
656 .result = ACCEPT,
657 },
658 {
659 "jump test 4",
660 .insns = {
661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
667 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
670 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
672 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
674 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
675 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
680 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
692 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
694 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
695 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
696 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
697 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
698 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
699 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
700 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
701 BPF_MOV64_IMM(BPF_REG_0, 0),
702 BPF_EXIT_INSN(),
703 },
704 .errstr_unpriv = "R1 pointer comparison",
705 .result_unpriv = REJECT,
706 .result = ACCEPT,
707 },
708 {
709 "jump test 5",
710 .insns = {
711 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
712 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
713 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
714 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
715 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
716 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
717 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
718 BPF_MOV64_IMM(BPF_REG_0, 0),
719 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
720 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
721 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
722 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
723 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
724 BPF_MOV64_IMM(BPF_REG_0, 0),
725 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
726 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
727 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
728 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
729 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
730 BPF_MOV64_IMM(BPF_REG_0, 0),
731 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
732 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
733 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
734 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
735 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
736 BPF_MOV64_IMM(BPF_REG_0, 0),
737 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
738 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
739 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
740 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
741 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
742 BPF_MOV64_IMM(BPF_REG_0, 0),
743 BPF_EXIT_INSN(),
744 },
745 .errstr_unpriv = "R1 pointer comparison",
746 .result_unpriv = REJECT,
747 .result = ACCEPT,
748 },
749 {
750 "access skb fields ok",
751 .insns = {
752 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
753 offsetof(struct __sk_buff, len)),
754 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
755 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
756 offsetof(struct __sk_buff, mark)),
757 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
758 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
759 offsetof(struct __sk_buff, pkt_type)),
760 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
761 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
762 offsetof(struct __sk_buff, queue_mapping)),
763 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
764 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
765 offsetof(struct __sk_buff, protocol)),
766 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
768 offsetof(struct __sk_buff, vlan_present)),
769 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
770 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
771 offsetof(struct __sk_buff, vlan_tci)),
772 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
773 BPF_EXIT_INSN(),
774 },
775 .result = ACCEPT,
776 },
777 {
778 "access skb fields bad1",
779 .insns = {
780 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
781 BPF_EXIT_INSN(),
782 },
783 .errstr = "invalid bpf_context access",
784 .result = REJECT,
785 },
786 {
787 "access skb fields bad2",
788 .insns = {
789 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
790 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
791 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
793 BPF_LD_MAP_FD(BPF_REG_1, 0),
794 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
795 BPF_FUNC_map_lookup_elem),
796 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
797 BPF_EXIT_INSN(),
798 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
799 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
800 offsetof(struct __sk_buff, pkt_type)),
801 BPF_EXIT_INSN(),
802 },
803 .fixup_map1 = { 4 },
804 .errstr = "different pointers",
805 .errstr_unpriv = "R1 pointer comparison",
806 .result = REJECT,
807 },
808 {
809 "access skb fields bad3",
810 .insns = {
811 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
812 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
813 offsetof(struct __sk_buff, pkt_type)),
814 BPF_EXIT_INSN(),
815 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
816 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
818 BPF_LD_MAP_FD(BPF_REG_1, 0),
819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
820 BPF_FUNC_map_lookup_elem),
821 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
822 BPF_EXIT_INSN(),
823 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
824 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
825 },
826 .fixup_map1 = { 6 },
827 .errstr = "different pointers",
828 .errstr_unpriv = "R1 pointer comparison",
829 .result = REJECT,
830 },
831 {
832 "access skb fields bad4",
833 .insns = {
834 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
835 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
836 offsetof(struct __sk_buff, len)),
837 BPF_MOV64_IMM(BPF_REG_0, 0),
838 BPF_EXIT_INSN(),
839 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
840 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
842 BPF_LD_MAP_FD(BPF_REG_1, 0),
843 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
844 BPF_FUNC_map_lookup_elem),
845 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
846 BPF_EXIT_INSN(),
847 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
848 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
849 },
850 .fixup_map1 = { 7 },
851 .errstr = "different pointers",
852 .errstr_unpriv = "R1 pointer comparison",
853 .result = REJECT,
854 },
855 {
856 "check skb->mark is not writeable by sockets",
857 .insns = {
858 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
859 offsetof(struct __sk_buff, mark)),
860 BPF_EXIT_INSN(),
861 },
862 .errstr = "invalid bpf_context access",
863 .errstr_unpriv = "R1 leaks addr",
864 .result = REJECT,
865 },
866 {
867 "check skb->tc_index is not writeable by sockets",
868 .insns = {
869 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
870 offsetof(struct __sk_buff, tc_index)),
871 BPF_EXIT_INSN(),
872 },
873 .errstr = "invalid bpf_context access",
874 .errstr_unpriv = "R1 leaks addr",
875 .result = REJECT,
876 },
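/* The "check cb access" tests walk skb->cb[] with byte, half, word and
 * double-word loads and stores, covering every valid offset plus the
 * misaligned and out-of-bounds variants, and confirm that a program type
 * without cb access (BPF_PROG_TYPE_CGROUP_SOCK here) is rejected.
 */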
877 {
878 "check cb access: byte",
879 .insns = {
880 BPF_MOV64_IMM(BPF_REG_0, 0),
881 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
882 offsetof(struct __sk_buff, cb[0])),
883 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
884 offsetof(struct __sk_buff, cb[0]) + 1),
885 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
886 offsetof(struct __sk_buff, cb[0]) + 2),
887 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
888 offsetof(struct __sk_buff, cb[0]) + 3),
889 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
890 offsetof(struct __sk_buff, cb[1])),
891 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
892 offsetof(struct __sk_buff, cb[1]) + 1),
893 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
894 offsetof(struct __sk_buff, cb[1]) + 2),
895 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
896 offsetof(struct __sk_buff, cb[1]) + 3),
897 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
898 offsetof(struct __sk_buff, cb[2])),
899 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
900 offsetof(struct __sk_buff, cb[2]) + 1),
901 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
902 offsetof(struct __sk_buff, cb[2]) + 2),
903 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
904 offsetof(struct __sk_buff, cb[2]) + 3),
905 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
906 offsetof(struct __sk_buff, cb[3])),
907 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
908 offsetof(struct __sk_buff, cb[3]) + 1),
909 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
910 offsetof(struct __sk_buff, cb[3]) + 2),
911 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
912 offsetof(struct __sk_buff, cb[3]) + 3),
913 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
914 offsetof(struct __sk_buff, cb[4])),
915 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
916 offsetof(struct __sk_buff, cb[4]) + 1),
917 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
918 offsetof(struct __sk_buff, cb[4]) + 2),
919 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
920 offsetof(struct __sk_buff, cb[4]) + 3),
921 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
922 offsetof(struct __sk_buff, cb[0])),
923 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
924 offsetof(struct __sk_buff, cb[0]) + 1),
925 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
926 offsetof(struct __sk_buff, cb[0]) + 2),
927 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
928 offsetof(struct __sk_buff, cb[0]) + 3),
929 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
930 offsetof(struct __sk_buff, cb[1])),
931 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
932 offsetof(struct __sk_buff, cb[1]) + 1),
933 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
934 offsetof(struct __sk_buff, cb[1]) + 2),
935 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
936 offsetof(struct __sk_buff, cb[1]) + 3),
937 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
938 offsetof(struct __sk_buff, cb[2])),
939 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
940 offsetof(struct __sk_buff, cb[2]) + 1),
941 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
942 offsetof(struct __sk_buff, cb[2]) + 2),
943 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
944 offsetof(struct __sk_buff, cb[2]) + 3),
945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
946 offsetof(struct __sk_buff, cb[3])),
947 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
948 offsetof(struct __sk_buff, cb[3]) + 1),
949 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
950 offsetof(struct __sk_buff, cb[3]) + 2),
951 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
952 offsetof(struct __sk_buff, cb[3]) + 3),
953 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
954 offsetof(struct __sk_buff, cb[4])),
955 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
956 offsetof(struct __sk_buff, cb[4]) + 1),
957 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
958 offsetof(struct __sk_buff, cb[4]) + 2),
959 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
960 offsetof(struct __sk_buff, cb[4]) + 3),
961 BPF_EXIT_INSN(),
962 },
963 .result = ACCEPT,
964 },
965 {
966 "check cb access: byte, oob 1",
967 .insns = {
968 BPF_MOV64_IMM(BPF_REG_0, 0),
969 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
970 offsetof(struct __sk_buff, cb[4]) + 4),
971 BPF_EXIT_INSN(),
972 },
973 .errstr = "invalid bpf_context access",
974 .result = REJECT,
975 },
976 {
977 "check cb access: byte, oob 2",
978 .insns = {
979 BPF_MOV64_IMM(BPF_REG_0, 0),
980 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
981 offsetof(struct __sk_buff, cb[0]) - 1),
982 BPF_EXIT_INSN(),
983 },
984 .errstr = "invalid bpf_context access",
985 .result = REJECT,
986 },
987 {
988 "check cb access: byte, oob 3",
989 .insns = {
990 BPF_MOV64_IMM(BPF_REG_0, 0),
991 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
992 offsetof(struct __sk_buff, cb[4]) + 4),
993 BPF_EXIT_INSN(),
994 },
995 .errstr = "invalid bpf_context access",
996 .result = REJECT,
997 },
998 {
999 "check cb access: byte, oob 4",
1000 .insns = {
1001 BPF_MOV64_IMM(BPF_REG_0, 0),
1002 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1003 offsetof(struct __sk_buff, cb[0]) - 1),
1004 BPF_EXIT_INSN(),
1005 },
1006 .errstr = "invalid bpf_context access",
1007 .result = REJECT,
1008 },
1009 {
1010 "check cb access: byte, wrong type",
1011 .insns = {
1012 BPF_MOV64_IMM(BPF_REG_0, 0),
1013 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1014 offsetof(struct __sk_buff, cb[0])),
1015 BPF_EXIT_INSN(),
1016 },
1017 .errstr = "invalid bpf_context access",
1018 .result = REJECT,
1019 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1020 },
1021 {
1022 "check cb access: half",
1023 .insns = {
1024 BPF_MOV64_IMM(BPF_REG_0, 0),
1025 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1026 offsetof(struct __sk_buff, cb[0])),
1027 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1028 offsetof(struct __sk_buff, cb[0]) + 2),
1029 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1030 offsetof(struct __sk_buff, cb[1])),
1031 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1032 offsetof(struct __sk_buff, cb[1]) + 2),
1033 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1034 offsetof(struct __sk_buff, cb[2])),
1035 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1036 offsetof(struct __sk_buff, cb[2]) + 2),
1037 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1038 offsetof(struct __sk_buff, cb[3])),
1039 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1040 offsetof(struct __sk_buff, cb[3]) + 2),
1041 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1042 offsetof(struct __sk_buff, cb[4])),
1043 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1044 offsetof(struct __sk_buff, cb[4]) + 2),
1045 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1046 offsetof(struct __sk_buff, cb[0])),
1047 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1048 offsetof(struct __sk_buff, cb[0]) + 2),
1049 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1050 offsetof(struct __sk_buff, cb[1])),
1051 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1052 offsetof(struct __sk_buff, cb[1]) + 2),
1053 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1054 offsetof(struct __sk_buff, cb[2])),
1055 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1056 offsetof(struct __sk_buff, cb[2]) + 2),
1057 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1058 offsetof(struct __sk_buff, cb[3])),
1059 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1060 offsetof(struct __sk_buff, cb[3]) + 2),
1061 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1062 offsetof(struct __sk_buff, cb[4])),
1063 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1064 offsetof(struct __sk_buff, cb[4]) + 2),
1065 BPF_EXIT_INSN(),
1066 },
1067 .result = ACCEPT,
1068 },
1069 {
1070 "check cb access: half, unaligned",
1071 .insns = {
1072 BPF_MOV64_IMM(BPF_REG_0, 0),
1073 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1074 offsetof(struct __sk_buff, cb[0]) + 1),
1075 BPF_EXIT_INSN(),
1076 },
1077 .errstr = "misaligned access",
1078 .result = REJECT,
1079 },
1080 {
1081 "check cb access: half, oob 1",
1082 .insns = {
1083 BPF_MOV64_IMM(BPF_REG_0, 0),
1084 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1085 offsetof(struct __sk_buff, cb[4]) + 4),
1086 BPF_EXIT_INSN(),
1087 },
1088 .errstr = "invalid bpf_context access",
1089 .result = REJECT,
1090 },
1091 {
1092 "check cb access: half, oob 2",
1093 .insns = {
1094 BPF_MOV64_IMM(BPF_REG_0, 0),
1095 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1096 offsetof(struct __sk_buff, cb[0]) - 2),
1097 BPF_EXIT_INSN(),
1098 },
1099 .errstr = "invalid bpf_context access",
1100 .result = REJECT,
1101 },
1102 {
1103 "check cb access: half, oob 3",
1104 .insns = {
1105 BPF_MOV64_IMM(BPF_REG_0, 0),
1106 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1107 offsetof(struct __sk_buff, cb[4]) + 4),
1108 BPF_EXIT_INSN(),
1109 },
1110 .errstr = "invalid bpf_context access",
1111 .result = REJECT,
1112 },
1113 {
1114 "check cb access: half, oob 4",
1115 .insns = {
1116 BPF_MOV64_IMM(BPF_REG_0, 0),
1117 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1118 offsetof(struct __sk_buff, cb[0]) - 2),
1119 BPF_EXIT_INSN(),
1120 },
1121 .errstr = "invalid bpf_context access",
1122 .result = REJECT,
1123 },
1124 {
1125 "check cb access: half, wrong type",
1126 .insns = {
1127 BPF_MOV64_IMM(BPF_REG_0, 0),
1128 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1129 offsetof(struct __sk_buff, cb[0])),
1130 BPF_EXIT_INSN(),
1131 },
1132 .errstr = "invalid bpf_context access",
1133 .result = REJECT,
1134 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1135 },
1136 {
1137 "check cb access: word",
1138 .insns = {
1139 BPF_MOV64_IMM(BPF_REG_0, 0),
1140 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1141 offsetof(struct __sk_buff, cb[0])),
1142 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1143 offsetof(struct __sk_buff, cb[1])),
1144 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1145 offsetof(struct __sk_buff, cb[2])),
1146 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1147 offsetof(struct __sk_buff, cb[3])),
1148 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1149 offsetof(struct __sk_buff, cb[4])),
1150 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1151 offsetof(struct __sk_buff, cb[0])),
1152 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1153 offsetof(struct __sk_buff, cb[1])),
1154 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1155 offsetof(struct __sk_buff, cb[2])),
1156 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1157 offsetof(struct __sk_buff, cb[3])),
1158 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1159 offsetof(struct __sk_buff, cb[4])),
1160 BPF_EXIT_INSN(),
1161 },
1162 .result = ACCEPT,
1163 },
1164 {
1165 "check cb access: word, unaligned 1",
1166 .insns = {
1167 BPF_MOV64_IMM(BPF_REG_0, 0),
1168 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1169 offsetof(struct __sk_buff, cb[0]) + 2),
1170 BPF_EXIT_INSN(),
1171 },
1172 .errstr = "misaligned access",
1173 .result = REJECT,
1174 },
1175 {
1176 "check cb access: word, unaligned 2",
1177 .insns = {
1178 BPF_MOV64_IMM(BPF_REG_0, 0),
1179 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1180 offsetof(struct __sk_buff, cb[4]) + 1),
1181 BPF_EXIT_INSN(),
1182 },
1183 .errstr = "misaligned access",
1184 .result = REJECT,
1185 },
1186 {
1187 "check cb access: word, unaligned 3",
1188 .insns = {
1189 BPF_MOV64_IMM(BPF_REG_0, 0),
1190 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1191 offsetof(struct __sk_buff, cb[4]) + 2),
1192 BPF_EXIT_INSN(),
1193 },
1194 .errstr = "misaligned access",
1195 .result = REJECT,
1196 },
1197 {
1198 "check cb access: word, unaligned 4",
1199 .insns = {
1200 BPF_MOV64_IMM(BPF_REG_0, 0),
1201 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1202 offsetof(struct __sk_buff, cb[4]) + 3),
1203 BPF_EXIT_INSN(),
1204 },
1205 .errstr = "misaligned access",
1206 .result = REJECT,
1207 },
1208 {
1209 "check cb access: double",
1210 .insns = {
1211 BPF_MOV64_IMM(BPF_REG_0, 0),
1212 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1213 offsetof(struct __sk_buff, cb[0])),
1214 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1215 offsetof(struct __sk_buff, cb[2])),
1216 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1217 offsetof(struct __sk_buff, cb[0])),
1218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1219 offsetof(struct __sk_buff, cb[2])),
1220 BPF_EXIT_INSN(),
1221 },
1222 .result = ACCEPT,
1223 },
1224 {
1225 "check cb access: double, unaligned 1",
1226 .insns = {
1227 BPF_MOV64_IMM(BPF_REG_0, 0),
1228 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1229 offsetof(struct __sk_buff, cb[1])),
1230 BPF_EXIT_INSN(),
1231 },
1232 .errstr = "misaligned access",
1233 .result = REJECT,
1234 },
1235 {
1236 "check cb access: double, unaligned 2",
1237 .insns = {
1238 BPF_MOV64_IMM(BPF_REG_0, 0),
1239 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1240 offsetof(struct __sk_buff, cb[3])),
1241 BPF_EXIT_INSN(),
1242 },
1243 .errstr = "misaligned access",
1244 .result = REJECT,
1245 },
1246 {
1247 "check cb access: double, oob 1",
1248 .insns = {
1249 BPF_MOV64_IMM(BPF_REG_0, 0),
1250 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1251 offsetof(struct __sk_buff, cb[4])),
1252 BPF_EXIT_INSN(),
1253 },
1254 .errstr = "invalid bpf_context access",
1255 .result = REJECT,
1256 },
1257 {
1258 "check cb access: double, oob 2",
1259 .insns = {
1260 BPF_MOV64_IMM(BPF_REG_0, 0),
1261 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1262 offsetof(struct __sk_buff, cb[4]) + 8),
1263 BPF_EXIT_INSN(),
1264 },
1265 .errstr = "invalid bpf_context access",
1266 .result = REJECT,
1267 },
1268 {
1269 "check cb access: double, oob 3",
1270 .insns = {
1271 BPF_MOV64_IMM(BPF_REG_0, 0),
1272 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1273 offsetof(struct __sk_buff, cb[0]) - 8),
1274 BPF_EXIT_INSN(),
1275 },
1276 .errstr = "invalid bpf_context access",
1277 .result = REJECT,
1278 },
1279 {
1280 "check cb access: double, oob 4",
1281 .insns = {
1282 BPF_MOV64_IMM(BPF_REG_0, 0),
1283 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1284 offsetof(struct __sk_buff, cb[4])),
1285 BPF_EXIT_INSN(),
1286 },
1287 .errstr = "invalid bpf_context access",
1288 .result = REJECT,
1289 },
1290 {
1291 "check cb access: double, oob 5",
1292 .insns = {
1293 BPF_MOV64_IMM(BPF_REG_0, 0),
1294 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1295 offsetof(struct __sk_buff, cb[4]) + 8),
1296 BPF_EXIT_INSN(),
1297 },
1298 .errstr = "invalid bpf_context access",
1299 .result = REJECT,
1300 },
1301 {
1302 "check cb access: double, oob 6",
1303 .insns = {
1304 BPF_MOV64_IMM(BPF_REG_0, 0),
1305 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1306 offsetof(struct __sk_buff, cb[0]) - 8),
1307 BPF_EXIT_INSN(),
1308 },
1309 .errstr = "invalid bpf_context access",
1310 .result = REJECT,
1311 },
1312 {
1313 "check cb access: double, wrong type",
1314 .insns = {
1315 BPF_MOV64_IMM(BPF_REG_0, 0),
1316 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1317 offsetof(struct __sk_buff, cb[0])),
1318 BPF_EXIT_INSN(),
1319 },
1320 .errstr = "invalid bpf_context access",
1321 .result = REJECT,
1322 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1323 },
1324 {
1325 "check out of range skb->cb access",
1326 .insns = {
1327 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1328 offsetof(struct __sk_buff, cb[0]) + 256),
1329 BPF_EXIT_INSN(),
1330 },
1331 .errstr = "invalid bpf_context access",
1332 .errstr_unpriv = "",
1333 .result = REJECT,
1334 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1335 },
1336 {
1337 "write skb fields from socket prog",
1338 .insns = {
1339 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1340 offsetof(struct __sk_buff, cb[4])),
1341 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1342 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1343 offsetof(struct __sk_buff, mark)),
1344 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1345 offsetof(struct __sk_buff, tc_index)),
1346 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1347 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1348 offsetof(struct __sk_buff, cb[0])),
1349 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1350 offsetof(struct __sk_buff, cb[2])),
1351 BPF_EXIT_INSN(),
1352 },
1353 .result = ACCEPT,
1354 .errstr_unpriv = "R1 leaks addr",
1355 .result_unpriv = REJECT,
1356 },
1357 {
1358 "write skb fields from tc_cls_act prog",
1359 .insns = {
1360 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1361 offsetof(struct __sk_buff, cb[0])),
1362 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1363 offsetof(struct __sk_buff, mark)),
1364 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1365 offsetof(struct __sk_buff, tc_index)),
1366 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1367 offsetof(struct __sk_buff, tc_index)),
1368 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1369 offsetof(struct __sk_buff, cb[3])),
1370 BPF_EXIT_INSN(),
1371 },
1372 .errstr_unpriv = "",
1373 .result_unpriv = REJECT,
1374 .result = ACCEPT,
1375 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1376 },
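/* PTR_TO_STACK tests: loads and stores through a pointer derived from the
 * frame pointer must stay inside the stack and be aligned to the access
 * size.
 */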
1377 {
1378 "PTR_TO_STACK store/load",
1379 .insns = {
1380 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1382 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1383 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1384 BPF_EXIT_INSN(),
1385 },
1386 .result = ACCEPT,
1387 },
1388 {
1389 "PTR_TO_STACK store/load - bad alignment on off",
1390 .insns = {
1391 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1393 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1394 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1395 BPF_EXIT_INSN(),
1396 },
1397 .result = REJECT,
1398 .errstr = "misaligned access off -6 size 8",
1399 },
1400 {
1401 "PTR_TO_STACK store/load - bad alignment on reg",
1402 .insns = {
1403 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1405 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1406 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1407 BPF_EXIT_INSN(),
1408 },
1409 .result = REJECT,
1410 .errstr = "misaligned access off -2 size 8",
1411 },
1412 {
1413 "PTR_TO_STACK store/load - out of bounds low",
1414 .insns = {
1415 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1417 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1418 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1419 BPF_EXIT_INSN(),
1420 },
1421 .result = REJECT,
1422 .errstr = "invalid stack off=-79992 size=8",
1423 },
1424 {
1425 "PTR_TO_STACK store/load - out of bounds high",
1426 .insns = {
1427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1429 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1430 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1431 BPF_EXIT_INSN(),
1432 },
1433 .result = REJECT,
1434 .errstr = "invalid stack off=0 size=8",
1435 },
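/* "unpriv:" tests: programs that a privileged loader may run but that must
 * be rejected for an unprivileged one, mostly because they would leak
 * kernel pointers (returning, storing or passing them to helpers) or
 * perform pointer arithmetic and comparisons.
 */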
1436 {
1437 "unpriv: return pointer",
1438 .insns = {
1439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1440 BPF_EXIT_INSN(),
1441 },
1442 .result = ACCEPT,
1443 .result_unpriv = REJECT,
1444 .errstr_unpriv = "R0 leaks addr",
1445 },
1446 {
1447 "unpriv: add const to pointer",
1448 .insns = {
1449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1450 BPF_MOV64_IMM(BPF_REG_0, 0),
1451 BPF_EXIT_INSN(),
1452 },
1453 .result = ACCEPT,
1454 .result_unpriv = REJECT,
1455 .errstr_unpriv = "R1 pointer arithmetic",
1456 },
1457 {
1458 "unpriv: add pointer to pointer",
1459 .insns = {
1460 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1461 BPF_MOV64_IMM(BPF_REG_0, 0),
1462 BPF_EXIT_INSN(),
1463 },
1464 .result = ACCEPT,
1465 .result_unpriv = REJECT,
1466 .errstr_unpriv = "R1 pointer arithmetic",
1467 },
1468 {
1469 "unpriv: neg pointer",
1470 .insns = {
1471 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1472 BPF_MOV64_IMM(BPF_REG_0, 0),
1473 BPF_EXIT_INSN(),
1474 },
1475 .result = ACCEPT,
1476 .result_unpriv = REJECT,
1477 .errstr_unpriv = "R1 pointer arithmetic",
1478 },
1479 {
1480 "unpriv: cmp pointer with const",
1481 .insns = {
1482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1483 BPF_MOV64_IMM(BPF_REG_0, 0),
1484 BPF_EXIT_INSN(),
1485 },
1486 .result = ACCEPT,
1487 .result_unpriv = REJECT,
1488 .errstr_unpriv = "R1 pointer comparison",
1489 },
1490 {
1491 "unpriv: cmp pointer with pointer",
1492 .insns = {
1493 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1494 BPF_MOV64_IMM(BPF_REG_0, 0),
1495 BPF_EXIT_INSN(),
1496 },
1497 .result = ACCEPT,
1498 .result_unpriv = REJECT,
1499 .errstr_unpriv = "R10 pointer comparison",
1500 },
1501 {
1502 "unpriv: check that printk is disallowed",
1503 .insns = {
1504 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1505 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1507 BPF_MOV64_IMM(BPF_REG_2, 8),
1508 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1510 BPF_FUNC_trace_printk),
1511 BPF_MOV64_IMM(BPF_REG_0, 0),
1512 BPF_EXIT_INSN(),
1513 },
1514 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1515 .result_unpriv = REJECT,
1516 .result = ACCEPT,
1517 },
1518 {
1519 "unpriv: pass pointer to helper function",
1520 .insns = {
1521 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1522 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1524 BPF_LD_MAP_FD(BPF_REG_1, 0),
1525 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1526 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1527 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1528 BPF_FUNC_map_update_elem),
1529 BPF_MOV64_IMM(BPF_REG_0, 0),
1530 BPF_EXIT_INSN(),
1531 },
1532 .fixup_map1 = { 3 },
1533 .errstr_unpriv = "R4 leaks addr",
1534 .result_unpriv = REJECT,
1535 .result = ACCEPT,
1536 },
1537 {
1538 "unpriv: indirectly pass pointer on stack to helper function",
1539 .insns = {
1540 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1541 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1543 BPF_LD_MAP_FD(BPF_REG_1, 0),
1544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1545 BPF_FUNC_map_lookup_elem),
1546 BPF_MOV64_IMM(BPF_REG_0, 0),
1547 BPF_EXIT_INSN(),
1548 },
1549 .fixup_map1 = { 3 },
1550 .errstr = "invalid indirect read from stack off -8+0 size 8",
1551 .result = REJECT,
1552 },
1553 {
1554 "unpriv: mangle pointer on stack 1",
1555 .insns = {
1556 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1557 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1558 BPF_MOV64_IMM(BPF_REG_0, 0),
1559 BPF_EXIT_INSN(),
1560 },
1561 .errstr_unpriv = "attempt to corrupt spilled",
1562 .result_unpriv = REJECT,
1563 .result = ACCEPT,
1564 },
1565 {
1566 "unpriv: mangle pointer on stack 2",
1567 .insns = {
1568 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1569 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1570 BPF_MOV64_IMM(BPF_REG_0, 0),
1571 BPF_EXIT_INSN(),
1572 },
1573 .errstr_unpriv = "attempt to corrupt spilled",
1574 .result_unpriv = REJECT,
1575 .result = ACCEPT,
1576 },
1577 {
1578 "unpriv: read pointer from stack in small chunks",
1579 .insns = {
1580 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1581 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1582 BPF_MOV64_IMM(BPF_REG_0, 0),
1583 BPF_EXIT_INSN(),
1584 },
1585 .errstr = "invalid size",
1586 .result = REJECT,
1587 },
1588 {
1589 "unpriv: write pointer into ctx",
1590 .insns = {
1591 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1592 BPF_MOV64_IMM(BPF_REG_0, 0),
1593 BPF_EXIT_INSN(),
1594 },
1595 .errstr_unpriv = "R1 leaks addr",
1596 .result_unpriv = REJECT,
1597 .errstr = "invalid bpf_context access",
1598 .result = REJECT,
1599 },
1600 {
1601 "unpriv: spill/fill of ctx",
1602 .insns = {
1603 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1604 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1605 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1606 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1607 BPF_MOV64_IMM(BPF_REG_0, 0),
1608 BPF_EXIT_INSN(),
1609 },
1610 .result = ACCEPT,
1611 },
1612 {
1613 "unpriv: spill/fill of ctx 2",
1614 .insns = {
1615 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1617 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1618 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1620 BPF_FUNC_get_hash_recalc),
1621 BPF_EXIT_INSN(),
1622 },
1623 .result = ACCEPT,
1624 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1625 },
1626 {
1627 "unpriv: spill/fill of ctx 3",
1628 .insns = {
1629 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1631 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1632 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1633 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1635 BPF_FUNC_get_hash_recalc),
1636 BPF_EXIT_INSN(),
1637 },
1638 .result = REJECT,
1639 .errstr = "R1 type=fp expected=ctx",
1640 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1641 },
1642 {
1643 "unpriv: spill/fill of ctx 4",
1644 .insns = {
1645 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1647 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1648 BPF_MOV64_IMM(BPF_REG_0, 1),
1649 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1650 BPF_REG_0, -8, 0),
1651 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1653 BPF_FUNC_get_hash_recalc),
1654 BPF_EXIT_INSN(),
1655 },
1656 .result = REJECT,
1657 .errstr = "R1 type=inv expected=ctx",
1658 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1659 },
1660 {
1661 "unpriv: spill/fill of different pointers stx",
1662 .insns = {
1663 BPF_MOV64_IMM(BPF_REG_3, 42),
1664 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1667 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1669 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1670 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1671 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1672 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1673 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1674 offsetof(struct __sk_buff, mark)),
1675 BPF_MOV64_IMM(BPF_REG_0, 0),
1676 BPF_EXIT_INSN(),
1677 },
1678 .result = REJECT,
1679 .errstr = "same insn cannot be used with different pointers",
1680 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1681 },
1682 {
1683 "unpriv: spill/fill of different pointers ldx",
1684 .insns = {
1685 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1690 -(__s32)offsetof(struct bpf_perf_event_data,
1691 sample_period) - 8),
1692 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1693 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1694 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1695 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1696 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1697 offsetof(struct bpf_perf_event_data,
1698 sample_period)),
1699 BPF_MOV64_IMM(BPF_REG_0, 0),
1700 BPF_EXIT_INSN(),
1701 },
1702 .result = REJECT,
1703 .errstr = "same insn cannot be used with different pointers",
1704 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1705 },
1706 {
1707 "unpriv: write pointer into map elem value",
1708 .insns = {
1709 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1712 BPF_LD_MAP_FD(BPF_REG_1, 0),
1713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1714 BPF_FUNC_map_lookup_elem),
1715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1716 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1717 BPF_EXIT_INSN(),
1718 },
1719 .fixup_map1 = { 3 },
1720 .errstr_unpriv = "R0 leaks addr",
1721 .result_unpriv = REJECT,
1722 .result = ACCEPT,
1723 },
1724 {
1725 "unpriv: partial copy of pointer",
1726 .insns = {
1727 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1728 BPF_MOV64_IMM(BPF_REG_0, 0),
1729 BPF_EXIT_INSN(),
1730 },
1731 .errstr_unpriv = "R10 partial copy",
1732 .result_unpriv = REJECT,
1733 .result = ACCEPT,
1734 },
1735 {
1736 "unpriv: pass pointer to tail_call",
1737 .insns = {
1738 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1739 BPF_LD_MAP_FD(BPF_REG_2, 0),
1740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1741 BPF_FUNC_tail_call),
1742 BPF_MOV64_IMM(BPF_REG_0, 0),
1743 BPF_EXIT_INSN(),
1744 },
1745 .fixup_prog = { 1 },
1746 .errstr_unpriv = "R3 leaks addr into helper",
1747 .result_unpriv = REJECT,
1748 .result = ACCEPT,
1749 },
1750 {
1751 "unpriv: cmp map pointer with zero",
1752 .insns = {
1753 BPF_MOV64_IMM(BPF_REG_1, 0),
1754 BPF_LD_MAP_FD(BPF_REG_1, 0),
1755 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1756 BPF_MOV64_IMM(BPF_REG_0, 0),
1757 BPF_EXIT_INSN(),
1758 },
1759 .fixup_map1 = { 1 },
1760 .errstr_unpriv = "R1 pointer comparison",
1761 .result_unpriv = REJECT,
1762 .result = ACCEPT,
1763 },
1764 {
1765 "unpriv: write into frame pointer",
1766 .insns = {
1767 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1768 BPF_MOV64_IMM(BPF_REG_0, 0),
1769 BPF_EXIT_INSN(),
1770 },
1771 .errstr = "frame pointer is read only",
1772 .result = REJECT,
1773 },
1774 {
1775 "unpriv: spill/fill frame pointer",
1776 .insns = {
1777 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1779 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1780 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1781 BPF_MOV64_IMM(BPF_REG_0, 0),
1782 BPF_EXIT_INSN(),
1783 },
1784 .errstr = "frame pointer is read only",
1785 .result = REJECT,
1786 },
1787 {
1788 "unpriv: cmp of frame pointer",
1789 .insns = {
1790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1791 BPF_MOV64_IMM(BPF_REG_0, 0),
1792 BPF_EXIT_INSN(),
1793 },
1794 .errstr_unpriv = "R10 pointer comparison",
1795 .result_unpriv = REJECT,
1796 .result = ACCEPT,
1797 },
1798 {
1799 "unpriv: cmp of stack pointer",
1800 .insns = {
1801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1804 BPF_MOV64_IMM(BPF_REG_0, 0),
1805 BPF_EXIT_INSN(),
1806 },
1807 .errstr_unpriv = "R2 pointer comparison",
1808 .result_unpriv = REJECT,
1809 .result = ACCEPT,
1810 },
1811 {
1812 "unpriv: obfuscate stack pointer",
1813 .insns = {
1814 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1817 BPF_MOV64_IMM(BPF_REG_0, 0),
1818 BPF_EXIT_INSN(),
1819 },
1820 .errstr_unpriv = "R2 pointer arithmetic",
1821 .result_unpriv = REJECT,
1822 .result = ACCEPT,
1823 },
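/* "raw_stack:" tests: bpf_skb_load_bytes() is allowed to write into
 * uninitialized stack memory, but the buffer pointer and length still have
 * to be valid, and registers spilled inside the written range read back as
 * unknown values ('inv') afterwards.
 */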
1824 {
1825 "raw_stack: no skb_load_bytes",
1826 .insns = {
1827 BPF_MOV64_IMM(BPF_REG_2, 4),
1828 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1830 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1831 BPF_MOV64_IMM(BPF_REG_4, 8),
1832 /* Call to skb_load_bytes() omitted. */
1833 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1834 BPF_EXIT_INSN(),
1835 },
1836 .result = REJECT,
1837 .errstr = "invalid read from stack off -8+0 size 8",
1838 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1839 },
1840 {
1841 "raw_stack: skb_load_bytes, negative len",
1842 .insns = {
1843 BPF_MOV64_IMM(BPF_REG_2, 4),
1844 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1846 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1847 BPF_MOV64_IMM(BPF_REG_4, -8),
1848 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1849 BPF_FUNC_skb_load_bytes),
1850 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1851 BPF_EXIT_INSN(),
1852 },
1853 .result = REJECT,
1854 .errstr = "invalid stack type R3",
1855 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1856 },
1857 {
1858 "raw_stack: skb_load_bytes, negative len 2",
1859 .insns = {
1860 BPF_MOV64_IMM(BPF_REG_2, 4),
1861 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1863 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1864 BPF_MOV64_IMM(BPF_REG_4, ~0),
1865 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1866 BPF_FUNC_skb_load_bytes),
1867 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1868 BPF_EXIT_INSN(),
1869 },
1870 .result = REJECT,
1871 .errstr = "invalid stack type R3",
1872 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1873 },
1874 {
1875 "raw_stack: skb_load_bytes, zero len",
1876 .insns = {
1877 BPF_MOV64_IMM(BPF_REG_2, 4),
1878 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1880 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1881 BPF_MOV64_IMM(BPF_REG_4, 0),
1882 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1883 BPF_FUNC_skb_load_bytes),
1884 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1885 BPF_EXIT_INSN(),
1886 },
1887 .result = REJECT,
1888 .errstr = "invalid stack type R3",
1889 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1890 },
1891 {
1892 "raw_stack: skb_load_bytes, no init",
1893 .insns = {
1894 BPF_MOV64_IMM(BPF_REG_2, 4),
1895 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1897 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1898 BPF_MOV64_IMM(BPF_REG_4, 8),
1899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1900 BPF_FUNC_skb_load_bytes),
1901 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1902 BPF_EXIT_INSN(),
1903 },
1904 .result = ACCEPT,
1905 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1906 },
1907 {
1908 "raw_stack: skb_load_bytes, init",
1909 .insns = {
1910 BPF_MOV64_IMM(BPF_REG_2, 4),
1911 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1913 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
1914 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1915 BPF_MOV64_IMM(BPF_REG_4, 8),
1916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1917 BPF_FUNC_skb_load_bytes),
1918 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1919 BPF_EXIT_INSN(),
1920 },
1921 .result = ACCEPT,
1922 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1923 },
1924 {
1925 "raw_stack: skb_load_bytes, spilled regs around bounds",
1926 .insns = {
1927 BPF_MOV64_IMM(BPF_REG_2, 4),
1928 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1930 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1931 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
1932 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1933 BPF_MOV64_IMM(BPF_REG_4, 8),
1934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1935 BPF_FUNC_skb_load_bytes),
1936 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1937 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1938 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1939 offsetof(struct __sk_buff, mark)),
1940 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1941 offsetof(struct __sk_buff, priority)),
1942 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1943 BPF_EXIT_INSN(),
1944 },
1945 .result = ACCEPT,
1946 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1947 },
1948 {
1949 "raw_stack: skb_load_bytes, spilled regs corruption",
1950 .insns = {
1951 BPF_MOV64_IMM(BPF_REG_2, 4),
1952 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1954 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1955 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1956 BPF_MOV64_IMM(BPF_REG_4, 8),
1957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1958 BPF_FUNC_skb_load_bytes),
1959 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1960 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1961 offsetof(struct __sk_buff, mark)),
1962 BPF_EXIT_INSN(),
1963 },
1964 .result = REJECT,
1965 .errstr = "R0 invalid mem access 'inv'",
1966 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1967 },
1968 {
1969 "raw_stack: skb_load_bytes, spilled regs corruption 2",
1970 .insns = {
1971 BPF_MOV64_IMM(BPF_REG_2, 4),
1972 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1974 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1975 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1976 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
1977 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1978 BPF_MOV64_IMM(BPF_REG_4, 8),
1979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1980 BPF_FUNC_skb_load_bytes),
1981 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1982 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1983 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
1984 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1985 offsetof(struct __sk_buff, mark)),
1986 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1987 offsetof(struct __sk_buff, priority)),
1988 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1989 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
1990 offsetof(struct __sk_buff, pkt_type)),
1991 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1992 BPF_EXIT_INSN(),
1993 },
1994 .result = REJECT,
1995 .errstr = "R3 invalid mem access 'inv'",
1996 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1997 },
1998 {
1999 "raw_stack: skb_load_bytes, spilled regs + data",
2000 .insns = {
2001 BPF_MOV64_IMM(BPF_REG_2, 4),
2002 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2004 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2005 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2006 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2007 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2008 BPF_MOV64_IMM(BPF_REG_4, 8),
2009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2010 BPF_FUNC_skb_load_bytes),
2011 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2012 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2013 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2014 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2015 offsetof(struct __sk_buff, mark)),
2016 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2017 offsetof(struct __sk_buff, priority)),
2018 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2019 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2020 BPF_EXIT_INSN(),
2021 },
2022 .result = ACCEPT,
2023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2024 },
2025 {
2026 "raw_stack: skb_load_bytes, invalid access 1",
2027 .insns = {
2028 BPF_MOV64_IMM(BPF_REG_2, 4),
2029 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2031 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2032 BPF_MOV64_IMM(BPF_REG_4, 8),
2033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2034 BPF_FUNC_skb_load_bytes),
2035 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2036 BPF_EXIT_INSN(),
2037 },
2038 .result = REJECT,
2039 .errstr = "invalid stack type R3 off=-513 access_size=8",
2040 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2041 },
2042 {
2043 "raw_stack: skb_load_bytes, invalid access 2",
2044 .insns = {
2045 BPF_MOV64_IMM(BPF_REG_2, 4),
2046 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2048 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2049 BPF_MOV64_IMM(BPF_REG_4, 8),
2050 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2051 BPF_FUNC_skb_load_bytes),
2052 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2053 BPF_EXIT_INSN(),
2054 },
2055 .result = REJECT,
2056 .errstr = "invalid stack type R3 off=-1 access_size=8",
2057 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2058 },
2059 {
2060 "raw_stack: skb_load_bytes, invalid access 3",
2061 .insns = {
2062 BPF_MOV64_IMM(BPF_REG_2, 4),
2063 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2064 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2065 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2066 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2067 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2068 BPF_FUNC_skb_load_bytes),
2069 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2070 BPF_EXIT_INSN(),
2071 },
2072 .result = REJECT,
2073 .errstr = "invalid stack type R3 off=-1 access_size=-1",
2074 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2075 },
2076 {
2077 "raw_stack: skb_load_bytes, invalid access 4",
2078 .insns = {
2079 BPF_MOV64_IMM(BPF_REG_2, 4),
2080 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2082 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2083 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2084 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2085 BPF_FUNC_skb_load_bytes),
2086 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2087 BPF_EXIT_INSN(),
2088 },
2089 .result = REJECT,
2090 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
2091 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2092 },
2093 {
2094 "raw_stack: skb_load_bytes, invalid access 5",
2095 .insns = {
2096 BPF_MOV64_IMM(BPF_REG_2, 4),
2097 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2099 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2100 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2102 BPF_FUNC_skb_load_bytes),
2103 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2104 BPF_EXIT_INSN(),
2105 },
2106 .result = REJECT,
2107 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
2108 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2109 },
2110 {
2111 "raw_stack: skb_load_bytes, invalid access 6",
2112 .insns = {
2113 BPF_MOV64_IMM(BPF_REG_2, 4),
2114 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2116 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2117 BPF_MOV64_IMM(BPF_REG_4, 0),
2118 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2119 BPF_FUNC_skb_load_bytes),
2120 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2121 BPF_EXIT_INSN(),
2122 },
2123 .result = REJECT,
2124 .errstr = "invalid stack type R3 off=-512 access_size=0",
2125 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2126 },
2127 {
2128 "raw_stack: skb_load_bytes, large access",
2129 .insns = {
2130 BPF_MOV64_IMM(BPF_REG_2, 4),
2131 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2133 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2134 BPF_MOV64_IMM(BPF_REG_4, 512),
2135 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2136 BPF_FUNC_skb_load_bytes),
2137 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2138 BPF_EXIT_INSN(),
2139 },
2140 .result = ACCEPT,
2141 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2142 },
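/* "direct packet access" tests: skb->data and skb->data_end are loaded
 * from the context and a derived pointer is compared against data_end
 * before the packet is dereferenced. Accesses on paths where the bounds
 * check holds are accepted, including offsets derived through shifts,
 * masks and branches as long as their tracked range stays inside the
 * check; accesses on unchecked paths are rejected, socket filters may
 * not read the data/data_end fields at all (test3), and a packet
 * pointer spilled to the stack and modified with xadd becomes unknown
 * (test15).
 */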
2143 {
2144 "direct packet access: test1",
2145 .insns = {
2146 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2147 offsetof(struct __sk_buff, data)),
2148 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2149 offsetof(struct __sk_buff, data_end)),
2150 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2152 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2153 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2154 BPF_MOV64_IMM(BPF_REG_0, 0),
2155 BPF_EXIT_INSN(),
2156 },
2157 .result = ACCEPT,
2158 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2159 },
2160 {
2161 "direct packet access: test2",
2162 .insns = {
2163 BPF_MOV64_IMM(BPF_REG_0, 1),
2164 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2165 offsetof(struct __sk_buff, data_end)),
2166 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2167 offsetof(struct __sk_buff, data)),
2168 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2170 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2171 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2172 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2173 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2175 offsetof(struct __sk_buff, data)),
2176 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2177 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2178 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
2179 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
2180 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2181 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2183 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2184 offsetof(struct __sk_buff, data_end)),
2185 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2186 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2187 BPF_MOV64_IMM(BPF_REG_0, 0),
2188 BPF_EXIT_INSN(),
2189 },
2190 .result = ACCEPT,
2191 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2192 },
2193 {
2194 "direct packet access: test3",
2195 .insns = {
2196 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2197 offsetof(struct __sk_buff, data)),
2198 BPF_MOV64_IMM(BPF_REG_0, 0),
2199 BPF_EXIT_INSN(),
2200 },
2201 .errstr = "invalid bpf_context access off=76",
2202 .result = REJECT,
2203 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2204 },
2205 {
2206 "direct packet access: test4 (write)",
2207 .insns = {
2208 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2209 offsetof(struct __sk_buff, data)),
2210 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2211 offsetof(struct __sk_buff, data_end)),
2212 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2214 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2215 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2216 BPF_MOV64_IMM(BPF_REG_0, 0),
2217 BPF_EXIT_INSN(),
2218 },
2219 .result = ACCEPT,
2220 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2221 },
2222 {
2223 "direct packet access: test5 (pkt_end >= reg, good access)",
2224 .insns = {
2225 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2226 offsetof(struct __sk_buff, data)),
2227 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2228 offsetof(struct __sk_buff, data_end)),
2229 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2231 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2232 BPF_MOV64_IMM(BPF_REG_0, 1),
2233 BPF_EXIT_INSN(),
2234 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2235 BPF_MOV64_IMM(BPF_REG_0, 0),
2236 BPF_EXIT_INSN(),
2237 },
2238 .result = ACCEPT,
2239 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2240 },
2241 {
2242 "direct packet access: test6 (pkt_end >= reg, bad access)",
2243 .insns = {
2244 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2245 offsetof(struct __sk_buff, data)),
2246 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2247 offsetof(struct __sk_buff, data_end)),
2248 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2250 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2251 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2252 BPF_MOV64_IMM(BPF_REG_0, 1),
2253 BPF_EXIT_INSN(),
2254 BPF_MOV64_IMM(BPF_REG_0, 0),
2255 BPF_EXIT_INSN(),
2256 },
2257 .errstr = "invalid access to packet",
2258 .result = REJECT,
2259 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2260 },
2261 {
2262 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2263 .insns = {
2264 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2265 offsetof(struct __sk_buff, data)),
2266 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2267 offsetof(struct __sk_buff, data_end)),
2268 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2270 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2271 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2272 BPF_MOV64_IMM(BPF_REG_0, 1),
2273 BPF_EXIT_INSN(),
2274 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2275 BPF_MOV64_IMM(BPF_REG_0, 0),
2276 BPF_EXIT_INSN(),
2277 },
2278 .errstr = "invalid access to packet",
2279 .result = REJECT,
2280 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2281 },
2282 {
2283 "direct packet access: test8 (double test, variant 1)",
2284 .insns = {
2285 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2286 offsetof(struct __sk_buff, data)),
2287 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2288 offsetof(struct __sk_buff, data_end)),
2289 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2291 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2292 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2293 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2294 BPF_MOV64_IMM(BPF_REG_0, 1),
2295 BPF_EXIT_INSN(),
2296 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2297 BPF_MOV64_IMM(BPF_REG_0, 0),
2298 BPF_EXIT_INSN(),
2299 },
2300 .result = ACCEPT,
2301 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2302 },
2303 {
2304 "direct packet access: test9 (double test, variant 2)",
2305 .insns = {
2306 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2307 offsetof(struct __sk_buff, data)),
2308 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2309 offsetof(struct __sk_buff, data_end)),
2310 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2312 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2313 BPF_MOV64_IMM(BPF_REG_0, 1),
2314 BPF_EXIT_INSN(),
2315 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2316 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2317 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2318 BPF_MOV64_IMM(BPF_REG_0, 0),
2319 BPF_EXIT_INSN(),
2320 },
2321 .result = ACCEPT,
2322 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2323 },
2324 {
2325 "direct packet access: test10 (write invalid)",
2326 .insns = {
2327 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2328 offsetof(struct __sk_buff, data)),
2329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2330 offsetof(struct __sk_buff, data_end)),
2331 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2333 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2334 BPF_MOV64_IMM(BPF_REG_0, 0),
2335 BPF_EXIT_INSN(),
2336 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2337 BPF_MOV64_IMM(BPF_REG_0, 0),
2338 BPF_EXIT_INSN(),
2339 },
2340 .errstr = "invalid access to packet",
2341 .result = REJECT,
2342 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2343 },
2344 {
2345 "direct packet access: test11 (shift, good access)",
2346 .insns = {
2347 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2348 offsetof(struct __sk_buff, data)),
2349 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2350 offsetof(struct __sk_buff, data_end)),
2351 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2353 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2354 BPF_MOV64_IMM(BPF_REG_3, 144),
2355 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2357 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2358 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2359 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2360 BPF_MOV64_IMM(BPF_REG_0, 1),
2361 BPF_EXIT_INSN(),
2362 BPF_MOV64_IMM(BPF_REG_0, 0),
2363 BPF_EXIT_INSN(),
2364 },
2365 .result = ACCEPT,
2366 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2367 },
2368 {
2369 "direct packet access: test12 (and, good access)",
2370 .insns = {
2371 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2372 offsetof(struct __sk_buff, data)),
2373 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2374 offsetof(struct __sk_buff, data_end)),
2375 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2377 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2378 BPF_MOV64_IMM(BPF_REG_3, 144),
2379 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2381 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2382 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2383 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2384 BPF_MOV64_IMM(BPF_REG_0, 1),
2385 BPF_EXIT_INSN(),
2386 BPF_MOV64_IMM(BPF_REG_0, 0),
2387 BPF_EXIT_INSN(),
2388 },
2389 .result = ACCEPT,
2390 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2391 },
2392 {
2393 "direct packet access: test13 (branches, good access)",
2394 .insns = {
2395 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2396 offsetof(struct __sk_buff, data)),
2397 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2398 offsetof(struct __sk_buff, data_end)),
2399 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2401 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2402 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2403 offsetof(struct __sk_buff, mark)),
2404 BPF_MOV64_IMM(BPF_REG_4, 1),
2405 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2406 BPF_MOV64_IMM(BPF_REG_3, 14),
2407 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2408 BPF_MOV64_IMM(BPF_REG_3, 24),
2409 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2411 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2412 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2413 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2414 BPF_MOV64_IMM(BPF_REG_0, 1),
2415 BPF_EXIT_INSN(),
2416 BPF_MOV64_IMM(BPF_REG_0, 0),
2417 BPF_EXIT_INSN(),
2418 },
2419 .result = ACCEPT,
2420 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2421 },
2422 {
2423 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2424 .insns = {
2425 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2426 offsetof(struct __sk_buff, data)),
2427 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2428 offsetof(struct __sk_buff, data_end)),
2429 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2431 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2432 BPF_MOV64_IMM(BPF_REG_5, 12),
2433 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2434 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2435 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2436 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2437 BPF_MOV64_IMM(BPF_REG_0, 1),
2438 BPF_EXIT_INSN(),
2439 BPF_MOV64_IMM(BPF_REG_0, 0),
2440 BPF_EXIT_INSN(),
2441 },
2442 .result = ACCEPT,
2443 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2444 },
2445 {
2446 "direct packet access: test15 (spill with xadd)",
2447 .insns = {
2448 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2449 offsetof(struct __sk_buff, data)),
2450 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2451 offsetof(struct __sk_buff, data_end)),
2452 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2454 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2455 BPF_MOV64_IMM(BPF_REG_5, 4096),
2456 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2458 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2459 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2460 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2461 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2462 BPF_MOV64_IMM(BPF_REG_0, 0),
2463 BPF_EXIT_INSN(),
2464 },
2465 .errstr = "R2 invalid mem access 'inv'",
2466 .result = REJECT,
2467 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2468 },
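/* "helper access to packet" tests (XDP first, then SCHED_CLS): a packet
 * pointer may be passed as a helper argument (here as the map key for
 * bpf_map_lookup_elem()/bpf_map_update_elem()) only if it has been
 * bounds-checked against data_end for the size the helper needs;
 * unchecked pointers and too-short checked ranges are rejected.
 */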
2469 {
2470 "helper access to packet: test1, valid packet_ptr range",
2471 .insns = {
2472 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2473 offsetof(struct xdp_md, data)),
2474 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2475 offsetof(struct xdp_md, data_end)),
2476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2478 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2479 BPF_LD_MAP_FD(BPF_REG_1, 0),
2480 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2481 BPF_MOV64_IMM(BPF_REG_4, 0),
2482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2483 BPF_FUNC_map_update_elem),
2484 BPF_MOV64_IMM(BPF_REG_0, 0),
2485 BPF_EXIT_INSN(),
2486 },
2487 .fixup_map1 = { 5 },
2488 .result_unpriv = ACCEPT,
2489 .result = ACCEPT,
2490 .prog_type = BPF_PROG_TYPE_XDP,
2491 },
2492 {
2493 "helper access to packet: test2, unchecked packet_ptr",
2494 .insns = {
2495 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2496 offsetof(struct xdp_md, data)),
2497 BPF_LD_MAP_FD(BPF_REG_1, 0),
2498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2499 BPF_FUNC_map_lookup_elem),
2500 BPF_MOV64_IMM(BPF_REG_0, 0),
2501 BPF_EXIT_INSN(),
2502 },
2503 .fixup_map1 = { 1 },
2504 .result = REJECT,
2505 .errstr = "invalid access to packet",
2506 .prog_type = BPF_PROG_TYPE_XDP,
2507 },
2508 {
2509 "helper access to packet: test3, variable add",
2510 .insns = {
2511 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2512 offsetof(struct xdp_md, data)),
2513 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2514 offsetof(struct xdp_md, data_end)),
2515 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2517 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2518 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2519 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2520 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2521 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2523 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2524 BPF_LD_MAP_FD(BPF_REG_1, 0),
2525 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
2526 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2527 BPF_FUNC_map_lookup_elem),
2528 BPF_MOV64_IMM(BPF_REG_0, 0),
2529 BPF_EXIT_INSN(),
2530 },
2531 .fixup_map1 = { 11 },
2532 .result = ACCEPT,
2533 .prog_type = BPF_PROG_TYPE_XDP,
2534 },
2535 {
2536 "helper access to packet: test4, packet_ptr with bad range",
2537 .insns = {
2538 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2539 offsetof(struct xdp_md, data)),
2540 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2541 offsetof(struct xdp_md, data_end)),
2542 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2544 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2545 BPF_MOV64_IMM(BPF_REG_0, 0),
2546 BPF_EXIT_INSN(),
2547 BPF_LD_MAP_FD(BPF_REG_1, 0),
2548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2549 BPF_FUNC_map_lookup_elem),
2550 BPF_MOV64_IMM(BPF_REG_0, 0),
2551 BPF_EXIT_INSN(),
2552 },
2553 .fixup_map1 = { 7 },
2554 .result = REJECT,
2555 .errstr = "invalid access to packet",
2556 .prog_type = BPF_PROG_TYPE_XDP,
2557 },
2558 {
2559 "helper access to packet: test5, packet_ptr with too short range",
2560 .insns = {
2561 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2562 offsetof(struct xdp_md, data)),
2563 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2564 offsetof(struct xdp_md, data_end)),
2565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2566 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2568 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2569 BPF_LD_MAP_FD(BPF_REG_1, 0),
2570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2571 BPF_FUNC_map_lookup_elem),
2572 BPF_MOV64_IMM(BPF_REG_0, 0),
2573 BPF_EXIT_INSN(),
2574 },
2575 .fixup_map1 = { 6 },
2576 .result = REJECT,
2577 .errstr = "invalid access to packet",
2578 .prog_type = BPF_PROG_TYPE_XDP,
2579 },
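/* Tests 6-10 repeat the same packet_ptr patterns for SCHED_CLS using
 * struct __sk_buff instead of struct xdp_md; tests 11 onwards exercise
 * individual cls helpers (bpf_skb_store_bytes(), bpf_skb_load_bytes(),
 * bpf_csum_diff()) with packet pointers or pkt_end as arguments.
 */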
2580 {
2581 "helper access to packet: test6, cls valid packet_ptr range",
2582 .insns = {
2583 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2584 offsetof(struct __sk_buff, data)),
2585 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2586 offsetof(struct __sk_buff, data_end)),
2587 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2589 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2590 BPF_LD_MAP_FD(BPF_REG_1, 0),
2591 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2592 BPF_MOV64_IMM(BPF_REG_4, 0),
2593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2594 BPF_FUNC_map_update_elem),
2595 BPF_MOV64_IMM(BPF_REG_0, 0),
2596 BPF_EXIT_INSN(),
2597 },
2598 .fixup_map1 = { 5 },
2599 .result = ACCEPT,
2600 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2601 },
2602 {
2603 "helper access to packet: test7, cls unchecked packet_ptr",
2604 .insns = {
2605 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2606 offsetof(struct __sk_buff, data)),
2607 BPF_LD_MAP_FD(BPF_REG_1, 0),
2608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2609 BPF_FUNC_map_lookup_elem),
2610 BPF_MOV64_IMM(BPF_REG_0, 0),
2611 BPF_EXIT_INSN(),
2612 },
2613 .fixup_map1 = { 1 },
2614 .result = REJECT,
2615 .errstr = "invalid access to packet",
2616 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2617 },
2618 {
2619 "helper access to packet: test8, cls variable add",
2620 .insns = {
2621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2622 offsetof(struct __sk_buff, data)),
2623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2624 offsetof(struct __sk_buff, data_end)),
2625 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2627 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2628 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2629 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2630 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2631 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2633 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2634 BPF_LD_MAP_FD(BPF_REG_1, 0),
2635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
2636 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2637 BPF_FUNC_map_lookup_elem),
2638 BPF_MOV64_IMM(BPF_REG_0, 0),
2639 BPF_EXIT_INSN(),
2640 },
2641 .fixup_map1 = { 11 },
2642 .result = ACCEPT,
2643 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2644 },
2645 {
2646 "helper access to packet: test9, cls packet_ptr with bad range",
2647 .insns = {
2648 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2649 offsetof(struct __sk_buff, data)),
2650 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2651 offsetof(struct __sk_buff, data_end)),
2652 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2654 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2655 BPF_MOV64_IMM(BPF_REG_0, 0),
2656 BPF_EXIT_INSN(),
2657 BPF_LD_MAP_FD(BPF_REG_1, 0),
2658 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2659 BPF_FUNC_map_lookup_elem),
2660 BPF_MOV64_IMM(BPF_REG_0, 0),
2661 BPF_EXIT_INSN(),
2662 },
2663 .fixup_map1 = { 7 },
2664 .result = REJECT,
2665 .errstr = "invalid access to packet",
2666 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2667 },
2668 {
2669 "helper access to packet: test10, cls packet_ptr with too short range",
2670 .insns = {
2671 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2672 offsetof(struct __sk_buff, data)),
2673 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2674 offsetof(struct __sk_buff, data_end)),
2675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2676 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2677 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2678 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2679 BPF_LD_MAP_FD(BPF_REG_1, 0),
2680 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2681 BPF_FUNC_map_lookup_elem),
2682 BPF_MOV64_IMM(BPF_REG_0, 0),
2683 BPF_EXIT_INSN(),
2684 },
2685 .fixup_map1 = { 6 },
2686 .result = REJECT,
2687 .errstr = "invalid access to packet",
2688 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2689 },
2690 {
2691 "helper access to packet: test11, cls unsuitable helper 1",
2692 .insns = {
2693 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2694 offsetof(struct __sk_buff, data)),
2695 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2696 offsetof(struct __sk_buff, data_end)),
2697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2698 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
2700 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
2701 BPF_MOV64_IMM(BPF_REG_2, 0),
2702 BPF_MOV64_IMM(BPF_REG_4, 42),
2703 BPF_MOV64_IMM(BPF_REG_5, 0),
2704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2705 BPF_FUNC_skb_store_bytes),
2706 BPF_MOV64_IMM(BPF_REG_0, 0),
2707 BPF_EXIT_INSN(),
2708 },
2709 .result = REJECT,
2710 .errstr = "helper access to the packet",
2711 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2712 },
2713 {
2714 "helper access to packet: test12, cls unsuitable helper 2",
2715 .insns = {
2716 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2717 offsetof(struct __sk_buff, data)),
2718 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2719 offsetof(struct __sk_buff, data_end)),
2720 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
2722 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
2723 BPF_MOV64_IMM(BPF_REG_2, 0),
2724 BPF_MOV64_IMM(BPF_REG_4, 4),
2725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2726 BPF_FUNC_skb_load_bytes),
2727 BPF_MOV64_IMM(BPF_REG_0, 0),
2728 BPF_EXIT_INSN(),
2729 },
2730 .result = REJECT,
2731 .errstr = "helper access to the packet",
2732 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2733 },
2734 {
2735 "helper access to packet: test13, cls helper ok",
2736 .insns = {
2737 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2738 offsetof(struct __sk_buff, data)),
2739 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2740 offsetof(struct __sk_buff, data_end)),
2741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2742 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2744 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2745 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2746 BPF_MOV64_IMM(BPF_REG_2, 4),
2747 BPF_MOV64_IMM(BPF_REG_3, 0),
2748 BPF_MOV64_IMM(BPF_REG_4, 0),
2749 BPF_MOV64_IMM(BPF_REG_5, 0),
2750 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2751 BPF_FUNC_csum_diff),
2752 BPF_MOV64_IMM(BPF_REG_0, 0),
2753 BPF_EXIT_INSN(),
2754 },
2755 .result = ACCEPT,
2756 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2757 },
2758 {
2759 "helper access to packet: test14, cls helper fail sub",
2760 .insns = {
2761 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2762 offsetof(struct __sk_buff, data)),
2763 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2764 offsetof(struct __sk_buff, data_end)),
2765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2766 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2768 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2769 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2770 BPF_MOV64_IMM(BPF_REG_2, 4),
2771 BPF_MOV64_IMM(BPF_REG_3, 0),
2772 BPF_MOV64_IMM(BPF_REG_4, 0),
2773 BPF_MOV64_IMM(BPF_REG_5, 0),
2774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2775 BPF_FUNC_csum_diff),
2776 BPF_MOV64_IMM(BPF_REG_0, 0),
2777 BPF_EXIT_INSN(),
2778 },
2779 .result = REJECT,
2780 .errstr = "type=inv expected=fp",
2781 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2782 },
2783 {
2784 "helper access to packet: test15, cls helper fail range 1",
2785 .insns = {
2786 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2787 offsetof(struct __sk_buff, data)),
2788 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2789 offsetof(struct __sk_buff, data_end)),
2790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2791 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2793 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2794 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2795 BPF_MOV64_IMM(BPF_REG_2, 8),
2796 BPF_MOV64_IMM(BPF_REG_3, 0),
2797 BPF_MOV64_IMM(BPF_REG_4, 0),
2798 BPF_MOV64_IMM(BPF_REG_5, 0),
2799 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2800 BPF_FUNC_csum_diff),
2801 BPF_MOV64_IMM(BPF_REG_0, 0),
2802 BPF_EXIT_INSN(),
2803 },
2804 .result = REJECT,
2805 .errstr = "invalid access to packet",
2806 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2807 },
2808 {
2809 "helper access to packet: test16, cls helper fail range 2",
2810 .insns = {
2811 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2812 offsetof(struct __sk_buff, data)),
2813 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2814 offsetof(struct __sk_buff, data_end)),
2815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2816 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2818 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2820 BPF_MOV64_IMM(BPF_REG_2, -9),
2821 BPF_MOV64_IMM(BPF_REG_3, 0),
2822 BPF_MOV64_IMM(BPF_REG_4, 0),
2823 BPF_MOV64_IMM(BPF_REG_5, 0),
2824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2825 BPF_FUNC_csum_diff),
2826 BPF_MOV64_IMM(BPF_REG_0, 0),
2827 BPF_EXIT_INSN(),
2828 },
2829 .result = REJECT,
2830 .errstr = "invalid access to packet",
2831 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2832 },
2833 {
2834 "helper access to packet: test17, cls helper fail range 3",
2835 .insns = {
2836 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2837 offsetof(struct __sk_buff, data)),
2838 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2839 offsetof(struct __sk_buff, data_end)),
2840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2841 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2843 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2844 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2845 BPF_MOV64_IMM(BPF_REG_2, ~0),
2846 BPF_MOV64_IMM(BPF_REG_3, 0),
2847 BPF_MOV64_IMM(BPF_REG_4, 0),
2848 BPF_MOV64_IMM(BPF_REG_5, 0),
2849 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2850 BPF_FUNC_csum_diff),
2851 BPF_MOV64_IMM(BPF_REG_0, 0),
2852 BPF_EXIT_INSN(),
2853 },
2854 .result = REJECT,
2855 .errstr = "invalid access to packet",
2856 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2857 },
2858 {
2859 "helper access to packet: test18, cls helper fail range zero",
2860 .insns = {
2861 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2862 offsetof(struct __sk_buff, data)),
2863 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2864 offsetof(struct __sk_buff, data_end)),
2865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2866 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2868 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2869 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2870 BPF_MOV64_IMM(BPF_REG_2, 0),
2871 BPF_MOV64_IMM(BPF_REG_3, 0),
2872 BPF_MOV64_IMM(BPF_REG_4, 0),
2873 BPF_MOV64_IMM(BPF_REG_5, 0),
2874 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2875 BPF_FUNC_csum_diff),
2876 BPF_MOV64_IMM(BPF_REG_0, 0),
2877 BPF_EXIT_INSN(),
2878 },
2879 .result = REJECT,
2880 .errstr = "invalid access to packet",
2881 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2882 },
2883 {
2884 "helper access to packet: test19, pkt end as input",
2885 .insns = {
2886 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2887 offsetof(struct __sk_buff, data)),
2888 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2889 offsetof(struct __sk_buff, data_end)),
2890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2891 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2893 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2894 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
2895 BPF_MOV64_IMM(BPF_REG_2, 4),
2896 BPF_MOV64_IMM(BPF_REG_3, 0),
2897 BPF_MOV64_IMM(BPF_REG_4, 0),
2898 BPF_MOV64_IMM(BPF_REG_5, 0),
2899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2900 BPF_FUNC_csum_diff),
2901 BPF_MOV64_IMM(BPF_REG_0, 0),
2902 BPF_EXIT_INSN(),
2903 },
2904 .result = REJECT,
2905 .errstr = "R1 type=pkt_end expected=fp",
2906 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2907 },
2908 {
2909 "helper access to packet: test20, wrong reg",
2910 .insns = {
2911 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2912 offsetof(struct __sk_buff, data)),
2913 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2914 offsetof(struct __sk_buff, data_end)),
2915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2918 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2919 BPF_MOV64_IMM(BPF_REG_2, 4),
2920 BPF_MOV64_IMM(BPF_REG_3, 0),
2921 BPF_MOV64_IMM(BPF_REG_4, 0),
2922 BPF_MOV64_IMM(BPF_REG_5, 0),
2923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2924 BPF_FUNC_csum_diff),
2925 BPF_MOV64_IMM(BPF_REG_0, 0),
2926 BPF_EXIT_INSN(),
2927 },
2928 .result = REJECT,
2929 .errstr = "invalid access to packet",
2930 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2931 },
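/* Map/array access tests: the return value of bpf_map_lookup_elem() may
 * be used as a base pointer into the array value only after a NULL check
 * and, for register-based indexing, only with both a lower and an upper
 * bound on the index. Constant offsets beyond value_size, missing or
 * wrong bound checks, and signed comparisons that leave the index
 * possibly negative are rejected; under the unprivileged checks any
 * pointer arithmetic on the value pointer is rejected outright (see
 * errstr_unpriv).
 */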
2932 {
2933 "valid map access into an array with a constant",
2934 .insns = {
2935 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2936 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2938 BPF_LD_MAP_FD(BPF_REG_1, 0),
2939 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2940 BPF_FUNC_map_lookup_elem),
2941 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2942 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2943 offsetof(struct test_val, foo)),
2944 BPF_EXIT_INSN(),
2945 },
2946 .fixup_map2 = { 3 },
2947 .errstr_unpriv = "R0 leaks addr",
2948 .result_unpriv = REJECT,
2949 .result = ACCEPT,
2950 },
2951 {
2952 "valid map access into an array with a register",
2953 .insns = {
2954 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2955 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2957 BPF_LD_MAP_FD(BPF_REG_1, 0),
2958 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2959 BPF_FUNC_map_lookup_elem),
2960 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2961 BPF_MOV64_IMM(BPF_REG_1, 4),
2962 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2963 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2964 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2965 offsetof(struct test_val, foo)),
2966 BPF_EXIT_INSN(),
2967 },
2968 .fixup_map2 = { 3 },
2969 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2970 .result_unpriv = REJECT,
2971 .result = ACCEPT,
2972 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2973 },
2974 {
2975 "valid map access into an array with a variable",
2976 .insns = {
2977 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2978 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2979 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2980 BPF_LD_MAP_FD(BPF_REG_1, 0),
2981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2982 BPF_FUNC_map_lookup_elem),
2983 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
2984 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2985 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
2986 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2987 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2988 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2989 offsetof(struct test_val, foo)),
2990 BPF_EXIT_INSN(),
2991 },
2992 .fixup_map2 = { 3 },
2993 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2994 .result_unpriv = REJECT,
2995 .result = ACCEPT,
2996 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2997 },
2998 {
2999 "valid map access into an array with a signed variable",
3000 .insns = {
3001 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3004 BPF_LD_MAP_FD(BPF_REG_1, 0),
3005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3006 BPF_FUNC_map_lookup_elem),
3007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3008 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3009 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3010 BPF_MOV32_IMM(BPF_REG_1, 0),
3011 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3012 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3013 BPF_MOV32_IMM(BPF_REG_1, 0),
3014 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3015 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3016 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3017 offsetof(struct test_val, foo)),
3018 BPF_EXIT_INSN(),
3019 },
3020 .fixup_map2 = { 3 },
3021 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3022 .result_unpriv = REJECT,
3023 .result = ACCEPT,
3024 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3025 },
3026 {
3027 "invalid map access into an array with a constant",
3028 .insns = {
3029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3032 BPF_LD_MAP_FD(BPF_REG_1, 0),
3033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3034 BPF_FUNC_map_lookup_elem),
3035 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3036 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3037 offsetof(struct test_val, foo)),
3038 BPF_EXIT_INSN(),
3039 },
3040 .fixup_map2 = { 3 },
3041 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3042 .result = REJECT,
3043 },
3044 {
3045 "invalid map access into an array with a register",
3046 .insns = {
3047 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3048 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3050 BPF_LD_MAP_FD(BPF_REG_1, 0),
3051 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3052 BPF_FUNC_map_lookup_elem),
3053 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3054 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3055 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3056 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3057 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3058 offsetof(struct test_val, foo)),
3059 BPF_EXIT_INSN(),
3060 },
3061 .fixup_map2 = { 3 },
3062 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3063 .errstr = "R0 min value is outside of the array range",
3064 .result_unpriv = REJECT,
3065 .result = REJECT,
3066 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3067 },
3068 {
3069 "invalid map access into an array with a variable",
3070 .insns = {
3071 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3074 BPF_LD_MAP_FD(BPF_REG_1, 0),
3075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3076 BPF_FUNC_map_lookup_elem),
3077 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3078 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3079 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3080 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3081 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3082 offsetof(struct test_val, foo)),
3083 BPF_EXIT_INSN(),
3084 },
3085 .fixup_map2 = { 3 },
3086 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3087 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3088 .result_unpriv = REJECT,
3089 .result = REJECT,
3090 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3091 },
3092 {
3093 "invalid map access into an array with no floor check",
3094 .insns = {
3095 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3096 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3098 BPF_LD_MAP_FD(BPF_REG_1, 0),
3099 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3100 BPF_FUNC_map_lookup_elem),
3101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3102 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3103 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3104 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3105 BPF_MOV32_IMM(BPF_REG_1, 0),
3106 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3107 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3108 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3109 offsetof(struct test_val, foo)),
3110 BPF_EXIT_INSN(),
3111 },
3112 .fixup_map2 = { 3 },
3113 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3114 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3115 .result_unpriv = REJECT,
3116 .result = REJECT,
3117 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3118 },
3119 {
3120 "invalid map access into an array with a invalid max check",
3121 .insns = {
3122 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3123 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3125 BPF_LD_MAP_FD(BPF_REG_1, 0),
3126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3127 BPF_FUNC_map_lookup_elem),
3128 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3129 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3130 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3131 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3132 BPF_MOV32_IMM(BPF_REG_1, 0),
3133 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3134 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3135 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3136 offsetof(struct test_val, foo)),
3137 BPF_EXIT_INSN(),
3138 },
3139 .fixup_map2 = { 3 },
3140 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3141 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3142 .result_unpriv = REJECT,
3143 .result = REJECT,
3144 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3145 },
3146 {
3147 "invalid map access into an array with a invalid max check",
3148 .insns = {
3149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3152 BPF_LD_MAP_FD(BPF_REG_1, 0),
3153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3154 BPF_FUNC_map_lookup_elem),
3155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3156 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3157 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3158 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3160 BPF_LD_MAP_FD(BPF_REG_1, 0),
3161 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3162 BPF_FUNC_map_lookup_elem),
3163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3164 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3165 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3166 offsetof(struct test_val, foo)),
3167 BPF_EXIT_INSN(),
3168 },
3169 .fixup_map2 = { 3, 11 },
3170 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3171 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3172 .result_unpriv = REJECT,
3173 .result = REJECT,
3174 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3175 },
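/* The following tests copy the return value of bpf_map_lookup_elem()
 * into other registers. Copies that share the checked value are usable
 * after a single NULL check, a copy held in a caller-saved register is
 * clobbered by a second helper call (R4 !read_ok), and once a copy is
 * known to be NULL it degrades to a plain scalar that can no longer be
 * dereferenced.
 */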
3176 {
3177 "multiple registers share map_lookup_elem result",
3178 .insns = {
3179 BPF_MOV64_IMM(BPF_REG_1, 10),
3180 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3181 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3183 BPF_LD_MAP_FD(BPF_REG_1, 0),
3184 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3185 BPF_FUNC_map_lookup_elem),
3186 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3187 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3188 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3189 BPF_EXIT_INSN(),
3190 },
3191 .fixup_map1 = { 4 },
3192 .result = ACCEPT,
3193 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3194 },
3195 {
3196 "invalid memory access with multiple map_lookup_elem calls",
3197 .insns = {
3198 BPF_MOV64_IMM(BPF_REG_1, 10),
3199 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3200 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3202 BPF_LD_MAP_FD(BPF_REG_1, 0),
3203 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3204 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3205 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3206 BPF_FUNC_map_lookup_elem),
3207 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3208 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3211 BPF_FUNC_map_lookup_elem),
3212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3213 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3214 BPF_EXIT_INSN(),
3215 },
3216 .fixup_map1 = { 4 },
3217 .result = REJECT,
3218 .errstr = "R4 !read_ok",
3219 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3220 },
3221 {
3222 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3223 .insns = {
3224 BPF_MOV64_IMM(BPF_REG_1, 10),
3225 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3226 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3228 BPF_LD_MAP_FD(BPF_REG_1, 0),
3229 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3230 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3232 BPF_FUNC_map_lookup_elem),
3233 BPF_MOV64_IMM(BPF_REG_2, 10),
3234 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3238 BPF_FUNC_map_lookup_elem),
3239 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3241 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3242 BPF_EXIT_INSN(),
3243 },
3244 .fixup_map1 = { 4 },
3245 .result = ACCEPT,
3246 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3247 },
3248 {
3249 "multiple registers share map_lookup_elem bad reg type",
3250 .insns = {
3251 BPF_MOV64_IMM(BPF_REG_1, 10),
3252 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3253 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3255 BPF_LD_MAP_FD(BPF_REG_1, 0),
3256 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3257 BPF_FUNC_map_lookup_elem),
3258 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
3259 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
3260 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3261 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3262 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3263 BPF_MOV64_IMM(BPF_REG_1, 1),
3264 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3265 BPF_MOV64_IMM(BPF_REG_1, 2),
3266 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
3267 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
3268 BPF_MOV64_IMM(BPF_REG_1, 3),
3269 BPF_EXIT_INSN(),
3270 },
3271 .fixup_map1 = { 4 },
3272 .result = REJECT,
3273 .errstr = "R3 invalid mem access 'inv'",
3274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3275 },
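/* A bounds check only constrains the branch on which it actually holds:
 * the conditional jump below establishes just a lower bound on the index
 * on one path, so on that path the array access is unbounded above and
 * must be rejected.
 */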
3276 {
3277 "invalid map access from else condition",
3278 .insns = {
3279 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3280 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3282 BPF_LD_MAP_FD(BPF_REG_1, 0),
3283 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3284 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3285 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3286 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3288 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3289 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3290 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3291 BPF_EXIT_INSN(),
3292 },
3293 .fixup_map2 = { 3 },
3294 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3295 .result = REJECT,
3296 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3297 .result_unpriv = REJECT,
3298 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3299 },
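/* The BPF_OR tests check constant tracking through ALU ops: the result
 * of OR-ing two known constants stays a known constant and may be used
 * as the size argument of bpf_probe_read(), but it must keep the access
 * within the 48-byte stack area at R10-48 (34 | 13 = 47 fits, while
 * 34 | 24 = 58 does not, as the access_size in the errstr shows).
 */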
3300 {
3301 "constant register |= constant should keep constant type",
3302 .insns = {
3303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3304 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3305 BPF_MOV64_IMM(BPF_REG_2, 34),
3306 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3307 BPF_MOV64_IMM(BPF_REG_3, 0),
3308 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3309 BPF_EXIT_INSN(),
3310 },
3311 .result = ACCEPT,
3312 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3313 },
3314 {
3315 "constant register |= constant should not bypass stack boundary checks",
3316 .insns = {
3317 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3319 BPF_MOV64_IMM(BPF_REG_2, 34),
3320 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3321 BPF_MOV64_IMM(BPF_REG_3, 0),
3322 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3323 BPF_EXIT_INSN(),
3324 },
3325 .errstr = "invalid stack type R1 off=-48 access_size=58",
3326 .result = REJECT,
3327 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3328 },
3329 {
3330 "constant register |= constant register should keep constant type",
3331 .insns = {
3332 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3334 BPF_MOV64_IMM(BPF_REG_2, 34),
3335 BPF_MOV64_IMM(BPF_REG_4, 13),
3336 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3337 BPF_MOV64_IMM(BPF_REG_3, 0),
3338 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3339 BPF_EXIT_INSN(),
3340 },
3341 .result = ACCEPT,
3342 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3343 },
3344 {
3345 "constant register |= constant register should not bypass stack boundary checks",
3346 .insns = {
3347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3349 BPF_MOV64_IMM(BPF_REG_2, 34),
3350 BPF_MOV64_IMM(BPF_REG_4, 24),
3351 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3352 BPF_MOV64_IMM(BPF_REG_3, 0),
3353 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3354 BPF_EXIT_INSN(),
3355 },
3356 .errstr = "invalid stack type R1 off=-48 access_size=58",
3357 .result = REJECT,
3358 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3359 },
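/* LWT program types: direct packet reads are allowed for LWT_IN, LWT_OUT
 * and LWT_XMIT, but only LWT_XMIT may write into the packet; the same
 * bounds-checked write is rejected for LWT_IN and LWT_OUT, and none of
 * them may read skb->tc_classid.
 */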
3360 {
3361 "invalid direct packet write for LWT_IN",
3362 .insns = {
3363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3364 offsetof(struct __sk_buff, data)),
3365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3366 offsetof(struct __sk_buff, data_end)),
3367 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3369 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3370 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3371 BPF_MOV64_IMM(BPF_REG_0, 0),
3372 BPF_EXIT_INSN(),
3373 },
3374 .errstr = "cannot write into packet",
3375 .result = REJECT,
3376 .prog_type = BPF_PROG_TYPE_LWT_IN,
3377 },
3378 {
3379 "invalid direct packet write for LWT_OUT",
3380 .insns = {
3381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3382 offsetof(struct __sk_buff, data)),
3383 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3384 offsetof(struct __sk_buff, data_end)),
3385 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3387 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3388 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3389 BPF_MOV64_IMM(BPF_REG_0, 0),
3390 BPF_EXIT_INSN(),
3391 },
3392 .errstr = "cannot write into packet",
3393 .result = REJECT,
3394 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3395 },
3396 {
3397 "direct packet write for LWT_XMIT",
3398 .insns = {
3399 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3400 offsetof(struct __sk_buff, data)),
3401 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3402 offsetof(struct __sk_buff, data_end)),
3403 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3405 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3406 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3407 BPF_MOV64_IMM(BPF_REG_0, 0),
3408 BPF_EXIT_INSN(),
3409 },
3410 .result = ACCEPT,
3411 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3412 },
3413 {
3414 "direct packet read for LWT_IN",
3415 .insns = {
3416 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3417 offsetof(struct __sk_buff, data)),
3418 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3419 offsetof(struct __sk_buff, data_end)),
3420 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3422 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3423 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3424 BPF_MOV64_IMM(BPF_REG_0, 0),
3425 BPF_EXIT_INSN(),
3426 },
3427 .result = ACCEPT,
3428 .prog_type = BPF_PROG_TYPE_LWT_IN,
3429 },
3430 {
3431 "direct packet read for LWT_OUT",
3432 .insns = {
3433 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3434 offsetof(struct __sk_buff, data)),
3435 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3436 offsetof(struct __sk_buff, data_end)),
3437 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3439 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3440 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3441 BPF_MOV64_IMM(BPF_REG_0, 0),
3442 BPF_EXIT_INSN(),
3443 },
3444 .result = ACCEPT,
3445 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3446 },
3447 {
3448 "direct packet read for LWT_XMIT",
3449 .insns = {
3450 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3451 offsetof(struct __sk_buff, data)),
3452 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3453 offsetof(struct __sk_buff, data_end)),
3454 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3456 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3457 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3458 BPF_MOV64_IMM(BPF_REG_0, 0),
3459 BPF_EXIT_INSN(),
3460 },
3461 .result = ACCEPT,
3462 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3463 },
3464 {
3465 "overlapping checks for direct packet access",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3473 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3476 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3477 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3478 BPF_MOV64_IMM(BPF_REG_0, 0),
3479 BPF_EXIT_INSN(),
3480 },
3481 .result = ACCEPT,
3482 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3483 },
3484 {
3485 "invalid access of tc_classid for LWT_IN",
3486 .insns = {
3487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3488 offsetof(struct __sk_buff, tc_classid)),
3489 BPF_EXIT_INSN(),
3490 },
3491 .result = REJECT,
3492 .errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_LWT_IN,
3493 },
3494 {
3495 "invalid access of tc_classid for LWT_OUT",
3496 .insns = {
3497 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3498 offsetof(struct __sk_buff, tc_classid)),
3499 BPF_EXIT_INSN(),
3500 },
3501 .result = REJECT,
3502 .errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_LWT_OUT,
3503 },
3504 {
3505 "invalid access of tc_classid for LWT_XMIT",
3506 .insns = {
3507 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3508 offsetof(struct __sk_buff, tc_classid)),
3509 BPF_EXIT_INSN(),
3510 },
3511 .result = REJECT,
3512 .errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
3513 },
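/* "helper access to map" tests: a pointer into a map value (value_size
 * == sizeof(struct test_val) == 48 here) may be passed to
 * bpf_probe_read() only with a size that stays inside the value; zero,
 * negative and out-of-bound sizes are rejected with the off/size shown
 * in the errstr.
 */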
3514 {
3515 "helper access to map: full range",
3516 .insns = {
3517 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3519 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3520 BPF_LD_MAP_FD(BPF_REG_1, 0),
3521 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3522 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3523 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3524 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3525 BPF_MOV64_IMM(BPF_REG_3, 0),
3526 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3527 BPF_EXIT_INSN(),
3528 },
3529 .fixup_map2 = { 3 },
3530 .result = ACCEPT,
3531 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3532 },
3533 {
3534 "helper access to map: partial range",
3535 .insns = {
3536 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3538 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3539 BPF_LD_MAP_FD(BPF_REG_1, 0),
3540 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3541 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3542 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3543 BPF_MOV64_IMM(BPF_REG_2, 8),
3544 BPF_MOV64_IMM(BPF_REG_3, 0),
3545 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3546 BPF_EXIT_INSN(),
3547 },
3548 .fixup_map2 = { 3 },
3549 .result = ACCEPT,
3550 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3551 },
3552 {
3553 "helper access to map: empty range",
3554 .insns = {
3555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3557 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3558 BPF_LD_MAP_FD(BPF_REG_1, 0),
3559 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3562 BPF_MOV64_IMM(BPF_REG_2, 0),
3563 BPF_MOV64_IMM(BPF_REG_3, 0),
3564 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3565 BPF_EXIT_INSN(),
3566 },
3567 .fixup_map2 = { 3 },
3568 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3569 .result = REJECT,
3570 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3571 },
3572 {
3573 "helper access to map: out-of-bound range",
3574 .insns = {
3575 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3577 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3578 BPF_LD_MAP_FD(BPF_REG_1, 0),
3579 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3580 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3581 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3582 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3583 BPF_MOV64_IMM(BPF_REG_3, 0),
3584 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3585 BPF_EXIT_INSN(),
3586 },
3587 .fixup_map2 = { 3 },
3588 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3589 .result = REJECT,
3590 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3591 },
3592 {
3593 "helper access to map: negative range",
3594 .insns = {
3595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3597 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3598 BPF_LD_MAP_FD(BPF_REG_1, 0),
3599 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3602 BPF_MOV64_IMM(BPF_REG_2, -8),
3603 BPF_MOV64_IMM(BPF_REG_3, 0),
3604 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3605 BPF_EXIT_INSN(),
3606 },
3607 .fixup_map2 = { 3 },
3608 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3609 .result = REJECT,
3610 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3611 },
3612 {
3613 "helper access to adjusted map (via const imm): full range",
3614 .insns = {
3615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3617 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3618 BPF_LD_MAP_FD(BPF_REG_1, 0),
3619 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3621 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3623 offsetof(struct test_val, foo)),
3624 BPF_MOV64_IMM(BPF_REG_2,
3625 sizeof(struct test_val) -
3626 offsetof(struct test_val, foo)),
3627 BPF_MOV64_IMM(BPF_REG_3, 0),
3628 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3629 BPF_EXIT_INSN(),
3630 },
3631 .fixup_map2 = { 3 },
3632 .result = ACCEPT,
3633 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3634 },
3635 {
3636 "helper access to adjusted map (via const imm): partial range",
3637 .insns = {
3638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3640 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3641 BPF_LD_MAP_FD(BPF_REG_1, 0),
3642 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3643 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3646 offsetof(struct test_val, foo)),
3647 BPF_MOV64_IMM(BPF_REG_2, 8),
3648 BPF_MOV64_IMM(BPF_REG_3, 0),
3649 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3650 BPF_EXIT_INSN(),
3651 },
3652 .fixup_map2 = { 3 },
3653 .result = ACCEPT,
3654 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3655 },
3656 {
3657 "helper access to adjusted map (via const imm): empty range",
3658 .insns = {
3659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3661 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3662 BPF_LD_MAP_FD(BPF_REG_1, 0),
3663 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3665 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3667 offsetof(struct test_val, foo)),
3668 BPF_MOV64_IMM(BPF_REG_2, 0),
3669 BPF_MOV64_IMM(BPF_REG_3, 0),
3670 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3671 BPF_EXIT_INSN(),
3672 },
3673 .fixup_map2 = { 3 },
3674 .errstr = "R1 min value is outside of the array range",
3675 .result = REJECT,
3676 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3677 },
3678 {
3679 "helper access to adjusted map (via const imm): out-of-bound range",
3680 .insns = {
3681 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3683 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3684 BPF_LD_MAP_FD(BPF_REG_1, 0),
3685 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3689 offsetof(struct test_val, foo)),
3690 BPF_MOV64_IMM(BPF_REG_2,
3691 sizeof(struct test_val) -
3692 offsetof(struct test_val, foo) + 8),
3693 BPF_MOV64_IMM(BPF_REG_3, 0),
3694 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3695 BPF_EXIT_INSN(),
3696 },
3697 .fixup_map2 = { 3 },
3698 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3699 .result = REJECT,
3700 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3701 },
3702 {
3703 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3704 .insns = {
3705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3707 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3708 BPF_LD_MAP_FD(BPF_REG_1, 0),
3709 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3712 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3713 offsetof(struct test_val, foo)),
3714 BPF_MOV64_IMM(BPF_REG_2, -8),
3715 BPF_MOV64_IMM(BPF_REG_3, 0),
3716 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3717 BPF_EXIT_INSN(),
3718 },
3719 .fixup_map2 = { 3 },
3720 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3721 .result = REJECT,
3722 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3723 },
3724 {
3725 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3726 .insns = {
3727 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3729 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3730 BPF_LD_MAP_FD(BPF_REG_1, 0),
3731 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3733 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3735 offsetof(struct test_val, foo)),
3736 BPF_MOV64_IMM(BPF_REG_2, -1),
3737 BPF_MOV64_IMM(BPF_REG_3, 0),
3738 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3739 BPF_EXIT_INSN(),
3740 },
3741 .fixup_map2 = { 3 },
3742 .errstr = "R1 min value is outside of the array range",
3743 .result = REJECT,
3744 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3745 },
3746 {
3747 "helper access to adjusted map (via const reg): full range",
3748 .insns = {
3749 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3751 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3752 BPF_LD_MAP_FD(BPF_REG_1, 0),
3753 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3754 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3756 BPF_MOV64_IMM(BPF_REG_3,
3757 offsetof(struct test_val, foo)),
3758 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3759 BPF_MOV64_IMM(BPF_REG_2,
3760 sizeof(struct test_val) -
3761 offsetof(struct test_val, foo)),
3762 BPF_MOV64_IMM(BPF_REG_3, 0),
3763 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3764 BPF_EXIT_INSN(),
3765 },
3766 .fixup_map2 = { 3 },
3767 .result = ACCEPT,
3768 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3769 },
3770 {
3771 "helper access to adjusted map (via const reg): partial range",
3772 .insns = {
3773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3775 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3776 BPF_LD_MAP_FD(BPF_REG_1, 0),
3777 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3779 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3780 BPF_MOV64_IMM(BPF_REG_3,
3781 offsetof(struct test_val, foo)),
3782 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3783 BPF_MOV64_IMM(BPF_REG_2, 8),
3784 BPF_MOV64_IMM(BPF_REG_3, 0),
3785 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3786 BPF_EXIT_INSN(),
3787 },
3788 .fixup_map2 = { 3 },
3789 .result = ACCEPT,
3790 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3791 },
3792 {
3793 "helper access to adjusted map (via const reg): empty range",
3794 .insns = {
3795 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3796 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3797 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3798 BPF_LD_MAP_FD(BPF_REG_1, 0),
3799 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3801 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3802 BPF_MOV64_IMM(BPF_REG_3, 0),
3803 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3804 BPF_MOV64_IMM(BPF_REG_2, 0),
3805 BPF_MOV64_IMM(BPF_REG_3, 0),
3806 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3807 BPF_EXIT_INSN(),
3808 },
3809 .fixup_map2 = { 3 },
3810 .errstr = "R1 min value is outside of the array range",
3811 .result = REJECT,
3812 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3813 },
3814 {
3815 "helper access to adjusted map (via const reg): out-of-bound range",
3816 .insns = {
3817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3819 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3820 BPF_LD_MAP_FD(BPF_REG_1, 0),
3821 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3823 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3824 BPF_MOV64_IMM(BPF_REG_3,
3825 offsetof(struct test_val, foo)),
3826 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3827 BPF_MOV64_IMM(BPF_REG_2,
3828 sizeof(struct test_val) -
3829 offsetof(struct test_val, foo) + 8),
3830 BPF_MOV64_IMM(BPF_REG_3, 0),
3831 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3832 BPF_EXIT_INSN(),
3833 },
3834 .fixup_map2 = { 3 },
3835 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3836 .result = REJECT,
3837 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3838 },
3839 {
3840 "helper access to adjusted map (via const reg): negative range (> adjustment)",
3841 .insns = {
3842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3844 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3845 BPF_LD_MAP_FD(BPF_REG_1, 0),
3846 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3848 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3849 BPF_MOV64_IMM(BPF_REG_3,
3850 offsetof(struct test_val, foo)),
3851 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3852 BPF_MOV64_IMM(BPF_REG_2, -8),
3853 BPF_MOV64_IMM(BPF_REG_3, 0),
3854 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3855 BPF_EXIT_INSN(),
3856 },
3857 .fixup_map2 = { 3 },
3858 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3859 .result = REJECT,
3860 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3861 },
3862 {
3863 "helper access to adjusted map (via const reg): negative range (< adjustment)",
3864 .insns = {
3865 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3866 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3867 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3868 BPF_LD_MAP_FD(BPF_REG_1, 0),
3869 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3870 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3871 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3872 BPF_MOV64_IMM(BPF_REG_3,
3873 offsetof(struct test_val, foo)),
3874 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3875 BPF_MOV64_IMM(BPF_REG_2, -1),
3876 BPF_MOV64_IMM(BPF_REG_3, 0),
3877 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3878 BPF_EXIT_INSN(),
3879 },
3880 .fixup_map2 = { 3 },
3881 .errstr = "R1 min value is outside of the array range",
3882 .result = REJECT,
3883 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3884 },
3885 {
3886 "helper access to adjusted map (via variable): full range",
3887 .insns = {
3888 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3890 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3891 BPF_LD_MAP_FD(BPF_REG_1, 0),
3892 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3893 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3894 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3895 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3896 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3897 offsetof(struct test_val, foo), 4),
3898 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3899 BPF_MOV64_IMM(BPF_REG_2,
3900 sizeof(struct test_val) -
3901 offsetof(struct test_val, foo)),
3902 BPF_MOV64_IMM(BPF_REG_3, 0),
3903 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3904 BPF_EXIT_INSN(),
3905 },
3906 .fixup_map2 = { 3 },
3907 .result = ACCEPT,
3908 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3909 },
3910 {
3911 "helper access to adjusted map (via variable): partial range",
3912 .insns = {
3913 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3915 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3916 BPF_LD_MAP_FD(BPF_REG_1, 0),
3917 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3919 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3920 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3921 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3922 offsetof(struct test_val, foo), 4),
3923 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3924 BPF_MOV64_IMM(BPF_REG_2, 8),
3925 BPF_MOV64_IMM(BPF_REG_3, 0),
3926 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3927 BPF_EXIT_INSN(),
3928 },
3929 .fixup_map2 = { 3 },
3930 .result = ACCEPT,
3931 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3932 },
3933 {
3934 "helper access to adjusted map (via variable): empty range",
3935 .insns = {
3936 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3938 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3939 BPF_LD_MAP_FD(BPF_REG_1, 0),
3940 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3941 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3942 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3943 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3944 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3945 offsetof(struct test_val, foo), 4),
3946 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3947 BPF_MOV64_IMM(BPF_REG_2, 0),
3948 BPF_MOV64_IMM(BPF_REG_3, 0),
3949 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3950 BPF_EXIT_INSN(),
3951 },
3952 .fixup_map2 = { 3 },
3953 .errstr = "R1 min value is outside of the array range",
3954 .result = REJECT,
3955 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3956 },
3957 {
3958 "helper access to adjusted map (via variable): no max check",
3959 .insns = {
3960 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3962 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3963 BPF_LD_MAP_FD(BPF_REG_1, 0),
3964 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3965 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3967 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3968 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3969 BPF_MOV64_IMM(BPF_REG_2, 0),
3970 BPF_MOV64_IMM(BPF_REG_3, 0),
3971 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3972 BPF_EXIT_INSN(),
3973 },
3974 .fixup_map2 = { 3 },
3975 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
3976 .result = REJECT,
3977 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3978 },
3979 {
3980 "helper access to adjusted map (via variable): wrong max check",
3981 .insns = {
3982 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3984 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3985 BPF_LD_MAP_FD(BPF_REG_1, 0),
3986 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3987 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3988 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3989 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3990 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3991 offsetof(struct test_val, foo), 4),
3992 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3993 BPF_MOV64_IMM(BPF_REG_2,
3994 sizeof(struct test_val) -
3995 offsetof(struct test_val, foo) + 1),
3996 BPF_MOV64_IMM(BPF_REG_3, 0),
3997 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3998 BPF_EXIT_INSN(),
3999 },
4000 .fixup_map2 = { 3 },
4001 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4002 .result = REJECT,
4003 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4004 },
4005 {
4006 "map element value is preserved across register spilling",
4007 .insns = {
4008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4010 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4011 BPF_LD_MAP_FD(BPF_REG_1, 0),
4012 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4014 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4015 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4017 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4018 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4019 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4020 BPF_EXIT_INSN(),
4021 },
4022 .fixup_map2 = { 3 },
4023 .errstr_unpriv = "R0 leaks addr",
4024 .result = ACCEPT,
4025 .result_unpriv = REJECT,
4026 },
4027 {
4028 "map element value or null is marked on register spilling",
4029 .insns = {
4030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4032 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4033 BPF_LD_MAP_FD(BPF_REG_1, 0),
4034 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4037 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4039 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4040 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4041 BPF_EXIT_INSN(),
4042 },
4043 .fixup_map2 = { 3 },
4044 .errstr_unpriv = "R0 leaks addr",
4045 .result = ACCEPT,
4046 .result_unpriv = REJECT,
4047 },
4048 {
4049 "map element value store of cleared call register",
4050 .insns = {
4051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4053 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4054 BPF_LD_MAP_FD(BPF_REG_1, 0),
4055 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4058 BPF_EXIT_INSN(),
4059 },
4060 .fixup_map2 = { 3 },
4061 .errstr_unpriv = "R1 !read_ok",
4062 .errstr = "R1 !read_ok",
4063 .result = REJECT,
4064 .result_unpriv = REJECT,
4065 },
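	/* The unaligned map value accesses below are only accepted on
	 * architectures with efficient unaligned access, hence the
	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS flag.
	 */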
4066 {
4067 "map element value with unaligned store",
4068 .insns = {
4069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4071 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4072 BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4074 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4076 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4079 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4080 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4081 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4082 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4084 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4085 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4087 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4089 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4090 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4091 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4092 BPF_EXIT_INSN(),
4093 },
4094 .fixup_map2 = { 3 },
4095 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4096 .result = ACCEPT,
4097 .result_unpriv = REJECT,
4098 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4099 },
4100 {
4101 "map element value with unaligned load",
4102 .insns = {
4103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4105 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4106 BPF_LD_MAP_FD(BPF_REG_1, 0),
4107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4109 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4110 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4112 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4113 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4114 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4115 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4116 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4118 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4119 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4120 BPF_EXIT_INSN(),
4121 },
4122 .fixup_map2 = { 3 },
4123 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4124 .result = ACCEPT,
4125 .result_unpriv = REJECT,
4126 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4127 },
4128 {
4129 "map element value illegal alu op, 1",
4130 .insns = {
4131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4133 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4134 BPF_LD_MAP_FD(BPF_REG_1, 0),
4135 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4137 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4138 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4139 BPF_EXIT_INSN(),
4140 },
4141 .fixup_map2 = { 3 },
4142 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4143 .errstr = "invalid mem access 'inv'",
4144 .result = REJECT,
4145 .result_unpriv = REJECT,
4146 },
4147 {
4148 "map element value illegal alu op, 2",
4149 .insns = {
4150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4152 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4153 BPF_LD_MAP_FD(BPF_REG_1, 0),
4154 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4156 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4157 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4158 BPF_EXIT_INSN(),
4159 },
4160 .fixup_map2 = { 3 },
4161 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4162 .errstr = "invalid mem access 'inv'",
4163 .result = REJECT,
4164 .result_unpriv = REJECT,
4165 },
4166 {
4167 "map element value illegal alu op, 3",
4168 .insns = {
4169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4171 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4172 BPF_LD_MAP_FD(BPF_REG_1, 0),
4173 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4175 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4176 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4177 BPF_EXIT_INSN(),
4178 },
4179 .fixup_map2 = { 3 },
4180 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4181 .errstr = "invalid mem access 'inv'",
4182 .result = REJECT,
4183 .result_unpriv = REJECT,
4184 },
4185 {
4186 "map element value illegal alu op, 4",
4187 .insns = {
4188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4190 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4191 BPF_LD_MAP_FD(BPF_REG_1, 0),
4192 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4194 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4196 BPF_EXIT_INSN(),
4197 },
4198 .fixup_map2 = { 3 },
4199 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4200 .errstr = "invalid mem access 'inv'",
4201 .result = REJECT,
4202 .result_unpriv = REJECT,
4203 },
4204 {
4205 "map element value illegal alu op, 5",
4206 .insns = {
4207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4209 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4210 BPF_LD_MAP_FD(BPF_REG_1, 0),
4211 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4213 BPF_MOV64_IMM(BPF_REG_3, 4096),
4214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4216 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4217 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4220 BPF_EXIT_INSN(),
4221 },
4222 .fixup_map2 = { 3 },
4223 .errstr_unpriv = "R0 invalid mem access 'inv'",
4224 .errstr = "R0 invalid mem access 'inv'",
4225 .result = REJECT,
4226 .result_unpriv = REJECT,
4227 },
4228 {
4229 "map element value is preserved across register spilling",
4230 .insns = {
4231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4233 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4234 BPF_LD_MAP_FD(BPF_REG_1, 0),
4235 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4236 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
4238 offsetof(struct test_val, foo)),
4239 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4240 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4242 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4243 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4244 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4245 BPF_EXIT_INSN(),
4246 },
4247 .fixup_map2 = { 3 },
4248 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4249 .result = ACCEPT,
4250 .result_unpriv = REJECT,
4251 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4252 },
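	/* The "variable memory" tests bound a runtime-variable size register
	 * with an AND mask or conditional jumps before handing it to a helper,
	 * checking that only properly bounded sizes are accepted.
	 */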
4253 {
4254 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
4255 .insns = {
4256 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4258 BPF_MOV64_IMM(BPF_REG_0, 0),
4259 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4260 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4261 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4262 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4263 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4264 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4265 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4266 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4267 BPF_MOV64_IMM(BPF_REG_2, 16),
4268 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4269 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4270 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4271 BPF_MOV64_IMM(BPF_REG_4, 0),
4272 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4273 BPF_MOV64_IMM(BPF_REG_3, 0),
4274 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4275 BPF_MOV64_IMM(BPF_REG_0, 0),
4276 BPF_EXIT_INSN(),
4277 },
4278 .result = ACCEPT,
4279 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4280 },
4281 {
4282 "helper access to variable memory: stack, bitwise AND, zero included",
4283 .insns = {
4284 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4286 BPF_MOV64_IMM(BPF_REG_2, 16),
4287 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4288 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4289 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4290 BPF_MOV64_IMM(BPF_REG_3, 0),
4291 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4292 BPF_EXIT_INSN(),
4293 },
4294 .errstr = "invalid stack type R1 off=-64 access_size=0",
4295 .result = REJECT,
4296 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4297 },
4298 {
4299 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4300 .insns = {
4301 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4303 BPF_MOV64_IMM(BPF_REG_2, 16),
4304 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4305 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4306 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4307 BPF_MOV64_IMM(BPF_REG_4, 0),
4308 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4309 BPF_MOV64_IMM(BPF_REG_3, 0),
4310 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4311 BPF_MOV64_IMM(BPF_REG_0, 0),
4312 BPF_EXIT_INSN(),
4313 },
4314 .errstr = "invalid stack type R1 off=-64 access_size=65",
4315 .result = REJECT,
4316 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4317 },
4318 {
4319 "helper access to variable memory: stack, JMP, correct bounds",
4320 .insns = {
4321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4323 BPF_MOV64_IMM(BPF_REG_0, 0),
4324 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4325 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4326 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4327 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4329 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4330 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4331 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4332 BPF_MOV64_IMM(BPF_REG_2, 16),
4333 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4334 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4335 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4336 BPF_MOV64_IMM(BPF_REG_4, 0),
4337 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4338 BPF_MOV64_IMM(BPF_REG_3, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4340 BPF_MOV64_IMM(BPF_REG_0, 0),
4341 BPF_EXIT_INSN(),
4342 },
4343 .result = ACCEPT,
4344 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4345 },
4346 {
4347 "helper access to variable memory: stack, JMP (signed), correct bounds",
4348 .insns = {
4349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4351 BPF_MOV64_IMM(BPF_REG_0, 0),
4352 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4354 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4355 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4356 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4357 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4359 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4360 BPF_MOV64_IMM(BPF_REG_2, 16),
4361 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4362 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4363 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4364 BPF_MOV64_IMM(BPF_REG_4, 0),
4365 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4366 BPF_MOV64_IMM(BPF_REG_3, 0),
4367 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4368 BPF_MOV64_IMM(BPF_REG_0, 0),
4369 BPF_EXIT_INSN(),
4370 },
4371 .result = ACCEPT,
4372 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4373 },
4374 {
4375 "helper access to variable memory: stack, JMP, bounds + offset",
4376 .insns = {
4377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4379 BPF_MOV64_IMM(BPF_REG_2, 16),
4380 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4381 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4382 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4383 BPF_MOV64_IMM(BPF_REG_4, 0),
4384 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4386 BPF_MOV64_IMM(BPF_REG_3, 0),
4387 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4388 BPF_MOV64_IMM(BPF_REG_0, 0),
4389 BPF_EXIT_INSN(),
4390 },
4391 .errstr = "invalid stack type R1 off=-64 access_size=65",
4392 .result = REJECT,
4393 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4394 },
4395 {
4396 "helper access to variable memory: stack, JMP, wrong max",
4397 .insns = {
4398 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4400 BPF_MOV64_IMM(BPF_REG_2, 16),
4401 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4402 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4403 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4404 BPF_MOV64_IMM(BPF_REG_4, 0),
4405 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4406 BPF_MOV64_IMM(BPF_REG_3, 0),
4407 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4408 BPF_MOV64_IMM(BPF_REG_0, 0),
4409 BPF_EXIT_INSN(),
4410 },
4411 .errstr = "invalid stack type R1 off=-64 access_size=65",
4412 .result = REJECT,
4413 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4414 },
4415 {
4416 "helper access to variable memory: stack, JMP, no max check",
4417 .insns = {
4418 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4419 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4420 BPF_MOV64_IMM(BPF_REG_2, 16),
4421 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4422 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4423 BPF_MOV64_IMM(BPF_REG_4, 0),
4424 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4425 BPF_MOV64_IMM(BPF_REG_3, 0),
4426 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4427 BPF_MOV64_IMM(BPF_REG_0, 0),
4428 BPF_EXIT_INSN(),
4429 },
4430 .errstr = "R2 unbounded memory access",
4431 .result = REJECT,
4432 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4433 },
4434 {
4435 "helper access to variable memory: stack, JMP, no min check",
4436 .insns = {
4437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4439 BPF_MOV64_IMM(BPF_REG_2, 16),
4440 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4441 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4442 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4443 BPF_MOV64_IMM(BPF_REG_3, 0),
4444 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4445 BPF_MOV64_IMM(BPF_REG_0, 0),
4446 BPF_EXIT_INSN(),
4447 },
4448 .errstr = "invalid stack type R1 off=-64 access_size=0",
4449 .result = REJECT,
4450 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4451 },
4452 {
4453 "helper access to variable memory: stack, JMP (signed), no min check",
4454 .insns = {
4455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4457 BPF_MOV64_IMM(BPF_REG_2, 16),
4458 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4459 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4460 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4461 BPF_MOV64_IMM(BPF_REG_3, 0),
4462 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4463 BPF_MOV64_IMM(BPF_REG_0, 0),
4464 BPF_EXIT_INSN(),
4465 },
4466 .errstr = "R2 min value is negative",
4467 .result = REJECT,
4468 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4469 },
4470 {
4471 "helper access to variable memory: map, JMP, correct bounds",
4472 .insns = {
4473 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4475 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4476 BPF_LD_MAP_FD(BPF_REG_1, 0),
4477 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4478 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4480 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4481 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4482 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4483 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4484 sizeof(struct test_val), 4),
4485 BPF_MOV64_IMM(BPF_REG_4, 0),
4486 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4487 BPF_MOV64_IMM(BPF_REG_3, 0),
4488 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4489 BPF_MOV64_IMM(BPF_REG_0, 0),
4490 BPF_EXIT_INSN(),
4491 },
4492 .fixup_map2 = { 3 },
4493 .result = ACCEPT,
4494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4495 },
4496 {
4497 "helper access to variable memory: map, JMP, wrong max",
4498 .insns = {
4499 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4501 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4502 BPF_LD_MAP_FD(BPF_REG_1, 0),
4503 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4504 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4505 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4506 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4507 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4508 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4509 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4510 sizeof(struct test_val) + 1, 4),
4511 BPF_MOV64_IMM(BPF_REG_4, 0),
4512 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4513 BPF_MOV64_IMM(BPF_REG_3, 0),
4514 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4515 BPF_MOV64_IMM(BPF_REG_0, 0),
4516 BPF_EXIT_INSN(),
4517 },
4518 .fixup_map2 = { 3 },
4519 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4520 .result = REJECT,
4521 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4522 },
4523 {
4524 "helper access to variable memory: map adjusted, JMP, correct bounds",
4525 .insns = {
4526 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4528 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4529 BPF_LD_MAP_FD(BPF_REG_1, 0),
4530 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4531 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4534 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4535 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4536 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4537 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4538 sizeof(struct test_val) - 20, 4),
4539 BPF_MOV64_IMM(BPF_REG_4, 0),
4540 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4541 BPF_MOV64_IMM(BPF_REG_3, 0),
4542 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4543 BPF_MOV64_IMM(BPF_REG_0, 0),
4544 BPF_EXIT_INSN(),
4545 },
4546 .fixup_map2 = { 3 },
4547 .result = ACCEPT,
4548 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4549 },
4550 {
4551 "helper access to variable memory: map adjusted, JMP, wrong max",
4552 .insns = {
4553 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4555 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4556 BPF_LD_MAP_FD(BPF_REG_1, 0),
4557 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4559 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4561 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4562 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4563 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4564 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4565 sizeof(struct test_val) - 19, 4),
4566 BPF_MOV64_IMM(BPF_REG_4, 0),
4567 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4568 BPF_MOV64_IMM(BPF_REG_3, 0),
4569 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4570 BPF_MOV64_IMM(BPF_REG_0, 0),
4571 BPF_EXIT_INSN(),
4572 },
4573 .fixup_map2 = { 3 },
4574 .errstr = "R1 min value is outside of the array range",
4575 .result = REJECT,
4576 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4577 },
4578 {
4579 "helper access to variable memory: size > 0 not allowed on NULL",
4580 .insns = {
4581 BPF_MOV64_IMM(BPF_REG_1, 0),
4582 BPF_MOV64_IMM(BPF_REG_2, 0),
4583 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4584 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4585 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4586 BPF_MOV64_IMM(BPF_REG_3, 0),
4587 BPF_MOV64_IMM(BPF_REG_4, 0),
4588 BPF_MOV64_IMM(BPF_REG_5, 0),
4589 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4590 BPF_EXIT_INSN(),
4591 },
4592 .errstr = "R1 type=imm expected=fp",
4593 .result = REJECT,
4594 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4595 },
4596 {
4597 "helper access to variable memory: size = 0 not allowed on != NULL",
4598 .insns = {
4599 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4601 BPF_MOV64_IMM(BPF_REG_2, 0),
4602 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4603 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4604 BPF_MOV64_IMM(BPF_REG_3, 0),
4605 BPF_MOV64_IMM(BPF_REG_4, 0),
4606 BPF_MOV64_IMM(BPF_REG_5, 0),
4607 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4608 BPF_EXIT_INSN(),
4609 },
4610 .errstr = "invalid stack type R1 off=-8 access_size=0",
4611 .result = REJECT,
4612 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4613 },
4614 {
4615 "helper access to variable memory: 8 bytes leak",
4616 .insns = {
4617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4619 BPF_MOV64_IMM(BPF_REG_0, 0),
4620 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4621 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4622 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4623 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4624 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4625 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4626 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4627 BPF_MOV64_IMM(BPF_REG_2, 0),
4628 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4629 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4630 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4632 BPF_MOV64_IMM(BPF_REG_3, 0),
4633 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4634 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4635 BPF_EXIT_INSN(),
4636 },
4637 .errstr = "invalid indirect read from stack off -64+32 size 64",
4638 .result = REJECT,
4639 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4640 },
4641 {
4642 "helper access to variable memory: 8 bytes no leak (init memory)",
4643 .insns = {
4644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4645 BPF_MOV64_IMM(BPF_REG_0, 0),
4646 BPF_MOV64_IMM(BPF_REG_0, 0),
4647 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4648 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4649 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4650 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4651 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4652 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4653 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4654 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4655 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4656 BPF_MOV64_IMM(BPF_REG_2, 0),
4657 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4659 BPF_MOV64_IMM(BPF_REG_3, 0),
4660 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4661 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4662 BPF_EXIT_INSN(),
4663 },
4664 .result = ACCEPT,
4665 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4666 },
4667 {
4668 "invalid and of negative number",
4669 .insns = {
4670 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4671 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4673 BPF_LD_MAP_FD(BPF_REG_1, 0),
4674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4675 BPF_FUNC_map_lookup_elem),
4676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4677 BPF_MOV64_IMM(BPF_REG_1, 6),
4678 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4679 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4680 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4681 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4682 offsetof(struct test_val, foo)),
4683 BPF_EXIT_INSN(),
4684 },
4685 .fixup_map2 = { 3 },
4686 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4687 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4688 .result = REJECT,
4689 .result_unpriv = REJECT,
4690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4691 },
4692 {
4693 "invalid range check",
4694 .insns = {
4695 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4696 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4698 BPF_LD_MAP_FD(BPF_REG_1, 0),
4699 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4700 BPF_FUNC_map_lookup_elem),
4701 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4702 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4703 BPF_MOV64_IMM(BPF_REG_9, 1),
4704 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4705 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4706 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4707 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4708 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4709 BPF_MOV32_IMM(BPF_REG_3, 1),
4710 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4711 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4712 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4713 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4714 BPF_MOV64_REG(BPF_REG_0, 0),
4715 BPF_EXIT_INSN(),
4716 },
4717 .fixup_map2 = { 3 },
4718 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4719 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4720 .result = REJECT,
4721 .result_unpriv = REJECT,
4722 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4723 }
4724 };
4725
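/* Return the effective program length by scanning backwards from
 * MAX_INSNS for the last non-zero instruction in the fixed-size array.
 */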
4726 static int probe_filter_length(const struct bpf_insn *fp)
4727 {
4728 int len;
4729
4730 for (len = MAX_INSNS - 1; len > 0; --len)
4731 if (fp[len].code != 0 || fp[len].imm != 0)
4732 break;
4733 return len + 1;
4734 }
4735
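/* Create a small hash map with 8-byte keys; only the value size
 * matters for these verifier-only tests.
 */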
4736 static int create_map(uint32_t size_value, uint32_t max_elem)
4737 {
4738 int fd;
4739
4740 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
4741 size_value, max_elem, BPF_F_NO_PREALLOC);
4742 if (fd < 0)
4743 printf("Failed to create hash map '%s'!\n", strerror(errno));
4744
4745 return fd;
4746 }
4747
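/* Create a small prog array for tests that patch in a program-array
 * fd via .fixup_prog.
 */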
4748 static int create_prog_array(void)
4749 {
4750 int fd;
4751
4752 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
4753 sizeof(int), 4, 0);
4754 if (fd < 0)
4755 printf("Failed to create prog array '%s'!\n", strerror(errno));
4756
4757 return fd;
4758 }
4759
4760 static char bpf_vlog[32768];
4761
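/* Patch the BPF_LD_MAP_FD placeholder instructions (imm 0) at the
 * indices recorded in the per-test fixup arrays with freshly created
 * map / prog-array fds.
 */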
4762 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4763 int *fd_f1, int *fd_f2, int *fd_f3)
4764 {
4765 int *fixup_map1 = test->fixup_map1;
4766 int *fixup_map2 = test->fixup_map2;
4767 int *fixup_prog = test->fixup_prog;
4768
4769 /* Allocating HTs with 1 elem is fine here, since we only exercise
4770 * the verifier and never do a runtime lookup, so the only thing
4771 * that really matters in this case is the value size.
4772 */
4773 if (*fixup_map1) {
4774 *fd_f1 = create_map(sizeof(long long), 1);
4775 do {
4776 prog[*fixup_map1].imm = *fd_f1;
4777 fixup_map1++;
4778 } while (*fixup_map1);
4779 }
4780
4781 if (*fixup_map2) {
4782 *fd_f2 = create_map(sizeof(struct test_val), 1);
4783 do {
4784 prog[*fixup_map2].imm = *fd_f2;
4785 fixup_map2++;
4786 } while (*fixup_map2);
4787 }
4788
4789 if (*fixup_prog) {
4790 *fd_f3 = create_prog_array();
4791 do {
4792 prog[*fixup_prog].imm = *fd_f3;
4793 fixup_prog++;
4794 } while (*fixup_prog);
4795 }
4796 }
4797
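/* Load a single test program and compare the verifier verdict (and,
 * for expected rejections, the verifier log) against the test's
 * expectations.
 */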
4798 static void do_test_single(struct bpf_test *test, bool unpriv,
4799 int *passes, int *errors)
4800 {
4801 int fd_prog, expected_ret, reject_from_alignment;
4802 struct bpf_insn *prog = test->insns;
4803 int prog_len = probe_filter_length(prog);
4804 int prog_type = test->prog_type;
4805 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
4806 const char *expected_err;
4807
4808 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
4809
4810 fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
4811 prog, prog_len, "GPL", 0, bpf_vlog,
4812 sizeof(bpf_vlog));
4813
4814 expected_ret = unpriv && test->result_unpriv != UNDEF ?
4815 test->result_unpriv : test->result;
4816 expected_err = unpriv && test->errstr_unpriv ?
4817 test->errstr_unpriv : test->errstr;
4818
4819 reject_from_alignment = fd_prog < 0 &&
4820 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
4821 strstr(bpf_vlog, "Unknown alignment.");
4822 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
4823 if (reject_from_alignment) {
4824 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
4825 strerror(errno));
4826 goto fail_log;
4827 }
4828 #endif
4829 if (expected_ret == ACCEPT) {
4830 if (fd_prog < 0 && !reject_from_alignment) {
4831 printf("FAIL\nFailed to load prog '%s'!\n",
4832 strerror(errno));
4833 goto fail_log;
4834 }
4835 } else {
4836 if (fd_prog >= 0) {
4837 printf("FAIL\nUnexpected success to load!\n");
4838 goto fail_log;
4839 }
4840 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
4841 printf("FAIL\nUnexpected error message!\n");
4842 goto fail_log;
4843 }
4844 }
4845
4846 (*passes)++;
4847 printf("OK%s\n", reject_from_alignment ?
4848 " (NOTE: reject due to unknown alignment)" : "");
4849 close_fds:
4850 close(fd_prog);
4851 close(fd_f1);
4852 close(fd_f2);
4853 close(fd_f3);
4854 sched_yield();
4855 return;
4856 fail_log:
4857 (*errors)++;
4858 printf("%s", bpf_vlog);
4859 goto close_fds;
4860 }
4861
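/* Report whether CAP_SYS_ADMIN is present in the effective
 * capability set.
 */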
4862 static bool is_admin(void)
4863 {
4864 cap_t caps;
4865 cap_flag_value_t sysadmin = CAP_CLEAR;
4866 const cap_value_t cap_val = CAP_SYS_ADMIN;
4867
4868 #ifdef CAP_IS_SUPPORTED
4869 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
4870 perror("CAP_IS_SUPPORTED");
4871 return false;
4872 }
4873 #endif
4874 caps = cap_get_proc();
4875 if (!caps) {
4876 perror("cap_get_proc");
4877 return false;
4878 }
4879 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
4880 perror("cap_get_flag");
4881 if (cap_free(caps))
4882 perror("cap_free");
4883 return (sysadmin == CAP_SET);
4884 }
4885
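/* Raise or drop CAP_SYS_ADMIN in the effective set so that a
 * privileged run can also exercise the unprivileged code paths.
 */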
4886 static int set_admin(bool admin)
4887 {
4888 cap_t caps;
4889 const cap_value_t cap_val = CAP_SYS_ADMIN;
4890 int ret = -1;
4891
4892 caps = cap_get_proc();
4893 if (!caps) {
4894 perror("cap_get_proc");
4895 return -1;
4896 }
4897 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
4898 admin ? CAP_SET : CAP_CLEAR)) {
4899 perror("cap_set_flag");
4900 goto out;
4901 }
4902 if (cap_set_proc(caps)) {
4903 perror("cap_set_proc");
4904 goto out;
4905 }
4906 ret = 0;
4907 out:
4908 if (cap_free(caps))
4909 perror("cap_free");
4910 return ret;
4911 }
4912
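/* Run tests in [from, to): tests without an explicit prog_type are
 * additionally run unprivileged; when invoked by a non-root user only
 * those tests are executed.
 */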
4913 static int do_test(bool unpriv, unsigned int from, unsigned int to)
4914 {
4915 int i, passes = 0, errors = 0;
4916
4917 for (i = from; i < to; i++) {
4918 struct bpf_test *test = &tests[i];
4919
4920 /* Program types that are not supported by non-root are
4921 * skipped right away.
4922 */
4923 if (!test->prog_type) {
4924 if (!unpriv)
4925 set_admin(false);
4926 printf("#%d/u %s ", i, test->descr);
4927 do_test_single(test, true, &passes, &errors);
4928 if (!unpriv)
4929 set_admin(true);
4930 }
4931
4932 if (!unpriv) {
4933 printf("#%d/p %s ", i, test->descr);
4934 do_test_single(test, false, &passes, &errors);
4935 }
4936 }
4937
4938 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
4939 return errors ? -errors : 0;
4940 }
4941
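/* Parse an optional test index or inclusive range from argv, set
 * RLIMIT_MEMLOCK (unlimited for root, 1 MiB otherwise) and run the
 * selected tests.
 */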
4942 int main(int argc, char **argv)
4943 {
4944 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
4945 struct rlimit rlim = { 1 << 20, 1 << 20 };
4946 unsigned int from = 0, to = ARRAY_SIZE(tests);
4947 bool unpriv = !is_admin();
4948
4949 if (argc == 3) {
4950 unsigned int l = atoi(argv[argc - 2]);
4951 unsigned int u = atoi(argv[argc - 1]);
4952
4953 if (l < to && u < to) {
4954 from = l;
4955 to = u + 1;
4956 }
4957 } else if (argc == 2) {
4958 unsigned int t = atoi(argv[argc - 1]);
4959
4960 if (t < to) {
4961 from = t;
4962 to = t + 1;
4963 }
4964 }
4965
4966 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
4967 return do_test(unpriv, from, to);
4968 }
4969