1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Testsuite for eBPF verifier
4 *
5 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
6 * Copyright (c) 2017 Facebook
7 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 */
9
10 #include <endian.h>
11 #include <asm/types.h>
12 #include <linux/types.h>
13 #include <stdint.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <errno.h>
18 #include <string.h>
19 #include <stddef.h>
20 #include <stdbool.h>
21 #include <sched.h>
22 #include <limits.h>
23 #include <assert.h>
24
25 #include <sys/capability.h>
26
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
31 #include <linux/if_ether.h>
32 #include <linux/btf.h>
33
34 #include <bpf/bpf.h>
35 #include <bpf/libbpf.h>
36
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "test_btf.h"
48 #include "../../../include/linux/filter.h"
49
50 #define MAX_INSNS BPF_MAXINSNS
51 #define MAX_TEST_INSNS 1000000
52 #define MAX_FIXUPS 8
53 #define MAX_NR_MAPS 21
54 #define MAX_TEST_RUNS 8
55 #define POINTER_VALUE 0xcafe4all
56 #define TEST_DATA_LEN 64
57
58 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
59 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
60
61 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
62 static bool unpriv_disabled = false;
63 static int skips;
64 static bool verbose = false;
65
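/* One entry per test case. insns[] holds a fixed-size program, or fill_helper
 * generates a larger one into fill_insns (prog_len then gives its length).
 * The fixup_* arrays list instruction indices (terminated by 0) whose imm
 * field is patched at run time with the fd of a freshly created map.
 * errstr/errstr_unpriv are substrings expected in the verifier log, and
 * result/result_unpriv give the expected outcome for privileged and
 * unprivileged loads.
 */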
66 struct bpf_test {
67 const char *descr;
68 struct bpf_insn insns[MAX_INSNS];
69 struct bpf_insn *fill_insns;
70 int fixup_map_hash_8b[MAX_FIXUPS];
71 int fixup_map_hash_48b[MAX_FIXUPS];
72 int fixup_map_hash_16b[MAX_FIXUPS];
73 int fixup_map_array_48b[MAX_FIXUPS];
74 int fixup_map_sockmap[MAX_FIXUPS];
75 int fixup_map_sockhash[MAX_FIXUPS];
76 int fixup_map_xskmap[MAX_FIXUPS];
77 int fixup_map_stacktrace[MAX_FIXUPS];
78 int fixup_prog1[MAX_FIXUPS];
79 int fixup_prog2[MAX_FIXUPS];
80 int fixup_map_in_map[MAX_FIXUPS];
81 int fixup_cgroup_storage[MAX_FIXUPS];
82 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
83 int fixup_map_spin_lock[MAX_FIXUPS];
84 int fixup_map_array_ro[MAX_FIXUPS];
85 int fixup_map_array_wo[MAX_FIXUPS];
86 int fixup_map_array_small[MAX_FIXUPS];
87 int fixup_sk_storage_map[MAX_FIXUPS];
88 int fixup_map_event_output[MAX_FIXUPS];
89 int fixup_map_reuseport_array[MAX_FIXUPS];
90 int fixup_map_ringbuf[MAX_FIXUPS];
91 const char *errstr;
92 const char *errstr_unpriv;
93 uint32_t insn_processed;
94 int prog_len;
95 enum {
96 UNDEF,
97 ACCEPT,
98 REJECT,
99 VERBOSE_ACCEPT,
100 } result, result_unpriv;
101 enum bpf_prog_type prog_type;
102 uint8_t flags;
103 void (*fill_helper)(struct bpf_test *self);
104 uint8_t runs;
105 #define bpf_testdata_struct_t \
106 struct { \
107 uint32_t retval, retval_unpriv; \
108 union { \
109 __u8 data[TEST_DATA_LEN]; \
110 __u64 data64[TEST_DATA_LEN / 8]; \
111 }; \
112 }
113 union {
114 bpf_testdata_struct_t;
115 bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
116 };
117 enum bpf_attach_type expected_attach_type;
118 const char *kfunc;
119 };
120
121 /* Note we want this to be 64 bit aligned so that the end of our array is
122 * actually the end of the structure.
123 */
124 #define MAX_ENTRIES 11
125
126 struct test_val {
127 unsigned int index;
128 int foo[MAX_ENTRIES];
129 };
130
131 struct other_val {
132 long long foo;
133 long long bar;
134 };
135
136 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
137 {
138 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
139 #define PUSH_CNT 51
140 /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
141 unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
142 struct bpf_insn *insn = self->fill_insns;
143 int i = 0, j, k = 0;
144
145 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
146 loop:
147 for (j = 0; j < PUSH_CNT; j++) {
148 insn[i++] = BPF_LD_ABS(BPF_B, 0);
149 /* jump to error label */
150 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
151 i++;
152 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
153 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
154 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
155 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
156 BPF_FUNC_skb_vlan_push),
157 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
158 i++;
159 }
160
161 for (j = 0; j < PUSH_CNT; j++) {
162 insn[i++] = BPF_LD_ABS(BPF_B, 0);
163 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
164 i++;
165 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
166 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
167 BPF_FUNC_skb_vlan_pop),
168 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
169 i++;
170 }
171 if (++k < 5)
172 goto loop;
173
174 for (; i < len - 3; i++)
175 insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
176 insn[len - 3] = BPF_JMP_A(1);
177 /* error label */
178 insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
179 insn[len - 1] = BPF_EXIT_INSN();
180 self->prog_len = len;
181 }
182
183 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
184 {
185 struct bpf_insn *insn = self->fill_insns;
186 /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
187 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
188 * to extend the error value of the inlined ld_abs sequence which then
189 * contains 7 insns. so, set the divisor to 7 so the testcase can
190 * work on all arches.
191 */
192 unsigned int len = (1 << 15) / 7;
193 int i = 0;
194
195 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
196 insn[i++] = BPF_LD_ABS(BPF_B, 0);
197 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
198 i++;
199 while (i < len - 1)
200 insn[i++] = BPF_LD_ABS(BPF_B, 1);
201 insn[i] = BPF_EXIT_INSN();
202 self->prog_len = i + 1;
203 }
204
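/* On entry self->retval holds the number of instructions to emit; the helper
 * fills the program with random BPF_LD_IMM64/XOR pairs, folds the expected
 * 64-bit XOR result into 32 bits and stores it back in self->retval.
 */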
205 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
206 {
207 struct bpf_insn *insn = self->fill_insns;
208 uint64_t res = 0;
209 int i = 0;
210
211 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
212 while (i < self->retval) {
213 uint64_t val = bpf_semi_rand_get();
214 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
215
216 res ^= val;
217 insn[i++] = tmp[0];
218 insn[i++] = tmp[1];
219 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
220 }
221 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
222 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
223 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
224 insn[i] = BPF_EXIT_INSN();
225 self->prog_len = i + 1;
226 res ^= (res >> 32);
227 self->retval = (uint32_t)res;
228 }
229
230 #define MAX_JMP_SEQ 8192
231
232 /* test the sequence of 8k jumps */
233 static void bpf_fill_scale1(struct bpf_test *self)
234 {
235 struct bpf_insn *insn = self->fill_insns;
236 int i = 0, k = 0;
237
238 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
239 /* test to check that the long sequence of jumps is acceptable */
240 while (k++ < MAX_JMP_SEQ) {
241 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
242 BPF_FUNC_get_prandom_u32);
243 insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
244 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
245 insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
246 -8 * (k % 64 + 1));
247 }
248 /* is_state_visited() doesn't allocate state for pruning for every jump.
249 * Hence multiply jmps by 4 to accommodate that heuristic
250 */
251 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
252 insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
253 insn[i] = BPF_EXIT_INSN();
254 self->prog_len = i + 1;
255 self->retval = 42;
256 }
257
258 /* test the sequence of 8k jumps in the innermost function (function depth 8) */
259 static void bpf_fill_scale2(struct bpf_test *self)
260 {
261 struct bpf_insn *insn = self->fill_insns;
262 int i = 0, k = 0;
263
264 #define FUNC_NEST 7
265 for (k = 0; k < FUNC_NEST; k++) {
266 insn[i++] = BPF_CALL_REL(1);
267 insn[i++] = BPF_EXIT_INSN();
268 }
269 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
270 /* test to check that the long sequence of jumps is acceptable */
271 k = 0;
272 while (k++ < MAX_JMP_SEQ) {
273 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
274 BPF_FUNC_get_prandom_u32);
275 insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
276 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
277 insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
278 -8 * (k % (64 - 4 * FUNC_NEST) + 1));
279 }
280 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
281 insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
282 insn[i] = BPF_EXIT_INSN();
283 self->prog_len = i + 1;
284 self->retval = 42;
285 }
286
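/* self->retval selects the filler: 1 for the flat 8k-jump sequence, 2 for the
 * same sequence nested FUNC_NEST calls deep.
 */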
287 static void bpf_fill_scale(struct bpf_test *self)
288 {
289 switch (self->retval) {
290 case 1:
291 return bpf_fill_scale1(self);
292 case 2:
293 return bpf_fill_scale2(self);
294 default:
295 self->prog_len = 0;
296 break;
297 }
298 }
299
300 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
301 #define BPF_SK_LOOKUP(func) \
302 /* struct bpf_sock_tuple tuple = {} */ \
303 BPF_MOV64_IMM(BPF_REG_2, 0), \
304 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
305 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
307 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
308 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
309 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
310 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
313 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
314 BPF_MOV64_IMM(BPF_REG_4, 0), \
315 BPF_MOV64_IMM(BPF_REG_5, 0), \
316 BPF_EMIT_CALL(BPF_FUNC_ ## func)
317
318 /* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default
319 * return value to 0 and does the necessary preparation for direct packet
320 * access through r2. The allowed access range is 8 bytes.
321 */
322 #define BPF_DIRECT_PKT_R2 \
323 BPF_MOV64_IMM(BPF_REG_0, 0), \
324 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
325 offsetof(struct __sk_buff, data)), \
326 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
327 offsetof(struct __sk_buff, data_end)), \
328 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
330 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
331 BPF_EXIT_INSN()
332
333 /* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
334 * positive u32 and zero-extends it to 64 bits.
335 */
336 #define BPF_RAND_UEXT_R7 \
337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
338 BPF_FUNC_get_prandom_u32), \
339 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
340 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
341 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
342
343 /* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
344 * negative u32 and sign-extends it to 64 bits.
345 */
346 #define BPF_RAND_SEXT_R7 \
347 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
348 BPF_FUNC_get_prandom_u32), \
349 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
350 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
351 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
352 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
353
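/* The test cases themselves live in separate files; with FILL_ARRAY defined,
 * verifier/tests.h expands to the array initializers (in the selftests tree
 * it is generated from the verifier/*.c snippets).
 */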
354 static struct bpf_test tests[] = {
355 #define FILL_ARRAY
356 #include <verifier/tests.h>
357 #undef FILL_ARRAY
358 };
359
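/* Walk insns[] backwards to find the last non-zero instruction; used when a
 * test provides a fixed program instead of a fill_helper.
 */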
360 static int probe_filter_length(const struct bpf_insn *fp)
361 {
362 int len;
363
364 for (len = MAX_INSNS - 1; len > 0; --len)
365 if (fp[len].code != 0 || fp[len].imm != 0)
366 break;
367 return len + 1;
368 }
369
370 static bool skip_unsupported_map(enum bpf_map_type map_type)
371 {
372 if (!bpf_probe_map_type(map_type, 0)) {
373 printf("SKIP (unsupported map type %d)\n", map_type);
374 skips++;
375 return true;
376 }
377 return false;
378 }
379
380 static int __create_map(uint32_t type, uint32_t size_key,
381 uint32_t size_value, uint32_t max_elem,
382 uint32_t extra_flags)
383 {
384 int fd;
385
386 fd = bpf_create_map(type, size_key, size_value, max_elem,
387 (type == BPF_MAP_TYPE_HASH ?
388 BPF_F_NO_PREALLOC : 0) | extra_flags);
389 if (fd < 0) {
390 if (skip_unsupported_map(type))
391 return -1;
392 printf("Failed to create hash map '%s'!\n", strerror(errno));
393 }
394
395 return fd;
396 }
397
398 static int create_map(uint32_t type, uint32_t size_key,
399 uint32_t size_value, uint32_t max_elem)
400 {
401 return __create_map(type, size_key, size_value, max_elem, 0);
402 }
403
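/* Write one struct test_val element into the map so that array and read-only
 * map tests operate on initialized data.
 */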
404 static void update_map(int fd, int index)
405 {
406 struct test_val value = {
407 .index = (6 + 1) * sizeof(int),
408 .foo[6] = 0xabcdef12,
409 };
410
411 assert(!bpf_map_update_elem(fd, &index, &value, 0));
412 }
413
414 static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
415 {
416 struct bpf_insn prog[] = {
417 BPF_MOV64_IMM(BPF_REG_0, ret),
418 BPF_EXIT_INSN(),
419 };
420
421 return bpf_load_program(prog_type, prog,
422 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
423 }
424
425 static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
426 int idx, int ret)
427 {
428 struct bpf_insn prog[] = {
429 BPF_MOV64_IMM(BPF_REG_3, idx),
430 BPF_LD_MAP_FD(BPF_REG_2, mfd),
431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
432 BPF_FUNC_tail_call),
433 BPF_MOV64_IMM(BPF_REG_0, ret),
434 BPF_EXIT_INSN(),
435 };
436
437 return bpf_load_program(prog_type, prog,
438 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
439 }
440
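/* Build a prog array for tail-call tests: three dummy programs are loaded and
 * stored at p1key/p2key/p3key; the middle one tail-calls back into the array.
 */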
441 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
442 int p1key, int p2key, int p3key)
443 {
444 int mfd, p1fd, p2fd, p3fd;
445
446 mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
447 sizeof(int), max_elem, 0);
448 if (mfd < 0) {
449 if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
450 return -1;
451 printf("Failed to create prog array '%s'!\n", strerror(errno));
452 return -1;
453 }
454
455 p1fd = create_prog_dummy_simple(prog_type, 42);
456 p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
457 p3fd = create_prog_dummy_simple(prog_type, 24);
458 if (p1fd < 0 || p2fd < 0 || p3fd < 0)
459 goto err;
460 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
461 goto err;
462 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
463 goto err;
464 if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
465 err:
466 close(mfd);
467 mfd = -1;
468 }
469 close(p3fd);
470 close(p2fd);
471 close(p1fd);
472 return mfd;
473 }
474
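/* Create a one-element array-of-maps whose inner map is a one-element int
 * array; only the outer map fd is returned, the inner fd is closed.
 */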
475 static int create_map_in_map(void)
476 {
477 int inner_map_fd, outer_map_fd;
478
479 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
480 sizeof(int), 1, 0);
481 if (inner_map_fd < 0) {
482 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
483 return -1;
484 printf("Failed to create array '%s'!\n", strerror(errno));
485 return inner_map_fd;
486 }
487
488 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
489 sizeof(int), inner_map_fd, 1, 0);
490 if (outer_map_fd < 0) {
491 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
492 return -1;
493 printf("Failed to create array of maps '%s'!\n",
494 strerror(errno));
495 }
496
497 close(inner_map_fd);
498
499 return outer_map_fd;
500 }
501
502 static int create_cgroup_storage(bool percpu)
503 {
504 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
505 BPF_MAP_TYPE_CGROUP_STORAGE;
506 int fd;
507
508 fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
509 TEST_DATA_LEN, 0, 0);
510 if (fd < 0) {
511 if (skip_unsupported_map(type))
512 return -1;
513 printf("Failed to create cgroup storage '%s'!\n",
514 strerror(errno));
515 }
516
517 return fd;
518 }
519
520 /* struct bpf_spin_lock {
521 * int val;
522 * };
523 * struct val {
524 * int cnt;
525 * struct bpf_spin_lock l;
526 * };
527 */
528 static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
529 static __u32 btf_raw_types[] = {
530 /* int */
531 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
532 /* struct bpf_spin_lock */ /* [2] */
533 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
534 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
535 /* struct val */ /* [3] */
536 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
537 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
538 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
539 };
540
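/* Assemble a raw BTF object (header, type section, string section) in memory
 * and load it, returning the BTF fd or -1 on failure.
 */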
541 static int load_btf(void)
542 {
543 struct btf_header hdr = {
544 .magic = BTF_MAGIC,
545 .version = BTF_VERSION,
546 .hdr_len = sizeof(struct btf_header),
547 .type_len = sizeof(btf_raw_types),
548 .str_off = sizeof(btf_raw_types),
549 .str_len = sizeof(btf_str_sec),
550 };
551 void *ptr, *raw_btf;
552 int btf_fd;
553
554 ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
555 sizeof(btf_str_sec));
556
557 memcpy(ptr, &hdr, sizeof(hdr));
558 ptr += sizeof(hdr);
559 memcpy(ptr, btf_raw_types, hdr.type_len);
560 ptr += hdr.type_len;
561 memcpy(ptr, btf_str_sec, hdr.str_len);
562 ptr += hdr.str_len;
563
564 btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
565 free(raw_btf);
566 if (btf_fd < 0)
567 return -1;
568 return btf_fd;
569 }
570
571 static int create_map_spin_lock(void)
572 {
573 struct bpf_create_map_attr attr = {
574 .name = "test_map",
575 .map_type = BPF_MAP_TYPE_ARRAY,
576 .key_size = 4,
577 .value_size = 8,
578 .max_entries = 1,
579 .btf_key_type_id = 1,
580 .btf_value_type_id = 3,
581 };
582 int fd, btf_fd;
583
584 btf_fd = load_btf();
585 if (btf_fd < 0)
586 return -1;
587 attr.btf_fd = btf_fd;
588 fd = bpf_create_map_xattr(&attr);
589 if (fd < 0)
590 printf("Failed to create map with spin_lock\n");
591 return fd;
592 }
593
594 static int create_sk_storage_map(void)
595 {
596 struct bpf_create_map_attr attr = {
597 .name = "test_map",
598 .map_type = BPF_MAP_TYPE_SK_STORAGE,
599 .key_size = 4,
600 .value_size = 8,
601 .max_entries = 0,
602 .map_flags = BPF_F_NO_PREALLOC,
603 .btf_key_type_id = 1,
604 .btf_value_type_id = 3,
605 };
606 int fd, btf_fd;
607
608 btf_fd = load_btf();
609 if (btf_fd < 0)
610 return -1;
611 attr.btf_fd = btf_fd;
612 fd = bpf_create_map_xattr(&attr);
613 close(attr.btf_fd);
614 if (fd < 0)
615 printf("Failed to create sk_storage_map\n");
616 return fd;
617 }
618
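/* Shared verifier log buffer (~16 MB), reused for every program load. */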
619 static char bpf_vlog[UINT_MAX >> 8];
620
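/* Run the test's fill_helper (if any) and create every map the test asks for,
 * patching each map fd into the imm field of the instructions listed in the
 * corresponding fixup_* array.
 */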
621 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
622 struct bpf_insn *prog, int *map_fds)
623 {
624 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
625 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
626 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
627 int *fixup_map_array_48b = test->fixup_map_array_48b;
628 int *fixup_map_sockmap = test->fixup_map_sockmap;
629 int *fixup_map_sockhash = test->fixup_map_sockhash;
630 int *fixup_map_xskmap = test->fixup_map_xskmap;
631 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
632 int *fixup_prog1 = test->fixup_prog1;
633 int *fixup_prog2 = test->fixup_prog2;
634 int *fixup_map_in_map = test->fixup_map_in_map;
635 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
636 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
637 int *fixup_map_spin_lock = test->fixup_map_spin_lock;
638 int *fixup_map_array_ro = test->fixup_map_array_ro;
639 int *fixup_map_array_wo = test->fixup_map_array_wo;
640 int *fixup_map_array_small = test->fixup_map_array_small;
641 int *fixup_sk_storage_map = test->fixup_sk_storage_map;
642 int *fixup_map_event_output = test->fixup_map_event_output;
643 int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
644 int *fixup_map_ringbuf = test->fixup_map_ringbuf;
645
646 if (test->fill_helper) {
647 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
648 test->fill_helper(test);
649 }
650
651 /* Allocating HTs with 1 elem is fine here, since we only exercise
652 * the verifier and do not do a runtime lookup, so the only thing
653 * that really matters in this case is the value size.
654 */
655 if (*fixup_map_hash_8b) {
656 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
657 sizeof(long long), 1);
658 do {
659 prog[*fixup_map_hash_8b].imm = map_fds[0];
660 fixup_map_hash_8b++;
661 } while (*fixup_map_hash_8b);
662 }
663
664 if (*fixup_map_hash_48b) {
665 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
666 sizeof(struct test_val), 1);
667 do {
668 prog[*fixup_map_hash_48b].imm = map_fds[1];
669 fixup_map_hash_48b++;
670 } while (*fixup_map_hash_48b);
671 }
672
673 if (*fixup_map_hash_16b) {
674 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
675 sizeof(struct other_val), 1);
676 do {
677 prog[*fixup_map_hash_16b].imm = map_fds[2];
678 fixup_map_hash_16b++;
679 } while (*fixup_map_hash_16b);
680 }
681
682 if (*fixup_map_array_48b) {
683 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
684 sizeof(struct test_val), 1);
685 update_map(map_fds[3], 0);
686 do {
687 prog[*fixup_map_array_48b].imm = map_fds[3];
688 fixup_map_array_48b++;
689 } while (*fixup_map_array_48b);
690 }
691
692 if (*fixup_prog1) {
693 map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
694 do {
695 prog[*fixup_prog1].imm = map_fds[4];
696 fixup_prog1++;
697 } while (*fixup_prog1);
698 }
699
700 if (*fixup_prog2) {
701 map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
702 do {
703 prog[*fixup_prog2].imm = map_fds[5];
704 fixup_prog2++;
705 } while (*fixup_prog2);
706 }
707
708 if (*fixup_map_in_map) {
709 map_fds[6] = create_map_in_map();
710 do {
711 prog[*fixup_map_in_map].imm = map_fds[6];
712 fixup_map_in_map++;
713 } while (*fixup_map_in_map);
714 }
715
716 if (*fixup_cgroup_storage) {
717 map_fds[7] = create_cgroup_storage(false);
718 do {
719 prog[*fixup_cgroup_storage].imm = map_fds[7];
720 fixup_cgroup_storage++;
721 } while (*fixup_cgroup_storage);
722 }
723
724 if (*fixup_percpu_cgroup_storage) {
725 map_fds[8] = create_cgroup_storage(true);
726 do {
727 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
728 fixup_percpu_cgroup_storage++;
729 } while (*fixup_percpu_cgroup_storage);
730 }
731 if (*fixup_map_sockmap) {
732 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
733 sizeof(int), 1);
734 do {
735 prog[*fixup_map_sockmap].imm = map_fds[9];
736 fixup_map_sockmap++;
737 } while (*fixup_map_sockmap);
738 }
739 if (*fixup_map_sockhash) {
740 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
741 sizeof(int), 1);
742 do {
743 prog[*fixup_map_sockhash].imm = map_fds[10];
744 fixup_map_sockhash++;
745 } while (*fixup_map_sockhash);
746 }
747 if (*fixup_map_xskmap) {
748 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
749 sizeof(int), 1);
750 do {
751 prog[*fixup_map_xskmap].imm = map_fds[11];
752 fixup_map_xskmap++;
753 } while (*fixup_map_xskmap);
754 }
755 if (*fixup_map_stacktrace) {
756 map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
757 sizeof(u64), 1);
758 do {
759 prog[*fixup_map_stacktrace].imm = map_fds[12];
760 fixup_map_stacktrace++;
761 } while (*fixup_map_stacktrace);
762 }
763 if (*fixup_map_spin_lock) {
764 map_fds[13] = create_map_spin_lock();
765 do {
766 prog[*fixup_map_spin_lock].imm = map_fds[13];
767 fixup_map_spin_lock++;
768 } while (*fixup_map_spin_lock);
769 }
770 if (*fixup_map_array_ro) {
771 map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
772 sizeof(struct test_val), 1,
773 BPF_F_RDONLY_PROG);
774 update_map(map_fds[14], 0);
775 do {
776 prog[*fixup_map_array_ro].imm = map_fds[14];
777 fixup_map_array_ro++;
778 } while (*fixup_map_array_ro);
779 }
780 if (*fixup_map_array_wo) {
781 map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
782 sizeof(struct test_val), 1,
783 BPF_F_WRONLY_PROG);
784 update_map(map_fds[15], 0);
785 do {
786 prog[*fixup_map_array_wo].imm = map_fds[15];
787 fixup_map_array_wo++;
788 } while (*fixup_map_array_wo);
789 }
790 if (*fixup_map_array_small) {
791 map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
792 1, 1, 0);
793 update_map(map_fds[16], 0);
794 do {
795 prog[*fixup_map_array_small].imm = map_fds[16];
796 fixup_map_array_small++;
797 } while (*fixup_map_array_small);
798 }
799 if (*fixup_sk_storage_map) {
800 map_fds[17] = create_sk_storage_map();
801 do {
802 prog[*fixup_sk_storage_map].imm = map_fds[17];
803 fixup_sk_storage_map++;
804 } while (*fixup_sk_storage_map);
805 }
806 if (*fixup_map_event_output) {
807 map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
808 sizeof(int), sizeof(int), 1, 0);
809 do {
810 prog[*fixup_map_event_output].imm = map_fds[18];
811 fixup_map_event_output++;
812 } while (*fixup_map_event_output);
813 }
814 if (*fixup_map_reuseport_array) {
815 map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
816 sizeof(u32), sizeof(u64), 1, 0);
817 do {
818 prog[*fixup_map_reuseport_array].imm = map_fds[19];
819 fixup_map_reuseport_array++;
820 } while (*fixup_map_reuseport_array);
821 }
822 if (*fixup_map_ringbuf) {
823 map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
824 0, 4096);
825 do {
826 prog[*fixup_map_ringbuf].imm = map_fds[20];
827 fixup_map_ringbuf++;
828 } while (*fixup_map_ringbuf);
829 }
830 }
831
832 struct libcap {
833 struct __user_cap_header_struct hdr;
834 struct __user_cap_data_struct data[2];
835 };
836
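/* Toggle the capabilities needed to load programs: CAP_SYS_ADMIN is always
 * cleared, while CAP_NET_ADMIN, CAP_PERFMON and CAP_BPF are set or cleared
 * depending on 'admin'.
 */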
837 static int set_admin(bool admin)
838 {
839 cap_t caps;
840 /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
841 const cap_value_t cap_net_admin = CAP_NET_ADMIN;
842 const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
843 struct libcap *cap;
844 int ret = -1;
845
846 caps = cap_get_proc();
847 if (!caps) {
848 perror("cap_get_proc");
849 return -1;
850 }
851 cap = (struct libcap *)caps;
852 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
853 perror("cap_set_flag clear admin");
854 goto out;
855 }
856 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
857 admin ? CAP_SET : CAP_CLEAR)) {
858 perror("cap_set_flag set_or_clear net");
859 goto out;
860 }
861 /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
862 * so update effective bits manually
863 */
864 if (admin) {
865 cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
866 cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
867 } else {
868 cap->data[1].effective &= ~(1 << (38 - 32));
869 cap->data[1].effective &= ~(1 << (39 - 32));
870 }
871 if (cap_set_proc(caps)) {
872 perror("cap_set_proc");
873 goto out;
874 }
875 ret = 0;
876 out:
877 if (cap_free(caps))
878 perror("cap_free");
879 return ret;
880 }
881
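/* Execute the loaded program once via bpf_prog_test_run() and compare the
 * return value against the expected one; capabilities are raised temporarily
 * for unprivileged test runs.
 */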
882 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
883 void *data, size_t size_data)
884 {
885 __u8 tmp[TEST_DATA_LEN << 2];
886 __u32 size_tmp = sizeof(tmp);
887 uint32_t retval;
888 int err;
889
890 if (unpriv)
891 set_admin(true);
892 err = bpf_prog_test_run(fd_prog, 1, data, size_data,
893 tmp, &size_tmp, &retval, NULL);
894 if (unpriv)
895 set_admin(false);
896 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
897 printf("Unexpected bpf_prog_test_run error ");
898 return err;
899 }
900 if (!err && retval != expected_val &&
901 expected_val != POINTER_VALUE) {
902 printf("FAIL retval %d != %d ", retval, expected_val);
903 return 1;
904 }
905
906 return 0;
907 }
908
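/* 'exp' is a tab-separated list of substrings; verify that they all appear in
 * the verifier log in the given order.
 */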
909 static bool cmp_str_seq(const char *log, const char *exp)
910 {
911 char needle[80];
912 const char *p, *q;
913 int len;
914
915 do {
916 p = strchr(exp, '\t');
917 if (!p)
918 p = exp + strlen(exp);
919
920 len = p - exp;
921 if (len >= sizeof(needle) || !len) {
922 printf("FAIL\nTestcase bug\n");
923 return false;
924 }
925 strncpy(needle, exp, len);
926 needle[len] = 0;
927 q = strstr(log, needle);
928 if (!q) {
929 printf("FAIL\nUnexpected verifier log in successful load!\n"
930 "EXP: %s\nRES:\n", needle);
931 return false;
932 }
933 log = q + len;
934 exp = p + 1;
935 } while (*p);
936 return true;
937 }
938
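/* Load and (optionally) run a single test case, comparing the verifier
 * verdict, log contents, insn_processed count and run-time return values
 * against the expectations recorded in the test.
 */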
939 static void do_test_single(struct bpf_test *test, bool unpriv,
940 int *passes, int *errors)
941 {
942 int fd_prog, expected_ret, alignment_prevented_execution;
943 int prog_len, prog_type = test->prog_type;
944 struct bpf_insn *prog = test->insns;
945 struct bpf_load_program_attr attr;
946 int run_errs, run_successes;
947 int map_fds[MAX_NR_MAPS];
948 const char *expected_err;
949 int fixup_skips;
950 __u32 pflags;
951 int i, err;
952
953 for (i = 0; i < MAX_NR_MAPS; i++)
954 map_fds[i] = -1;
955
956 if (!prog_type)
957 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
958 fixup_skips = skips;
959 do_test_fixup(test, prog_type, prog, map_fds);
960 if (test->fill_insns) {
961 prog = test->fill_insns;
962 prog_len = test->prog_len;
963 } else {
964 prog_len = probe_filter_length(prog);
965 }
966 /* If there were some map skips during fixup due to missing bpf
967 * features, skip this test.
968 */
969 if (fixup_skips != skips)
970 return;
971
972 pflags = BPF_F_TEST_RND_HI32;
973 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
974 pflags |= BPF_F_STRICT_ALIGNMENT;
975 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
976 pflags |= BPF_F_ANY_ALIGNMENT;
977 if (test->flags & ~3)
978 pflags |= test->flags;
979
980 expected_ret = unpriv && test->result_unpriv != UNDEF ?
981 test->result_unpriv : test->result;
982 expected_err = unpriv && test->errstr_unpriv ?
983 test->errstr_unpriv : test->errstr;
984 memset(&attr, 0, sizeof(attr));
985 attr.prog_type = prog_type;
986 attr.expected_attach_type = test->expected_attach_type;
987 attr.insns = prog;
988 attr.insns_cnt = prog_len;
989 attr.license = "GPL";
990 if (verbose)
991 attr.log_level = 1;
992 else if (expected_ret == VERBOSE_ACCEPT)
993 attr.log_level = 2;
994 else
995 attr.log_level = 4;
996 attr.prog_flags = pflags;
997
998 if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
999 attr.attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
1000 attr.expected_attach_type);
1001 if (attr.attach_btf_id < 0) {
1002 printf("FAIL\nFailed to find BTF ID for '%s'!\n",
1003 test->kfunc);
1004 (*errors)++;
1005 return;
1006 }
1007 }
1008
1009 fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
1010
1011 /* BPF_PROG_TYPE_TRACING requires more setup and
1012 * bpf_probe_prog_type won't give correct answer
1013 */
1014 if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
1015 !bpf_probe_prog_type(prog_type, 0)) {
1016 printf("SKIP (unsupported program type %d)\n", prog_type);
1017 skips++;
1018 goto close_fds;
1019 }
1020
1021 alignment_prevented_execution = 0;
1022
1023 if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
1024 if (fd_prog < 0) {
1025 printf("FAIL\nFailed to load prog '%s'!\n",
1026 strerror(errno));
1027 goto fail_log;
1028 }
1029 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1030 if (fd_prog >= 0 &&
1031 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
1032 alignment_prevented_execution = 1;
1033 #endif
1034 if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
1035 goto fail_log;
1036 }
1037 } else {
1038 if (fd_prog >= 0) {
1039 printf("FAIL\nUnexpected success to load!\n");
1040 goto fail_log;
1041 }
1042 if (!expected_err || !strstr(bpf_vlog, expected_err)) {
1043 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
1044 expected_err, bpf_vlog);
1045 goto fail_log;
1046 }
1047 }
1048
1049 if (!unpriv && test->insn_processed) {
1050 uint32_t insn_processed;
1051 char *proc;
1052
1053 proc = strstr(bpf_vlog, "processed ");
1054 insn_processed = atoi(proc + 10);
1055 if (test->insn_processed != insn_processed) {
1056 printf("FAIL\nUnexpected insn_processed %u vs %u\n",
1057 insn_processed, test->insn_processed);
1058 goto fail_log;
1059 }
1060 }
1061
1062 if (verbose)
1063 printf(", verifier log:\n%s", bpf_vlog);
1064
1065 run_errs = 0;
1066 run_successes = 0;
1067 if (!alignment_prevented_execution && fd_prog >= 0) {
1068 uint32_t expected_val;
1069 int i;
1070
1071 if (!test->runs)
1072 test->runs = 1;
1073
1074 for (i = 0; i < test->runs; i++) {
1075 if (unpriv && test->retvals[i].retval_unpriv)
1076 expected_val = test->retvals[i].retval_unpriv;
1077 else
1078 expected_val = test->retvals[i].retval;
1079
1080 err = do_prog_test_run(fd_prog, unpriv, expected_val,
1081 test->retvals[i].data,
1082 sizeof(test->retvals[i].data));
1083 if (err) {
1084 printf("(run %d/%d) ", i + 1, test->runs);
1085 run_errs++;
1086 } else {
1087 run_successes++;
1088 }
1089 }
1090 }
1091
1092 if (!run_errs) {
1093 (*passes)++;
1094 if (run_successes > 1)
1095 printf("%d cases ", run_successes);
1096 printf("OK");
1097 if (alignment_prevented_execution)
1098 printf(" (NOTE: not executed due to unknown alignment)");
1099 printf("\n");
1100 } else {
1101 printf("\n");
1102 goto fail_log;
1103 }
1104 close_fds:
1105 if (test->fill_insns)
1106 free(test->fill_insns);
1107 close(fd_prog);
1108 for (i = 0; i < MAX_NR_MAPS; i++)
1109 close(map_fds[i]);
1110 sched_yield();
1111 return;
1112 fail_log:
1113 (*errors)++;
1114 printf("%s", bpf_vlog);
1115 goto close_fds;
1116 }
1117
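/* Check that the effective capability set includes CAP_NET_ADMIN, CAP_PERFMON
 * and CAP_BPF, i.e. that the privileged test pass can be run.
 */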
1118 static bool is_admin(void)
1119 {
1120 cap_flag_value_t net_priv = CAP_CLEAR;
1121 bool perfmon_priv = false;
1122 bool bpf_priv = false;
1123 struct libcap *cap;
1124 cap_t caps;
1125
1126 #ifdef CAP_IS_SUPPORTED
1127 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
1128 perror("cap_get_flag");
1129 return false;
1130 }
1131 #endif
1132 caps = cap_get_proc();
1133 if (!caps) {
1134 perror("cap_get_proc");
1135 return false;
1136 }
1137 cap = (struct libcap *)caps;
1138 bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32));
1139 perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32));
1140 if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
1141 perror("cap_get_flag NET");
1142 if (cap_free(caps))
1143 perror("cap_free");
1144 return bpf_priv && perfmon_priv && net_priv == CAP_SET;
1145 }
1146
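/* Read kernel.unprivileged_bpf_disabled; if the sysctl cannot be read, assume
 * unprivileged BPF is disabled.
 */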
1147 static void get_unpriv_disabled()
1148 {
1149 char buf[2];
1150 FILE *fd;
1151
1152 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
1153 if (!fd) {
1154 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
1155 unpriv_disabled = true;
1156 return;
1157 }
1158 if (fgets(buf, 2, fd) == buf && atoi(buf))
1159 unpriv_disabled = true;
1160 fclose(fd);
1161 }
1162
1163 static bool test_as_unpriv(struct bpf_test *test)
1164 {
1165 return !test->prog_type ||
1166 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
1167 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
1168 }
1169
1170 static int do_test(bool unpriv, unsigned int from, unsigned int to)
1171 {
1172 int i, passes = 0, errors = 0;
1173
1174 for (i = from; i < to; i++) {
1175 struct bpf_test *test = &tests[i];
1176
1177 /* Program types that are not supported for non-root users
1178 * are skipped right away.
1179 */
1180 if (test_as_unpriv(test) && unpriv_disabled) {
1181 printf("#%d/u %s SKIP\n", i, test->descr);
1182 skips++;
1183 } else if (test_as_unpriv(test)) {
1184 if (!unpriv)
1185 set_admin(false);
1186 printf("#%d/u %s ", i, test->descr);
1187 do_test_single(test, true, &passes, &errors);
1188 if (!unpriv)
1189 set_admin(true);
1190 }
1191
1192 if (unpriv) {
1193 printf("#%d/p %s SKIP\n", i, test->descr);
1194 skips++;
1195 } else {
1196 printf("#%d/p %s ", i, test->descr);
1197 do_test_single(test, false, &passes, &errors);
1198 }
1199 }
1200
1201 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
1202 skips, errors);
1203 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
1204 }
1205
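/* Usage: test_verifier [-v] [<from> <to> | <test>]; with no range all tests
 * are run, each both as unprivileged (where applicable) and privileged.
 */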
1206 int main(int argc, char **argv)
1207 {
1208 unsigned int from = 0, to = ARRAY_SIZE(tests);
1209 bool unpriv = !is_admin();
1210 int arg = 1;
1211
1212 if (argc > 1 && strcmp(argv[1], "-v") == 0) {
1213 arg++;
1214 verbose = true;
1215 argc--;
1216 }
1217
1218 if (argc == 3) {
1219 unsigned int l = atoi(argv[arg]);
1220 unsigned int u = atoi(argv[arg + 1]);
1221
1222 if (l < to && u < to) {
1223 from = l;
1224 to = u + 1;
1225 }
1226 } else if (argc == 2) {
1227 unsigned int t = atoi(argv[arg]);
1228
1229 if (t < to) {
1230 from = t;
1231 to = t + 1;
1232 }
1233 }
1234
1235 get_unpriv_disabled();
1236 if (unpriv && unpriv_disabled) {
1237 printf("Cannot run as unprivileged user with sysctl %s.\n",
1238 UNPRIV_SYSCTL);
1239 return EXIT_FAILURE;
1240 }
1241
1242 bpf_semi_rand_init();
1243 return do_test(unpriv, from, to);
1244 }
1245