1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Testsuite for eBPF verifier
4 *
5 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
6 * Copyright (c) 2017 Facebook
7 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 */
9
10 #include <endian.h>
11 #include <asm/types.h>
12 #include <linux/types.h>
13 #include <stdint.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <errno.h>
18 #include <string.h>
19 #include <stddef.h>
20 #include <stdbool.h>
21 #include <sched.h>
22 #include <limits.h>
23 #include <assert.h>
24
25 #include <sys/capability.h>
26
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
31 #include <linux/if_ether.h>
32 #include <linux/btf.h>
33
34 #include <bpf/bpf.h>
35 #include <bpf/libbpf.h>
36
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rand.h"
45 #include "bpf_util.h"
46 #include "test_btf.h"
47 #include "../../../include/linux/filter.h"
48
49 #ifndef ENOTSUPP
50 #define ENOTSUPP 524
51 #endif
52
53 #define MAX_INSNS BPF_MAXINSNS
54 #define MAX_TEST_INSNS 1000000
55 #define MAX_FIXUPS 8
56 #define MAX_NR_MAPS 22
57 #define MAX_TEST_RUNS 8
58 #define POINTER_VALUE 0xcafe4all
59 #define TEST_DATA_LEN 64
60
61 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
62 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
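/* Note (summary of how the flags above are consumed; see do_test_single()
 * and test_as_unpriv() below): F_LOAD_WITH_STRICT_ALIGNMENT is translated
 * into BPF_F_STRICT_ALIGNMENT, F_NEEDS_EFFICIENT_UNALIGNED_ACCESS into
 * BPF_F_ANY_ALIGNMENT; on builds without
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the latter also skips program
 * execution and the unprivileged variant of the test.
 */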
63
64 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
65 static bool unpriv_disabled = false;
66 static int skips;
67 static bool verbose = false;
68
69 struct bpf_test {
70 const char *descr;
71 struct bpf_insn insns[MAX_INSNS];
72 struct bpf_insn *fill_insns;
73 int fixup_map_hash_8b[MAX_FIXUPS];
74 int fixup_map_hash_48b[MAX_FIXUPS];
75 int fixup_map_hash_16b[MAX_FIXUPS];
76 int fixup_map_array_48b[MAX_FIXUPS];
77 int fixup_map_sockmap[MAX_FIXUPS];
78 int fixup_map_sockhash[MAX_FIXUPS];
79 int fixup_map_xskmap[MAX_FIXUPS];
80 int fixup_map_stacktrace[MAX_FIXUPS];
81 int fixup_prog1[MAX_FIXUPS];
82 int fixup_prog2[MAX_FIXUPS];
83 int fixup_map_in_map[MAX_FIXUPS];
84 int fixup_cgroup_storage[MAX_FIXUPS];
85 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
86 int fixup_map_spin_lock[MAX_FIXUPS];
87 int fixup_map_array_ro[MAX_FIXUPS];
88 int fixup_map_array_wo[MAX_FIXUPS];
89 int fixup_map_array_small[MAX_FIXUPS];
90 int fixup_sk_storage_map[MAX_FIXUPS];
91 int fixup_map_event_output[MAX_FIXUPS];
92 int fixup_map_reuseport_array[MAX_FIXUPS];
93 int fixup_map_ringbuf[MAX_FIXUPS];
94 int fixup_map_timer[MAX_FIXUPS];
95 /* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
96 * Can be a tab-separated sequence of expected strings. An empty string
97 * means no log verification.
98 */
99 const char *errstr;
100 const char *errstr_unpriv;
101 uint32_t insn_processed;
102 int prog_len;
103 enum {
104 UNDEF,
105 ACCEPT,
106 REJECT,
107 VERBOSE_ACCEPT,
108 } result, result_unpriv;
109 enum bpf_prog_type prog_type;
110 uint8_t flags;
111 void (*fill_helper)(struct bpf_test *self);
112 int runs;
113 #define bpf_testdata_struct_t \
114 struct { \
115 uint32_t retval, retval_unpriv; \
116 union { \
117 __u8 data[TEST_DATA_LEN]; \
118 __u64 data64[TEST_DATA_LEN / 8]; \
119 }; \
120 }
121 union {
122 bpf_testdata_struct_t;
123 bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
124 };
125 enum bpf_attach_type expected_attach_type;
126 const char *kfunc;
127 };
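/* A hypothetical sketch (not an entry of the tests[] array below) of how
 * the verifier/*.c files typically fill struct bpf_test; the index in
 * .fixup_map_hash_8b points at the BPF_LD_MAP_FD placeholder whose imm
 * field do_test_fixup() later patches with a real map fd:
 *
 *	{
 *		"sketch: map value access without NULL check",
 *		.insns = {
 *		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 *		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 *		BPF_LD_MAP_FD(BPF_REG_1, 0),
 *		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 *			     BPF_FUNC_map_lookup_elem),
 *		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
 *		BPF_EXIT_INSN(),
 *		},
 *		.fixup_map_hash_8b = { 3 },
 *		.errstr = "R0 invalid mem access",
 *		.result = REJECT,
 *	},
 */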
128
129 /* Note we want this to be 64 bit aligned so that the end of our array is
130 * actually the end of the structure.
131 */
132 #define MAX_ENTRIES 11
133
134 struct test_val {
135 unsigned int index;
136 int foo[MAX_ENTRIES];
137 };
138
139 struct other_val {
140 long long foo;
141 long long bar;
142 };
143
144 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
145 {
146 /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
147 #define PUSH_CNT 51
148 /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
149 unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
150 struct bpf_insn *insn = self->fill_insns;
151 int i = 0, j, k = 0;
152
153 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 loop:
155 for (j = 0; j < PUSH_CNT; j++) {
156 insn[i++] = BPF_LD_ABS(BPF_B, 0);
157 /* jump to error label */
158 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
159 i++;
160 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
161 insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
162 insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
163 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
164 BPF_FUNC_skb_vlan_push),
165 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
166 i++;
167 }
168
169 for (j = 0; j < PUSH_CNT; j++) {
170 insn[i++] = BPF_LD_ABS(BPF_B, 0);
171 insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
172 i++;
173 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
174 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
175 BPF_FUNC_skb_vlan_pop),
176 insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
177 i++;
178 }
179 if (++k < 5)
180 goto loop;
181
182 for (; i < len - 3; i++)
183 insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
184 insn[len - 3] = BPF_JMP_A(1);
185 /* error label */
186 insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
187 insn[len - 1] = BPF_EXIT_INSN();
188 self->prog_len = len;
189 }
190
191 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
192 {
193 struct bpf_insn *insn = self->fill_insns;
194 /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
195 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
196 * to extend the error value of the inlined ld_abs sequence which then
197 * contains 7 insns. so, set the divisor to 7 so the testcase works
198 * on all arches.
199 */
200 unsigned int len = (1 << 15) / 7;
201 int i = 0;
202
203 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
204 insn[i++] = BPF_LD_ABS(BPF_B, 0);
205 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
206 i++;
207 while (i < len - 1)
208 insn[i++] = BPF_LD_ABS(BPF_B, 1);
209 insn[i] = BPF_EXIT_INSN();
210 self->prog_len = i + 1;
211 }
212
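/* Note: for the fill helper below, self->retval doubles as an input: it is
 * used as the approximate number of instructions to generate and is then
 * overwritten with the expected (XOR-folded to 32 bit) return value.
 */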
213 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
214 {
215 struct bpf_insn *insn = self->fill_insns;
216 uint64_t res = 0;
217 int i = 0;
218
219 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
220 while (i < self->retval) {
221 uint64_t val = bpf_semi_rand_get();
222 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
223
224 res ^= val;
225 insn[i++] = tmp[0];
226 insn[i++] = tmp[1];
227 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
228 }
229 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
230 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
231 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
232 insn[i] = BPF_EXIT_INSN();
233 self->prog_len = i + 1;
234 res ^= (res >> 32);
235 self->retval = (uint32_t)res;
236 }
237
238 #define MAX_JMP_SEQ 8192
239
240 /* test the sequence of 8k jumps */
241 static void bpf_fill_scale1(struct bpf_test *self)
242 {
243 struct bpf_insn *insn = self->fill_insns;
244 int i = 0, k = 0;
245
246 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
247 /* test to check that the long sequence of jumps is acceptable */
248 while (k++ < MAX_JMP_SEQ) {
249 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
250 BPF_FUNC_get_prandom_u32);
251 insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
252 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
253 insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
254 -8 * (k % 64 + 1));
255 }
256 /* is_state_visited() doesn't allocate state for pruning for every jump.
257 * Hence multiply jmps by 4 to accommodate that heuristic
258 */
259 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
260 insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
261 insn[i] = BPF_EXIT_INSN();
262 self->prog_len = i + 1;
263 self->retval = 42;
264 }
265
266 /* test the sequence of 8k jumps in the innermost function (function depth 8) */
267 static void bpf_fill_scale2(struct bpf_test *self)
268 {
269 struct bpf_insn *insn = self->fill_insns;
270 int i = 0, k = 0;
271
272 #define FUNC_NEST 7
273 for (k = 0; k < FUNC_NEST; k++) {
274 insn[i++] = BPF_CALL_REL(1);
275 insn[i++] = BPF_EXIT_INSN();
276 }
277 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
278 /* test to check that the long sequence of jumps is acceptable */
279 k = 0;
280 while (k++ < MAX_JMP_SEQ) {
281 insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
282 BPF_FUNC_get_prandom_u32);
283 insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
284 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
285 insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
286 -8 * (k % (64 - 4 * FUNC_NEST) + 1));
287 }
288 while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
289 insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
290 insn[i] = BPF_EXIT_INSN();
291 self->prog_len = i + 1;
292 self->retval = 42;
293 }
294
295 static void bpf_fill_scale(struct bpf_test *self)
296 {
297 switch (self->retval) {
298 case 1:
299 return bpf_fill_scale1(self);
300 case 2:
301 return bpf_fill_scale2(self);
302 default:
303 self->prog_len = 0;
304 break;
305 }
306 }
307
308 static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
309 {
310 unsigned int len = 259, hlen = 128;
311 int i;
312
313 insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
314 for (i = 1; i <= hlen; i++) {
315 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
316 insn[i + hlen] = BPF_JMP_A(hlen - i);
317 }
318 insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
319 insn[len - 1] = BPF_EXIT_INSN();
320
321 return len;
322 }
323
324 static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
325 {
326 unsigned int len = 4100, jmp_off = 2048;
327 int i, j;
328
329 insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
330 for (i = 1; i <= jmp_off; i++) {
331 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
332 }
333 insn[i++] = BPF_JMP_A(jmp_off);
334 for (; i <= jmp_off * 2 + 1; i+=16) {
335 for (j = 0; j < 16; j++) {
336 insn[i + j] = BPF_JMP_A(16 - j - 1);
337 }
338 }
339
340 insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
341 insn[len - 1] = BPF_EXIT_INSN();
342
343 return len;
344 }
345
346 static void bpf_fill_torturous_jumps(struct bpf_test *self)
347 {
348 struct bpf_insn *insn = self->fill_insns;
349 int i = 0;
350
351 switch (self->retval) {
352 case 1:
353 self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
354 return;
355 case 2:
356 self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
357 return;
358 case 3:
359 /* main */
360 insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4);
361 insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262);
362 insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
363 insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
364 insn[i++] = BPF_EXIT_INSN();
365
366 /* subprog 1 */
367 i += bpf_fill_torturous_jumps_insn_1(insn + i);
368
369 /* subprog 2 */
370 i += bpf_fill_torturous_jumps_insn_2(insn + i);
371
372 self->prog_len = i;
373 return;
374 default:
375 self->prog_len = 0;
376 break;
377 }
378 }
379
380 /* BPF_SK_LOOKUP expands to 13 instructions; account for them when fixing up map insn offsets */
381 #define BPF_SK_LOOKUP(func) \
382 /* struct bpf_sock_tuple tuple = {} */ \
383 BPF_MOV64_IMM(BPF_REG_2, 0), \
384 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
385 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
386 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
387 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
388 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
389 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
390 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
391 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
393 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
394 BPF_MOV64_IMM(BPF_REG_4, 0), \
395 BPF_MOV64_IMM(BPF_REG_5, 0), \
396 BPF_EMIT_CALL(BPF_FUNC_ ## func)
397
398 /* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default
399 * return value to 0 and does the necessary preparation for direct packet
400 * access through r2. The allowed access range is 8 bytes.
401 */
402 #define BPF_DIRECT_PKT_R2 \
403 BPF_MOV64_IMM(BPF_REG_0, 0), \
404 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
405 offsetof(struct __sk_buff, data)), \
406 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
407 offsetof(struct __sk_buff, data_end)), \
408 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
410 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
411 BPF_EXIT_INSN()
412
413 /* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
414 * positive u32 and zero-extends it to 64 bits.
415 */
416 #define BPF_RAND_UEXT_R7 \
417 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
418 BPF_FUNC_get_prandom_u32), \
419 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
420 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
421 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
422
423 /* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
424 * negative u32 and sign-extends it to 64 bits.
425 */
426 #define BPF_RAND_SEXT_R7 \
427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
428 BPF_FUNC_get_prandom_u32), \
429 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
430 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
431 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
432 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
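/* A hypothetical sketch (not a real verifier/ test) of how such macros are
 * spliced into a test's insns array; BPF_DIRECT_PKT_R2 leaves r2 pointing
 * at 8 verified packet bytes. Note that the BPF_RAND_*_R7 macros call a
 * helper and therefore clobber the caller-saved registers r1-r5:
 *
 *	.insns = {
 *	BPF_DIRECT_PKT_R2,
 *	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 7),
 *	BPF_EXIT_INSN(),
 *	},
 *	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 *	.result = ACCEPT,
 */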
433
434 static struct bpf_test tests[] = {
435 #define FILL_ARRAY
436 #include <verifier/tests.h>
437 #undef FILL_ARRAY
438 };
439
440 static int probe_filter_length(const struct bpf_insn *fp)
441 {
442 int len;
443
444 for (len = MAX_INSNS - 1; len > 0; --len)
445 if (fp[len].code != 0 || fp[len].imm != 0)
446 break;
447 return len + 1;
448 }
449
450 static bool skip_unsupported_map(enum bpf_map_type map_type)
451 {
452 if (!bpf_probe_map_type(map_type, 0)) {
453 printf("SKIP (unsupported map type %d)\n", map_type);
454 skips++;
455 return true;
456 }
457 return false;
458 }
459
460 static int __create_map(uint32_t type, uint32_t size_key,
461 uint32_t size_value, uint32_t max_elem,
462 uint32_t extra_flags)
463 {
464 LIBBPF_OPTS(bpf_map_create_opts, opts);
465 int fd;
466
467 opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
468 fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
469 if (fd < 0) {
470 if (skip_unsupported_map(type))
471 return -1;
472 printf("Failed to create hash map '%s'!\n", strerror(errno));
473 }
474
475 return fd;
476 }
477
478 static int create_map(uint32_t type, uint32_t size_key,
479 uint32_t size_value, uint32_t max_elem)
480 {
481 return __create_map(type, size_key, size_value, max_elem, 0);
482 }
483
484 static void update_map(int fd, int index)
485 {
486 struct test_val value = {
487 .index = (6 + 1) * sizeof(int),
488 .foo[6] = 0xabcdef12,
489 };
490
491 assert(!bpf_map_update_elem(fd, &index, &value, 0));
492 }
493
494 static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
495 {
496 struct bpf_insn prog[] = {
497 BPF_MOV64_IMM(BPF_REG_0, ret),
498 BPF_EXIT_INSN(),
499 };
500
501 return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
502 }
503
504 static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
505 int idx, int ret)
506 {
507 struct bpf_insn prog[] = {
508 BPF_MOV64_IMM(BPF_REG_3, idx),
509 BPF_LD_MAP_FD(BPF_REG_2, mfd),
510 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
511 BPF_FUNC_tail_call),
512 BPF_MOV64_IMM(BPF_REG_0, ret),
513 BPF_EXIT_INSN(),
514 };
515
516 return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
517 }
518
519 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
520 int p1key, int p2key, int p3key)
521 {
522 int mfd, p1fd, p2fd, p3fd;
523
524 mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
525 sizeof(int), max_elem, NULL);
526 if (mfd < 0) {
527 if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
528 return -1;
529 printf("Failed to create prog array '%s'!\n", strerror(errno));
530 return -1;
531 }
532
533 p1fd = create_prog_dummy_simple(prog_type, 42);
534 p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
535 p3fd = create_prog_dummy_simple(prog_type, 24);
536 if (p1fd < 0 || p2fd < 0 || p3fd < 0)
537 goto err;
538 if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
539 goto err;
540 if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
541 goto err;
542 if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
543 err:
544 close(mfd);
545 mfd = -1;
546 }
547 close(p3fd);
548 close(p2fd);
549 close(p1fd);
550 return mfd;
551 }
552
553 static int create_map_in_map(void)
554 {
555 LIBBPF_OPTS(bpf_map_create_opts, opts);
556 int inner_map_fd, outer_map_fd;
557
558 inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
559 sizeof(int), 1, NULL);
560 if (inner_map_fd < 0) {
561 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
562 return -1;
563 printf("Failed to create array '%s'!\n", strerror(errno));
564 return inner_map_fd;
565 }
566
567 opts.inner_map_fd = inner_map_fd;
568 outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
569 sizeof(int), sizeof(int), 1, &opts);
570 if (outer_map_fd < 0) {
571 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
572 return -1;
573 printf("Failed to create array of maps '%s'!\n",
574 strerror(errno));
575 }
576
577 close(inner_map_fd);
578
579 return outer_map_fd;
580 }
581
582 static int create_cgroup_storage(bool percpu)
583 {
584 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
585 BPF_MAP_TYPE_CGROUP_STORAGE;
586 int fd;
587
588 fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
589 TEST_DATA_LEN, 0, NULL);
590 if (fd < 0) {
591 if (skip_unsupported_map(type))
592 return -1;
593 printf("Failed to create cgroup storage '%s'!\n",
594 strerror(errno));
595 }
596
597 return fd;
598 }
599
600 /* struct bpf_spin_lock {
601 * int val;
602 * };
603 * struct val {
604 * int cnt;
605 * struct bpf_spin_lock l;
606 * };
607 * struct bpf_timer {
608 * __u64 :64;
609 * __u64 :64;
610 * } __attribute__((aligned(8)));
611 * struct timer {
612 * struct bpf_timer t;
613 * };
614 */
615 static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
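/* Name offsets into btf_str_sec used below: 1 = "bpf_spin_lock", 15 = "val",
 * 19 = "cnt", 23 = "l", 25 = "bpf_timer", 35 = "timer", 41 = "t".
 */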
616 static __u32 btf_raw_types[] = {
617 /* int */
618 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
619 /* struct bpf_spin_lock */ /* [2] */
620 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
621 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
622 /* struct val */ /* [3] */
623 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
624 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
625 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
626 /* struct bpf_timer */ /* [4] */
627 BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
628 /* struct timer */ /* [5] */
629 BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
630 BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
631 };
632
633 static int load_btf(void)
634 {
635 struct btf_header hdr = {
636 .magic = BTF_MAGIC,
637 .version = BTF_VERSION,
638 .hdr_len = sizeof(struct btf_header),
639 .type_len = sizeof(btf_raw_types),
640 .str_off = sizeof(btf_raw_types),
641 .str_len = sizeof(btf_str_sec),
642 };
643 void *ptr, *raw_btf;
644 int btf_fd;
645
646 ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
647 sizeof(btf_str_sec));
648
649 memcpy(ptr, &hdr, sizeof(hdr));
650 ptr += sizeof(hdr);
651 memcpy(ptr, btf_raw_types, hdr.type_len);
652 ptr += hdr.type_len;
653 memcpy(ptr, btf_str_sec, hdr.str_len);
654 ptr += hdr.str_len;
655
656 btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL);
657 free(raw_btf);
658 if (btf_fd < 0)
659 return -1;
660 return btf_fd;
661 }
662
663 static int create_map_spin_lock(void)
664 {
665 LIBBPF_OPTS(bpf_map_create_opts, opts,
666 .btf_key_type_id = 1,
667 .btf_value_type_id = 3,
668 );
669 int fd, btf_fd;
670
671 btf_fd = load_btf();
672 if (btf_fd < 0)
673 return -1;
674 opts.btf_fd = btf_fd;
675 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
676 if (fd < 0)
677 printf("Failed to create map with spin_lock\n");
678 return fd;
679 }
680
681 static int create_sk_storage_map(void)
682 {
683 LIBBPF_OPTS(bpf_map_create_opts, opts,
684 .map_flags = BPF_F_NO_PREALLOC,
685 .btf_key_type_id = 1,
686 .btf_value_type_id = 3,
687 );
688 int fd, btf_fd;
689
690 btf_fd = load_btf();
691 if (btf_fd < 0)
692 return -1;
693 opts.btf_fd = btf_fd;
694 fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
695 close(opts.btf_fd);
696 if (fd < 0)
697 printf("Failed to create sk_storage_map\n");
698 return fd;
699 }
700
701 static int create_map_timer(void)
702 {
703 LIBBPF_OPTS(bpf_map_create_opts, opts,
704 .btf_key_type_id = 1,
705 .btf_value_type_id = 5,
706 );
707 int fd, btf_fd;
708
709 btf_fd = load_btf();
710 if (btf_fd < 0)
711 return -1;
712
713 opts.btf_fd = btf_fd;
714 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
715 if (fd < 0)
716 printf("Failed to create map with timer\n");
717 return fd;
718 }
719
720 static char bpf_vlog[UINT_MAX >> 8];
721
722 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
723 struct bpf_insn *prog, int *map_fds)
724 {
725 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
726 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
727 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
728 int *fixup_map_array_48b = test->fixup_map_array_48b;
729 int *fixup_map_sockmap = test->fixup_map_sockmap;
730 int *fixup_map_sockhash = test->fixup_map_sockhash;
731 int *fixup_map_xskmap = test->fixup_map_xskmap;
732 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
733 int *fixup_prog1 = test->fixup_prog1;
734 int *fixup_prog2 = test->fixup_prog2;
735 int *fixup_map_in_map = test->fixup_map_in_map;
736 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
737 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
738 int *fixup_map_spin_lock = test->fixup_map_spin_lock;
739 int *fixup_map_array_ro = test->fixup_map_array_ro;
740 int *fixup_map_array_wo = test->fixup_map_array_wo;
741 int *fixup_map_array_small = test->fixup_map_array_small;
742 int *fixup_sk_storage_map = test->fixup_sk_storage_map;
743 int *fixup_map_event_output = test->fixup_map_event_output;
744 int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
745 int *fixup_map_ringbuf = test->fixup_map_ringbuf;
746 int *fixup_map_timer = test->fixup_map_timer;
747
748 if (test->fill_helper) {
749 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
750 test->fill_helper(test);
751 }
752
753 /* Allocating HTs with 1 elem is fine here, since we only exercise
754 * the verifier and never do a runtime lookup, so the only thing
755 * that really matters is the value size in this case.
756 */
757 if (*fixup_map_hash_8b) {
758 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
759 sizeof(long long), 1);
760 do {
761 prog[*fixup_map_hash_8b].imm = map_fds[0];
762 fixup_map_hash_8b++;
763 } while (*fixup_map_hash_8b);
764 }
765
766 if (*fixup_map_hash_48b) {
767 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
768 sizeof(struct test_val), 1);
769 do {
770 prog[*fixup_map_hash_48b].imm = map_fds[1];
771 fixup_map_hash_48b++;
772 } while (*fixup_map_hash_48b);
773 }
774
775 if (*fixup_map_hash_16b) {
776 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
777 sizeof(struct other_val), 1);
778 do {
779 prog[*fixup_map_hash_16b].imm = map_fds[2];
780 fixup_map_hash_16b++;
781 } while (*fixup_map_hash_16b);
782 }
783
784 if (*fixup_map_array_48b) {
785 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
786 sizeof(struct test_val), 1);
787 update_map(map_fds[3], 0);
788 do {
789 prog[*fixup_map_array_48b].imm = map_fds[3];
790 fixup_map_array_48b++;
791 } while (*fixup_map_array_48b);
792 }
793
794 if (*fixup_prog1) {
795 map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
796 do {
797 prog[*fixup_prog1].imm = map_fds[4];
798 fixup_prog1++;
799 } while (*fixup_prog1);
800 }
801
802 if (*fixup_prog2) {
803 map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
804 do {
805 prog[*fixup_prog2].imm = map_fds[5];
806 fixup_prog2++;
807 } while (*fixup_prog2);
808 }
809
810 if (*fixup_map_in_map) {
811 map_fds[6] = create_map_in_map();
812 do {
813 prog[*fixup_map_in_map].imm = map_fds[6];
814 fixup_map_in_map++;
815 } while (*fixup_map_in_map);
816 }
817
818 if (*fixup_cgroup_storage) {
819 map_fds[7] = create_cgroup_storage(false);
820 do {
821 prog[*fixup_cgroup_storage].imm = map_fds[7];
822 fixup_cgroup_storage++;
823 } while (*fixup_cgroup_storage);
824 }
825
826 if (*fixup_percpu_cgroup_storage) {
827 map_fds[8] = create_cgroup_storage(true);
828 do {
829 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
830 fixup_percpu_cgroup_storage++;
831 } while (*fixup_percpu_cgroup_storage);
832 }
833 if (*fixup_map_sockmap) {
834 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
835 sizeof(int), 1);
836 do {
837 prog[*fixup_map_sockmap].imm = map_fds[9];
838 fixup_map_sockmap++;
839 } while (*fixup_map_sockmap);
840 }
841 if (*fixup_map_sockhash) {
842 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
843 sizeof(int), 1);
844 do {
845 prog[*fixup_map_sockhash].imm = map_fds[10];
846 fixup_map_sockhash++;
847 } while (*fixup_map_sockhash);
848 }
849 if (*fixup_map_xskmap) {
850 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
851 sizeof(int), 1);
852 do {
853 prog[*fixup_map_xskmap].imm = map_fds[11];
854 fixup_map_xskmap++;
855 } while (*fixup_map_xskmap);
856 }
857 if (*fixup_map_stacktrace) {
858 map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
859 sizeof(u64), 1);
860 do {
861 prog[*fixup_map_stacktrace].imm = map_fds[12];
862 fixup_map_stacktrace++;
863 } while (*fixup_map_stacktrace);
864 }
865 if (*fixup_map_spin_lock) {
866 map_fds[13] = create_map_spin_lock();
867 do {
868 prog[*fixup_map_spin_lock].imm = map_fds[13];
869 fixup_map_spin_lock++;
870 } while (*fixup_map_spin_lock);
871 }
872 if (*fixup_map_array_ro) {
873 map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
874 sizeof(struct test_val), 1,
875 BPF_F_RDONLY_PROG);
876 update_map(map_fds[14], 0);
877 do {
878 prog[*fixup_map_array_ro].imm = map_fds[14];
879 fixup_map_array_ro++;
880 } while (*fixup_map_array_ro);
881 }
882 if (*fixup_map_array_wo) {
883 map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
884 sizeof(struct test_val), 1,
885 BPF_F_WRONLY_PROG);
886 update_map(map_fds[15], 0);
887 do {
888 prog[*fixup_map_array_wo].imm = map_fds[15];
889 fixup_map_array_wo++;
890 } while (*fixup_map_array_wo);
891 }
892 if (*fixup_map_array_small) {
893 map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
894 1, 1, 0);
895 update_map(map_fds[16], 0);
896 do {
897 prog[*fixup_map_array_small].imm = map_fds[16];
898 fixup_map_array_small++;
899 } while (*fixup_map_array_small);
900 }
901 if (*fixup_sk_storage_map) {
902 map_fds[17] = create_sk_storage_map();
903 do {
904 prog[*fixup_sk_storage_map].imm = map_fds[17];
905 fixup_sk_storage_map++;
906 } while (*fixup_sk_storage_map);
907 }
908 if (*fixup_map_event_output) {
909 map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
910 sizeof(int), sizeof(int), 1, 0);
911 do {
912 prog[*fixup_map_event_output].imm = map_fds[18];
913 fixup_map_event_output++;
914 } while (*fixup_map_event_output);
915 }
916 if (*fixup_map_reuseport_array) {
917 map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
918 sizeof(u32), sizeof(u64), 1, 0);
919 do {
920 prog[*fixup_map_reuseport_array].imm = map_fds[19];
921 fixup_map_reuseport_array++;
922 } while (*fixup_map_reuseport_array);
923 }
924 if (*fixup_map_ringbuf) {
925 map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
926 0, 4096);
927 do {
928 prog[*fixup_map_ringbuf].imm = map_fds[20];
929 fixup_map_ringbuf++;
930 } while (*fixup_map_ringbuf);
931 }
932 if (*fixup_map_timer) {
933 map_fds[21] = create_map_timer();
934 do {
935 prog[*fixup_map_timer].imm = map_fds[21];
936 fixup_map_timer++;
937 } while (*fixup_map_timer);
938 }
939 }
940
941 struct libcap {
942 struct __user_cap_header_struct hdr;
943 struct __user_cap_data_struct data[2];
944 };
945
946 static int set_admin(bool admin)
947 {
948 cap_t caps;
949 /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
950 const cap_value_t cap_net_admin = CAP_NET_ADMIN;
951 const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
952 struct libcap *cap;
953 int ret = -1;
954
955 caps = cap_get_proc();
956 if (!caps) {
957 perror("cap_get_proc");
958 return -1;
959 }
960 cap = (struct libcap *)caps;
961 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
962 perror("cap_set_flag clear admin");
963 goto out;
964 }
965 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
966 admin ? CAP_SET : CAP_CLEAR)) {
967 perror("cap_set_flag set_or_clear net");
968 goto out;
969 }
970 /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
971 * so update effective bits manually
972 */
973 if (admin) {
974 cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
975 cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
976 } else {
977 cap->data[1].effective &= ~(1 << (38 - 32));
978 cap->data[1].effective &= ~(1 << (39 - 32));
979 }
980 if (cap_set_proc(caps)) {
981 perror("cap_set_proc");
982 goto out;
983 }
984 ret = 0;
985 out:
986 if (cap_free(caps))
987 perror("cap_free");
988 return ret;
989 }
990
991 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
992 void *data, size_t size_data)
993 {
994 __u8 tmp[TEST_DATA_LEN << 2];
995 __u32 size_tmp = sizeof(tmp);
996 uint32_t retval;
997 int err, saved_errno;
998
999 if (unpriv)
1000 set_admin(true);
1001 err = bpf_prog_test_run(fd_prog, 1, data, size_data,
1002 tmp, &size_tmp, &retval, NULL);
1003 saved_errno = errno;
1004
1005 if (unpriv)
1006 set_admin(false);
1007
1008 if (err) {
1009 switch (saved_errno) {
1010 case ENOTSUPP:
1011 printf("Did not run the program (not supported) ");
1012 return 0;
1013 case EPERM:
1014 if (unpriv) {
1015 printf("Did not run the program (no permission) ");
1016 return 0;
1017 }
1018 /* fallthrough; */
1019 default:
1020 printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
1021 strerror(saved_errno));
1022 return err;
1023 }
1024 }
1025
1026 if (retval != expected_val &&
1027 expected_val != POINTER_VALUE) {
1028 printf("FAIL retval %d != %d ", retval, expected_val);
1029 return 1;
1030 }
1031
1032 return 0;
1033 }
1034
1035 /* Returns true if every part of exp (tab-separated) appears in log, in order.
1036 *
1037 * If exp is an empty string, returns true.
1038 */
1039 static bool cmp_str_seq(const char *log, const char *exp)
1040 {
1041 char needle[200];
1042 const char *p, *q;
1043 int len;
1044
1045 do {
1046 if (!strlen(exp))
1047 break;
1048 p = strchr(exp, '\t');
1049 if (!p)
1050 p = exp + strlen(exp);
1051
1052 len = p - exp;
1053 if (len >= sizeof(needle) || !len) {
1054 printf("FAIL\nTestcase bug\n");
1055 return false;
1056 }
1057 strncpy(needle, exp, len);
1058 needle[len] = 0;
1059 q = strstr(log, needle);
1060 if (!q) {
1061 printf("FAIL\nUnexpected verifier log!\n"
1062 "EXP: %s\nRES:\n", needle);
1063 return false;
1064 }
1065 log = q + len;
1066 exp = p + 1;
1067 } while (*p);
1068 return true;
1069 }
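/* Example (hypothetical): with exp = "invalid stack\toff=-8", the function
 * returns true only if the log contains "invalid stack" and, somewhere
 * after that match, "off=-8".
 */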
1070
1071 static void do_test_single(struct bpf_test *test, bool unpriv,
1072 int *passes, int *errors)
1073 {
1074 int fd_prog, expected_ret, alignment_prevented_execution;
1075 int prog_len, prog_type = test->prog_type;
1076 struct bpf_insn *prog = test->insns;
1077 LIBBPF_OPTS(bpf_prog_load_opts, opts);
1078 int run_errs, run_successes;
1079 int map_fds[MAX_NR_MAPS];
1080 const char *expected_err;
1081 int saved_errno;
1082 int fixup_skips;
1083 __u32 pflags;
1084 int i, err;
1085
1086 for (i = 0; i < MAX_NR_MAPS; i++)
1087 map_fds[i] = -1;
1088
1089 if (!prog_type)
1090 prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1091 fixup_skips = skips;
1092 do_test_fixup(test, prog_type, prog, map_fds);
1093 if (test->fill_insns) {
1094 prog = test->fill_insns;
1095 prog_len = test->prog_len;
1096 } else {
1097 prog_len = probe_filter_length(prog);
1098 }
1099 /* If there were some map skips during fixup due to missing bpf
1100 * features, skip this test.
1101 */
1102 if (fixup_skips != skips)
1103 return;
1104
1105 pflags = BPF_F_TEST_RND_HI32;
1106 if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
1107 pflags |= BPF_F_STRICT_ALIGNMENT;
1108 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
1109 pflags |= BPF_F_ANY_ALIGNMENT;
1110 if (test->flags & ~3)
1111 pflags |= test->flags;
1112
1113 expected_ret = unpriv && test->result_unpriv != UNDEF ?
1114 test->result_unpriv : test->result;
1115 expected_err = unpriv && test->errstr_unpriv ?
1116 test->errstr_unpriv : test->errstr;
1117
1118 opts.expected_attach_type = test->expected_attach_type;
1119 if (verbose)
1120 opts.log_level = 1;
1121 else if (expected_ret == VERBOSE_ACCEPT)
1122 opts.log_level = 2;
1123 else
1124 opts.log_level = 4;
1125 opts.prog_flags = pflags;
1126
1127 if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
1128 int attach_btf_id;
1129
1130 attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
1131 opts.expected_attach_type);
1132 if (attach_btf_id < 0) {
1133 printf("FAIL\nFailed to find BTF ID for '%s'!\n",
1134 test->kfunc);
1135 (*errors)++;
1136 return;
1137 }
1138
1139 opts.attach_btf_id = attach_btf_id;
1140 }
1141
1142 opts.log_buf = bpf_vlog;
1143 opts.log_size = sizeof(bpf_vlog);
1144 fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
1145 saved_errno = errno;
1146
1147 /* BPF_PROG_TYPE_TRACING requires more setup and
1148 * bpf_probe_prog_type won't give the correct answer
1149 */
1150 if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
1151 !bpf_probe_prog_type(prog_type, 0)) {
1152 printf("SKIP (unsupported program type %d)\n", prog_type);
1153 skips++;
1154 goto close_fds;
1155 }
1156
1157 if (fd_prog < 0 && saved_errno == ENOTSUPP) {
1158 printf("SKIP (program uses an unsupported feature)\n");
1159 skips++;
1160 goto close_fds;
1161 }
1162
1163 alignment_prevented_execution = 0;
1164
1165 if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
1166 if (fd_prog < 0) {
1167 printf("FAIL\nFailed to load prog '%s'!\n",
1168 strerror(saved_errno));
1169 goto fail_log;
1170 }
1171 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1172 if (fd_prog >= 0 &&
1173 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
1174 alignment_prevented_execution = 1;
1175 #endif
1176 if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
1177 goto fail_log;
1178 }
1179 } else {
1180 if (fd_prog >= 0) {
1181 printf("FAIL\nUnexpected success to load!\n");
1182 goto fail_log;
1183 }
1184 if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
1185 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
1186 expected_err, bpf_vlog);
1187 goto fail_log;
1188 }
1189 }
1190
1191 if (!unpriv && test->insn_processed) {
1192 uint32_t insn_processed;
1193 char *proc;
1194
1195 proc = strstr(bpf_vlog, "processed ");
1196 insn_processed = atoi(proc + 10);
1197 if (test->insn_processed != insn_processed) {
1198 printf("FAIL\nUnexpected insn_processed %u vs %u\n",
1199 insn_processed, test->insn_processed);
1200 goto fail_log;
1201 }
1202 }
1203
1204 if (verbose)
1205 printf(", verifier log:\n%s", bpf_vlog);
1206
1207 run_errs = 0;
1208 run_successes = 0;
1209 if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
1210 uint32_t expected_val;
1211 int i;
1212
1213 if (!test->runs)
1214 test->runs = 1;
1215
1216 for (i = 0; i < test->runs; i++) {
1217 if (unpriv && test->retvals[i].retval_unpriv)
1218 expected_val = test->retvals[i].retval_unpriv;
1219 else
1220 expected_val = test->retvals[i].retval;
1221
1222 err = do_prog_test_run(fd_prog, unpriv, expected_val,
1223 test->retvals[i].data,
1224 sizeof(test->retvals[i].data));
1225 if (err) {
1226 printf("(run %d/%d) ", i + 1, test->runs);
1227 run_errs++;
1228 } else {
1229 run_successes++;
1230 }
1231 }
1232 }
1233
1234 if (!run_errs) {
1235 (*passes)++;
1236 if (run_successes > 1)
1237 printf("%d cases ", run_successes);
1238 printf("OK");
1239 if (alignment_prevented_execution)
1240 printf(" (NOTE: not executed due to unknown alignment)");
1241 printf("\n");
1242 } else {
1243 printf("\n");
1244 goto fail_log;
1245 }
1246 close_fds:
1247 if (test->fill_insns)
1248 free(test->fill_insns);
1249 close(fd_prog);
1250 for (i = 0; i < MAX_NR_MAPS; i++)
1251 close(map_fds[i]);
1252 sched_yield();
1253 return;
1254 fail_log:
1255 (*errors)++;
1256 printf("%s", bpf_vlog);
1257 goto close_fds;
1258 }
1259
1260 static bool is_admin(void)
1261 {
1262 cap_flag_value_t net_priv = CAP_CLEAR;
1263 bool perfmon_priv = false;
1264 bool bpf_priv = false;
1265 struct libcap *cap;
1266 cap_t caps;
1267
1268 #ifdef CAP_IS_SUPPORTED
1269 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
1270 perror("cap_get_flag");
1271 return false;
1272 }
1273 #endif
1274 caps = cap_get_proc();
1275 if (!caps) {
1276 perror("cap_get_proc");
1277 return false;
1278 }
1279 cap = (struct libcap *)caps;
1280 bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32));
1281 perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32));
1282 if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
1283 perror("cap_get_flag NET");
1284 if (cap_free(caps))
1285 perror("cap_free");
1286 return bpf_priv && perfmon_priv && net_priv == CAP_SET;
1287 }
1288
1289 static void get_unpriv_disabled()
1290 {
1291 char buf[2];
1292 FILE *fd;
1293
1294 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
1295 if (!fd) {
1296 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
1297 unpriv_disabled = true;
1298 return;
1299 }
1300 if (fgets(buf, 2, fd) == buf && atoi(buf))
1301 unpriv_disabled = true;
1302 fclose(fd);
1303 }
1304
1305 static bool test_as_unpriv(struct bpf_test *test)
1306 {
1307 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1308 /* Some architectures have strict alignment requirements. In
1309 * that case, the BPF verifier detects if a program has
1310 * unaligned accesses and rejects them. A user can pass
1311 * BPF_F_ANY_ALIGNMENT to a program to override this
1312 * check. That, however, will only work when a privileged user
1313 * loads a program. An unprivileged user loading a program
1314 * with this flag will be rejected prior to entering the
1315 * verifier.
1316 */
1317 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
1318 return false;
1319 #endif
1320 return !test->prog_type ||
1321 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
1322 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
1323 }
1324
1325 static int do_test(bool unpriv, unsigned int from, unsigned int to)
1326 {
1327 int i, passes = 0, errors = 0;
1328
1329 for (i = from; i < to; i++) {
1330 struct bpf_test *test = &tests[i];
1331
1332 /* Skip right away program types that are not
1333 * supported for non-root users.
1334 */
1335 if (test_as_unpriv(test) && unpriv_disabled) {
1336 printf("#%d/u %s SKIP\n", i, test->descr);
1337 skips++;
1338 } else if (test_as_unpriv(test)) {
1339 if (!unpriv)
1340 set_admin(false);
1341 printf("#%d/u %s ", i, test->descr);
1342 do_test_single(test, true, &passes, &errors);
1343 if (!unpriv)
1344 set_admin(true);
1345 }
1346
1347 if (unpriv) {
1348 printf("#%d/p %s SKIP\n", i, test->descr);
1349 skips++;
1350 } else {
1351 printf("#%d/p %s ", i, test->descr);
1352 do_test_single(test, false, &passes, &errors);
1353 }
1354 }
1355
1356 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
1357 skips, errors);
1358 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
1359 }
1360
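/* Usage, as parsed in main() below (binary name assumed to be test_verifier):
 *	./test_verifier		run all tests
 *	./test_verifier -v	also print the verifier log for passing tests
 *	./test_verifier 5	run only test #5
 *	./test_verifier 5 10	run tests #5 through #10 inclusive
 */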
1361 int main(int argc, char **argv)
1362 {
1363 unsigned int from = 0, to = ARRAY_SIZE(tests);
1364 bool unpriv = !is_admin();
1365 int arg = 1;
1366
1367 if (argc > 1 && strcmp(argv[1], "-v") == 0) {
1368 arg++;
1369 verbose = true;
1370 argc--;
1371 }
1372
1373 if (argc == 3) {
1374 unsigned int l = atoi(argv[arg]);
1375 unsigned int u = atoi(argv[arg + 1]);
1376
1377 if (l < to && u < to) {
1378 from = l;
1379 to = u + 1;
1380 }
1381 } else if (argc == 2) {
1382 unsigned int t = atoi(argv[arg]);
1383
1384 if (t < to) {
1385 from = t;
1386 to = t + 1;
1387 }
1388 }
1389
1390 get_unpriv_disabled();
1391 if (unpriv && unpriv_disabled) {
1392 printf("Cannot run as unprivileged user with sysctl %s.\n",
1393 UNPRIV_SYSCTL);
1394 return EXIT_FAILURE;
1395 }
1396
1397 /* Use libbpf 1.0 API mode */
1398 libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1399
1400 bpf_semi_rand_init();
1401 return do_test(unpriv, from, to);
1402 }
1403