// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
 *
 * Essential Extended Berkeley Packet Filter (eBPF) headers
 *
 * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
 * some eBPF testing without any external dependencies.
 */

#ifndef BPF_H
# define BPF_H

#include <stdint.h>

#include "lapi/syscalls.h"

/* Start copy from linux/bpf_(common).h */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD   0x00
#define BPF_LDX  0x01
#define BPF_ST   0x02
#define BPF_STX  0x03
#define BPF_ALU  0x04
#define BPF_JMP  0x05

#define BPF_JNE  0x50 /* jump != */

#define BPF_SIZE(code) ((code) & 0x18)
#define BPF_W    0x00 /* 32-bit */
#define BPF_DW   0x18 /* double word (64-bit) */

#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM  0x00
#define BPF_MEM  0x60

#define BPF_OP(code) ((code) & 0xf0)
#define BPF_ADD  0x00
#define BPF_SUB  0x10
#define BPF_DIV  0x30
#define BPF_LSH  0x60
#define BPF_RSH  0x70
#define BPF_MOD  0x90

#define BPF_JEQ  0x10

#define BPF_SRC(code) ((code) & 0x08)
#define BPF_K    0x00
#define BPF_X    0x08

#define BPF_ALU64 0x07 /* alu mode in double word width */
#define BPF_MOV   0xb0 /* mov reg to reg */
#define BPF_CALL  0x80 /* function call */
#define BPF_EXIT  0x90 /* function return */
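
/*
 * Worked example (ours, not part of the kernel header): an opcode byte is the
 * OR of an instruction class, an operation and a source selector from the
 * values above. A 64-bit register-to-register move, for instance, is
 *
 *	BPF_ALU64 | BPF_MOV | BPF_X  ==  0x07 | 0xb0 | 0x08  ==  0xbf
 *
 * The BPF_*() instruction macros towards the end of this header compose the
 * opcode the same way.
 */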

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	MAX_BPF_REG,
};

struct bpf_insn {
	uint8_t code;      /* opcode */
	uint8_t dst_reg:4; /* dest register */
	uint8_t src_reg:4; /* source register */
	int16_t off;       /* signed offset */
	int32_t imm;       /* signed immediate constant */
};
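
/*
 * Sketch (ours, not from bpf.h): an instruction can be written out field by
 * field before the helper macros below are available. "mov64 r0, 0" would be:
 *
 *	const struct bpf_insn set_r0 = {
 *		.code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		.dst_reg = BPF_REG_0,
 *		.src_reg = 0,
 *		.off = 0,
 *		.imm = 0,
 *	};
 *
 * BPF_MOV64_IMM() near the end of this header expands to exactly this kind
 * of compound literal.
 */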

enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
	BPF_MAP_FREEZE,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
	BPF_MAP_TYPE_CGROUP_STORAGE,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
	BPF_PROG_TYPE_SK_REUSEPORT,
	BPF_PROG_TYPE_FLOW_DISSECTOR,
	BPF_PROG_TYPE_CGROUP_SYSCTL,
	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	BPF_PROG_TYPE_CGROUP_SOCKOPT,
};

#define BPF_PSEUDO_MAP_FD 1

#define BPF_OBJ_NAME_LEN 16U

#define BPF_ANY     0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST   2 /* update existing element */
#define BPF_F_LOCK  4 /* spin_lock-ed map_lookup/map_update */

#define aligned_uint64_t uint64_t __attribute__((aligned(8)))

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		uint32_t map_type;    /* one of enum bpf_map_type */
		uint32_t key_size;    /* size of key in bytes */
		uint32_t value_size;  /* size of value in bytes */
		uint32_t max_entries; /* max number of entries in a map */
		uint32_t map_flags;   /* BPF_MAP_CREATE related
				       * flags defined above.
				       */
		uint32_t inner_map_fd; /* fd pointing to the inner map */
		uint32_t numa_node;   /* numa node (effective only if
				       * BPF_F_NUMA_NODE is set).
				       */
		char map_name[BPF_OBJ_NAME_LEN];
		uint32_t map_ifindex; /* ifindex of netdev to create on */
		uint32_t btf_fd;      /* fd pointing to a BTF type data */
		uint32_t btf_key_type_id;   /* BTF type_id of the key */
		uint32_t btf_value_type_id; /* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		uint32_t map_fd;
		aligned_uint64_t key;
		union {
			aligned_uint64_t value;
			aligned_uint64_t next_key;
		};
		uint64_t flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		uint32_t prog_type; /* one of enum bpf_prog_type */
		uint32_t insn_cnt;
		aligned_uint64_t insns;
		aligned_uint64_t license;
		uint32_t log_level; /* verbosity level of verifier */
		uint32_t log_size;  /* size of user buffer */
		aligned_uint64_t log_buf; /* user supplied buffer */
		uint32_t kern_version; /* not used */
		uint32_t prog_flags;
		char prog_name[BPF_OBJ_NAME_LEN];
		uint32_t prog_ifindex; /* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		uint32_t expected_attach_type;
		uint32_t prog_btf_fd; /* fd pointing to BTF type data */
		uint32_t func_info_rec_size; /* userspace bpf_func_info size */
		aligned_uint64_t func_info;  /* func info */
		uint32_t func_info_cnt;      /* number of bpf_func_info records */
		uint32_t line_info_rec_size; /* userspace bpf_line_info size */
		aligned_uint64_t line_info;  /* line info */
		uint32_t line_info_cnt;      /* number of bpf_line_info records */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		aligned_uint64_t pathname;
		uint32_t bpf_fd;
		uint32_t file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		uint32_t target_fd;     /* container object to attach to */
		uint32_t attach_bpf_fd; /* eBPF program to attach */
		uint32_t attach_type;
		uint32_t attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		uint32_t prog_fd;
		uint32_t retval;
		uint32_t data_size_in;  /* input: len of data_in */
		uint32_t data_size_out; /* input/output: len of data_out
					 * returns ENOSPC if data_out
					 * is too small.
					 */
		aligned_uint64_t data_in;
		aligned_uint64_t data_out;
		uint32_t repeat;
		uint32_t duration;
		uint32_t ctx_size_in;  /* input: len of ctx_in */
		uint32_t ctx_size_out; /* input/output: len of ctx_out
					* returns ENOSPC if ctx_out
					* is too small.
					*/
		aligned_uint64_t ctx_in;
		aligned_uint64_t ctx_out;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			uint32_t start_id;
			uint32_t prog_id;
			uint32_t map_id;
			uint32_t btf_id;
		};
		uint32_t next_id;
		uint32_t open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		uint32_t bpf_fd;
		uint32_t info_len;
		aligned_uint64_t info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		uint32_t target_fd; /* container object to query */
		uint32_t attach_type;
		uint32_t query_flags;
		uint32_t attach_flags;
		aligned_uint64_t prog_ids;
		uint32_t prog_cnt;
	} query;

	struct {
		uint64_t name;
		uint32_t prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		aligned_uint64_t btf;
		aligned_uint64_t btf_log_buf;
		uint32_t btf_size;
		uint32_t btf_log_size;
		uint32_t btf_log_level;
	};

	struct {
		uint32_t pid;     /* input: pid */
		uint32_t fd;      /* input: fd */
		uint32_t flags;   /* input: flags */
		uint32_t buf_len; /* input/output: buf len */
		aligned_uint64_t buf; /* input/output:
				       * tp_name for tracepoint
				       * symbol for kprobe
				       * filename for uprobe
				       */
		uint32_t prog_id; /* output: prog_id */
		uint32_t fd_type; /* output: BPF_FD_TYPE_* */
		uint64_t probe_offset; /* output: probe_offset */
		uint64_t probe_addr;   /* output: probe_addr */
	} task_fd_query;
} __attribute__((aligned(8)));
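
/*
 * Usage sketch (ours, not from bpf.h): only the fields of the anonymous
 * struct matching the command need to be set; the rest of the union can stay
 * zeroed. A one-element array map for BPF_MAP_CREATE could be described as:
 *
 *	union bpf_attr attr = {
 *		.map_type = BPF_MAP_TYPE_ARRAY,
 *		.key_size = 4,
 *		.value_size = 8,
 *		.max_entries = 1,
 *		.map_name = "bpf_array",
 *	};
 *
 * (array map keys are always 32-bit indices, hence key_size = 4). attr is
 * then passed to the bpf() wrapper defined at the end of this header.
 */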

#define __BPF_FUNC_MAPPER(FN) \
	FN(unspec), \
	FN(map_lookup_elem), \
	FN(map_update_elem), \
	FN(map_delete_elem), \
	FN(probe_read), \
	FN(ktime_get_ns), \
	FN(trace_printk), \
	FN(get_prandom_u32), \
	FN(get_smp_processor_id), \
	FN(skb_store_bytes), \
	FN(l3_csum_replace), \
	FN(l4_csum_replace), \
	FN(tail_call), \
	FN(clone_redirect), \
	FN(get_current_pid_tgid), \
	FN(get_current_uid_gid), \
	FN(get_current_comm), \
	FN(get_cgroup_classid), \
	FN(skb_vlan_push), \
	FN(skb_vlan_pop), \
	FN(skb_get_tunnel_key), \
	FN(skb_set_tunnel_key), \
	FN(perf_event_read), \
	FN(redirect), \
	FN(get_route_realm), \
	FN(perf_event_output), \
	FN(skb_load_bytes), \
	FN(get_stackid), \
	FN(csum_diff), \
	FN(skb_get_tunnel_opt), \
	FN(skb_set_tunnel_opt), \
	FN(skb_change_proto), \
	FN(skb_change_type), \
	FN(skb_under_cgroup), \
	FN(get_hash_recalc), \
	FN(get_current_task), \
	FN(probe_write_user), \
	FN(current_task_under_cgroup), \
	FN(skb_change_tail), \
	FN(skb_pull_data), \
	FN(csum_update), \
	FN(set_hash_invalid), \
	FN(get_numa_node_id), \
	FN(skb_change_head), \
	FN(xdp_adjust_head), \
	FN(probe_read_str), \
	FN(get_socket_cookie), \
	FN(get_socket_uid), \
	FN(set_hash), \
	FN(setsockopt), \
	FN(skb_adjust_room), \
	FN(redirect_map), \
	FN(sk_redirect_map), \
	FN(sock_map_update), \
	FN(xdp_adjust_meta), \
	FN(perf_event_read_value), \
	FN(perf_prog_read_value), \
	FN(getsockopt), \
	FN(override_return), \
	FN(sock_ops_cb_flags_set), \
	FN(msg_redirect_map), \
	FN(msg_apply_bytes), \
	FN(msg_cork_bytes), \
	FN(msg_pull_data), \
	FN(bind), \
	FN(xdp_adjust_tail), \
	FN(skb_get_xfrm_state), \
	FN(get_stack), \
	FN(skb_load_bytes_relative), \
	FN(fib_lookup), \
	FN(sock_hash_update), \
	FN(msg_redirect_hash), \
	FN(sk_redirect_hash), \
	FN(lwt_push_encap), \
	FN(lwt_seg6_store_bytes), \
	FN(lwt_seg6_adjust_srh), \
	FN(lwt_seg6_action), \
	FN(rc_repeat), \
	FN(rc_keydown), \
	FN(skb_cgroup_id), \
	FN(get_current_cgroup_id), \
	FN(get_local_storage), \
	FN(sk_select_reuseport), \
	FN(skb_ancestor_cgroup_id), \
	FN(sk_lookup_tcp), \
	FN(sk_lookup_udp), \
	FN(sk_release), \
	FN(map_push_elem), \
	FN(map_pop_elem), \
	FN(map_peek_elem), \
	FN(msg_push_data), \
	FN(msg_pop_data), \
	FN(rc_pointer_rel), \
	FN(spin_lock), \
	FN(spin_unlock), \
	FN(sk_fullsock), \
	FN(tcp_sock), \
	FN(skb_ecn_set_ce), \
	FN(get_listener_sock), \
	FN(skc_lookup_tcp), \
	FN(tcp_check_syncookie), \
	FN(sysctl_get_name), \
	FN(sysctl_get_current_value), \
	FN(sysctl_get_new_value), \
	FN(sysctl_set_new_value), \
	FN(strtol), \
	FN(strtoul), \
	FN(sk_storage_get), \
	FN(sk_storage_delete), \
	FN(send_signal),

/* The integer value in the 'imm' field of a BPF_CALL instruction selects
 * which helper function the eBPF program intends to call.
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
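
/*
 * Sketch (ours): a helper call is a BPF_JMP | BPF_CALL instruction whose imm
 * field holds one of the enum bpf_func_id values above, e.g.
 * imm = BPF_FUNC_map_lookup_elem. The BPF_EMIT_CALL() macro further down
 * fills this in.
 */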

/* End copy from linux/bpf.h */

/* Start copy from tools/include/filter.h */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = 0 })

#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = 0, \
		.imm = (uint32_t) (IMM) }), \
	((struct bpf_insn) { \
		.code = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((uint64_t) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off = OFF, \
		.imm = 0 })

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = OFF, \
		.imm = IMM })

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off = 0, \
		.imm = IMM })

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = ((FUNC) - BPF_FUNC_unspec) })

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off = 0, \
		.imm = 0 })

/* End copy from tools/include/filter.h */
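
/*
 * Program sketch (ours, not from filter.h): the smallest useful eBPF program,
 * "return 0", built from the macros above:
 *
 *	const struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * R0 holds the return value and the last instruction must be BPF_EXIT_INSN(),
 * otherwise the verifier rejects the program.
 */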

/* Start copy from tools/lib/bpf */
static inline uint64_t ptr_to_u64(const void *ptr)
{
	return (uint64_t) (unsigned long) ptr;
}

static inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return tst_syscall(__NR_bpf, cmd, attr, size);
}
/* End copy from tools/lib/bpf */
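
/*
 * Usage sketch (ours, not from libbpf): loading the two-instruction prog[]
 * from the sketch above through the wrapper:
 *
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns = ptr_to_u64(prog),
 *		.insn_cnt = 2,
 *		.license = ptr_to_u64("GPL"),
 *	};
 *	int prog_fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * A negative return value means the load failed with errno set; tst_syscall()
 * ends the test with TCONF if the kernel lacks the bpf() syscall entirely.
 */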

#endif /* BPF_H */