1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
4 *
5 * Essential Extended Berkeley Packet Filter (eBPF) headers
6 *
7 * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
8 * some eBPF testing without any external dependencies.
9 */
10
11 #ifndef BPF_H
12 # define BPF_H
13
14 #include <stdint.h>
15
16 #include "lapi/syscalls.h"
17
18 /* Start copy from linux/bpf_(common).h */
/* Instruction class: lowest three bits of the opcode */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD		0x00	/* load (immediate / special forms) */
#define BPF_LDX		0x01	/* load from memory into register */
#define BPF_ST		0x02	/* store immediate to memory */
#define BPF_STX		0x03	/* store register to memory */
#define BPF_ALU		0x04	/* 32-bit arithmetic */
#define BPF_JMP		0x05	/* jump instructions */

#define BPF_JNE		0x50	/* jump != */

/* Operand width for load/store instructions (bits 3-4 of the opcode) */
#define BPF_SIZE(code)	((code) & 0x18)
#define BPF_W		0x00	/* 32-bit */
#define BPF_DW		0x18	/* double word (64-bit) */

/* Addressing mode for load/store instructions (top three opcode bits) */
#define BPF_MODE(code)	((code) & 0xe0)
#define BPF_IMM		0x00	/* immediate operand */
#define BPF_MEM		0x60	/* memory operand (register + offset) */

/* ALU/JMP operation selector (top four opcode bits) */
#define BPF_OP(code)	((code) & 0xf0)
#define BPF_ADD		0x00
#define BPF_SUB		0x10
#define BPF_LSH		0x60	/* shift left */
#define BPF_RSH		0x70	/* shift right (logical) */

#define BPF_JEQ		0x10	/* jump == */

/* Operand source: immediate constant (K) or source register (X) */
#define BPF_SRC(code)	((code) & 0x08)
#define BPF_K		0x00
#define BPF_X		0x08

/* eBPF extensions to the classic BPF opcode space */
#define BPF_ALU64	0x07	/* alu mode in double word width */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */
53
54 /* Register numbers */
/* Register numbers.
 * Per the kernel eBPF ABI: R0 holds function/exit return values, R1-R5
 * carry helper-call arguments, R6-R9 are callee-saved, and R10 is the
 * read-only frame pointer.
 */
enum {
	BPF_REG_0 = 0,	/* return value */
	BPF_REG_1,	/* argument 1 / context pointer on entry */
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,	/* callee-saved */
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,	/* read-only frame pointer */
	MAX_BPF_REG,
};
69
/* A single 8-byte eBPF instruction; layout mirrors struct bpf_insn in
 * linux/bpf.h and must not be changed.
 */
struct bpf_insn {
	uint8_t code;		/* opcode */
	uint8_t dst_reg:4;	/* dest register */
	uint8_t src_reg:4;	/* source register */
	int16_t off;		/* signed offset */
	int32_t imm;		/* signed immediate constant */
};
77
/* First argument to the bpf() syscall: selects the operation and thus
 * which member of union bpf_attr is read. Ordinal values are kernel ABI;
 * do not reorder. Mirrors linux/bpf.h up to BPF_MAP_FREEZE (v5.2).
 */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
	BPF_TASK_FD_QUERY,
	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
	BPF_MAP_FREEZE,
};
103
/* Map types accepted in bpf_attr.map_type for BPF_MAP_CREATE.
 * Ordinal values are kernel ABI; do not reorder.
 */
enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
	BPF_MAP_TYPE_SOCKHASH,
	BPF_MAP_TYPE_CGROUP_STORAGE,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
};
131
/* Program types accepted in bpf_attr.prog_type for BPF_PROG_LOAD.
 * Ordinal values are kernel ABI; do not reorder.
 */
enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	BPF_PROG_TYPE_LWT_SEG6LOCAL,
	BPF_PROG_TYPE_LIRC_MODE2,
	BPF_PROG_TYPE_SK_REUSEPORT,
	BPF_PROG_TYPE_FLOW_DISSECTOR,
	BPF_PROG_TYPE_CGROUP_SYSCTL,
	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
	BPF_PROG_TYPE_CGROUP_SOCKOPT,
};
160
/* src_reg value marking a ld_imm64 whose imm is a process-local map fd */
#define BPF_PSEUDO_MAP_FD	1

/* Size of map_name/prog_name buffers (includes the terminating NUL) */
#define BPF_OBJ_NAME_LEN	16U

/* Flags for BPF_MAP_UPDATE_ELEM */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */
#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */

/* 64-bit fields in union bpf_attr must be 8-byte aligned on all arches */
#define aligned_uint64_t uint64_t __attribute__((aligned(8)))
171
/* Second argument to the bpf() syscall. The member the kernel reads is
 * selected by the bpf_cmd; layout mirrors union bpf_attr in linux/bpf.h
 * and must not be changed.
 */
union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		uint32_t map_type;	/* one of enum bpf_map_type */
		uint32_t key_size;	/* size of key in bytes */
		uint32_t value_size;	/* size of value in bytes */
		uint32_t max_entries;	/* max number of entries in a map */
		uint32_t map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		uint32_t inner_map_fd;	/* fd pointing to the inner map */
		uint32_t numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char map_name[BPF_OBJ_NAME_LEN];
		uint32_t map_ifindex;	/* ifindex of netdev to create on */
		uint32_t btf_fd;	/* fd pointing to a BTF type data */
		uint32_t btf_key_type_id;	/* BTF type_id of the key */
		uint32_t btf_value_type_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		uint32_t map_fd;
		aligned_uint64_t key;	/* user pointer to the key */
		union {
			aligned_uint64_t value;		/* user pointer */
			aligned_uint64_t next_key;	/* user pointer */
		};
		uint64_t flags;		/* BPF_ANY/BPF_NOEXIST/BPF_EXIST */
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		uint32_t prog_type;	/* one of enum bpf_prog_type */
		uint32_t insn_cnt;
		aligned_uint64_t insns;		/* ptr to bpf_insn array */
		aligned_uint64_t license;	/* ptr to license string */
		uint32_t log_level;	/* verbosity level of verifier */
		uint32_t log_size;	/* size of user buffer */
		aligned_uint64_t log_buf;	/* user supplied buffer */
		uint32_t kern_version;	/* not used */
		uint32_t prog_flags;
		char prog_name[BPF_OBJ_NAME_LEN];
		uint32_t prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		uint32_t expected_attach_type;
		uint32_t prog_btf_fd;	/* fd pointing to BTF type data */
		uint32_t func_info_rec_size;	/* userspace bpf_func_info size */
		aligned_uint64_t func_info;	/* func info */
		uint32_t func_info_cnt;	/* number of bpf_func_info records */
		uint32_t line_info_rec_size;	/* userspace bpf_line_info size */
		aligned_uint64_t line_info;	/* line info */
		uint32_t line_info_cnt;	/* number of bpf_line_info records */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		aligned_uint64_t pathname;
		uint32_t bpf_fd;
		uint32_t file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		uint32_t target_fd;	/* container object to attach to */
		uint32_t attach_bpf_fd;	/* eBPF program to attach */
		uint32_t attach_type;
		uint32_t attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		uint32_t prog_fd;
		uint32_t retval;
		uint32_t data_size_in;	/* input: len of data_in */
		uint32_t data_size_out;	/* input/output: len of data_out
					 * returns ENOSPC if data_out
					 * is too small.
					 */
		aligned_uint64_t data_in;
		aligned_uint64_t data_out;
		uint32_t repeat;
		uint32_t duration;
		uint32_t ctx_size_in;	/* input: len of ctx_in */
		uint32_t ctx_size_out;	/* input/output: len of ctx_out
					 * returns ENOSPC if ctx_out
					 * is too small.
					 */
		aligned_uint64_t ctx_in;
		aligned_uint64_t ctx_out;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			uint32_t start_id;
			uint32_t prog_id;
			uint32_t map_id;
			uint32_t btf_id;
		};
		uint32_t next_id;
		uint32_t open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		uint32_t bpf_fd;
		uint32_t info_len;
		aligned_uint64_t info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		uint32_t target_fd;	/* container object to query */
		uint32_t attach_type;
		uint32_t query_flags;
		uint32_t attach_flags;
		aligned_uint64_t prog_ids;
		uint32_t prog_cnt;
	} query;

	struct { /* used by BPF_RAW_TRACEPOINT_OPEN */
		uint64_t name;		/* ptr to tracepoint name string */
		uint32_t prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		aligned_uint64_t btf;
		aligned_uint64_t btf_log_buf;
		uint32_t btf_size;
		uint32_t btf_log_size;
		uint32_t btf_log_level;
	};

	struct { /* used by BPF_TASK_FD_QUERY */
		uint32_t pid;		/* input: pid */
		uint32_t fd;		/* input: fd */
		uint32_t flags;		/* input: flags */
		uint32_t buf_len;	/* input/output: buf len */
		aligned_uint64_t buf;	/* input/output:
					 * tp_name for tracepoint
					 * symbol for kprobe
					 * filename for uprobe
					 */
		uint32_t prog_id;	/* output: prog_id */
		uint32_t fd_type;	/* output: BPF_FD_TYPE_* */
		uint64_t probe_offset;	/* output: probe_offset */
		uint64_t probe_addr;	/* output: probe_addr */
	} task_fd_query;
} __attribute__((aligned(8)));
317
/* X-macro listing every eBPF helper known to this header. The position of
 * each entry defines its helper ID, so entries must never be reordered or
 * removed; expanded below to generate enum bpf_func_id.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),			\
	FN(skb_adjust_room),		\
	FN(redirect_map),		\
	FN(sk_redirect_map),		\
	FN(sock_map_update),		\
	FN(xdp_adjust_meta),		\
	FN(perf_event_read_value),	\
	FN(perf_prog_read_value),	\
	FN(getsockopt),			\
	FN(override_return),		\
	FN(sock_ops_cb_flags_set),	\
	FN(msg_redirect_map),		\
	FN(msg_apply_bytes),		\
	FN(msg_cork_bytes),		\
	FN(msg_pull_data),		\
	FN(bind),			\
	FN(xdp_adjust_tail),		\
	FN(skb_get_xfrm_state),		\
	FN(get_stack),			\
	FN(skb_load_bytes_relative),	\
	FN(fib_lookup),			\
	FN(sock_hash_update),		\
	FN(msg_redirect_hash),		\
	FN(sk_redirect_hash),		\
	FN(lwt_push_encap),		\
	FN(lwt_seg6_store_bytes),	\
	FN(lwt_seg6_adjust_srh),	\
	FN(lwt_seg6_action),		\
	FN(rc_repeat),			\
	FN(rc_keydown),			\
	FN(skb_cgroup_id),		\
	FN(get_current_cgroup_id),	\
	FN(get_local_storage),		\
	FN(sk_select_reuseport),	\
	FN(skb_ancestor_cgroup_id),	\
	FN(sk_lookup_tcp),		\
	FN(sk_lookup_udp),		\
	FN(sk_release),			\
	FN(map_push_elem),		\
	FN(map_pop_elem),		\
	FN(map_peek_elem),		\
	FN(msg_push_data),		\
	FN(msg_pop_data),		\
	FN(rc_pointer_rel),		\
	FN(spin_lock),			\
	FN(spin_unlock),		\
	FN(sk_fullsock),		\
	FN(tcp_sock),			\
	FN(skb_ecn_set_ce),		\
	FN(get_listener_sock),		\
	FN(skc_lookup_tcp),		\
	FN(tcp_check_syncookie),	\
	FN(sysctl_get_name),		\
	FN(sysctl_get_current_value),	\
	FN(sysctl_get_new_value),	\
	FN(sysctl_set_new_value),	\
	FN(strtol),			\
	FN(strtoul),			\
	FN(sk_storage_get),		\
	FN(sk_storage_delete),		\
	FN(send_signal),
429
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
/* Generates BPF_FUNC_unspec = 0, BPF_FUNC_map_lookup_elem = 1, ... */
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
439
440 /* End copy from linux/bpf.h */
441
442 /* Start copy from tools/include/filter.h */
443
/* ALU op on 64-bit registers: DST = DST <OP> SRC */
#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU op with 32-bit immediate: DST = DST <OP> IMM (64-bit width) */
#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Register-to-register move: DST = SRC (64-bit) */
#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Load a 64-bit immediate into DST (expands to two instructions) */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

/* Two-instruction ld_imm64 encoding: first insn carries the low 32 bits
 * of IMM (and the SRC pseudo-tag), the second carries the high 32 bits
 * in an otherwise-zero slot (zero is a reserved opcode).
 */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (uint32_t) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((uint64_t) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Store immediate to memory: *(SIZE *)(DST + OFF) = IMM */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Load from memory: DST = *(SIZE *)(SRC + OFF) */
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Store register to memory: *(SIZE *)(DST + OFF) = SRC */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jump with immediate: if (DST <OP> IMM) goto pc + OFF */
#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Move a 32-bit immediate into DST (64-bit mov) */
#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Move a 32-bit immediate into DST (32-bit mov) */
#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Call an eBPF helper; imm is the helper ID relative to BPF_FUNC_unspec */
#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - BPF_FUNC_unspec) })

/* Program exit; return value is taken from R0 */
#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
552
553 /* End copy from tools/include/filter.h */
554
555 /* Start copy from tools/lib/bpf */
/* Convert a user-space pointer to the uint64_t representation expected by
 * union bpf_attr fields (insns, key, value, log_buf, ...).
 *
 * 'static' added: a plain 'inline' function at file scope in a header is
 * only an inline definition in C99/C11 — if the compiler chooses not to
 * inline a call (e.g. at -O0), it references an external definition that
 * no translation unit provides, causing link failures.
 */
static inline uint64_t ptr_to_u64(const void *ptr)
{
	/* Cast via unsigned long so 32-bit pointers zero-extend cleanly */
	return (uint64_t) (unsigned long) ptr;
}
560
bpf(enum bpf_cmd cmd,union bpf_attr * attr,unsigned int size)561 inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
562 {
563 return tst_syscall(__NR_bpf, cmd, attr, size);
564 }
565 /* End copy from tools/lib/bpf */
566
567 #endif /* BPF_H */
568