/*
 * Copyright (c) 2015 PLUMgrid, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* eBPF mini library */

#ifndef LIBBPF_H
#define LIBBPF_H

#include "linux/bpf.h"
#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

struct bpf_create_map_attr;
struct bpf_load_program_attr;

enum bpf_probe_attach_type {
	BPF_PROBE_ENTRY,
	BPF_PROBE_RETURN
};

struct bcc_perf_buffer_opts {
  int pid;
  int cpu;
  int wakeup_events;
};

int bcc_create_map(enum bpf_map_type map_type, const char *name,
                   int key_size, int value_size, int max_entries,
                   int map_flags);
int bcc_create_map_xattr(struct bpf_create_map_attr *attr, bool allow_rlimit);
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_first_key(int fd, void *key, size_t key_size);
int bpf_get_next_key(int fd, void *key, void *next_key);
int bpf_lookup_and_delete(int fd, void *key, void *value);

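/*
 * Usage sketch for the element accessors above (a sketch only; assumes
 * map_fd refers to a hash map created with bcc_create_map using int keys
 * and long values; BPF_ANY comes from linux/bpf.h):
 *
 *   int key = 42;
 *   long value = 0;
 *   if (bpf_update_elem(map_fd, &key, &value, BPF_ANY) < 0)
 *       perror("bpf_update_elem");
 *   if (bpf_lookup_elem(map_fd, &key, &value) == 0)
 *       printf("key %d -> value %ld\n", key, value);
 */
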
/*
 * Load a BPF program and return the FD of the loaded program.
 *
 * On newer kernels, the parameter name is used to identify the loaded program
 * for inspection and debugging. It may differ from the function name.
 *
 * If log_level is greater than 0, or if the load failed, extra logging of the
 * loaded BPF bytecode and register state is enabled, and the log messages are
 * printed to stderr. In such cases:
 *   - If log_buf and log_buf_size are provided, the log messages are also
 *     written to the provided log_buf. If log_buf is too small, no additional
 *     memory is allocated.
 *   - Otherwise, an internal temporary buffer is allocated for the log
 *     messages, and its size is increased repeatedly if the initial
 *     allocation turns out to be too small.
 */
int bcc_prog_load(enum bpf_prog_type prog_type, const char *name,
                  const struct bpf_insn *insns, int prog_len,
                  const char *license, unsigned kern_version,
                  int log_level, char *log_buf, unsigned log_buf_size);
int bcc_prog_load_xattr(struct bpf_load_program_attr *attr,
                        int prog_len, char *log_buf,
                        unsigned log_buf_size, bool allow_rlimit);
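
/*
 * Loading sketch with verifier logging enabled (illustrative only: insns and
 * insn_cnt are assumed to hold a prepared instruction array, and prog_len is
 * taken here as the array size in bytes; LOG_BUF_SIZE is defined later in
 * this header):
 *
 *   char log_buf[LOG_BUF_SIZE];
 *   int prog_fd = bcc_prog_load(BPF_PROG_TYPE_KPROBE, "my_probe", insns,
 *                               insn_cnt * sizeof(struct bpf_insn), "GPL",
 *                               0, 1, log_buf, sizeof(log_buf));
 *   if (prog_fd < 0)
 *       fprintf(stderr, "load failed:\n%s\n", log_buf);
 */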

int bpf_attach_socket(int sockfd, int progfd);

/* Create a RAW socket. If 'name' is a non-empty, null-terminated string,
 * bind the raw socket to the interface 'name'. */
int bpf_open_raw_sock(const char *name);
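
/*
 * Example sketch: open a raw socket bound to a (hypothetical) interface
 * "eth0" and attach an already-loaded socket filter program to it:
 *
 *   int sock_fd = bpf_open_raw_sock("eth0");
 *   if (sock_fd < 0 || bpf_attach_socket(sock_fd, prog_fd) < 0)
 *       perror("raw socket attach");
 */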

typedef void (*perf_reader_raw_cb)(void *cb_cookie, void *raw, int raw_size);
typedef void (*perf_reader_lost_cb)(void *cb_cookie, uint64_t lost);

int bpf_attach_kprobe(int progfd, enum bpf_probe_attach_type attach_type,
                      const char *ev_name, const char *fn_name, uint64_t fn_offset,
                      int maxactive);
int bpf_detach_kprobe(const char *ev_name);
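
/*
 * Example sketch: attach a loaded BPF_PROG_TYPE_KPROBE program to the entry
 * of a kernel function (the event and function names below are illustrative):
 *
 *   int ret = bpf_attach_kprobe(prog_fd, BPF_PROBE_ENTRY, "p_do_sys_open",
 *                               "do_sys_open", 0, 0);  // fn_offset, maxactive
 *   ...
 *   bpf_detach_kprobe("p_do_sys_open");
 */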

int bpf_attach_uprobe(int progfd, enum bpf_probe_attach_type attach_type,
                      const char *ev_name, const char *binary_path,
                      uint64_t offset, pid_t pid, uint32_t ref_ctr_offset);
int bpf_detach_uprobe(const char *ev_name);

int bpf_attach_tracepoint(int progfd, const char *tp_category,
                          const char *tp_name);
int bpf_detach_tracepoint(const char *tp_category, const char *tp_name);

int bpf_attach_raw_tracepoint(int progfd, const char *tp_name);

int bpf_attach_kfunc(int prog_fd);

int bpf_attach_lsm(int prog_fd);

bool bpf_has_kernel_btf(void);

int kernel_struct_has_field(const char *struct_name, const char *field_name);

void * bpf_open_perf_buffer(perf_reader_raw_cb raw_cb,
                            perf_reader_lost_cb lost_cb, void *cb_cookie,
                            int pid, int cpu, int page_cnt);

void * bpf_open_perf_buffer_opts(perf_reader_raw_cb raw_cb,
                            perf_reader_lost_cb lost_cb, void *cb_cookie,
                            int page_cnt, struct bcc_perf_buffer_opts *opts);
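
/*
 * Example sketch: open a perf buffer for one CPU and receive raw samples via
 * the callbacks (polling the returned reader handle is done through bcc's
 * separate perf reader API and is omitted here; page_cnt is typically a
 * power of two):
 *
 *   static void on_sample(void *cb_cookie, void *raw, int raw_size) {
 *       // consume raw_size bytes starting at raw
 *   }
 *   static void on_lost(void *cb_cookie, uint64_t lost) {
 *       fprintf(stderr, "lost %llu samples\n", (unsigned long long)lost);
 *   }
 *   void *reader = bpf_open_perf_buffer(on_sample, on_lost, NULL, -1, 0, 8);
 */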

/* attach the prog referred to by progfd to the device specified by dev_name */
int bpf_attach_xdp(const char *dev_name, int progfd, uint32_t flags);
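
/*
 * Example sketch: attach an XDP program to a (hypothetical) interface and
 * detach it again later; in bcc, passing -1 as progfd removes the currently
 * attached program:
 *
 *   if (bpf_attach_xdp("eth0", prog_fd, 0) < 0)
 *       perror("bpf_attach_xdp");
 *   ...
 *   bpf_attach_xdp("eth0", -1, 0);
 */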

// attach a prog expressed by progfd to run on a specific perf event. The perf
// event will be created using the perf_event_attr pointer provided.
int bpf_attach_perf_event_raw(int progfd, void *perf_event_attr, pid_t pid,
                              int cpu, int group_fd, unsigned long extra_flags);
// attach a prog expressed by progfd to run on a specific perf event, with a
// given sample period or sample frequency
int bpf_attach_perf_event(int progfd, uint32_t ev_type, uint32_t ev_config,
                          uint64_t sample_period, uint64_t sample_freq,
                          pid_t pid, int cpu, int group_fd);
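
/*
 * Example sketch: run a BPF_PROG_TYPE_PERF_EVENT program on a 99 Hz CPU-clock
 * sampling event on CPU 0 (repeat per CPU to cover the whole system; the
 * PERF_* constants come from <linux/perf_event.h>, and exactly one of
 * sample_period / sample_freq should be non-zero):
 *
 *   int ev_fd = bpf_attach_perf_event(prog_fd, PERF_TYPE_SOFTWARE,
 *                                     PERF_COUNT_SW_CPU_CLOCK,
 *                                     0, 99, -1, 0, -1);
 *   ...
 *   bpf_close_perf_event_fd(ev_fd);
 */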

int bpf_open_perf_event(uint32_t type, uint64_t config, int pid, int cpu);

int bpf_close_perf_event_fd(int fd);

typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);

struct ring_buffer;

void * bpf_new_ringbuf(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx);
void bpf_free_ringbuf(struct ring_buffer *rb);
int bpf_add_ringbuf(struct ring_buffer *rb, int map_fd,
                    ring_buffer_sample_fn sample_cb, void *ctx);
int bpf_poll_ringbuf(struct ring_buffer *rb, int timeout_ms);
int bpf_consume_ringbuf(struct ring_buffer *rb);
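
/*
 * Example sketch: consume events from a BPF_MAP_TYPE_RINGBUF map whose fd is
 * map_fd (a sketch only; the callback and loop condition are illustrative):
 *
 *   static int on_event(void *ctx, void *data, size_t size) {
 *       // process 'size' bytes at 'data'; return 0 to keep polling
 *       return 0;
 *   }
 *   struct ring_buffer *rb = bpf_new_ringbuf(map_fd, on_event, NULL);
 *   while (rb && bpf_poll_ringbuf(rb, 100) >= 0)
 *       ;  // 100 ms poll timeout
 *   bpf_free_ringbuf(rb);
 */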

int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);
int bpf_obj_get_info(int prog_map_fd, void *info, uint32_t *info_len);
int bpf_prog_compute_tag(const struct bpf_insn *insns, int prog_len,
                         unsigned long long *tag);
int bpf_prog_get_tag(int fd, unsigned long long *tag);
int bpf_prog_get_next_id(uint32_t start_id, uint32_t *next_id);
int bpf_prog_get_fd_by_id(uint32_t id);
int bpf_map_get_fd_by_id(uint32_t id);
int bpf_obj_get_info_by_fd(int prog_fd, void *info, uint32_t *info_len);

int bcc_iter_attach(int prog_fd, union bpf_iter_link_info *link_info,
                    uint32_t link_info_len);
int bcc_iter_create(int link_fd);
int bcc_make_parent_dir(const char *path);
int bcc_check_bpffs_path(const char *path);
int bpf_lookup_batch(int fd, __u32 *in_batch, __u32 *out_batch, void *keys,
                     void *values, __u32 *count);
int bpf_delete_batch(int fd,  void *keys, __u32 *count);
int bpf_update_batch(int fd, void *keys, void *values, __u32 *count);
int bpf_lookup_and_delete_batch(int fd, __u32 *in_batch, __u32 *out_batch,
                                void *keys, void *values, __u32 *count);
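
/*
 * Batch-lookup sketch: fetch up to 'n' entries in one syscall (keys and
 * values are assumed to point to arrays with room for n elements; in_batch
 * is NULL on the first call, and out_batch returns the position to resume
 * from on the next call):
 *
 *   __u32 out_batch, count = n;
 *   int err = bpf_lookup_batch(map_fd, NULL, &out_batch, keys, values, &count);
 *   // on success, 'count' holds the number of entries actually returned
 */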

#define LOG_BUF_SIZE 65536

// Put non-static/inline functions in their own section with this prefix +
// fn_name to enable discovery by the bcc library.
#define BPF_FN_PREFIX ".bpf.fn."

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* The BPF_LD_IMM64 macro encodes a single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#define BPF_PSEUDO_MAP_FD	1

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
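
/*
 * Example sketch: the macros above can hand-assemble a minimal "return 0"
 * program and load it with bcc_prog_load (BPF_REG_0 and the program type
 * come from linux/bpf.h):
 *
 *   struct bpf_insn prog[] = {
 *       BPF_MOV64_IMM(BPF_REG_0, 0),  // r0 = 0
 *       BPF_EXIT_INSN(),              // return r0
 *   };
 *   int fd = bcc_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "noop", prog,
 *                          sizeof(prog), "GPL", 0, 0, NULL, 0);
 */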

#ifdef __cplusplus
}
#endif

#endif