• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1  // SPDX-License-Identifier: LGPL-2.1
2  
3  /*
4   * common eBPF ELF operations.
5   *
6   * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7   * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8   * Copyright (C) 2015 Huawei Inc.
9   *
10   * This program is free software; you can redistribute it and/or
11   * modify it under the terms of the GNU Lesser General Public
12   * License as published by the Free Software Foundation;
13   * version 2.1 of the License (not later!)
14   *
15   * This program is distributed in the hope that it will be useful,
16   * but WITHOUT ANY WARRANTY; without even the implied warranty of
17   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   * GNU Lesser General Public License for more details.
19   *
20   * You should have received a copy of the GNU Lesser General Public
21   * License along with this program; if not,  see <http://www.gnu.org/licenses>
22   */
23  
#include <errno.h>
#include <memory.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/socket.h>
#include <asm/unistd.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include "bpf.h"
#include "libbpf.h"
#include "nlattr.h"
36  
/* Older libc headers may not define SOL_NETLINK; value from the UAPI. */
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 *
 * Per-architecture syscall numbers for bpf(2).
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

/* NOTE: evaluates each argument twice; do not pass side-effecting args. */
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
66  
/* Cast a userspace pointer into the __u64 form that the kernel
 * expects in union bpf_attr pointer fields.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long) ptr;

	return (__u64) addr;
}
71  
/* Thin wrapper for the bpf(2) syscall.  Returns the raw syscall
 * result: a new fd (or 0) on success, -1 with errno set on failure.
 */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
77  
/* Issue BPF_PROG_LOAD, retrying for as long as the kernel
 * reports EAGAIN.  Returns the program fd or -1 with errno set.
 */
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int fd = sys_bpf(BPF_PROG_LOAD, attr, size);

	while (fd < 0 && errno == EAGAIN)
		fd = sys_bpf(BPF_PROG_LOAD, attr, size);

	return fd;
}
88  
bpf_create_map_xattr(const struct bpf_create_map_attr * create_attr)89  int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
90  {
91  	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
92  	union bpf_attr attr;
93  	int ret;
94  
95  	memset(&attr, '\0', sizeof(attr));
96  
97  	attr.map_type = create_attr->map_type;
98  	attr.key_size = create_attr->key_size;
99  	attr.value_size = create_attr->value_size;
100  	attr.max_entries = create_attr->max_entries;
101  	attr.map_flags = create_attr->map_flags;
102  	memcpy(attr.map_name, create_attr->name,
103  	       min(name_len, BPF_OBJ_NAME_LEN - 1));
104  	attr.numa_node = create_attr->numa_node;
105  	attr.btf_fd = create_attr->btf_fd;
106  	attr.btf_key_type_id = create_attr->btf_key_type_id;
107  	attr.btf_value_type_id = create_attr->btf_value_type_id;
108  	attr.map_ifindex = create_attr->map_ifindex;
109  	attr.inner_map_fd = create_attr->inner_map_fd;
110  
111  	ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
112  	if (ret < 0 && errno == EINVAL && create_attr->name) {
113  		/* Retry the same syscall, but without the name.
114  		 * Pre v4.14 kernels don't support map names.
115  		 */
116  		memset(attr.map_name, 0, sizeof(attr.map_name));
117  		return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
118  	}
119  	return ret;
120  }
121  
bpf_create_map_node(enum bpf_map_type map_type,const char * name,int key_size,int value_size,int max_entries,__u32 map_flags,int node)122  int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
123  			int key_size, int value_size, int max_entries,
124  			__u32 map_flags, int node)
125  {
126  	struct bpf_create_map_attr map_attr = {};
127  
128  	map_attr.name = name;
129  	map_attr.map_type = map_type;
130  	map_attr.map_flags = map_flags;
131  	map_attr.key_size = key_size;
132  	map_attr.value_size = value_size;
133  	map_attr.max_entries = max_entries;
134  	if (node >= 0) {
135  		map_attr.numa_node = node;
136  		map_attr.map_flags |= BPF_F_NUMA_NODE;
137  	}
138  
139  	return bpf_create_map_xattr(&map_attr);
140  }
141  
bpf_create_map(enum bpf_map_type map_type,int key_size,int value_size,int max_entries,__u32 map_flags)142  int bpf_create_map(enum bpf_map_type map_type, int key_size,
143  		   int value_size, int max_entries, __u32 map_flags)
144  {
145  	struct bpf_create_map_attr map_attr = {};
146  
147  	map_attr.map_type = map_type;
148  	map_attr.map_flags = map_flags;
149  	map_attr.key_size = key_size;
150  	map_attr.value_size = value_size;
151  	map_attr.max_entries = max_entries;
152  
153  	return bpf_create_map_xattr(&map_attr);
154  }
155  
bpf_create_map_name(enum bpf_map_type map_type,const char * name,int key_size,int value_size,int max_entries,__u32 map_flags)156  int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
157  			int key_size, int value_size, int max_entries,
158  			__u32 map_flags)
159  {
160  	struct bpf_create_map_attr map_attr = {};
161  
162  	map_attr.name = name;
163  	map_attr.map_type = map_type;
164  	map_attr.map_flags = map_flags;
165  	map_attr.key_size = key_size;
166  	map_attr.value_size = value_size;
167  	map_attr.max_entries = max_entries;
168  
169  	return bpf_create_map_xattr(&map_attr);
170  }
171  
/* Create a map-in-map (e.g. array/hash of maps) whose entries hold maps
 * compatible with @inner_map_fd.  Value size is fixed at 4 bytes: each
 * element stores a map fd/id.  Pass a negative @node for default NUMA
 * placement.  Returns the new map fd, or -1 with errno set.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* memcpy() from a NULL source is undefined even for length 0;
	 * only copy when a name was actually supplied.
	 */
	if (name)
		memcpy(attr.map_name, name,
		       min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
196  
/* Convenience wrapper: map-in-map with default NUMA placement. */
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
205  
bpf_load_program_xattr(const struct bpf_load_program_attr * load_attr,char * log_buf,size_t log_buf_sz)206  int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
207  			   char *log_buf, size_t log_buf_sz)
208  {
209  	union bpf_attr attr;
210  	__u32 name_len;
211  	int fd;
212  
213  	if (!load_attr)
214  		return -EINVAL;
215  
216  	name_len = load_attr->name ? strlen(load_attr->name) : 0;
217  
218  	bzero(&attr, sizeof(attr));
219  	attr.prog_type = load_attr->prog_type;
220  	attr.expected_attach_type = load_attr->expected_attach_type;
221  	attr.insn_cnt = (__u32)load_attr->insns_cnt;
222  	attr.insns = ptr_to_u64(load_attr->insns);
223  	attr.license = ptr_to_u64(load_attr->license);
224  	attr.log_buf = ptr_to_u64(NULL);
225  	attr.log_size = 0;
226  	attr.log_level = 0;
227  	attr.kern_version = load_attr->kern_version;
228  	attr.prog_ifindex = load_attr->prog_ifindex;
229  	memcpy(attr.prog_name, load_attr->name,
230  	       min(name_len, BPF_OBJ_NAME_LEN - 1));
231  
232  	fd = sys_bpf_prog_load(&attr, sizeof(attr));
233  	if (fd >= 0 || !log_buf || !log_buf_sz)
234  		return fd;
235  
236  	/* Try again with log */
237  	attr.log_buf = ptr_to_u64(log_buf);
238  	attr.log_size = log_buf_sz;
239  	attr.log_level = 1;
240  	log_buf[0] = 0;
241  	return sys_bpf_prog_load(&attr, sizeof(attr));
242  }
243  
/* Legacy program-load entry point: build a bpf_load_program_attr from
 * the discrete arguments (nameless, default attach type) and delegate.
 */
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr = {
		.prog_type = type,
		.insns = insns,
		.insns_cnt = insns_cnt,
		.license = license,
		.kern_version = kern_version,
	};

	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}
262  
/* Load a program with explicit verifier log control, mainly for
 * exercising the verifier.  @log_buf must be a valid buffer — its
 * first byte is cleared unconditionally.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;

	return sys_bpf_prog_load(&attr, sizeof(attr));
}
284  
/* Create or update the element for @key in map @fd with @value,
 * subject to @flags (kernel UAPI update semantics).
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
298  
/* Look up @key in map @fd; on success the element is copied into
 * @value (caller must size it for the map's value_size).
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.value = ptr_to_u64(value);
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
310  
/* Remove the element for @key from map @fd. */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.key = ptr_to_u64(key);
	attr.map_fd = fd;

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
321  
/* Fetch into @next_key the key that follows @key in map @fd;
 * used to iterate all keys of a map.
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.next_key = ptr_to_u64(next_key);
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
333  
/* Pin the BPF object behind @fd at @pathname (bpffs path). */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = fd;
	attr.pathname = ptr_to_u64(pathname);

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
344  
/* Open a previously pinned BPF object; returns its fd. */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64(pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
354  
/* Attach program @prog_fd to @target_fd for attach point @type. */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_bpf_fd = prog_fd;
	attr.target_fd	   = target_fd;
	attr.attach_type   = type;
	attr.attach_flags  = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
368  
/* Detach whatever program is attached at @type on @target_fd. */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.target_fd	 = target_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
379  
/* Like bpf_prog_detach(), but names the specific program fd to detach. */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.target_fd	 = target_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
391  
/* Query the programs attached at @type on @target_fd.
 *
 * @prog_cnt is in/out: capacity of @prog_ids on entry, number of ids on
 * return.  It and (optionally) @attach_flags are written back even on
 * failure, mirroring whatever the kernel left in the attr.
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= query_flags;
	attr.query.prog_ids	= ptr_to_u64(prog_ids);
	attr.query.prog_cnt	= *prog_cnt;

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;

	return ret;
}
411  
/* Run program @prog_fd @repeat times over @data/@size via
 * BPF_PROG_TEST_RUN.  Output pointers (@size_out, @retval, @duration)
 * are each optional and only written when non-NULL.
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.repeat = repeat;
	attr.test.data_size_in = size;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;

	return ret;
}
435  
/* Find the first program id greater than @start_id; stores it in
 * *next_id on success (use start_id 0 to begin iterating).
 */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (err)
		return err;

	*next_id = attr.next_id;
	return 0;
}
450  
/* Find the first map id greater than @start_id; stores it in
 * *next_id on success (use start_id 0 to begin iterating).
 */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (err)
		return err;

	*next_id = attr.next_id;
	return 0;
}
465  
/* Open an fd for the program with global id @id. */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
475  
/* Open an fd for the map with global id @id. */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
485  
/* Open an fd for the BTF object with global id @id. */
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}
495  
/* Fetch kernel info about the BPF object behind @prog_fd into @info.
 * @info_len is in/out: buffer capacity on entry, bytes the kernel
 * filled in on successful return.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info = ptr_to_u64(info);
	attr.info.info_len = *info_len;

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (err)
		return err;

	*info_len = attr.info.info_len;
	return 0;
}
512  
/* Attach program @prog_fd to the raw tracepoint called @name;
 * returns an fd whose lifetime pins the attachment.
 */
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.prog_fd = prog_fd;
	attr.raw_tracepoint.name = ptr_to_u64(name);

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
523  
/* Attach (or with fd < 0, detach) an XDP program to the interface with
 * index @ifindex by sending an RTM_SETLINK netlink message carrying a
 * nested IFLA_XDP attribute.  Returns 0 on success, a negative errno or
 * LIBBPF_ERRNO__* code on failure.
 */
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	struct sockaddr_nl sa;
	int sock, seq = 0, len, ret = -1;
	char buf[4096];
	struct nlattr *nla, *nla_xdp;
	struct {
		struct nlmsghdr  nh;
		struct ifinfomsg ifinfo;
		char             attrbuf[64];
	} req;
	struct nlmsghdr *nh;
	struct nlmsgerr *err;
	socklen_t addrlen;
	int one = 1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (sock < 0) {
		return -errno;
	}

	/* Ask for extended ack error strings; non-fatal on old kernels. */
	if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
		       &one, sizeof(one)) < 0) {
		fprintf(stderr, "Netlink error reporting not supported\n");
	}

	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* Learn the kernel-assigned nl_pid so replies can be validated. */
	addrlen = sizeof(sa);
	if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
		ret = -errno;
		goto cleanup;
	}

	if (addrlen != sizeof(sa)) {
		ret = -LIBBPF_ERRNO__INTERNAL;
		goto cleanup;
	}

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_pid = 0;
	req.nh.nlmsg_seq = ++seq;
	req.ifinfo.ifi_family = AF_UNSPEC;
	req.ifinfo.ifi_index = ifindex;

	/* started nested attribute for XDP */
	nla = (struct nlattr *)(((char *)&req)
				+ NLMSG_ALIGN(req.nh.nlmsg_len));
	nla->nla_type = NLA_F_NESTED | IFLA_XDP;
	nla->nla_len = NLA_HDRLEN;

	/* add XDP fd */
	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
	nla_xdp->nla_type = IFLA_XDP_FD;
	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
	nla->nla_len += nla_xdp->nla_len;

	/* if user passed in any flags, add those too */
	if (flags) {
		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
		nla_xdp->nla_type = IFLA_XDP_FLAGS;
		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
		nla->nla_len += nla_xdp->nla_len;
	}

	/* Account for the nested attribute in the total message length. */
	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
		ret = -errno;
		goto cleanup;
	}

	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0) {
		ret = -errno;
		goto cleanup;
	}

	/* Walk the reply; reject messages from the wrong pid or with an
	 * unexpected sequence number before interpreting them.
	 */
	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_pid != sa.nl_pid) {
			ret = -LIBBPF_ERRNO__WRNGPID;
			goto cleanup;
		}
		if (nh->nlmsg_seq != seq) {
			ret = -LIBBPF_ERRNO__INVSEQ;
			goto cleanup;
		}
		switch (nh->nlmsg_type) {
		case NLMSG_ERROR:
			/* error == 0 is the ACK for our NLM_F_ACK request */
			err = (struct nlmsgerr *)NLMSG_DATA(nh);
			if (!err->error)
				continue;
			ret = err->error;
			nla_dump_errormsg(nh);
			goto cleanup;
		case NLMSG_DONE:
			break;
		default:
			break;
		}
	}

	ret = 0;

cleanup:
	close(sock);
	return ret;
}
644  
/* Load a BTF blob into the kernel.  When @do_log is false, the first
 * attempt runs without a verifier log; on failure (and with a usable
 * @log_buf/@log_buf_size) it is retried once with logging enabled.
 * Returns the BTF fd or -1 with errno set.
 */
int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

	for (;;) {
		if (do_log && log_buf && log_buf_size) {
			attr.btf_log_level = 1;
			attr.btf_log_size = log_buf_size;
			attr.btf_log_buf = ptr_to_u64(log_buf);
		}

		fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
		if (fd == -1 && !do_log && log_buf && log_buf_size) {
			do_log = true;
			continue;
		}
		break;
	}

	return fd;
}
669  
/* Query what BPF object (kprobe/uprobe/tracepoint, ...) a task's fd
 * refers to.  @buf_len is in/out for the name buffer; all output
 * pointers are written back unconditionally from the kernel's reply,
 * matching the raw BPF_TASK_FD_QUERY semantics.
 */
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf_len = *buf_len;
	attr.task_fd_query.buf = ptr_to_u64(buf);

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return err;
}
692