1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * AF_XDP user-space access library.
5  *
6  * Copyright(c) 2018 - 2019 Intel Corporation.
7  *
8  * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
9  */
10 
11 #include <errno.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <unistd.h>
15 #include <arpa/inet.h>
16 #include <asm/barrier.h>
17 #include <linux/compiler.h>
18 #include <linux/ethtool.h>
19 #include <linux/filter.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_packet.h>
22 #include <linux/if_xdp.h>
23 #include <linux/kernel.h>
24 #include <linux/list.h>
25 #include <linux/sockios.h>
26 #include <net/if.h>
27 #include <sys/ioctl.h>
28 #include <sys/mman.h>
29 #include <sys/socket.h>
30 #include <sys/types.h>
31 #include <linux/if_link.h>
32 
33 #include "bpf.h"
34 #include "libbpf.h"
35 #include "libbpf_internal.h"
36 #include "xsk.h"
37 
38 #ifndef SOL_XDP
39  #define SOL_XDP 283
40 #endif
41 
42 #ifndef AF_XDP
43  #define AF_XDP 44
44 #endif
45 
46 #ifndef PF_XDP
47  #define PF_XDP AF_XDP
48 #endif
49 
50 enum xsk_prog {
51 	XSK_PROG_FALLBACK,
52 	XSK_PROG_REDIRECT_FLAGS,
53 };
54 
55 struct xsk_umem {
56 	struct xsk_ring_prod *fill_save;
57 	struct xsk_ring_cons *comp_save;
58 	char *umem_area;
59 	struct xsk_umem_config config;
60 	int fd;
61 	int refcount;
62 	struct list_head ctx_list;
63 	bool rx_ring_setup_done;
64 	bool tx_ring_setup_done;
65 };
66 
67 struct xsk_ctx {
68 	struct xsk_ring_prod *fill;
69 	struct xsk_ring_cons *comp;
70 	__u32 queue_id;
71 	struct xsk_umem *umem;
72 	int refcount;
73 	int ifindex;
74 	struct list_head list;
75 	int prog_fd;
76 	int link_fd;
77 	int xsks_map_fd;
78 	char ifname[IFNAMSIZ];
79 	bool has_bpf_link;
80 };
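/* Note: one xsk_ctx is created per (umem, ifindex, queue_id) combination and
 * is reference-counted, so sockets created with xsk_socket__create_shared()
 * for the same device and queue share a single ctx and therefore a single
 * fill/completion ring pair.
 */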
81 
82 struct xsk_socket {
83 	struct xsk_ring_cons *rx;
84 	struct xsk_ring_prod *tx;
85 	__u64 outstanding_tx;
86 	struct xsk_ctx *ctx;
87 	struct xsk_socket_config config;
88 	int fd;
89 };
90 
91 struct xsk_nl_info {
92 	bool xdp_prog_attached;
93 	int ifindex;
94 	int fd;
95 };
96 
97 /* Up until and including Linux 5.3 */
98 struct xdp_ring_offset_v1 {
99 	__u64 producer;
100 	__u64 consumer;
101 	__u64 desc;
102 };
103 
104 /* Up until and including Linux 5.3 */
105 struct xdp_mmap_offsets_v1 {
106 	struct xdp_ring_offset_v1 rx;
107 	struct xdp_ring_offset_v1 tx;
108 	struct xdp_ring_offset_v1 fr;
109 	struct xdp_ring_offset_v1 cr;
110 };
111 
112 int xsk_umem__fd(const struct xsk_umem *umem)
113 {
114 	return umem ? umem->fd : -EINVAL;
115 }
116 
117 int xsk_socket__fd(const struct xsk_socket *xsk)
118 {
119 	return xsk ? xsk->fd : -EINVAL;
120 }
121 
122 static bool xsk_page_aligned(void *buffer)
123 {
124 	unsigned long addr = (unsigned long)buffer;
125 
126 	return !(addr & (getpagesize() - 1));
127 }
128 
129 static void xsk_set_umem_config(struct xsk_umem_config *cfg,
130 				const struct xsk_umem_config *usr_cfg)
131 {
132 	if (!usr_cfg) {
133 		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
134 		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
135 		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
136 		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
137 		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
138 		return;
139 	}
140 
141 	cfg->fill_size = usr_cfg->fill_size;
142 	cfg->comp_size = usr_cfg->comp_size;
143 	cfg->frame_size = usr_cfg->frame_size;
144 	cfg->frame_headroom = usr_cfg->frame_headroom;
145 	cfg->flags = usr_cfg->flags;
146 }
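/* Illustrative sketch (not a prescribed configuration): a caller that wants
 * larger fill/completion rings but the default frame layout could pass
 *
 *	struct xsk_umem_config cfg = {
 *		.fill_size = 4096,
 *		.comp_size = 4096,
 *		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *		.flags = XSK_UMEM__DEFAULT_FLAGS,
 *	};
 *
 * to xsk_umem__create(); passing NULL instead selects all of the defaults
 * set above.
 */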
147 
148 static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
149 				     const struct xsk_socket_config *usr_cfg)
150 {
151 	if (!usr_cfg) {
152 		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
153 		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
154 		cfg->libbpf_flags = 0;
155 		cfg->xdp_flags = 0;
156 		cfg->bind_flags = 0;
157 		return 0;
158 	}
159 
160 	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
161 		return -EINVAL;
162 
163 	cfg->rx_size = usr_cfg->rx_size;
164 	cfg->tx_size = usr_cfg->tx_size;
165 	cfg->libbpf_flags = usr_cfg->libbpf_flags;
166 	cfg->xdp_flags = usr_cfg->xdp_flags;
167 	cfg->bind_flags = usr_cfg->bind_flags;
168 
169 	return 0;
170 }
171 
172 static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
173 {
174 	struct xdp_mmap_offsets_v1 off_v1;
175 
176 	/* getsockopt on a kernel <= 5.3 has no flags fields.
177 	 * Copy over the offsets to the correct places in the >=5.4 format
178 	 * and put the flags where they would have been on that kernel.
179 	 */
180 	memcpy(&off_v1, off, sizeof(off_v1));
181 
182 	off->rx.producer = off_v1.rx.producer;
183 	off->rx.consumer = off_v1.rx.consumer;
184 	off->rx.desc = off_v1.rx.desc;
185 	off->rx.flags = off_v1.rx.consumer + sizeof(__u32);
186 
187 	off->tx.producer = off_v1.tx.producer;
188 	off->tx.consumer = off_v1.tx.consumer;
189 	off->tx.desc = off_v1.tx.desc;
190 	off->tx.flags = off_v1.tx.consumer + sizeof(__u32);
191 
192 	off->fr.producer = off_v1.fr.producer;
193 	off->fr.consumer = off_v1.fr.consumer;
194 	off->fr.desc = off_v1.fr.desc;
195 	off->fr.flags = off_v1.fr.consumer + sizeof(__u32);
196 
197 	off->cr.producer = off_v1.cr.producer;
198 	off->cr.consumer = off_v1.cr.consumer;
199 	off->cr.desc = off_v1.cr.desc;
200 	off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
201 }
202 
203 static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
204 {
205 	socklen_t optlen;
206 	int err;
207 
208 	optlen = sizeof(*off);
209 	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
210 	if (err)
211 		return err;
212 
213 	if (optlen == sizeof(*off))
214 		return 0;
215 
216 	if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
217 		xsk_mmap_offsets_v1(off);
218 		return 0;
219 	}
220 
221 	return -EINVAL;
222 }
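/* The kernel reveals which layout it filled in through the returned optlen:
 * a full-sized reply is the >= 5.4 xdp_mmap_offsets (with flags fields),
 * while a reply of sizeof(struct xdp_mmap_offsets_v1) comes from a <= 5.3
 * kernel and is widened in place by xsk_mmap_offsets_v1() above.
 */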
223 
224 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
225 				 struct xsk_ring_prod *fill,
226 				 struct xsk_ring_cons *comp)
227 {
228 	struct xdp_mmap_offsets off;
229 	void *map;
230 	int err;
231 
232 	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
233 			 &umem->config.fill_size,
234 			 sizeof(umem->config.fill_size));
235 	if (err)
236 		return -errno;
237 
238 	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
239 			 &umem->config.comp_size,
240 			 sizeof(umem->config.comp_size));
241 	if (err)
242 		return -errno;
243 
244 	err = xsk_get_mmap_offsets(fd, &off);
245 	if (err)
246 		return -errno;
247 
248 	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
249 		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
250 		   XDP_UMEM_PGOFF_FILL_RING);
251 	if (map == MAP_FAILED)
252 		return -errno;
253 
254 	fill->mask = umem->config.fill_size - 1;
255 	fill->size = umem->config.fill_size;
256 	fill->producer = map + off.fr.producer;
257 	fill->consumer = map + off.fr.consumer;
258 	fill->flags = map + off.fr.flags;
259 	fill->ring = map + off.fr.desc;
260 	fill->cached_cons = umem->config.fill_size;
261 
262 	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
263 		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
264 		   XDP_UMEM_PGOFF_COMPLETION_RING);
265 	if (map == MAP_FAILED) {
266 		err = -errno;
267 		goto out_mmap;
268 	}
269 
270 	comp->mask = umem->config.comp_size - 1;
271 	comp->size = umem->config.comp_size;
272 	comp->producer = map + off.cr.producer;
273 	comp->consumer = map + off.cr.consumer;
274 	comp->flags = map + off.cr.flags;
275 	comp->ring = map + off.cr.desc;
276 
277 	return 0;
278 
279 out_mmap:
280 	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
281 	return err;
282 }
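/* Both umem rings hold plain __u64 frame addresses, hence the mapping length
 * of desc offset + ring size * sizeof(__u64). The RX/TX rings mapped later in
 * xsk_socket__create_shared() use struct xdp_desc entries instead.
 */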
283 
284 int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
285 			    __u64 size, struct xsk_ring_prod *fill,
286 			    struct xsk_ring_cons *comp,
287 			    const struct xsk_umem_config *usr_config)
288 {
289 	struct xdp_umem_reg mr;
290 	struct xsk_umem *umem;
291 	int err;
292 
293 	if (!umem_area || !umem_ptr || !fill || !comp)
294 		return -EFAULT;
295 	if (!size && !xsk_page_aligned(umem_area))
296 		return -EINVAL;
297 
298 	umem = calloc(1, sizeof(*umem));
299 	if (!umem)
300 		return -ENOMEM;
301 
302 	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
303 	if (umem->fd < 0) {
304 		err = -errno;
305 		goto out_umem_alloc;
306 	}
307 
308 	umem->umem_area = umem_area;
309 	INIT_LIST_HEAD(&umem->ctx_list);
310 	xsk_set_umem_config(&umem->config, usr_config);
311 
312 	memset(&mr, 0, sizeof(mr));
313 	mr.addr = (uintptr_t)umem_area;
314 	mr.len = size;
315 	mr.chunk_size = umem->config.frame_size;
316 	mr.headroom = umem->config.frame_headroom;
317 	mr.flags = umem->config.flags;
318 
319 	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
320 	if (err) {
321 		err = -errno;
322 		goto out_socket;
323 	}
324 
325 	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
326 	if (err)
327 		goto out_socket;
328 
329 	umem->fill_save = fill;
330 	umem->comp_save = comp;
331 	*umem_ptr = umem;
332 	return 0;
333 
334 out_socket:
335 	close(umem->fd);
336 out_umem_alloc:
337 	free(umem);
338 	return err;
339 }
340 
341 struct xsk_umem_config_v1 {
342 	__u32 fill_size;
343 	__u32 comp_size;
344 	__u32 frame_size;
345 	__u32 frame_headroom;
346 };
347 
348 int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
349 			    __u64 size, struct xsk_ring_prod *fill,
350 			    struct xsk_ring_cons *comp,
351 			    const struct xsk_umem_config *usr_config)
352 {
353 	struct xsk_umem_config config;
354 
355 	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
356 	config.flags = 0;
357 
358 	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
359 					&config);
360 }
361 COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
362 DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
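/* COMPAT_VERSION/DEFAULT_VERSION (from libbpf_internal.h) give the exported
 * xsk_umem__create symbol two versions: binaries linked against libbpf 0.0.2
 * keep the old config layout without the flags field, while newer binaries
 * resolve to xsk_umem__create_v0_0_4.
 */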
363 
364 static enum xsk_prog get_xsk_prog(void)
365 {
366 	enum xsk_prog detected = XSK_PROG_FALLBACK;
367 	struct bpf_load_program_attr prog_attr;
368 	struct bpf_create_map_attr map_attr;
369 	__u32 size_out, retval, duration;
370 	char data_in = 0, data_out;
371 	struct bpf_insn insns[] = {
372 		BPF_LD_MAP_FD(BPF_REG_1, 0),
373 		BPF_MOV64_IMM(BPF_REG_2, 0),
374 		BPF_MOV64_IMM(BPF_REG_3, XDP_PASS),
375 		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
376 		BPF_EXIT_INSN(),
377 	};
378 	int prog_fd, map_fd, ret;
379 
380 	memset(&map_attr, 0, sizeof(map_attr));
381 	map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
382 	map_attr.key_size = sizeof(int);
383 	map_attr.value_size = sizeof(int);
384 	map_attr.max_entries = 1;
385 
386 	map_fd = bpf_create_map_xattr(&map_attr);
387 	if (map_fd < 0)
388 		return detected;
389 
390 	insns[0].imm = map_fd;
391 
392 	memset(&prog_attr, 0, sizeof(prog_attr));
393 	prog_attr.prog_type = BPF_PROG_TYPE_XDP;
394 	prog_attr.insns = insns;
395 	prog_attr.insns_cnt = ARRAY_SIZE(insns);
396 	prog_attr.license = "GPL";
397 
398 	prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
399 	if (prog_fd < 0) {
400 		close(map_fd);
401 		return detected;
402 	}
403 
404 	ret = bpf_prog_test_run(prog_fd, 0, &data_in, 1, &data_out, &size_out, &retval, &duration);
405 	if (!ret && retval == XDP_PASS)
406 		detected = XSK_PROG_REDIRECT_FLAGS;
407 	close(prog_fd);
408 	close(map_fd);
409 	return detected;
410 }
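/* get_xsk_prog() probes whether the running kernel honors the flags argument
 * of bpf_redirect_map() (added in 5.3): it test-runs a minimal XDP program
 * that redirects into an empty XSKMAP with XDP_PASS as the fallback flag.
 * If the run returns XDP_PASS, the short prog_redirect_flags program in
 * xsk_load_xdp_prog() can be used instead of the two-step fallback.
 */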
411 
412 static int xsk_load_xdp_prog(struct xsk_socket *xsk)
413 {
414 	static const int log_buf_size = 16 * 1024;
415 	struct xsk_ctx *ctx = xsk->ctx;
416 	char log_buf[log_buf_size];
417 	int prog_fd;
418 
419 	/* This is the fallback C-program:
420 	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
421 	 * {
422 	 *     int ret, index = ctx->rx_queue_index;
423 	 *
424 	 *     // A set entry here means that the corresponding queue_id
425 	 *     // has an active AF_XDP socket bound to it.
426 	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
427 	 *     if (ret > 0)
428 	 *         return ret;
429 	 *
430 	 *     // Fallback for pre-5.3 kernels, not supporting default
431 	 *     // action in the flags parameter.
432 	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
433 	 *         return bpf_redirect_map(&xsks_map, index, 0);
434 	 *     return XDP_PASS;
435 	 * }
436 	 */
437 	struct bpf_insn prog[] = {
438 		/* r2 = *(u32 *)(r1 + 16) */
439 		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
440 		/* *(u32 *)(r10 - 4) = r2 */
441 		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
442 		/* r1 = xskmap[] */
443 		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
444 		/* r3 = XDP_PASS */
445 		BPF_MOV64_IMM(BPF_REG_3, 2),
446 		/* call bpf_redirect_map */
447 		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
448 		/* if w0 != 0 goto pc+13 */
449 		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
450 		/* r2 = r10 */
451 		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
452 		/* r2 += -4 */
453 		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
454 		/* r1 = xskmap[] */
455 		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
456 		/* call bpf_map_lookup_elem */
457 		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
458 		/* r1 = r0 */
459 		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
460 		/* r0 = XDP_PASS */
461 		BPF_MOV64_IMM(BPF_REG_0, 2),
462 		/* if r1 == 0 goto pc+5 */
463 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
464 		/* r2 = *(u32 *)(r10 - 4) */
465 		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
466 		/* r1 = xskmap[] */
467 		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
468 		/* r3 = 0 */
469 		BPF_MOV64_IMM(BPF_REG_3, 0),
470 		/* call bpf_redirect_map */
471 		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
472 		/* The jumps are to this instruction */
473 		BPF_EXIT_INSN(),
474 	};
475 
476 	/* This is the post-5.3 kernel C-program:
477 	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
478 	 * {
479 	 *     return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
480 	 * }
481 	 */
482 	struct bpf_insn prog_redirect_flags[] = {
483 		/* r2 = *(u32 *)(r1 + 16) */
484 		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
485 		/* r1 = xskmap[] */
486 		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
487 		/* r3 = XDP_PASS */
488 		BPF_MOV64_IMM(BPF_REG_3, 2),
489 		/* call bpf_redirect_map */
490 		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
491 		BPF_EXIT_INSN(),
492 	};
493 	size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn),
494 			      sizeof(prog_redirect_flags) / sizeof(struct bpf_insn),
495 	};
496 	struct bpf_insn *progs[] = {prog, prog_redirect_flags};
497 	enum xsk_prog option = get_xsk_prog();
498 
499 	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
500 				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
501 				   log_buf_size);
502 	if (prog_fd < 0) {
503 		pr_warn("BPF log buffer:\n%s", log_buf);
504 		return prog_fd;
505 	}
506 
507 	ctx->prog_fd = prog_fd;
508 	return 0;
509 }
510 
511 static int xsk_create_bpf_link(struct xsk_socket *xsk)
512 {
513 	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
514 	struct xsk_ctx *ctx = xsk->ctx;
515 	__u32 prog_id = 0;
516 	int link_fd;
517 	int err;
518 
519 	err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
520 	if (err) {
521 		pr_warn("getting XDP prog id failed\n");
522 		return err;
523 	}
524 
525 	/* If there's a netlink-based XDP prog loaded on the interface, bail out
526 	 * and ask the user to remove it themselves.
527 	 */
528 	if (prog_id) {
529 		pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n");
530 		return -EINVAL;
531 	}
532 
533 	opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE);
534 
535 	link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts);
536 	if (link_fd < 0) {
537 		pr_warn("bpf_link_create failed: %s\n", strerror(errno));
538 		return link_fd;
539 	}
540 
541 	ctx->link_fd = link_fd;
542 	return 0;
543 }
544 
545 static int xsk_get_max_queues(struct xsk_socket *xsk)
546 {
547 	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
548 	struct xsk_ctx *ctx = xsk->ctx;
549 	struct ifreq ifr = {};
550 	int fd, err, ret;
551 
552 	fd = socket(AF_LOCAL, SOCK_DGRAM, 0);
553 	if (fd < 0)
554 		return -errno;
555 
556 	ifr.ifr_data = (void *)&channels;
557 	memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
558 	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
559 	err = ioctl(fd, SIOCETHTOOL, &ifr);
560 	if (err && errno != EOPNOTSUPP) {
561 		ret = -errno;
562 		goto out;
563 	}
564 
565 	if (err) {
566 		/* If the device says it has no channels, then all traffic
567 		 * is sent to a single stream, so max queues = 1.
568 		 */
569 		ret = 1;
570 	} else {
571 		/* Take the max of rx, tx, combined. Drivers return
572 		 * the number of channels in different ways.
573 		 */
574 		ret = max(channels.max_rx, channels.max_tx);
575 		ret = max(ret, (int)channels.max_combined);
576 	}
577 
578 out:
579 	close(fd);
580 	return ret;
581 }
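/* The queue count returned here sizes the XSKMAP created in
 * xsk_create_bpf_maps(), giving one map slot per hardware queue; devices that
 * do not implement ETHTOOL_GCHANNELS are treated as single-queue.
 */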
582 
583 static int xsk_create_bpf_maps(struct xsk_socket *xsk)
584 {
585 	struct xsk_ctx *ctx = xsk->ctx;
586 	int max_queues;
587 	int fd;
588 
589 	max_queues = xsk_get_max_queues(xsk);
590 	if (max_queues < 0)
591 		return max_queues;
592 
593 	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
594 				 sizeof(int), sizeof(int), max_queues, 0);
595 	if (fd < 0)
596 		return fd;
597 
598 	ctx->xsks_map_fd = fd;
599 
600 	return 0;
601 }
602 
603 static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
604 {
605 	struct xsk_ctx *ctx = xsk->ctx;
606 
607 	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
608 	close(ctx->xsks_map_fd);
609 }
610 
611 static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
612 {
613 	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
614 	__u32 map_len = sizeof(struct bpf_map_info);
615 	struct bpf_prog_info prog_info = {};
616 	struct xsk_ctx *ctx = xsk->ctx;
617 	struct bpf_map_info map_info;
618 	int fd, err;
619 
620 	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
621 	if (err)
622 		return err;
623 
624 	num_maps = prog_info.nr_map_ids;
625 
626 	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
627 	if (!map_ids)
628 		return -ENOMEM;
629 
630 	memset(&prog_info, 0, prog_len);
631 	prog_info.nr_map_ids = num_maps;
632 	prog_info.map_ids = (__u64)(unsigned long)map_ids;
633 
634 	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
635 	if (err)
636 		goto out_map_ids;
637 
638 	ctx->xsks_map_fd = -1;
639 
640 	for (i = 0; i < prog_info.nr_map_ids; i++) {
641 		fd = bpf_map_get_fd_by_id(map_ids[i]);
642 		if (fd < 0)
643 			continue;
644 
645 		memset(&map_info, 0, map_len);
646 		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
647 		if (err) {
648 			close(fd);
649 			continue;
650 		}
651 
652 		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
653 			ctx->xsks_map_fd = fd;
654 			break;
655 		}
656 
657 		close(fd);
658 	}
659 
660 	if (ctx->xsks_map_fd == -1)
661 		err = -ENOENT;
662 
663 out_map_ids:
664 	free(map_ids);
665 	return err;
666 }
667 
668 static int xsk_set_bpf_maps(struct xsk_socket *xsk)
669 {
670 	struct xsk_ctx *ctx = xsk->ctx;
671 
672 	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
673 				   &xsk->fd, 0);
674 }
675 
676 static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
677 {
678 	struct bpf_link_info link_info;
679 	__u32 link_len;
680 	__u32 id = 0;
681 	int err;
682 	int fd;
683 
684 	while (true) {
685 		err = bpf_link_get_next_id(id, &id);
686 		if (err) {
687 			if (errno == ENOENT) {
688 				err = 0;
689 				break;
690 			}
691 			pr_warn("can't get next link: %s\n", strerror(errno));
692 			break;
693 		}
694 
695 		fd = bpf_link_get_fd_by_id(id);
696 		if (fd < 0) {
697 			if (errno == ENOENT)
698 				continue;
699 			pr_warn("can't get link by id (%u): %s\n", id, strerror(errno));
700 			err = -errno;
701 			break;
702 		}
703 
704 		link_len = sizeof(struct bpf_link_info);
705 		memset(&link_info, 0, link_len);
706 		err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
707 		if (err) {
708 			pr_warn("can't get link info: %s\n", strerror(errno));
709 			close(fd);
710 			break;
711 		}
712 		if (link_info.type == BPF_LINK_TYPE_XDP) {
713 			if (link_info.xdp.ifindex == ifindex) {
714 				*link_fd = fd;
715 				if (prog_id)
716 					*prog_id = link_info.prog_id;
717 				break;
718 			}
719 		}
720 		close(fd);
721 	}
722 
723 	return err;
724 }
725 
726 static bool xsk_probe_bpf_link(void)
727 {
728 	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
729 			    .flags = XDP_FLAGS_SKB_MODE);
730 	struct bpf_load_program_attr prog_attr;
731 	struct bpf_insn insns[2] = {
732 		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
733 		BPF_EXIT_INSN()
734 	};
735 	int prog_fd, link_fd = -1;
736 	int ifindex_lo = 1;
737 	bool ret = false;
738 	int err;
739 
740 	err = xsk_link_lookup(ifindex_lo, NULL, &link_fd);
741 	if (err)
742 		return ret;
743 
744 	if (link_fd >= 0)
745 		return true;
746 
747 	memset(&prog_attr, 0, sizeof(prog_attr));
748 	prog_attr.prog_type = BPF_PROG_TYPE_XDP;
749 	prog_attr.insns = insns;
750 	prog_attr.insns_cnt = ARRAY_SIZE(insns);
751 	prog_attr.license = "GPL";
752 
753 	prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
754 	if (prog_fd < 0)
755 		return ret;
756 
757 	link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
758 	close(prog_fd);
759 
760 	if (link_fd >= 0) {
761 		ret = true;
762 		close(link_fd);
763 	}
764 
765 	return ret;
766 }
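/* xsk_probe_bpf_link() detects BPF link support for XDP by either finding an
 * existing XDP link on loopback (ifindex 1) or attaching a trivial XDP_PASS
 * program there in SKB mode. When links are supported, attachment goes
 * through xsk_create_bpf_link() rather than the netlink-based
 * bpf_set_link_xdp_fd() path (see xsk_init_xdp_res()).
 */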
767 
768 static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
769 {
770 	char ifname[IFNAMSIZ];
771 	struct xsk_ctx *ctx;
772 	char *interface;
773 
774 	ctx = calloc(1, sizeof(*ctx));
775 	if (!ctx)
776 		return -ENOMEM;
777 
778 	interface = if_indextoname(ifindex, &ifname[0]);
779 	if (!interface) {
780 		free(ctx);
781 		return -errno;
782 	}
783 
784 	ctx->ifindex = ifindex;
785 	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
786 	ctx->ifname[IFNAMSIZ - 1] = 0;
787 
788 	xsk->ctx = ctx;
789 	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
790 
791 	return 0;
792 }
793 
794 static int xsk_init_xdp_res(struct xsk_socket *xsk,
795 			    int *xsks_map_fd)
796 {
797 	struct xsk_ctx *ctx = xsk->ctx;
798 	int err;
799 
800 	err = xsk_create_bpf_maps(xsk);
801 	if (err)
802 		return err;
803 
804 	err = xsk_load_xdp_prog(xsk);
805 	if (err)
806 		goto err_load_xdp_prog;
807 
808 	if (ctx->has_bpf_link)
809 		err = xsk_create_bpf_link(xsk);
810 	else
811 		err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, ctx->prog_fd,
812 					  xsk->config.xdp_flags);
813 
814 	if (err)
815 		goto err_attach_xdp_prog;
816 
817 	if (!xsk->rx)
818 		return err;
819 
820 	err = xsk_set_bpf_maps(xsk);
821 	if (err)
822 		goto err_set_bpf_maps;
823 
824 	return err;
825 
826 err_set_bpf_maps:
827 	if (ctx->has_bpf_link)
828 		close(ctx->link_fd);
829 	else
830 		bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
831 err_attach_xdp_prog:
832 	close(ctx->prog_fd);
833 err_load_xdp_prog:
834 	xsk_delete_bpf_maps(xsk);
835 	return err;
836 }
837 
838 static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id)
839 {
840 	struct xsk_ctx *ctx = xsk->ctx;
841 	int err;
842 
843 	ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
844 	if (ctx->prog_fd < 0) {
845 		err = -errno;
846 		goto err_prog_fd;
847 	}
848 	err = xsk_lookup_bpf_maps(xsk);
849 	if (err)
850 		goto err_lookup_maps;
851 
852 	if (!xsk->rx)
853 		return err;
854 
855 	err = xsk_set_bpf_maps(xsk);
856 	if (err)
857 		goto err_set_maps;
858 
859 	return err;
860 
861 err_set_maps:
862 	close(ctx->xsks_map_fd);
863 err_lookup_maps:
864 	close(ctx->prog_fd);
865 err_prog_fd:
866 	if (ctx->has_bpf_link)
867 		close(ctx->link_fd);
868 	return err;
869 }
870 
871 static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
872 {
873 	struct xsk_socket *xsk = _xdp;
874 	struct xsk_ctx *ctx = xsk->ctx;
875 	__u32 prog_id = 0;
876 	int err;
877 
878 	if (ctx->has_bpf_link)
879 		err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
880 	else
881 		err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
882 
883 	if (err)
884 		return err;
885 
886 	err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) :
887 			 xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id);
888 
889 	if (!err && xsks_map_fd)
890 		*xsks_map_fd = ctx->xsks_map_fd;
891 
892 	return err;
893 }
894 
895 static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
896 				   __u32 queue_id)
897 {
898 	struct xsk_ctx *ctx;
899 
900 	if (list_empty(&umem->ctx_list))
901 		return NULL;
902 
903 	list_for_each_entry(ctx, &umem->ctx_list, list) {
904 		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
905 			ctx->refcount++;
906 			return ctx;
907 		}
908 	}
909 
910 	return NULL;
911 }
912 
913 static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
914 {
915 	struct xsk_umem *umem = ctx->umem;
916 	struct xdp_mmap_offsets off;
917 	int err;
918 
919 	if (--ctx->refcount)
920 		return;
921 
922 	if (!unmap)
923 		goto out_free;
924 
925 	err = xsk_get_mmap_offsets(umem->fd, &off);
926 	if (err)
927 		goto out_free;
928 
929 	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
930 	       sizeof(__u64));
931 	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
932 	       sizeof(__u64));
933 
934 out_free:
935 	list_del(&ctx->list);
936 	free(ctx);
937 }
938 
939 static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
940 				      struct xsk_umem *umem, int ifindex,
941 				      const char *ifname, __u32 queue_id,
942 				      struct xsk_ring_prod *fill,
943 				      struct xsk_ring_cons *comp)
944 {
945 	struct xsk_ctx *ctx;
946 	int err;
947 
948 	ctx = calloc(1, sizeof(*ctx));
949 	if (!ctx)
950 		return NULL;
951 
952 	if (!umem->fill_save) {
953 		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
954 		if (err) {
955 			free(ctx);
956 			return NULL;
957 		}
958 	} else if (umem->fill_save != fill || umem->comp_save != comp) {
959 		/* Copy over rings to new structs. */
960 		memcpy(fill, umem->fill_save, sizeof(*fill));
961 		memcpy(comp, umem->comp_save, sizeof(*comp));
962 	}
963 
964 	ctx->ifindex = ifindex;
965 	ctx->refcount = 1;
966 	ctx->umem = umem;
967 	ctx->queue_id = queue_id;
968 	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
969 	ctx->ifname[IFNAMSIZ - 1] = '\0';
970 
971 	ctx->fill = fill;
972 	ctx->comp = comp;
973 	list_add(&ctx->list, &umem->ctx_list);
974 	return ctx;
975 }
976 
977 static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
978 {
979 	free(xsk->ctx);
980 	free(xsk);
981 }
982 
983 int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
984 {
985 	xsk->ctx->xsks_map_fd = fd;
986 	return xsk_set_bpf_maps(xsk);
987 }
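/* Intended for applications that set XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD and
 * attach their own XDP program: they pass the fd of their XSKMAP here so this
 * socket gets inserted at its queue_id.
 */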
988 
989 int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
990 {
991 	struct xsk_socket *xsk;
992 	int res;
993 
994 	xsk = calloc(1, sizeof(*xsk));
995 	if (!xsk)
996 		return -ENOMEM;
997 
998 	res = xsk_create_xsk_struct(ifindex, xsk);
999 	if (res) {
1000 		free(xsk);
1001 		return -EINVAL;
1002 	}
1003 
1004 	res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);
1005 
1006 	xsk_destroy_xsk_struct(xsk);
1007 
1008 	return res;
1009 }
1010 
1011 int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
1012 			      const char *ifname,
1013 			      __u32 queue_id, struct xsk_umem *umem,
1014 			      struct xsk_ring_cons *rx,
1015 			      struct xsk_ring_prod *tx,
1016 			      struct xsk_ring_prod *fill,
1017 			      struct xsk_ring_cons *comp,
1018 			      const struct xsk_socket_config *usr_config)
1019 {
1020 	bool unmap, rx_setup_done = false, tx_setup_done = false;
1021 	void *rx_map = NULL, *tx_map = NULL;
1022 	struct sockaddr_xdp sxdp = {};
1023 	struct xdp_mmap_offsets off;
1024 	struct xsk_socket *xsk;
1025 	struct xsk_ctx *ctx;
1026 	int err, ifindex;
1027 
1028 	if (!umem || !xsk_ptr || !(rx || tx))
1029 		return -EFAULT;
1030 
1031 	unmap = umem->fill_save != fill;
1032 
1033 	xsk = calloc(1, sizeof(*xsk));
1034 	if (!xsk)
1035 		return -ENOMEM;
1036 
1037 	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
1038 	if (err)
1039 		goto out_xsk_alloc;
1040 
1041 	xsk->outstanding_tx = 0;
1042 	ifindex = if_nametoindex(ifname);
1043 	if (!ifindex) {
1044 		err = -errno;
1045 		goto out_xsk_alloc;
1046 	}
1047 
1048 	if (umem->refcount++ > 0) {
1049 		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
1050 		if (xsk->fd < 0) {
1051 			err = -errno;
1052 			goto out_xsk_alloc;
1053 		}
1054 	} else {
1055 		xsk->fd = umem->fd;
1056 		rx_setup_done = umem->rx_ring_setup_done;
1057 		tx_setup_done = umem->tx_ring_setup_done;
1058 	}
1059 
1060 	ctx = xsk_get_ctx(umem, ifindex, queue_id);
1061 	if (!ctx) {
1062 		if (!fill || !comp) {
1063 			err = -EFAULT;
1064 			goto out_socket;
1065 		}
1066 
1067 		ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
1068 				     fill, comp);
1069 		if (!ctx) {
1070 			err = -ENOMEM;
1071 			goto out_socket;
1072 		}
1073 	}
1074 	xsk->ctx = ctx;
1075 	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
1076 
1077 	if (rx && !rx_setup_done) {
1078 		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
1079 				 &xsk->config.rx_size,
1080 				 sizeof(xsk->config.rx_size));
1081 		if (err) {
1082 			err = -errno;
1083 			goto out_put_ctx;
1084 		}
1085 		if (xsk->fd == umem->fd)
1086 			umem->rx_ring_setup_done = true;
1087 	}
1088 	if (tx && !tx_setup_done) {
1089 		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
1090 				 &xsk->config.tx_size,
1091 				 sizeof(xsk->config.tx_size));
1092 		if (err) {
1093 			err = -errno;
1094 			goto out_put_ctx;
1095 		}
1096 		if (xsk->fd == umem->fd)
1097 			umem->tx_ring_setup_done = true;
1098 	}
1099 
1100 	err = xsk_get_mmap_offsets(xsk->fd, &off);
1101 	if (err) {
1102 		err = -errno;
1103 		goto out_put_ctx;
1104 	}
1105 
1106 	if (rx) {
1107 		rx_map = mmap(NULL, off.rx.desc +
1108 			      xsk->config.rx_size * sizeof(struct xdp_desc),
1109 			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
1110 			      xsk->fd, XDP_PGOFF_RX_RING);
1111 		if (rx_map == MAP_FAILED) {
1112 			err = -errno;
1113 			goto out_put_ctx;
1114 		}
1115 
1116 		rx->mask = xsk->config.rx_size - 1;
1117 		rx->size = xsk->config.rx_size;
1118 		rx->producer = rx_map + off.rx.producer;
1119 		rx->consumer = rx_map + off.rx.consumer;
1120 		rx->flags = rx_map + off.rx.flags;
1121 		rx->ring = rx_map + off.rx.desc;
1122 		rx->cached_prod = *rx->producer;
1123 		rx->cached_cons = *rx->consumer;
1124 	}
1125 	xsk->rx = rx;
1126 
1127 	if (tx) {
1128 		tx_map = mmap(NULL, off.tx.desc +
1129 			      xsk->config.tx_size * sizeof(struct xdp_desc),
1130 			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
1131 			      xsk->fd, XDP_PGOFF_TX_RING);
1132 		if (tx_map == MAP_FAILED) {
1133 			err = -errno;
1134 			goto out_mmap_rx;
1135 		}
1136 
1137 		tx->mask = xsk->config.tx_size - 1;
1138 		tx->size = xsk->config.tx_size;
1139 		tx->producer = tx_map + off.tx.producer;
1140 		tx->consumer = tx_map + off.tx.consumer;
1141 		tx->flags = tx_map + off.tx.flags;
1142 		tx->ring = tx_map + off.tx.desc;
1143 		tx->cached_prod = *tx->producer;
1144 		/* cached_cons is r->size bigger than the real consumer pointer.
1145 		 * See xsk_prod_nb_free().
1146 		 */
1147 		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
1148 	}
1149 	xsk->tx = tx;
1150 
1151 	sxdp.sxdp_family = PF_XDP;
1152 	sxdp.sxdp_ifindex = ctx->ifindex;
1153 	sxdp.sxdp_queue_id = ctx->queue_id;
1154 	if (umem->refcount > 1) {
1155 		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
1156 		sxdp.sxdp_shared_umem_fd = umem->fd;
1157 	} else {
1158 		sxdp.sxdp_flags = xsk->config.bind_flags;
1159 	}
1160 
1161 	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
1162 	if (err) {
1163 		err = -errno;
1164 		goto out_mmap_tx;
1165 	}
1166 
1167 	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
1168 		err = __xsk_setup_xdp_prog(xsk, NULL);
1169 		if (err)
1170 			goto out_mmap_tx;
1171 	}
1172 
1173 	*xsk_ptr = xsk;
1174 	umem->fill_save = NULL;
1175 	umem->comp_save = NULL;
1176 	return 0;
1177 
1178 out_mmap_tx:
1179 	if (tx)
1180 		munmap(tx_map, off.tx.desc +
1181 		       xsk->config.tx_size * sizeof(struct xdp_desc));
1182 out_mmap_rx:
1183 	if (rx)
1184 		munmap(rx_map, off.rx.desc +
1185 		       xsk->config.rx_size * sizeof(struct xdp_desc));
1186 out_put_ctx:
1187 	xsk_put_ctx(ctx, unmap);
1188 out_socket:
1189 	if (--umem->refcount)
1190 		close(xsk->fd);
1191 out_xsk_alloc:
1192 	free(xsk);
1193 	return err;
1194 }
1195 
1196 int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
1197 		       __u32 queue_id, struct xsk_umem *umem,
1198 		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
1199 		       const struct xsk_socket_config *usr_config)
1200 {
1201 	if (!umem)
1202 		return -EFAULT;
1203 
1204 	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
1205 					 rx, tx, umem->fill_save,
1206 					 umem->comp_save, usr_config);
1207 }
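/* Illustrative usage sketch (error handling omitted; assumes `bufs` is a
 * page-aligned buffer of NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE bytes):
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *
 *	xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
 *	xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 *
 * Further sockets sharing the same umem on other queues go through
 * xsk_socket__create_shared() with their own fill/completion rings.
 */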
1208 
1209 int xsk_umem__delete(struct xsk_umem *umem)
1210 {
1211 	struct xdp_mmap_offsets off;
1212 	int err;
1213 
1214 	if (!umem)
1215 		return 0;
1216 
1217 	if (umem->refcount)
1218 		return -EBUSY;
1219 
1220 	err = xsk_get_mmap_offsets(umem->fd, &off);
1221 	if (!err && umem->fill_save && umem->comp_save) {
1222 		munmap(umem->fill_save->ring - off.fr.desc,
1223 		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
1224 		munmap(umem->comp_save->ring - off.cr.desc,
1225 		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
1226 	}
1227 
1228 	close(umem->fd);
1229 	free(umem);
1230 
1231 	return 0;
1232 }
1233 
1234 void xsk_socket__delete(struct xsk_socket *xsk)
1235 {
1236 	size_t desc_sz = sizeof(struct xdp_desc);
1237 	struct xdp_mmap_offsets off;
1238 	struct xsk_umem *umem;
1239 	struct xsk_ctx *ctx;
1240 	int err;
1241 
1242 	if (!xsk)
1243 		return;
1244 
1245 	ctx = xsk->ctx;
1246 	umem = ctx->umem;
1247 
1248 	if (ctx->refcount == 1) {
1249 		xsk_delete_bpf_maps(xsk);
1250 		close(ctx->prog_fd);
1251 		if (ctx->has_bpf_link)
1252 			close(ctx->link_fd);
1253 	}
1254 
1255 	xsk_put_ctx(ctx, true);
1256 
1257 	err = xsk_get_mmap_offsets(xsk->fd, &off);
1258 	if (!err) {
1259 		if (xsk->rx) {
1260 			munmap(xsk->rx->ring - off.rx.desc,
1261 			       off.rx.desc + xsk->config.rx_size * desc_sz);
1262 		}
1263 		if (xsk->tx) {
1264 			munmap(xsk->tx->ring - off.tx.desc,
1265 			       off.tx.desc + xsk->config.tx_size * desc_sz);
1266 		}
1267 	}
1268 
1269 	umem->refcount--;
1270 	/* Do not close an fd that also has an associated umem connected
1271 	 * to it.
1272 	 */
1273 	if (xsk->fd != umem->fd)
1274 		close(xsk->fd);
1275 	free(xsk);
1276 }
1277