// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "xsk.h"
#include "bpf_util.h"

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

#define XSKMAP_SIZE 1

struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
	bool rx_ring_setup_done;
	bool tx_ring_setup_done;
};

struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

struct nl_mtu_req {
	struct nlmsghdr nh;
	struct ifinfomsg msg;
	char buf[512];
};

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

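/* Fill in the umem config, falling back to the library defaults when the
 * caller does not supply one.
 */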
static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}

static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->bind_flags = 0;
		return 0;
	}

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}

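/* Query the kernel for the mmap offsets of the rx/tx/fill/completion rings
 * of this XDP socket.
 */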
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
	int err;

	optlen = sizeof(*off);
	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(*off))
		return 0;

	return -EINVAL;
}

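/* Create the FILL and COMPLETION rings for a umem on the given socket fd:
 * size the rings via setsockopt() and map them into user space.
 */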
static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *map;
	int err;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	return 0;

out_mmap:
	/* The completion ring mmap() failed and overwrote "map", so unmap the
	 * fill ring through the pointer stored in the fill struct above.
	 */
	munmap(fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}

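/* Register a user-provided memory area as a umem: open an AF_XDP socket,
 * register the area with XDP_UMEM_REG and create its fill/completion rings.
 */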
int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area,
		     __u64 size, struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}

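/* Return true if an XDP program is currently attached to @ifindex in the
 * requested mode (XDP_FLAGS_DRV_MODE or XDP_FLAGS_SKB_MODE).
 */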
bool xsk_is_in_mode(u32 ifindex, int mode)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ret;

	ret = bpf_xdp_query(ifindex, mode, &opts);
	if (ret) {
		printf("XDP mode query returned error %s\n", strerror(errno));
		return false;
	}

	if (mode == XDP_FLAGS_DRV_MODE)
		return opts.attach_mode == XDP_ATTACHED_DRV;
	else if (mode == XDP_FLAGS_SKB_MODE)
		return opts.attach_mode == XDP_ATTACHED_SKB;

	return false;
}

/* Lifted from netlink.c in tools/lib/bpf */
static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
{
	int len;

	do {
		len = recvmsg(sock, mhdr, flags);
	} while (len < 0 && (errno == EINTR || errno == EAGAIN));

	if (len < 0)
		return -errno;
	return len;
}

/* Lifted from netlink.c in tools/lib/bpf */
static int alloc_iov(struct iovec *iov, int len)
{
	void *nbuf;

	nbuf = realloc(iov->iov_base, len);
	if (!nbuf)
		return -ENOMEM;

	iov->iov_base = nbuf;
	iov->iov_len = len;
	return 0;
}

/* Original version lifted from netlink.c in tools/lib/bpf */
static int netlink_recv(int sock)
{
	struct iovec iov = {};
	struct msghdr mhdr = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	bool multipart = true;
	struct nlmsgerr *err;
	struct nlmsghdr *nh;
	int len, ret;

	ret = alloc_iov(&iov, 4096);
	if (ret)
		goto done;

	while (multipart) {
		multipart = false;
		len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
		if (len < 0) {
			ret = len;
			goto done;
		}

		if (len > iov.iov_len) {
			ret = alloc_iov(&iov, len);
			if (ret)
				goto done;
		}

		len = netlink_recvmsg(sock, &mhdr, 0);
		if (len < 0) {
			ret = len;
			goto done;
		}

		if (len == 0)
			break;

		for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_flags & NLM_F_MULTI)
				multipart = true;
			switch (nh->nlmsg_type) {
			case NLMSG_ERROR:
				err = (struct nlmsgerr *)NLMSG_DATA(nh);
				if (!err->error)
					continue;
				ret = err->error;
				goto done;
			case NLMSG_DONE:
				ret = 0;
				goto done;
			default:
				break;
			}
		}
	}
	ret = 0;
done:
	free(iov.iov_base);
	return ret;
}

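/* Set the MTU of the interface with index @ifindex by sending an
 * RTM_NEWLINK request with an IFLA_MTU attribute over a rtnetlink socket.
 */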
int xsk_set_mtu(int ifindex, int mtu)
{
	struct nl_mtu_req req;
	struct rtattr *rta;
	int fd, ret;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
	if (fd < 0)
		return fd;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.msg.ifi_family = AF_UNSPEC;
	req.msg.ifi_index = ifindex;
	rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_MTU;
	rta->rta_len = RTA_LENGTH(sizeof(unsigned int));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_LENGTH(sizeof(mtu));
	memcpy(RTA_DATA(rta), &mtu, sizeof(mtu));

	ret = send(fd, &req, req.nh.nlmsg_len, 0);
	if (ret < 0) {
		close(fd);
		return errno;
	}

	ret = netlink_recv(fd);
	close(fd);
	return ret;
}

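/* Attach the given XDP program to @ifindex with the requested xdp_flags. */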
int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags)
{
	int prog_fd;

	prog_fd = bpf_program__fd(prog);
	return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
}

void xsk_detach_xdp_program(int ifindex, u32 xdp_flags)
{
	bpf_xdp_detach(ifindex, xdp_flags, NULL);
}

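/* Remove the XDP socket stored at entry 0 of the XSKMAP. */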
void xsk_clear_xskmap(struct bpf_map *map)
{
	u32 index = 0;
	int map_fd;

	map_fd = bpf_map__fd(map);
	bpf_map_delete_elem(map_fd, &index);
}

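/* Insert the XDP socket's fd at entry 0 of the XSKMAP so the XDP program can
 * redirect packets to it.
 */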
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
{
	int map_fd, sock_fd;
	u32 index = 0;

	map_fd = bpf_map__fd(map);
	sock_fd = xsk_socket__fd(xsk);

	return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
}

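/* Look up an existing context for (ifindex, queue_id) on this umem and take
 * a reference on it. Returns NULL if no matching context exists.
 */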
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;
			return ctx;
		}
	}

	return NULL;
}

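/* Drop a reference on a context. On the last reference, optionally unmap the
 * fill and completion rings, then unlink and free the context.
 */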
static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
	       sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
	       sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}

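/* Allocate a new context for (ifindex, queue_id). If the umem's original
 * fill/completion rings have already been handed to another socket
 * (fill_save is NULL), create a fresh ring pair on this socket's fd;
 * otherwise reuse the rings saved at umem creation time.
 */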
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;

	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}

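/* Create an AF_XDP socket on (ifindex, queue_id), optionally sharing a umem
 * that is already in use by another socket. Sets up the RX/TX rings, maps
 * them into user space and binds the socket.
 */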
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	bool unmap, rx_setup_done = false, tx_setup_done = false;
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	unmap = umem->fill_save != fill;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
		rx_setup_done = umem->rx_ring_setup_done;
		tx_setup_done = umem->tx_ring_setup_done;
	}

	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;

	if (rx && !rx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->rx_ring_setup_done = true;
	}
	if (tx && !tx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->tx_ring_setup_done = true;
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	umem->fill_save = NULL;
	umem->comp_save = NULL;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx, unmap);
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}

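/* Create an AF_XDP socket using the fill and completion rings saved when the
 * umem was created.
 */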
int xsk_socket__create(struct xsk_socket **xsk_ptr, int ifindex,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}

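/* Tear down a umem: unmap any remaining fill/completion rings, close the
 * backing fd and free the structure. Fails with -EBUSY while sockets still
 * reference the umem.
 */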
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (!err && umem->fill_save && umem->comp_save) {
		munmap(umem->fill_save->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp_save->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}

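/* Close an AF_XDP socket: release its context, unmap the RX/TX rings and drop
 * the umem reference. The fd is kept open if it is also the umem's fd.
 */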
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;

	xsk_put_ctx(ctx, true);

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}