| /kernel/linux/linux-6.6/include/net/ |
| D | xdp.h |
      2  /* include/net/xdp.h
      15   * DOC: XDP RX-queue information
      17   * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
      22   * reference to this xdp_rxq_info structure. This provides the XDP
      32   * The struct is not directly tied to the XDP prog. A new XDP prog
      43   MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
      49   /* XDP flags for ndo_xdp_xmit */
      74   XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
      75   XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
      91   static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp) in xdp_buff_has_frags() argument
      [all …]
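The DOC excerpt above describes how each driver RX queue registers an xdp_rxq_info and a memory-return model before XDP can run on it. Below is a minimal sketch of that registration for a hypothetical driver (the mydrv_* names and the per-queue struct are illustrative, not taken from the hits above), assuming the linux-6.6 signatures of xdp_rxq_info_reg() and xdp_rxq_info_reg_mem_model():

    #include <linux/netdevice.h>
    #include <net/xdp.h>

    /* Hypothetical per-queue context; only the fields used here are shown. */
    struct mydrv_rx_queue {
        struct net_device *netdev;
        u32 queue_index;
        unsigned int napi_id;
        struct xdp_rxq_info xdp_rxq;
    };

    static int mydrv_setup_xdp_rxq(struct mydrv_rx_queue *rxq)
    {
        int err;

        /* Tie the xdp_rxq_info to this netdev/queue for the queue's lifetime. */
        err = xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->netdev,
                               rxq->queue_index, rxq->napi_id);
        if (err)
            return err;

        /* Choose how redirected frames are returned; MEM_TYPE_PAGE_SHARED is
         * the simplest model, while page_pool/XSK models pass a real allocator.
         */
        err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
        if (err)
            xdp_rxq_info_unreg(&rxq->xdp_rxq);

        return err;
    }

On teardown, xdp_rxq_info_unreg() also releases whatever memory model was registered for the queue.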
|
| D | xdp_sock_drv.h |
      53  return pool->heads[0].xdp.rxq->napi_id; in xsk_pool_get_napi_id()
      73  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp) in xsk_buff_xdp_get_dma() argument
      75  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_xdp_get_dma()
      80  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) in xsk_buff_xdp_get_frame_dma() argument
      82  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_xdp_get_frame_dma()
      98  static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) in xsk_buff_alloc_batch() argument
      100  return xp_alloc_batch(pool, xdp, max); in xsk_buff_alloc_batch()
      108  static inline void xsk_buff_free(struct xdp_buff *xdp) in xsk_buff_free() argument
      110  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_free()
      114  if (likely(!xdp_buff_has_frags(xdp))) in xsk_buff_free()
      [all …]
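Most of the helpers matched above recover the AF_XDP wrapper struct xdp_buff_xsk from a plain struct xdp_buff pointer via container_of(), which works because the xdp_buff is embedded in the wrapper as the member named xdp. A generic sketch of the same idiom, using an invented wrapper type rather than the real xdp_buff_xsk layout:

    #include <linux/container_of.h>
    #include <linux/types.h>

    /* Illustrative wrapper: pool/driver state around an embedded inner object,
     * playing the role that xdp_buff_xsk plays around xdp_buff.
     */
    struct inner_buff {
        u32 len;
    };

    struct outer_buff {
        dma_addr_t dma_addr;
        struct inner_buff inner;
    };

    static inline dma_addr_t outer_buff_get_dma(struct inner_buff *in)
    {
        /* Step back from the embedded member to the enclosing structure,
         * exactly as xsk_buff_xdp_get_dma() does for xdp_buff_xsk.
         */
        struct outer_buff *out = container_of(in, struct outer_buff, inner);

        return out->dma_addr;
    }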
|
| /kernel/linux/linux-5.10/include/net/ |
| D | xdp.h |
      2  /* include/net/xdp.h
      12   * DOC: XDP RX-queue information
      14   * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
      19   * reference to this xdp_rxq_info structure. This provides the XDP
      29   * The struct is not directly tied to the XDP prog. A new XDP prog
      40   MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
      46   /* XDP flags for ndo_xdp_xmit */
      80   * This macro reserves tailroom in the XDP buffer by limiting the
      81   * XDP/BPF data access to data_hard_end. Notice same area (and size)
      84   #define xdp_data_hard_end(xdp) \ argument
      [all …]
|
| D | xdp_sock_drv.h |
      60  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp) in xsk_buff_xdp_get_dma() argument
      62  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_xdp_get_dma()
      67  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) in xsk_buff_xdp_get_frame_dma() argument
      69  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_xdp_get_frame_dma()
      84  static inline void xsk_buff_free(struct xdp_buff *xdp) in xsk_buff_free() argument
      86  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_free()
      102  static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool) in xsk_buff_dma_sync_for_cpu() argument
      104  struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); in xsk_buff_dma_sync_for_cpu()
      193  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp) in xsk_buff_xdp_get_dma() argument
      198  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) in xsk_buff_xdp_get_frame_dma() argument
      [all …]
|
| /kernel/linux/linux-6.6/Documentation/bpf/ |
| D | redirect.rst |
      25  :doc: xdp redirect
      29  those that do, not all of them support non-linear frames. Non-linear xdp
      41  The following bpftrace command can be used to capture and count all XDP tracepoints:
      45  sudo bpftrace -e 'tracepoint:xdp:* { @cnt[probe] = count(); }'
      49  @cnt[tracepoint:xdp:mem_connect]: 18
      50  @cnt[tracepoint:xdp:mem_disconnect]: 18
      51  @cnt[tracepoint:xdp:xdp_exception]: 19605
      52  @cnt[tracepoint:xdp:xdp_devmap_xmit]: 1393604
      53  @cnt[tracepoint:xdp:xdp_redirect]: 22292200
      56  The various xdp tracepoints can be found in ``source/include/trace/events/xdp.h``
      [all …]
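Beyond the bpftrace one-liner quoted in the document, the same tracepoints can be consumed from BPF. The sketch below counts xdp_exception events per XDP action code; the context layout (8-byte common header followed by prog_id, act, ifindex) is assumed from include/trace/events/xdp.h and should be checked against the tracepoint's format file before relying on it:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Assumed layout of the xdp_exception tracepoint context; verify against
     * /sys/kernel/debug/tracing/events/xdp/xdp_exception/format.
     */
    struct xdp_exception_ctx {
        __u64 __pad;        /* common tracepoint fields */
        __s32 prog_id;
        __u32 act;
        __s32 ifindex;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, 8);    /* one slot per XDP action code */
        __type(key, __u32);
        __type(value, __u64);
    } exception_cnt SEC(".maps");

    SEC("tracepoint/xdp/xdp_exception")
    int count_xdp_exceptions(struct xdp_exception_ctx *ctx)
    {
        __u32 key = ctx->act;
        __u64 *cnt;

        if (key >= 8)
            return 0;

        cnt = bpf_map_lookup_elem(&exception_cnt, &key);
        if (cnt)
            (*cnt)++;

        return 0;
    }

    char _license[] SEC("license") = "GPL";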
|
| /kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/ |
| D | xdp_features.c |
      65  xdp_process_echo_packet(struct xdp_md *xdp, bool dut) in xdp_process_echo_packet() argument
      67  void *data_end = (void *)(long)xdp->data_end; in xdp_process_echo_packet()
      68  void *data = (void *)(long)xdp->data; in xdp_process_echo_packet()
      135  xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut) in xdp_update_stats() argument
      139  if (xdp_process_echo_packet(xdp, tx)) in xdp_update_stats()
      155  SEC("xdp")
      156  int xdp_tester_check_tx(struct xdp_md *xdp) in xdp_tester_check_tx() argument
      158  xdp_update_stats(xdp, true, false); in xdp_tester_check_tx()
      163  SEC("xdp")
      164  int xdp_tester_check_rx(struct xdp_md *xdp) in xdp_tester_check_rx() argument
      [all …]
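xdp_process_echo_packet() above starts from the usual casts of xdp->data and xdp->data_end; every header must be bounds-checked against data_end before it is read, or the verifier refuses the program. A minimal self-contained sketch of that pattern (the drop-non-ICMP policy is just an example, not what the selftest does):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("xdp")
    int xdp_parse_demo(struct xdp_md *xdp)
    {
        void *data_end = (void *)(long)xdp->data_end;
        void *data = (void *)(long)xdp->data;
        struct ethhdr *eth = data;
        struct iphdr *iph;

        /* Verifier requirement: prove the Ethernet header fits before reading it. */
        if ((void *)(eth + 1) > data_end)
            return XDP_PASS;

        if (eth->h_proto != bpf_htons(ETH_P_IP))
            return XDP_PASS;

        iph = (void *)(eth + 1);
        if ((void *)(iph + 1) > data_end)
            return XDP_PASS;

        /* Example policy: drop everything that is not ICMP. */
        return iph->protocol == IPPROTO_ICMP ? XDP_PASS : XDP_DROP;
    }

    char _license[] SEC("license") = "GPL";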
|
| D | verifier_xdp_direct_packet_access.c |
      8  SEC("xdp")
      9  __description("XDP pkt read, pkt_end mangling, bad access 1")
      29  SEC("xdp")
      30  __description("XDP pkt read, pkt_end mangling, bad access 2")
      50  SEC("xdp")
      51  __description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
      70  SEC("xdp")
      71  __description("XDP pkt read, pkt_data' > pkt_end, bad access 1")
      91  SEC("xdp")
      92  __description("XDP pkt read, pkt_data' > pkt_end, bad access 2")
      [all …]
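The verifier selftests above (written as inline BPF assembly) probe which pointer comparisons make a later packet access provably safe. The same distinction in plain XDP C looks roughly like this sketch:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_pkt_bounds_demo(struct xdp_md *xdp)
    {
        void *data_end = (void *)(long)xdp->data_end;
        void *data = (void *)(long)xdp->data;
        __u64 value = 0;

        /* Good access: prove that 8 bytes fit before dereferencing. */
        if (data + sizeof(__u64) <= data_end)
            value = *(__u64 *)data;

        /* The "bad access" cases the selftests expect to be rejected include
         * reading before any comparison against pkt_end, and "mangling"
         * pkt_end by doing arithmetic on it and then using it as the bound.
         */
        return (value & 1) ? XDP_DROP : XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";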
|
| D | test_xdp_loop.c |
      74  static __always_inline int handle_ipv4(struct xdp_md *xdp) in handle_ipv4() argument
      76  void *data_end = (void *)(long)xdp->data_end; in handle_ipv4()
      77  void *data = (void *)(long)xdp->data; in handle_ipv4()
      107  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr))) in handle_ipv4()
      110  data = (void *)(long)xdp->data; in handle_ipv4()
      111  data_end = (void *)(long)xdp->data_end; in handle_ipv4()
      147  static __always_inline int handle_ipv6(struct xdp_md *xdp) in handle_ipv6() argument
      149  void *data_end = (void *)(long)xdp->data_end; in handle_ipv6()
      150  void *data = (void *)(long)xdp->data; in handle_ipv6()
      177  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr))) in handle_ipv6()
      [all …]
|
| D | test_xdp.c |
      78  static __always_inline int handle_ipv4(struct xdp_md *xdp) in handle_ipv4() argument
      80  void *data_end = (void *)(long)xdp->data_end; in handle_ipv4()
      81  void *data = (void *)(long)xdp->data; in handle_ipv4()
      111  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr))) in handle_ipv4()
      114  data = (void *)(long)xdp->data; in handle_ipv4()
      115  data_end = (void *)(long)xdp->data_end; in handle_ipv4()
      151  static __always_inline int handle_ipv6(struct xdp_md *xdp) in handle_ipv6() argument
      153  void *data_end = (void *)(long)xdp->data_end; in handle_ipv6()
      154  void *data = (void *)(long)xdp->data; in handle_ipv6()
      181  if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr))) in handle_ipv6()
      [all …]
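Both test programs grow headroom with bpf_xdp_adjust_head(xdp, -(int)sizeof(...)) and then reload data/data_end, because the helper invalidates every previously derived packet pointer. A condensed sketch of that sequence; the actual header rewriting that test_xdp.c performs afterwards is only referenced in comments:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_encap_demo(struct xdp_md *xdp)
    {
        void *data, *data_end;

        /* Grow headroom by one IPv4 header; this fails if the driver did not
         * reserve enough XDP headroom for the frame.
         */
        if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
            return XDP_DROP;

        /* All packet pointers are invalid after the adjust: reload them. */
        data = (void *)(long)xdp->data;
        data_end = (void *)(long)xdp->data_end;

        /* The frame now has sizeof(struct iphdr) of fresh space at the front.
         * A real encapsulator (as in test_xdp.c) rewrites the Ethernet header
         * at the new front and builds the outer IP header behind it; either
         * way every write needs a fresh bounds check first.
         */
        if (data + sizeof(struct ethhdr) + sizeof(struct iphdr) > data_end)
            return XDP_DROP;

        return XDP_TX;
    }

    char _license[] SEC("license") = "GPL";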
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
| D | rx.c |
      5  #include "en/xdp.h"
      32  struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk; in mlx5e_xsk_skb_from_cqe_mpwrq_linear() local
      41  /* head_offset is not used in this function, because xdp->data and the in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      48  xdp->data_end = xdp->data + cqe_bcnt32; in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      49  xdp_set_data_meta_invalid(xdp); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      50  xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      51  net_prefetch(xdp->data); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      68  if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) { in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      71  return NULL; /* page/packet was consumed by XDP */ in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      77  return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/net/hyperv/ |
| D | netvsc_bpf.c |
      16  #include <net/xdp.h>
      24  struct xdp_buff *xdp) in netvsc_run_xdp() argument
      32  xdp->data_hard_start = NULL; in netvsc_run_xdp()
      47  xdp->data_hard_start = page_address(page); in netvsc_run_xdp()
      48  xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM; in netvsc_run_xdp()
      49  xdp_set_data_meta_invalid(xdp); in netvsc_run_xdp()
      50  xdp->data_end = xdp->data + len; in netvsc_run_xdp()
      51  xdp->rxq = &nvchan->xdp_rxq; in netvsc_run_xdp()
      52  xdp->frame_sz = PAGE_SIZE; in netvsc_run_xdp()
      54  memcpy(xdp->data, data, len); in netvsc_run_xdp()
      [all …]
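The linux-5.10 code above populates the xdp_buff fields by hand (data_hard_start, data, data_end, rxq, frame_sz). Newer kernels, including the linux-6.6 copy of this file listed further down, express the same setup through xdp_init_buff()/xdp_prepare_buff(). A small sketch of the equivalent helper-based form, assuming those helpers' current signatures; the function name and the headroom parameter are illustrative:

    #include <linux/mm.h>
    #include <net/xdp.h>

    /* Equivalent of the open-coded assignments above, using the helpers
     * available since roughly v5.12. Passing "false" means no metadata is
     * prepared, matching the xdp_set_data_meta_invalid() call in 5.10.
     */
    static void mydrv_prepare_xdp_buff(struct xdp_buff *xdp, struct page *page,
                                       struct xdp_rxq_info *rxq,
                                       unsigned int headroom, unsigned int len)
    {
        xdp_init_buff(xdp, PAGE_SIZE, rxq);                      /* frame_sz + rxq     */
        xdp_prepare_buff(xdp, page_address(page), headroom, len, /* data pointers      */
                         false);                                 /* no valid data_meta */
    }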
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/broadcom/bnxt/ |
| D | bnxt_xdp.c |
      28  struct xdp_buff *xdp) in bnxt_xmit_bd() argument
      38  if (xdp && xdp_buff_has_frags(xdp)) { in bnxt_xmit_bd()
      39  sinfo = xdp_get_shared_info_from_buff(xdp); in bnxt_xmit_bd()
      47  if (xdp) in bnxt_xmit_bd()
      48  tx_buf->page = virt_to_head_page(xdp->data); in bnxt_xmit_bd()
      97  struct xdp_buff *xdp) in __bnxt_xmit_xdp() argument
      101  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); in __bnxt_xmit_xdp()
      184  struct xdp_buff *xdp) in bnxt_xdp_buff_init() argument
      199  xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); in bnxt_xdp_buff_init()
      200  xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false); in bnxt_xdp_buff_init()
      [all …]
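bnxt_xmit_bd() checks xdp_buff_has_frags() and fetches the shared info to reach the extra buffers of a non-linear (multi-buffer) frame. The sketch below walks those frags in isolation; note that net/xdp.h already provides xdp_get_buff_len() for this exact calculation, so the open-coded loop is only meant to show where the frag array lives:

    #include <linux/skbuff.h>
    #include <net/xdp.h>

    /* Count the payload bytes of an xdp_buff, including any frags attached
     * to a non-linear (multi-buffer) frame.
     */
    static unsigned int mydrv_xdp_buff_len(struct xdp_buff *xdp)
    {
        unsigned int len = xdp->data_end - xdp->data;
        struct skb_shared_info *sinfo;
        int i;

        if (!xdp_buff_has_frags(xdp))
            return len;

        /* The shared info lives in the tailroom of the head buffer. */
        sinfo = xdp_get_shared_info_from_buff(xdp);
        for (i = 0; i < sinfo->nr_frags; i++)
            len += skb_frag_size(&sinfo->frags[i]);

        return len;
    }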
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/bnxt/ |
| D | bnxt_xdp.c |
      108   * true - packet consumed by XDP and new buffer is allocated.
      118  struct xdp_buff xdp; in bnxt_rx_xdp() local
      136  xdp.data_hard_start = *data_ptr - offset; in bnxt_rx_xdp()
      137  xdp.data = *data_ptr; in bnxt_rx_xdp()
      138  xdp_set_data_meta_invalid(&xdp); in bnxt_rx_xdp()
      139  xdp.data_end = *data_ptr + *len; in bnxt_rx_xdp()
      140  xdp.rxq = &rxr->xdp_rxq; in bnxt_rx_xdp()
      141  xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ in bnxt_rx_xdp()
      142  orig_data = xdp.data; in bnxt_rx_xdp()
      145  act = bpf_prog_run_xdp(xdp_prog, &xdp); in bnxt_rx_xdp()
      [all …]
|
| /kernel/linux/linux-6.6/Documentation/networking/ |
| D | xdp-rx-metadata.rst |
      2  XDP RX Metadata
      5  This document describes how an eXpress Data Path (XDP) program can access
      12  XDP has access to a set of kfuncs to manipulate the metadata in an XDP frame.
      14  implement these kfuncs. The set of kfuncs is declared in ``include/net/xdp.h``
      20  .. kernel-doc:: net/core/xdp.c
      23  An XDP program can use these kfuncs to read the metadata into stack
      25  consumers, an XDP program can store it into the metadata area carried
      34  Within an XDP frame, the metadata layout (accessed via ``xdp_buff``) is
      44  An XDP program can store individual metadata items into this ``data_meta``
      53  program that redirects XDP frames into the ``AF_XDP`` socket (``XSK``) and
      [all …]
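The RX-metadata document summarized above pairs two mechanisms: kfuncs that read hardware-provided metadata (timestamp, hash) and a data_meta area in front of the packet where the program can store results for AF_XDP consumers. A hedged sketch of that flow; it assumes a CO-RE build with a generated vmlinux.h and a 6.6 kernel that registers these kfuncs (drivers that do not implement them make the calls fail at runtime):

    // SPDX-License-Identifier: GPL-2.0
    #include <vmlinux.h>            /* generated: bpftool btf dump file /sys/kernel/btf/vmlinux format c */
    #include <bpf/bpf_helpers.h>

    extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                             __u64 *timestamp) __ksym;
    extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
                                        enum xdp_rss_hash_type *rss_type) __ksym;

    /* Illustrative layout the AF_XDP consumer would agree on. */
    struct meta {
        __u64 rx_timestamp;
        __u32 rx_hash;
    };

    SEC("xdp")
    int xdp_store_rx_meta(struct xdp_md *ctx)
    {
        enum xdp_rss_hash_type rss_type;
        void *data, *data_meta;
        struct meta *meta;

        /* Reserve space in front of the packet for our metadata struct. */
        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
            return XDP_PASS;

        data = (void *)(long)ctx->data;
        data_meta = (void *)(long)ctx->data_meta;
        meta = data_meta;
        if ((void *)(meta + 1) > data)
            return XDP_PASS;

        if (bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
            meta->rx_timestamp = 0;
        if (bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &rss_type))
            meta->rx_hash = 0;

        /* An AF_XDP consumer would typically see this after a redirect into an XSKMAP. */
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";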
|
| /kernel/linux/linux-6.6/drivers/net/vmxnet3/ |
| D | vmxnet3_xdp.c |
      48  NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP", in vmxnet3_xdp_set()
      54  NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP"); in vmxnet3_xdp_set()
      86  "failed to re-create rx queues for XDP."); in vmxnet3_xdp_set()
      92  "failed to activate device for XDP."); in vmxnet3_xdp_set()
      100  /* This is the main xdp call used by kernel to set/unset eBPF program. */
      152  } else { /* XDP buffer from page pool */ in vmxnet3_xdp_xmit_frame()
      261  vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp, in vmxnet3_run_xdp() argument
      270  act = bpf_prog_run_xdp(prog, xdp); in vmxnet3_run_xdp()
      271  page = virt_to_page(xdp->data_hard_start); in vmxnet3_run_xdp()
      277  err = xdp_do_redirect(rq->adapter->netdev, xdp, prog); in vmxnet3_run_xdp()
      [all …]
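vmxnet3_run_xdp() follows the dispatch that nearly every driver wraps around bpf_prog_run_xdp(): act on the verdict, hand XDP_REDIRECT to xdp_do_redirect(), and trace anything unexpected. A driver-agnostic sketch using the linux-6.6 three-argument bpf_warn_invalid_xdp_action(); the TX and recycle steps are stubbed out since they are hardware specific:

    #include <linux/netdevice.h>
    #include <linux/filter.h>
    #include <linux/bpf_trace.h>

    /* Returns true when the frame was consumed by XDP, false when the caller
     * should continue building an skb (XDP_PASS).
     */
    static bool mydrv_run_xdp(struct net_device *netdev, struct bpf_prog *prog,
                              struct xdp_buff *xdp)
    {
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
            return false;
        case XDP_TX:
            /* Driver-specific: queue xdp->data for transmit, recycle on failure. */
            return true;
        case XDP_REDIRECT:
            if (xdp_do_redirect(netdev, xdp, prog))
                goto drop;
            return true;
        default:
            bpf_warn_invalid_xdp_action(netdev, prog, act);
            fallthrough;
        case XDP_ABORTED:
            trace_xdp_exception(netdev, prog, act);
            fallthrough;
        case XDP_DROP:
    drop:
            /* Driver-specific: recycle or free the buffer. */
            return true;
        }
    }

After any successful redirect the driver also calls xdp_do_flush() once per NAPI poll; that step is omitted here.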
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/ |
| D | ice_xsk.c |
      6  #include <net/xdp.h>
      355   * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
      360   * XDP requires more memory, than rx_buf provides.
      441   * @xdp: SW ring of xdp_buff that will hold the buffers
      452  static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, in ice_fill_rx_descs() argument
      459  buffs = xsk_buff_alloc_batch(pool, xdp, count); in ice_fill_rx_descs()
      461  dma = xsk_buff_xdp_get_dma(*xdp); in ice_fill_rx_descs()
      466  xdp++; in ice_fill_rx_descs()
      489  struct xdp_buff **xdp; in __ice_alloc_rx_bufs_zc() local
      492  xdp = ice_xdp_buf(rx_ring, ntu); in __ice_alloc_rx_bufs_zc()
      [all …]
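ice_fill_rx_descs() pulls a batch of buffers from the XSK pool with xsk_buff_alloc_batch() and writes each buffer's DMA address into an RX descriptor. A driver-agnostic sketch of the same loop; the descriptor layout and the function name are invented for illustration:

    #include <linux/types.h>
    #include <net/xdp_sock_drv.h>

    /* Hypothetical RX descriptor: only a packet-buffer address field is shown. */
    struct mydrv_rx_desc {
        u64 pkt_addr;    /* bus address of the packet buffer */
    };

    /* Fill up to @count descriptors from the XSK buffer pool; returns how many
     * buffers were actually obtained (the pool may run short).
     */
    static u16 mydrv_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
                                   struct mydrv_rx_desc *desc, u16 count)
    {
        dma_addr_t dma;
        u16 buffs;
        u16 i;

        buffs = xsk_buff_alloc_batch(pool, xdp, count);
        for (i = 0; i < buffs; i++) {
            dma = xsk_buff_xdp_get_dma(*xdp);
            desc->pkt_addr = dma;

            desc++;
            xdp++;
        }

        return buffs;
    }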
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/intel/ixgbe/ |
| D | ixgbe_xsk.c |
      6  #include <net/xdp.h>
      99  struct xdp_buff *xdp) in ixgbe_run_xdp_zc() argument
      108  act = bpf_prog_run_xdp(xdp_prog, xdp); in ixgbe_run_xdp_zc()
      111  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ixgbe_run_xdp_zc()
      122  xdpf = xdp_convert_buff_to_frame(xdp); in ixgbe_run_xdp_zc()
      161  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
      162  if (!bi->xdp) { in ixgbe_alloc_rx_buffers_zc()
      167  dma = xsk_buff_xdp_get_dma(bi->xdp); in ixgbe_alloc_rx_buffers_zc()
      209  unsigned int metasize = bi->xdp->data - bi->xdp->data_meta; in ixgbe_construct_skb_zc()
      210  unsigned int datasize = bi->xdp->data_end - bi->xdp->data; in ixgbe_construct_skb_zc()
      [all …]
|
| /kernel/linux/linux-6.6/Documentation/netlink/specs/ |
| D | netdev.yaml |
      11  name: xdp-act
      17  XDP features set supported by all drivers
      34  This feature informs if netdev supports XDP hw offloading.
      38  This feature informs if netdev implements non-linear XDP buffer
      43  This feature informs if netdev implements non-linear XDP buffer
      60  name: xdp-features
      61  doc: Bitmask of enabled xdp-features.
      63  enum: xdp-act
      66  name: xdp-zc-max-segs
      85  - xdp-features
      [all …]
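The spec above defines the xdp-features bitmask that the kernel reports over the netdev netlink family. From userspace the same bitmask can also be read through libbpf's bpf_xdp_query(); the sketch below assumes a libbpf recent enough to have the feature_flags output field (roughly 1.2+) and a kernel that reports it (6.3+):

    // SPDX-License-Identifier: GPL-2.0
    #include <stdio.h>
    #include <net/if.h>
    #include <bpf/libbpf.h>

    int main(int argc, char **argv)
    {
        LIBBPF_OPTS(bpf_xdp_query_opts, opts);
        int ifindex, err;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
            return 1;
        }

        ifindex = if_nametoindex(argv[1]);
        if (!ifindex) {
            perror("if_nametoindex");
            return 1;
        }

        err = bpf_xdp_query(ifindex, 0, &opts);
        if (err) {
            fprintf(stderr, "bpf_xdp_query: %d\n", err);
            return 1;
        }

        /* Bits correspond to the xdp-act enum in netdev.yaml
         * (NETDEV_XDP_ACT_BASIC, _REDIRECT, _XSK_ZEROCOPY, ...).
         */
        printf("xdp-features bitmask: 0x%llx\n",
               (unsigned long long)opts.feature_flags);
        return 0;
    }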
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/intel/ixgbe/ |
| D | ixgbe_xsk.c |
      6  #include <net/xdp.h>
      99  struct xdp_buff *xdp) in ixgbe_run_xdp_zc() argument
      108  act = bpf_prog_run_xdp(xdp_prog, xdp); in ixgbe_run_xdp_zc()
      111  err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ixgbe_run_xdp_zc()
      125  xdpf = xdp_convert_buff_to_frame(xdp); in ixgbe_run_xdp_zc()
      168  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
      169  if (!bi->xdp) { in ixgbe_alloc_rx_buffers_zc()
      174  dma = xsk_buff_xdp_get_dma(bi->xdp); in ixgbe_alloc_rx_buffers_zc()
      214  const struct xdp_buff *xdp) in ixgbe_construct_skb_zc() argument
      216  unsigned int totalsize = xdp->data_end - xdp->data_meta; in ixgbe_construct_skb_zc()
      [all …]
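ixgbe_construct_skb_zc() sizes its copy as data_end - data_meta so that both the payload and any metadata the XDP program left in front of it survive the copy out of the zero-copy buffer. A simplified sketch of that copy path with the driver specifics removed; the function name is illustrative:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <net/xdp.h>

    /* Copy an AF_XDP zero-copy buffer into a freshly allocated skb so the
     * XSK buffer can be recycled; metadata written by the XDP program (the
     * region between data_meta and data) is preserved.
     */
    static struct sk_buff *mydrv_construct_skb_zc(struct napi_struct *napi,
                                                  const struct xdp_buff *xdp)
    {
        unsigned int totalsize = xdp->data_end - xdp->data_meta;
        unsigned int metasize = xdp->data - xdp->data_meta;
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, totalsize);
        if (unlikely(!skb))
            return NULL;

        memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
        if (metasize) {
            /* Expose the metadata to the stack and move skb->data past it. */
            skb_metadata_set(skb, metasize);
            __skb_pull(skb, metasize);
        }

        return skb;
    }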
|
| /kernel/linux/linux-6.6/drivers/net/hyperv/ |
| D | netvsc_bpf.c |
      17  #include <net/xdp.h>
      25  struct xdp_buff *xdp) in netvsc_run_xdp() argument
      35  xdp->data_hard_start = NULL; in netvsc_run_xdp()
      56  xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq); in netvsc_run_xdp()
      57  xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false); in netvsc_run_xdp()
      59  memcpy(xdp->data, data, len); in netvsc_run_xdp()
      61  act = bpf_prog_run_xdp(prog, xdp); in netvsc_run_xdp()
      73  if (!xdp_do_redirect(ndev, xdp, prog)) { in netvsc_run_xdp()
      107  xdp->data_hard_start = NULL; in netvsc_run_xdp()
      138  netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n", in netvsc_xdp_set()
      [all …]
|
| /kernel/linux/linux-5.10/include/trace/events/ |
| D | xdp.h |
      3  #define TRACE_SYSTEM xdp
      31  const struct bpf_prog *xdp, u32 act),
      33  TP_ARGS(dev, xdp, act),
      42  __entry->prog_id = xdp->aux->id;
      97  const struct bpf_prog *xdp,
      101  TP_ARGS(dev, xdp, tgt, err, map, index),
      114  __entry->prog_id = xdp->aux->id;
      134  const struct bpf_prog *xdp,
      137  TP_ARGS(dev, xdp, tgt, err, map, index)
      142  const struct bpf_prog *xdp,
      [all …]
|
| /kernel/linux/linux-6.6/include/trace/events/ |
| D | xdp.h |
      3  #define TRACE_SYSTEM xdp
      12  #include <net/xdp.h>
      32  const struct bpf_prog *xdp, u32 act),
      34  TP_ARGS(dev, xdp, act),
      43  __entry->prog_id = xdp->aux->id;
      93  const struct bpf_prog *xdp,
      98  TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
      124  __entry->prog_id = xdp->aux->id;
      143  const struct bpf_prog *xdp,
      147  TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
      [all …]
|
| /kernel/linux/linux-5.10/tools/bpf/bpftool/Documentation/ |
| D | bpftool-net.rst |
      29  | *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** }
      36  Currently, only device driver xdp attachments and tc filter
      47  The current output will start with all xdp program attachments, followed by
      48  all tc class/qdisc bpf program attachments. Both xdp programs and
      59  Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
      62  … **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
      63  …**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as …
      64  **xdpdrv** - Native XDP. runs earliest point in driver's receive path;
      65  **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
      71  Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
      [all …]
|
| /kernel/linux/linux-6.6/tools/bpf/bpftool/Documentation/ |
| D | bpftool-net.rst |
      33  | *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** }
      40  Currently, device driver xdp attachments, tcx and old-style tc
      54  The current output will start with all xdp program attachments, followed by
      56  and finally netfilter programs. Both xdp programs and tcx/tc programs are
      67  Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
      70  **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
      71  …**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as …
      72  **xdpdrv** - Native XDP. runs earliest point in driver's receive path;
      73  **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
      79  Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
      [all …]
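The man page above enumerates the xdp/xdpgeneric/xdpdrv/xdpoffload attach types. The same choice is made programmatically by passing an XDP_FLAGS_* mode bit to libbpf's bpf_xdp_attach(); a hedged userspace sketch (the object file name prog.bpf.o is illustrative and error reporting is minimal):

    // SPDX-License-Identifier: GPL-2.0
    #include <net/if.h>
    #include <linux/if_link.h>    /* XDP_FLAGS_* */
    #include <bpf/libbpf.h>

    /* Attach the first program of "prog.bpf.o" in native/driver mode,
     * mirroring bpftool's "xdpdrv" attach type.
     */
    static int attach_xdp_drv(const char *ifname)
    {
        struct bpf_object *obj;
        struct bpf_program *prog;
        int ifindex = if_nametoindex(ifname);

        if (!ifindex)
            return -1;

        obj = bpf_object__open_file("prog.bpf.o", NULL);
        if (!obj)
            return -1;
        if (bpf_object__load(obj))
            return -1;

        prog = bpf_object__next_program(obj, NULL);
        if (!prog)
            return -1;

        /* XDP_FLAGS_SKB_MODE would be "xdpgeneric", XDP_FLAGS_HW_MODE "xdpoffload". */
        return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
                              XDP_FLAGS_DRV_MODE, NULL);
    }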
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/microchip/lan966x/ |
| D | lan966x_xdp.c |
      9  static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp) in lan966x_xdp_setup() argument
      18  NL_SET_ERR_MSG_MOD(xdp->extack, in lan966x_xdp_setup()
      19  "Allow to set xdp only when using fdma"); in lan966x_xdp_setup()
      24  old_prog = xchg(&port->xdp_prog, xdp->prog); in lan966x_xdp_setup()
      43  int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp) in lan966x_xdp() argument
      45  switch (xdp->command) { in lan966x_xdp()
      47  return lan966x_xdp_setup(dev, xdp); in lan966x_xdp()
      79  struct xdp_buff xdp; in lan966x_xdp_run() local
      82  xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order, in lan966x_xdp_run()
      84  xdp_prepare_buff(&xdp, page_address(page), in lan966x_xdp_run()
      [all …]
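lan966x_xdp() is the driver's ndo_bpf hook: it switches on xdp->command and, for XDP_SETUP_PROG, publishes the new program with xchg() and releases the old one. A stripped-down sketch of that pattern for a hypothetical driver, with locking and the per-queue refresh a real driver performs omitted:

    #include <linux/atomic.h>
    #include <linux/bpf.h>
    #include <linux/netdevice.h>

    /* Hypothetical private struct holding the attached program. */
    struct mydrv_port {
        struct bpf_prog *xdp_prog;
    };

    static int mydrv_xdp_setup(struct mydrv_port *port, struct netdev_bpf *xdp)
    {
        struct bpf_prog *old_prog;

        /* Publish the new program (may be NULL to detach) and release the old one. */
        old_prog = xchg(&port->xdp_prog, xdp->prog);
        if (old_prog)
            bpf_prog_put(old_prog);

        return 0;
    }

    static int mydrv_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
    {
        struct mydrv_port *port = netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
            return mydrv_xdp_setup(port, xdp);
        default:
            return -EINVAL;
        }
    }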
|