Searched refs:xdp (Results 1 – 22 of 22) sorted by relevance

/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
80 struct xdp_buff xdp; in bnxt_rx_xdp() local
95 xdp.data_hard_start = *data_ptr - offset; in bnxt_rx_xdp()
96 xdp.data = *data_ptr; in bnxt_rx_xdp()
97 xdp.data_end = *data_ptr + *len; in bnxt_rx_xdp()
98 orig_data = xdp.data; in bnxt_rx_xdp()
104 act = bpf_prog_run_xdp(xdp_prog, &xdp); in bnxt_rx_xdp()
114 if (orig_data != xdp.data) { in bnxt_rx_xdp()
115 offset = xdp.data - xdp.data_hard_start; in bnxt_rx_xdp()
116 *data_ptr = xdp.data_hard_start + offset; in bnxt_rx_xdp()
117 *len = xdp.data_end - xdp.data; in bnxt_rx_xdp()
[all …]
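
The bnxt excerpt above is the canonical RX-path shape that most drivers in this listing repeat: describe the frame to BPF through a stack xdp_buff, run the program, then re-derive the payload pointer and length because the program may have moved xdp.data with bpf_xdp_adjust_head(). A minimal sketch of that pattern, with hypothetical helper and parameter names:

#include <linux/filter.h>	/* struct bpf_prog, struct xdp_buff, bpf_prog_run_xdp() */

/* Illustrative only: my_run_xdp() and its parameters are made-up names. */
static u32 my_run_xdp(struct bpf_prog *xdp_prog, void *hard_start,
		      u8 **data_ptr, unsigned int *len)
{
	struct xdp_buff xdp;
	void *orig_data;
	u32 act;

	xdp.data_hard_start = hard_start;	/* start of usable headroom */
	xdp.data = *data_ptr;			/* first byte of the frame */
	xdp.data_end = *data_ptr + *len;	/* one past the last byte */
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* The program may have grown or shrunk the head; resync our view. */
	if (xdp.data != orig_data) {
		*data_ptr = xdp.data;
		*len = xdp.data_end - xdp.data;
	}
	return act;	/* XDP_PASS, XDP_DROP, XDP_TX, ... */
}
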
bnxt_xdp.h
19 int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
/drivers/net/ethernet/intel/i40e/
i40e_txrx.c
1823 struct xdp_buff *xdp) in i40e_construct_skb() argument
1825 unsigned int size = xdp->data_end - xdp->data; in i40e_construct_skb()
1835 prefetch(xdp->data); in i40e_construct_skb()
1837 prefetch(xdp->data + L1_CACHE_BYTES); in i40e_construct_skb()
1850 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE); in i40e_construct_skb()
1853 memcpy(__skb_put(skb, headlen), xdp->data, in i40e_construct_skb()
1888 struct xdp_buff *xdp) in i40e_build_skb() argument
1890 unsigned int size = xdp->data_end - xdp->data; in i40e_build_skb()
1900 prefetch(xdp->data); in i40e_build_skb()
1902 prefetch(xdp->data + L1_CACHE_BYTES); in i40e_build_skb()
[all …]
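
i40e keeps two strategies for turning an XDP_PASS buffer into an skb: i40e_construct_skb() allocates a fresh skb and copies only the protocol headers out (attaching the rest of the page as a frag), while i40e_build_skb() wraps the existing buffer. A hedged sketch of the copy strategy; the function name and MY_RX_HDR_SIZE are made up, and the frag-attach step is omitted:

#include <linux/skbuff.h>
#include <linux/etherdevice.h>	/* eth_get_headlen() */
#include <linux/prefetch.h>

#define MY_RX_HDR_SIZE 256	/* linear-area budget; I40E_RX_HDR_SIZE in the driver */

static struct sk_buff *my_construct_skb(struct napi_struct *napi,
					struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen = size;
	struct sk_buff *skb;

	prefetch(xdp->data);	/* warm the headers before parsing them */

	skb = napi_alloc_skb(napi, MY_RX_HDR_SIZE);
	if (!skb)
		return NULL;

	/* Pull only as much as the protocol headers need into the linear area. */
	if (headlen > MY_RX_HDR_SIZE)
		headlen = eth_get_headlen(xdp->data, MY_RX_HDR_SIZE);

	/* Word-aligned copy, as the driver does; the buffer has tailroom. */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));

	/* A real driver now attaches the remaining (size - headlen) bytes of
	 * the page as a frag via skb_add_rx_frag(); omitted here. */
	return skb;
}
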
i40e_main.c
9606 struct netdev_xdp *xdp) in i40e_xdp() argument
9614 switch (xdp->command) { in i40e_xdp()
9616 return i40e_xdp_setup(vsi, xdp->prog); in i40e_xdp()
9618 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); in i40e_xdp()
9619 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; in i40e_xdp()
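
i40e_xdp() is the driver's ndo_xdp hook, and every driver in these results implements the same two-command dispatch: XDP_SETUP_PROG attaches (or, with a NULL prog, detaches) a program, and XDP_QUERY_PROG reports what is attached. A sketch of the shape, with hypothetical my_* helpers; the hook is wired up through net_device_ops as .ndo_xdp:

#include <linux/netdevice.h>	/* struct netdev_xdp, XDP_SETUP_PROG, XDP_QUERY_PROG */

static int my_xdp_setup(struct net_device *dev, struct bpf_prog *prog);	/* hypothetical */
static u32 my_xdp_prog_id(struct net_device *dev);			/* hypothetical */

static int my_ndo_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return my_xdp_setup(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = my_xdp_prog_id(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
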
/drivers/net/
virtio_net.c
387 struct xdp_buff *xdp) in virtnet_xdp_xmit() argument
406 xdp->data -= vi->hdr_len; in virtnet_xdp_xmit()
408 hdr = xdp->data; in virtnet_xdp_xmit()
411 sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); in virtnet_xdp_xmit()
413 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); in virtnet_xdp_xmit()
415 struct page *page = virt_to_head_page(xdp->data); in virtnet_xdp_xmit()
515 struct xdp_buff xdp; in receive_small() local
543 xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; in receive_small()
544 xdp.data = xdp.data_hard_start + xdp_headroom; in receive_small()
545 xdp.data_end = xdp.data + len; in receive_small()
[all …]
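
virtnet_xdp_xmit() shows the XDP_TX side on a paravirt device: the host expects a virtio_net_hdr in front of every frame, so the xmit path backs xdp->data up into headroom that receive_small() reserved, zeroes the header (XDP frames carry no checksum/GSO metadata), and posts the whole span as one out-buffer. A hedged sketch, assuming virtio_net.c's internal virtnet_info/send_queue types:

#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>	/* virt_to_head_page(), put_page() */

static bool my_xdp_xmit(struct virtnet_info *vi, struct send_queue *sq,
			struct xdp_buff *xdp)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	xdp->data -= vi->hdr_len;	/* step back into reserved headroom */
	hdr = xdp->data;
	memset(hdr, 0, vi->hdr_len);	/* no offload metadata for XDP frames */

	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
	if (err) {
		put_page(virt_to_head_page(xdp->data));	/* ring full: drop */
		return false;
	}
	virtqueue_kick(sq->vq);
	return true;
}
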
tun.c
1033 static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp) in tun_xdp() argument
1035 switch (xdp->command) { in tun_xdp()
1037 return tun_xdp_set(dev, xdp->prog, xdp->extack); in tun_xdp()
1039 xdp->prog_id = tun_xdp_query(dev); in tun_xdp()
1040 xdp->prog_attached = !!xdp->prog_id; in tun_xdp()
1319 struct xdp_buff xdp; in tun_build_skb() local
1323 xdp.data_hard_start = buf; in tun_build_skb()
1324 xdp.data = buf + pad; in tun_build_skb()
1325 xdp.data_end = xdp.data + len; in tun_build_skb()
1326 orig_data = xdp.data; in tun_build_skb()
[all …]
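
tun runs the program on frames written into the tap before it ever builds an skb, which makes it a cheap place to test programs. For reference, the other side of every bpf_prog_run_xdp() call in this listing is a program like the following minimal example (restricted C for clang -target bpf; the drop-IPv4 policy and the section name are purely illustrative):

#include <linux/bpf.h>
#include <linux/if_ether.h>

/* Minimal XDP program: drop IPv4 frames, pass everything else. */
__attribute__((section("xdp"), used))
int xdp_drop_ipv4(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if ((void *)(eth + 1) > data_end)	/* bounds check the verifier requires */
		return XDP_ABORTED;
	if (eth->h_proto == __builtin_bswap16(ETH_P_IP))	/* htons(ETH_P_IP) on LE hosts */
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] __attribute__((section("license"), used)) = "GPL";
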
/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
768 const struct xdp_buff *xdp) in mlx5e_xmit_xdp_frame() argument
779 ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; in mlx5e_xmit_xdp_frame()
781 unsigned int dma_len = xdp->data_end - xdp->data; in mlx5e_xmit_xdp_frame()
809 memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); in mlx5e_xmit_xdp_frame()
841 struct xdp_buff xdp; in mlx5e_xdp_handle() local
847 xdp.data = va + *rx_headroom; in mlx5e_xdp_handle()
848 xdp.data_end = xdp.data + *len; in mlx5e_xdp_handle()
849 xdp.data_hard_start = va; in mlx5e_xdp_handle()
851 act = bpf_prog_run_xdp(prog, &xdp); in mlx5e_xdp_handle()
854 *rx_headroom = xdp.data - xdp.data_hard_start; in mlx5e_xdp_handle()
[all …]
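
mlx5e_xmit_xdp_frame() illustrates the descriptor bookkeeping behind XDP_TX on this hardware: the RX page is already DMA-mapped, so the send descriptor only needs the payload's offset within that mapping and its current length, both re-read from the xdp_buff since the program may have moved data. Restated as a hedged fragment (dma_addr_base is a hypothetical stand-in for the page's existing mapping; the WQE fields follow the excerpt):

ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
dma_addr_t dma_addr = dma_addr_base + data_offset;	/* offset into mapped page */
unsigned int dma_len = xdp->data_end - xdp->data;

/* The first MLX5E_XDP_MIN_INLINE bytes are copied inline into the WQE so
 * the NIC can inspect the headers directly; the remainder is posted by
 * address. */
memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
dma_addr += MLX5E_XDP_MIN_INLINE;
dma_len -= MLX5E_XDP_MIN_INLINE;
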
en_txrx.c
58 if (c->xdp) in mlx5e_napi_poll()
en_main.c
1778 c->xdp = !!params->xdp_prog; in mlx5e_open_channel()
1798 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, in mlx5e_open_channel()
1813 err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0; in mlx5e_open_channel()
1825 if (c->xdp) in mlx5e_open_channel()
1836 if (c->xdp) in mlx5e_open_channel()
1877 if (c->xdp) in mlx5e_close_channel()
1882 if (c->xdp) in mlx5e_close_channel()
3738 static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp) in mlx5e_xdp() argument
3740 switch (xdp->command) { in mlx5e_xdp()
3742 return mlx5e_xdp_set(dev, xdp->prog); in mlx5e_xdp()
[all …]
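
en_main.c shows the resource side: each channel gets an extra XDP send queue (and completion queue) only when a program is attached, recorded once in c->xdp, and the close path mirrors the open path in reverse. The idiom, sketched with hypothetical my_* names and the usual goto-unwind error handling:

/* All names here are illustrative, not the driver's. */
static int my_open_channel(struct my_channel *c, struct my_params *params)
{
	int err;

	c->xdp = !!params->xdp_prog;	/* decide once, consult everywhere */

	err = my_open_rq(c);
	if (err)
		return err;

	err = c->xdp ? my_open_xdpsq(c) : 0;	/* extra SQ only for XDP_TX */
	if (err)
		goto err_close_rq;

	return 0;

err_close_rq:
	my_close_rq(c);
	return err;
}

static void my_close_channel(struct my_channel *c)
{
	if (c->xdp)
		my_close_xdpsq(c);	/* reverse order of open */
	my_close_rq(c);
}
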
en.h
578 bool xdp; member
/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c
206 struct dentry *queues, *tx, *rx, *xdp; in nfp_net_debugfs_vnic_add() local
228 xdp = debugfs_create_dir("xdp", queues); in nfp_net_debugfs_vnic_add()
229 if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp)) in nfp_net_debugfs_vnic_add()
236 debugfs_create_file(name, S_IRUSR, xdp, in nfp_net_debugfs_vnic_add()
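
The nfp debugfs excerpt creates a queues/xdp/ directory with one file per XDP TX ring. debugfs calls return an ERR_PTR when debugfs is compiled out and NULL on some failures, hence the IS_ERR_OR_NULL() check before use. A hedged sketch (my_ring_fops, the rings array, and the naming scheme are illustrative):

#include <linux/debugfs.h>

static void my_debugfs_vnic_add(struct dentry *vnic_dir, void **rings,
				unsigned int num_rings,
				const struct file_operations *my_ring_fops)
{
	struct dentry *queues, *xdp;
	char name[16];
	unsigned int i;

	queues = debugfs_create_dir("queues", vnic_dir);
	xdp = debugfs_create_dir("xdp", queues);
	if (IS_ERR_OR_NULL(xdp))
		return;		/* debugfs compiled out, or creation failed */

	for (i = 0; i < num_rings; i++) {
		snprintf(name, sizeof(name), "%u", i);
		debugfs_create_file(name, S_IRUSR, xdp, rings[i], my_ring_fops);
	}
}
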
nfp_net_common.c
1180 nfp_net_free_frag(void *frag, bool xdp) in nfp_net_free_frag() argument
1182 if (!xdp) in nfp_net_free_frag()
1611 struct xdp_buff xdp; in nfp_net_run_xdp() local
1615 xdp.data_hard_start = hard_start; in nfp_net_run_xdp()
1616 xdp.data = data + *off; in nfp_net_run_xdp()
1617 xdp.data_end = data + *off + *len; in nfp_net_run_xdp()
1619 orig_data = xdp.data; in nfp_net_run_xdp()
1620 ret = bpf_prog_run_xdp(prog, &xdp); in nfp_net_run_xdp()
1622 *len -= xdp.data - orig_data; in nfp_net_run_xdp()
1623 *off += xdp.data - orig_data; in nfp_net_run_xdp()
[all …]
/drivers/net/ethernet/qlogic/qede/
qede_fp.c
365 txq->sw_tx_ring.xdp[idx].page = metadata->data; in qede_xdp_xmit()
366 txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping; in qede_xdp_xmit()
400 txq->sw_tx_ring.xdp[idx].mapping, in qede_xdp_tx_int()
402 __free_page(txq->sw_tx_ring.xdp[idx].page); in qede_xdp_tx_int()
1000 struct xdp_buff xdp; in qede_rx_xdp() local
1003 xdp.data_hard_start = page_address(bd->data); in qede_rx_xdp()
1004 xdp.data = xdp.data_hard_start + *data_offset; in qede_rx_xdp()
1005 xdp.data_end = xdp.data + *len; in qede_rx_xdp()
1012 act = bpf_prog_run_xdp(prog, &xdp); in qede_rx_xdp()
1016 *data_offset = xdp.data - xdp.data_hard_start; in qede_rx_xdp()
[all …]
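
qede's XDP_TX path cannot free the page at transmit time, so qede_xdp_xmit() records the page pointer and its DMA handle in the sw_tx_ring.xdp shadow array, and qede_xdp_tx_int() releases both once the hardware reports completion. The produce/complete pairing as a hedged fragment (the device pointer, mapping length, and DMA direction are assumptions, not the excerpt's):

/* transmit side: remember what was handed to the hardware */
txq->sw_tx_ring.xdp[idx].page = page;
txq->sw_tx_ring.xdp[idx].mapping = mapping;

/* completion side: safe to release only after the HW is done with it */
dma_unmap_page(&edev->pdev->dev, txq->sw_tx_ring.xdp[idx].mapping,
	       PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(txq->sw_tx_ring.xdp[idx].page);
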
qede.h
396 struct sw_tx_xdp *xdp; member
508 int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
qede_filter.c
1068 int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) in qede_xdp() argument
1072 switch (xdp->command) { in qede_xdp()
1074 return qede_xdp_set(edev, xdp->prog); in qede_xdp()
1076 xdp->prog_attached = !!edev->xdp_prog; in qede_xdp()
1077 xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0; in qede_xdp()
qede_main.c
1360 kfree(txq->sw_tx_ring.xdp); in qede_free_mem_txq()
1378 size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers; in qede_alloc_mem_txq()
1379 txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL); in qede_alloc_mem_txq()
1380 if (!txq->sw_tx_ring.xdp) in qede_alloc_mem_txq()
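
The shadow array is sized to the TX ring and lives exactly as long as it: allocated in qede_alloc_mem_txq(), freed in qede_free_mem_txq(). The excerpt's sizeof(*ptr) * n plus kzalloc() pair can equivalently be written with the overflow-checking kcalloc():

txq->sw_tx_ring.xdp = kcalloc(txq->num_tx_buffers,
			      sizeof(*txq->sw_tx_ring.xdp), GFP_KERNEL);
if (!txq->sw_tx_ring.xdp)
	return -ENOMEM;
/* ... */
kfree(txq->sw_tx_ring.xdp);	/* in the matching free path */
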
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
771 struct xdp_buff xdp; in mlx4_en_process_rx_cq() local
781 xdp.data_hard_start = va - frags[0].page_offset; in mlx4_en_process_rx_cq()
782 xdp.data = va; in mlx4_en_process_rx_cq()
783 xdp.data_end = xdp.data + length; in mlx4_en_process_rx_cq()
784 orig_data = xdp.data; in mlx4_en_process_rx_cq()
786 act = bpf_prog_run_xdp(xdp_prog, &xdp); in mlx4_en_process_rx_cq()
788 if (xdp.data != orig_data) { in mlx4_en_process_rx_cq()
789 length = xdp.data_end - xdp.data; in mlx4_en_process_rx_cq()
790 frags[0].page_offset = xdp.data - in mlx4_en_process_rx_cq()
791 xdp.data_hard_start; in mlx4_en_process_rx_cq()
[all …]
en_netdev.c
2918 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) in mlx4_xdp() argument
2920 switch (xdp->command) { in mlx4_xdp()
2922 return mlx4_xdp_set(dev, xdp->prog); in mlx4_xdp()
2924 xdp->prog_id = mlx4_xdp_query(dev); in mlx4_xdp()
2925 xdp->prog_attached = !!xdp->prog_id; in mlx4_xdp()
/drivers/net/ethernet/cavium/thunder/
nicvf_main.c
525 struct xdp_buff xdp; in nicvf_xdp_rx() local
542 xdp.data_hard_start = page_address(page); in nicvf_xdp_rx()
543 xdp.data = (void *)cpu_addr; in nicvf_xdp_rx()
544 xdp.data_end = xdp.data + len; in nicvf_xdp_rx()
545 orig_data = xdp.data; in nicvf_xdp_rx()
548 action = bpf_prog_run_xdp(prog, &xdp); in nicvf_xdp_rx()
552 if (orig_data != xdp.data) { in nicvf_xdp_rx()
553 len = xdp.data_end - xdp.data; in nicvf_xdp_rx()
554 offset = orig_data - xdp.data; in nicvf_xdp_rx()
574 *skb = build_skb(xdp.data, in nicvf_xdp_rx()
[all …]
nicvf_queues.c
1613 u64 buf_addr, bool xdp) in nicvf_unmap_rcv_buffer() argument
1618 if (xdp) { in nicvf_unmap_rcv_buffer()
1638 struct cqe_rx_t *cqe_rx, bool xdp) in nicvf_get_rcv_skb() argument
1675 phys_addr, xdp); in nicvf_get_rcv_skb()
1685 nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp); in nicvf_get_rcv_skb()
nicvf_queues.h
351 struct cqe_rx_t *cqe_rx, bool xdp);
/drivers/net/ethernet/intel/ixgbe/
ixgbe_main.c
2129 struct xdp_buff *xdp, in ixgbe_construct_skb() argument
2132 unsigned int size = xdp->data_end - xdp->data; in ixgbe_construct_skb()
2136 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbe_construct_skb()
2137 xdp->data_hard_start); in ixgbe_construct_skb()
2142 prefetch(xdp->data); in ixgbe_construct_skb()
2144 prefetch(xdp->data + L1_CACHE_BYTES); in ixgbe_construct_skb()
2157 xdp->data - page_address(rx_buffer->page), in ixgbe_construct_skb()
2166 xdp->data, ALIGN(size, sizeof(long))); in ixgbe_construct_skb()
2175 struct xdp_buff *xdp, in ixgbe_build_skb() argument
2182 SKB_DATA_ALIGN(xdp->data_end - in ixgbe_build_skb()
[all …]
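
ixgbe_build_skb() highlights the sizing rule behind every build_skb()-over-XDP-buffer conversion in this listing: the frag size handed to build_skb() must cover headroom, payload, and tailroom for struct skb_shared_info, and the skb then gets its headroom back via skb_reserve(). A hedged sketch of the arithmetic (the function name is made up, and the exact truesize policy varies per driver and page-split mode):

#include <linux/skbuff.h>

static struct sk_buff *my_build_skb(struct xdp_buff *xdp)
{
	unsigned int headroom = xdp->data - xdp->data_hard_start;
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;

	/* The frag size must include room for struct skb_shared_info at the
	 * end of the buffer; build_skb() never copies the payload. */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);	/* data starts where xdp->data did */
	__skb_put(skb, size);		/* payload length as XDP left it */
	return skb;
}
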