Searched refs:xdp (Results 1 – 25 of 71) sorted by relevance

/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
32 struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk; in mlx5e_xsk_skb_from_cqe_mpwrq_linear() local
48 xdp->data_end = xdp->data + cqe_bcnt32; in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
49 xdp_set_data_meta_invalid(xdp); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
50 xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
51 net_prefetch(xdp->data); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
68 if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) { in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
77 return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32); in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
85 struct xdp_buff *xdp = wi->di->xsk; in mlx5e_xsk_skb_from_cqe_linear() local
94 xdp->data_end = xdp->data + cqe_bcnt; in mlx5e_xsk_skb_from_cqe_linear()
95 xdp_set_data_meta_invalid(xdp); in mlx5e_xsk_skb_from_cqe_linear()
[all …]
/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
99 struct xdp_buff *xdp) in ixgbe_run_xdp_zc() argument
107 act = bpf_prog_run_xdp(xdp_prog, xdp); in ixgbe_run_xdp_zc()
110 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ixgbe_run_xdp_zc()
120 xdpf = xdp_convert_buff_to_frame(xdp); in ixgbe_run_xdp_zc()
158 bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
159 if (!bi->xdp) { in ixgbe_alloc_rx_buffers_zc()
164 dma = xsk_buff_xdp_get_dma(bi->xdp); in ixgbe_alloc_rx_buffers_zc()
204 const struct xdp_buff *xdp) in ixgbe_construct_skb_zc() argument
206 unsigned int totalsize = xdp->data_end - xdp->data_meta; in ixgbe_construct_skb_zc()
207 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbe_construct_skb_zc()
[all …]
/drivers/net/hyperv/
netvsc_bpf.c
24 struct xdp_buff *xdp) in netvsc_run_xdp() argument
32 xdp->data_hard_start = NULL; in netvsc_run_xdp()
53 xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq); in netvsc_run_xdp()
54 xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false); in netvsc_run_xdp()
56 memcpy(xdp->data, data, len); in netvsc_run_xdp()
58 act = bpf_prog_run_xdp(prog, xdp); in netvsc_run_xdp()
79 xdp->data_hard_start = NULL; in netvsc_run_xdp()
139 struct netdev_bpf xdp; in netvsc_vf_setxdp() local
152 memset(&xdp, 0, sizeof(xdp)); in netvsc_vf_setxdp()
157 xdp.command = XDP_SETUP_PROG; in netvsc_vf_setxdp()
[all …]
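
The netvsc_run_xdp() hit above, like the bnxt, virtio_net, xen-netfront, qede and dpaa hits later in this listing, performs the same two-step xdp_buff setup the XDP core has provided since v5.12: xdp_init_buff() records the frame size and RX queue, then xdp_prepare_buff() sets the data pointers. A minimal sketch, assuming a page-backed RX buffer (the example_* name is hypothetical, not a function from these drivers):

#include <linux/mm.h>
#include <net/xdp.h>

static void example_setup_xdp_buff(struct xdp_buff *xdp,
				   struct xdp_rxq_info *rxq,
				   void *page_addr, unsigned int headroom,
				   unsigned int len)
{
	/* Frame size and owning RX queue; done once per received buffer. */
	xdp_init_buff(xdp, PAGE_SIZE, rxq);
	/* Set data/data_end around the payload; 'false' marks data_meta invalid. */
	xdp_prepare_buff(xdp, page_addr, headroom, len, false);
}
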
/drivers/net/ethernet/intel/ice/
ice_xsk.c
383 struct xdp_buff **xdp; in ice_alloc_rx_bufs_zc() local
391 xdp = &rx_ring->xdp_buf[ntu]; in ice_alloc_rx_bufs_zc()
394 *xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ice_alloc_rx_bufs_zc()
395 if (!*xdp) { in ice_alloc_rx_bufs_zc()
400 dma = xsk_buff_xdp_get_dma(*xdp); in ice_alloc_rx_bufs_zc()
405 xdp++; in ice_alloc_rx_bufs_zc()
410 xdp = rx_ring->xdp_buf; in ice_alloc_rx_bufs_zc()
449 struct xdp_buff *xdp = *xdp_arr; in ice_construct_skb_zc() local
450 unsigned int metasize = xdp->data - xdp->data_meta; in ice_construct_skb_zc()
451 unsigned int datasize = xdp->data_end - xdp->data; in ice_construct_skb_zc()
[all …]
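
The ice_alloc_rx_bufs_zc() loop above, and the ixgbe/i40e/stmmac zero-copy allocators elsewhere in these results, refill the RX ring directly from the AF_XDP buffer pool rather than the page allocator. A rough sketch of that refill, assuming a flat array stands in for the driver ring and omitting the hardware descriptor write (example_* name is hypothetical):

#include <net/xdp_sock_drv.h>

static u16 example_refill_zc(struct xsk_buff_pool *pool,
			     struct xdp_buff **ring, u16 count)
{
	dma_addr_t dma;
	u16 i;

	for (i = 0; i < count; i++) {
		ring[i] = xsk_buff_alloc(pool);      /* buffer comes from the shared UMEM */
		if (!ring[i])
			break;                       /* pool exhausted: retry on the next poll */
		dma = xsk_buff_xdp_get_dma(ring[i]); /* DMA address to post to the NIC */
		(void)dma;                           /* a real driver writes this into the RX descriptor */
	}
	return i;                                    /* buffers actually allocated */
}
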
ice_txrx.c
526 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, in ice_run_xdp() argument
533 act = bpf_prog_run_xdp(xdp_prog, xdp); in ice_run_xdp()
539 result = ice_xmit_xdp_buff(xdp, xdp_ring); in ice_run_xdp()
544 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ice_run_xdp()
892 struct xdp_buff *xdp) in ice_build_skb() argument
894 u8 metasize = xdp->data - xdp->data_meta; in ice_build_skb()
899 SKB_DATA_ALIGN(xdp->data_end - in ice_build_skb()
900 xdp->data_hard_start); in ice_build_skb()
909 net_prefetch(xdp->data_meta); in ice_build_skb()
911 skb = build_skb(xdp->data_hard_start, truesize); in ice_build_skb()
[all …]
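
ice_build_skb() above, like i40e_build_skb() and ixgbevf_build_skb() further down, builds an sk_buff around the XDP buffer without copying: the skb head is data_hard_start, headroom is reserved up to data, and any XDP metadata length is recorded on the skb. A condensed sketch of that pattern, under the same truesize arithmetic the excerpts show (example_* name is hypothetical):

#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *example_build_skb(struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
	struct sk_buff *skb;

	skb = build_skb(xdp->data_hard_start, truesize);    /* reuse the page, no memcpy */
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start); /* skip driver/XDP headroom */
	__skb_put(skb, xdp->data_end - xdp->data);          /* payload length */
	if (metasize)
		skb_metadata_set(skb, metasize);            /* keep XDP metadata visible to the stack */

	return skb;
}
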
/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
120 struct xdp_buff xdp; in bnxt_rx_xdp() local
139 xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq); in bnxt_rx_xdp()
140 xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false); in bnxt_rx_xdp()
141 orig_data = xdp.data; in bnxt_rx_xdp()
143 act = bpf_prog_run_xdp(xdp_prog, &xdp); in bnxt_rx_xdp()
152 *len = xdp.data_end - xdp.data; in bnxt_rx_xdp()
153 if (orig_data != xdp.data) { in bnxt_rx_xdp()
154 offset = xdp.data - xdp.data_hard_start; in bnxt_rx_xdp()
155 *data_ptr = xdp.data_hard_start + offset; in bnxt_rx_xdp()
191 if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) { in bnxt_rx_xdp()
[all …]
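
bnxt_rx_xdp() above also shows the verdict dispatch that nearly every driver in this listing repeats: run the attached program with bpf_prog_run_xdp(), then branch on XDP_PASS / XDP_TX / XDP_REDIRECT, falling back to a drop. A bare-bones sketch of that switch (example_* name is hypothetical; the XDP_TX leg is ring specific and only stubbed here):

#include <linux/filter.h>

static u32 example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			   struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);  /* verdict from the BPF program */

	switch (act) {
	case XDP_PASS:           /* continue on the normal skb receive path */
		break;
	case XDP_TX:             /* driver specific: queue the frame on an XDP TX ring */
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(dev, xdp, prog) < 0)
			act = XDP_DROP;          /* redirect failed: recycle the buffer */
		break;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		fallthrough;
	default:
		act = XDP_DROP;
		break;
	}
	return act;
}
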
/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
200 static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) in i40e_run_xdp_zc() argument
211 act = bpf_prog_run_xdp(xdp_prog, xdp); in i40e_run_xdp_zc()
214 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp_zc()
225 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); in i40e_run_xdp_zc()
247 struct xdp_buff **bi, *xdp; in i40e_alloc_rx_buffers_zc() local
254 xdp = xsk_buff_alloc(rx_ring->xsk_pool); in i40e_alloc_rx_buffers_zc()
255 if (!xdp) { in i40e_alloc_rx_buffers_zc()
259 *bi = xdp; in i40e_alloc_rx_buffers_zc()
260 dma = xsk_buff_xdp_get_dma(xdp); in i40e_alloc_rx_buffers_zc()
295 struct xdp_buff *xdp) in i40e_construct_skb_zc() argument
[all …]
i40e_txrx.c
2094 struct xdp_buff *xdp) in i40e_construct_skb() argument
2096 unsigned int size = xdp->data_end - xdp->data; in i40e_construct_skb()
2106 net_prefetch(xdp->data); in i40e_construct_skb()
2134 headlen = eth_get_headlen(skb->dev, xdp->data, in i40e_construct_skb()
2138 memcpy(__skb_put(skb, headlen), xdp->data, in i40e_construct_skb()
2173 struct xdp_buff *xdp) in i40e_build_skb() argument
2175 unsigned int metasize = xdp->data - xdp->data_meta; in i40e_build_skb()
2180 SKB_DATA_ALIGN(xdp->data_end - in i40e_build_skb()
2181 xdp->data_hard_start); in i40e_build_skb()
2190 net_prefetch(xdp->data_meta); in i40e_build_skb()
[all …]
/drivers/net/
veth.c
258 static void *veth_xdp_to_ptr(struct xdp_frame *xdp) in veth_xdp_to_ptr() argument
260 return (void *)((unsigned long)xdp | VETH_XDP_FLAG); in veth_xdp_to_ptr()
293 struct veth_rq *rq, bool xdp) in veth_forward_skb() argument
295 return __dev_forward_skb(dev, skb) ?: xdp ? in veth_forward_skb()
592 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp, in veth_xdp_tx() argument
595 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); in veth_xdp_tx()
619 struct xdp_buff xdp; in veth_xdp_rcv_one() local
622 xdp_convert_frame_to_buff(frame, &xdp); in veth_xdp_rcv_one()
623 xdp.rxq = &rq->xdp_rxq; in veth_xdp_rcv_one()
625 act = bpf_prog_run_xdp(xdp_prog, &xdp); in veth_xdp_rcv_one()
[all …]
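
veth_xdp_tx() above, like mlx5e_xmit_xdp_buff() and tun_xdp_tx() elsewhere in these results, first collapses the live xdp_buff into a self-contained xdp_frame before queueing it for transmission. A minimal sketch of just that conversion, with the ring enqueue left out because it is entirely driver specific (example_* name is hypothetical):

#include <linux/errno.h>
#include <net/xdp.h>

static int example_xdp_tx(struct xdp_buff *xdp)
{
	/* Store an xdp_frame in the buffer's headroom so the packet can
	 * outlive the RX processing loop it was received in. */
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return -EOVERFLOW;  /* not enough head/tailroom for the frame header */

	/* A real driver would now post xdpf on its XDP TX ring and kick it. */
	return 0;
}
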
tun.c
1212 static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) in tun_xdp() argument
1214 switch (xdp->command) { in tun_xdp()
1216 return tun_xdp_set(dev, xdp->prog, xdp->extack); in tun_xdp()
1286 struct xdp_frame *xdp = frames[i]; in tun_xdp_xmit() local
1290 void *frame = tun_xdp_to_ptr(xdp); in tun_xdp_xmit()
1307 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) in tun_xdp_tx() argument
1309 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); in tun_xdp_tx()
1605 struct xdp_buff *xdp, u32 act) in tun_xdp_act() argument
1611 err = xdp_do_redirect(tun->dev, xdp, xdp_prog); in tun_xdp_act()
1616 err = tun_xdp_tx(tun->dev, xdp); in tun_xdp_act()
[all …]
virtio_net.c
764 struct xdp_buff xdp; in receive_small() local
792 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); in receive_small()
793 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small()
795 orig_data = xdp.data; in receive_small()
796 act = bpf_prog_run_xdp(xdp_prog, &xdp); in receive_small()
802 delta = orig_data - xdp.data; in receive_small()
803 len = xdp.data_end - xdp.data; in receive_small()
804 metasize = xdp.data - xdp.data_meta; in receive_small()
808 xdpf = xdp_convert_buff_to_frame(&xdp); in receive_small()
823 err = xdp_do_redirect(dev, &xdp, xdp_prog); in receive_small()
[all …]
tap.c
1132 static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) in tap_get_user_xdp() argument
1134 struct tun_xdp_hdr *hdr = xdp->data_hard_start; in tap_get_user_xdp()
1145 skb = build_skb(xdp->data_hard_start, buflen); in tap_get_user_xdp()
1151 skb_reserve(skb, xdp->data - xdp->data_hard_start); in tap_get_user_xdp()
1152 skb_put(skb, xdp->data_end - xdp->data); in tap_get_user_xdp()
1198 struct xdp_buff *xdp; in tap_sendmsg() local
1204 xdp = &((struct xdp_buff *)ctl->ptr)[i]; in tap_sendmsg()
1205 tap_get_user_xdp(q, xdp); in tap_sendmsg()
xen-netfront.c
971 struct xdp_buff *xdp, bool *need_xdp_flush) in xennet_run_xdp() argument
978 xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, in xennet_run_xdp()
980 xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM, in xennet_run_xdp()
983 act = bpf_prog_run_xdp(prog, xdp); in xennet_run_xdp()
987 xdpf = xdp_convert_buff_to_frame(xdp); in xennet_run_xdp()
996 err = xdp_do_redirect(queue->info->netdev, xdp, prog); in xennet_run_xdp()
1029 struct xdp_buff xdp; in xennet_get_responses() local
1038 struct xen_netif_extra_info *xdp; in xennet_get_responses() local
1040 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1]; in xennet_get_responses()
1041 rx->offset = xdp->u.xdp.headroom; in xennet_get_responses()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
60 struct mlx5e_dma_info *di, struct xdp_buff *xdp) in mlx5e_xmit_xdp_buff() argument
67 xdpf = xdp_convert_buff_to_frame(xdp); in mlx5e_xmit_xdp_buff()
74 if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) { in mlx5e_xmit_xdp_buff()
123 u32 *len, struct xdp_buff *xdp) in mlx5e_xdp_handle() argument
132 act = bpf_prog_run_xdp(prog, xdp); in mlx5e_xdp_handle()
135 *len = xdp->data_end - xdp->data; in mlx5e_xdp_handle()
138 if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp))) in mlx5e_xdp_handle()
144 err = xdp_do_redirect(rq->netdev, xdp, prog); in mlx5e_xdp_handle()
149 if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL) in mlx5e_xdp_handle()
/drivers/net/ethernet/sfc/
rx.c
258 struct xdp_buff xdp; in efx_do_xdp() local
289 xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info); in efx_do_xdp()
291 xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM, in efx_do_xdp()
294 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); in efx_do_xdp()
296 offset = (u8 *)xdp.data - *ehp; in efx_do_xdp()
312 xdpf = xdp_convert_buff_to_frame(&xdp); in efx_do_xdp()
327 err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog); in efx_do_xdp()
/drivers/net/ethernet/socionext/
netsec.c
882 static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp) in netsec_xdp_xmit_back() argument
885 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); in netsec_xdp_xmit_back()
899 struct xdp_buff *xdp) in netsec_run_xdp() argument
902 unsigned int sync, len = xdp->data_end - xdp->data; in netsec_run_xdp()
908 act = bpf_prog_run_xdp(prog, xdp); in netsec_run_xdp()
911 sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM; in netsec_run_xdp()
919 ret = netsec_xdp_xmit_back(priv, xdp); in netsec_run_xdp()
921 page = virt_to_head_page(xdp->data); in netsec_run_xdp()
926 err = xdp_do_redirect(priv->ndev, xdp, prog); in netsec_run_xdp()
931 page = virt_to_head_page(xdp->data); in netsec_run_xdp()
[all …]
/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c
851 struct xdp_buff *xdp, in ixgbevf_construct_skb() argument
854 unsigned int size = xdp->data_end - xdp->data; in ixgbevf_construct_skb()
858 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_construct_skb()
859 xdp->data_hard_start); in ixgbevf_construct_skb()
865 net_prefetch(xdp->data); in ixgbevf_construct_skb()
890 headlen = eth_get_headlen(skb->dev, xdp->data, in ixgbevf_construct_skb()
894 memcpy(__skb_put(skb, headlen), xdp->data, in ixgbevf_construct_skb()
901 (xdp->data + headlen) - in ixgbevf_construct_skb()
926 struct xdp_buff *xdp, in ixgbevf_build_skb() argument
929 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbevf_build_skb()
[all …]
/drivers/net/ethernet/qlogic/qede/
qede_fp.c
309 struct sw_tx_xdp *xdp; in qede_xdp_xmit() local
330 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; in qede_xdp_xmit()
331 xdp->mapping = dma; in qede_xdp_xmit()
332 xdp->page = page; in qede_xdp_xmit()
333 xdp->xdpf = xdpf; in qede_xdp_xmit()
403 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; in qede_xdp_tx_int()
1088 struct xdp_buff xdp; in qede_rx_xdp() local
1091 xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq); in qede_rx_xdp()
1092 xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset, in qede_rx_xdp()
1095 act = bpf_prog_run_xdp(prog, &xdp); in qede_rx_xdp()
[all …]
/drivers/net/ethernet/freescale/enetc/
enetc.c
18 if (priv->rx_ring[i]->xdp.prog) in enetc_num_stack_tx_queues()
523 rx_ring->xdp.xdp_tx_in_flight--; in enetc_recycle_xdp_tx_buff()
626 bool xdp = !!(rx_ring->xdp.prog); in enetc_new_page() local
635 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
1165 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); in enetc_build_xdp_buff()
1308 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1368 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) in enetc_clean_rx_ring_xdp()
1370 rx_ring->xdp.xdp_tx_in_flight); in enetc_clean_rx_ring_xdp()
1391 prog = rx_ring->xdp.prog; in enetc_poll()
1785 if (rx_ring->xdp.prog) in enetc_setup_rxbdr()
[all …]
/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c
1619 if (!buf->xdp) in dma_free_rx_xskbufs()
1622 xsk_buff_free(buf->xdp); in dma_free_rx_xskbufs()
1623 buf->xdp = NULL; in dma_free_rx_xskbufs()
1644 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1645 if (!buf->xdp) in stmmac_alloc_rx_buffers_zc()
1648 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_alloc_rx_buffers_zc()
4719 struct xdp_buff *xdp) in stmmac_xdp_xmit_back() argument
4721 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); in stmmac_xdp_xmit_back()
4748 struct xdp_buff *xdp) in __stmmac_xdp_run_prog() argument
4753 act = bpf_prog_run_xdp(prog, xdp); in __stmmac_xdp_run_prog()
[all …]
/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c
150 struct dentry *queues, *tx, *rx, *xdp; in nfp_net_debugfs_vnic_add() local
168 xdp = debugfs_create_dir("xdp", queues); in nfp_net_debugfs_vnic_add()
174 debugfs_create_file(name, 0400, xdp, in nfp_net_debugfs_vnic_add()
/drivers/net/ethernet/freescale/dpaa/
dpaa_eth.c
2557 struct xdp_buff xdp; in dpaa_run_xdp() local
2565 xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE, in dpaa_run_xdp()
2567 xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM, in dpaa_run_xdp()
2578 xdp_set_data_meta_invalid(&xdp); in dpaa_run_xdp()
2579 xdp.data_hard_start = vaddr; in dpaa_run_xdp()
2580 xdp.frame_sz = DPAA_BP_RAW_SIZE; in dpaa_run_xdp()
2584 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); in dpaa_run_xdp()
2587 qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data); in dpaa_run_xdp()
2592 *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 : in dpaa_run_xdp()
2593 xdp.data - xdp.data_meta; in dpaa_run_xdp()
[all …]
/drivers/net/ethernet/marvell/
mvneta.c
2035 struct xdp_buff *xdp, struct skb_shared_info *sinfo, in mvneta_xdp_put_buff() argument
2043 page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), in mvneta_xdp_put_buff()
2093 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) in mvneta_xdp_xmit_back() argument
2102 xdpf = xdp_convert_buff_to_frame(xdp); in mvneta_xdp_xmit_back()
2177 struct bpf_prog *prog, struct xdp_buff *xdp, in mvneta_run_xdp() argument
2180 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); in mvneta_run_xdp()
2184 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
2185 data_len = xdp->data_end - xdp->data; in mvneta_run_xdp()
2186 act = bpf_prog_run_xdp(prog, xdp); in mvneta_run_xdp()
2189 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; in mvneta_run_xdp()
[all …]
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
673 struct xdp_buff xdp; in mlx4_en_process_rx_cq() local
683 xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq); in mlx4_en_process_rx_cq()
777 xdp_prepare_buff(&xdp, va - frags[0].page_offset, in mlx4_en_process_rx_cq()
779 orig_data = xdp.data; in mlx4_en_process_rx_cq()
781 act = bpf_prog_run_xdp(xdp_prog, &xdp); in mlx4_en_process_rx_cq()
783 length = xdp.data_end - xdp.data; in mlx4_en_process_rx_cq()
784 if (xdp.data != orig_data) { in mlx4_en_process_rx_cq()
785 frags[0].page_offset = xdp.data - in mlx4_en_process_rx_cq()
786 xdp.data_hard_start; in mlx4_en_process_rx_cq()
787 va = xdp.data; in mlx4_en_process_rx_cq()
/drivers/vhost/
net.c
128 struct xdp_buff *xdp; member
468 .ptr = nvq->xdp, in vhost_tx_batch()
486 put_page(virt_to_head_page(nvq->xdp[i].data)); in vhost_tx_batch()
702 struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp]; in vhost_net_build_xdp() local
755 xdp_init_buff(xdp, buflen, NULL); in vhost_net_build_xdp()
756 xdp_prepare_buff(xdp, buf, pad, len, true); in vhost_net_build_xdp()
1297 struct xdp_buff *xdp; in vhost_net_open() local
1318 xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL); in vhost_net_open()
1319 if (!xdp) { in vhost_net_open()
1325 n->vqs[VHOST_NET_VQ_TX].xdp = xdp; in vhost_net_open()
[all …]
