/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

rx.h
    25  dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_page_alloc_pool()
    41  if (!xsk_uses_need_wakeup(rq->xsk_pool))  in mlx5e_xsk_update_rx_wakeup()
    45  xsk_set_rx_need_wakeup(rq->xsk_pool);  in mlx5e_xsk_update_rx_wakeup()
    47  xsk_clear_rx_need_wakeup(rq->xsk_pool);  in mlx5e_xsk_update_rx_wakeup()

tx.h
    18  if (!xsk_uses_need_wakeup(sq->xsk_pool))  in mlx5e_xsk_update_tx_wakeup()
    22  xsk_clear_tx_need_wakeup(sq->xsk_pool);  in mlx5e_xsk_update_tx_wakeup()
    24  xsk_set_tx_need_wakeup(sq->xsk_pool);  in mlx5e_xsk_update_tx_wakeup()
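The rx.h/tx.h helpers above are mlx5's implementation of the AF_XDP need_wakeup protocol that every driver in this listing repeats. A minimal sketch of the pattern, assuming a hypothetical my_ring type; only the xsk_* calls are the real API from include/net/xdp_sock_drv.h:

#include <net/xdp_sock_drv.h>

/* Hypothetical stand-in for the per-queue structs in this listing
 * (mlx5e_rq, mlx5e_xdpsq, i40e_ring, ...); only xsk_pool matters here.
 */
struct my_ring {
        struct xsk_buff_pool *xsk_pool;
};

/* RX: if the fill queue ran dry, ask user space to wake the kernel
 * (poll()/sendto()) once it has produced more buffers; otherwise clear
 * the flag so the fast path needs no syscalls.
 */
static void my_update_rx_wakeup(struct my_ring *rq, bool alloc_failed)
{
        if (!xsk_uses_need_wakeup(rq->xsk_pool))
                return;

        if (alloc_failed)
                xsk_set_rx_need_wakeup(rq->xsk_pool);
        else
                xsk_clear_rx_need_wakeup(rq->xsk_pool);
}

/* TX is the mirror image: while work is still in flight the driver will
 * poll again on its own, so no wakeup is needed; once drained, arm the
 * flag so user space kicks the kernel when new descriptors arrive.
 */
static void my_update_tx_wakeup(struct my_ring *sq, bool work_pending)
{
        if (!xsk_uses_need_wakeup(sq->xsk_pool))
                return;

        if (work_pending)
                xsk_clear_tx_need_wakeup(sq->xsk_pool);
        else
                xsk_set_tx_need_wakeup(sq->xsk_pool);
}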
rx.c
    50  xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);  in mlx5e_xsk_skb_from_cqe_mpwrq_linear()
    96  xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);  in mlx5e_xsk_skb_from_cqe_linear()

tx.c
    69  struct xsk_buff_pool *pool = sq->xsk_pool;  in mlx5e_xsk_tx()

setup.c
    70  rq->xsk_pool = pool;  in mlx5e_init_xsk_rq()
/drivers/net/ethernet/intel/i40e/

i40e_xsk.c
    254  xdp = xsk_buff_alloc(rx_ring->xsk_pool);  in i40e_alloc_rx_buffers_zc()
    424  xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
    444  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in i40e_clean_rx_irq_zc()
    446  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
    448  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
    461  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);  in i40e_xmit_pkt()
    462  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);  in i40e_xmit_pkt()
    481  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);  in i40e_xmit_pkt_batch()
    482  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);  in i40e_xmit_pkt_batch()
    531  nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);  in i40e_xmit_zc()
    [all …]
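i40e_alloc_rx_buffers_zc() above shows the zero-copy RX refill: buffers come out of the pool already DMA-mapped, so refilling is a plain allocate-and-post loop. A condensed sketch reusing the hypothetical my_ring type from the first example; my_post_rx_desc() stands in for the driver's hardware descriptor write:

static void my_post_rx_desc(struct my_ring *rx_ring, dma_addr_t dma,
                            struct xdp_buff *xdp);      /* hypothetical */

/* Refill up to 'count' RX slots from the XSK pool. Returns true when the
 * whole request was satisfied; false means the fill queue ran dry and
 * need_wakeup should be set for user space (see the RX helper above).
 */
static bool my_alloc_rx_buffers_zc(struct my_ring *rx_ring, u16 count)
{
        struct xdp_buff *xdp;
        dma_addr_t dma;

        while (count--) {
                xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!xdp)
                        return false;

                /* No per-packet dma_map_single(): pool buffers were
                 * mapped when the pool was attached to the device.
                 */
                dma = xsk_buff_xdp_get_dma(xdp);
                my_post_rx_desc(rx_ring, dma, xdp);
        }

        return true;
}

The i40e_xmit_pkt()/i40e_xmit_pkt_batch() hits are the TX counterpart: xsk_buff_raw_get_dma() translates a descriptor's umem offset into a DMA address, and xsk_tx_peek_release_desc_batch() pulls a whole batch of TX descriptors in one call.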
i40e_txrx.h
    392  struct xsk_buff_pool *xsk_pool;  member

i40e_txrx.c
    793  if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {  in i40e_clean_tx_ring()
    1495  if (rx_ring->xsk_pool) {  in i40e_clean_rx_ring()
    1529  if (rx_ring->xsk_pool)  in i40e_clean_rx_ring()
    2706  bool wd = ring->xsk_pool ?  in i40e_napi_poll()
    2734  int cleaned = ring->xsk_pool ?  in i40e_napi_poll()

i40e_main.c
    3423  ring->xsk_pool = i40e_xsk_pool(ring);  in i40e_configure_tx_ring()
    3556  ring->xsk_pool = i40e_xsk_pool(ring);  in i40e_configure_rx_ring()
    3557  if (ring->xsk_pool) {  in i40e_configure_rx_ring()
    3559  xsk_pool_get_rx_frame_size(ring->xsk_pool);  in i40e_configure_rx_ring()
    3641  if (ring->xsk_pool) {  in i40e_configure_rx_ring()
    3642  xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in i40e_configure_rx_ring()
    3653  ring->xsk_pool ? "AF_XDP ZC enabled " : "",  in i40e_configure_rx_ring()
    13341  if (vsi->xdp_rings[i]->xsk_pool)  in i40e_xdp_setup()
/drivers/net/ethernet/intel/ice/

ice_xsk.c
    238  xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);  in ice_qp_ena()
    394  *xdp = xsk_buff_alloc(rx_ring->xsk_pool);  in ice_alloc_rx_bufs_zc()
    565  xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);  in ice_clean_rx_irq_zc()
    618  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in ice_clean_rx_irq_zc()
    620  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in ice_clean_rx_irq_zc()
    622  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in ice_clean_rx_irq_zc()
    655  if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))  in ice_xmit_zc()
    658  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);  in ice_xmit_zc()
    659  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,  in ice_xmit_zc()
    676  xsk_tx_release(xdp_ring->xsk_pool);  in ice_xmit_zc()
    [all …]
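ice_xmit_zc() above is the single-descriptor zero-copy transmit loop (i40e_xmit_zc() is the batched variant). A sketch with the same hypothetical ring type, where my_post_tx_desc() stands in for writing the hardware TX descriptor:

/* Send up to 'budget' frames queued by user space on the XSK TX ring.
 * Returns true if the ring was drained before the budget ran out.
 */
static bool my_xmit_zc(struct my_ring *xdp_ring, u32 budget)
{
        struct xdp_desc desc;
        dma_addr_t dma;
        u32 i;

        for (i = 0; i < budget; i++) {
                if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
                        break;  /* user space has queued nothing more */

                dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
                xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
                                                 desc.len);

                my_post_tx_desc(xdp_ring, dma, desc.len);  /* hypothetical */
        }

        /* Publish all consumed TX descriptors back to user space at once. */
        xsk_tx_release(xdp_ring->xsk_pool);

        return i < budget;
}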
ice_base.c
    390  if (ring->xsk_pool)  in ice_setup_rx_ctx()
    459  ring->xsk_pool = ice_xsk_pool(ring);  in ice_vsi_cfg_rxq()
    460  if (ring->xsk_pool) {  in ice_vsi_cfg_rxq()
    464  xsk_pool_get_rx_frame_size(ring->xsk_pool);  in ice_vsi_cfg_rxq()
    470  xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in ice_vsi_cfg_rxq()
    496  if (ring->xsk_pool) {  in ice_vsi_cfg_rxq()
    499  if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {  in ice_vsi_cfg_rxq()
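ice_vsi_cfg_rxq() bundles the three steps every driver here performs when attaching a pool to an RX queue: size the hardware buffers from the pool's frame size, register the queue's xdp_rxq_info with the pool, and verify the fill queue can populate a full ring. A sketch; my_set_hw_buf_len() is a made-up register write, and ice itself only warns on the last check rather than failing:

#include <net/xdp.h>            /* struct xdp_rxq_info */

static void my_set_hw_buf_len(struct my_ring *ring, u32 len);  /* hypothetical */

static int my_cfg_rxq_zc(struct my_ring *ring, struct xdp_rxq_info *xdp_rxq,
                         u32 num_bufs)
{
        u32 rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);

        my_set_hw_buf_len(ring, rx_buf_len);

        /* The pool records the rxq info it will stamp into xdp_buffs. */
        xsk_pool_set_rxq_info(ring->xsk_pool, xdp_rxq);

        /* Bail out early if user space has not produced enough fill-queue
         * entries to populate the whole descriptor ring.
         */
        if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs))
                return -ENOMEM;

        return 0;
}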
ice_txrx.h
    293  struct xsk_buff_pool *xsk_pool;  member

ice_txrx.c
    149  if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {  in ice_clean_tx_ring()
    387  if (rx_ring->xsk_pool) {  in ice_clean_rx_ring()
    1399  bool wd = ring->xsk_pool ?  in ice_napi_poll()
    1429  cleaned = ring->xsk_pool ?  in ice_napi_poll()

ice_lib.c
    1832  vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);  in ice_vsi_cfg_xdp_txqs()
/drivers/net/ethernet/intel/ixgbe/

ixgbe_xsk.c
    158  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);  in ixgbe_alloc_rx_buffers_zc()
    297  xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
    358  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in ixgbe_clean_rx_irq_zc()
    360  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
    362  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
    387  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;  in ixgbe_xmit_zc()
    455  struct xsk_buff_pool *pool = tx_ring->xsk_pool;  in ixgbe_clean_xdp_tx_irq()
    527  if (!ring->xsk_pool)  in ixgbe_xsk_wakeup()
    542  struct xsk_buff_pool *pool = tx_ring->xsk_pool;  in ixgbe_xsk_clean_tx_ring()

ixgbe_main.c
    3155  bool wd = ring->xsk_pool ?  in ixgbe_poll()
    3175  int cleaned = ring->xsk_pool ?  in ixgbe_poll()
    3470  ring->xsk_pool = NULL;  in ixgbe_configure_tx_ring()
    3472  ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);  in ixgbe_configure_tx_ring()
    3712  if (rx_ring->xsk_pool) {  in ixgbe_configure_srrctl()
    3713  u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);  in ixgbe_configure_srrctl()
    4058  ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);  in ixgbe_configure_rx_ring()
    4059  if (ring->xsk_pool) {  in ixgbe_configure_rx_ring()
    4063  xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in ixgbe_configure_rx_ring()
    4120  if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {  in ixgbe_configure_rx_ring()
    [all …]
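The ixgbe_poll() hits (like i40e_napi_poll() and ice_napi_poll() above) show the common dispatch: one NAPI handler per queue, with ring->xsk_pool doubling as the mode flag that selects the zero-copy clean routine at run time. Schematically, with hypothetical clean functions:

/* Called from the driver's NAPI handler for each RX ring. */
static int my_poll_rx(struct my_ring *ring, int budget)
{
        return ring->xsk_pool ?
               my_clean_rx_irq_zc(ring, budget) :   /* AF_XDP zero-copy */
               my_clean_rx_irq(ring, budget);       /* regular skb path */
}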
ixgbe.h
    356  struct xsk_buff_pool *xsk_pool;  member
/drivers/net/ethernet/stmicro/stmmac/

stmmac_main.c
    233  if (rx_q->xsk_pool) {  in stmmac_disable_all_queues()
    1644  buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);  in stmmac_alloc_rx_buffers_zc()
    1686  rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);  in __init_dma_rx_desc_rings()
    1688  if (rx_q->xsk_pool) {  in __init_dma_rx_desc_rings()
    1695  xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);  in __init_dma_rx_desc_rings()
    1705  if (rx_q->xsk_pool) {  in __init_dma_rx_desc_rings()
    1757  if (rx_q->xsk_pool)  in init_dma_rx_desc_rings()
    1763  rx_q->xsk_pool = NULL;  in init_dma_rx_desc_rings()
    1803  tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);  in __init_dma_tx_desc_rings()
    1889  if (tx_q->xsk_pool && tx_q->xsk_frames_done) {  in dma_free_tx_skbufs()
    [all …]

stmmac.h
    73  struct xsk_buff_pool *xsk_pool;  member
    99  struct xsk_buff_pool *xsk_pool;  member
/drivers/net/ethernet/intel/igc/

igc_main.c
    251  if (tx_ring->xsk_pool && xsk_frames)  in igc_clean_tx_ring()
    252  xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);  in igc_clean_tx_ring()
    464  if (ring->xsk_pool)  in igc_clean_rx_ring()
    636  ring->xsk_pool = igc_get_xsk_pool(adapter, ring);  in igc_configure_rx_ring()
    637  if (ring->xsk_pool) {  in igc_configure_rx_ring()
    641  xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in igc_configure_rx_ring()
    670  if (ring->xsk_pool)  in igc_configure_rx_ring()
    671  buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);  in igc_configure_rx_ring()
    736  ring->xsk_pool = igc_get_xsk_pool(adapter, ring);  in igc_configure_tx_ring()
    2259  bi->xdp = xsk_buff_alloc(ring->xsk_pool);  in igc_alloc_rx_buffers_zc()
    [all …]
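igc_clean_tx_ring() shows the completion side of zero-copy TX: frames sent straight from the umem have no skb to free, so the clean loop just counts them and returns the lot to user space with one xsk_tx_completed() call; mlx5's xdp.c at the end of this listing does the same. A sketch of that epilogue:

/* TX clean-loop epilogue: hand completed umem frames back to the XSK
 * completion ring, then re-arm the TX wakeup if user space relies on it.
 */
static void my_clean_tx_zc(struct my_ring *tx_ring, u32 xsk_frames)
{
        if (!tx_ring->xsk_pool)
                return;

        if (xsk_frames)
                xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

        if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
                xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
}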
igc.h
    133  struct xsk_buff_pool *xsk_pool;  member
/drivers/net/ethernet/mellanox/mlx5/core/

en_rx.c
    288  if (rq->xsk_pool)  in mlx5e_page_alloc()
    321  if (rq->xsk_pool)  in mlx5e_page_release()
    408  if (rq->xsk_pool) {  in mlx5e_alloc_rx_wqes()
    415  if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))  in mlx5e_alloc_rx_wqes()
    512  if (rq->xsk_pool &&  in mlx5e_alloc_rx_mpwqe()
    513  unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {  in mlx5e_alloc_rx_mpwqe()
    769  if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))  in mlx5e_post_rx_mpwqes()
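mlx5e_alloc_rx_wqes()/mlx5e_alloc_rx_mpwqe() guard batch allocation with xsk_buff_can_alloc(): a multi-packet WQE needs all of its pages, so the driver refuses to start building one it would only have to unwind. A sketch with a made-up batch size standing in for MLX5_MPWRQ_PAGES_PER_WQE; it assumes the queue is in XSK mode (rq->xsk_pool != NULL):

#define MY_PAGES_PER_BATCH 64   /* hypothetical batch size */

static void my_attach_to_wqe(struct my_ring *rq, u32 idx,
                             struct xdp_buff *xdp);  /* hypothetical */

static int my_alloc_rx_batch(struct my_ring *rq)
{
        u32 i;

        /* All-or-nothing: check the fill queue holds a full batch before
         * consuming anything from it.
         */
        if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MY_PAGES_PER_BATCH)))
                return -ENOMEM; /* caller retries later, as in
                                 * mlx5e_post_rx_mpwqes() */

        for (i = 0; i < MY_PAGES_PER_BATCH; i++) {
                struct xdp_buff *xdp = xsk_buff_alloc(rq->xsk_pool);

                if (unlikely(!xdp))
                        return -ENOMEM;
                my_attach_to_wqe(rq, i, xdp);
        }

        return 0;
}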
en_main.c
    497  xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);  in mlx5e_alloc_rq()
    963  struct xsk_buff_pool *xsk_pool,  in mlx5e_alloc_xdpsq()  argument
    979  sq->xsk_pool = xsk_pool;  in mlx5e_alloc_xdpsq()
    981  sq->stats = sq->xsk_pool ?  in mlx5e_alloc_xdpsq()
    1467  struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,  in mlx5e_open_xdpsq()  argument
    1473  err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);  in mlx5e_open_xdpsq()
    1984  struct xsk_buff_pool *xsk_pool,  in mlx5e_open_channel()  argument
    2022  if (xsk_pool) {  in mlx5e_open_channel()
    2023  mlx5e_build_xsk_param(xsk_pool, &xsk);  in mlx5e_open_channel()
    2024  err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);  in mlx5e_open_channel()
    [all …]

en.h
    498  struct xsk_buff_pool *xsk_pool;  member
    668  struct xsk_buff_pool *xsk_pool;  member
    960  struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
/drivers/net/ethernet/mellanox/mlx5/core/en/

xdp.c
    457  xsk_tx_completed(sq->xsk_pool, xsk_frames);  in mlx5e_poll_xdpsq_cq()
    495  xsk_tx_completed(sq->xsk_pool, xsk_frames);  in mlx5e_free_xdpsq_descs()