Lines Matching refs:tx_ring

16 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
22 struct enetc_bdr *tx_ring; in enetc_xmit() local
25 tx_ring = priv->tx_ring[skb->queue_mapping]; in enetc_xmit()
32 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { in enetc_xmit()
33 netif_stop_subqueue(ndev, tx_ring->index); in enetc_xmit()
37 count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads); in enetc_xmit()
41 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) in enetc_xmit()
42 netif_stop_subqueue(ndev, tx_ring->index); in enetc_xmit()
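
The enetc_xmit() matches above show the usual multiqueue flow-control pattern: check that the ring has room for a worst-case frame before mapping, stop the subqueue when it does not (line 33), and stop it again after a transmit leaves fewer than ENETC_TXBDS_MAX_NEEDED descriptors free (lines 41-42). These matches appear to come from the Linux ENETC Ethernet driver (drivers/net/ethernet/freescale/enetc/enetc.c). The helper enetc_bd_unused() is not among the matched lines; the standalone model below assumes the common convention of keeping one slot empty so a full ring is distinguishable from an empty one, and the threshold value is invented for the demonstration.

    #include <stdio.h>

    /* Standalone model of the TX ring fields the matches reference. */
    struct bdr_model {
        int bd_count;      /* total descriptors in the ring */
        int next_to_use;   /* software producer index */
        int next_to_clean; /* software consumer index */
    };

    /* Free descriptors, keeping one slot unused (assumed convention). */
    static int bd_unused(const struct bdr_model *r)
    {
        if (r->next_to_clean > r->next_to_use)
            return r->next_to_clean - r->next_to_use - 1;
        return r->bd_count - r->next_to_use + r->next_to_clean - 1;
    }

    int main(void)
    {
        struct bdr_model r = { .bd_count = 64, .next_to_use = 60, .next_to_clean = 2 };
        int needed = 8; /* stand-in for ENETC_TXBDS_NEEDED(count) */

        if (bd_unused(&r) < needed)
            printf("would stop subqueue (%d BDs free)\n", bd_unused(&r));
        else
            printf("ok to transmit (%d BDs free)\n", bd_unused(&r));
        return 0;
    }

With next_to_use = 60 and next_to_clean = 2 on a 64-entry ring, only 5 descriptors are free, so the queue would be stopped, mirroring the netif_stop_subqueue() call at line 33.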
85 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, in enetc_unmap_tx_buff() argument
89 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
92 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
97 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring, in enetc_free_tx_skb() argument
101 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_free_tx_skb()
109 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, in enetc_map_tx_buffs() argument
123 i = tx_ring->next_to_use; in enetc_map_tx_buffs()
124 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_buffs()
127 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); in enetc_map_tx_buffs()
128 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in enetc_map_tx_buffs()
135 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
167 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
169 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
170 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
192 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in enetc_map_tx_buffs()
194 if (dma_mapping_error(tx_ring->dev, dma)) in enetc_map_tx_buffs()
204 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
206 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
207 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
225 tx_ring->tx_swbd[i].skb = skb; in enetc_map_tx_buffs()
227 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_buffs()
228 tx_ring->next_to_use = i; in enetc_map_tx_buffs()
231 enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */ in enetc_map_tx_buffs()
236 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_buffs()
239 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
240 enetc_free_tx_skb(tx_ring, tx_swbd); in enetc_map_tx_buffs()
242 i = tx_ring->bd_count; in enetc_map_tx_buffs()
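
Two index idioms recur in the enetc_map_tx_buffs() matches: the cursor wraps back to descriptor 0 when it reaches bd_count (lines 167 and 204), and on a DMA mapping error the code walks backwards freeing what was already mapped (lines 239-242, note the reset to bd_count at line 242 so the walk can cross the wrap point). A standalone sketch of that wrap-and-unwind shape; the fragment count and failure point are invented, and the real error path calls enetc_free_tx_skb() per descriptor:

    #include <stdio.h>

    #define BD_COUNT 64

    /* Advance a ring index with wraparound, like enetc_bdr_idx_inc(). */
    static void idx_inc(int *i)
    {
        if (++(*i) == BD_COUNT)
            *i = 0;
    }

    int main(void)
    {
        int i = 62;                  /* start near the end to force a wrap */
        int mapped = 0, fail_at = 3; /* pretend the 4th mapping fails */

        for (int f = 0; f < 5; f++, mapped++) {
            if (f == fail_at)
                goto dma_err;
            printf("mapped fragment %d into BD %d\n", f, i);
            idx_inc(&i);
        }
        return 0;

    dma_err:
        /* Step backwards over the mapped BDs, wrapping the other way,
         * as the error path starting at line 239 does. */
        while (mapped--) {
            if (i == 0)
                i = BD_COUNT;
            i--;
            printf("freed BD %d\n", i);
        }
        return 1;
    }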
265 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
278 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) in enetc_poll()
300 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) in enetc_bd_ready_count() argument
302 int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; in enetc_bd_ready_count()
304 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; in enetc_bd_ready_count()
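
The full body of enetc_bd_ready_count() is visible in the matches: pi is the hardware consumer index read from tcir (line 302), ci is the software's next_to_clean, and the number of completed descriptors is pi - ci adjusted for wraparound. For example, with bd_count = 64, ci = 60 and pi = 4, the ring has wrapped and 64 - 60 + 4 = 8 descriptors are ready. A direct transcription of the ternary at line 304:

    #include <stdio.h>

    /* Completed BDs between software index ci and hardware index pi
     * on a ring of bd_count entries. */
    static int bd_ready_count(int pi, int ci, int bd_count)
    {
        return pi >= ci ? pi - ci : bd_count - ci + pi;
    }

    int main(void)
    {
        printf("%d\n", bd_ready_count(10, 4, 64)); /* no wrap: 6 */
        printf("%d\n", bd_ready_count(4, 60, 64)); /* wrapped: 8 */
        return 0;
    }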
331 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) in enetc_clean_tx_ring() argument
333 struct net_device *ndev = tx_ring->ndev; in enetc_clean_tx_ring()
340 i = tx_ring->next_to_clean; in enetc_clean_tx_ring()
341 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_clean_tx_ring()
342 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
353 txbd = ENETC_TXBD(*tx_ring, i); in enetc_clean_tx_ring()
364 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
380 if (unlikely(i == tx_ring->bd_count)) { in enetc_clean_tx_ring()
382 tx_swbd = tx_ring->tx_swbd; in enetc_clean_tx_ring()
389 enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) | in enetc_clean_tx_ring()
390 BIT(16 + tx_ring->index)); in enetc_clean_tx_ring()
394 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
397 tx_ring->next_to_clean = i; in enetc_clean_tx_ring()
398 tx_ring->stats.packets += tx_frm_cnt; in enetc_clean_tx_ring()
399 tx_ring->stats.bytes += tx_byte_cnt; in enetc_clean_tx_ring()
402 __netif_subqueue_stopped(ndev, tx_ring->index) && in enetc_clean_tx_ring()
403 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { in enetc_clean_tx_ring()
404 netif_wake_subqueue(ndev, tx_ring->index); in enetc_clean_tx_ring()
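
enetc_clean_tx_ring() follows the standard NAPI completion shape: compute how many descriptors the hardware has retired, unmap and free up to the budget, advance next_to_clean (line 397), accumulate packet and byte counters (lines 398-399), and wake the stopped subqueue once enough descriptors are free again (lines 402-404). A condensed standalone model; the per-descriptor work is stubbed out, each BD is treated as one frame for simplicity, and the byte count is invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
        int bd_count, next_to_clean;
        int hw_ci;                /* models the index read from tcir */
        long packets, bytes;
    };

    static int ready(const struct ring *r)
    {
        int pi = r->hw_ci, ci = r->next_to_clean;
        return pi >= ci ? pi - ci : r->bd_count - ci + pi;
    }

    /* Returns true if cleaning finished within the budget, which is how
     * enetc_poll() (line 278) decides whether the poll is complete. */
    static bool clean_tx_ring(struct ring *r, int budget)
    {
        int done = 0, i = r->next_to_clean;
        int todo = ready(r);

        while (todo && done < budget) {
            /* real code: unmap the DMA buffer, free the skb on the
             * last BD of each frame */
            r->packets++;
            r->bytes += 1500; /* illustrative frame size */
            if (++i == r->bd_count)
                i = 0;
            todo--;
            done++;
        }
        r->next_to_clean = i;
        return done < budget; /* real code would also wake the subqueue */
    }

    int main(void)
    {
        struct ring r = { .bd_count = 64, .next_to_clean = 60, .hw_ci = 4 };
        printf("complete=%d packets=%ld\n", clean_tx_ring(&r, 32), r.packets);
        return 0;
    }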
808 err = enetc_alloc_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
818 enetc_free_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
828 enetc_free_txbdr(priv->tx_ring[i]); in enetc_free_tx_resources()
893 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) in enetc_free_tx_ring() argument
897 if (!tx_ring->tx_swbd) in enetc_free_tx_ring()
900 for (i = 0; i < tx_ring->bd_count; i++) { in enetc_free_tx_ring()
901 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; in enetc_free_tx_ring()
903 enetc_free_tx_skb(tx_ring, tx_swbd); in enetc_free_tx_ring()
906 tx_ring->next_to_clean = 0; in enetc_free_tx_ring()
907 tx_ring->next_to_use = 0; in enetc_free_tx_ring()
942 enetc_free_tx_ring(priv->tx_ring[i]); in enetc_free_rxtx_rings()
1103 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_setup_txbdr() argument
1105 int idx = tx_ring->index; in enetc_setup_txbdr()
1109 lower_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
1112 upper_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
1114 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_txbdr()
1116 ENETC_RTBLENR_LEN(tx_ring->bd_count)); in enetc_setup_txbdr()
1119 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); in enetc_setup_txbdr()
1120 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); in enetc_setup_txbdr()
1126 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in enetc_setup_txbdr()
1132 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); in enetc_setup_txbdr()
1133 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); in enetc_setup_txbdr()
1134 tx_ring->idr = hw->reg + ENETC_SITXIDR; in enetc_setup_txbdr()
1180 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); in enetc_setup_bdrs()
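
enetc_setup_txbdr() programs the descriptor base address as two 32-bit register writes (the lower_32_bits()/upper_32_bits() continuation lines at 1109 and 1112) and checks that bd_count is a multiple of 64 (line 1114). The kernel helpers involved reduce to simple bit operations; userspace equivalents for illustration (the target register names are omitted here because the matches only show the continuation lines, and the address is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace equivalents of the kernel helpers. */
    #define LOWER_32_BITS(n) ((uint32_t)((n) & 0xffffffffULL))
    #define UPPER_32_BITS(n) ((uint32_t)((n) >> 32))
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        uint64_t bd_dma_base = 0x0000000123456000ULL; /* invented address */
        int bd_count = 256;

        printf("base lo -> 0x%08x\n", (unsigned)LOWER_32_BITS(bd_dma_base));
        printf("base hi -> 0x%08x\n", (unsigned)UPPER_32_BITS(bd_dma_base));
        printf("bd_count %d aligned to 64: %s\n", bd_count,
               IS_ALIGNED(bd_count, 64) ? "yes" : "no");
        return 0;
    }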
1194 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_clear_txbdr() argument
1197 int idx = tx_ring->index; in enetc_clear_txbdr()
1210 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", in enetc_clear_txbdr()
1219 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); in enetc_clear_bdrs()
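
The enetc_clear_txbdr() match at line 1210 shows the tail of a bounded wait: the driver polls the ring's status until the hardware reports it idle, and warns if the poll budget runs out. A generic standalone sketch of that poll-with-timeout idiom; the busy predicate, delay, and budget are stand-ins, not the driver's actual register read:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for reading the ring's busy status from hardware. */
    static bool ring_busy(int iteration)
    {
        return iteration < 3; /* pretend the ring drains after 3 polls */
    }

    int main(void)
    {
        int timeout = 1000; /* illustrative poll budget */
        int iter = 0;

        do {
            usleep(10); /* the driver delays between status reads */
            iter++;
        } while (ring_busy(iter) && --timeout);

        if (!timeout)
            fprintf(stderr, "timeout for tx ring clear\n");
        else
            printf("ring idle after %d polls\n", iter);
        return 0;
    }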
1253 int idx = v->tx_ring[j].index; in enetc_setup_irqs()
1435 struct enetc_bdr *tx_ring; in enetc_setup_tc() local
1451 tx_ring = priv->tx_ring[i]; in enetc_setup_tc()
1452 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); in enetc_setup_tc()
1470 tx_ring = priv->tx_ring[i]; in enetc_setup_tc()
1471 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); in enetc_setup_tc()
1504 packets += priv->tx_ring[i]->stats.packets; in enetc_get_stats()
1505 bytes += priv->tx_ring[i]->stats.bytes; in enetc_get_stats()
1653 bdr = &v->tx_ring[j]; in enetc_alloc_msix()
1658 priv->tx_ring[idx] = bdr; in enetc_alloc_msix()
1696 priv->tx_ring[i] = NULL; in enetc_free_msix()