Lines matching refs:txq in the qede TX fast path (drivers/net/ethernet/qlogic/qede/qede_fp.c)
75 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) in qede_free_tx_pkt() argument
77 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
78 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
83 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
89 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
95 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
103 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
113 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
119 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
123 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
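Taken together, these matches trace the normal TX completion path: qede_free_tx_pkt() picks up the skb stored at sw_tx_cons (lines 77/78), consumes the packet's BDs from the tx_pbl chain while releasing their DMA mappings (lines 95-119), and clears the sw ring slot (lines 123/124). A condensed sketch of that flow, assuming the kernel qede context; the sanity warning at line 89 and the QEDE_TSO_SPLIT_BD header handling at line 83 are elided:

    static int qede_free_tx_pkt_sketch(struct qede_dev *edev,
                                       struct qede_tx_queue *txq, int *len)
    {
            u16 idx = txq->sw_tx_cons;
            struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
            struct eth_tx_1st_bd *first_bd;
            struct eth_tx_bd *bd;
            int i;

            *len = skb->len;

            /* The first BD covers the linear part of the skb */
            first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
            dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
                             BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);

            /* One further BD per page fragment; a TSO header split would
             * add one more BD here (QEDE_TSO_SPLIT_BD), elided.
             */
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    bd = (struct eth_tx_bd *)qed_chain_consume(&txq->tx_pbl);
                    dma_unmap_page(txq->dev, BD_UNMAP_ADDR(bd),
                                   BD_UNMAP_LEN(bd), DMA_TO_DEVICE);
            }

            dev_kfree_skb_any(skb);
            txq->sw_tx_ring.skbs[idx].skb = NULL;
            txq->sw_tx_ring.skbs[idx].flags = 0;

            return 0;
    }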
130 static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, in qede_free_failed_tx_pkt() argument
134 u16 idx = txq->sw_tx_prod; in qede_free_failed_tx_pkt()
135 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
140 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
141 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
143 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
147 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
152 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
158 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
160 dma_unmap_page(txq->dev, in qede_free_failed_tx_pkt()
166 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
167 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
171 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
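These matches are the error-unwind twin of the function above: when a DMA mapping fails mid-packet, qede_free_failed_tx_pkt() rewinds the chain producer to the last doorbelled position (lines 140/141), re-produces the BDs already written so their mappings can be released (lines 143-160), rewinds once more (lines 166/167), and clears the slot at sw_tx_prod. A sketch, assuming nbd fragment BDs were produced after first_bd and eliding the QEDE_TSO_SPLIT_BD skip:

    static void qede_free_failed_tx_pkt_sketch(struct qede_tx_queue *txq,
                                               struct eth_tx_1st_bd *first_bd,
                                               int nbd, bool data_split)
    {
            u16 idx = txq->sw_tx_prod;
            struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
            struct eth_tx_bd *bd;
            int i;

            /* Step back to the producer position the device last saw */
            qed_chain_set_prod(&txq->tx_pbl,
                               le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

            /* Walk the written BDs again, purely to recover their mappings */
            first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
            dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
                             BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);

            for (i = 0; i < nbd; i++) {
                    bd = (struct eth_tx_bd *)qed_chain_produce(&txq->tx_pbl);
                    dma_unmap_page(txq->dev, BD_UNMAP_ADDR(bd),
                                   BD_UNMAP_LEN(bd), DMA_TO_DEVICE);
            }

            /* Step back again so the slots are reusable, then drop the skb */
            qed_chain_set_prod(&txq->tx_pbl,
                               le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

            dev_kfree_skb_any(skb);
            txq->sw_tx_ring.skbs[idx].skb = NULL;
            txq->sw_tx_ring.skbs[idx].flags = 0;
    }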
242 static int map_frag_to_bd(struct qede_tx_queue *txq, in map_frag_to_bd() argument
248 mapping = skb_frag_dma_map(txq->dev, frag, 0, in map_frag_to_bd()
250 if (unlikely(dma_mapping_error(txq->dev, mapping))) in map_frag_to_bd()
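map_frag_to_bd() is the one place a paged fragment is DMA-mapped; lines 248-250 show the standard skb_frag_dma_map() / dma_mapping_error() pair. A minimal sketch, assuming qede's BD_SET_UNMAP_ADDR_LEN() helper to stash the address and length in the BD:

    static int map_frag_to_bd_sketch(struct qede_tx_queue *txq,
                                     skb_frag_t *frag, struct eth_tx_bd *bd)
    {
            dma_addr_t mapping;

            /* Map the whole fragment for device reads */
            mapping = skb_frag_dma_map(txq->dev, frag, 0,
                                       skb_frag_size(frag), DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(txq->dev, mapping)))
                    return -ENOMEM;

            /* Record address and length so completion can unmap later */
            BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

            return 0;
    }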
289 static inline void qede_update_tx_producer(struct qede_tx_queue *txq) in qede_update_tx_producer() argument
296 writel(txq->tx_db.raw, txq->doorbell_addr); in qede_update_tx_producer()
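qede_update_tx_producer() is the doorbell write at line 296. In the driver it is fenced on both sides: a write barrier before writel() so the BDs are visible to the device before the new producer value, and one after to flush the write-combined doorbell BAR. A sketch of that ordering:

    static inline void qede_update_tx_producer_sketch(struct qede_tx_queue *txq)
    {
            /* BD writes must be globally visible before the device
             * sees the new producer value.
             */
            wmb();
            writel(txq->tx_db.raw, txq->doorbell_addr);
            /* Flush the write-combined doorbell BAR */
            wmb();
    }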
305 static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, in qede_xdp_xmit() argument
312 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= in qede_xdp_xmit()
313 txq->num_tx_buffers)) { in qede_xdp_xmit()
314 txq->stopped_cnt++; in qede_xdp_xmit()
318 bd = qed_chain_produce(&txq->tx_pbl); in qede_xdp_xmit()
330 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; in qede_xdp_xmit()
335 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_xdp_xmit()
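The XDP transmit path is a single-BD variant of the skb path: it bails out (counting stopped_cnt) if the chain has no room (lines 312-314), produces one BD, records the page in the sw_tx_ring.xdp array at sw_tx_prod, and advances the producer modulo num_tx_buffers (line 335). A sketch, assuming sw_tx_xdp carries the page and its DMA mapping as in qede; the first-BD flag setup is elided:

    static int qede_xdp_xmit_sketch(struct qede_tx_queue *txq, dma_addr_t dma,
                                    u16 pad, u16 len, struct page *page)
    {
            struct eth_tx_1st_bd *bd;
            struct sw_tx_xdp *xdp;

            /* One BD per frame; refuse if the chain is already full */
            if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
                         txq->num_tx_buffers)) {
                    txq->stopped_cnt++;
                    return -ENOMEM;
            }

            bd = qed_chain_produce(&txq->tx_pbl);
            bd->data.nbds = 1;
            BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);

            /* Remember the page so qede_xdp_tx_int() can release it */
            xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
            xdp->page = page;
            xdp->mapping = dma;

            txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

            return 0;
    }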
388 int qede_txq_has_work(struct qede_tx_queue *txq) in qede_txq_has_work() argument
394 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_txq_has_work()
395 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) in qede_txq_has_work()
398 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); in qede_txq_has_work()
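qede_txq_has_work() is nearly fully reproduced by the three matches above: it compares the HW consumer, which firmware DMAs into *hw_cons_ptr, with the chain's SW consumer. The hw_bd_cons + 1 test at line 395 filters out the chain's next-page element, which appears as a one-entry gap without any real completion. Reconstructed for readability:

    int qede_txq_has_work_sketch(struct qede_tx_queue *txq)
    {
            u16 hw_bd_cons;

            /* Consumer and producer indices change under us */
            barrier();
            hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);

            /* A gap of exactly one is the chain's next-page element,
             * not a completed packet.
             */
            if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
                    return 0;

            return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
    }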
401 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) in qede_xdp_tx_int() argument
403 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; in qede_xdp_tx_int()
408 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_xdp_tx_int()
411 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_xdp_tx_int()
412 xdp_info = xdp_arr + txq->sw_tx_cons; in qede_xdp_tx_int()
427 qed_chain_consume(&txq->tx_pbl); in qede_xdp_tx_int()
428 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_xdp_tx_int()
429 txq->xmit_pkts++; in qede_xdp_tx_int()
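The XDP completion loop mirrors qede_xdp_xmit(): it walks from the chain's SW consumer up to the HW consumer, releases the page recorded at sw_tx_cons, consumes the BD, and advances sw_tx_cons with the same modulo arithmetic (line 428). A sketch, assuming a plain unmap-and-free release; the driver's distinction between XDP_TX recycling and redirected frames is elided:

    static void qede_xdp_tx_int_sketch(struct qede_dev *edev,
                                       struct qede_tx_queue *txq)
    {
            struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
            u16 hw_bd_cons;

            hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
            barrier();

            while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
                    xdp_info = xdp_arr + txq->sw_tx_cons;

                    /* Release the transmitted page */
                    dma_unmap_page(&edev->pdev->dev, xdp_info->mapping,
                                   PAGE_SIZE, DMA_BIDIRECTIONAL);
                    __free_page(xdp_info->page);

                    qed_chain_consume(&txq->tx_pbl);
                    txq->sw_tx_cons = (txq->sw_tx_cons + 1) %
                                      txq->num_tx_buffers;
                    txq->xmit_pkts++;
            }
    }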
433 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) in qede_tx_int() argument
440 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); in qede_tx_int()
442 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_tx_int()
445 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_tx_int()
448 rc = qede_free_tx_pkt(edev, txq, &len); in qede_tx_int()
452 qed_chain_get_cons_idx(&txq->tx_pbl)); in qede_tx_int()
458 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_tx_int()
459 txq->xmit_pkts++; in qede_tx_int()
490 (qed_chain_get_elem_left(&txq->tx_pbl) in qede_tx_int()
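qede_tx_int() is the skb counterpart: it frees completed packets via qede_free_tx_pkt() until the chain consumer catches up with the HW consumer, accumulates packet and byte counts for BQL, and, per the test at line 490, wakes the netdev queue once enough chain elements are free for a maximally fragmented skb. A sketch under those assumptions; the driver additionally takes the tx queue lock and re-checks the stop condition under it:

    static int qede_tx_int_sketch(struct qede_dev *edev,
                                  struct qede_tx_queue *txq)
    {
            unsigned int pkts_compl = 0, bytes_compl = 0;
            struct netdev_queue *netdev_txq;
            u16 hw_bd_cons;

            netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

            hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
            barrier();

            while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
                    int len = 0;

                    /* Frees one packet's BDs, mappings and skb */
                    qede_free_tx_pkt(edev, txq, &len);
                    bytes_compl += len;
                    pkts_compl++;
                    txq->sw_tx_cons = (txq->sw_tx_cons + 1) %
                                      txq->num_tx_buffers;
                    txq->xmit_pkts++;
            }

            /* BQL accounting, then wake the queue if room has returned */
            netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
            if (netif_tx_queue_stopped(netdev_txq) &&
                qed_chain_get_elem_left(&txq->tx_pbl) >= (MAX_SKB_FRAGS + 1))
                    netif_tx_wake_queue(netdev_txq);

            return 0;
    }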
1406 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll_is_more_work()
1431 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll()
1432 qede_tx_int(edev, &fp->txq[cos]); in qede_poll()
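In the NAPI poll path the per-fastpath txq is an array indexed by class of service, so both the "is there more work" probe and the poll itself loop over it. A fragment, assuming qede's for_each_cos_in_txq() iterator over edev's traffic classes:

    /* Inside qede_poll(), assuming fp is a fastpath with a TX queue */
    int cos;

    for_each_cos_in_txq(edev, cos) {
            if (qede_txq_has_work(&fp->txq[cos]))
                    qede_tx_int(edev, &fp->txq[cos]);
    }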
1483 struct qede_tx_queue *txq; in qede_start_xmit() local
1500 txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index); in qede_start_xmit()
1503 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); in qede_start_xmit()
1510 txq->tx_mem_alloc_err++; in qede_start_xmit()
1519 idx = txq->sw_tx_prod; in qede_start_xmit()
1520 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1522 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1531 mapping = dma_map_single(txq->dev, skb->data, in qede_start_xmit()
1533 if (unlikely(dma_mapping_error(txq->dev, mapping))) { in qede_start_xmit()
1535 qede_free_failed_tx_pkt(txq, first_bd, 0, false); in qede_start_xmit()
1536 qede_update_tx_producer(txq); in qede_start_xmit()
1547 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1552 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1585 if (unlikely(txq->is_legacy)) in qede_start_xmit()
1642 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
1652 qede_free_failed_tx_pkt(txq, first_bd, 0, false); in qede_start_xmit()
1653 qede_update_tx_producer(txq); in qede_start_xmit()
1666 rc = map_frag_to_bd(txq, in qede_start_xmit()
1670 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); in qede_start_xmit()
1671 qede_update_tx_producer(txq); in qede_start_xmit()
1686 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1690 rc = map_frag_to_bd(txq, in qede_start_xmit()
1694 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); in qede_start_xmit()
1695 qede_update_tx_producer(txq); in qede_start_xmit()
1710 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_start_xmit()
1713 txq->tx_db.data.bd_prod = in qede_start_xmit()
1714 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); in qede_start_xmit()
1717 qede_update_tx_producer(txq); in qede_start_xmit()
1719 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) in qede_start_xmit()
1722 qede_update_tx_producer(txq); in qede_start_xmit()
1725 txq->stopped_cnt++; in qede_start_xmit()
1734 if ((qed_chain_get_elem_left(&txq->tx_pbl) >= in qede_start_xmit()
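qede_start_xmit() ties the pieces together: resolve the txq from the skb's queue mapping (line 1500), store the skb at sw_tx_prod (lines 1519/1520), produce the first BD and dma_map_single() the linear part (line 1531), map each page fragment through map_frag_to_bd() (lines 1666/1690), then advance sw_tx_prod modulo num_tx_buffers (line 1710), latch the chain producer into tx_db.data.bd_prod (lines 1713/1714), and ring the doorbell. Note that every mapping-failure branch calls qede_free_failed_tx_pkt() and still rings the doorbell, keeping the device consistent with the latched producer. A condensed happy-path sketch; TSO, VLAN and checksum offload setup, the legacy-mode quirk at line 1585, and the stop/restart bookkeeping at lines 1719-1734 are simplified:

    static netdev_tx_t qede_start_xmit_sketch(struct sk_buff *skb,
                                              struct net_device *ndev)
    {
            struct qede_dev *edev = netdev_priv(ndev);
            struct qede_tx_queue *txq;
            struct eth_tx_1st_bd *first_bd;
            dma_addr_t mapping;
            u16 idx;
            int i;

            txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, skb_get_queue_mapping(skb));

            idx = txq->sw_tx_prod;
            txq->sw_tx_ring.skbs[idx].skb = skb;

            /* First BD: linear part of the skb */
            first_bd = qed_chain_produce(&txq->tx_pbl);
            mapping = dma_map_single(txq->dev, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(txq->dev, mapping))) {
                    qede_free_failed_tx_pkt(txq, first_bd, 0, false);
                    qede_update_tx_producer(txq);
                    return NETDEV_TX_OK;
            }
            BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

            /* One BD per page fragment */
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    struct eth_tx_bd *bd = qed_chain_produce(&txq->tx_pbl);

                    if (map_frag_to_bd(txq, &skb_shinfo(skb)->frags[i], bd)) {
                            qede_free_failed_tx_pkt(txq, first_bd, i + 1,
                                                    false);
                            qede_update_tx_producer(txq);
                            return NETDEV_TX_OK;
                    }
            }

            /* Advance the sw producer and latch the chain producer for
             * the doorbell; ring it unless more frames are queued.
             */
            txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
            txq->tx_db.data.bd_prod =
                    cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
            if (!netdev_xmit_more())
                    qede_update_tx_producer(txq);

            /* Stop the queue if a max-fragmented skb no longer fits */
            if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) <
                         (MAX_SKB_FRAGS + 1))) {
                    netif_tx_stop_queue(netdev_get_tx_queue(ndev,
                                            skb_get_queue_mapping(skb)));
                    txq->stopped_cnt++;
            }

            return NETDEV_TX_OK;
    }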