
Lines Matching refs:txq

29 	struct hfi1_ipoib_txq      *txq;  member
47 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_used() argument
49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, in hfi1_ipoib_used()
50 txq->tx_ring.complete_txreqs); in hfi1_ipoib_used()
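
The in-flight count above is just the difference of two free-running counters. A minimal userspace sketch of the same wrap-safe accounting (struct and names hypothetical):

    #include <stdint.h>

    /* Hypothetical stand-ins for the ring's free-running counters. */
    struct tx_ring_counters {
        uint64_t sent_txreqs;      /* bumped when a request is posted */
        uint64_t complete_txreqs;  /* bumped when its completion is seen */
    };

    /* Unsigned subtraction stays correct across wraparound, so the
     * counters never need to be reset. */
    static uint64_t ring_used(const struct tx_ring_counters *c)
    {
        return c->sent_txreqs - c->complete_txreqs;
    }
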
53 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_stop_txq() argument
55 trace_hfi1_txq_stop(txq); in hfi1_ipoib_stop_txq()
56 if (atomic_inc_return(&txq->tx_ring.stops) == 1) in hfi1_ipoib_stop_txq()
57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
60 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_wake_txq() argument
62 trace_hfi1_txq_wake(txq); in hfi1_ipoib_wake_txq()
63 if (atomic_dec_and_test(&txq->tx_ring.stops)) in hfi1_ipoib_wake_txq()
64 netif_wake_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_wake_txq()
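
hfi1_ipoib_stop_txq() and hfi1_ipoib_wake_txq() reference-count the stop reasons (ring full, no descriptors), so the subqueue is stopped by the first reason and woken only when the last one clears. A sketch of that pattern using C11 atomics in place of the kernel's atomic_t (stub names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int stops;  /* number of independent stop reasons held */

    /* Stubs standing in for netif_stop_subqueue()/netif_wake_subqueue(). */
    static void subqueue_stop(void) { puts("subqueue stopped"); }
    static void subqueue_wake(void) { puts("subqueue woken"); }

    static void txq_stop(void)
    {
        /* Only the 0 -> 1 transition actually stops the subqueue. */
        if (atomic_fetch_add(&stops, 1) == 0)
            subqueue_stop();
    }

    static void txq_wake(void)
    {
        /* Only the 1 -> 0 transition wakes it again. */
        if (atomic_fetch_sub(&stops, 1) == 1)
            subqueue_wake();
    }
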
67 static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_ring_hwat() argument
69 return min_t(uint, txq->priv->netdev->tx_queue_len, in hfi1_ipoib_ring_hwat()
70 txq->tx_ring.max_items - 1); in hfi1_ipoib_ring_hwat()
73 static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_ring_lwat() argument
75 return min_t(uint, txq->priv->netdev->tx_queue_len, in hfi1_ipoib_ring_lwat()
76 txq->tx_ring.max_items) >> 1; in hfi1_ipoib_ring_lwat()
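
The high watermark leaves one slot of headroom below ring capacity, and the low watermark sits at half capacity; the gap between them is the hysteresis that keeps the queue from flapping between stopped and awake. The same arithmetic, extracted into plain C:

    #include <stdint.h>

    static uint32_t min_u32(uint32_t a, uint32_t b)
    {
        return a < b ? a : b;
    }

    /* Stop threshold: one below capacity, capped by tx_queue_len. */
    static uint32_t ring_hwat(uint32_t tx_queue_len, uint32_t max_items)
    {
        return min_u32(tx_queue_len, max_items - 1);
    }

    /* Wake threshold: half the (capped) capacity. */
    static uint32_t ring_lwat(uint32_t tx_queue_len, uint32_t max_items)
    {
        return min_u32(tx_queue_len, max_items) >> 1;
    }
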
79 static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_check_queue_depth() argument
81 ++txq->tx_ring.sent_txreqs; in hfi1_ipoib_check_queue_depth()
82 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && in hfi1_ipoib_check_queue_depth()
83 !atomic_xchg(&txq->tx_ring.ring_full, 1)) { in hfi1_ipoib_check_queue_depth()
84 trace_hfi1_txq_full(txq); in hfi1_ipoib_check_queue_depth()
85 hfi1_ipoib_stop_txq(txq); in hfi1_ipoib_check_queue_depth()
89 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_check_queue_stopped() argument
91 struct net_device *dev = txq->priv->netdev; in hfi1_ipoib_check_queue_stopped()
107 if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && in hfi1_ipoib_check_queue_stopped()
108 atomic_xchg(&txq->tx_ring.ring_full, 0)) { in hfi1_ipoib_check_queue_stopped()
109 trace_hfi1_txq_xmit_unstopped(txq); in hfi1_ipoib_check_queue_stopped()
110 hfi1_ipoib_wake_txq(txq); in hfi1_ipoib_check_queue_stopped()
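
Both checks latch ring_full with an exchange, so stop and wake each fire exactly once per full/empty episode even if several contexts race past the threshold. A combined sketch reusing the helpers above, with C11 atomic_exchange() standing in for the kernel's atomic_xchg() (all names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_int ring_full;

    /* From the earlier sketches. */
    uint64_t ring_used_now(void);
    uint32_t hwat, lwat;
    void txq_stop(void);
    void txq_wake(void);

    static void check_queue_depth(void)
    {
        /* First sender to cross hwat flips ring_full 0 -> 1 and stops. */
        if (ring_used_now() >= hwat && !atomic_exchange(&ring_full, 1))
            txq_stop();
    }

    static void check_queue_stopped(void)
    {
        /* First completion to drop below lwat flips it back and wakes. */
        if (ring_used_now() < lwat && atomic_exchange(&ring_full, 0))
            txq_wake();
    }
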
116 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; in hfi1_ipoib_free_tx()
125 le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
126 tx->txq->sde->this_idx); in hfi1_ipoib_free_tx()
134 static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_drain_tx_ring() argument
136 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_drain_tx_ring()
145 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_ring()
151 tx_ring->avail = hfi1_ipoib_ring_hwat(txq); in hfi1_ipoib_drain_tx_ring()
156 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_poll_tx_ring() local
158 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_poll_tx_ring()
164 trace_hfi1_txq_poll(txq); in hfi1_ipoib_poll_tx_ring()
180 hfi1_ipoib_check_queue_stopped(txq); in hfi1_ipoib_poll_tx_ring()
192 trace_hfi1_txq_complete(tx->txq); in hfi1_ipoib_sdma_complete()
196 napi_schedule_irqoff(&tx->txq->napi); in hfi1_ipoib_sdma_complete()
259 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; in hfi1_ipoib_build_ib_tx_headers()
330 ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs)); in hfi1_ipoib_build_ib_tx_headers()
353 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_common() local
355 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_send_dma_common()
362 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq)) in hfi1_ipoib_send_dma_common()
368 min_t(u32, hfi1_ipoib_ring_hwat(txq), in hfi1_ipoib_send_dma_common()
374 trace_hfi1_txq_alloc_tx(txq); in hfi1_ipoib_send_dma_common()
378 tx->txq = txq; in hfi1_ipoib_send_dma_common()
386 if (txq->flow.as_int != txp->flow.as_int) { in hfi1_ipoib_send_dma_common()
387 txq->flow.tx_queue = txp->flow.tx_queue; in hfi1_ipoib_send_dma_common()
388 txq->flow.sc5 = txp->flow.sc5; in hfi1_ipoib_send_dma_common()
389 txq->sde = in hfi1_ipoib_send_dma_common()
393 trace_hfi1_flow_switch(txq); in hfi1_ipoib_send_dma_common()
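
The flow comparison above works because the flow's fields can also be read as a single integer, so one compare decides whether the cached SDMA engine is still valid for this packet. A hypothetical packing that permits the same single-load comparison (the driver's real layout may differ):

    #include <stdbool.h>
    #include <stdint.h>

    union flow {
        struct {
            uint8_t tx_queue;
            uint8_t sc5;     /* service class */
        };
        uint16_t as_int;     /* both fields viewed as one integer */
    };

    /* One integer compare instead of two field compares. */
    static bool flow_changed(union flow cached, union flow next)
    {
        return cached.as_int != next.as_int;
    }
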
405 struct hfi1_ipoib_txq *txq) in hfi1_ipoib_submit_tx_list() argument
410 ret = sdma_send_txlist(txq->sde, in hfi1_ipoib_submit_tx_list()
411 iowait_get_ib_work(&txq->wait), in hfi1_ipoib_submit_tx_list()
412 &txq->tx_list, in hfi1_ipoib_submit_tx_list()
417 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret); in hfi1_ipoib_submit_tx_list()
423 struct hfi1_ipoib_txq *txq) in hfi1_ipoib_flush_tx_list() argument
427 if (!list_empty(&txq->tx_list)) { in hfi1_ipoib_flush_tx_list()
429 ret = hfi1_ipoib_submit_tx_list(dev, txq); in hfi1_ipoib_flush_tx_list()
439 static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq, in hfi1_ipoib_submit_tx() argument
444 ret = sdma_send_txreq(txq->sde, in hfi1_ipoib_submit_tx()
445 iowait_get_ib_work(&txq->wait), in hfi1_ipoib_submit_tx()
447 txq->pkts_sent); in hfi1_ipoib_submit_tx()
449 txq->pkts_sent = true; in hfi1_ipoib_submit_tx()
450 iowait_starve_clear(txq->pkts_sent, &txq->wait); in hfi1_ipoib_submit_tx()
460 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_single() local
479 tx_ring = &txq->tx_ring; in hfi1_ipoib_send_dma_single()
483 ret = hfi1_ipoib_submit_tx(txq, tx); in hfi1_ipoib_send_dma_single()
486 trace_sdma_output_ibhdr(txq->priv->dd, in hfi1_ipoib_send_dma_single()
489 hfi1_ipoib_check_queue_depth(txq); in hfi1_ipoib_send_dma_single()
493 txq->pkts_sent = false; in hfi1_ipoib_send_dma_single()
500 napi_schedule(&tx->txq->napi); in hfi1_ipoib_send_dma_single()
511 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_list() local
516 if (txq->flow.as_int != txp->flow.as_int) { in hfi1_ipoib_send_dma_list()
519 trace_hfi1_flow_flush(txq); in hfi1_ipoib_send_dma_list()
520 ret = hfi1_ipoib_flush_tx_list(dev, txq); in hfi1_ipoib_send_dma_list()
542 tx_ring = &txq->tx_ring; in hfi1_ipoib_send_dma_list()
546 list_add_tail(&tx->txreq.list, &txq->tx_list); in hfi1_ipoib_send_dma_list()
548 hfi1_ipoib_check_queue_depth(txq); in hfi1_ipoib_send_dma_list()
550 trace_sdma_output_ibhdr(txq->priv->dd, in hfi1_ipoib_send_dma_list()
555 (void)hfi1_ipoib_flush_tx_list(dev, txq); in hfi1_ipoib_send_dma_list()
593 txp.txq = &priv->txqs[skb_get_queue_mapping(skb)]; in hfi1_ipoib_send()
599 if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list)) in hfi1_ipoib_send()
619 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_sdma_sleep() local
624 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) { in hfi1_ipoib_sdma_sleep()
632 list_add_tail(&txreq->list, &txq->tx_list); in hfi1_ipoib_sdma_sleep()
633 if (list_empty(&txq->wait.list)) { in hfi1_ipoib_sdma_sleep()
636 if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) { in hfi1_ipoib_sdma_sleep()
637 trace_hfi1_txq_queued(txq); in hfi1_ipoib_sdma_sleep()
638 hfi1_ipoib_stop_txq(txq); in hfi1_ipoib_sdma_sleep()
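
When the SDMA engine runs out of descriptors, the request is parked on the txq's tx_list and no_desc latches the stop, so the later wakeup path can wake the queue exactly once. A userspace sketch of that park-and-latch step (list type hypothetical):

    #include <stdatomic.h>
    #include <stddef.h>

    struct txreq {
        struct txreq *next;
    };

    static struct txreq *tx_head;              /* deferred requests, FIFO */
    static struct txreq **tx_tail = &tx_head;
    static atomic_int no_desc;                 /* latched stop reason */

    void txq_stop(void);                       /* from the earlier sketch */

    static void defer_for_descriptors(struct txreq *req)
    {
        /* Append at the tail so packet order is preserved. */
        req->next = NULL;
        *tx_tail = req;
        tx_tail = &req->next;

        /* Only the first deferral latches no_desc and stops the queue. */
        if (!atomic_exchange(&no_desc, 1))
            txq_stop();
    }
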
660 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_sdma_wakeup() local
663 trace_hfi1_txq_wakeup(txq); in hfi1_ipoib_sdma_wakeup()
664 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) in hfi1_ipoib_sdma_wakeup()
673 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_flush_txq() local
675 struct net_device *dev = txq->priv->netdev; in hfi1_ipoib_flush_txq()
678 likely(!hfi1_ipoib_flush_tx_list(dev, txq))) in hfi1_ipoib_flush_txq()
679 if (atomic_xchg(&txq->tx_ring.no_desc, 0)) in hfi1_ipoib_flush_txq()
680 hfi1_ipoib_wake_txq(txq); in hfi1_ipoib_flush_txq()
705 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_init() local
708 tx_ring = &txq->tx_ring; in hfi1_ipoib_txreq_init()
709 iowait_init(&txq->wait, in hfi1_ipoib_txreq_init()
717 txq->priv = priv; in hfi1_ipoib_txreq_init()
718 txq->sde = NULL; in hfi1_ipoib_txreq_init()
719 INIT_LIST_HEAD(&txq->tx_list); in hfi1_ipoib_txreq_init()
720 atomic_set(&txq->tx_ring.stops, 0); in hfi1_ipoib_txreq_init()
721 atomic_set(&txq->tx_ring.ring_full, 0); in hfi1_ipoib_txreq_init()
722 atomic_set(&txq->tx_ring.no_desc, 0); in hfi1_ipoib_txreq_init()
723 txq->q_idx = i; in hfi1_ipoib_txreq_init()
724 txq->flow.tx_queue = 0xff; in hfi1_ipoib_txreq_init()
725 txq->flow.sc5 = 0xff; in hfi1_ipoib_txreq_init()
726 txq->pkts_sent = false; in hfi1_ipoib_txreq_init()
731 txq->tx_ring.items = in hfi1_ipoib_txreq_init()
734 if (!txq->tx_ring.items) in hfi1_ipoib_txreq_init()
737 txq->tx_ring.max_items = tx_ring_size; in hfi1_ipoib_txreq_init()
738 txq->tx_ring.shift = ilog2(tx_item_size); in hfi1_ipoib_txreq_init()
739 txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq); in hfi1_ipoib_txreq_init()
740 tx_ring = &txq->tx_ring; in hfi1_ipoib_txreq_init()
749 netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring); in hfi1_ipoib_txreq_init()
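
Storing ilog2(tx_item_size) as tx_ring.shift at init lets the hot path reach a slot with index << shift instead of index * item_size. A sketch of that addressing (field names follow the listing; the layout is assumed):

    #include <stdint.h>

    struct circ_buf {
        void    *items;      /* one contiguous allocation of slots */
        uint32_t max_items;  /* ring capacity */
        uint32_t shift;      /* ilog2 of the per-slot size */
    };

    /* Address of slot idx: base + (idx << shift). */
    static void *ring_slot(const struct circ_buf *r, uint32_t idx)
    {
        return (char *)r->items + ((uint64_t)idx << r->shift);
    }
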
756 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_init() local
758 netif_napi_del(&txq->napi); in hfi1_ipoib_txreq_init()
759 tx_ring = &txq->tx_ring; in hfi1_ipoib_txreq_init()
770 static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_drain_tx_list() argument
775 list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) { in hfi1_ipoib_drain_tx_list()
780 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_list()
783 txq->tx_ring.complete_txreqs++; in hfi1_ipoib_drain_tx_list()
786 if (hfi1_ipoib_used(txq)) in hfi1_ipoib_drain_tx_list()
787 dd_dev_warn(txq->priv->dd, in hfi1_ipoib_drain_tx_list()
789 txq->q_idx, in hfi1_ipoib_drain_tx_list()
790 hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, in hfi1_ipoib_drain_tx_list()
791 txq->tx_ring.complete_txreqs)); in hfi1_ipoib_drain_tx_list()
799 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_deinit() local
800 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_txreq_deinit()
802 iowait_cancel_work(&txq->wait); in hfi1_ipoib_txreq_deinit()
803 iowait_sdma_drain(&txq->wait); in hfi1_ipoib_txreq_deinit()
804 hfi1_ipoib_drain_tx_list(txq); in hfi1_ipoib_txreq_deinit()
805 netif_napi_del(&txq->napi); in hfi1_ipoib_txreq_deinit()
806 hfi1_ipoib_drain_tx_ring(txq); in hfi1_ipoib_txreq_deinit()
822 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_napi_tx_enable() local
824 napi_enable(&txq->napi); in hfi1_ipoib_napi_tx_enable()
834 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_napi_tx_disable() local
836 napi_disable(&txq->napi); in hfi1_ipoib_napi_tx_disable()
837 hfi1_ipoib_drain_tx_ring(txq); in hfi1_ipoib_napi_tx_disable()
844 struct hfi1_ipoib_txq *txq = &priv->txqs[q]; in hfi1_ipoib_tx_timeout() local
847 txq, q, in hfi1_ipoib_tx_timeout()
848 __netif_subqueue_stopped(dev, txq->q_idx), in hfi1_ipoib_tx_timeout()
849 atomic_read(&txq->tx_ring.stops), in hfi1_ipoib_tx_timeout()
850 atomic_read(&txq->tx_ring.no_desc), in hfi1_ipoib_tx_timeout()
851 atomic_read(&txq->tx_ring.ring_full)); in hfi1_ipoib_tx_timeout()
853 txq->sde, in hfi1_ipoib_tx_timeout()
854 txq->sde ? txq->sde->this_idx : 0); in hfi1_ipoib_tx_timeout()
855 dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int); in hfi1_ipoib_tx_timeout()
857 txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs, in hfi1_ipoib_tx_timeout()
858 hfi1_ipoib_used(txq)); in hfi1_ipoib_tx_timeout()
860 dev->tx_queue_len, txq->tx_ring.max_items); in hfi1_ipoib_tx_timeout()
862 txq->tx_ring.head, txq->tx_ring.tail); in hfi1_ipoib_tx_timeout()
864 !list_empty(&txq->wait.list)); in hfi1_ipoib_tx_timeout()
866 list_empty(&txq->tx_list)); in hfi1_ipoib_tx_timeout()