
Lines Matching refs:txq

39 struct hfi1_ipoib_txq *txq; member
47 struct hfi1_ipoib_txq *txq; member
59 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_used() argument
61 return hfi1_ipoib_txreqs(txq->sent_txreqs, in hfi1_ipoib_used()
62 atomic64_read(&txq->complete_txreqs)); in hfi1_ipoib_used()
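
These matches come from the hfi1 IPoIB accelerated transmit path. hfi1_ipoib_used() above reports how many send requests are still in flight; the helper it calls, hfi1_ipoib_txreqs(), is outside these matches, but from its arguments it presumably just subtracts completions from sends, and unsigned subtraction keeps that correct across counter wraparound. A minimal userspace sketch of the pattern, with placeholder types:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder model of the sent/completed accounting. Unsigned
     * subtraction stays correct even after either counter wraps, as
     * long as fewer than 2^64 requests are ever in flight at once. */
    struct txq_model {
        uint64_t sent_txreqs;             /* written by the xmit path */
        _Atomic uint64_t complete_txreqs; /* bumped by completions */
    };

    static uint64_t txq_used(struct txq_model *txq)
    {
        return txq->sent_txreqs - atomic_load(&txq->complete_txreqs);
    }

    int main(void)
    {
        struct txq_model q = { .sent_txreqs = 10 };

        atomic_store(&q.complete_txreqs, 7);
        printf("in flight: %llu\n", (unsigned long long)txq_used(&q));
        return 0; /* prints 3 */
    }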
65 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_stop_txq() argument
67 trace_hfi1_txq_stop(txq); in hfi1_ipoib_stop_txq()
68 if (atomic_inc_return(&txq->stops) == 1) in hfi1_ipoib_stop_txq()
69 netif_stop_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_stop_txq()
72 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_wake_txq() argument
74 trace_hfi1_txq_wake(txq); in hfi1_ipoib_wake_txq()
75 if (atomic_dec_and_test(&txq->stops)) in hfi1_ipoib_wake_txq()
76 netif_wake_subqueue(txq->priv->netdev, txq->q_idx); in hfi1_ipoib_wake_txq()
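
hfi1_ipoib_stop_txq()/hfi1_ipoib_wake_txq() implement a counted stop: two independent conditions (ring full, no sdma descriptors) can each stop the subqueue, and it wakes only when the last one releases its hold. atomic_inc_return() == 1 and atomic_dec_and_test() map onto C11 fetch_add/fetch_sub as below; the netif calls are reduced to a flag in this sketch:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic int stops;
    static bool running = true;

    static void stop_txq(void)
    {
        if (atomic_fetch_add(&stops, 1) == 0) /* first stopper */
            running = false;                  /* netif_stop_subqueue() */
    }

    static void wake_txq(void)
    {
        if (atomic_fetch_sub(&stops, 1) == 1) /* last holder gone */
            running = true;                   /* netif_wake_subqueue() */
    }

    int main(void)
    {
        stop_txq();                       /* ring full */
        stop_txq();                       /* out of descriptors */
        wake_txq();                       /* ring drained... */
        printf("running=%d\n", running);  /* 0: still stopped */
        wake_txq();                       /* ...descriptors back */
        printf("running=%d\n", running);  /* 1: queue reopens */
        return 0;
    }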
79 static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_ring_hwat() argument
81 return min_t(uint, txq->priv->netdev->tx_queue_len, in hfi1_ipoib_ring_hwat()
82 txq->tx_ring.max_items - 1); in hfi1_ipoib_ring_hwat()
85 static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_ring_lwat() argument
87 return min_t(uint, txq->priv->netdev->tx_queue_len, in hfi1_ipoib_ring_lwat()
88 txq->tx_ring.max_items) >> 1; in hfi1_ipoib_ring_lwat()
91 static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_check_queue_depth() argument
93 ++txq->sent_txreqs; in hfi1_ipoib_check_queue_depth()
94 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && in hfi1_ipoib_check_queue_depth()
95 !atomic_xchg(&txq->ring_full, 1)) { in hfi1_ipoib_check_queue_depth()
96 trace_hfi1_txq_full(txq); in hfi1_ipoib_check_queue_depth()
97 hfi1_ipoib_stop_txq(txq); in hfi1_ipoib_check_queue_depth()
101 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_check_queue_stopped() argument
103 struct net_device *dev = txq->priv->netdev; in hfi1_ipoib_check_queue_stopped()
119 if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && in hfi1_ipoib_check_queue_stopped()
120 atomic_xchg(&txq->ring_full, 0)) { in hfi1_ipoib_check_queue_stopped()
121 trace_hfi1_txq_xmit_unstopped(txq); in hfi1_ipoib_check_queue_stopped()
122 hfi1_ipoib_wake_txq(txq); in hfi1_ipoib_check_queue_stopped()
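
Taken together, hfi1_ipoib_ring_hwat()/_lwat() and the two check functions form a watermark hysteresis: stop when in-flight requests reach roughly ring capacity (capped by tx_queue_len), wake only after they drop below half of it, and gate both edges with atomic_xchg() so each transition fires exactly once even under concurrent callers. A compilable sketch with made-up sizes:

    #include <stdatomic.h>
    #include <stdio.h>

    #define TX_QUEUE_LEN 1024ul
    #define RING_ITEMS    256ul
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static _Atomic int ring_full;
    static int running = 1;

    static unsigned long hwat(void) { return MIN(TX_QUEUE_LEN, RING_ITEMS - 1); }
    static unsigned long lwat(void) { return MIN(TX_QUEUE_LEN, RING_ITEMS) >> 1; }

    static void check_queue_depth(unsigned long used)
    {
        if (used >= hwat() && !atomic_exchange(&ring_full, 1))
            running = 0;                 /* hfi1_ipoib_stop_txq() */
    }

    static void check_queue_stopped(unsigned long used)
    {
        if (used < lwat() && atomic_exchange(&ring_full, 0))
            running = 1;                 /* hfi1_ipoib_wake_txq() */
    }

    int main(void)
    {
        check_queue_depth(255);           /* reaches hwat: stops */
        printf("running=%d\n", running);  /* 0 */
        check_queue_stopped(100);         /* below lwat (128): wakes */
        printf("running=%d\n", running);  /* 1 */
        return 0;
    }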
137 le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
138 tx->txq->sde->this_idx); in hfi1_ipoib_free_tx()
146 static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget) in hfi1_ipoib_drain_tx_ring() argument
148 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_drain_tx_ring()
169 atomic64_add(work_done, &txq->complete_txreqs); in hfi1_ipoib_drain_tx_ring()
176 hfi1_ipoib_check_queue_stopped(txq); in hfi1_ipoib_drain_tx_ring()
184 struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis]; in hfi1_ipoib_process_tx_ring() local
186 int work_done = hfi1_ipoib_drain_tx_ring(txq, budget); in hfi1_ipoib_process_tx_ring()
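
hfi1_ipoib_drain_tx_ring() is the NAPI poll body: it reaps at most budget completions from the circular ring, credits them to complete_txreqs, and rechecks the stopped state; returning less than the budget is what lets the caller complete NAPI polling. A userspace model of the budgeted drain (field names mirror the excerpt, not the driver's actual layout):

    #include <stdio.h>

    struct ring_model {
        unsigned long head;      /* producer index */
        unsigned long tail;      /* consumer index */
        unsigned long max_items; /* assumed power of two */
    };

    static int drain_tx_ring(struct ring_model *r, int budget)
    {
        int work_done = 0;

        while (work_done < budget && r->tail != r->head) {
            /* here the driver frees the skb and sdma descriptors */
            r->tail = (r->tail + 1) & (r->max_items - 1);
            work_done++;
        }
        return work_done; /* < budget: ring fully drained */
    }

    int main(void)
    {
        struct ring_model r = { .head = 5, .tail = 0, .max_items = 8 };

        printf("reaped %d\n", drain_tx_ring(&r, 64)); /* 5 */
        return 0;
    }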
196 struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring; in hfi1_ipoib_add_tx()
212 napi_schedule_irqoff(tx->txq->napi); in hfi1_ipoib_add_tx()
214 struct hfi1_ipoib_txq *txq = tx->txq; in hfi1_ipoib_add_tx() local
219 atomic64_inc(&txq->complete_txreqs); in hfi1_ipoib_add_tx()
220 dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx); in hfi1_ipoib_add_tx()
366 ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs)); in hfi1_ipoib_build_ib_tx_headers()
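
The PSN written into bth[2] is just the low bits of the per-queue send counter: BTH packet sequence numbers are 24 bits wide in InfiniBand, so a monotonically increasing counter masked down yields a valid, ordered PSN per queue. A sketch; the exact mask_psn() constant is an assumption here:

    #include <stdint.h>
    #include <stdio.h>

    #define PSN_MASK 0x00FFFFFFu  /* 24-bit BTH PSN field (assumed) */

    static uint32_t mask_psn_model(uint64_t sent_txreqs)
    {
        return (uint32_t)sent_txreqs & PSN_MASK;
    }

    int main(void)
    {
        printf("%#x\n", mask_psn_model(0x1234567890ull)); /* 0x567890 */
        return 0;
    }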
401 tx->txq = txp->txq; in hfi1_ipoib_send_dma_common()
409 if (txp->txq->flow.as_int != txp->flow.as_int) { in hfi1_ipoib_send_dma_common()
410 txp->txq->flow.tx_queue = txp->flow.tx_queue; in hfi1_ipoib_send_dma_common()
411 txp->txq->flow.sc5 = txp->flow.sc5; in hfi1_ipoib_send_dma_common()
412 txp->txq->sde = in hfi1_ipoib_send_dma_common()
416 trace_hfi1_flow_switch(txp->txq); in hfi1_ipoib_send_dma_common()
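
The flow check in hfi1_ipoib_send_dma_common() compares two fields (tx_queue, sc5) in one shot through an as_int overlay; only on a mismatch does it adopt the new flow and reselect the sdma engine. A C11 sketch of the overlay trick, with assumed field widths:

    #include <stdint.h>
    #include <stdio.h>

    union flow_model {
        struct {
            uint8_t tx_queue;
            uint8_t sc5;
        };
        uint16_t as_int; /* both fields viewed as one integer */
    };

    int main(void)
    {
        union flow_model cur  = { .tx_queue = 3, .sc5 = 1 };
        union flow_model next = { .tx_queue = 3, .sc5 = 2 };

        if (cur.as_int != next.as_int) {
            /* flush the old tx_list, then switch flows, as the
             * driver does before picking a new sdma engine */
            cur.as_int = next.as_int;
        }
        printf("flow %#x\n", (unsigned)cur.as_int);
        return 0;
    }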
429 struct hfi1_ipoib_txq *txq) in hfi1_ipoib_submit_tx_list() argument
434 ret = sdma_send_txlist(txq->sde, in hfi1_ipoib_submit_tx_list()
435 iowait_get_ib_work(&txq->wait), in hfi1_ipoib_submit_tx_list()
436 &txq->tx_list, in hfi1_ipoib_submit_tx_list()
441 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret); in hfi1_ipoib_submit_tx_list()
447 struct hfi1_ipoib_txq *txq) in hfi1_ipoib_flush_tx_list() argument
451 if (!list_empty(&txq->tx_list)) { in hfi1_ipoib_flush_tx_list()
453 ret = hfi1_ipoib_submit_tx_list(dev, txq); in hfi1_ipoib_flush_tx_list()
463 static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq, in hfi1_ipoib_submit_tx() argument
468 ret = sdma_send_txreq(txq->sde, in hfi1_ipoib_submit_tx()
469 iowait_get_ib_work(&txq->wait), in hfi1_ipoib_submit_tx()
471 txq->pkts_sent); in hfi1_ipoib_submit_tx()
473 txq->pkts_sent = true; in hfi1_ipoib_submit_tx()
474 iowait_starve_clear(txq->pkts_sent, &txq->wait); in hfi1_ipoib_submit_tx()
485 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_single() local
503 ret = hfi1_ipoib_submit_tx(txq, tx); in hfi1_ipoib_send_dma_single()
509 hfi1_ipoib_check_queue_depth(txq); in hfi1_ipoib_send_dma_single()
513 txq->pkts_sent = false; in hfi1_ipoib_send_dma_single()
530 struct hfi1_ipoib_txq *txq = txp->txq; in hfi1_ipoib_send_dma_list() local
534 if (txq->flow.as_int != txp->flow.as_int) { in hfi1_ipoib_send_dma_list()
537 trace_hfi1_flow_flush(txq); in hfi1_ipoib_send_dma_list()
538 ret = hfi1_ipoib_flush_tx_list(dev, txq); in hfi1_ipoib_send_dma_list()
560 list_add_tail(&tx->txreq.list, &txq->tx_list); in hfi1_ipoib_send_dma_list()
562 hfi1_ipoib_check_queue_depth(txq); in hfi1_ipoib_send_dma_list()
569 (void)hfi1_ipoib_flush_tx_list(dev, txq); in hfi1_ipoib_send_dma_list()
607 txp.txq = &priv->txqs[skb_get_queue_mapping(skb)]; in hfi1_ipoib_send()
613 if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list)) in hfi1_ipoib_send()
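
The send entry point picks between the two submission paths above: if the stack signals that more packets are coming (netdev_xmit_more()) or earlier packets are already parked on tx_list, it batches via the list path and defers the hardware doorbell; otherwise it submits the single request immediately. Reduced to its decision logic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for netdev_xmit_more() and list_empty(&txq->tx_list). */
    static const char *pick_send_path(bool xmit_more, bool tx_list_empty)
    {
        if (xmit_more || !tx_list_empty)
            return "list path: queue now, flush on the last packet";
        return "single path: submit to sdma immediately";
    }

    int main(void)
    {
        puts(pick_send_path(true, true));
        puts(pick_send_path(false, true));
        return 0;
    }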
633 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_sdma_sleep() local
638 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) { in hfi1_ipoib_sdma_sleep()
646 list_add_tail(&txreq->list, &txq->tx_list); in hfi1_ipoib_sdma_sleep()
647 if (list_empty(&txq->wait.list)) { in hfi1_ipoib_sdma_sleep()
650 if (!atomic_xchg(&txq->no_desc, 1)) { in hfi1_ipoib_sdma_sleep()
651 trace_hfi1_txq_queued(txq); in hfi1_ipoib_sdma_sleep()
652 hfi1_ipoib_stop_txq(txq); in hfi1_ipoib_sdma_sleep()
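
hfi1_ipoib_sdma_sleep() is the resource-exhaustion path: a request that cannot get sdma descriptors is parked on tx_list, and the no_desc flag is exchanged so the subqueue is stopped exactly once; the later wakeup/flush reverses both steps. A compact model (counters replace the real list and netif calls):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int no_desc;
    static int parked, running = 1;

    static void sdma_sleep(void)               /* out of descriptors */
    {
        parked++;                              /* list_add_tail(...) */
        if (!atomic_exchange(&no_desc, 1))     /* first shortage only */
            running = 0;                       /* hfi1_ipoib_stop_txq() */
    }

    static void sdma_wakeup_flush(void)        /* descriptors are back */
    {
        parked = 0;                            /* submit pending list */
        if (atomic_exchange(&no_desc, 0))      /* we had stopped it */
            running = 1;                       /* hfi1_ipoib_wake_txq() */
    }

    int main(void)
    {
        sdma_sleep();
        sdma_sleep();
        printf("parked=%d running=%d\n", parked, running); /* 2 0 */
        sdma_wakeup_flush();
        printf("parked=%d running=%d\n", parked, running); /* 0 1 */
        return 0;
    }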
674 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_sdma_wakeup() local
677 trace_hfi1_txq_wakeup(txq); in hfi1_ipoib_sdma_wakeup()
678 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) in hfi1_ipoib_sdma_wakeup()
687 struct hfi1_ipoib_txq *txq = in hfi1_ipoib_flush_txq() local
689 struct net_device *dev = txq->priv->netdev; in hfi1_ipoib_flush_txq()
692 likely(!hfi1_ipoib_flush_tx_list(dev, txq))) in hfi1_ipoib_flush_txq()
693 if (atomic_xchg(&txq->no_desc, 0)) in hfi1_ipoib_flush_txq()
694 hfi1_ipoib_wake_txq(txq); in hfi1_ipoib_flush_txq()
734 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_init() local
736 iowait_init(&txq->wait, in hfi1_ipoib_txreq_init()
744 txq->priv = priv; in hfi1_ipoib_txreq_init()
745 txq->sde = NULL; in hfi1_ipoib_txreq_init()
746 INIT_LIST_HEAD(&txq->tx_list); in hfi1_ipoib_txreq_init()
747 atomic64_set(&txq->complete_txreqs, 0); in hfi1_ipoib_txreq_init()
748 atomic_set(&txq->stops, 0); in hfi1_ipoib_txreq_init()
749 atomic_set(&txq->ring_full, 0); in hfi1_ipoib_txreq_init()
750 atomic_set(&txq->no_desc, 0); in hfi1_ipoib_txreq_init()
751 txq->q_idx = i; in hfi1_ipoib_txreq_init()
752 txq->flow.tx_queue = 0xff; in hfi1_ipoib_txreq_init()
753 txq->flow.sc5 = 0xff; in hfi1_ipoib_txreq_init()
754 txq->pkts_sent = false; in hfi1_ipoib_txreq_init()
759 txq->tx_ring.items = in hfi1_ipoib_txreq_init()
763 if (!txq->tx_ring.items) in hfi1_ipoib_txreq_init()
766 spin_lock_init(&txq->tx_ring.producer_lock); in hfi1_ipoib_txreq_init()
767 spin_lock_init(&txq->tx_ring.consumer_lock); in hfi1_ipoib_txreq_init()
768 txq->tx_ring.max_items = tx_ring_size; in hfi1_ipoib_txreq_init()
770 txq->napi = &priv->tx_napis[i]; in hfi1_ipoib_txreq_init()
771 netif_tx_napi_add(dev, txq->napi, in hfi1_ipoib_txreq_init()
780 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_init() local
782 netif_napi_del(txq->napi); in hfi1_ipoib_txreq_init()
783 kfree(txq->tx_ring.items); in hfi1_ipoib_txreq_init()
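
The init loop gives every queue its own item array plus separate producer and consumer locks, so the xmit path and the completion path never contend on one lock, and registers a dedicated TX NAPI context per queue. A userspace sketch of that per-queue ring setup (pthread spinlocks stand in for the kernel's):

    #include <pthread.h>
    #include <stdlib.h>

    struct ring_model {
        void **items;                      /* kcalloc'd item array */
        unsigned long max_items;
        pthread_spinlock_t producer_lock;  /* xmit side */
        pthread_spinlock_t consumer_lock;  /* completion side */
    };

    static int ring_init(struct ring_model *r, unsigned long n)
    {
        r->items = calloc(n, sizeof(*r->items));
        if (!r->items)
            return -1;                     /* caller unwinds, as above */
        r->max_items = n;
        pthread_spin_init(&r->producer_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&r->consumer_lock, PTHREAD_PROCESS_PRIVATE);
        return 0;
    }

    int main(void)
    {
        struct ring_model r;

        return ring_init(&r, 256) ? 1 : 0;
    }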
799 static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) in hfi1_ipoib_drain_tx_list() argument
803 atomic64_t *complete_txreqs = &txq->complete_txreqs; in hfi1_ipoib_drain_tx_list()
805 list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) { in hfi1_ipoib_drain_tx_list()
810 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_list()
812 kmem_cache_free(txq->priv->txreq_cache, tx); in hfi1_ipoib_drain_tx_list()
816 if (hfi1_ipoib_used(txq)) in hfi1_ipoib_drain_tx_list()
817 dd_dev_warn(txq->priv->dd, in hfi1_ipoib_drain_tx_list()
819 txq->q_idx, in hfi1_ipoib_drain_tx_list()
820 hfi1_ipoib_txreqs(txq->sent_txreqs, in hfi1_ipoib_drain_tx_list()
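
hfi1_ipoib_drain_tx_list() tears down whatever is still parked: it walks tx_list with the deletion-safe iterator, frees each request, counts it as completed, and finally warns if hfi1_ipoib_used() says something never came back. The safe-iteration idiom, modeled on a plain singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct req { struct req *next; };

    static unsigned long drain_list(struct req **head)
    {
        unsigned long completed = 0;
        struct req *r = *head, *tmp;

        while (r) {
            tmp = r->next; /* grab next before freeing: the _safe part */
            free(r);
            completed++;
            r = tmp;
        }
        *head = NULL;
        return completed;
    }

    int main(void)
    {
        struct req *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct req *r = malloc(sizeof(*r));
            r->next = head;
            head = r;
        }
        printf("drained %lu\n", drain_list(&head)); /* 3 */
        return 0;
    }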
829 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_txreq_deinit() local
831 iowait_cancel_work(&txq->wait); in hfi1_ipoib_txreq_deinit()
832 iowait_sdma_drain(&txq->wait); in hfi1_ipoib_txreq_deinit()
833 hfi1_ipoib_drain_tx_list(txq); in hfi1_ipoib_txreq_deinit()
834 netif_napi_del(txq->napi); in hfi1_ipoib_txreq_deinit()
835 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items); in hfi1_ipoib_txreq_deinit()
836 kfree(txq->tx_ring.items); in hfi1_ipoib_txreq_deinit()
855 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_napi_tx_enable() local
857 napi_enable(txq->napi); in hfi1_ipoib_napi_tx_enable()
867 struct hfi1_ipoib_txq *txq = &priv->txqs[i]; in hfi1_ipoib_napi_tx_disable() local
869 napi_disable(txq->napi); in hfi1_ipoib_napi_tx_disable()
870 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items); in hfi1_ipoib_napi_tx_disable()
877 struct hfi1_ipoib_txq *txq = &priv->txqs[q]; in hfi1_ipoib_tx_timeout() local
878 u64 completed = atomic64_read(&txq->complete_txreqs); in hfi1_ipoib_tx_timeout()
881 txq, q, in hfi1_ipoib_tx_timeout()
882 __netif_subqueue_stopped(dev, txq->q_idx), in hfi1_ipoib_tx_timeout()
883 atomic_read(&txq->stops), in hfi1_ipoib_tx_timeout()
884 atomic_read(&txq->no_desc), in hfi1_ipoib_tx_timeout()
885 atomic_read(&txq->ring_full)); in hfi1_ipoib_tx_timeout()
887 txq->sde, in hfi1_ipoib_tx_timeout()
888 txq->sde ? txq->sde->this_idx : 0); in hfi1_ipoib_tx_timeout()
889 dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int); in hfi1_ipoib_tx_timeout()
891 txq->sent_txreqs, completed, hfi1_ipoib_used(txq)); in hfi1_ipoib_tx_timeout()
893 dev->tx_queue_len, txq->tx_ring.max_items); in hfi1_ipoib_tx_timeout()
895 txq->tx_ring.head, txq->tx_ring.tail); in hfi1_ipoib_tx_timeout()
897 !list_empty(&txq->wait.list)); in hfi1_ipoib_tx_timeout()
899 list_empty(&txq->tx_list)); in hfi1_ipoib_tx_timeout()