
Lines Matching refs:txq (each entry shows the source line number, the matching code, the enclosing function, and, where txq is being declared, its role as an argument or local)

76 void hinic_txq_clean_stats(struct hinic_txq *txq)  in hinic_txq_clean_stats()  argument
78 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_clean_stats()
95 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) in hinic_txq_get_stats() argument
97 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in hinic_txq_get_stats()
115 static void txq_stats_init(struct hinic_txq *txq) in txq_stats_init() argument
117 struct hinic_txq_stats *txq_stats = &txq->txq_stats; in txq_stats_init()
120 hinic_txq_clean_stats(txq); in txq_stats_init()
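
The u64_stats_update_begin()/u64_stats_update_end() pairs seen throughout this listing protect the 64-bit counters in struct hinic_txq_stats with the kernel's u64_stats_sync seqcount, so a reader on a 32-bit machine never sees a torn value. A minimal sketch of the matching read side, assuming only the pkts and bytes fields that appear in this listing (the fetch API is from <linux/u64_stats_sync.h>; some kernel versions spell it u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq()):

#include <linux/u64_stats_sync.h>

/* Snapshot two 64-bit counters consistently; loops only if a writer
 * raced with us inside u64_stats_update_begin()/_end().
 */
static void txq_stats_snapshot(struct hinic_txq_stats *txq_stats,
                               u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&txq_stats->syncp);
                *pkts = txq_stats->pkts;
                *bytes = txq_stats->bytes;
        } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
}
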
498 struct hinic_txq *txq; in hinic_lb_xmit_frame() local
501 txq = &nic_dev->txqs[q_id]; in hinic_lb_xmit_frame()
502 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
505 err = tx_map_skb(nic_dev, skb, txq->sges); in hinic_lb_xmit_frame()
511 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
515 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_lb_xmit_frame()
521 tx_unmap_skb(nic_dev, skb, txq->sges); in hinic_lb_xmit_frame()
523 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
524 txq->txq_stats.tx_busy++; in hinic_lb_xmit_frame()
525 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
532 hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); in hinic_lb_xmit_frame()
533 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_lb_xmit_frame()
538 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_lb_xmit_frame()
544 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
545 txq->txq_stats.tx_dropped++; in hinic_lb_xmit_frame()
546 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_lb_xmit_frame()
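
The block around lines 511-525 is the ring-full path: when hinic_sq_get_wqe() finds no free WQE space, the subqueue is stopped and the get is retried once, because a concurrent completion may have freed space between the failed get and the stop; only if the retry also fails is the skb unmapped and tx_busy incremented. A simplified sketch of that shape, not the driver's verbatim code: netdev, qp, and the process_sq_wqe label are assumptions taken from the surrounding function, and the real function routes the NETDEV_TX_BUSY return through its doorbell-flush tail rather than returning directly.

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                /* Re-check: a completion may have freed WQE space between
                 * the failed get and the stop above.
                 */
                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);

                return NETDEV_TX_BUSY;  /* simplified; see note above */
        }
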
559 struct hinic_txq *txq; in hinic_xmit_frame() local
562 txq = &nic_dev->txqs[q_id]; in hinic_xmit_frame()
563 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
576 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_xmit_frame()
577 txq->txq_stats.big_frags_pkts++; in hinic_xmit_frame()
578 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_xmit_frame()
581 if (nr_sges > txq->max_sges) { in hinic_xmit_frame()
586 err = tx_map_skb(nic_dev, skb, txq->sges); in hinic_xmit_frame()
592 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
599 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
605 tx_unmap_skb(nic_dev, skb, txq->sges); in hinic_xmit_frame()
607 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_xmit_frame()
608 txq->txq_stats.tx_busy++; in hinic_xmit_frame()
609 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_xmit_frame()
616 hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); in hinic_xmit_frame()
622 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_xmit_frame()
627 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_xmit_frame()
632 hinic_sq_return_wqe(txq->sq, wqe_size); in hinic_xmit_frame()
633 tx_unmap_skb(nic_dev, skb, txq->sges); in hinic_xmit_frame()
639 u64_stats_update_begin(&txq->txq_stats.syncp); in hinic_xmit_frame()
640 txq->txq_stats.tx_dropped++; in hinic_xmit_frame()
641 u64_stats_update_end(&txq->txq_stats.syncp); in hinic_xmit_frame()
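
hinic_xmit_frame() layers scatter-gather accounting on top of the same transmit flow: heavily fragmented skbs are counted in big_frags_pkts (line 577), and anything needing more SGEs than txq->max_sges (line 581) is dropped before any DMA mapping happens. A hedged sketch of those two checks; deriving nr_sges from the skb's fragment count and the fixed threshold of 17 match the mainline driver, but treat both as assumptions here:

        /* One SGE for the linear part plus one per page fragment. */
        nr_sges = skb_shinfo(skb)->nr_frags + 1;

        if (nr_sges > 17) {
                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.big_frags_pkts++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        if (nr_sges > txq->max_sges) {
                netdev_err(netdev, "Too many Tx sges\n");
                goto skb_error;         /* counted as tx_dropped, line 640 */
        }
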
664 static void free_all_tx_skbs(struct hinic_txq *txq) in free_all_tx_skbs() argument
666 struct hinic_dev *nic_dev = netdev_priv(txq->netdev); in free_all_tx_skbs()
667 struct hinic_sq *sq = txq->sq; in free_all_tx_skbs()
681 hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); in free_all_tx_skbs()
685 tx_free_skb(nic_dev, skb, txq->free_sges); in free_all_tx_skbs()
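
free_all_tx_skbs() drains whatever is still posted on the SQ at teardown: each pending WQE's SGEs are copied back into txq->free_sges (line 681) so tx_free_skb() can DMA-unmap and free the skb (line 685). A rough sketch of such a drain loop; hinic_sq_read_wqe() and hinic_sq_put_wqe() are assumed helper names, and their exact signatures vary across kernel versions:

        while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                /* Recover the DMA addresses so tx_free_skb() can unmap. */
                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                /* Return the WQE to the ring, then unmap and free. */
                hinic_sq_put_wqe(sq, wqe_size);
                tx_free_skb(nic_dev, skb, txq->free_sges);
        }
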
698 struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); in free_tx_poll() local
699 struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); in free_tx_poll()
700 struct hinic_dev *nic_dev = netdev_priv(txq->netdev); in free_tx_poll()
702 struct hinic_sq *sq = txq->sq; in free_tx_poll()
736 hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); in free_tx_poll()
740 tx_free_skb(nic_dev, skb, txq->free_sges); in free_tx_poll()
745 netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); in free_tx_poll()
753 u64_stats_update_begin(&txq->txq_stats.syncp); in free_tx_poll()
754 txq->txq_stats.tx_wake++; in free_tx_poll()
755 u64_stats_update_end(&txq->txq_stats.syncp); in free_tx_poll()
758 u64_stats_update_begin(&txq->txq_stats.syncp); in free_tx_poll()
759 txq->txq_stats.bytes += tx_bytes; in free_tx_poll()
760 txq->txq_stats.pkts += pkts; in free_tx_poll()
761 u64_stats_update_end(&txq->txq_stats.syncp); in free_tx_poll()
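
free_tx_poll() is the NAPI completion handler: it reaps finished WQEs up to the NAPI budget, wakes the stopped subqueue once space reappears (tx_wake, line 754), and folds the reaped totals into a single stats update (lines 758-761). When it finishes under budget it must complete NAPI and unmask the queue's vector; a sketch of that tail, where hinic_hwdev_set_msix_state() with HINIC_MSIX_ENABLE is assumed to be the unmask helper mirroring the mask in tx_irq() below:

        if (pkts < budget) {
                napi_complete(napi);

                /* Re-arm the MSI-X vector that tx_irq() masked before
                 * scheduling this poll.
                 */
                hinic_hwdev_set_msix_state(nic_dev->hwdev, sq->msix_entry,
                                           HINIC_MSIX_ENABLE);
                return pkts;
        }

        return budget;  /* more work pending; NAPI stays scheduled */
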
778 struct hinic_txq *txq = data; in tx_irq() local
781 nic_dev = netdev_priv(txq->netdev); in tx_irq()
786 txq->sq->msix_entry, in tx_irq()
789 hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); in tx_irq()
791 napi_schedule(&txq->napi); in tx_irq()
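
tx_irq() has the standard NAPI hard-IRQ shape: mask the queue's vector so it cannot re-fire while completions run in softirq context (the msix_entry argument on line 786), reset the hardware coalescing counter (line 789), and hand off to napi_schedule() (line 791). Sketched below; the masking call and HINIC_MSIX_DISABLE are assumptions inferred from line 786:

static irqreturn_t tx_irq(int irq, void *data)
{
        struct hinic_txq *txq = data;
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);

        /* Mask until free_tx_poll() completes and re-enables the vector. */
        hinic_hwdev_set_msix_state(nic_dev->hwdev, txq->sq->msix_entry,
                                   HINIC_MSIX_DISABLE);
        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

        napi_schedule(&txq->napi);
        return IRQ_HANDLED;
}
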
795 static int tx_request_irq(struct hinic_txq *txq) in tx_request_irq() argument
797 struct hinic_dev *nic_dev = netdev_priv(txq->netdev); in tx_request_irq()
803 struct hinic_sq *sq = txq->sq; in tx_request_irq()
809 netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight); in tx_request_irq()
824 netif_err(nic_dev, drv, txq->netdev, in tx_request_irq()
826 netif_napi_del(&txq->napi); in tx_request_irq()
830 err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); in tx_request_irq()
833 netif_napi_del(&txq->napi); in tx_request_irq()
840 static void tx_free_irq(struct hinic_txq *txq) in tx_free_irq() argument
842 struct hinic_sq *sq = txq->sq; in tx_free_irq()
844 free_irq(sq->irq, txq); in tx_free_irq()
845 netif_napi_del(&txq->napi); in tx_free_irq()
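
tx_free_irq() (lines 840-845) reverses tx_request_irq() step for step: free_irq() with the same txq cookie that request_irq() registered on line 830, then netif_napi_del() to undo the netif_napi_add() from line 809; the error paths inside tx_request_irq (lines 824-833) unwind the same way. A condensed sketch of the pairing, omitting the interrupt-coalescing setup that the error message on line 824 belongs to; the four-argument netif_napi_add() matches the kernel vintage of this listing (newer kernels drop the weight parameter):

static int tx_request_irq(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_sq *sq = txq->sq;
        int err;

        netif_napi_add(txq->netdev, &txq->napi, free_tx_poll,
                       nic_dev->tx_weight);

        err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
        if (err) {
                netif_napi_del(&txq->napi);     /* unwind on failure */
                return err;
        }

        return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
        struct hinic_sq *sq = txq->sq;

        free_irq(sq->irq, txq);         /* same cookie as request_irq() */
        netif_napi_del(&txq->napi);
}
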
856 int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, in hinic_init_txq() argument
864 txq->netdev = netdev; in hinic_init_txq()
865 txq->sq = sq; in hinic_init_txq()
867 txq_stats_init(txq); in hinic_init_txq()
869 txq->max_sges = HINIC_MAX_SQ_BUFDESCS; in hinic_init_txq()
871 txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges, in hinic_init_txq()
872 sizeof(*txq->sges), GFP_KERNEL); in hinic_init_txq()
873 if (!txq->sges) in hinic_init_txq()
876 txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges, in hinic_init_txq()
877 sizeof(*txq->free_sges), GFP_KERNEL); in hinic_init_txq()
878 if (!txq->free_sges) { in hinic_init_txq()
884 txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); in hinic_init_txq()
885 if (!txq->irq_name) { in hinic_init_txq()
890 sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id); in hinic_init_txq()
897 err = tx_request_irq(txq); in hinic_init_txq()
907 devm_kfree(&netdev->dev, txq->irq_name); in hinic_init_txq()
910 devm_kfree(&netdev->dev, txq->free_sges); in hinic_init_txq()
913 devm_kfree(&netdev->dev, txq->sges); in hinic_init_txq()
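
hinic_init_txq() allocates its per-queue arrays with devm_* helpers yet still frees them explicitly on every error path (lines 907-913), so a failed queue init does not leave buffers pinned until the device itself is released. The unwinding is a conventional goto ladder; sketched below with the irq_name step elided and the err_* label names assumed:

        txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
                                 sizeof(*txq->sges), GFP_KERNEL);
        if (!txq->sges)
                return -ENOMEM;

        txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
                                      sizeof(*txq->free_sges), GFP_KERNEL);
        if (!txq->free_sges) {
                err = -ENOMEM;
                goto err_alloc_free_sges;
        }

        err = tx_request_irq(txq);
        if (err)
                goto err_req_tx_irq;

        return 0;

err_req_tx_irq:
        devm_kfree(&netdev->dev, txq->free_sges);
err_alloc_free_sges:
        devm_kfree(&netdev->dev, txq->sges);
        return err;
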
921 void hinic_clean_txq(struct hinic_txq *txq) in hinic_clean_txq() argument
923 struct net_device *netdev = txq->netdev; in hinic_clean_txq()
925 tx_free_irq(txq); in hinic_clean_txq()
927 free_all_tx_skbs(txq); in hinic_clean_txq()
929 devm_kfree(&netdev->dev, txq->irq_name); in hinic_clean_txq()
930 devm_kfree(&netdev->dev, txq->free_sges); in hinic_clean_txq()
931 devm_kfree(&netdev->dev, txq->sges); in hinic_clean_txq()