• Home
  • Raw
  • Download

Lines matching references to `sq` (cross-reference search results; each entry shows the original source line number, the matching line, and its enclosing function)

185 	struct send_queue *sq;  member
368 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
520 struct send_queue *sq, in __virtnet_xdp_xmit_one() argument
536 sg_init_one(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
538 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), in __virtnet_xdp_xmit_one()
570 v->sq + qp; \
577 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
590 struct send_queue *sq; in virtnet_xdp_xmit() local
607 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
615 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in virtnet_xdp_xmit()
633 if (__virtnet_xdp_xmit_one(vi, sq, xdpf)) in virtnet_xdp_xmit()
640 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
644 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
645 sq->stats.bytes += bytes; in virtnet_xdp_xmit()
646 sq->stats.packets += packets; in virtnet_xdp_xmit()
647 sq->stats.xdp_tx += n; in virtnet_xdp_xmit()
648 sq->stats.xdp_tx_drops += n - nxmit; in virtnet_xdp_xmit()
649 sq->stats.kicks += kicks; in virtnet_xdp_xmit()
650 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
652 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
1505 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) in free_old_xmit_skbs() argument
1512 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in free_old_xmit_skbs()
1535 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit_skbs()
1536 sq->stats.bytes += bytes; in free_old_xmit_skbs()
1537 sq->stats.packets += packets; in free_old_xmit_skbs()
1538 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit_skbs()
1555 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx() local
1558 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
1563 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
1564 free_old_xmit_skbs(sq, true); in virtnet_poll_cleantx()
1565 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
1567 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) in virtnet_poll_cleantx()
1579 struct send_queue *sq; in virtnet_poll() local
1595 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
1596 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
1597 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
1598 sq->stats.kicks++; in virtnet_poll()
1599 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
1601 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
1609 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); in virtnet_disable_queue_pair()
1630 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); in virtnet_enable_queue_pair()
1670 struct send_queue *sq = container_of(napi, struct send_queue, napi); in virtnet_poll_tx() local
1671 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
1672 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
1685 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
1686 free_old_xmit_skbs(sq, true); in virtnet_poll_tx()
1688 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) in virtnet_poll_tx()
1691 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
1696 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
1701 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
1704 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
1714 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) in xmit_skb() argument
1718 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
1743 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
1746 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
1752 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
1753 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
1758 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); in xmit_skb()
1765 struct send_queue *sq = &vi->sq[qnum]; in start_xmit() local
1769 bool use_napi = sq->napi.weight; in start_xmit()
1774 virtqueue_disable_cb(sq->vq); in start_xmit()
1776 free_old_xmit_skbs(sq, false); in start_xmit()
1779 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in start_xmit()
1785 err = xmit_skb(sq, skb); in start_xmit()
1815 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in start_xmit()
1818 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in start_xmit()
1819 virtqueue_napi_schedule(&sq->napi, sq->vq); in start_xmit()
1820 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in start_xmit()
1822 free_old_xmit_skbs(sq, false); in start_xmit()
1823 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in start_xmit()
1825 virtqueue_disable_cb(sq->vq); in start_xmit()
1831 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
1832 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
1833 sq->stats.kicks++; in start_xmit()
1834 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
1948 struct send_queue *sq = &vi->sq[i]; in virtnet_stats() local
1951 start = u64_stats_fetch_begin_irq(&sq->stats.syncp); in virtnet_stats()
1952 tpackets = sq->stats.packets; in virtnet_stats()
1953 tbytes = sq->stats.bytes; in virtnet_stats()
1954 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); in virtnet_stats()
2144 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
2181 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
2245 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
2361 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats() local
2363 stats_base = (u8 *)&sq->stats; in virtnet_get_ethtool_stats()
2365 start = u64_stats_fetch_begin_irq(&sq->stats.syncp); in virtnet_get_ethtool_stats()
2370 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
2422 if (napi_weight ^ vi->sq[0].napi.weight) { in virtnet_set_coalesce()
2426 vi->sq[i].napi.weight = napi_weight; in virtnet_set_coalesce()
2445 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
2624 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
2659 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2660 &vi->sq[i].napi); in virtnet_xdp_set()
2676 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2677 &vi->sq[i].napi); in virtnet_xdp_set()
2800 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
2809 kfree(vi->sq); in virtnet_free_queues()
2871 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
2958 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
2960 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
2979 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
3008 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
3009 if (!vi->sq) in virtnet_alloc_queues()
3020 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, in virtnet_alloc_queues()
3025 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
3028 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
3034 kfree(vi->sq); in virtnet_alloc_queues()