Lines matching references to sq (struct send_queue) in drivers/net/virtio_net.c

180 struct send_queue *sq;  member
340 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
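From this single match, the TX completion interrupt path can be sketched; everything except line 340 is reconstructed from context and may differ from the exact kernel revision:

    static void skb_xmit_done(struct virtqueue *vq)
    {
        struct virtnet_info *vi = vq->vdev->priv;
        struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

        /* Suppress further TX interrupts; completions are reaped either
         * by TX napi or opportunistically from the transmit path. */
        virtqueue_disable_cb(vq);

        if (napi->weight)
            virtqueue_napi_schedule(napi, vq);
        else
            /* No TX napi: just wake the stack's subqueue. */
            netif_wake_subqueue(vi->dev, vq2txq(vq));
    }
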
447 struct send_queue *sq, in __virtnet_xdp_xmit_one() argument
467 sg_init_one(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
469 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), in __virtnet_xdp_xmit_one()
482 return &vi->sq[qp]; in virtnet_xdp_sq()
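Lines 447-482 outline the single-frame XDP transmit helper and the per-CPU send-queue lookup. A sketch assembled from those matches; the virtio-net header setup inside __virtnet_xdp_xmit_one() and the queue-index arithmetic in virtnet_xdp_sq() are elided:

    static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                      struct send_queue *sq,
                                      struct xdp_frame *xdpf)
    {
        int err;

        /* One scatterlist entry covering the frame payload
         * (header preparation elided). */
        sg_init_one(sq->sg, xdpf->data, xdpf->len);

        /* Queue on the send virtqueue; xdp_to_ptr() tags the token so
         * completion handling can tell XDP frames from skbs. */
        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
                                   GFP_ATOMIC);
        if (unlikely(err))
            return -ENOSPC;    /* ring full: the caller counts a drop */

        return 0;
    }
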
491 struct send_queue *sq; in virtnet_xdp_xmit() local
508 sq = virtnet_xdp_sq(vi); in virtnet_xdp_xmit()
517 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in virtnet_xdp_xmit()
535 err = __virtnet_xdp_xmit_one(vi, sq, xdpf); in virtnet_xdp_xmit()
544 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
548 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
549 sq->stats.bytes += bytes; in virtnet_xdp_xmit()
550 sq->stats.packets += packets; in virtnet_xdp_xmit()
551 sq->stats.xdp_tx += n; in virtnet_xdp_xmit()
552 sq->stats.xdp_tx_drops += drops; in virtnet_xdp_xmit()
553 sq->stats.kicks += kicks; in virtnet_xdp_xmit()
554 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
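The enclosing virtnet_xdp_xmit() (lines 491-554) follows a reclaim, transmit, kick, account sequence. A condensed sketch of that shape; locking and the completion-token handling inside the reclaim loop are omitted. The same kick-accounting tail reappears in virtnet_poll() at lines 1438-1442 and in start_xmit() at lines 1615-1618:

    sq = virtnet_xdp_sq(vi);

    /* Reclaim completed buffers first to free ring slots. */
    while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
        /* bytes/packets accounting elided */
    }

    for (i = 0; i < n; i++) {
        if (__virtnet_xdp_xmit_one(vi, sq, frames[i]))
            drops++;
    }

    /* Notify the host only when the virtqueue asks for a kick. */
    if (flags & XDP_XMIT_FLUSH)
        if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
            kicks = 1;

    u64_stats_update_begin(&sq->stats.syncp);
    sq->stats.bytes += bytes;
    sq->stats.packets += packets;
    sq->stats.xdp_tx += n;
    sq->stats.xdp_tx_drops += drops;
    sq->stats.kicks += kicks;
    u64_stats_update_end(&sq->stats.syncp);
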
1352 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) in free_old_xmit_skbs() argument
1359 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in free_old_xmit_skbs()
1382 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit_skbs()
1383 sq->stats.bytes += bytes; in free_old_xmit_skbs()
1384 sq->stats.packets += packets; in free_old_xmit_skbs()
1385 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit_skbs()
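Lines 1352-1385 give the generic TX reclaim helper shared by the napi and non-napi paths; the in_napi flag controls how completed skbs are consumed. A sketch with the per-token skb/XDP handling between lines 1359 and 1382 elided:

    static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
    {
        unsigned int len, packets = 0, bytes = 0;
        void *ptr;

        /* Pop every completed buffer off the send virtqueue. The token
         * is an skb (or a tagged xdp_frame); handling elided. */
        while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL)
            packets++;

        /* Don't touch the stats seqcount if nothing completed. */
        if (!packets)
            return;

        u64_stats_update_begin(&sq->stats.syncp);
        sq->stats.bytes += bytes;
        sq->stats.packets += packets;
        u64_stats_update_end(&sq->stats.syncp);
    }
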
1402 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx() local
1405 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
1409 free_old_xmit_skbs(sq, true); in virtnet_poll_cleantx()
1413 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) in virtnet_poll_cleantx()
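virtnet_poll_cleantx() (lines 1402-1413) lets the RX napi handler opportunistically clean the sibling TX queue. Reconstructed around the matched lines; the trylock body is an assumption consistent with the wake check below:

    static void virtnet_poll_cleantx(struct receive_queue *rq)
    {
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int index = vq2rxq(rq->vq);
        struct send_queue *sq = &vi->sq[index];
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

        /* Nothing to do without TX napi, or on an XDP-owned queue. */
        if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
            return;

        if (__netif_tx_trylock(txq)) {
            free_old_xmit_skbs(sq, true);
            __netif_tx_unlock(txq);
        }

        /* Wake the stack once a worst-case skb fits again: one header
         * slot plus one slot per possible fragment. */
        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
            netif_tx_wake_queue(txq);
    }
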
1422 struct send_queue *sq; in virtnet_poll() local
1438 sq = virtnet_xdp_sq(vi); in virtnet_poll()
1439 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
1440 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
1441 sq->stats.kicks++; in virtnet_poll()
1442 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
1472 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); in virtnet_open()
1480 struct send_queue *sq = container_of(napi, struct send_queue, napi); in virtnet_poll_tx() local
1481 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
1482 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
1493 free_old_xmit_skbs(sq, true); in virtnet_poll_tx()
1496 virtqueue_napi_complete(napi, sq->vq, 0); in virtnet_poll_tx()
1498 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) in virtnet_poll_tx()
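The dedicated TX napi poller (lines 1480-1498) recovers the send queue from its embedded napi_struct via container_of(), reclaims under the tx lock, and re-arms completion interrupts through virtqueue_napi_complete(). A sketch with the XDP-queue early-out elided:

    static int virtnet_poll_tx(struct napi_struct *napi, int budget)
    {
        struct send_queue *sq = container_of(napi, struct send_queue, napi);
        struct virtnet_info *vi = sq->vq->vdev->priv;
        unsigned int index = vq2txq(sq->vq);
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

        __netif_tx_lock(txq, raw_smp_processor_id());
        free_old_xmit_skbs(sq, true);
        __netif_tx_unlock(txq);

        /* Budget 0: TX work is not charged against the napi budget. */
        virtqueue_napi_complete(napi, sq->vq, 0);

        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
            netif_tx_wake_queue(txq);

        return 0;
    }
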
1504 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) in xmit_skb() argument
1508 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
1533 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
1536 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
1542 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
1543 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
1548 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); in xmit_skb()
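xmit_skb() (lines 1504-1548) shows why the sg table is sized nr_frags + 1 or + 2: when the virtio-net header can be pushed into the skb's headroom (can_push), header and data form one sg run; otherwise the header occupies its own descriptor. A sketch of that branch, with the header field setup elided:

    sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));

    if (can_push) {
        /* Header lives in the headroom: map header + data as one run. */
        __skb_push(skb, hdr_len);
        num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
        if (unlikely(num_sg < 0))
            return num_sg;
        __skb_pull(skb, hdr_len);    /* restore skb->data */
    } else {
        /* Out-of-band header in sg[0], packet data from sg[1] on. */
        sg_set_buf(sq->sg, hdr, hdr_len);
        num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
        if (unlikely(num_sg < 0))
            return num_sg;
        num_sg++;    /* account for the header entry */
    }
    /* The skb itself is the completion token. */
    return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
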
1555 struct send_queue *sq = &vi->sq[qnum]; in start_xmit() local
1559 bool use_napi = sq->napi.weight; in start_xmit()
1562 free_old_xmit_skbs(sq, false); in start_xmit()
1565 virtqueue_enable_cb_delayed(sq->vq); in start_xmit()
1571 err = xmit_skb(sq, skb); in start_xmit()
1601 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in start_xmit()
1604 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in start_xmit()
1606 free_old_xmit_skbs(sq, false); in start_xmit()
1607 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in start_xmit()
1609 virtqueue_disable_cb(sq->vq); in start_xmit()
1615 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
1616 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
1617 sq->stats.kicks++; in start_xmit()
1618 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
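start_xmit() (lines 1555-1618) combines opportunistic reclaim, flow control, and the host kick. The stop/restart dance reconstructed from the matched lines; the netif_stop_subqueue()/netif_start_subqueue() calls are assumptions consistent with the surrounding checks:

    /* Stop the queue when a worst-case packet might no longer fit. */
    if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
        netif_stop_subqueue(dev, qnum);
        if (!use_napi &&
            unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
            /* Completions raced with us: reclaim and retry once. */
            free_old_xmit_skbs(sq, false);
            if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
                netif_start_subqueue(dev, qnum);
                virtqueue_disable_cb(sq->vq);
            }
        }
    }

    if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
        u64_stats_update_begin(&sq->stats.syncp);
        sq->stats.kicks++;
        u64_stats_update_end(&sq->stats.syncp);
    }
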
1726 struct send_queue *sq = &vi->sq[i]; in virtnet_stats() local
1729 start = u64_stats_fetch_begin_irq(&sq->stats.syncp); in virtnet_stats()
1730 tpackets = sq->stats.packets; in virtnet_stats()
1731 tbytes = sq->stats.bytes; in virtnet_stats()
1732 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); in virtnet_stats()
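Readers of the per-queue counters use the u64_stats seqcount protocol: fetch, then retry if a writer interleaved. A sketch of the virtnet_stats() loop (lines 1726-1732); the rq side and the final aggregation target are elided:

    for (i = 0; i < vi->max_queue_pairs; i++) {
        struct send_queue *sq = &vi->sq[i];
        u64 tpackets, tbytes;
        unsigned int start;

        do {
            start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
            tpackets = sq->stats.packets;
            tbytes = sq->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

        tot->tx_packets += tpackets;
        tot->tx_bytes += tbytes;
    }
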
1810 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_close()
1923 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
1960 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
2024 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
2142 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats() local
2144 stats_base = (u8 *)&sq->stats; in virtnet_get_ethtool_stats()
2146 start = u64_stats_fetch_begin_irq(&sq->stats.syncp); in virtnet_get_ethtool_stats()
2151 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
2245 if (napi_weight ^ vi->sq[0].napi.weight) { in virtnet_set_coalesce()
2249 vi->sq[i].napi.weight = napi_weight; in virtnet_set_coalesce()
2266 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
2330 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_freeze_down()
2355 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_restore_up()
2356 &vi->sq[i].napi); in virtnet_restore_up()
2458 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
2490 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2491 &vi->sq[i].napi); in virtnet_xdp_set()
2507 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2508 &vi->sq[i].napi); in virtnet_xdp_set()
2651 netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
2660 kfree(vi->sq); in virtnet_free_queues()
2701 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
2797 sprintf(vi->sq[i].name, "output.%d", i); in virtnet_find_vqs()
2799 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
2818 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
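In virtnet_find_vqs() (lines 2797-2818), TX queues are named "output.N" and the flat vq array returned by the transport is mapped back through txq2vq(). A sketch of just the sq side; everything between the two loops is elided:

    for (i = 0; i < vi->max_queue_pairs; i++) {
        sprintf(vi->sq[i].name, "output.%d", i);
        names[txq2vq(i)] = vi->sq[i].name;
        /* rq naming and callback setup elided */
    }

    /* transport ->find_vqs() call elided */

    for (i = 0; i < vi->max_queue_pairs; i++)
        vi->sq[i].vq = vqs[txq2vq(i)];
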
2843 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
2844 if (!vi->sq) in virtnet_alloc_queues()
2855 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, in virtnet_alloc_queues()
2860 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
2863 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
2869 kfree(vi->sq); in virtnet_alloc_queues()
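Finally, virtnet_alloc_queues() (lines 2843-2869) sets up the per-queue state everything above relies on: the sq array itself, the TX napi context, the scatterlist, and the stats seqcount. A sketch of the sq-only portion; rq allocation and the exact error unwinding (which includes the kfree(vi->sq) at line 2869) are elided:

    static int virtnet_alloc_queues(struct virtnet_info *vi)
    {
        int i;

        vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
        if (!vi->sq)
            return -ENOMEM;

        /* rq allocation elided; its failure path frees vi->sq. */

        for (i = 0; i < vi->max_queue_pairs; i++) {
            /* Weight 0 (napi_tx false) disables TX napi entirely. */
            netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
                              napi_tx ? napi_weight : 0);
            sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
            u64_stats_init(&vi->sq[i].stats.syncp);
        }

        return 0;
    }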