Lines matching refs:vq (references to the identifier vq in the Linux kernel's drivers/net/virtio_net.c)
57 struct virtqueue *vq; member
69 struct virtqueue *vq; member
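
Lines 57 and 69 are the vq members of the driver's two per-queue containers. A minimal sketch of that layout, trimmed to the fields this listing actually exercises (the napi and sg members are assumptions inferred from the functions below, not quoted from the file):

    /* Per-queue bookkeeping; each side owns one virtqueue. */
    struct send_queue {
            struct virtqueue *vq;                      /* line 57 */
            struct scatterlist sg[MAX_SKB_FRAGS + 2];  /* filled by xmit_skb() */
    };

    struct receive_queue {
            struct virtqueue *vq;                      /* line 69 */
            struct napi_struct napi;                   /* driven by virtnet_poll() */
            struct scatterlist sg[MAX_SKB_FRAGS + 2];  /* filled by add_recvbuf_*() */
    };
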
154 static int vq2txq(struct virtqueue *vq) in vq2txq() argument
156 return (vq->index - 1) / 2; in vq2txq()
164 static int vq2rxq(struct virtqueue *vq) in vq2rxq() argument
166 return vq->index / 2; in vq2rxq()
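
vq2txq() and vq2rxq() invert virtio-net's interleaved queue layout: RX queues sit at even virtqueue indices, TX queues at odd ones. The standalone program below checks that arithmetic against the forward maps rxq2vq()/txq2vq() used by virtnet_find_vqs() at the end of this listing; their q*2 and q*2+1 definitions are assumptions consistent with the inverses quoted above, and the sketch operates on plain ints rather than struct virtqueue:

    #include <assert.h>

    /* Inverse maps, as on lines 154-166, on bare indices. */
    static int vq2txq(int index) { return (index - 1) / 2; }
    static int vq2rxq(int index) { return index / 2; }

    /* Assumed forward maps: RX at even slots, TX at odd slots. */
    static int rxq2vq(int q) { return q * 2; }
    static int txq2vq(int q) { return q * 2 + 1; }

    int main(void)
    {
            for (int q = 0; q < 8; q++) {
                    assert(vq2rxq(rxq2vq(q)) == q);
                    assert(vq2txq(txq2vq(q)) == q);
            }
            return 0;
    }
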
206 static void skb_xmit_done(struct virtqueue *vq) in skb_xmit_done() argument
208 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
211 virtqueue_disable_cb(vq); in skb_xmit_done()
214 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
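
Lines 206-214 are the TX-complete interrupt callback, and together they form a common virtio pattern: suppress further callbacks (the actual skb reclaim is deferred to the transmit path) and wake the matching netdev subqueue so the stack resumes queuing. A sketch of the whole function as those quoted lines imply it:

    static void skb_xmit_done(struct virtqueue *vq)
    {
            struct virtnet_info *vi = vq->vdev->priv;

            /* Suppress further callbacks; start_xmit() reclaims later. */
            virtqueue_disable_cb(vq);

            /* The stack was likely waiting for output slots; wake it. */
            netif_wake_subqueue(vi->dev, vq2txq(vq));
    }
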
237 struct virtnet_info *vi = rq->vq->vdev->priv; in page_to_skb()
311 page = virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
331 struct virtnet_info *vi = rq->vq->vdev->priv; in receive_buf()
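
Line 311 is the heart of mergeable receive buffers: one packet can span several ring entries, and after the first buffer arrives through the NAPI loop, the rest are popped off the same RX virtqueue. A sketch of that walk, assuming num_buf comes from the virtio-net header's num_buffers field; attach_page() is a hypothetical stand-in for the driver's frag-appending helper:

    while (--num_buf) {
            unsigned int len;
            struct page *page = virtqueue_get_buf(rq->vq, &len);

            if (!page)              /* device promised more than it sent */
                    return -EINVAL; /* caller drops the packet */

            attach_page(skb, page, len);    /* hypothetical frag helper */
    }
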
430 struct virtnet_info *vi = rq->vq->vdev->priv; in add_recvbuf_small()
446 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); in add_recvbuf_small()
491 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, in add_recvbuf_big()
510 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); in add_recvbuf_mergeable()
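
Lines 430-510 post receive buffers in the three supported shapes; the count passed to virtqueue_add_inbuf() (2, MAX_SKB_FRAGS + 2, or 1) is the number of scatterlist entries, and the fourth argument is the cookie that virtqueue_get_buf() later hands back. A sketch of the small-buffer case around line 446, assuming sg[0] carries the virtio-net header and sg[1] the packet room:

    sg_init_table(rq->sg, 2);
    sg_set_buf(&rq->sg[0], hdr, sizeof(*hdr));      /* virtio_net_hdr */
    skb_to_sgvec(skb, &rq->sg[1], 0, skb->len);     /* packet data */

    err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
    if (err < 0)
            dev_kfree_skb(skb);                     /* ring full or OOM */
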
526 struct virtnet_info *vi = rq->vq->vdev->priv; in try_fill_recv()
542 } while (rq->vq->num_free); in try_fill_recv()
545 virtqueue_kick(rq->vq); in try_fill_recv()
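
try_fill_recv() (lines 526-545) is the batching wrapper around the three add_recvbuf variants: it keeps posting until an allocation fails or the ring is full, then notifies the device once. Kicking once per batch instead of once per buffer keeps guest exits down, which is the usual virtio producer pattern. A sketch; the mergeable_rx_bufs/big_packets feature flags are assumptions about how the dispatch is keyed:

    int err;

    do {
            if (vi->mergeable_rx_bufs)
                    err = add_recvbuf_mergeable(rq, gfp);
            else if (vi->big_packets)
                    err = add_recvbuf_big(rq, gfp);
            else
                    err = add_recvbuf_small(rq, gfp);
            if (err)
                    break;
    } while (rq->vq->num_free);     /* line 542: slots still available */

    virtqueue_kick(rq->vq);         /* line 545: one notification per batch */
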
570 virtqueue_disable_cb(rq->vq); in virtnet_napi_enable()
603 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
609 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_poll()
623 if (unlikely(!virtqueue_enable_cb(rq->vq)) && in virtnet_poll()
625 virtqueue_disable_cb(rq->vq); in virtnet_poll()
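
Lines 603-625 show the NAPI poll loop and its callback re-enable dance. virtqueue_enable_cb() returns false when buffers arrived between the last virtqueue_get_buf() and the re-enable, which would otherwise be a lost-interrupt window; in that case callbacks are disabled again and polling is rescheduled. A sketch of the pattern those lines imply:

    unsigned int len;
    void *buf;
    int received = 0;

    while (received < budget &&
           (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
            receive_buf(rq, buf, len);
            received++;
    }

    if (received < budget) {
            napi_complete(napi);
            /* false => more work raced in; poll again instead of sleeping */
            if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
                napi_schedule_prep(napi)) {
                    virtqueue_disable_cb(rq->vq);
                    __napi_schedule(napi);
            }
    }
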
654 struct virtnet_info *vi = sq->vq->vdev->priv; in free_old_xmit_skbs()
657 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { in free_old_xmit_skbs()
673 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
714 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); in xmit_skb()
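
The transmit side mirrors receive: free_old_xmit_skbs() (lines 654-657) harvests the cookies of completed transmissions, and xmit_skb() (lines 673-714) maps the header plus skb data into the per-queue scatterlist before queuing. A sketch of both halves, with header setup and stats elided:

    struct sk_buff *done;
    unsigned int len;
    int num_sg, err;

    /* Reclaim: each completed skb comes back as its queuing cookie. */
    while ((done = virtqueue_get_buf(sq->vq, &len)) != NULL)
            dev_kfree_skb_any(done);

    /* Enqueue: header in sg[0], skb data behind it (line 714). */
    sg_set_buf(sq->sg, hdr, sizeof(*hdr));
    num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
    err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
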
740 virtqueue_kick(sq->vq); in start_xmit()
748 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in start_xmit()
750 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in start_xmit()
753 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in start_xmit()
755 virtqueue_disable_cb(sq->vq); in start_xmit()
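
Lines 740-755 are the flow-control heuristic in start_xmit(): after the kick, if the ring cannot hold another worst-case skb (2 + MAX_SKB_FRAGS descriptors: header, linear area, and every page fragment), stop the subqueue and request a delayed completion interrupt; if completions raced in meanwhile, reclaim and undo the stop. A sketch:

    virtqueue_kick(sq->vq);                       /* line 740 */

    if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
            netif_stop_subqueue(dev, qnum);
            if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                    /* More slots just freed up; recheck before sleeping. */
                    free_old_xmit_skbs(sq);
                    if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
                            netif_start_subqueue(dev, qnum);
                            virtqueue_disable_cb(sq->vq);
                    }
            }
    }
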
1050 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1051 virtqueue_set_affinity(vi->sq[i].vq, -1); in virtnet_clean_affinity()
1085 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1086 virtqueue_set_affinity(vi->sq[i].vq, cpu); in virtnet_set_affinity()
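
Lines 1050-1086 manage IRQ affinity hints through the era's int-cpu signature of virtqueue_set_affinity(): each RX/TX queue pair is pinned to one CPU on the set path, and -1 clears the hint on teardown. A sketch of the set path; the for_each_online_cpu() iteration is an assumption about how cpu and the pair index i advance together:

    int i = 0, cpu;

    for_each_online_cpu(cpu) {
            virtqueue_set_affinity(vi->rq[i].vq, cpu);  /* line 1085 */
            virtqueue_set_affinity(vi->sq[i].vq, cpu);  /* line 1086 */
            i++;
    }
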
1119 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1120 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
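
Lines 1119-1120 back the ethtool -g hook. Virtio rings are sized by the host and cannot be resized from the guest, so the maximums come straight from virtqueue_get_vring_size() and the current values simply echo them. A sketch of the full handler those lines imply:

    static void virtnet_get_ringparam(struct net_device *dev,
                                      struct ethtool_ringparam *ring)
    {
            struct virtnet_info *vi = netdev_priv(dev);

            ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
            ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
            ring->rx_pending = ring->rx_max_pending;
            ring->tx_pending = ring->tx_max_pending;
    }
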
1307 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs() local
1308 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) in free_unused_bufs()
1313 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs() local
1315 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { in free_unused_bufs()
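
free_unused_bufs() (lines 1307-1315) runs at teardown, before the virtqueues are deleted: any buffer the device never consumed is detached and freed according to its type, skbs on TX and skbs or pages on RX. A sketch of the TX sweep; the RX side differs only in how buf is freed:

    int i;
    void *buf;

    for (i = 0; i < vi->max_queue_pairs; i++) {
            struct virtqueue *vq = vi->sq[i].vq;

            while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
                    dev_kfree_skb(buf);
    }
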
1391 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1392 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
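
Lines 1391-1392 close the loop with the index math at the top of this listing: find_vqs() hands back one flat array of virtqueues, and the forward maps pick each pair's RX and TX entries out of it. A sketch, again assuming rxq2vq(i) == i * 2 and txq2vq(i) == i * 2 + 1:

    int i;

    for (i = 0; i < vi->max_queue_pairs; i++) {
            vi->rq[i].vq = vqs[rxq2vq(i)];     /* even slots: receive */
            vi->sq[i].vq = vqs[txq2vq(i)];     /* odd slots: transmit */
    }
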