Lines Matching refs:vq (cross-reference hits in the Linux kernel's drivers/vhost/net.c; the left-hand numbers are source line numbers)
81 struct vhost_virtqueue *vq; member
85 struct vhost_virtqueue vq; member
120 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
122 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
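vhost_net_enable_zcopy() records zerocopy support with one bit per virtqueue index in a global mask; the driver later tests that bit to decide whether zerocopy bookkeeping is needed for a queue. A minimal userspace model of the set/test idiom (everything outside the one mirrored statement is illustrative, not kernel code):

	#include <stdio.h>

	/* Model of vhost_net_zcopy_mask: bit i set means virtqueue
	 * index i may use zerocopy transmit. */
	static unsigned int zcopy_mask;

	static void enable_zcopy(int vq)
	{
		zcopy_mask |= 0x1 << vq;	/* as vhost_net_enable_zcopy() */
	}

	static int zcopy_enabled(int vq)
	{
		return !!(zcopy_mask & (0x1 << vq));
	}

	int main(void)
	{
		enable_zcopy(1);	/* e.g. the TX queue */
		printf("vq0=%d vq1=%d\n", zcopy_enabled(0), zcopy_enabled(1));
		return 0;
	}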
126 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
137 ubufs->vq = vq; in vhost_net_ubuf_alloc()
281 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
284 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
289 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
291 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
292 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
299 vhost_add_used_and_signal_n(vq->dev, vq, in vhost_zerocopy_signal_used()
300 &vq->heads[nvq->done_idx], add); in vhost_zerocopy_signal_used()
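The fragments above show vhost_zerocopy_signal_used() scanning heads[] from done_idx for entries whose zerocopy DMA has finished, resetting each to the "clear" sentinel, and returning the whole run to the guest with one batched vhost_add_used_and_signal_n() call (a failed DMA still counts as done; it additionally takes the failure branch at line 289, whose body is not shown in the listing). A standalone sketch of that scan-and-batch step, with made-up sentinel values standing in for the VHOST_DMA_* constants and ring wrap-around omitted:

	#include <stdio.h>

	#define DMA_DONE	0u	/* stand-ins for VHOST_DMA_* sentinels */
	#define DMA_FAILED	1u
	#define DMA_CLEAR	2u
	#define DMA_IN_PROGRESS	3u

	static unsigned int heads_len[8] = { DMA_DONE, DMA_FAILED, DMA_DONE,
					     DMA_IN_PROGRESS };
	static int done_idx;

	static void signal_used(void)
	{
		int i, add = 0;

		/* Count the contiguous run of finished entries. */
		for (i = done_idx; i < 8 && heads_len[i] != DMA_IN_PROGRESS; i++) {
			heads_len[i] = DMA_CLEAR;
			add++;
		}
		if (add)	/* one batched "add used and signal" call */
			printf("signal %d used entries from index %d\n",
			       add, done_idx);
		done_idx = i;
	}

	int main(void)
	{
		signal_used();	/* prints: signal 3 used entries from index 0 */
		return 0;
	}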
309 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_callback() local
315 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_callback()
327 vhost_poll_queue(&vq->poll); in vhost_zerocopy_callback()
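vhost_zerocopy_callback() runs when the lower device finishes the DMA: it stamps the descriptor's slot in heads[] with a done or failed sentinel (line 315, whose ternary continues past the matched line) and, subject to throttling in the real driver, queues the poll work so the completion gets reported. A minimal model with illustrative names:

	#include <stdio.h>

	#define DMA_DONE	0u	/* stand-ins for VHOST_DMA_* */
	#define DMA_FAILED	1u

	static void queue_poll_work(void)	/* models vhost_poll_queue(&vq->poll) */
	{
		printf("poll work queued\n");
	}

	static void zerocopy_done(unsigned int *heads_len, int desc, int success)
	{
		/* Stamp the slot so the signal_used pass can batch it. */
		heads_len[desc] = success ? DMA_DONE : DMA_FAILED;
		queue_poll_work();
	}

	int main(void)
	{
		unsigned int heads_len[8] = { 0 };

		zerocopy_done(heads_len, 2, 1);
		return 0;
	}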
337 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx() local
345 .msg_iov = vq->iov, in handle_tx()
355 mutex_lock(&vq->mutex); in handle_tx()
356 sock = vq->private_data; in handle_tx()
360 vhost_disable_notify(&net->dev, vq); in handle_tx()
368 vhost_zerocopy_signal_used(net, vq); in handle_tx()
373 if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) in handle_tx()
377 head = vhost_get_vq_desc(vq, vq->iov, in handle_tx()
378 ARRAY_SIZE(vq->iov), in handle_tx()
385 if (head == vq->num) { in handle_tx()
386 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx()
387 vhost_disable_notify(&net->dev, vq); in handle_tx()
393 vq_err(vq, "Unexpected descriptor format for TX: " in handle_tx()
398 s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out); in handle_tx()
400 len = iov_length(vq->iov, out); in handle_tx()
403 vq_err(vq, "Unexpected header len for TX: " in handle_tx()
419 vq->heads[nvq->upend_idx].id = head; in handle_tx()
420 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; in handle_tx()
441 vhost_discard_vq_desc(vq, 1); in handle_tx()
448 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx()
450 vhost_zerocopy_signal_used(net, vq); in handle_tx()
454 vhost_poll_queue(&vq->poll); in handle_tx()
459 mutex_unlock(&vq->mutex); in handle_tx()
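handle_tx() follows the standard virtio notification dance visible in the fragments: take the vq mutex, disable guest notifications, and drain descriptors with vhost_get_vq_desc(); when the ring is empty (head == vq->num), re-enable notifications, and if vhost_enable_notify() reports that new buffers raced in, disable again and keep draining. A compilable single-threaded model of that enable/disable/recheck loop over a toy ring (all names here are illustrative):

	#include <stdio.h>

	#define RING_SIZE 4

	static int ring[RING_SIZE] = { 10, 11, 12 };
	static int avail = 3, used;	/* toy producer/consumer indexes */
	static int notify_enabled;

	static int get_desc(void)	/* models vhost_get_vq_desc() */
	{
		return used == avail ? RING_SIZE : ring[used++];
	}

	static int enable_notify(void)	/* models vhost_enable_notify() */
	{
		notify_enabled = 1;
		return used != avail;	/* true if buffers raced in */
	}

	static void handle_tx_model(void)
	{
		notify_enabled = 0;	/* vhost_disable_notify() */
		for (;;) {
			int head = get_desc();

			if (head == RING_SIZE) {	/* like head == vq->num */
				if (enable_notify()) {
					notify_enabled = 0;
					continue;	/* recheck closed the race */
				}
				break;	/* truly empty: wait for a kick */
			}
			printf("tx head %d\n", head);	/* sendmsg() would go here */
		}
	}

	int main(void)
	{
		handle_tx_model();
		return 0;
	}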
490 static int get_rx_bufs(struct vhost_virtqueue *vq, in get_rx_bufs() argument
509 r = vhost_get_vq_desc(vq, vq->iov + seg, in get_rx_bufs()
510 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
516 if (d == vq->num) { in get_rx_bufs()
521 vq_err(vq, "unexpected descriptor format for RX: " in get_rx_bufs()
531 heads[headcount].len = iov_length(vq->iov + seg, in); in get_rx_bufs()
548 vhost_discard_vq_desc(vq, headcount); in get_rx_bufs()
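get_rx_bufs() keeps pulling descriptor chains with vhost_get_vq_desc() and summing their iov_length() until the accumulated space covers the packet; if the ring runs dry first, it backs out with vhost_discard_vq_desc() so the same buffers are seen again on the next pass. A small model of that accumulate-or-roll-back loop (buffer sizes and names are illustrative):

	#include <stdio.h>

	static int buf_len[] = { 1500, 1500, 1500 };	/* illustrative chain sizes */
	static int next_buf;
	#define NBUFS ((int)(sizeof(buf_len) / sizeof(buf_len[0])))

	/* Returns the headcount needed to hold 'datalen' bytes, or 0 if
	 * the ring ran dry (everything taken is given back, as with
	 * vhost_discard_vq_desc(vq, headcount)). */
	static int get_bufs(int datalen)
	{
		int headcount = 0, start = next_buf;

		while (datalen > 0) {
			if (next_buf == NBUFS) {
				next_buf = start;	/* roll back; retry later */
				return 0;
			}
			datalen -= buf_len[next_buf++];
			headcount++;
		}
		return headcount;
	}

	int main(void)
	{
		printf("4000-byte packet spans %d buffers\n", get_bufs(4000));
		return 0;
	}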
557 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx() local
565 .msg_iov = vq->iov, in handle_rx()
579 mutex_lock(&vq->mutex); in handle_rx()
580 sock = vq->private_data; in handle_rx()
583 vhost_disable_notify(&net->dev, vq); in handle_rx()
588 vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? in handle_rx()
589 vq->log : NULL; in handle_rx()
590 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); in handle_rx()
595 headcount = get_rx_bufs(vq, vq->heads, vhost_len, in handle_rx()
611 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
614 vhost_disable_notify(&net->dev, vq); in handle_rx()
624 move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in); in handle_rx()
628 copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in); in handle_rx()
638 vhost_discard_vq_desc(vq, headcount); in handle_rx()
644 vq_err(vq, "Unable to write vnet_hdr at addr %p\n", in handle_rx()
645 vq->iov->iov_base); in handle_rx()
653 vq_err(vq, "Failed num_buffers write"); in handle_rx()
654 vhost_discard_vq_desc(vq, headcount); in handle_rx()
657 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, in handle_rx()
660 vhost_log_write(vq, vq_log, log, vhost_len); in handle_rx()
663 vhost_poll_queue(&vq->poll); in handle_rx()
668 mutex_unlock(&vq->mutex); in handle_rx()
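The VIRTIO_NET_F_MRG_RXBUF check at line 590 is what enables the multi-buffer receive path above: with mergeable buffers negotiated, the per-packet virtio-net header carries a num_buffers field, and the "Failed num_buffers write" error at line 653 fires when handle_rx() cannot store the headcount there. A sketch of that header layout, flattened into one struct for illustration (the kernel spells it struct virtio_net_hdr_mrg_rxbuf with a nested struct virtio_net_hdr):

	#include <stdint.h>
	#include <stdio.h>

	/* Flattened model of struct virtio_net_hdr_mrg_rxbuf. */
	struct vnet_hdr_mrg_model {
		uint8_t  flags;
		uint8_t  gso_type;
		uint16_t hdr_len;
		uint16_t gso_size;
		uint16_t csum_start;
		uint16_t csum_offset;
		uint16_t num_buffers;	/* how many chains the packet spans */
	};

	int main(void)
	{
		/* handle_rx() writes the headcount from get_rx_bufs() here. */
		struct vnet_hdr_mrg_model hdr = { .num_buffers = 3 };

		printf("header is %zu bytes, num_buffers=%u\n",
		       sizeof(hdr), hdr.num_buffers);
		return 0;
	}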
673 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_tx_kick() local
675 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
682 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_rx_kick() local
684 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
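Both kick handlers recover their context purely through container_of(): first the enclosing vhost_virtqueue from the embedded poll.work, then the enclosing vhost_net from vq->dev. The pattern works because a member's offset inside its struct is a compile-time constant. A runnable userspace demonstration (the types here are stand-ins):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work { int pending; };

	struct virtqueue_model {
		int num;
		struct work kick_work;	/* stands in for poll.work */
	};

	static void handle_kick(struct work *w)
	{
		/* Recover the enclosing virtqueue, as handle_tx_kick() does. */
		struct virtqueue_model *vq =
			container_of(w, struct virtqueue_model, kick_work);

		printf("kick on a %d-entry vq\n", vq->num);
	}

	int main(void)
	{
		struct virtqueue_model vq = { .num = 256 };

		handle_kick(&vq.kick_work);
		return 0;
	}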
723 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
724 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
725 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
726 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
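vhost_net_open() wires a direction-specific handle_kick into each queue, so the generic vhost worker can dispatch a kick without knowing whether the queue is TX or RX. A minimal model of that function-pointer dispatch (enum and type names are illustrative):

	#include <stdio.h>

	enum { VQ_RX, VQ_TX, VQ_MAX };	/* mirrors VHOST_NET_VQ_RX/_TX */

	struct vq_model {
		void (*handle_kick)(struct vq_model *vq);
	};

	static void tx_kick(struct vq_model *vq) { (void)vq; printf("tx kick\n"); }
	static void rx_kick(struct vq_model *vq) { (void)vq; printf("rx kick\n"); }

	int main(void)
	{
		struct vq_model vqs[VQ_MAX];

		vqs[VQ_TX].handle_kick = tx_kick;	/* as vhost_net_open() does */
		vqs[VQ_RX].handle_kick = rx_kick;
		vqs[VQ_RX].handle_kick(&vqs[VQ_RX]);
		return 0;
	}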
746 struct vhost_virtqueue *vq) in vhost_net_disable_vq() argument
749 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_disable_vq()
751 if (!vq->private_data) in vhost_net_disable_vq()
757 struct vhost_virtqueue *vq) in vhost_net_enable_vq() argument
760 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_enable_vq()
764 sock = vq->private_data; in vhost_net_enable_vq()
772 struct vhost_virtqueue *vq) in vhost_net_stop_vq() argument
776 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
777 sock = vq->private_data; in vhost_net_stop_vq()
778 vhost_net_disable_vq(n, vq); in vhost_net_stop_vq()
779 vq->private_data = NULL; in vhost_net_stop_vq()
780 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
787 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
788 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
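vhost_net_stop_vq() shows the detach idiom used throughout the driver: under the vq mutex, stop polling, clear private_data, and hand the old socket back so the caller can flush outstanding work before releasing it. A single-threaded model of the swap (the mutex is only noted in comments here):

	#include <stdio.h>

	struct vq_model {
		void *private_data;	/* backend socket, NULL when stopped */
	};

	/* Models vhost_net_stop_vq(): detach under the (elided) vq mutex
	 * and return the old backend for the caller to release after a
	 * flush. */
	static void *stop_vq(struct vq_model *vq)
	{
		void *sock = vq->private_data;

		vq->private_data = NULL;	/* polling already disabled */
		return sock;
	}

	int main(void)
	{
		int fake_sock;
		struct vq_model vq = { .private_data = &fake_sock };

		printf("old backend: %p\n", stop_vq(&vq));
		return 0;
	}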
794 vhost_poll_flush(&n->vqs[index].vq.poll); in vhost_net_flush_vq()
802 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
804 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
807 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
810 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
907 struct vhost_virtqueue *vq; in vhost_net_set_backend() local
921 vq = &n->vqs[index].vq; in vhost_net_set_backend()
923 mutex_lock(&vq->mutex); in vhost_net_set_backend()
926 if (!vhost_vq_access_ok(vq)) { in vhost_net_set_backend()
937 oldsock = vq->private_data; in vhost_net_set_backend()
939 ubufs = vhost_net_ubuf_alloc(vq, in vhost_net_set_backend()
946 vhost_net_disable_vq(n, vq); in vhost_net_set_backend()
947 vq->private_data = sock; in vhost_net_set_backend()
948 r = vhost_init_used(vq); in vhost_net_set_backend()
951 r = vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
963 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
967 mutex_lock(&vq->mutex); in vhost_net_set_backend()
968 vhost_zerocopy_signal_used(n, vq); in vhost_net_set_backend()
969 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
981 vq->private_data = oldsock; in vhost_net_set_backend()
982 vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
988 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
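vhost_net_set_backend() is a swap with full unwinding: under the vq mutex it validates access, allocates zerocopy state, installs the new socket, and re-initializes the used ring; any failure restores oldsock and re-enables the queue before the lock drops, while on success the old socket is only flushed and released after the mutex is gone. A compressed model of that success/unwind shape (the helpers here are stubs, not kernel API):

	#include <stdio.h>

	struct vq_model { void *private_data; };

	static int init_used(struct vq_model *vq) { (void)vq; return 0; }
	static void release_sock(void *sock) { printf("released %p\n", sock); }

	static int set_backend(struct vq_model *vq, void *newsock)
	{
		void *oldsock = vq->private_data;

		vq->private_data = newsock;
		if (init_used(vq) < 0) {
			/* err_used path: put the old backend back */
			vq->private_data = oldsock;
			return -1;
		}
		/* The driver flushes and fput()s oldsock after unlocking. */
		if (oldsock)
			release_sock(oldsock);
		return 0;
	}

	int main(void)
	{
		int a, b;
		struct vq_model vq = { .private_data = &a };

		printf("swap rc=%d\n", set_backend(&vq, &b));
		return 0;
	}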
1047 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1048 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1051 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()