Lines Matching refs:vq (cross-references to the identifier vq; the functions below belong to the vhost-net driver, drivers/vhost/net.c)
76 struct vhost_virtqueue *vq; member
80 struct vhost_virtqueue vq; member
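
One of the two "member" lines above shows struct vhost_virtqueue embedded by value as a vq field (in the driver this is struct vhost_net_virtqueue); the container_of() calls later in the listing, in vhost_zerocopy_signal_used(), vhost_net_disable_vq() and vhost_net_enable_vq(), recover the enclosing structure from a pointer to that embedded field. A minimal userspace sketch of the idiom, with the structures reduced to placeholder fields rather than the real layout:

#include <stddef.h>
#include <stdio.h>

/* Reduced stand-ins for the real vhost structures. */
struct vhost_virtqueue {
	int num;			/* placeholder field */
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;	/* embedded, as on the "member" line above */
	int upend_idx;			/* placeholder per-net state */
	int done_idx;
};

/* Same idiom as the kernel's container_of(): subtract the member offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void take_vq(struct vhost_virtqueue *vq)
{
	/* Recover the enclosing vhost_net_virtqueue from the embedded vq,
	 * as vhost_zerocopy_signal_used() and vhost_net_disable_vq() do. */
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	printf("done_idx=%d upend_idx=%d\n", nvq->done_idx, nvq->upend_idx);
}

int main(void)
{
	struct vhost_net_virtqueue nvq = { .upend_idx = 3, .done_idx = 1 };
	take_vq(&nvq.vq);	/* callers pass only the embedded vq */
	return 0;
}
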
115 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
117 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
129 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
140 ubufs->vq = vq; in vhost_net_ubuf_alloc()
276 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
279 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
284 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
286 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
287 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
288 vhost_add_used_and_signal(vq->dev, vq, in vhost_zerocopy_signal_used()
289 vq->heads[i].id, 0); in vhost_zerocopy_signal_used()
302 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_callback() local
314 vhost_poll_queue(&vq->poll); in vhost_zerocopy_callback()
316 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_callback()
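
vhost_zerocopy_callback() records the outcome of a zerocopy transmit in vq->heads[ubuf->desc].len, and vhost_zerocopy_signal_used() later walks the range of pending entries, clearing each finished one and signalling it as used, stopping at the first entry still in flight. A minimal userspace model of that bookkeeping; the sentinel values mirror the VHOST_DMA_* constants from drivers/vhost/vhost.h of the same era, but the exact numbers, ring size and indices here are assumptions of the sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Sentinel "length" values used to track zerocopy DMA state; assumed to
 * mirror the VHOST_DMA_* macros in drivers/vhost/vhost.h. */
#define DMA_FAILED_LEN		3
#define DMA_DONE_LEN		2
#define DMA_IN_PROGRESS		1
#define DMA_CLEAR_LEN		0
#define DMA_IS_DONE(len)	((len) >= DMA_DONE_LEN)

#define RING 8	/* toy ring size; the driver tracks UIO_MAXIOV entries */

struct head { uint32_t id; uint32_t len; };

static struct head heads[RING];
static int done_idx, upend_idx;	/* consumed vs. submitted zerocopy slots */

/* Model of vhost_zerocopy_callback(): the lower device reports completion
 * for one outstanding slot. */
static void zerocopy_callback(int desc, bool success)
{
	heads[desc].len = success ? DMA_DONE_LEN : DMA_FAILED_LEN;
}

/* Model of vhost_zerocopy_signal_used(): walk forward from done_idx and
 * "signal used" every completed slot, stopping at the first one still in
 * flight so completions are reported in submission order. */
static void zerocopy_signal_used(void)
{
	int i;

	for (i = done_idx; i != upend_idx; i = (i + 1) % RING) {
		if (!DMA_IS_DONE(heads[i].len))
			break;
		heads[i].len = DMA_CLEAR_LEN;
		printf("used: head id %u\n", (unsigned)heads[i].id);
		done_idx = (done_idx + 1) % RING;
	}
}

int main(void)
{
	/* Submit three zerocopy buffers (model of the handle_tx() path). */
	for (upend_idx = 0; upend_idx < 3; upend_idx++)
		heads[upend_idx] = (struct head){ .id = 10u + upend_idx,
						  .len = DMA_IN_PROGRESS };

	zerocopy_callback(1, true);	/* slot 1 completes out of order */
	zerocopy_signal_used();		/* nothing reported: slot 0 pending */
	zerocopy_callback(0, true);
	zerocopy_signal_used();		/* now slots 0 and 1 are reported */
	return 0;
}
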
326 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx() local
334 .msg_iov = vq->iov, in handle_tx()
345 sock = rcu_dereference_check(vq->private_data, 1); in handle_tx()
349 mutex_lock(&vq->mutex); in handle_tx()
350 vhost_disable_notify(&net->dev, vq); in handle_tx()
358 vhost_zerocopy_signal_used(net, vq); in handle_tx()
360 head = vhost_get_vq_desc(&net->dev, vq, vq->iov, in handle_tx()
361 ARRAY_SIZE(vq->iov), in handle_tx()
368 if (head == vq->num) { in handle_tx()
380 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx()
381 vhost_disable_notify(&net->dev, vq); in handle_tx()
387 vq_err(vq, "Unexpected descriptor format for TX: " in handle_tx()
392 s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out); in handle_tx()
394 len = iov_length(vq->iov, out); in handle_tx()
397 vq_err(vq, "Unexpected header len for TX: " in handle_tx()
407 vq->heads[nvq->upend_idx].id = head; in handle_tx()
411 vq->heads[nvq->upend_idx].len = in handle_tx()
420 vq->heads[nvq->upend_idx].len = in handle_tx()
442 vhost_discard_vq_desc(vq, 1); in handle_tx()
449 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx()
451 vhost_zerocopy_signal_used(net, vq); in handle_tx()
455 vhost_poll_queue(&vq->poll); in handle_tx()
460 mutex_unlock(&vq->mutex); in handle_tx()
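
Both handle_tx() and handle_rx() use the same idiom when vhost_get_vq_desc() reports an empty available ring (head == vq->num): re-enable guest notifications, and if vhost_enable_notify() reports that new buffers were posted in the meantime, disable notifications again and keep polling so no kick is lost. A small userspace model of that re-check loop; ring_empty(), enable_notify() and disable_notify() are stand-ins for the real vhost helpers, not their actual signatures:

#include <stdbool.h>
#include <stdio.h>

/* Toy available-ring state shared with a hypothetical guest. */
static int avail_idx, last_avail_idx;
static bool notify_enabled;

static bool ring_empty(void)
{
	return avail_idx == last_avail_idx;
}

/* Stand-in for vhost_enable_notify(): turn notifications back on, then
 * report whether new buffers were published while they were off. */
static bool enable_notify(void)
{
	notify_enabled = true;
	return !ring_empty();
}

static void disable_notify(void)
{
	notify_enabled = false;
}

static void service_ring(void)
{
	disable_notify();
	for (;;) {
		if (ring_empty()) {
			/* Out of work: re-enable the kick, but re-check for
			 * buffers that raced in while notifications were off
			 * (the head == vq->num branch in handle_tx()). */
			if (enable_notify()) {
				disable_notify();
				continue;
			}
			break;	/* truly idle; wait for the next kick */
		}
		printf("processing buffer %d\n", last_avail_idx++);
	}
}

int main(void)
{
	avail_idx = 2;		/* guest queued two buffers before the kick */
	service_ring();
	return 0;
}
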
491 static int get_rx_bufs(struct vhost_virtqueue *vq, in get_rx_bufs() argument
510 d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, in get_rx_bufs()
511 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
513 if (d == vq->num) { in get_rx_bufs()
518 vq_err(vq, "unexpected descriptor format for RX: " in get_rx_bufs()
528 heads[headcount].len = iov_length(vq->iov + seg, in); in get_rx_bufs()
539 vhost_discard_vq_desc(vq, headcount); in get_rx_bufs()
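
get_rx_bufs() pulls descriptors with vhost_get_vq_desc() and sums their iovec lengths into heads[] until enough buffer space has been collected for the incoming packet; if the ring runs dry first, the partially collected descriptors are handed back with vhost_discard_vq_desc(). A sketch of that accumulation loop over a toy descriptor list; the sizes, names and the trimming of the final length are illustrative assumptions of the model:

#include <stdio.h>

struct head { unsigned id; unsigned len; };

/* Toy stand-in for the guest's RX descriptors: each entry is the number of
 * bytes of buffer space that descriptor chain provides. */
static const unsigned desc_len[] = { 1500, 1500, 1500, 1500 };
#define NUM_DESC (sizeof(desc_len) / sizeof(desc_len[0]))

/* Model of get_rx_bufs(): gather descriptors until datalen bytes fit.
 * Returns the number of heads used, or -1 if the ring ran dry (in which
 * case the caller would discard the partially collected descriptors). */
static int get_rx_bufs(struct head *heads, int max_heads,
		       unsigned datalen, unsigned *next_desc)
{
	int headcount = 0;
	long needed = (long)datalen;

	while (needed > 0) {
		if (*next_desc >= NUM_DESC || headcount == max_heads)
			return -1;	/* not enough buffers available */
		heads[headcount].id = *next_desc;
		heads[headcount].len = desc_len[*next_desc];
		needed -= desc_len[*next_desc];
		(*next_desc)++;
		headcount++;
	}
	/* Trim the last head so the reported lengths add up to datalen
	 * before heads[] is handed to the "add used" step. */
	heads[headcount - 1].len += needed;	/* needed is <= 0 here */
	return headcount;
}

int main(void)
{
	struct head heads[8];
	unsigned next = 0;
	int n = get_rx_bufs(heads, 8, 4000, &next);	/* 4000-byte packet */

	printf("headcount=%d\n", n);
	for (int i = 0; i < n; i++)
		printf("  head %u len %u\n", heads[i].id, heads[i].len);
	return 0;
}
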
548 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx() local
556 .msg_iov = vq->iov, in handle_rx()
569 struct socket *sock = rcu_dereference_check(vq->private_data, 1); in handle_rx()
574 mutex_lock(&vq->mutex); in handle_rx()
575 vhost_disable_notify(&net->dev, vq); in handle_rx()
580 vq->log : NULL; in handle_rx()
586 headcount = get_rx_bufs(vq, vq->heads, vhost_len, in handle_rx()
594 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
597 vhost_disable_notify(&net->dev, vq); in handle_rx()
607 move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in); in handle_rx()
611 copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in); in handle_rx()
621 vhost_discard_vq_desc(vq, headcount); in handle_rx()
627 vq_err(vq, "Unable to write vnet_hdr at addr %p\n", in handle_rx()
628 vq->iov->iov_base); in handle_rx()
636 vq_err(vq, "Failed num_buffers write"); in handle_rx()
637 vhost_discard_vq_desc(vq, headcount); in handle_rx()
640 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, in handle_rx()
643 vhost_log_write(vq, vq_log, log, vhost_len); in handle_rx()
646 vhost_poll_queue(&vq->poll); in handle_rx()
651 mutex_unlock(&vq->mutex); in handle_rx()
656 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_tx_kick() local
658 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
665 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_rx_kick() local
667 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
702 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
703 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
704 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
705 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
730 struct vhost_virtqueue *vq) in vhost_net_disable_vq() argument
733 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_disable_vq()
735 if (!vq->private_data) in vhost_net_disable_vq()
741 struct vhost_virtqueue *vq) in vhost_net_enable_vq() argument
744 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_enable_vq()
748 sock = rcu_dereference_protected(vq->private_data, in vhost_net_enable_vq()
749 lockdep_is_held(&vq->mutex)); in vhost_net_enable_vq()
757 struct vhost_virtqueue *vq) in vhost_net_stop_vq() argument
761 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
762 sock = rcu_dereference_protected(vq->private_data, in vhost_net_stop_vq()
763 lockdep_is_held(&vq->mutex)); in vhost_net_stop_vq()
764 vhost_net_disable_vq(n, vq); in vhost_net_stop_vq()
765 rcu_assign_pointer(vq->private_data, NULL); in vhost_net_stop_vq()
766 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
773 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
774 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
780 vhost_poll_flush(&n->vqs[index].vq.poll); in vhost_net_flush_vq()
788 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
790 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
793 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
796 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
891 struct vhost_virtqueue *vq; in vhost_net_set_backend() local
905 vq = &n->vqs[index].vq; in vhost_net_set_backend()
907 mutex_lock(&vq->mutex); in vhost_net_set_backend()
910 if (!vhost_vq_access_ok(vq)) { in vhost_net_set_backend()
921 oldsock = rcu_dereference_protected(vq->private_data, in vhost_net_set_backend()
922 lockdep_is_held(&vq->mutex)); in vhost_net_set_backend()
924 ubufs = vhost_net_ubuf_alloc(vq, in vhost_net_set_backend()
931 vhost_net_disable_vq(n, vq); in vhost_net_set_backend()
932 rcu_assign_pointer(vq->private_data, sock); in vhost_net_set_backend()
933 r = vhost_init_used(vq); in vhost_net_set_backend()
936 r = vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
948 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
952 mutex_lock(&vq->mutex); in vhost_net_set_backend()
953 vhost_zerocopy_signal_used(n, vq); in vhost_net_set_backend()
954 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
966 rcu_assign_pointer(vq->private_data, oldsock); in vhost_net_set_backend()
967 vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
973 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
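
vhost_net_set_backend() swaps the socket behind a virtqueue while holding the vq mutex: it validates access with vhost_vq_access_ok(), reads the old backend under the lock, disables polling, publishes the new socket with rcu_assign_pointer(), re-initialises the used ring and re-enables the vq, restoring the old socket if a later step fails. A condensed userspace model of that ordering; plain pointer assignment stands in for the RCU publish, and the small helpers are stubs, not the real vhost calls:

#include <pthread.h>
#include <stdio.h>

struct socket { const char *name; };

struct vq {
	pthread_mutex_t mutex;
	struct socket *private_data;	/* backend, RCU-published in the driver */
	int polling;
};

/* Stubs standing in for vhost_vq_access_ok(), vhost_init_used() and
 * vhost_net_{enable,disable}_vq(). */
static int vq_access_ok(struct vq *vq)	{ (void)vq; return 1; }
static int init_used(struct vq *vq)	{ (void)vq; return 0; }
static int enable_vq(struct vq *vq)	{ vq->polling = 1; return 0; }
static void disable_vq(struct vq *vq)	{ vq->polling = 0; }

/* Returns the old backend (for the caller to flush and release), or NULL
 * on failure, in which case the old backend stays installed. */
static struct socket *set_backend(struct vq *vq, struct socket *sock)
{
	struct socket *oldsock;

	pthread_mutex_lock(&vq->mutex);
	if (!vq_access_ok(vq)) {
		pthread_mutex_unlock(&vq->mutex);
		return NULL;
	}
	oldsock = vq->private_data;	/* read under the mutex */
	if (sock != oldsock) {
		disable_vq(vq);
		vq->private_data = sock;	/* rcu_assign_pointer() in the driver */
		if (init_used(vq) || enable_vq(vq)) {
			/* Roll back to the old backend on failure. */
			vq->private_data = oldsock;
			enable_vq(vq);
			pthread_mutex_unlock(&vq->mutex);
			return NULL;
		}
	}
	pthread_mutex_unlock(&vq->mutex);
	return oldsock;
}

int main(void)
{
	struct socket tap = { "tap0" }, old = { "old" };
	struct vq vq = { PTHREAD_MUTEX_INITIALIZER, &old, 1 };
	struct socket *prev = set_backend(&vq, &tap);

	printf("previous backend: %s, new: %s\n",
	       prev ? prev->name : "(none)", vq.private_data->name);
	return 0;
}
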
1034 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1037 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()