Lines Matching refs:vq

88 struct vhost_virtqueue *vq; member
92 struct vhost_virtqueue vq; member
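
Lines 88 and 92 show the two shapes vq takes in this file: struct vhost_net keeps a pointer per queue, while struct vhost_net_virtqueue embeds a struct vhost_virtqueue directly. The embedding is what makes the container_of() calls at lines 249, 315 and 326 work. A minimal stand-alone sketch of that pattern, with simplified stand-in types (the real structs carry many more fields):

    #include <stddef.h>
    #include <stdio.h>

    struct vhost_virtqueue { int num; };          /* stand-in */
    struct vhost_net_virtqueue {
            struct vhost_virtqueue vq;            /* embedded, as at line 92 */
            int upend_idx, done_idx;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct vhost_net_virtqueue nvq = { .vq = { .num = 256 } };
            struct vhost_virtqueue *vq = &nvq.vq;
            /* recover the wrapper from the embedded member,
             * as at lines 249, 315 and 326 */
            struct vhost_net_virtqueue *back =
                    container_of(vq, struct vhost_net_virtqueue, vq);
            printf("%d\n", back->vq.num);         /* 256 */
            return 0;
    }
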
123 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
125 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
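
Lines 123-125: zerocopy is enabled per queue by setting one bit per virtqueue index in vhost_net_zcopy_mask (VHOST_NET_VQ_RX is 0 and VHOST_NET_VQ_TX is 1 in this file). A compilable sketch of the same mask logic:

    #include <stdio.h>

    enum { VHOST_NET_VQ_RX = 0, VHOST_NET_VQ_TX = 1 };

    static unsigned vhost_net_zcopy_mask;

    static void vhost_net_enable_zcopy(int vq)
    {
            vhost_net_zcopy_mask |= 0x1 << vq;
    }

    int main(void)
    {
            vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
            printf("mask=%#x\n", vhost_net_zcopy_mask);   /* mask=0x2 */
            return 0;
    }
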
129 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
140 ubufs->vq = vq; in vhost_net_ubuf_alloc()
246 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
249 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
254 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
256 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
257 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; in vhost_zerocopy_signal_used()
264 vhost_add_used_and_signal_n(vq->dev, vq, in vhost_zerocopy_signal_used()
265 &vq->heads[nvq->done_idx], add); in vhost_zerocopy_signal_used()
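
Lines 246-265 are the zerocopy completion scan: walk heads[] from done_idx toward upend_idx, stop at the first entry still in flight, clear the finished ones and report them to the guest in a single vhost_add_used_and_signal_n() batch. A stand-alone sketch of that walk; the constants mirror the kernel's VHOST_DMA_* convention (in-progress 1, done 2, failed 3, clear 0) but the ring size and the error accounting at line 254 are simplified away:

    #include <stdio.h>

    #define RING                    8     /* UIO_MAXIOV in the kernel */
    #define VHOST_DMA_CLEAR_LEN     0
    #define VHOST_DMA_IN_PROGRESS   1
    #define VHOST_DMA_DONE_LEN      2
    #define VHOST_DMA_FAILED_LEN    3
    #define VHOST_DMA_IS_DONE(len)  ((len) >= VHOST_DMA_DONE_LEN)

    struct head { unsigned id, len; };

    static int scan_done(struct head *h, int *done_idx, int upend_idx)
    {
            int add = 0, i = *done_idx;

            while (i != upend_idx && VHOST_DMA_IS_DONE(h[i].len)) {
                    h[i].len = VHOST_DMA_CLEAR_LEN;   /* line 257 */
                    i = (i + 1) % RING;
                    add++;
            }
            *done_idx = i;
            return add;   /* batch size for add_used_and_signal_n */
    }

    int main(void)
    {
            struct head h[RING] = {
                    { 0, VHOST_DMA_DONE_LEN },
                    { 1, VHOST_DMA_FAILED_LEN },
                    { 2, VHOST_DMA_IN_PROGRESS },
            };
            int done = 0;

            printf("%d completions\n", scan_done(h, &done, 3));   /* 2 */
            return 0;
    }
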
274 struct vhost_virtqueue *vq = ubufs->vq; in vhost_zerocopy_callback() local
280 vq->heads[ubuf->desc].len = success ? in vhost_zerocopy_callback()
292 vhost_poll_queue(&vq->poll); in vhost_zerocopy_callback()
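
Lines 274-292 are the other half of that handshake: the skb destructor callback finds its slot via the index stashed in ubuf->desc at submit time, flips len to done or failed, and queues the poll handler so the scan above runs soon. A fragment reusing the struct head and VHOST_DMA_* stand-ins from the previous sketch:

    struct ubuf_info { unsigned desc; };   /* simplified stand-in */

    static void zerocopy_callback(struct head *heads,
                                  struct ubuf_info *ubuf, int success)
    {
            /* line 280: record the DMA outcome in the reserved slot */
            heads[ubuf->desc].len = success ? VHOST_DMA_DONE_LEN
                                            : VHOST_DMA_FAILED_LEN;
            /* line 292 then does vhost_poll_queue(&vq->poll) */
    }
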
312 struct vhost_virtqueue *vq) in vhost_net_disable_vq() argument
315 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_disable_vq()
317 if (!vq->private_data) in vhost_net_disable_vq()
323 struct vhost_virtqueue *vq) in vhost_net_enable_vq() argument
326 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_net_enable_vq()
330 sock = vq->private_data; in vhost_net_enable_vq()
338 struct vhost_virtqueue *vq, in vhost_net_tx_get_vq_desc() argument
343 int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), in vhost_net_tx_get_vq_desc()
346 if (r == vq->num && vq->busyloop_timeout) { in vhost_net_tx_get_vq_desc()
348 endtime = busy_clock() + vq->busyloop_timeout; in vhost_net_tx_get_vq_desc()
349 while (vhost_can_busy_poll(vq->dev, endtime) && in vhost_net_tx_get_vq_desc()
350 vhost_vq_avail_empty(vq->dev, vq)) in vhost_net_tx_get_vq_desc()
353 r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), in vhost_net_tx_get_vq_desc()
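
Lines 343-353 show the TX busy-poll: if the first vhost_get_vq_desc() finds the ring empty (r == vq->num) and a busyloop_timeout is configured, spin until the deadline or until the avail ring is no longer empty, then retry once. A user-space sketch of that retry shape; get_desc(), avail_empty() and the microsecond clock are hypothetical stand-ins for vhost_get_vq_desc(), vhost_vq_avail_empty() and busy_clock():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define QUEUE_EMPTY 256                 /* plays the role of vq->num */

    static int refilled;                    /* toy ring: refills once */
    static int get_desc(void)    { return refilled ? 7 : QUEUE_EMPTY; }
    static int avail_empty(void) { refilled = 1; return 0; }

    static uint64_t busy_clock_us(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    static int get_desc_busyloop(uint64_t timeout_us)
    {
            int r = get_desc();

            if (r == QUEUE_EMPTY && timeout_us) {
                    uint64_t end = busy_clock_us() + timeout_us;

                    /* spinning is cheaper than a notify round-trip
                     * when the guest refills within the timeout */
                    while (busy_clock_us() < end && avail_empty())
                            ;               /* kernel adds cpu_relax() */
                    r = get_desc();         /* single retry, line 353 */
            }
            return r;
    }

    int main(void)
    {
            printf("desc=%d\n", get_desc_busyloop(50));   /* desc=7 */
            return 0;
    }
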
365 struct vhost_virtqueue *vq = &nvq->vq; in handle_tx() local
384 mutex_lock(&vq->mutex); in handle_tx()
385 sock = vq->private_data; in handle_tx()
389 if (!vq_iotlb_prefetch(vq)) in handle_tx()
392 vhost_disable_notify(&net->dev, vq); in handle_tx()
400 vhost_zerocopy_signal_used(net, vq); in handle_tx()
405 if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND) in handle_tx()
409 head = vhost_net_tx_get_vq_desc(net, vq, vq->iov, in handle_tx()
410 ARRAY_SIZE(vq->iov), in handle_tx()
416 if (head == vq->num) { in handle_tx()
417 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_tx()
418 vhost_disable_notify(&net->dev, vq); in handle_tx()
424 vq_err(vq, "Unexpected descriptor format for TX: " in handle_tx()
429 len = iov_length(vq->iov, out); in handle_tx()
430 iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len); in handle_tx()
434 vq_err(vq, "Unexpected header len for TX: " in handle_tx()
449 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); in handle_tx()
450 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; in handle_tx()
467 if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) in handle_tx()
472 vhost_discard_vq_desc(vq, 1); in handle_tx()
479 vhost_add_used_and_signal(&net->dev, vq, head, 0); in handle_tx()
481 vhost_zerocopy_signal_used(net, vq); in handle_tx()
484 } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); in handle_tx()
486 mutex_unlock(&vq->mutex); in handle_tx()
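
Lines 405, 449-450, 467 and 472 outline handle_tx's zerocopy bookkeeping: before a send, the descriptor head is parked at upend_idx with len set to VHOST_DMA_IN_PROGRESS; if sendmsg fails before the network stack takes ownership, the slot is unwound and the descriptor handed back via vhost_discard_vq_desc(); line 484's weight check then bounds how long one pass may run. A fragment sketching the park/unwind pair, again reusing the struct head stand-ins above (the real line 405 keeps a VHOST_MAX_PEND reserve, simplified here to one slot):

    static int upend_idx, done_idx;

    static int zcopy_slots_full(void)       /* cf. line 405 */
    {
            return (upend_idx + 1) % RING == done_idx;
    }

    static int park_zcopy(struct head *h, unsigned head_id)
    {
            if (zcopy_slots_full())
                    return -1;
            h[upend_idx].id  = head_id;                 /* line 449 */
            h[upend_idx].len = VHOST_DMA_IN_PROGRESS;   /* line 450 */
            upend_idx = (upend_idx + 1) % RING;
            return 0;
    }

    static void unwind_zcopy(struct head *h)
    {
            /* error path after a failed sendmsg, near line 472 */
            upend_idx = (upend_idx + RING - 1) % RING;
            h[upend_idx].len = VHOST_DMA_CLEAR_LEN;
    }
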
510 struct vhost_virtqueue *vq = &nvq->vq; in vhost_net_rx_peek_head_len() local
514 if (!len && vq->busyloop_timeout) { in vhost_net_rx_peek_head_len()
516 mutex_lock_nested(&vq->mutex, 1); in vhost_net_rx_peek_head_len()
517 vhost_disable_notify(&net->dev, vq); in vhost_net_rx_peek_head_len()
520 endtime = busy_clock() + vq->busyloop_timeout; in vhost_net_rx_peek_head_len()
524 vhost_vq_avail_empty(&net->dev, vq)) in vhost_net_rx_peek_head_len()
529 if (!vhost_vq_avail_empty(&net->dev, vq)) in vhost_net_rx_peek_head_len()
530 vhost_poll_queue(&vq->poll); in vhost_net_rx_peek_head_len()
531 else if (unlikely(vhost_enable_notify(&net->dev, vq))) { in vhost_net_rx_peek_head_len()
532 vhost_disable_notify(&net->dev, vq); in vhost_net_rx_peek_head_len()
533 vhost_poll_queue(&vq->poll); in vhost_net_rx_peek_head_len()
536 mutex_unlock(&vq->mutex); in vhost_net_rx_peek_head_len()
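
Lines 510-536 are the RX-side busy loop with a twist: while waiting for socket data it also watches the TX virtqueue, so TX kicks arriving during the spin get requeued rather than lost. The mutex_lock_nested(..., 1) at line 516 pairs with handle_rx's mutex_lock_nested(..., 0) at line 650: the RX path legitimately holds both vq mutexes at once, and the lockdep subclass tells the lock validator the double-lock is intentional. The ladder at lines 529-533 is the classic notify re-check; a sketch with hypothetical stand-ins for the vhost helpers:

    #include <stdio.h>

    static int  avail_nonempty(void) { return 0; }
    static int  enable_notify(void)  { return 1; }  /* pretend we raced */
    static void disable_notify(void) { }
    static void queue_poll(void)     { puts("requeue handler"); }

    static void finish_busyloop(void)
    {
            if (avail_nonempty()) {
                    queue_poll();                   /* line 530 */
            } else if (enable_notify()) {
                    /* work slipped in while notification was off:
                     * turn it back off and requeue (lines 532-533) */
                    disable_notify();
                    queue_poll();
            }
    }

    int main(void) { finish_busyloop(); return 0; }
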
554 static int get_rx_bufs(struct vhost_virtqueue *vq, in get_rx_bufs() argument
577 r = vhost_get_vq_desc(vq, vq->iov + seg, in get_rx_bufs()
578 ARRAY_SIZE(vq->iov) - seg, &out, in get_rx_bufs()
584 if (d == vq->num) { in get_rx_bufs()
589 vq_err(vq, "unexpected descriptor format for RX: " in get_rx_bufs()
598 heads[headcount].id = cpu_to_vhost32(vq, d); in get_rx_bufs()
599 len = iov_length(vq->iov + seg, in); in get_rx_bufs()
600 heads[headcount].len = cpu_to_vhost32(vq, len); in get_rx_bufs()
605 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); in get_rx_bufs()
617 vhost_discard_vq_desc(vq, headcount); in get_rx_bufs()
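
Lines 554-617, get_rx_bufs(): keep pulling descriptors until the gathered buffers cover datalen, record each buffer's id and length in heads[], then trim the last entry (line 605) so the reported lengths sum to exactly the packet size; if the ring runs dry mid-packet (d == vq->num at line 584), everything gathered so far is returned with vhost_discard_vq_desc() at line 617. A stand-alone sketch of the accumulate-and-trim arithmetic with toy buffer capacities:

    #include <stdio.h>

    struct rxhead { unsigned id, len; };

    /* bufs[]: capacities of the guest buffers, in ring order */
    static int get_rx_bufs_sketch(struct rxhead *heads, int max,
                                  const unsigned *bufs, int nbufs,
                                  int datalen)
    {
            int headcount = 0;

            while (datalen > 0) {
                    if (headcount == nbufs || headcount == max)
                            return -1;   /* ring dry: caller discards */
                    heads[headcount].id  = headcount;
                    heads[headcount].len = bufs[headcount];
                    datalen -= bufs[headcount];
                    headcount++;
            }
            /* line 605: datalen is now <= 0, so this trims the last
             * buffer down to what the packet actually used */
            heads[headcount - 1].len += datalen;
            return headcount;
    }

    int main(void)
    {
            struct rxhead h[4];
            unsigned bufs[] = { 1500, 1500, 1500 };
            int n = get_rx_bufs_sketch(h, 4, bufs, 3, 2000);

            printf("%d buffers, last len %u\n", n, h[n - 1].len);
            /* 2 buffers, last len 500 */
            return 0;
    }
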
626 struct vhost_virtqueue *vq = &nvq->vq; in handle_rx() local
650 mutex_lock_nested(&vq->mutex, 0); in handle_rx()
651 sock = vq->private_data; in handle_rx()
655 if (!vq_iotlb_prefetch(vq)) in handle_rx()
658 vhost_disable_notify(&net->dev, vq); in handle_rx()
659 vhost_net_disable_vq(net, vq); in handle_rx()
664 vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? in handle_rx()
665 vq->log : NULL; in handle_rx()
666 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); in handle_rx()
674 headcount = get_rx_bufs(vq, vq->heads, vhost_len, in handle_rx()
682 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); in handle_rx()
690 if (unlikely(vhost_enable_notify(&net->dev, vq))) { in handle_rx()
693 vhost_disable_notify(&net->dev, vq); in handle_rx()
701 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); in handle_rx()
717 vhost_discard_vq_desc(vq, headcount); in handle_rx()
724 vq_err(vq, "Unable to write vnet_hdr " in handle_rx()
725 "at addr %p\n", vq->iov->iov_base); in handle_rx()
736 num_buffers = cpu_to_vhost16(vq, headcount); in handle_rx()
740 vq_err(vq, "Failed num_buffers write"); in handle_rx()
741 vhost_discard_vq_desc(vq, headcount); in handle_rx()
744 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, in handle_rx()
747 vhost_log_write(vq, vq_log, log, vhost_len); in handle_rx()
749 } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len))); in handle_rx()
751 vhost_net_enable_vq(net, vq); in handle_rx()
754 mutex_unlock(&vq->mutex); in handle_rx()
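
Lines 666 and 736-741: when the guest negotiates VIRTIO_NET_F_MRG_RXBUF, one packet may span several buffers, and handle_rx must write the count into the header's num_buffers field (cpu_to_vhost16() handles cross-endian guests); if the write fails, the buffers are discarded at line 741. The wire layout the guest sees, abridged from the virtio spec:

    #include <stdint.h>
    #include <stdio.h>

    struct virtio_net_hdr_mrg_rxbuf {
            uint8_t  flags;
            uint8_t  gso_type;
            uint16_t hdr_len;
            uint16_t gso_size;
            uint16_t csum_start;
            uint16_t csum_offset;
            uint16_t num_buffers;   /* written at lines 736-740 */
    };

    int main(void)
    {
            struct virtio_net_hdr_mrg_rxbuf hdr = { 0 };

            /* headcount comes from get_rx_bufs(); cpu_to_vhost16()
             * would byteswap here if guest endianness differs */
            hdr.num_buffers = 2;
            printf("num_buffers=%u\n", hdr.num_buffers);
            return 0;
    }
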
759 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_tx_kick() local
761 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_tx_kick()
768 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, in handle_rx_kick() local
770 struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); in handle_rx_kick()
809 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
810 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; in vhost_net_open()
811 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
812 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; in vhost_net_open()
833 struct vhost_virtqueue *vq) in vhost_net_stop_vq() argument
837 mutex_lock(&vq->mutex); in vhost_net_stop_vq()
838 sock = vq->private_data; in vhost_net_stop_vq()
839 vhost_net_disable_vq(n, vq); in vhost_net_stop_vq()
840 vq->private_data = NULL; in vhost_net_stop_vq()
841 mutex_unlock(&vq->mutex); in vhost_net_stop_vq()
848 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
849 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); in vhost_net_stop()
855 vhost_poll_flush(&n->vqs[index].vq.poll); in vhost_net_flush_vq()
863 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
865 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
868 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
871 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
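
Lines 833-841, vhost_net_stop_vq(): detach the backend under the vq mutex and hand the socket back so the caller (vhost_net_stop, lines 848-849) can drop its reference outside the lock. A sketch of that detach-and-return shape, with the mutex and the vhost disable call stubbed out as comments:

    #include <stdio.h>

    struct sock;                            /* opaque */
    struct vq { void *private_data; };

    static struct sock *stop_vq(struct vq *vq)
    {
            struct sock *sock;

            /* mutex_lock(&vq->mutex);           line 837 */
            sock = vq->private_data;          /* line 838 */
            /* vhost_net_disable_vq(n, vq);      line 839 */
            vq->private_data = NULL;          /* line 840 */
            /* mutex_unlock(&vq->mutex);         line 841 */
            return sock;
    }

    int main(void)
    {
            struct vq vq = { .private_data = (void *)0x1 };

            printf("sock=%p\n", (void *)stop_vq(&vq));
            printf("vq=%p\n", vq.private_data);   /* now NULL */
            return 0;
    }
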
959 struct vhost_virtqueue *vq; in vhost_net_set_backend() local
973 vq = &n->vqs[index].vq; in vhost_net_set_backend()
975 mutex_lock(&vq->mutex); in vhost_net_set_backend()
978 if (!vhost_vq_access_ok(vq)) { in vhost_net_set_backend()
989 oldsock = vq->private_data; in vhost_net_set_backend()
991 ubufs = vhost_net_ubuf_alloc(vq, in vhost_net_set_backend()
998 vhost_net_disable_vq(n, vq); in vhost_net_set_backend()
999 vq->private_data = sock; in vhost_net_set_backend()
1000 r = vhost_vq_init_access(vq); in vhost_net_set_backend()
1003 r = vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
1015 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1019 mutex_lock(&vq->mutex); in vhost_net_set_backend()
1020 vhost_zerocopy_signal_used(n, vq); in vhost_net_set_backend()
1021 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
1033 vq->private_data = oldsock; in vhost_net_set_backend()
1034 vhost_net_enable_vq(n, vq); in vhost_net_set_backend()
1041 mutex_unlock(&vq->mutex); in vhost_net_set_backend()
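
Lines 959-1041, vhost_net_set_backend(): under the vq mutex, remember the old socket, install the new one, re-initialize ring access and re-enable polling; any failure restores the old backend before unlocking (lines 1033-1034). A stand-alone sketch of that swap-with-rollback ordering, with the locking and the vhost init/enable helpers replaced by hypothetical stubs:

    #include <stdio.h>

    struct vqb { void *private_data; };

    static int init_access(struct vqb *vq) { (void)vq; return 0; }
    static int enable_vq(struct vqb *vq)   { (void)vq; return 0; }

    static int set_backend(struct vqb *vq, void *sock, void **oldsock)
    {
            int r = 0;

            *oldsock = vq->private_data;            /* line 989 */
            if (sock != *oldsock) {
                    /* disable polling, swap, re-init, re-enable */
                    vq->private_data = sock;        /* line 999 */
                    r = init_access(vq);            /* line 1000 */
                    if (!r)
                            r = enable_vq(vq);      /* line 1003 */
                    if (r) {
                            /* rollback, lines 1033-1034 */
                            vq->private_data = *oldsock;
                            enable_vq(vq);
                    }
            }
            return r;
    }

    int main(void)
    {
            struct vqb vq = { 0 };
            void *old;

            printf("r=%d\n", set_backend(&vq, (void *)0x1, &old));
            return 0;
    }
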
1106 mutex_lock(&n->vqs[i].vq.mutex); in vhost_net_set_features()
1107 n->vqs[i].vq.acked_features = features; in vhost_net_set_features()
1110 mutex_unlock(&n->vqs[i].vq.mutex); in vhost_net_set_features()