
Search for refs:vqs (results 1 – 25 of 26), sorted by relevance

/drivers/vhost/
net.c  103 struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; member
163 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
164 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
177 n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) * in vhost_net_set_ubuf_info()
179 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
196 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
197 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
198 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
199 n->vqs[i].vhost_hlen = 0; in vhost_net_vq_reset()
200 n->vqs[i].sock_hlen = 0; in vhost_net_vq_reset()
[all …]
vsock.c  34 struct vhost_virtqueue vqs[2]; member
79 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
189 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
208 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt()
250 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
323 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
421 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
422 vq = &vsock->vqs[i]; in vhost_vsock_start()
448 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
449 vq = &vsock->vqs[i]; in vhost_vsock_start()
[all …]
test.c  33 struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX]; member
40 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq()
106 struct vhost_virtqueue **vqs; in vhost_test_open() local
110 vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL); in vhost_test_open()
111 if (!vqs) { in vhost_test_open()
117 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; in vhost_test_open()
118 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; in vhost_test_open()
119 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); in vhost_test_open()
140 *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ); in vhost_test_stop()
145 vhost_poll_flush(&n->vqs[index].poll); in vhost_test_flush_vq()
[all …]
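
The test.c hits above show the open-time setup every vhost device repeats: the device structure embeds its vhost_virtqueue array, a separate array of pointers to those queues is allocated, each queue gets its kick handler, and the set is registered with vhost_dev_init(). A trimmed sketch of that sequence; the containing structure name and error handling here are illustrative, not from the listing:

    struct vhost_test_dev {                              /* illustrative name */
            struct vhost_dev dev;
            struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
    };

    static int test_open_sketch(struct vhost_test_dev *n)
    {
            struct vhost_virtqueue **vqs;

            vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
            if (!vqs)
                    return -ENOMEM;

            vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
            n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
            vhost_dev_init(&n->dev, vqs, VHOST_TEST_VQ_MAX);
            return 0;
    }
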
scsi.c  195 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; member
235 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
240 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
242 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
245 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
246 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
408 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
447 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
498 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
557 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
[all …]
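
The scsi.c hits describe a double-buffered in-flight counter per queue: inflight_idx picks one of two inflights[] slots, and vhost_scsi_init_inflight() flips it with idx ^ 1 so new commands charge the fresh slot while the old one drains. A rough sketch of that flip using the field names visible above; the inflight structure type is assumed, and the per-queue locking and reinitialisation of the new slot are omitted:

    static void init_inflight_sketch(struct vhost_scsi *vs,
                                     struct vhost_scsi_inflight *old_inflight[])
    {
            int i, idx;

            for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                    idx = vs->vqs[i].inflight_idx;
                    if (old_inflight)
                            old_inflight[i] = &vs->vqs[i].inflights[idx]; /* draining slot */
                    vs->vqs[i].inflight_idx = idx ^ 1;   /* new requests use the other slot */
            }
    }
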
vhost.c  371 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
383 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_alloc_iovecs()
392 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_free_iovecs()
396 struct vhost_virtqueue **vqs, int nvqs) in vhost_dev_init() argument
401 dev->vqs = vqs; in vhost_dev_init()
418 vq = dev->vqs[i]; in vhost_dev_init()
545 dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
554 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) { in vhost_dev_stop()
555 vhost_poll_stop(&dev->vqs[i]->poll); in vhost_dev_stop()
556 vhost_poll_flush(&dev->vqs[i]->poll); in vhost_dev_stop()
[all …]
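
vhost.c itself only walks the caller-supplied pointer array: vhost_dev_init() stores it in dev->vqs, and teardown paths such as vhost_dev_stop() loop over it, stopping and flushing the poll of every queue that has both a kick eventfd and a kick handler. In sketch form, following the vhost_dev_stop() hits above:

    /* dev->nvqs bounds the array handed to vhost_dev_init() */
    for (i = 0; i < dev->nvqs; ++i) {
            if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
                    vhost_poll_stop(&dev->vqs[i]->poll);
                    vhost_poll_flush(&dev->vqs[i]->poll);
            }
    }
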
vhost.h  155 struct vhost_virtqueue **vqs; member
169 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
/drivers/virtio/
virtio_pci_common.c  252 vp_dev->vqs[index] = info; in vp_setup_vq()
263 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_del_vq()
281 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in vp_del_vqs()
282 info = vp_dev->vqs[vq->index]; in vp_del_vqs()
292 kfree(vp_dev->vqs); in vp_del_vqs()
293 vp_dev->vqs = NULL; in vp_del_vqs()
297 struct virtqueue *vqs[], in vp_try_to_find_vqs() argument
307 vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); in vp_try_to_find_vqs()
308 if (!vp_dev->vqs) in vp_try_to_find_vqs()
337 vqs[i] = NULL; in vp_try_to_find_vqs()
[all …]
virtio_pci_common.h  83 struct virtio_pci_vq_info **vqs; member
134 struct virtqueue *vqs[],
virtio_balloon.c  410 struct virtqueue *vqs[3]; in init_vqs() local
420 err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); in init_vqs()
424 vb->inflate_vq = vqs[0]; in init_vqs()
425 vb->deflate_vq = vqs[1]; in init_vqs()
429 vb->stats_vq = vqs[2]; in init_vqs()
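
The balloon driver is the simplest consumer of the find_vqs() config operation: parallel callbacks[] and names[] arrays describe the queues, and on success the transport fills vqs[] in the same order. A condensed sketch of init_vqs(), assuming the five-argument find_vqs() used throughout this tree; the callback and queue names follow the balloon driver of this era, but only the array assignments appear in the listing above:

    struct virtqueue *vqs[3];
    vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
    static const char * const names[] = { "inflate", "deflate", "stats" };
    int err, nvqs;

    /* the stats queue exists only if the feature was negotiated */
    nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
    err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
    if (err)
            return err;

    vb->inflate_vq = vqs[0];
    vb->deflate_vq = vqs[1];
    if (nvqs == 3)
            vb->stats_vq = vqs[2];
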
virtio_mmio.c  346 list_for_each_entry_safe(vq, n, &vdev->vqs, list) in vm_del_vqs()
447 struct virtqueue *vqs[], in vm_find_vqs() argument
461 vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); in vm_find_vqs()
462 if (IS_ERR(vqs[i])) { in vm_find_vqs()
464 return PTR_ERR(vqs[i]); in vm_find_vqs()
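
On the transport side (mmio here, and kvm_virtio, virtio_ccw, remoteproc and vop below), find_vqs() is essentially a loop over nvqs that calls a per-queue setup helper and unwinds everything already created when one queue fails. A stripped-down sketch of that shape; interrupt registration is omitted, and the exact names[] const-qualification varies between kernel versions:

    static int find_vqs_sketch(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char * const names[])
    {
            int i;

            for (i = 0; i < nvqs; ++i) {
                    vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
                    if (IS_ERR(vqs[i])) {
                            vm_del_vqs(vdev);      /* tear down queues created so far */
                            return PTR_ERR(vqs[i]);
                    }
            }
            return 0;
    }
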
virtio_input.c  170 struct virtqueue *vqs[2]; in virtinput_init_vqs() local
176 err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names); in virtinput_init_vqs()
179 vi->evt = vqs[0]; in virtinput_init_vqs()
180 vi->sts = vqs[1]; in virtinput_init_vqs()
virtio_pci_modern.c  390 struct virtqueue *vqs[], in vp_modern_find_vqs() argument
396 int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); in vp_modern_find_vqs()
404 list_for_each_entry(vq, &vdev->vqs, list) { in vp_modern_find_vqs()
virtio.c  321 INIT_LIST_HEAD(&dev->vqs); in register_virtio_device()
/drivers/block/
virtio_blk.c  51 struct virtio_blk_vq *vqs; member
145 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
148 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
159 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
214 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
215 err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); in virtio_queue_rq()
217 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
219 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
227 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq()
229 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
[all …]
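
virtio_blk keeps one virtio_blk_vq per hardware queue, each with its own lock, so submission and completion for a given qid serialise only on vblk->vqs[qid].lock. A condensed sketch of the submission side following the virtio_queue_rq() hits above; the distinction between busy and error return codes and the hw-queue stop are left out:

    bool notify = false;
    unsigned long flags;

    spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
    err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
    if (err) {
            /* ring full (or OOM): kick whatever is queued and report busy */
            virtqueue_kick(vblk->vqs[qid].vq);
            spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
            return BLK_MQ_RQ_QUEUE_BUSY;
    }

    /* batch notifications: only kick on the last request of the batch */
    if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
            notify = true;
    spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

    if (notify)
            virtqueue_notify(vblk->vqs[qid].vq);
    return BLK_MQ_RQ_QUEUE_OK;
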
/drivers/net/ethernet/intel/i40evf/
i40evf_virtchnl.c  291 struct i40e_virtchnl_queue_select vqs; in i40evf_enable_queues() local
300 vqs.vsi_id = adapter->vsi_res->vsi_id; in i40evf_enable_queues()
301 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; in i40evf_enable_queues()
302 vqs.rx_queues = vqs.tx_queues; in i40evf_enable_queues()
305 (u8 *)&vqs, sizeof(vqs)); in i40evf_enable_queues()
316 struct i40e_virtchnl_queue_select vqs; in i40evf_disable_queues() local
325 vqs.vsi_id = adapter->vsi_res->vsi_id; in i40evf_disable_queues()
326 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; in i40evf_disable_queues()
327 vqs.rx_queues = vqs.tx_queues; in i40evf_disable_queues()
330 (u8 *)&vqs, sizeof(vqs)); in i40evf_disable_queues()
[all …]
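
In the i40evf hits, vqs is not a virtqueue array but a struct i40e_virtchnl_queue_select message: the VF fills per-queue bitmaps and its VSI id and sends the result to the PF over the virtchnl mailbox. A sketch of the enable path; only the message fill and the (u8 *)&vqs, sizeof(vqs) send tail appear in the listing, so the opcode and send-helper names here are assumptions about the driver's conventional naming:

    struct i40e_virtchnl_queue_select vqs;

    vqs.vsi_id = adapter->vsi_res->vsi_id;
    /* one bit per active queue pair, e.g. 4 active queues -> 0xf */
    vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
    vqs.rx_queues = vqs.tx_queues;

    i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                       (u8 *)&vqs, sizeof(vqs));
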
/drivers/remoteproc/
remoteproc_virtio.c  129 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in __rproc_virtio_del_vqs()
143 struct virtqueue *vqs[], in rproc_virtio_find_vqs() argument
150 vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]); in rproc_virtio_find_vqs()
151 if (IS_ERR(vqs[i])) { in rproc_virtio_find_vqs()
152 ret = PTR_ERR(vqs[i]); in rproc_virtio_find_vqs()
/drivers/s390/virtio/
kvm_virtio.c  251 list_for_each_entry_safe(vq, n, &vdev->vqs, list) in kvm_del_vqs()
256 struct virtqueue *vqs[], in kvm_find_vqs() argument
268 vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); in kvm_find_vqs()
269 if (IS_ERR(vqs[i])) in kvm_find_vqs()
276 return PTR_ERR(vqs[i]); in kvm_find_vqs()
virtio_ccw.c  248 static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, in get_airq_indicator() argument
274 (unsigned long)vqs[j]); in get_airq_indicator()
487 list_for_each_entry_safe(vq, n, &vdev->vqs, list) in virtio_ccw_del_vqs()
589 struct virtqueue *vqs[], int nvqs, in virtio_ccw_register_adapter_ind() argument
602 thinint_area->indicator = get_airq_indicator(vqs, nvqs, in virtio_ccw_register_adapter_ind()
637 struct virtqueue *vqs[], in virtio_ccw_find_vqs() argument
651 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], in virtio_ccw_find_vqs()
653 if (IS_ERR(vqs[i])) { in virtio_ccw_find_vqs()
654 ret = PTR_ERR(vqs[i]); in virtio_ccw_find_vqs()
655 vqs[i] = NULL; in virtio_ccw_find_vqs()
[all …]
/drivers/gpu/drm/virtio/
virtgpu_kms.c  137 struct virtqueue *vqs[2]; in virtio_gpu_driver_load() local
174 ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, in virtio_gpu_driver_load()
180 vgdev->ctrlq.vq = vqs[0]; in virtio_gpu_driver_load()
181 vgdev->cursorq.vq = vqs[1]; in virtio_gpu_driver_load()
/drivers/misc/mic/vop/
vop_main.c  269 list_for_each_entry_safe(vq, n, &dev->vqs, list) in vop_del_vqs()
375 struct virtqueue *vqs[], in vop_find_vqs() argument
391 vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]); in vop_find_vqs()
392 if (IS_ERR(vqs[i])) { in vop_find_vqs()
393 err = PTR_ERR(vqs[i]); in vop_find_vqs()
444 list_for_each_entry(vq, &vdev->vdev.vqs, list) in vop_virtio_intr_handler()
/drivers/scsi/
virtio_scsi.c  947 struct virtqueue **vqs; in virtscsi_init() local
950 vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); in virtscsi_init()
954 if (!callbacks || !vqs || !names) { in virtscsi_init()
969 err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); in virtscsi_init()
973 virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); in virtscsi_init()
974 virtscsi_init_vq(&vscsi->event_vq, vqs[1]); in virtscsi_init()
977 vqs[i]); in virtscsi_init()
987 kfree(vqs); in virtscsi_init()
/drivers/char/
virtio_console.c  1901 struct virtqueue **vqs; in init_vqs() local
1908 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); in init_vqs()
1915 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || in init_vqs()
1948 err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, in init_vqs()
1955 portdev->in_vqs[0] = vqs[0]; in init_vqs()
1956 portdev->out_vqs[0] = vqs[1]; in init_vqs()
1959 portdev->c_ivq = vqs[j]; in init_vqs()
1960 portdev->c_ovq = vqs[j + 1]; in init_vqs()
1964 portdev->in_vqs[i] = vqs[j]; in init_vqs()
1965 portdev->out_vqs[i] = vqs[j + 1]; in init_vqs()
[all …]
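
virtio_console requests its queues in in/out pairs: port 0's pair comes first, the multiport control pair sits right behind it, and every further port takes the next pair, which is why the mapping reads vqs[j] and vqs[j + 1]. A sketch of that unpacking with the use_multiport() branch folded in:

    j = 0;
    portdev->in_vqs[0] = vqs[0];
    portdev->out_vqs[0] = vqs[1];
    j += 2;

    /* control queues follow the port-0 pair when multiport is negotiated */
    portdev->c_ivq = vqs[j];
    portdev->c_ovq = vqs[j + 1];

    for (i = 1; i < nr_ports; i++) {
            j += 2;
            portdev->in_vqs[i] = vqs[j];
            portdev->out_vqs[i] = vqs[j + 1];
    }
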
/drivers/net/
virtio_net.c  1597 struct virtqueue **vqs; in virtnet_find_vqs() local
1610 vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); in virtnet_find_vqs()
1611 if (!vqs) in virtnet_find_vqs()
1636 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, in virtnet_find_vqs()
1642 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
1648 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1649 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
1654 kfree(vqs); in virtnet_find_vqs()
1663 kfree(vqs); in virtnet_find_vqs()
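
virtio_net sizes the vqs/callbacks/names arrays at runtime because the queue count depends on the negotiated number of queue pairs: receive and transmit queues alternate and an optional control queue takes the last slot, so small helpers map a queue-pair index onto vqs[]. In sketch form, assuming rxq2vq(i) expands to i * 2 and txq2vq(i) to i * 2 + 1 as in this driver:

    /* total_vqs = max_queue_pairs * 2, plus one if a control vq was negotiated */
    if (vi->has_cvq)
            vi->cvq = vqs[total_vqs - 1];

    for (i = 0; i < vi->max_queue_pairs; i++) {
            vi->rq[i].vq = vqs[rxq2vq(i)];   /* even slots: receive queues  */
            vi->sq[i].vq = vqs[txq2vq(i)];   /* odd slots: transmit queues  */
    }

    kfree(vqs);        /* only the virtqueue pointers matter past this point */
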
/drivers/rpmsg/
virtio_rpmsg_bus.c  853 struct virtqueue *vqs[2]; in rpmsg_probe() local
872 err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); in rpmsg_probe()
876 vrp->rvq = vqs[0]; in rpmsg_probe()
877 vrp->svq = vqs[1]; in rpmsg_probe()
/drivers/net/ethernet/intel/i40e/
i40e_virtchnl_pf.c  1739 struct i40e_virtchnl_queue_select *vqs = in i40e_vc_enable_queues_msg() local
1742 u16 vsi_id = vqs->vsi_id; in i40e_vc_enable_queues_msg()
1755 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { in i40e_vc_enable_queues_msg()
1779 struct i40e_virtchnl_queue_select *vqs = in i40e_vc_disable_queues_msg() local
1789 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { in i40e_vc_disable_queues_msg()
1794 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { in i40e_vc_disable_queues_msg()
1818 struct i40e_virtchnl_queue_select *vqs = in i40e_vc_get_stats_msg() local
1832 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { in i40e_vc_get_stats_msg()
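
On the PF side, i40e treats the same queue_select structure as an untrusted message from a VF: before acting it checks that the VSI id belongs to that VF and rejects a request whose rx and tx bitmaps are both empty. A fragment-level sketch of those checks from i40e_vc_enable_queues_msg(); the reply sent back through the error_param path is omitted:

    struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
    i40e_status aq_ret = 0;

    if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
            aq_ret = I40E_ERR_PARAM;         /* VSI does not belong to this VF */
            goto error_param;
    }

    if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
            aq_ret = I40E_ERR_PARAM;         /* nothing selected */
            goto error_param;
    }
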
