Searched refs:vqs (Results 1 – 25 of 44) sorted by relevance

/kernel/linux/linux-5.10/drivers/virtio/
virtio_pci_common.c
203 vp_dev->vqs[index] = info; in vp_setup_vq()
214 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_del_vq()
232 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in vp_del_vqs()
234 int v = vp_dev->vqs[vq->index]->msix_vector; in vp_del_vqs()
275 kfree(vp_dev->vqs); in vp_del_vqs()
276 vp_dev->vqs = NULL; in vp_del_vqs()
280 struct virtqueue *vqs[], vq_callback_t *callbacks[], in vp_find_vqs_msix() argument
289 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); in vp_find_vqs_msix()
290 if (!vp_dev->vqs) in vp_find_vqs_msix()
313 vqs[i] = NULL; in vp_find_vqs_msix()
[all …]
virtio_vdpa.c
244 list_for_each_entry_safe(vq, n, &vdev->vqs, list) in virtio_vdpa_del_vqs()
249 struct virtqueue *vqs[], in virtio_vdpa_find_vqs() argument
263 vqs[i] = NULL; in virtio_vdpa_find_vqs()
267 vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, in virtio_vdpa_find_vqs()
270 if (IS_ERR(vqs[i])) { in virtio_vdpa_find_vqs()
271 err = PTR_ERR(vqs[i]); in virtio_vdpa_find_vqs()
virtio_pci_common.h
80 struct virtio_pci_vq_info **vqs; member
131 struct virtqueue *vqs[], vq_callback_t *callbacks[],
virtio_mmio.c
342 list_for_each_entry_safe(vq, n, &vdev->vqs, list) in vm_del_vqs()
459 struct virtqueue *vqs[], in vm_find_vqs() argument
479 vqs[i] = NULL; in vm_find_vqs()
483 vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], in vm_find_vqs()
485 if (IS_ERR(vqs[i])) { in vm_find_vqs()
487 return PTR_ERR(vqs[i]); in vm_find_vqs()
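The virtio_mmio, virtio_vdpa and (further down) remoteproc hits all follow the same find_vqs shape: walk the requested queues, leave NULL in slots whose name is NULL, create each queue in turn, and unwind with PTR_ERR() on failure. A minimal sketch of that loop, with hypothetical helpers example_setup_vq()/example_del_vqs() standing in for the per-transport ones:

    static int example_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                                struct virtqueue *vqs[],
                                vq_callback_t *callbacks[],
                                const char * const names[])
    {
            int i, queue_idx = 0;

            for (i = 0; i < nvqs; i++) {
                    if (!names[i]) {
                            vqs[i] = NULL;  /* caller does not want this slot */
                            continue;
                    }
                    vqs[i] = example_setup_vq(vdev, queue_idx++,
                                              callbacks[i], names[i]);
                    if (IS_ERR(vqs[i])) {
                            example_del_vqs(vdev);  /* tear down queues created so far */
                            return PTR_ERR(vqs[i]);
                    }
            }
            return 0;
    }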
/kernel/linux/linux-5.10/drivers/vhost/
test.c
38 struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX]; member
45 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq()
109 struct vhost_virtqueue **vqs; in vhost_test_open() local
113 vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL); in vhost_test_open()
114 if (!vqs) { in vhost_test_open()
120 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; in vhost_test_open()
121 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; in vhost_test_open()
122 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, in vhost_test_open()
144 *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ); in vhost_test_stop()
149 vhost_poll_flush(&n->vqs[index].poll); in vhost_test_flush_vq()
[all …]
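The open path in these hits is the standard vhost bring-up: allocate an array of vhost_virtqueue pointers, point each entry at the queue embedded in the driver's own struct, wire up handle_kick, then hand the array to vhost_dev_init(). A sketch of that sequence; the call in the hit above is truncated, so the trailing weight arguments here are placeholders, with the parameter list assumed from the 5.10 vhost_dev_init() prototype:

    struct vhost_virtqueue **vqs;

    /* vhost_dev_init() takes pointers; the queues themselves live
     * inside the driver's own structure (n->vqs[] in the hits). */
    vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
    if (!vqs)
            return -ENOMEM;

    vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
    n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
    vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
                   pkt_weight, byte_weight, true, NULL);  /* weights: placeholders */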
net.c
133 struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; member
274 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
275 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
288 n->vqs[i].ubuf_info = in vhost_net_set_ubuf_info()
290 sizeof(*n->vqs[i].ubuf_info), in vhost_net_set_ubuf_info()
292 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
309 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
310 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
311 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
312 n->vqs[i].vhost_hlen = 0; in vhost_net_vq_reset()
[all …]
vsock.c
42 struct vhost_virtqueue vqs[2]; member
87 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
240 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
306 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
384 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
532 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
533 vq = &vsock->vqs[i]; in vhost_vsock_start()
564 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
565 vq = &vsock->vqs[i]; in vhost_vsock_start()
587 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_stop()
[all …]
scsi.c
203 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; member
265 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
270 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
272 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
275 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
276 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
449 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
479 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
530 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
584 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
[all …]
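The inflight hits (scsi.c lines 265–276 above) toggle between two counters with idx ^ 1, a small double buffer: the old slot keeps counting requests that were already in flight while new requests charge the other slot. In sketch form:

    /* Double-buffered inflight accounting, as in the scsi.c hits:
     * idx ^ 1 flips between slots 0 and 1. */
    idx = vs->vqs[i].inflight_idx;
    old_inflight[i] = &vs->vqs[i].inflights[idx];      /* drain this one */
    vs->vqs[i].inflight_idx = idx ^ 1;                 /* 0 <-> 1 */
    new_inflight = &vs->vqs[i].inflights[idx ^ 1];     /* new requests go here */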
vhost.c
298 __vhost_vq_meta_reset(d->vqs[i]); in vhost_vq_meta_reset()
399 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
414 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_alloc_iovecs()
423 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_free_iovecs()
468 struct vhost_virtqueue **vqs, int nvqs, in vhost_dev_init() argument
477 dev->vqs = vqs; in vhost_dev_init()
498 vq = dev->vqs[i]; in vhost_dev_init()
655 dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
664 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) { in vhost_dev_stop()
665 vhost_poll_stop(&dev->vqs[i]->poll); in vhost_dev_stop()
[all …]
vdpa.c
39 struct vhost_virtqueue *vqs; member
64 ops->kick_vq(v->vdpa, vq - v->vqs); in handle_vq_kick()
91 struct vhost_virtqueue *vq = &v->vqs[qid]; in vhost_vdpa_setup_vq_irq()
114 struct vhost_virtqueue *vq = &v->vqs[qid]; in vhost_vdpa_unsetup_vq_irq()
382 vq = &v->vqs[idx]; in vhost_vdpa_vring_ioctl()
866 struct vhost_virtqueue **vqs; in vhost_vdpa_open() local
878 vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL); in vhost_vdpa_open()
879 if (!vqs) { in vhost_vdpa_open()
886 vqs[i] = &v->vqs[i]; in vhost_vdpa_open()
887 vqs[i]->handle_kick = handle_vq_kick; in vhost_vdpa_open()
[all …]
/kernel/linux/linux-5.10/drivers/block/
virtio_blk.c
71 struct virtio_blk_vq *vqs; member
183 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
186 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
200 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
206 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
277 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
278 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); in virtio_queue_rq()
280 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
286 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
297 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq()
[all …]
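The virtio_blk hits show the per-hw-queue locking discipline: each virtio_blk_vq pairs a virtqueue with its own spinlock, so submissions on different blk-mq hardware queues never contend. A sketch of the submit path assembled from those hits, with error handling condensed to a comment:

    bool notify = false;

    spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
    err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
    /* on failure the real code stops the hw queue and maps err to a
     * BLK_STS_* status; omitted here */
    if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
            notify = true;
    spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

    if (notify)
            virtqueue_notify(vblk->vqs[qid].vq);  /* kick outside the lock */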
/kernel/linux/linux-5.10/drivers/vdpa/vdpa_sim/
vdpa_sim.c
83 struct vdpasim_virtqueue *vqs; member
132 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
160 vdpasim_vq_reset(&vdpasim->vqs[i]); in vdpasim_reset()
175 struct vdpasim_virtqueue *txq = &vdpasim->vqs[1]; in vdpasim_work()
176 struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0]; in vdpasim_work()
392 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
394 if (!vdpasim->vqs) in vdpasim_create()
416 vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu); in vdpasim_create()
436 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
448 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_num()
[all …]
/kernel/linux/linux-5.10/net/vmw_vsock/
virtio_transport.c
30 struct virtqueue *vqs[VSOCK_VQ_MAX]; member
99 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_pkt_work()
141 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_send_pkt_work()
229 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_cancel_pkt()
253 vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_vsock_rx_fill()
293 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_tx_work()
320 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_more_replies()
336 vq = vsock->vqs[VSOCK_VQ_EVENT]; in virtio_vsock_event_fill_one()
354 virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); in virtio_vsock_event_fill()
397 vq = vsock->vqs[VSOCK_VQ_EVENT]; in virtio_transport_event_work()
[all …]
/kernel/linux/linux-5.10/fs/fuse/
virtio_fs.c
54 struct virtio_fs_vq *vqs; member
127 return &fs->vqs[vq->index]; in vq_to_fsvq()
154 kfree(vfs->vqs); in release_virtio_fs_obj()
200 fsvq = &fs->vqs[i]; in virtio_fs_drain_all_queues_locked()
224 fsvq = &fs->vqs[i]; in virtio_fs_start_all_queues()
281 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_free_devs()
672 struct virtqueue **vqs; in virtio_fs_setup_vqs() local
684 fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
685 if (!fs->vqs) in virtio_fs_setup_vqs()
688 vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
[all …]
/kernel/linux/linux-5.10/tools/testing/selftests/arm64/fp/
sve-probe-vls.c
21 static unsigned int vqs[SVE_VQ_MAX]; in main() local
48 vqs[nvqs++] = vq; in main()
55 ksft_print_msg("%u\n", 16 * vqs[nvqs]); in main()
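What this selftest is doing: request each candidate vector quantum via prctl(PR_SVE_SET_VL) and record the vector length the kernel actually grants. A sketch of that probing loop, assuming the PR_SVE_* constants from <sys/prctl.h> and a sve_vq_from_vl() helper (VL in bytes divided by 16), both of which the selftest environment provides:

    for (vq = SVE_VQ_MAX; vq > 0; --vq) {
            vl = prctl(PR_SVE_SET_VL, vq * 16);  /* VL is in bytes: 16 * vq */
            if (vl == -1)
                    break;                        /* SVE not supported */

            vl &= PR_SVE_VL_LEN_MASK;             /* strip status flags */
            vq = sve_vq_from_vl(vl);              /* quantum actually granted */

            if (!nvqs || vqs[nvqs - 1] != vq)
                    vqs[nvqs++] = vq;             /* record each distinct VQ once */
    }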
/kernel/linux/linux-5.10/drivers/crypto/virtio/
virtio_crypto_core.c
51 struct virtqueue **vqs; in virtcrypto_find_vqs() local
65 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); in virtcrypto_find_vqs()
66 if (!vqs) in virtcrypto_find_vqs()
87 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); in virtcrypto_find_vqs()
91 vi->ctrl_vq = vqs[total_vqs - 1]; in virtcrypto_find_vqs()
95 vi->data_vq[i].vq = vqs[i]; in virtcrypto_find_vqs()
106 kfree(vqs); in virtcrypto_find_vqs()
116 kfree(vqs); in virtcrypto_find_vqs()
/kernel/linux/linux-5.10/arch/arm64/kvm/
guest.c
266 #define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq))) argument
271 u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; in get_sve_vls() local
279 memset(vqs, 0, sizeof(vqs)); in get_sve_vls()
284 vqs[vq_word(vq)] |= vq_mask(vq); in get_sve_vls()
286 if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs))) in get_sve_vls()
295 u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; in set_sve_vls() local
306 if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs))) in set_sve_vls()
311 if (vq_present(vqs, vq)) in set_sve_vls()
325 if (vq_present(vqs, vq) != sve_vq_available(vq)) in set_sve_vls()
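The vq_present() hit shows how KVM encodes the guest's permitted SVE vector lengths: one bit per vector quantum in a u64 array. A small sketch of the encoding; vq_word()/vq_mask() are reconstructed here from the vq_present() definition above, so the exact forms in guest.c may differ slightly:

    #define vq_word(vq)  (((vq) - SVE_VQ_MIN) / 64)
    #define vq_mask(vq)  ((u64)1 << (((vq) - SVE_VQ_MIN) % 64))
    #define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

    u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0 };

    vqs[vq_word(2)] |= vq_mask(2);   /* advertise vq = 2, i.e. 256-bit vectors */
    /* vq_present(vqs, 2) now evaluates to 1 */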
/kernel/linux/linux-5.10/drivers/remoteproc/
remoteproc_virtio.c
131 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in __rproc_virtio_del_vqs()
144 struct virtqueue *vqs[], in rproc_virtio_find_vqs() argument
154 vqs[i] = NULL; in rproc_virtio_find_vqs()
158 vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], in rproc_virtio_find_vqs()
160 if (IS_ERR(vqs[i])) { in rproc_virtio_find_vqs()
161 ret = PTR_ERR(vqs[i]); in rproc_virtio_find_vqs()
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/iavf/
iavf_virtchnl.c
302 struct virtchnl_queue_select vqs; in iavf_enable_queues() local
311 vqs.vsi_id = adapter->vsi_res->vsi_id; in iavf_enable_queues()
312 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; in iavf_enable_queues()
313 vqs.rx_queues = vqs.tx_queues; in iavf_enable_queues()
316 (u8 *)&vqs, sizeof(vqs)); in iavf_enable_queues()
327 struct virtchnl_queue_select vqs; in iavf_disable_queues() local
336 vqs.vsi_id = adapter->vsi_res->vsi_id; in iavf_disable_queues()
337 vqs.tx_queues = BIT(adapter->num_active_queues) - 1; in iavf_disable_queues()
338 vqs.rx_queues = vqs.tx_queues; in iavf_disable_queues()
341 (u8 *)&vqs, sizeof(vqs)); in iavf_disable_queues()
[all …]
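Note the homonym: in the iavf hits, vqs is a struct virtchnl_queue_select, not a virtqueue array. Its tx/rx fields are bitmasks selecting the first n queues via BIT(n) - 1, which sets the low n bits; for num_active_queues = 4 that is BIT(4) - 1 = 0x10 - 1 = 0xf, i.e. queues 0–3. In sketch form:

    struct virtchnl_queue_select vqs;

    vqs.vsi_id = adapter->vsi_res->vsi_id;
    vqs.tx_queues = BIT(adapter->num_active_queues) - 1;  /* e.g. BIT(4) - 1 = 0xf */
    vqs.rx_queues = vqs.tx_queues;                        /* same set for rx */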
/kernel/linux/linux-5.10/drivers/vdpa/mlx5/net/
mlx5_vnet.c
132 struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS]; member
509 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
586 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
1158 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1201 if (!ndev->vqs[j].initialized) in create_rqt()
1204 if (!vq_is_tx(ndev->vqs[j].index)) { in create_rqt()
1205 list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); in create_rqt()
1335 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
1348 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
1362 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/virtio/
virtgpu_kms.c
106 struct virtqueue *vqs[2]; in virtio_gpu_init() local
160 ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); in virtio_gpu_init()
165 vgdev->ctrlq.vq = vqs[0]; in virtio_gpu_init()
166 vgdev->cursorq.vq = vqs[1]; in virtio_gpu_init()
/kernel/linux/linux-5.10/include/linux/
virtio_config.h
87 struct virtqueue *vqs[], vq_callback_t *callbacks[],
202 struct virtqueue *vqs[], vq_callback_t *callbacks[], in virtio_find_vqs() argument
206 return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc); in virtio_find_vqs()
211 struct virtqueue *vqs[], vq_callback_t *callbacks[], in virtio_find_vqs_ctx() argument
215 return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, in virtio_find_vqs_ctx()
virtio.h
118 struct list_head vqs; member
147 list_for_each_entry(vq, &vdev->vqs, list)
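The virtio_config.h hits declare the virtio_find_vqs() wrapper that drivers call, and the virtgpu_kms.c hits above show a two-queue user of it. Putting the two together, a minimal driver-side sketch; the callback names stand for whatever handlers the driver defines:

    static const char * const names[] = { "control", "cursor" };
    vq_callback_t *callbacks[] = { ctrl_ack, cursor_ack };  /* driver callbacks */
    struct virtqueue *vqs[2];
    int ret;

    ret = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
    if (ret)
            return ret;

    /* every requested slot of vqs[] is now a live virtqueue */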
/kernel/linux/linux-5.10/tools/virtio/
virtio_test.c
42 struct vq_info vqs[1]; member
113 struct vq_info *info = &dev->vqs[dev->nvqs]; in vq_info_add()
132 INIT_LIST_HEAD(&dev->vdev.vqs); in vdev_info_init()
400 run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000); in main()
/kernel/linux/linux-5.10/net/sched/
sch_gred.c
545 static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs) in gred_vqs_apply() argument
550 nla_for_each_nested(attr, vqs, rem) { in gred_vqs_apply()
604 struct nlattr *vqs, struct netlink_ext_ack *extack) in gred_vqs_validate() argument
609 err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX, in gred_vqs_validate()
614 nla_for_each_nested(attr, vqs, rem) { in gred_vqs_validate()
760 struct nlattr *parms, *vqs, *opts = NULL; in gred_dump() local
842 vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST); in gred_dump()
843 if (!vqs) in gred_dump()
891 nla_nest_end(skb, vqs); in gred_dump()
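Here vqs is different again: a netlink nested attribute (TCA_GRED_VQ_LIST) holding one entry per virtual queue, walked with nla_for_each_nested(). A sketch of the walk in gred_vqs_apply(), with the per-entry work elided:

    struct nlattr *attr;
    int rem;

    nla_for_each_nested(attr, vqs, rem) {
            if (nla_type(attr) != TCA_GRED_VQ_ENTRY)
                    continue;       /* ignore unknown attribute types */
            /* parse and apply one virtual-queue entry */
    }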