/drivers/virtio/ |
D | virtio_ring.c |
      22  dev_err(&(_vq)->vq.vdev->dev, \
      23          "%s:"fmt, (_vq)->vq.name, ##args); \
      31  (_vq)->vq.name, (_vq)->in_use); \
      59  dev_err(&_vq->vq.vdev->dev, \
      60          "%s:"fmt, (_vq)->vq.name, ##args); \
      63  #define START_USE(vq)  argument
      64  #define END_USE(vq)  argument
      65  #define LAST_ADD_TIME_UPDATE(vq)  argument
      66  #define LAST_ADD_TIME_CHECK(vq)  argument
      67  #define LAST_ADD_TIME_INVALID(vq)  argument
      [all …]
|
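The virtio_ring.c hits are its DEBUG-only guard macros: with DEBUG defined they expand to ring-state checks, otherwise they compile away. A minimal sketch of the same compile-out pattern; the MY_* names and struct my_ring are illustrative, not the kernel's:

#include <linux/bug.h>
#include <linux/kernel.h>

struct my_ring {
        const char *name;
        unsigned int in_use;    /* 0, or the __LINE__ that claimed the ring */
};

#ifdef DEBUG
/* Catch nested or missing START/END pairs on a ring. */
#define MY_START_USE(r) \
        do { \
                if ((r)->in_use) \
                        panic("%s: ring already in use!\n", (r)->name); \
                (r)->in_use = __LINE__; \
        } while (0)
#define MY_END_USE(r) \
        do { \
                BUG_ON(!(r)->in_use); \
                (r)->in_use = 0; \
        } while (0)
#else
/* Release builds: the guards vanish entirely. */
#define MY_START_USE(r)
#define MY_END_USE(r)
#endif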
D | virtio_pci_modern.c |
     182  static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)  in vp_active_vq() argument
     184  struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);  in vp_active_vq()
     188  index = vq->index;  in vp_active_vq()
     191  vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));  in vp_active_vq()
     192  vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),  in vp_active_vq()
     193          virtqueue_get_avail_addr(vq),  in vp_active_vq()
     194          virtqueue_get_used_addr(vq));  in vp_active_vq()
     205  static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)  in vp_modern_disable_vq_and_reset() argument
     207  struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);  in vp_modern_disable_vq_and_reset()
     212  if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))  in vp_modern_disable_vq_and_reset()
     [all …]
|
D | virtio_vdpa.c |
      40  struct virtqueue *vq;  member
     106  static bool virtio_vdpa_notify(struct virtqueue *vq)  in virtio_vdpa_notify() argument
     108  struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);  in virtio_vdpa_notify()
     111  ops->kick_vq(vdpa, vq->index);  in virtio_vdpa_notify()
     116  static bool virtio_vdpa_notify_with_data(struct virtqueue *vq)  in virtio_vdpa_notify_with_data() argument
     118  struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);  in virtio_vdpa_notify_with_data()
     120  u32 data = vring_notification_data(vq);  in virtio_vdpa_notify_with_data()
     140  return vring_interrupt(0, info->vq);  in virtio_vdpa_virtqueue_cb()
     145  void (*callback)(struct virtqueue *vq),  in virtio_vdpa_setup_vq() argument
     153  bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;  in virtio_vdpa_setup_vq()
     [all …]
|
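virtio_vdpa_notify() shows how a transport's kick hook reduces to one vDPA op. A sketch of that shape, assuming the transport wraps its virtio_device next to the vdpa_device it drives; struct my_virtio_vdpa_device is hypothetical, but the container_of lookup mirrors what the real vd_get_vdpa() does:

#include <linux/virtio.h>
#include <linux/vdpa.h>

struct my_virtio_vdpa_device {
        struct virtio_device vdev;
        struct vdpa_device *vdpa;
};

/* Kick hook: tell the backend which queue has new available buffers. */
static bool my_vdpa_notify(struct virtqueue *vq)
{
        struct my_virtio_vdpa_device *vd =
                container_of(vq->vdev, struct my_virtio_vdpa_device, vdev);
        const struct vdpa_config_ops *ops = vd->vdpa->config;

        ops->kick_vq(vd->vdpa, vq->index);
        return true;
}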
D | virtio_pci_legacy.c |
     113  void (*callback)(struct virtqueue *vq),  in setup_vq() argument
     118  struct virtqueue *vq;  in setup_vq() local
     131  vq = vring_create_virtqueue(index, num,  in setup_vq()
     135  if (!vq)  in setup_vq()
     138  vq->num_max = num;  in setup_vq()
     140  q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;  in setup_vq()
     152  vq->priv = (void __force *)vp_dev->ldev.ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;  in setup_vq()
     162  return vq;  in setup_vq()
     167  vring_del_virtqueue(vq);  in setup_vq()
     173  struct virtqueue *vq = info->vq;  in del_vq() local
     [all …]
|
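setup_vq() is built around vring_create_virtqueue(). A sketch of the legacy-PCI flow under the constraint the snippet hints at, namely that the ring's page frame number must fit the device's 32-bit register; my_setup_vq and the exact error codes are illustrative:

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

static struct virtqueue *my_setup_vq(struct virtio_device *vdev,
                                     unsigned int index, unsigned int num,
                                     void (*callback)(struct virtqueue *vq),
                                     bool (*notify)(struct virtqueue *vq),
                                     const char *name)
{
        struct virtqueue *vq;
        u64 q_pfn;

        vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, vdev,
                                    true,  /* weak_barriers */
                                    false, /* legacy: queue size is fixed */
                                    false, /* no per-buffer context */
                                    notify, callback, name);
        if (!vq)
                return ERR_PTR(-ENOMEM);

        /* Legacy devices are programmed with a 32-bit ring PFN. */
        q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (q_pfn >> 32) {
                vring_del_virtqueue(vq);
                return ERR_PTR(-E2BIG);
        }
        /* ... write q_pfn to the device and stash the notify address ... */
        return vq;
}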
D | virtio_mmio.c |
      99  struct virtqueue *vq;  member
     279  static bool vm_notify(struct virtqueue *vq)  in vm_notify() argument
     281  struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);  in vm_notify()
     285  writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);  in vm_notify()
     289  static bool vm_notify_with_data(struct virtqueue *vq)  in vm_notify_with_data() argument
     291  struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);  in vm_notify_with_data()
     292  u32 data = vring_notification_data(vq);  in vm_notify_with_data()
     320  ret |= vring_interrupt(irq, info->vq);  in vm_interrupt()
     329  static void vm_del_vq(struct virtqueue *vq)  in vm_del_vq() argument
     331  struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);  in vm_del_vq()
     [all …]
|
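vm_notify() and vm_notify_with_data() differ only in the value written to VIRTIO_MMIO_QUEUE_NOTIFY: the plain kick writes the queue index, while the VIRTIO_F_NOTIFICATION_DATA variant writes a payload that also encodes ring position, sparing the device a read. A sketch, assuming a wrapper like what to_virtio_mmio_device() recovers (my_mmio_device is invented):

#include <linux/io.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_mmio.h>

struct my_mmio_device {
        struct virtio_device vdev;
        void __iomem *base;     /* mapped MMIO window */
};

#define to_my_mmio(vd) container_of(vd, struct my_mmio_device, vdev)

static bool my_notify(struct virtqueue *vq)
{
        struct my_mmio_device *md = to_my_mmio(vq->vdev);

        writel(vq->index, md->base + VIRTIO_MMIO_QUEUE_NOTIFY);
        return true;
}

static bool my_notify_with_data(struct virtqueue *vq)
{
        struct my_mmio_device *md = to_my_mmio(vq->vdev);

        /* Payload encodes queue index plus next-available position. */
        writel(vring_notification_data(vq),
               md->base + VIRTIO_MMIO_QUEUE_NOTIFY);
        return true;
}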
D | virtio_pci_common.c |
      41  bool vp_notify(struct virtqueue *vq)  in vp_notify() argument
      45  iowrite16(vq->index, (void __iomem *)vq->priv);  in vp_notify()
      68  if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)  in vp_vring_interrupt()
     175  void (*callback)(struct virtqueue *vq),  in vp_setup_vq() argument
     182  struct virtqueue *vq;  in vp_setup_vq() local
     189  vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,  in vp_setup_vq()
     191  if (IS_ERR(vq))  in vp_setup_vq()
     194  info->vq = vq;  in vp_setup_vq()
     204  return vq;  in vp_setup_vq()
     208  return vq;  in vp_setup_vq()
     [all …]
|
D | virtio_balloon.c |
     138  static void balloon_ack(struct virtqueue *vq)  in balloon_ack() argument
     140  struct virtio_balloon *vb = vq->vdev->priv;  in balloon_ack()
     145  static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)  in tell_host() argument
     155  virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);  in tell_host()
     156  virtqueue_kick(vq);  in tell_host()
     159  wait_event(vb->acked, virtqueue_get_buf(vq, &len));  in tell_host()
     168  struct virtqueue *vq = vb->reporting_vq;  in virtballoon_free_page_report() local
     172  err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);  in virtballoon_free_page_report()
     184  virtqueue_kick(vq);  in virtballoon_free_page_report()
     187  wait_event(vb->acked, virtqueue_get_buf(vq, &unused));  in virtballoon_free_page_report()
     [all …]
|
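tell_host() is the textbook synchronous virtio request: post one out-buffer, kick, then sleep until the device hands the token back. A condensed sketch of that pattern; struct my_dev and its acked waitqueue (assumed initialized with init_waitqueue_head()) stand in for the balloon's private state:

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>

struct my_dev {
        wait_queue_head_t acked;        /* woken from the vq callback */
};

static void my_tell_host(struct my_dev *d, struct virtqueue *vq,
                         void *buf, size_t size)
{
        struct scatterlist sg;
        unsigned int len;

        sg_init_one(&sg, buf, size);

        /* The token (here: d) comes back from virtqueue_get_buf(). */
        if (virtqueue_add_outbuf(vq, &sg, 1, d, GFP_KERNEL) < 0)
                return;
        virtqueue_kick(vq);

        /* The ack path (cf. balloon_ack()) wakes d->acked. */
        wait_event(d->acked, virtqueue_get_buf(vq, &len));
}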
/drivers/vhost/ |
D | vhost.c |
      49  #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])  argument
      50  #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])  argument
      53  static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)  in vhost_disable_cross_endian() argument
      55  vq->user_be = !virtio_legacy_is_little_endian();  in vhost_disable_cross_endian()
      58  static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)  in vhost_enable_cross_endian_big() argument
      60  vq->user_be = true;  in vhost_enable_cross_endian_big()
      63  static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)  in vhost_enable_cross_endian_little() argument
      65  vq->user_be = false;  in vhost_enable_cross_endian_little()
      68  static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)  in vhost_set_vring_endian() argument
      72  if (vq->private_data)  in vhost_set_vring_endian()
      [all …]
|
D | net.c |
      98  struct vhost_virtqueue *vq;  member
     109  struct vhost_virtqueue vq;  member
     229  static void vhost_net_enable_zcopy(int vq)  in vhost_net_enable_zcopy() argument
     231  vhost_net_zcopy_mask |= 0x1 << vq;  in vhost_net_enable_zcopy()
     235  vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)  in vhost_net_ubuf_alloc() argument
     246  ubufs->vq = vq;  in vhost_net_ubuf_alloc()
     360  struct vhost_virtqueue *vq)  in vhost_zerocopy_signal_used() argument
     363  container_of(vq, struct vhost_net_virtqueue, vq);  in vhost_zerocopy_signal_used()
     368  if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)  in vhost_zerocopy_signal_used()
     370  if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {  in vhost_zerocopy_signal_used()
     [all …]
|
D | test.c |
      45  struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];  in handle_vq() local
      51  mutex_lock(&vq->mutex);  in handle_vq()
      52  private = vhost_vq_get_backend(vq);  in handle_vq()
      54  mutex_unlock(&vq->mutex);  in handle_vq()
      58  vhost_disable_notify(&n->dev, vq);  in handle_vq()
      61  head = vhost_get_vq_desc(vq, vq->iov,  in handle_vq()
      62          ARRAY_SIZE(vq->iov),  in handle_vq()
      69  if (head == vq->num) {  in handle_vq()
      70  if (unlikely(vhost_enable_notify(&n->dev, vq))) {  in handle_vq()
      71  vhost_disable_notify(&n->dev, vq);  in handle_vq()
      [all …]
|
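handle_vq() is the smallest complete instance of the vhost worker loop: take the vq mutex, check the backend, disable guest notifications while draining, and re-enable them (with a re-check, since the guest may refill concurrently) only once the ring is empty. A sketch of that loop with the per-descriptor work elided; my_handle_vq is illustrative:

#include "vhost.h"      /* drivers/vhost/ internal header */

static void my_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        unsigned int out, in;
        int head;

        mutex_lock(&vq->mutex);
        if (!vhost_vq_get_backend(vq))
                goto out;

        vhost_disable_notify(dev, vq);
        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;
                if (head == vq->num) {  /* ring empty */
                        /* The guest may have refilled just now: re-check. */
                        if (unlikely(vhost_enable_notify(dev, vq))) {
                                vhost_disable_notify(dev, vq);
                                continue;
                        }
                        break;
                }
                /* ... consume vq->iov[0 .. out + in) here ... */
                vhost_add_used_and_signal(dev, vq, head, 0);
        }
out:
        mutex_unlock(&vq->mutex);
}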
D | vhost.h |
      48  struct vhost_virtqueue *vq;  member
      53  struct vhost_virtqueue *vq);
     156  struct vhost_virtqueue *vq;  member
     181  bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
     198  bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
     208  void vhost_vq_flush(struct vhost_virtqueue *vq);
     209  bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
     210  bool vhost_vq_has_work(struct vhost_virtqueue *vq);
     211  bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
     225  int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
     [all …]
|
D | vsock.c |
      91  struct vhost_virtqueue *vq)  in vhost_transport_do_send_pkt() argument
      98  mutex_lock(&vq->mutex);  in vhost_transport_do_send_pkt()
     100  if (!vhost_vq_get_backend(vq))  in vhost_transport_do_send_pkt()
     103  if (!vq_meta_prefetch(vq))  in vhost_transport_do_send_pkt()
     107  vhost_disable_notify(&vsock->dev, vq);  in vhost_transport_do_send_pkt()
     122  vhost_enable_notify(&vsock->dev, vq);  in vhost_transport_do_send_pkt()
     126  head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),  in vhost_transport_do_send_pkt()
     133  if (head == vq->num) {  in vhost_transport_do_send_pkt()
     138  if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {  in vhost_transport_do_send_pkt()
     139  vhost_disable_notify(&vsock->dev, vq);  in vhost_transport_do_send_pkt()
     [all …]
|
D | scsi.c |
     174  struct vhost_virtqueue vq;  member
     256  struct vhost_virtqueue *vq;  in vhost_scsi_init_inflight() local
     260  vq = &vs->vqs[i].vq;  in vhost_scsi_init_inflight()
     262  mutex_lock(&vq->mutex);  in vhost_scsi_init_inflight()
     275  mutex_unlock(&vq->mutex);  in vhost_scsi_init_inflight()
     280  vhost_scsi_get_inflight(struct vhost_virtqueue *vq)  in vhost_scsi_get_inflight() argument
     285  svq = container_of(vq, struct vhost_scsi_virtqueue, vq);  in vhost_scsi_get_inflight()
     331  struct vhost_scsi_virtqueue, vq);  in vhost_scsi_release_cmd_res()
     366  struct vhost_virtqueue *vq = &tmf->svq->vq;  in vhost_scsi_release_cmd() local
     368  vhost_vq_work_queue(vq, &tmf->vwork);  in vhost_scsi_release_cmd()
     [all …]
|
/drivers/vdpa/vdpa_sim/ |
D | vdpa_sim_blk.c |
     110  struct vdpasim_virtqueue *vq)  in vdpasim_blk_handle_req() argument
     123  ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,  in vdpasim_blk_handle_req()
     124          &vq->head, GFP_ATOMIC);  in vdpasim_blk_handle_req()
     128  if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {  in vdpasim_blk_handle_req()
     130  vq->out_iov.used, vq->in_iov.used);  in vdpasim_blk_handle_req()
     134  if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {  in vdpasim_blk_handle_req()
     142  to_push = vringh_kiov_length(&vq->in_iov) - 1;  in vdpasim_blk_handle_req()
     144  to_pull = vringh_kiov_length(&vq->out_iov);  in vdpasim_blk_handle_req()
     146  bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,  in vdpasim_blk_handle_req()
     179  bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,  in vdpasim_blk_handle_req()
     [all …]
|
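On the device side the simulator drives the ring through vringh. A sketch of the per-request flow that vdpasim_blk_handle_req() implements, trimmed to one header pull and a one-byte status push; the virtio-blk header and status types are the real UAPI ones, the function name is not:

#include <linux/vringh.h>
#include <linux/virtio_blk.h>

static bool my_handle_one_req(struct vringh *vring,
                              struct vringh_kiov *out_iov,
                              struct vringh_kiov *in_iov)
{
        struct virtio_blk_outhdr hdr;
        u8 status = VIRTIO_BLK_S_OK;
        u16 head;
        int ret;

        /* Pop one descriptor chain: out_iov = device-readable segments,
         * in_iov = device-writable segments. */
        ret = vringh_getdesc_iotlb(vring, out_iov, in_iov, &head, GFP_ATOMIC);
        if (ret != 1)           /* 0: ring empty, <0: error */
                return false;

        if (vringh_iov_pull_iotlb(vring, out_iov, &hdr, sizeof(hdr)) !=
            sizeof(hdr))
                return false;

        /* ... act on hdr.type / hdr.sector, move the payload ... */

        vringh_iov_push_iotlb(vring, in_iov, &status, sizeof(status));
        vringh_complete_iotlb(vring, head, sizeof(status));
        return true;
}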
D | vdpa_sim.c |
      84  struct vdpasim_virtqueue *vq =  in vdpasim_vq_notify() local
      87  if (!vq->cb)  in vdpasim_vq_notify()
      90  vq->cb(vq->private);  in vdpasim_vq_notify()
      95  struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];  in vdpasim_queue_ready() local
      96  uint16_t last_avail_idx = vq->vring.last_avail_idx;  in vdpasim_queue_ready()
      98  (uintptr_t)vq->desc_addr;  in vdpasim_queue_ready()
     100  (uintptr_t)vq->driver_addr;  in vdpasim_queue_ready()
     102  (uintptr_t)vq->device_addr;  in vdpasim_queue_ready()
     105  vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,  in vdpasim_queue_ready()
     108  vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,  in vdpasim_queue_ready()
     [all …]
|
/drivers/gpu/drm/virtio/ |
D | virtgpu_trace.h |
      12  TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
      13  TP_ARGS(vq, hdr, seqno),
      16  __field(unsigned int, vq)
      17  __string(name, vq->name)
      26  __entry->dev = vq->vdev->index;
      27  __entry->vq = vq->index;
      28  __assign_str(name, vq->name);
      33  __entry->num_free = vq->num_free;
      37  __entry->dev, __entry->vq, __get_str(name),
      43  TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
      [all …]
|
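These hits are the pieces of a classic TRACE_EVENT over a virtqueue: record the queue index and name eagerly at trace time, format them lazily. A minimal sketch of the same shape; the event name my_vq_kick is invented, and in a real driver this lives in a trace header wrapped by the usual TRACE_SYSTEM / TRACE_INCLUDE boilerplate:

#include <linux/tracepoint.h>

TRACE_EVENT(my_vq_kick,
        TP_PROTO(struct virtqueue *vq, u32 seqno),
        TP_ARGS(vq, seqno),
        TP_STRUCT__entry(
                __field(unsigned int, vq)
                __string(name, vq->name)
                __field(u32, seqno)
        ),
        TP_fast_assign(
                __entry->vq = vq->index;
                __assign_str(name, vq->name);
                __entry->seqno = seqno;
        ),
        TP_printk("vq=%u name=%s seqno=%u",
                  __entry->vq, __get_str(name), __entry->seqno)
);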
/drivers/vdpa/vdpa_user/ |
D | vduse_dev.c |
     253  struct vduse_virtqueue *vq,  in vduse_dev_get_vq_state_packed() argument
     260  msg.req.vq_state.index = vq->index;  in vduse_dev_get_vq_state_packed()
     279  struct vduse_virtqueue *vq,  in vduse_dev_get_vq_state_split() argument
     286  msg.req.vq_state.index = vq->index;  in vduse_dev_get_vq_state_split()
     450  struct vduse_virtqueue *vq = dev->vqs[i];  in vduse_dev_reset() local
     452  vq->ready = false;  in vduse_dev_reset()
     453  vq->desc_addr = 0;  in vduse_dev_reset()
     454  vq->driver_addr = 0;  in vduse_dev_reset()
     455  vq->device_addr = 0;  in vduse_dev_reset()
     456  vq->num = 0;  in vduse_dev_reset()
     [all …]
|
/drivers/staging/media/sunxi/cedrus/ |
D | cedrus_video.c |
     292  struct vb2_queue *vq;  in cedrus_s_fmt_vid_cap() local
     295  vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);  in cedrus_s_fmt_vid_cap()
     296  if (vb2_is_busy(vq))  in cedrus_s_fmt_vid_cap()
     317  struct vb2_queue *vq;  in cedrus_s_fmt_vid_out_p() local
     326  vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);  in cedrus_s_fmt_vid_out_p()
     331  vq->subsystem_flags |=  in cedrus_s_fmt_vid_out_p()
     335  vq->subsystem_flags &=  in cedrus_s_fmt_vid_out_p()
     375  struct vb2_queue *vq;  in cedrus_s_fmt_vid_out() local
     378  vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);  in cedrus_s_fmt_vid_out()
     384  if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&  in cedrus_s_fmt_vid_out()
     [all …]
|
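The cedrus S_FMT handlers all open with the same guard: resolve the vb2 queue for the requested buffer type and refuse a format change while that queue holds buffers. A sketch of the guard, assuming the usual m2m context layout; struct my_ctx is hypothetical:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>

struct my_ctx {
        struct v4l2_fh fh;      /* fh.m2m_ctx set up at open() */
};

static int my_s_fmt(struct my_ctx *ctx, struct v4l2_format *f)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
        if (!vq)
                return -EINVAL;
        if (vb2_is_busy(vq))
                return -EBUSY;  /* buffers allocated: format is locked */

        /* ... validate and store the new format ... */
        return 0;
}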
/drivers/scsi/ |
D | virtio_scsi.c |
      68  struct virtqueue *vq;  member
     177  struct virtqueue *vq = virtscsi_vq->vq;  in virtscsi_vq_done() local
     181  virtqueue_disable_cb(vq);  in virtscsi_vq_done()
     182  while ((buf = virtqueue_get_buf(vq, &len)) != NULL)  in virtscsi_vq_done()
     185  if (unlikely(virtqueue_is_broken(vq)))  in virtscsi_vq_done()
     187  } while (!virtqueue_enable_cb(vq));  in virtscsi_vq_done()
     191  static void virtscsi_req_done(struct virtqueue *vq)  in virtscsi_req_done() argument
     193  struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);  in virtscsi_req_done()
     195  int index = vq->index - VIRTIO_SCSI_VQ_BASE;  in virtscsi_req_done()
     219  static void virtscsi_ctrl_done(struct virtqueue *vq)  in virtscsi_ctrl_done() argument
     [all …]
|
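virtscsi_vq_done() is the standard race-free completion drain, also visible in virtio_crypto_core.c further down: pop everything with callbacks disabled, then re-enable and loop again if virtqueue_enable_cb() reports that buffers slipped in meanwhile. A generic sketch; complete_one is a placeholder for the per-buffer handler:

#include <linux/virtio.h>

static void my_vq_done(struct virtqueue *vq,
                       void (*complete_one)(void *buf, unsigned int len))
{
        unsigned int len;
        void *buf;

        do {
                virtqueue_disable_cb(vq);
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        complete_one(buf, len);
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));     /* false: more work arrived */
}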
/drivers/i2c/busses/ |
D | i2c-virtio.c |
      31  struct virtqueue *vq;  member
      48  static void virtio_i2c_msg_done(struct virtqueue *vq)  in virtio_i2c_msg_done() argument
      53  while ((req = virtqueue_get_buf(vq, &len)))  in virtio_i2c_msg_done()
      57  static int virtio_i2c_prepare_reqs(struct virtqueue *vq,  in virtio_i2c_prepare_reqs() argument
     100  if (virtqueue_add_sgs(vq, sgs, outcnt, incnt, &reqs[i], GFP_KERNEL)) {  in virtio_i2c_prepare_reqs()
     109  static int virtio_i2c_complete_reqs(struct virtqueue *vq,  in virtio_i2c_complete_reqs() argument
     137  struct virtqueue *vq = vi->vq;  in virtio_i2c_xfer() local
     145  count = virtio_i2c_prepare_reqs(vq, reqs, msgs, num);  in virtio_i2c_xfer()
     157  virtqueue_kick(vq);  in virtio_i2c_xfer()
     159  count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);  in virtio_i2c_xfer()
     [all …]
|
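virtio_i2c_prepare_reqs() builds each request from mixed segments via virtqueue_add_sgs(), where all device-readable sg entries must precede the device-writable ones in the sgs[] array. A sketch of queuing one such request; struct my_req and its header layout are invented for illustration:

#include <linux/virtio.h>
#include <linux/scatterlist.h>

struct my_out_hdr {
        __le16 addr;
        __le16 flags;
};

struct my_req {
        struct my_out_hdr hdr;  /* device reads */
        u8 *buf;                /* device reads (write) or writes (read) */
        u8 status;              /* device writes */
};

static int my_queue_req(struct virtqueue *vq, struct my_req *req,
                        size_t buf_len, bool write)
{
        struct scatterlist hdr_sg, buf_sg, status_sg, *sgs[3];
        unsigned int out = 0, in = 0;

        sg_init_one(&hdr_sg, &req->hdr, sizeof(req->hdr));
        sgs[out++] = &hdr_sg;

        if (buf_len) {
                sg_init_one(&buf_sg, req->buf, buf_len);
                if (write)
                        sgs[out++] = &buf_sg;           /* host reads */
                else
                        sgs[out + in++] = &buf_sg;      /* host writes */
        }

        sg_init_one(&status_sg, &req->status, sizeof(req->status));
        sgs[out + in++] = &status_sg;

        /* readable entries first, then writable ones */
        return virtqueue_add_sgs(vq, sgs, out, in, req, GFP_KERNEL);
}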
/drivers/vdpa/pds/ |
D | debugfs.c |
     238  struct pds_vdpa_vq_info *vq = seq->private;  in vq_show() local
     240  seq_printf(seq, "ready: %d\n", vq->ready);  in vq_show()
     241  seq_printf(seq, "desc_addr: %#llx\n", vq->desc_addr);  in vq_show()
     242  seq_printf(seq, "avail_addr: %#llx\n", vq->avail_addr);  in vq_show()
     243  seq_printf(seq, "used_addr: %#llx\n", vq->used_addr);  in vq_show()
     244  seq_printf(seq, "q_len: %d\n", vq->q_len);  in vq_show()
     245  seq_printf(seq, "qid: %d\n", vq->qid);  in vq_show()
     247  seq_printf(seq, "doorbell: %#llx\n", vq->doorbell);  in vq_show()
     248  seq_printf(seq, "avail_idx: %d\n", vq->avail_idx);  in vq_show()
     249  seq_printf(seq, "used_idx: %d\n", vq->used_idx);  in vq_show()
     [all …]
|
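vq_show() follows the stock seq_file recipe: print one attribute per line from seq->private and let DEFINE_SHOW_ATTRIBUTE() generate the open handler and fops. A sketch with a cut-down info struct; my_vq_info and the file name are illustrative:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct my_vq_info {
        bool ready;
        u16 qid;
        u64 desc_addr;
};

static int my_vq_show(struct seq_file *seq, void *v)
{
        struct my_vq_info *vq = seq->private;

        seq_printf(seq, "ready: %d\n", vq->ready);
        seq_printf(seq, "qid: %d\n", vq->qid);
        seq_printf(seq, "desc_addr: %#llx\n", vq->desc_addr);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_vq);   /* generates my_vq_fops from my_vq_show */

/* registration, e.g. at probe time:
 *   debugfs_create_file("vq0", 0444, parent_dentry, info, &my_vq_fops);
 */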
/drivers/crypto/virtio/ |
D | virtio_crypto_core.c |
      30  static void virtcrypto_ctrlq_callback(struct virtqueue *vq)  in virtcrypto_ctrlq_callback() argument
      32  struct virtio_crypto *vcrypto = vq->vdev->priv;  in virtcrypto_ctrlq_callback()
      39  virtqueue_disable_cb(vq);  in virtcrypto_ctrlq_callback()
      40  while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {  in virtcrypto_ctrlq_callback()
      45  if (unlikely(virtqueue_is_broken(vq)))  in virtcrypto_ctrlq_callback()
      47  } while (!virtqueue_enable_cb(vq));  in virtcrypto_ctrlq_callback()
      78  struct virtqueue *vq = data_vq->vq;  in virtcrypto_done_task() local
      83  virtqueue_disable_cb(vq);  in virtcrypto_done_task()
      84  while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {  in virtcrypto_done_task()
      88  } while (!virtqueue_enable_cb(vq));  in virtcrypto_done_task()
      [all …]
|
/drivers/media/v4l2-core/ |
D | v4l2-mem2mem.c |
     575  struct vb2_queue *vq;  in v4l2_m2m_reqbufs() local
     578  vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);  in v4l2_m2m_reqbufs()
     579  ret = vb2_reqbufs(vq, reqbufs);  in v4l2_m2m_reqbufs()
     583  vq->owner = reqbufs->count ? file->private_data : NULL;  in v4l2_m2m_reqbufs()
     589  static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,  in v4l2_m2m_adjust_mem_offset() argument
     593  if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {  in v4l2_m2m_adjust_mem_offset()
     594  if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {  in v4l2_m2m_adjust_mem_offset()
     609  struct vb2_queue *vq;  in v4l2_m2m_querybuf() local
     612  vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);  in v4l2_m2m_querybuf()
     613  ret = vb2_querybuf(vq, buf);  in v4l2_m2m_querybuf()
     [all …]
|
/drivers/staging/media/imx/ |
D | imx-media-capture.c |
     600  static int capture_queue_setup(struct vb2_queue *vq,  in capture_queue_setup() argument
     606  struct capture_priv *priv = vb2_get_drv_priv(vq);  in capture_queue_setup()
     610  if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)  in capture_queue_setup()
     616  count += vq->num_buffers;  in capture_queue_setup()
     622  *nbuffers = (count < vq->num_buffers) ? 0 :  in capture_queue_setup()
     623          count - vq->num_buffers;  in capture_queue_setup()
     644  struct vb2_queue *vq = vb->vb2_queue;  in capture_buf_prepare() local
     645  struct capture_priv *priv = vb2_get_drv_priv(vq);  in capture_buf_prepare()
     709  static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)  in capture_start_streaming() argument
     711  struct capture_priv *priv = vb2_get_drv_priv(vq);  in capture_start_streaming()
     [all …]
|
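capture_queue_setup() implements the vb2 queue_setup contract: reject foreign queue types, honor a re-negotiation call (*num_planes already set) only if the size still fits, and otherwise report the plane count and sizes. A sketch against an assumed driver-private format field; struct my_priv and the buffer cap are illustrative:

#include <media/videobuf2-core.h>
#include <linux/videodev2.h>
#include <linux/minmax.h>

struct my_priv {
        struct v4l2_pix_format format;  /* current negotiated format */
};

static int my_queue_setup(struct vb2_queue *vq,
                          unsigned int *nbuffers, unsigned int *nplanes,
                          unsigned int sizes[], struct device *alloc_devs[])
{
        struct my_priv *priv = vb2_get_drv_priv(vq);

        if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;

        /* Re-negotiation: the requested size must still be large enough. */
        if (*nplanes)
                return sizes[0] < priv->format.sizeimage ? -EINVAL : 0;

        *nbuffers = min_t(unsigned int, *nbuffers, 32); /* arbitrary cap */
        *nplanes = 1;
        sizes[0] = priv->format.sizeimage;
        return 0;
}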
/drivers/s390/virtio/ |
D | virtio_ccw.c |
     125  struct virtqueue *vq;  member
     193  static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)  in drop_airq_indicator() argument
     199  if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {  in drop_airq_indicator()
     305  drop_airq_indicator(info->vq, vcdev->airq_info);  in virtio_ccw_drop_indicators()
     393  static inline bool virtio_ccw_do_kvm_notify(struct virtqueue *vq, u32 data)  in virtio_ccw_do_kvm_notify() argument
     395  struct virtio_ccw_vq_info *info = vq->priv;  in virtio_ccw_do_kvm_notify()
     399  vcdev = to_vc_device(info->vq->vdev);  in virtio_ccw_do_kvm_notify()
     410  static bool virtio_ccw_kvm_notify(struct virtqueue *vq)  in virtio_ccw_kvm_notify()
     412  return virtio_ccw_do_kvm_notify(vq, vq->index);  in virtio_ccw_kvm_notify()
     415  static bool virtio_ccw_kvm_notify_with_data(struct virtqueue *vq)  in virtio_ccw_kvm_notify_with_data() argument
     [all …]
|