Lines matching refs:vq in drivers/virtio/virtio_ring.c

21 		dev_err(&(_vq)->vq.vdev->dev,			\
22 			"%s:"fmt, (_vq)->vq.name, ##args);	\
30 			(_vq)->vq.name, (_vq)->in_use);		\
58 		dev_err(&_vq->vq.vdev->dev,			\
59 			"%s:"fmt, (_vq)->vq.name, ##args);	\
62 #define START_USE(vq) argument
63 #define END_USE(vq) argument
64 #define LAST_ADD_TIME_UPDATE(vq) argument
65 #define LAST_ADD_TIME_CHECK(vq) argument
66 #define LAST_ADD_TIME_INVALID(vq) argument
89 struct virtqueue vq; member
186 bool (*notify)(struct virtqueue *vq);
206 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
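The to_vvq() macro above recovers the driver-private struct vring_virtqueue from the public struct virtqueue embedded at line 89. A minimal userspace sketch of the same container_of pattern; the two structs here are invented stand-ins, not the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct virtqueue_pub { const char *name; };  /* stand-in for struct virtqueue */
struct vring_vq_priv {                       /* stand-in for struct vring_virtqueue */
	int last_used_idx;
	struct virtqueue_pub vq;             /* public part embedded, as at line 89 */
};

int main(void)
{
	struct vring_vq_priv priv = { .last_used_idx = 7, .vq = { .name = "rx0" } };
	struct virtqueue_pub *pub = &priv.vq;  /* callers only ever see this pointer */

	/* recover the container, which is exactly what to_vvq(_vq) does */
	struct vring_vq_priv *back = container_of(pub, struct vring_vq_priv, vq);
	printf("%s last_used=%d\n", back->vq.name, back->last_used_idx);
	return 0;
}
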
211 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_use_indirect() local
217 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
324 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq) in vring_dma_dev() argument
326 return vq->vq.vdev->dev.parent; in vring_dma_dev()
330 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq, in vring_map_one_sg() argument
334 if (!vq->use_dma_api) in vring_map_one_sg()
342 return dma_map_page(vring_dma_dev(vq), in vring_map_one_sg()
347 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, in vring_map_single() argument
351 if (!vq->use_dma_api) in vring_map_single()
354 return dma_map_single(vring_dma_dev(vq), in vring_map_single()
358 static int vring_mapping_error(const struct vring_virtqueue *vq, in vring_mapping_error() argument
361 if (!vq->use_dma_api) in vring_mapping_error()
364 return dma_mapping_error(vring_dma_dev(vq), addr); in vring_mapping_error()
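The three helpers above (vring_map_one_sg, vring_map_single, vring_mapping_error) all short-circuit when use_dma_api is false: no mapping is performed and the result can never be an error. A hedged userspace sketch of that decision; dma_map_stub() and phys_addr_of() are invented stand-ins for the real DMA API and sg_phys():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR ((dma_addr_t)-1)

/* invented stand-in for the real IOMMU mapping path */
static dma_addr_t dma_map_stub(void *buf) { return (dma_addr_t)(uintptr_t)buf ^ 0x1000; }
/* invented stand-in for sg_phys()/virt_to_phys() */
static dma_addr_t phys_addr_of(void *buf) { return (dma_addr_t)(uintptr_t)buf; }

static dma_addr_t map_one(bool use_dma_api, void *buf)
{
	if (!use_dma_api)                  /* bypass, as in vring_map_one_sg() */
		return phys_addr_of(buf);
	return dma_map_stub(buf);
}

static int mapping_error(bool use_dma_api, dma_addr_t addr)
{
	if (!use_dma_api)                  /* the bypass can't fail, as in vring_mapping_error() */
		return 0;
	return addr == DMA_MAPPING_ERROR;
}

int main(void)
{
	char buf[64];
	dma_addr_t a = map_one(false, buf);
	printf("addr=%llx err=%d\n", (unsigned long long)a, mapping_error(false, a));
	return 0;
}
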
372 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq, in vring_unmap_one_split_indirect() argument
377 if (!vq->use_dma_api) in vring_unmap_one_split_indirect()
380 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); in vring_unmap_one_split_indirect()
383 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split_indirect()
384 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split_indirect()
385 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split_indirect()
389 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split_indirect()
390 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split_indirect()
391 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split_indirect()
397 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, in vring_unmap_one_split() argument
400 struct vring_desc_extra *extra = vq->split.desc_extra; in vring_unmap_one_split()
403 if (!vq->use_dma_api) in vring_unmap_one_split()
409 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split()
415 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split()
449 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, in virtqueue_add_desc_split() argument
457 struct vring_virtqueue *vring = to_vvq(vq); in virtqueue_add_desc_split()
461 desc[i].flags = cpu_to_virtio16(vq->vdev, flags); in virtqueue_add_desc_split()
462 desc[i].addr = cpu_to_virtio64(vq->vdev, addr); in virtqueue_add_desc_split()
463 desc[i].len = cpu_to_virtio32(vq->vdev, len); in virtqueue_add_desc_split()
467 desc[i].next = cpu_to_virtio16(vq->vdev, next); in virtqueue_add_desc_split()
473 next = virtio16_to_cpu(vq->vdev, desc[i].next); in virtqueue_add_desc_split()
487 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split() local
494 START_USE(vq); in virtqueue_add_split()
497 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
499 if (unlikely(vq->broken)) { in virtqueue_add_split()
500 END_USE(vq); in virtqueue_add_split()
504 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_split()
508 head = vq->free_head; in virtqueue_add_split()
514 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
525 desc = vq->split.vring.desc; in virtqueue_add_split()
530 if (vq->vq.num_free < descs_used) { in virtqueue_add_split()
532 descs_used, vq->vq.num_free); in virtqueue_add_split()
537 vq->notify(&vq->vq); in virtqueue_add_split()
540 END_USE(vq); in virtqueue_add_split()
546 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); in virtqueue_add_split()
547 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
561 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); in virtqueue_add_split()
562 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
578 if (!indirect && vq->use_dma_api) in virtqueue_add_split()
579 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
585 vq, desc, total_sg * sizeof(struct vring_desc), in virtqueue_add_split()
587 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
590 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
598 vq->vq.num_free -= descs_used; in virtqueue_add_split()
602 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
604 vq->free_head = i; in virtqueue_add_split()
607 vq->split.desc_state[head].data = data; in virtqueue_add_split()
609 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
611 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
615 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
616 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
620 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
621 vq->split.avail_idx_shadow++; in virtqueue_add_split()
622 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
623 vq->split.avail_idx_shadow); in virtqueue_add_split()
624 vq->num_added++; in virtqueue_add_split()
626 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_split()
627 END_USE(vq); in virtqueue_add_split()
631 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
648 vring_unmap_one_split_indirect(vq, &desc[i]); in virtqueue_add_split()
651 i = vring_unmap_one_split(vq, i); in virtqueue_add_split()
657 END_USE(vq); in virtqueue_add_split()
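virtqueue_add_split() publishes a buffer by writing its head index into avail ring slot avail_idx_shadow & (num - 1) (line 615): split ring sizes are powers of two, so the free-running 16-bit shadow index wraps cleanly onto ring slots. A small standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t num = 256;           /* split ring sizes are powers of two */
	uint16_t avail_idx_shadow = 65533;  /* free-running u16, close to wrapping */

	for (int added = 0; added < 5; added++) {
		uint16_t slot = avail_idx_shadow & (num - 1);  /* as at line 615 */
		printf("shadow=%5u -> ring slot %3u\n", avail_idx_shadow, slot);
		avail_idx_shadow++;         /* published to avail->idx after a wmb (lines 620-623) */
	}
	return 0;
}
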
663 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split() local
667 START_USE(vq); in virtqueue_kick_prepare_split()
670 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
672 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
673 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
674 vq->num_added = 0; in virtqueue_kick_prepare_split()
676 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_split()
677 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_split()
679 if (vq->event) { in virtqueue_kick_prepare_split()
681 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
684 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
688 END_USE(vq); in virtqueue_kick_prepare_split()
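When VIRTIO_RING_F_EVENT_IDX is negotiated (vq->event), the kick decision at lines 679-681 reduces to the virtio spec's vring_need_event() test, which works entirely in mod-2^16 arithmetic. A standalone demo of the formula:

#include <stdint.h>
#include <stdio.h>

/* the event-index test from the virtio spec; all math is mod 2^16 */
static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* device asked to be kicked once the driver publishes index 10 */
	printf("%d\n", vring_need_event(10, 11, 8));       /* crossed 10 -> kick (1) */
	printf("%d\n", vring_need_event(10, 9, 8));        /* not there yet -> no (0) */
	/* event index just before the u16 wrap is still detected */
	printf("%d\n", vring_need_event(65533, 3, 65530)); /* -> kick (1) */
	return 0;
}
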
692 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, in detach_buf_split() argument
696 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
699 vq->split.desc_state[head].data = NULL; in detach_buf_split()
704 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
705 vring_unmap_one_split(vq, i); in detach_buf_split()
706 i = vq->split.desc_extra[i].next; in detach_buf_split()
707 vq->vq.num_free++; in detach_buf_split()
710 vring_unmap_one_split(vq, i); in detach_buf_split()
711 vq->split.desc_extra[i].next = vq->free_head; in detach_buf_split()
712 vq->free_head = head; in detach_buf_split()
715 vq->vq.num_free++; in detach_buf_split()
717 if (vq->indirect) { in detach_buf_split()
719 vq->split.desc_state[head].indir_desc; in detach_buf_split()
726 len = vq->split.desc_extra[head].len; in detach_buf_split()
728 BUG_ON(!(vq->split.desc_extra[head].flags & in detach_buf_split()
733 vring_unmap_one_split_indirect(vq, &indir_desc[j]); in detach_buf_split()
736 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
738 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
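detach_buf_split() returns a descriptor chain to the free list by pointing the chain's tail at the old free_head and making the chain's head the new free_head (lines 711-712). A compact array-based sketch of that singly linked free list:

#include <stdio.h>

#define NUM 8

int main(void)
{
	/* next[] stands in for vq->split.desc_extra[i].next */
	int next[NUM], free_head = 0;

	for (int i = 0; i < NUM; i++)    /* initially every slot is free, chained in order */
		next[i] = i + 1;

	/* take a 3-descriptor chain off the free list, as virtqueue_add_split() does */
	int head = free_head, tail = head;
	for (int n = 1; n < 3; n++)
		tail = next[tail];
	free_head = next[tail];          /* vq->free_head advances past the chain (line 602) */

	/* give it back: tail -> old free list, head becomes the new free_head
	 * (lines 711-712) */
	next[tail] = free_head;
	free_head = head;

	for (int i = free_head, n = 0; n < NUM; i = next[i], n++)
		printf("free slot %d\n", i);
	return 0;
}
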
742 static inline bool more_used_split(const struct vring_virtqueue *vq) in more_used_split() argument
744 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
745 vq->split.vring.used->idx); in more_used_split()
752 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split() local
757 START_USE(vq); in virtqueue_get_buf_ctx_split()
759 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
760 END_USE(vq); in virtqueue_get_buf_ctx_split()
764 if (!more_used_split(vq)) { in virtqueue_get_buf_ctx_split()
766 END_USE(vq); in virtqueue_get_buf_ctx_split()
771 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
773 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
775 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
777 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
779 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
780 BAD_RING(vq, "id %u out of range\n", i); in virtqueue_get_buf_ctx_split()
783 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
784 BAD_RING(vq, "id %u is not a head!\n", i); in virtqueue_get_buf_ctx_split()
789 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
790 detach_buf_split(vq, i, ctx); in virtqueue_get_buf_ctx_split()
791 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
795 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
796 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
797 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
798 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
800 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_split()
802 END_USE(vq); in virtqueue_get_buf_ctx_split()
808 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split() local
810 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
811 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
817 if (vq->event_triggered) in virtqueue_disable_cb_split()
820 if (vq->event) in virtqueue_disable_cb_split()
822 vring_used_event(&vq->split.vring) = 0x0; in virtqueue_disable_cb_split()
824 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
826 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
832 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split() local
835 START_USE(vq); in virtqueue_enable_cb_prepare_split()
842 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
843 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
844 if (!vq->event) in virtqueue_enable_cb_prepare_split()
845 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
847 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
849 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
850 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
851 END_USE(vq); in virtqueue_enable_cb_prepare_split()
857 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split() local
860 vq->split.vring.used->idx); in virtqueue_poll_split()
865 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split() local
868 START_USE(vq); in virtqueue_enable_cb_delayed_split()
875 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
876 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
877 if (!vq->event) in virtqueue_enable_cb_delayed_split()
878 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
880 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
883 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
885 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
886 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
887 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
889 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
890 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
891 END_USE(vq); in virtqueue_enable_cb_delayed_split()
895 END_USE(vq); in virtqueue_enable_cb_delayed_split()
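virtqueue_enable_cb_delayed_split() asks for an interrupt only after roughly 3/4 of the outstanding buffers have been used (line 883), then re-reads used->idx in case the device raced past the event point while it was being armed (lines 889-890). A standalone sketch of both calculations:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t avail_idx_shadow = 100, last_used_idx = 20;

	/* let roughly 3/4 of the outstanding buffers complete first (line 883) */
	uint16_t bufs = (uint16_t)(avail_idx_shadow - last_used_idx) * 3 / 4;
	uint16_t used_event = last_used_idx + bufs;
	printf("outstanding=%u, interrupt at used idx %u\n",
	       (uint16_t)(avail_idx_shadow - last_used_idx), used_event);

	/* race check from lines 889-890: did the device already pass the event? */
	uint16_t used_idx = 85;
	if ((uint16_t)(used_idx - last_used_idx) > bufs)
		printf("missed the window, caller should poll instead of sleeping\n");
	return 0;
}
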
901 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split() local
905 START_USE(vq); in virtqueue_detach_unused_buf_split()
907 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
908 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
911 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
912 detach_buf_split(vq, i, NULL); in virtqueue_detach_unused_buf_split()
913 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
914 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
915 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
916 END_USE(vq); in virtqueue_detach_unused_buf_split()
920 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
922 END_USE(vq); in virtqueue_detach_unused_buf_split()
938 struct virtqueue *vq; in vring_create_virtqueue_split() local
975 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, in vring_create_virtqueue_split()
977 if (!vq) { in vring_create_virtqueue_split()
983 to_vvq(vq)->split.queue_dma_addr = dma_addr; in vring_create_virtqueue_split()
984 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes; in vring_create_virtqueue_split()
985 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
987 return vq; in vring_create_virtqueue_split()
995 static void vring_unmap_state_packed(const struct vring_virtqueue *vq, in vring_unmap_state_packed() argument
1000 if (!vq->use_dma_api) in vring_unmap_state_packed()
1006 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_state_packed()
1011 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_state_packed()
1018 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, in vring_unmap_desc_packed() argument
1023 if (!vq->use_dma_api) in vring_unmap_desc_packed()
1029 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_desc_packed()
1035 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_desc_packed()
1060 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, in virtqueue_add_indirect_packed() argument
1074 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
1079 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1082 END_USE(vq); in virtqueue_add_indirect_packed()
1087 id = vq->free_head; in virtqueue_add_indirect_packed()
1088 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1092 addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_indirect_packed()
1094 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1106 addr = vring_map_single(vq, desc, in virtqueue_add_indirect_packed()
1109 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1112 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1113 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1115 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1117 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1118 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1119 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1121 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1122 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1130 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1131 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1132 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1135 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1139 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1141 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1142 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1146 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1147 vq->free_head = vq->packed.desc_extra[id].next; in virtqueue_add_indirect_packed()
1150 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1151 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1152 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1153 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1155 vq->num_added += 1; in virtqueue_add_indirect_packed()
1157 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_indirect_packed()
1158 END_USE(vq); in virtqueue_add_indirect_packed()
1166 vring_unmap_desc_packed(vq, &desc[i]); in virtqueue_add_indirect_packed()
1170 END_USE(vq); in virtqueue_add_indirect_packed()
1183 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed() local
1191 START_USE(vq); in virtqueue_add_packed()
1194 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1196 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1197 END_USE(vq); in virtqueue_add_packed()
1201 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_packed()
1206 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in virtqueue_add_packed()
1209 END_USE(vq); in virtqueue_add_packed()
1216 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1217 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1219 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1221 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1225 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1227 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1228 END_USE(vq); in virtqueue_add_packed()
1232 id = vq->free_head; in virtqueue_add_packed()
1233 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1239 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_packed()
1241 if (vring_mapping_error(vq, addr)) in virtqueue_add_packed()
1244 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1256 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1257 vq->packed.desc_extra[curr].addr = addr; in virtqueue_add_packed()
1258 vq->packed.desc_extra[curr].len = sg->length; in virtqueue_add_packed()
1259 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1263 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1265 if ((unlikely(++i >= vq->packed.vring.num))) { in virtqueue_add_packed()
1267 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1275 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1278 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1281 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1282 vq->free_head = curr; in virtqueue_add_packed()
1285 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1286 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1287 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1288 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1295 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1296 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1297 vq->num_added += descs_used; in virtqueue_add_packed()
1299 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_packed()
1300 END_USE(vq); in virtqueue_add_packed()
1307 curr = vq->free_head; in virtqueue_add_packed()
1309 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1314 vring_unmap_state_packed(vq, in virtqueue_add_packed()
1315 &vq->packed.desc_extra[curr]); in virtqueue_add_packed()
1316 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1318 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1322 END_USE(vq); in virtqueue_add_packed()
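The packed ring has no separate avail/used rings; instead virtqueue_add_packed() stamps each descriptor with AVAIL/USED flag bits taken from avail_used_flags, and flips both bits whenever next_avail_idx wraps (lines 1265-1275). A standalone walk through the wrap behaviour:

#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED  15

int main(void)
{
	uint16_t num = 4, next_avail_idx = 0;
	int avail_wrap_counter = 1;
	/* initial state, as at line 1760: AVAIL set, USED clear */
	uint16_t avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	for (int n = 0; n < 10; n++) {
		printf("desc %u stamped AVAIL=%d USED=%d (wrap=%d)\n",
		       next_avail_idx,
		       !!(avail_used_flags & (1 << VRING_PACKED_DESC_F_AVAIL)),
		       !!(avail_used_flags & (1 << VRING_PACKED_DESC_F_USED)),
		       avail_wrap_counter);
		if (++next_avail_idx >= num) {   /* wrap, as at lines 1265-1275 */
			next_avail_idx = 0;
			avail_wrap_counter ^= 1;
			avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL |
					    1 << VRING_PACKED_DESC_F_USED;
		}
	}
	return 0;
}
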
1328 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed() local
1339 START_USE(vq); in virtqueue_kick_prepare_packed()
1345 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1347 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1348 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1349 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1351 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1354 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_packed()
1355 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_packed()
1366 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1367 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1371 END_USE(vq); in virtqueue_kick_prepare_packed()
1375 static void detach_buf_packed(struct vring_virtqueue *vq, in detach_buf_packed() argument
1382 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1387 vq->packed.desc_extra[state->last].next = vq->free_head; in detach_buf_packed()
1388 vq->free_head = id; in detach_buf_packed()
1389 vq->vq.num_free += state->num; in detach_buf_packed()
1391 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1394 vring_unmap_state_packed(vq, in detach_buf_packed()
1395 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1396 curr = vq->packed.desc_extra[curr].next; in detach_buf_packed()
1400 if (vq->indirect) { in detach_buf_packed()
1408 if (vq->use_dma_api) { in detach_buf_packed()
1409 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1412 vring_unmap_desc_packed(vq, &desc[i]); in detach_buf_packed()
1421 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, in is_used_desc_packed() argument
1427 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
1434 static inline bool more_used_packed(const struct vring_virtqueue *vq) in more_used_packed() argument
1436 return is_used_desc_packed(vq, vq->last_used_idx, in more_used_packed()
1437 vq->packed.used_wrap_counter); in more_used_packed()
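is_used_desc_packed() declares a descriptor consumed when its AVAIL and USED bits agree with each other and with the caller's wrap counter. A standalone demo of that test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED  15

/* a descriptor is used when AVAIL == USED == our used_wrap_counter */
static bool is_used(uint16_t flags, bool used_wrap_counter)
{
	bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	bool used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
	return avail == used && used == used_wrap_counter;
}

int main(void)
{
	bool wrap = true;
	uint16_t made_avail = 1 << VRING_PACKED_DESC_F_AVAIL;             /* driver wrote it */
	uint16_t made_used  = made_avail | 1 << VRING_PACKED_DESC_F_USED; /* device finished */

	printf("just made available: used? %d\n", is_used(made_avail, wrap)); /* 0 */
	printf("device completed it: used? %d\n", is_used(made_used, wrap));  /* 1 */
	return 0;
}
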
1444 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed() local
1448 START_USE(vq); in virtqueue_get_buf_ctx_packed()
1450 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1451 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1455 if (!more_used_packed(vq)) { in virtqueue_get_buf_ctx_packed()
1457 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1462 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1464 last_used = vq->last_used_idx; in virtqueue_get_buf_ctx_packed()
1465 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1466 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1468 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1469 BAD_RING(vq, "id %u out of range\n", id); in virtqueue_get_buf_ctx_packed()
1472 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1473 BAD_RING(vq, "id %u is not a head!\n", id); in virtqueue_get_buf_ctx_packed()
1478 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1479 detach_buf_packed(vq, id, ctx); in virtqueue_get_buf_ctx_packed()
1481 vq->last_used_idx += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1482 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1483 vq->last_used_idx -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1484 vq->packed.used_wrap_counter ^= 1; in virtqueue_get_buf_ctx_packed()
1492 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1493 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1494 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1495 cpu_to_le16(vq->last_used_idx | in virtqueue_get_buf_ctx_packed()
1496 (vq->packed.used_wrap_counter << in virtqueue_get_buf_ctx_packed()
1499 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_packed()
1501 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1507 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed() local
1509 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1510 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1516 if (vq->event_triggered) in virtqueue_disable_cb_packed()
1519 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1520 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1526 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed() local
1528 START_USE(vq); in virtqueue_enable_cb_prepare_packed()
1535 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1536 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1537 cpu_to_le16(vq->last_used_idx | in virtqueue_enable_cb_prepare_packed()
1538 (vq->packed.used_wrap_counter << in virtqueue_enable_cb_prepare_packed()
1544 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1547 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1548 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1551 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1552 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1555 END_USE(vq); in virtqueue_enable_cb_prepare_packed()
1556 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter << in virtqueue_enable_cb_prepare_packed()
1562 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed() local
1569 return is_used_desc_packed(vq, used_idx, wrap_counter); in virtqueue_poll_packed()
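virtqueue_enable_cb_prepare_packed() returns last_used_idx with the used wrap counter packed into bit 15 (lines 1556 and 1537-1538), and virtqueue_poll_packed() splits the token back apart before calling is_used_desc_packed(). A small encode/decode demo; VRING_PACKED_EVENT_F_WRAP_CTR is the kernel's name for bit 15:

#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR 15

int main(void)
{
	uint16_t last_used_idx = 42;
	uint16_t wrap = 1;

	/* encode, as returned by virtqueue_enable_cb_prepare_packed() */
	uint16_t off_wrap = last_used_idx | (wrap << VRING_PACKED_EVENT_F_WRAP_CTR);

	/* decode, as virtqueue_poll_packed() does */
	uint16_t used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	uint16_t wrap_out = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;

	printf("off_wrap=0x%04x -> idx=%u wrap=%u\n", off_wrap, used_idx, wrap_out);
	return 0;
}
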
1574 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed() local
1578 START_USE(vq); in virtqueue_enable_cb_delayed_packed()
1585 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1587 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1588 wrap_counter = vq->packed.used_wrap_counter; in virtqueue_enable_cb_delayed_packed()
1590 used_idx = vq->last_used_idx + bufs; in virtqueue_enable_cb_delayed_packed()
1591 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1592 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1596 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1603 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1606 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1607 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1610 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1611 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1618 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1620 if (is_used_desc_packed(vq, in virtqueue_enable_cb_delayed_packed()
1621 vq->last_used_idx, in virtqueue_enable_cb_delayed_packed()
1622 vq->packed.used_wrap_counter)) { in virtqueue_enable_cb_delayed_packed()
1623 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1627 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1633 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed() local
1637 START_USE(vq); in virtqueue_detach_unused_buf_packed()
1639 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1640 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1643 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1644 detach_buf_packed(vq, i, NULL); in virtqueue_detach_unused_buf_packed()
1645 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1649 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1651 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1655 static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq, in vring_alloc_desc_extra() argument
1686 struct vring_virtqueue *vq; in vring_create_virtqueue_packed() local
1714 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in vring_create_virtqueue_packed()
1715 if (!vq) in vring_create_virtqueue_packed()
1718 vq->vq.callback = callback; in vring_create_virtqueue_packed()
1719 vq->vq.vdev = vdev; in vring_create_virtqueue_packed()
1720 vq->vq.name = name; in vring_create_virtqueue_packed()
1721 vq->vq.num_free = num; in vring_create_virtqueue_packed()
1722 vq->vq.index = index; in vring_create_virtqueue_packed()
1723 vq->we_own_ring = true; in vring_create_virtqueue_packed()
1724 vq->notify = notify; in vring_create_virtqueue_packed()
1725 vq->weak_barriers = weak_barriers; in vring_create_virtqueue_packed()
1726 vq->broken = false; in vring_create_virtqueue_packed()
1727 vq->last_used_idx = 0; in vring_create_virtqueue_packed()
1728 vq->event_triggered = false; in vring_create_virtqueue_packed()
1729 vq->num_added = 0; in vring_create_virtqueue_packed()
1730 vq->packed_ring = true; in vring_create_virtqueue_packed()
1731 vq->use_dma_api = vring_use_dma_api(vdev); in vring_create_virtqueue_packed()
1733 vq->in_use = false; in vring_create_virtqueue_packed()
1734 vq->last_add_time_valid = false; in vring_create_virtqueue_packed()
1737 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in vring_create_virtqueue_packed()
1739 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_create_virtqueue_packed()
1742 vq->weak_barriers = false; in vring_create_virtqueue_packed()
1744 vq->packed.ring_dma_addr = ring_dma_addr; in vring_create_virtqueue_packed()
1745 vq->packed.driver_event_dma_addr = driver_event_dma_addr; in vring_create_virtqueue_packed()
1746 vq->packed.device_event_dma_addr = device_event_dma_addr; in vring_create_virtqueue_packed()
1748 vq->packed.ring_size_in_bytes = ring_size_in_bytes; in vring_create_virtqueue_packed()
1749 vq->packed.event_size_in_bytes = event_size_in_bytes; in vring_create_virtqueue_packed()
1751 vq->packed.vring.num = num; in vring_create_virtqueue_packed()
1752 vq->packed.vring.desc = ring; in vring_create_virtqueue_packed()
1753 vq->packed.vring.driver = driver; in vring_create_virtqueue_packed()
1754 vq->packed.vring.device = device; in vring_create_virtqueue_packed()
1756 vq->packed.next_avail_idx = 0; in vring_create_virtqueue_packed()
1757 vq->packed.avail_wrap_counter = 1; in vring_create_virtqueue_packed()
1758 vq->packed.used_wrap_counter = 1; in vring_create_virtqueue_packed()
1759 vq->packed.event_flags_shadow = 0; in vring_create_virtqueue_packed()
1760 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; in vring_create_virtqueue_packed()
1762 vq->packed.desc_state = kmalloc_array(num, in vring_create_virtqueue_packed()
1765 if (!vq->packed.desc_state) in vring_create_virtqueue_packed()
1768 memset(vq->packed.desc_state, 0, in vring_create_virtqueue_packed()
1772 vq->free_head = 0; in vring_create_virtqueue_packed()
1774 vq->packed.desc_extra = vring_alloc_desc_extra(vq, num); in vring_create_virtqueue_packed()
1775 if (!vq->packed.desc_extra) in vring_create_virtqueue_packed()
1780 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in vring_create_virtqueue_packed()
1781 vq->packed.vring.driver->flags = in vring_create_virtqueue_packed()
1782 cpu_to_le16(vq->packed.event_flags_shadow); in vring_create_virtqueue_packed()
1786 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_create_virtqueue_packed()
1788 return &vq->vq; in vring_create_virtqueue_packed()
1791 kfree(vq->packed.desc_state); in vring_create_virtqueue_packed()
1793 kfree(vq); in vring_create_virtqueue_packed()
1818 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add() local
1820 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
1874 int virtqueue_add_outbuf(struct virtqueue *vq, in virtqueue_add_outbuf() argument
1879 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); in virtqueue_add_outbuf()
1896 int virtqueue_add_inbuf(struct virtqueue *vq, in virtqueue_add_inbuf() argument
1901 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); in virtqueue_add_inbuf()
1919 int virtqueue_add_inbuf_ctx(struct virtqueue *vq, in virtqueue_add_inbuf_ctx() argument
1925 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); in virtqueue_add_inbuf_ctx()
1942 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare() local
1944 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
1959 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify() local
1961 if (unlikely(vq->broken)) in virtqueue_notify()
1965 if (!vq->notify(_vq)) { in virtqueue_notify()
1966 vq->broken = true; in virtqueue_notify()
1985 bool virtqueue_kick(struct virtqueue *vq) in virtqueue_kick() argument
1987 if (virtqueue_kick_prepare(vq)) in virtqueue_kick()
1988 return virtqueue_notify(vq); in virtqueue_kick()
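virtqueue_kick() is simply kick_prepare followed by notify, so the doorbell write (often a trapping MMIO access) is skipped whenever suppression says the device doesn't need it. A hedged sketch of the same shape; kick_prepare() and notify() here are invented stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* invented stand-ins: prepare decides, notify performs the doorbell write */
static bool kick_prepare(int num_added) { return num_added > 0; }
static bool notify(void) { printf("doorbell\n"); return true; }

/* same shape as virtqueue_kick(): only touch the expensive doorbell
 * when the prepare step says the device actually needs it */
static bool kick(int num_added)
{
	if (kick_prepare(num_added))
		return notify();
	return true;
}

int main(void)
{
	kick(3);   /* rings the doorbell */
	kick(0);   /* suppressed */
	return 0;
}
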
2013 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx() local
2015 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
2036 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb() local
2038 if (vq->packed_ring) in virtqueue_disable_cb()
2059 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare() local
2061 if (vq->event_triggered) in virtqueue_enable_cb_prepare()
2062 vq->event_triggered = false; in virtqueue_enable_cb_prepare()
2064 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2080 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll() local
2082 if (unlikely(vq->broken)) in virtqueue_poll()
2085 virtio_mb(vq->weak_barriers); in virtqueue_poll()
2086 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
2125 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed() local
2127 if (vq->event_triggered) in virtqueue_enable_cb_delayed()
2128 vq->event_triggered = false; in virtqueue_enable_cb_delayed()
2130 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2145 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf() local
2147 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2152 static inline bool more_used(const struct vring_virtqueue *vq) in more_used() argument
2154 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2159 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt() local
2161 if (!more_used(vq)) { in vring_interrupt()
2162 pr_debug("virtqueue interrupt with no work for %p\n", vq); in vring_interrupt()
2166 if (unlikely(vq->broken)) in vring_interrupt()
2170 if (vq->event) in vring_interrupt()
2171 vq->event_triggered = true; in vring_interrupt()
2173 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2174 if (vq->vq.callback) in vring_interrupt()
2175 vq->vq.callback(&vq->vq); in vring_interrupt()
2191 struct vring_virtqueue *vq; in __vring_new_virtqueue() local
2196 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in __vring_new_virtqueue()
2197 if (!vq) in __vring_new_virtqueue()
2200 vq->packed_ring = false; in __vring_new_virtqueue()
2201 vq->vq.callback = callback; in __vring_new_virtqueue()
2202 vq->vq.vdev = vdev; in __vring_new_virtqueue()
2203 vq->vq.name = name; in __vring_new_virtqueue()
2204 vq->vq.num_free = vring.num; in __vring_new_virtqueue()
2205 vq->vq.index = index; in __vring_new_virtqueue()
2206 vq->we_own_ring = false; in __vring_new_virtqueue()
2207 vq->notify = notify; in __vring_new_virtqueue()
2208 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue()
2209 vq->broken = false; in __vring_new_virtqueue()
2210 vq->last_used_idx = 0; in __vring_new_virtqueue()
2211 vq->event_triggered = false; in __vring_new_virtqueue()
2212 vq->num_added = 0; in __vring_new_virtqueue()
2213 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue()
2215 vq->in_use = false; in __vring_new_virtqueue()
2216 vq->last_add_time_valid = false; in __vring_new_virtqueue()
2219 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue()
2221 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue()
2224 vq->weak_barriers = false; in __vring_new_virtqueue()
2226 vq->split.queue_dma_addr = 0; in __vring_new_virtqueue()
2227 vq->split.queue_size_in_bytes = 0; in __vring_new_virtqueue()
2229 vq->split.vring = vring; in __vring_new_virtqueue()
2230 vq->split.avail_flags_shadow = 0; in __vring_new_virtqueue()
2231 vq->split.avail_idx_shadow = 0; in __vring_new_virtqueue()
2235 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in __vring_new_virtqueue()
2236 if (!vq->event) in __vring_new_virtqueue()
2237 vq->split.vring.avail->flags = cpu_to_virtio16(vdev, in __vring_new_virtqueue()
2238 vq->split.avail_flags_shadow); in __vring_new_virtqueue()
2241 vq->split.desc_state = kmalloc_array(vring.num, in __vring_new_virtqueue()
2243 if (!vq->split.desc_state) in __vring_new_virtqueue()
2246 vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num); in __vring_new_virtqueue()
2247 if (!vq->split.desc_extra) in __vring_new_virtqueue()
2251 vq->free_head = 0; in __vring_new_virtqueue()
2252 memset(vq->split.desc_state, 0, vring.num * in __vring_new_virtqueue()
2256 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue()
2258 return &vq->vq; in __vring_new_virtqueue()
2261 kfree(vq->split.desc_state); in __vring_new_virtqueue()
2263 kfree(vq); in __vring_new_virtqueue()
2300 bool (*notify)(struct virtqueue *vq), in vring_new_virtqueue() argument
2301 void (*callback)(struct virtqueue *vq), in vring_new_virtqueue() argument
2317 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue() local
2319 spin_lock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2321 spin_unlock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2323 if (vq->we_own_ring) { in vring_del_virtqueue()
2324 if (vq->packed_ring) { in vring_del_virtqueue()
2325 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2326 vq->packed.ring_size_in_bytes, in vring_del_virtqueue()
2327 vq->packed.vring.desc, in vring_del_virtqueue()
2328 vq->packed.ring_dma_addr); in vring_del_virtqueue()
2330 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2331 vq->packed.event_size_in_bytes, in vring_del_virtqueue()
2332 vq->packed.vring.driver, in vring_del_virtqueue()
2333 vq->packed.driver_event_dma_addr); in vring_del_virtqueue()
2335 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2336 vq->packed.event_size_in_bytes, in vring_del_virtqueue()
2337 vq->packed.vring.device, in vring_del_virtqueue()
2338 vq->packed.device_event_dma_addr); in vring_del_virtqueue()
2340 kfree(vq->packed.desc_state); in vring_del_virtqueue()
2341 kfree(vq->packed.desc_extra); in vring_del_virtqueue()
2343 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2344 vq->split.queue_size_in_bytes, in vring_del_virtqueue()
2345 vq->split.vring.desc, in vring_del_virtqueue()
2346 vq->split.queue_dma_addr); in vring_del_virtqueue()
2349 if (!vq->packed_ring) { in vring_del_virtqueue()
2350 kfree(vq->split.desc_state); in vring_del_virtqueue()
2351 kfree(vq->split.desc_extra); in vring_del_virtqueue()
2353 kfree(vq); in vring_del_virtqueue()
2394 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size() local
2396 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
2402 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken() local
2404 return READ_ONCE(vq->broken); in virtqueue_is_broken()
2418 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device() local
2421 WRITE_ONCE(vq->broken, true); in virtio_break_device()
2429 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr() local
2431 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
2433 if (vq->packed_ring) in virtqueue_get_desc_addr()
2434 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
2436 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
2442 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr() local
2444 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
2446 if (vq->packed_ring) in virtqueue_get_avail_addr()
2447 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
2449 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
2450 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
2456 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr() local
2458 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
2460 if (vq->packed_ring) in virtqueue_get_used_addr()
2461 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
2463 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
2464 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
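For a split ring, the avail and used addresses above are derived from the single contiguous allocation by pointer subtraction (lines 2449-2450 and 2463-2464). A standalone sketch of the spec's layout math, assuming VIRTIO_RING_F_EVENT_IDX is negotiated (which appends the 2-byte used_event to the avail ring):

#include <stdint.h>
#include <stdio.h>

/* split-ring element sizes from the virtio spec */
#define DESC_SZ   16u   /* sizeof(struct vring_desc) */
#define AVAIL_HDR 4u    /* flags + idx */

static uint32_t align_up(uint32_t x, uint32_t a) { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	uint32_t num = 256, align = 4096;   /* typical page alignment */

	uint32_t avail_off = num * DESC_SZ; /* avail sits right after the desc table */
	/* +2 at the end: used_event, present when EVENT_IDX is negotiated */
	uint32_t used_off  = align_up(avail_off + AVAIL_HDR + num * 2 + 2, align);

	/* these offsets are what the (avail - desc) and (used - desc) pointer
	 * subtractions at lines 2449-2450 and 2463-2464 evaluate to */
	printf("avail offset = %u, used offset = %u\n", avail_off, used_off);
	return 0;
}
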
2469 const struct vring *virtqueue_get_vring(struct virtqueue *vq) in virtqueue_get_vring() argument
2471 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()