
Lines Matching +full:virtio +full:-iommu ("virtio-iommu")

1 // SPDX-License-Identifier: GPL-2.0
3 * Virtio driver for the paravirtualized IOMMU
12 #include <linux/dma-iommu.h>
15 #include <linux/iommu.h>
21 #include <linux/virtio.h>
36 struct iommu_device iommu; member
105 struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); in viommu_get_req_errno()
107 switch (tail->status) { in viommu_get_req_errno()
111 return -ENOSYS; in viommu_get_req_errno()
113 return -EINVAL; in viommu_get_req_errno()
115 return -ERANGE; in viommu_get_req_errno()
117 return -ENOENT; in viommu_get_req_errno()
119 return -EFAULT; in viommu_get_req_errno()
121 return -ENOMEM; in viommu_get_req_errno()
125 return -EIO; in viommu_get_req_errno()
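
Only the return statements of viommu_get_req_errno() match the query, so the case labels are hidden. As a minimal sketch, assuming the VIRTIO_IOMMU_S_* status codes defined by the virtio-iommu specification (linux/virtio_iommu.h), the mapping most likely reads as follows; the helper name is ours, not the driver's:

static int viommu_status_to_errno(u8 status)
{
        /* The device reports the outcome of each request in the status
         * byte of the trailing virtio_iommu_req_tail; translate it to a
         * Linux errno for the caller. */
        switch (status) {
        case VIRTIO_IOMMU_S_OK:
                return 0;
        case VIRTIO_IOMMU_S_UNSUPP:
                return -ENOSYS;
        case VIRTIO_IOMMU_S_INVAL:
                return -EINVAL;
        case VIRTIO_IOMMU_S_RANGE:
                return -ERANGE;
        case VIRTIO_IOMMU_S_NOENT:
                return -ENOENT;
        case VIRTIO_IOMMU_S_FAULT:
                return -EFAULT;
        case VIRTIO_IOMMU_S_NOMEM:
                return -ENOMEM;
        case VIRTIO_IOMMU_S_IOERR:
        case VIRTIO_IOMMU_S_DEVERR:
        default:
                return -EIO;
        }
}
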
131 struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); in viommu_set_req_status()
133 tail->status = status; in viommu_set_req_status()
142 if (req->type == VIRTIO_IOMMU_T_PROBE) in viommu_get_write_desc_offset()
143 return len - viommu->probe_size - tail_size; in viommu_get_write_desc_offset()
145 return len - tail_size; in viommu_get_write_desc_offset()
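
viommu_get_write_desc_offset() marks where the device-writable part of a request buffer begins. A sketch of the layout implied by the two return statements above (only the tail, plus the property area for PROBE requests, may be written by the device):

/*
 * Request buffer layout (illustrative):
 *
 *   most requests:  | head + parameters (device-readable) | tail (device-writable) |
 *   T_PROBE:        | head + parameters (device-readable) | probe properties + tail (device-writable) |
 *
 * write_offset = len - sizeof(struct virtio_iommu_req_tail)
 *                    - viommu->probe_size   (PROBE requests only)
 */
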
149 * __viommu_sync_req - Complete all in-flight requests
152 * requests that were in-flight at the time of the call have completed.
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_sync_req()
161 assert_spin_locked(&viommu->request_lock); in __viommu_sync_req()
165 while (!list_empty(&viommu->requests)) { in __viommu_sync_req()
172 viommu_set_req_status(req->buf, req->len, in __viommu_sync_req()
175 write_len = req->len - req->write_offset; in __viommu_sync_req()
176 if (req->writeback && len == write_len) in __viommu_sync_req()
177 memcpy(req->writeback, req->buf + req->write_offset, in __viommu_sync_req()
180 list_del(&req->list); in __viommu_sync_req()
192 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_sync_req()
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_sync_req()
196 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_sync_req()
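
Only fragments of the completion path match the query. As a rough sketch, assuming the viommu_request bookkeeping fields shown above and the stock virtio API, __viommu_sync_req() kicks the request queue and then drains used buffers until the pending list is empty, copying the device-written part back to each caller's buffer; the gap-filling below is our reconstruction, not verbatim source:

static void drain_requests_sketch(struct viommu_dev *viommu)
{
        unsigned int len;
        size_t write_len;
        struct viommu_request *req;
        struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

        /* Caller holds viommu->request_lock. */
        virtqueue_kick(vq);

        while (!list_empty(&viommu->requests)) {
                len = 0;
                req = virtqueue_get_buf(vq, &len);
                if (!req)
                        continue;

                if (!len)       /* device wrote nothing back: flag an I/O error */
                        viommu_set_req_status(req->buf, req->len,
                                              VIRTIO_IOMMU_S_IOERR);

                write_len = req->len - req->write_offset;
                if (req->writeback && len == write_len)
                        memcpy(req->writeback, req->buf + req->write_offset,
                               write_len);

                list_del(&req->list);
                kfree(req);
        }
}
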
202 * __viommu_add_req - Add one request to the queue
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_add_req()
227 assert_spin_locked(&viommu->request_lock); in __viommu_add_req()
231 return -EINVAL; in __viommu_add_req()
235 return -ENOMEM; in __viommu_add_req()
237 req->len = len; in __viommu_add_req()
239 req->writeback = buf + write_offset; in __viommu_add_req()
240 req->write_offset = write_offset; in __viommu_add_req()
242 memcpy(&req->buf, buf, write_offset); in __viommu_add_req()
244 sg_init_one(&top_sg, req->buf, write_offset); in __viommu_add_req()
245 sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset); in __viommu_add_req()
248 if (ret == -ENOSPC) { in __viommu_add_req()
256 list_add_tail(&req->list, &viommu->requests); in __viommu_add_req()
269 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_add_req()
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret); in viommu_add_req()
273 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_add_req()
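
__viommu_add_req() splits the copied request at write_offset into a device-readable scatterlist and a device-writable one, so the device can only touch the tail (and, for PROBE, the property area). A minimal sketch of that split, assuming the standard scatterlist/virtqueue API; the helper name is ours:

static int queue_request_sketch(struct virtqueue *vq, struct viommu_request *req,
                                size_t len, size_t write_offset)
{
        struct scatterlist top_sg, bottom_sg;
        struct scatterlist *sg[2] = { &top_sg, &bottom_sg };

        sg_init_one(&top_sg, req->buf, write_offset);            /* device-readable */
        sg_init_one(&bottom_sg, req->buf + write_offset,         /* device-writable */
                    len - write_offset);

        /* One out_sg plus one in_sg; req is the cookie handed back by
         * virtqueue_get_buf() on completion. */
        return virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
}

The -ENOSPC branch above suggests that a full queue is handled by syncing outstanding requests to reclaim descriptors and then retrying the add.
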
288 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_send_req_sync()
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret); in viommu_send_req_sync()
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_send_req_sync()
299 /* Fall-through (get the actual request status) */ in viommu_send_req_sync()
304 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_send_req_sync()
309 * viommu_add_mapping - add a mapping to the internal tree
321 return -ENOMEM; in viommu_add_mapping()
323 mapping->paddr = paddr; in viommu_add_mapping()
324 mapping->iova.start = iova; in viommu_add_mapping()
325 mapping->iova.last = iova + size - 1; in viommu_add_mapping()
326 mapping->flags = flags; in viommu_add_mapping()
328 spin_lock_irqsave(&vdomain->mappings_lock, irqflags); in viommu_add_mapping()
329 interval_tree_insert(&mapping->iova, &vdomain->mappings); in viommu_add_mapping()
330 spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags); in viommu_add_mapping()
336 * viommu_del_mappings - remove mappings from the internal tree
350 unsigned long last = iova + size - 1; in viommu_del_mappings()
354 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_del_mappings()
355 next = interval_tree_iter_first(&vdomain->mappings, iova, last); in viommu_del_mappings()
362 if (mapping->iova.start < iova) in viommu_del_mappings()
366 * Virtio-iommu doesn't allow UNMAP to split a mapping created in viommu_del_mappings()
369 unmapped += mapping->iova.last - mapping->iova.start + 1; in viommu_del_mappings()
371 interval_tree_remove(node, &vdomain->mappings); in viommu_del_mappings()
374 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_del_mappings()
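
Mappings are mirrored in an interval tree keyed on the inclusive IOVA range, so unmap sizes and IOVA-to-physical lookups can be answered without a round trip to the device. A minimal sketch of that bookkeeping, using linux/interval_tree.h; the demo_* names are ours, and the caller is assumed to hold the mappings lock:

struct demo_mapping {
        phys_addr_t                     paddr;
        struct interval_tree_node       iova;   /* keyed on [start, last] */
        u32                             flags;
};

/* Remove every mapping overlapping [iova, iova + size - 1]. A mapping that
 * starts before the range stops the walk: virtio-iommu does not allow UNMAP
 * to split a mapping created by a single MAP request. */
static size_t demo_del_mappings(struct rb_root_cached *mappings,
                                unsigned long iova, size_t size)
{
        size_t unmapped = 0;
        unsigned long last = iova + size - 1;
        struct interval_tree_node *node, *next;

        next = interval_tree_iter_first(mappings, iova, last);
        while (next) {
                struct demo_mapping *m;

                node = next;
                m = container_of(node, struct demo_mapping, iova);
                /* Advance before removing the current node. */
                next = interval_tree_iter_next(node, iova, last);

                if (m->iova.start < iova)
                        break;

                unmapped += m->iova.last - m->iova.start + 1;
                interval_tree_remove(node, mappings);
                kfree(m);
        }

        return unmapped;
}
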
380 * viommu_replay_mappings - re-send MAP requests
383 * mappings were deleted from the device. Re-create the mappings available in
394 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_replay_mappings()
395 node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); in viommu_replay_mappings()
400 .domain = cpu_to_le32(vdomain->id), in viommu_replay_mappings()
401 .virt_start = cpu_to_le64(mapping->iova.start), in viommu_replay_mappings()
402 .virt_end = cpu_to_le64(mapping->iova.last), in viommu_replay_mappings()
403 .phys_start = cpu_to_le64(mapping->paddr), in viommu_replay_mappings()
404 .flags = cpu_to_le32(mapping->flags), in viommu_replay_mappings()
407 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_replay_mappings()
411 node = interval_tree_iter_next(node, 0, -1UL); in viommu_replay_mappings()
413 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_replay_mappings()
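
viommu_replay_mappings() walks the whole tree (the [0, -1UL] bounds cover the full index range) and re-issues one MAP request per node once a domain gains an endpoint again. A sketch of the request it builds, assuming struct virtio_iommu_req_map from linux/virtio_iommu.h; note that virt_end is inclusive (iova.last, not start + size), and the head type is an assumption since that line is not matched above:

static void demo_fill_map_req(struct virtio_iommu_req_map *map, u32 domain_id,
                              u64 iova_start, u64 iova_last, u64 paddr, u32 flags)
{
        *map = (struct virtio_iommu_req_map) {
                .head.type  = VIRTIO_IOMMU_T_MAP,       /* assumed */
                .domain     = cpu_to_le32(domain_id),
                .virt_start = cpu_to_le64(iova_start),
                .virt_end   = cpu_to_le64(iova_last),   /* inclusive */
                .phys_start = cpu_to_le64(paddr),
                .flags      = cpu_to_le32(flags),
        };
}
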
428 start = start64 = le64_to_cpu(mem->start); in viommu_add_resv_mem()
429 end = end64 = le64_to_cpu(mem->end); in viommu_add_resv_mem()
430 size = end64 - start64 + 1; in viommu_add_resv_mem()
432 /* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */ in viommu_add_resv_mem()
433 if (start != start64 || end != end64 || size < end64 - start64) in viommu_add_resv_mem()
434 return -EOVERFLOW; in viommu_add_resv_mem()
437 return -EINVAL; in viommu_add_resv_mem()
439 switch (mem->subtype) { in viommu_add_resv_mem()
441 dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", in viommu_add_resv_mem()
442 mem->subtype); in viommu_add_resv_mem()
454 return -ENOMEM; in viommu_add_resv_mem()
456 list_add(&region->list, &vdev->resv_regions); in viommu_add_resv_mem()
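
The overflow check in viommu_add_resv_mem() guards against two distinct problems: the 64-bit range advertised by the device may not fit the kernel's (possibly narrower) physical address type, and end64 - start64 + 1 wraps to zero when the region covers the entire 64-bit space. A self-contained illustration in user-space C, using a deliberately narrow 32-bit stand-in for phys_addr_t so the truncation case is visible:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t addr_t;        /* stand-in for phys_addr_t/dma_addr_t */

static int check_resv_range(uint64_t start64, uint64_t end64)
{
        addr_t start = start64;
        addr_t end = end64;
        uint64_t size = end64 - start64 + 1;

        /* Catches truncation and the end64 - start64 + 1 == 0 wrap. */
        if (start != start64 || end != end64 || size < end64 - start64)
                return -1;      /* -EOVERFLOW in the driver */
        return 0;
}

int main(void)
{
        printf("%d\n", check_resv_range(0x1000, 0x1fff));                  /*  0: fits      */
        printf("%d\n", check_resv_range(0x100000000ULL, 0x10000ffffULL));  /* -1: truncated */
        printf("%d\n", check_resv_range(0, UINT64_MAX));                   /* -1: size wraps */
        return 0;
}
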
471 if (!fwspec->num_ids) in viommu_probe_endpoint()
472 return -EINVAL; in viommu_probe_endpoint()
474 probe_len = sizeof(*probe) + viommu->probe_size + in viommu_probe_endpoint()
478 return -ENOMEM; in viommu_probe_endpoint()
480 probe->head.type = VIRTIO_IOMMU_T_PROBE; in viommu_probe_endpoint()
485 probe->endpoint = cpu_to_le32(fwspec->ids[0]); in viommu_probe_endpoint()
491 prop = (void *)probe->properties; in viommu_probe_endpoint()
492 type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; in viommu_probe_endpoint()
495 cur < viommu->probe_size) { in viommu_probe_endpoint()
496 len = le16_to_cpu(prop->length) + sizeof(*prop); in viommu_probe_endpoint()
510 if (cur >= viommu->probe_size) in viommu_probe_endpoint()
513 prop = (void *)probe->properties + cur; in viommu_probe_endpoint()
514 type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; in viommu_probe_endpoint()
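
The reply to a PROBE request is a list of type/length-prefixed properties that the device writes into the writable part of the buffer; the loop above walks them until the probe area is exhausted or a terminating type ends the list. A self-contained sketch of that walk in user-space C (little-endian host assumed, le16 conversion omitted); struct demo_prop mirrors the type/length header and the demo_* names are ours:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_prop {
        uint16_t type;          /* property type, 0 terminates the list */
        uint16_t length;        /* payload bytes following this header */
};

static void demo_walk_props(const uint8_t *props, size_t probe_size)
{
        size_t cur = 0;

        while (cur + sizeof(struct demo_prop) <= probe_size) {
                struct demo_prop p;

                memcpy(&p, props + cur, sizeof(p));     /* unaligned-safe read */
                if (!p.type)
                        break;

                printf("property type %#x, %u payload bytes\n",
                       (unsigned int)p.type, (unsigned int)p.length);
                cur += sizeof(p) + p.length;            /* header + payload */
        }
}
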
527 u8 reason = fault->reason; in viommu_fault_handler()
528 u32 flags = le32_to_cpu(fault->flags); in viommu_fault_handler()
529 u32 endpoint = le32_to_cpu(fault->endpoint); in viommu_fault_handler()
530 u64 address = le64_to_cpu(fault->address); in viommu_fault_handler()
547 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", in viommu_fault_handler()
553 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", in viommu_fault_handler()
564 struct viommu_dev *viommu = vq->vdev->priv; in viommu_event_handler()
568 dev_err(viommu->dev, in viommu_event_handler()
571 } else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) { in viommu_event_handler()
572 viommu_fault_handler(viommu, &evt->fault); in viommu_event_handler()
578 dev_err(viommu->dev, "could not add event buffer\n"); in viommu_event_handler()
584 /* IOMMU API */
597 mutex_init(&vdomain->mutex); in viommu_domain_alloc()
598 spin_lock_init(&vdomain->mappings_lock); in viommu_domain_alloc()
599 vdomain->mappings = RB_ROOT_CACHED; in viommu_domain_alloc()
602 iommu_get_dma_cookie(&vdomain->domain)) { in viommu_domain_alloc()
607 return &vdomain->domain; in viommu_domain_alloc()
615 struct viommu_dev *viommu = vdev->viommu; in viommu_domain_finalise()
618 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); in viommu_domain_finalise()
620 dev_err(vdev->dev, in viommu_domain_finalise()
623 return -EINVAL; in viommu_domain_finalise()
626 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, in viommu_domain_finalise()
627 viommu->last_domain, GFP_KERNEL); in viommu_domain_finalise()
631 vdomain->id = (unsigned int)ret; in viommu_domain_finalise()
633 domain->pgsize_bitmap = viommu->pgsize_bitmap; in viommu_domain_finalise()
634 domain->geometry = viommu->geometry; in viommu_domain_finalise()
636 vdomain->map_flags = viommu->map_flags; in viommu_domain_finalise()
637 vdomain->viommu = viommu; in viommu_domain_finalise()
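
viommu_domain_finalise() derives the smallest granule the device supports from the lowest set bit of its page-size bitmap, then allocates a domain ID from the [viommu->first_domain, viommu->last_domain] range set up in viommu_probe(). A small worked example of the granule computation in user-space C, with __builtin_ctzl standing in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
        /* e.g. a device advertising 4 KiB, 2 MiB and 1 GiB granules */
        unsigned long pgsize_bitmap = 0x1000UL | 0x200000UL | 0x40000000UL;

        /* lowest set bit == smallest supported page size */
        unsigned long granule = 1UL << __builtin_ctzl(pgsize_bitmap);

        printf("smallest granule: %#lx bytes\n", granule);      /* 0x1000 */
        return 0;
}
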
651 if (vdomain->viommu) in viommu_domain_free()
652 ida_free(&vdomain->viommu->domain_ids, vdomain->id); in viommu_domain_free()
666 mutex_lock(&vdomain->mutex); in viommu_attach_dev()
667 if (!vdomain->viommu) { in viommu_attach_dev()
673 } else if (vdomain->viommu != vdev->viommu) { in viommu_attach_dev()
675 ret = -EXDEV; in viommu_attach_dev()
677 mutex_unlock(&vdomain->mutex); in viommu_attach_dev()
683 * In the virtio-iommu device, when attaching the endpoint to a new in viommu_attach_dev()
692 * vdev->vdomain is protected by group->mutex in viommu_attach_dev()
694 if (vdev->vdomain) in viommu_attach_dev()
695 vdev->vdomain->nr_endpoints--; in viommu_attach_dev()
699 .domain = cpu_to_le32(vdomain->id), in viommu_attach_dev()
702 for (i = 0; i < fwspec->num_ids; i++) { in viommu_attach_dev()
703 req.endpoint = cpu_to_le32(fwspec->ids[i]); in viommu_attach_dev()
705 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); in viommu_attach_dev()
710 if (!vdomain->nr_endpoints) { in viommu_attach_dev()
720 vdomain->nr_endpoints++; in viommu_attach_dev()
721 vdev->vdomain = vdomain; in viommu_attach_dev()
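
Attaching a domain issues one ATTACH request per endpoint ID in the device's fwspec, and the !vdomain->nr_endpoints branch above handles the first endpoint joining a previously empty domain (this is where the earlier viommu_replay_mappings() comes into play). A sketch of the per-endpoint loop, assuming struct virtio_iommu_req_attach from linux/virtio_iommu.h; the head type and helper name are our additions:

static int demo_attach_endpoints(struct viommu_dev *viommu, u32 domain_id,
                                 struct iommu_fwspec *fwspec)
{
        int ret;
        unsigned int i;
        struct virtio_iommu_req_attach req = {
                .head.type = VIRTIO_IOMMU_T_ATTACH,     /* assumed */
                .domain    = cpu_to_le32(domain_id),
        };

        for (i = 0; i < fwspec->num_ids; i++) {
                req.endpoint = cpu_to_le32(fwspec->ids[i]);
                ret = viommu_send_req_sync(viommu, &req, sizeof(req));
                if (ret)
                        return ret;
        }
        return 0;
}
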
738 if (flags & ~vdomain->map_flags) in viommu_map()
739 return -EINVAL; in viommu_map()
747 .domain = cpu_to_le32(vdomain->id), in viommu_map()
750 .virt_end = cpu_to_le64(iova + size - 1), in viommu_map()
754 if (!vdomain->nr_endpoints) in viommu_map()
757 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_map()
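
The flags checked against vdomain->map_flags above are built from the IOMMU prot bits a few lines earlier (those lines don't match the query). A minimal sketch of that translation, assuming the VIRTIO_IOMMU_MAP_F_* bits from linux/virtio_iommu.h and the IOMMU_* prot flags from linux/iommu.h:

static u32 demo_prot_to_map_flags(int prot)
{
        return (prot & IOMMU_READ  ? VIRTIO_IOMMU_MAP_F_READ  : 0) |
               (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
               (prot & IOMMU_MMIO  ? VIRTIO_IOMMU_MAP_F_MMIO  : 0);
}

The check against vdomain->map_flags matters because viommu_probe() below only adds VIRTIO_IOMMU_MAP_F_MMIO to map_flags conditionally.
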
777 if (!vdomain->nr_endpoints) in viommu_unmap()
782 .domain = cpu_to_le32(vdomain->id), in viommu_unmap()
784 .virt_end = cpu_to_le64(iova + unmapped - 1), in viommu_unmap()
787 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); in viommu_unmap()
800 spin_lock_irqsave(&vdomain->mappings_lock, flags); in viommu_iova_to_phys()
801 node = interval_tree_iter_first(&vdomain->mappings, iova, iova); in viommu_iova_to_phys()
804 paddr = mapping->paddr + (iova - mapping->iova.start); in viommu_iova_to_phys()
806 spin_unlock_irqrestore(&vdomain->mappings_lock, flags); in viommu_iova_to_phys()
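
The translation in viommu_iova_to_phys() is pure bookkeeping: find the mapping whose inclusive [start, last] interval contains the IOVA and add the offset into that interval to the recorded physical base. For example, with a mapping covering IOVA [0x10000000, 0x1000ffff] backed by physical address 0x80000000, IOVA 0x10000420 resolves to 0x80000000 + (0x10000420 - 0x10000000) = 0x80000420.
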
816 viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync()
825 list_for_each_entry(entry, &vdev->resv_regions, list) { in viommu_get_resv_regions()
826 if (entry->type == IOMMU_RESV_MSI) in viommu_get_resv_regions()
832 list_add_tail(&new_entry->list, head); in viommu_get_resv_regions()
837 * software-mapped region. in viommu_get_resv_regions()
845 list_add_tail(&msi->list, head); in viommu_get_resv_regions()
856 return dev->parent->fwnode == data; in viommu_match_node()
865 return dev ? dev_to_virtio(dev)->priv : NULL; in viommu_get_by_fwnode()
875 if (!fwspec || fwspec->ops != &viommu_ops) in viommu_probe_device()
876 return ERR_PTR(-ENODEV); in viommu_probe_device()
878 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); in viommu_probe_device()
880 return ERR_PTR(-ENODEV); in viommu_probe_device()
884 return ERR_PTR(-ENOMEM); in viommu_probe_device()
886 vdev->dev = dev; in viommu_probe_device()
887 vdev->viommu = viommu; in viommu_probe_device()
888 INIT_LIST_HEAD(&vdev->resv_regions); in viommu_probe_device()
891 if (viommu->probe_size) { in viommu_probe_device()
898 return &viommu->iommu; in viommu_probe_device()
901 generic_iommu_put_resv_regions(dev, &vdev->resv_regions); in viommu_probe_device()
912 if (!fwspec || fwspec->ops != &viommu_ops) in viommu_release_device()
917 generic_iommu_put_resv_regions(dev, &vdev->resv_regions); in viommu_release_device()
931 return iommu_fwspec_add_ids(dev, args->args, 1); in viommu_of_xlate()
952 struct virtio_device *vdev = dev_to_virtio(viommu->dev); in viommu_init_vqs()
959 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks, in viommu_init_vqs()
968 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; in viommu_fill_evtq()
969 size_t nr_evts = vq->num_free; in viommu_fill_evtq()
971 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, in viommu_fill_evtq()
974 return -ENOMEM; in viommu_fill_evtq()
988 struct device *parent_dev = vdev->dev.parent; in viommu_probe()
990 struct device *dev = &vdev->dev; in viommu_probe()
992 u64 input_end = -1UL; in viommu_probe()
997 return -ENODEV; in viommu_probe()
1001 return -ENOMEM; in viommu_probe()
1003 spin_lock_init(&viommu->request_lock); in viommu_probe()
1004 ida_init(&viommu->domain_ids); in viommu_probe()
1005 viommu->dev = dev; in viommu_probe()
1006 viommu->vdev = vdev; in viommu_probe()
1007 INIT_LIST_HEAD(&viommu->requests); in viommu_probe()
1014 &viommu->pgsize_bitmap); in viommu_probe()
1016 if (!viommu->pgsize_bitmap) { in viommu_probe()
1017 ret = -EINVAL; in viommu_probe()
1021 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; in viommu_probe()
1022 viommu->last_domain = ~0U; in viommu_probe()
1035 &viommu->first_domain); in viommu_probe()
1039 &viommu->last_domain); in viommu_probe()
1043 &viommu->probe_size); in viommu_probe()
1045 viommu->geometry = (struct iommu_domain_geometry) { in viommu_probe()
1052 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; in viommu_probe()
1054 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; in viommu_probe()
1063 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", in viommu_probe()
1068 iommu_device_set_ops(&viommu->iommu, &viommu_ops); in viommu_probe()
1069 iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode); in viommu_probe()
1071 iommu_device_register(&viommu->iommu); in viommu_probe()
1093 vdev->priv = viommu; in viommu_probe()
1096 order_base_2(viommu->geometry.aperture_end)); in viommu_probe()
1097 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); in viommu_probe()
1102 iommu_device_sysfs_remove(&viommu->iommu); in viommu_probe()
1103 iommu_device_unregister(&viommu->iommu); in viommu_probe()
1105 vdev->config->del_vqs(vdev); in viommu_probe()
1112 struct viommu_dev *viommu = vdev->priv; in viommu_remove()
1114 iommu_device_sysfs_remove(&viommu->iommu); in viommu_remove()
1115 iommu_device_unregister(&viommu->iommu); in viommu_remove()
1118 vdev->config->reset(vdev); in viommu_remove()
1119 vdev->config->del_vqs(vdev); in viommu_remove()
1121 dev_info(&vdev->dev, "device removed\n"); in viommu_remove()
1126 dev_warn(&vdev->dev, "config changed\n"); in viommu_config_changed()
1141 MODULE_DEVICE_TABLE(virtio, id_table);
1156 MODULE_DESCRIPTION("Virtio IOMMU driver");
1157 MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");