
Lines matching full:mmu in drivers/iommu/ipmmu-vmsa.c (the Renesas IPMMU-VMSA IOMMU driver)

70 	struct ipmmu_vmsa_device *mmu;  member
224 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) in ipmmu_is_root() argument
226 return mmu->root == mmu; in ipmmu_is_root()
231 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); in __ipmmu_check_device() local
234 if (ipmmu_is_root(mmu)) in __ipmmu_check_device()
235 *rootp = mmu; in __ipmmu_check_device()
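
The fragments from lines 224-235 are the root-lookup helpers: cache (leaf) IPMMU instances defer all context management to a root instance, and a device is its own root when mmu->root points back at itself. A plausible reconstruction of the surrounding code; the ipmmu_find_root() body and the ipmmu_driver platform driver symbol are not visible in the fragments and are assumptions:

    static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
    {
            return mmu->root == mmu;
    }

    /* driver_for_each_device() callback: record the root instance, if bound. */
    static int __ipmmu_check_device(struct device *dev, void *data)
    {
            struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
            struct ipmmu_vmsa_device **rootp = data;

            if (ipmmu_is_root(mmu))
                    *rootp = mmu;

            return 0;
    }

    static struct ipmmu_vmsa_device *ipmmu_find_root(void)
    {
            struct ipmmu_vmsa_device *root = NULL;

            return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
                                          __ipmmu_check_device) == 0 ? root : NULL;
    }
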
252 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument
254 return ioread32(mmu->base + offset); in ipmmu_read()
257 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument
260 iowrite32(data, mmu->base + offset); in ipmmu_write()
266 return ipmmu_read(domain->mmu->root, in ipmmu_ctx_read_root()
273 ipmmu_write(domain->mmu->root, in ipmmu_ctx_write_root()
280 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
281 ipmmu_write(domain->mmu, in ipmmu_ctx_write_all()
284 ipmmu_write(domain->mmu->root, in ipmmu_ctx_write_all()
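
Lines 252-284 are the register accessors. Plain reads and writes go through the instance's own MMIO base (ioread32/iowrite32, shown in full above), while per-context registers always live on the root; ipmmu_ctx_write_all() mirrors the write to the cache instance first when the two differ. A sketch of the context accessors consistent with the fragments; treat the exact offset arithmetic as an assumption, with IM_CTX_SIZE as the per-context register stride and domain->context_id selecting the hardware context:

    static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
                                   unsigned int reg)
    {
            return ipmmu_read(domain->mmu->root,
                              domain->context_id * IM_CTX_SIZE + reg);
    }

    static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
                                     unsigned int reg, u32 data)
    {
            ipmmu_write(domain->mmu->root,
                        domain->context_id * IM_CTX_SIZE + reg, data);
    }

    static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
                                    unsigned int reg, u32 data)
    {
            /* Cache (leaf) instances mirror the root's context registers. */
            if (domain->mmu != domain->mmu->root)
                    ipmmu_write(domain->mmu,
                                domain->context_id * IM_CTX_SIZE + reg, data);

            ipmmu_write(domain->mmu->root,
                        domain->context_id * IM_CTX_SIZE + reg, data);
    }
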
300 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
301 "TLB sync timed out -- MMU may be deadlocked\n"); in ipmmu_tlb_sync()
320 * Enable MMU translation for the microTLB.
325 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable() local
333 ipmmu_write(mmu, IMUASID(utlb), 0); in ipmmu_utlb_enable()
335 ipmmu_write(mmu, IMUCTR(utlb), in ipmmu_utlb_enable()
341 * Disable MMU translation for the microTLB.
346 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_disable() local
348 ipmmu_write(mmu, IMUCTR(utlb), 0); in ipmmu_utlb_disable()
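
Lines 320-348 manage the per-master microTLBs: enabling one programs its ASID register and points its control register at the domain's translation context; disabling just clears the control register. A sketch; the IMUCTR_TTSEL_MMU()/IMUCTR_FLUSH/IMUCTR_MMUEN bit macros are assumptions inferred from the register names in the fragments:

    /* Enable MMU translation for the microTLB. */
    static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                                  unsigned int utlb)
    {
            struct ipmmu_vmsa_device *mmu = domain->mmu;

            ipmmu_write(mmu, IMUASID(utlb), 0);
            ipmmu_write(mmu, IMUCTR(utlb),
                        IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
                        IMUCTR_MMUEN);
    }

    /* Disable MMU translation for the microTLB. */
    static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                                   unsigned int utlb)
    {
            struct ipmmu_vmsa_device *mmu = domain->mmu;

            ipmmu_write(mmu, IMUCTR(utlb), 0);
    }
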
374 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, in ipmmu_domain_allocate_context() argument
380 spin_lock_irqsave(&mmu->lock, flags); in ipmmu_domain_allocate_context()
382 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); in ipmmu_domain_allocate_context()
383 if (ret != mmu->num_ctx) { in ipmmu_domain_allocate_context()
384 mmu->domains[ret] = domain; in ipmmu_domain_allocate_context()
385 set_bit(ret, mmu->ctx); in ipmmu_domain_allocate_context()
389 spin_unlock_irqrestore(&mmu->lock, flags); in ipmmu_domain_allocate_context()
394 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, in ipmmu_domain_free_context() argument
399 spin_lock_irqsave(&mmu->lock, flags); in ipmmu_domain_free_context()
401 clear_bit(context_id, mmu->ctx); in ipmmu_domain_free_context()
402 mmu->domains[context_id] = NULL; in ipmmu_domain_free_context()
404 spin_unlock_irqrestore(&mmu->lock, flags); in ipmmu_domain_free_context()
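
Lines 374-404 implement hardware-context allocation as a bitmap guarded by the device spinlock: find the first free slot, record the owning domain, set the bit; freeing reverses both steps. Reconstructed from the fragments (only the -EBUSY return on exhaustion is an assumption):

    static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
                                             struct ipmmu_vmsa_domain *domain)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&mmu->lock, flags);

            ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
            if (ret != mmu->num_ctx) {
                    mmu->domains[ret] = domain;
                    set_bit(ret, mmu->ctx);
            } else {
                    ret = -EBUSY;
            }

            spin_unlock_irqrestore(&mmu->lock, flags);

            return ret;
    }

    static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
                                          unsigned int context_id)
    {
            unsigned long flags;

            spin_lock_irqsave(&mmu->lock, flags);

            clear_bit(context_id, mmu->ctx);
            mmu->domains[context_id] = NULL;

            spin_unlock_irqrestore(&mmu->lock, flags);
    }
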
435 domain->cfg.iommu_dev = domain->mmu->root->dev; in ipmmu_domain_init_context()
440 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); in ipmmu_domain_init_context()
449 ipmmu_domain_free_context(domain->mmu->root, in ipmmu_domain_init_context()
464 if (domain->mmu->features->twobit_imttbcr_sl0) in ipmmu_domain_init_context()
478 if (domain->mmu->features->setup_imbuscr) in ipmmu_domain_init_context()
491 * Enable the MMU and interrupt generation. The long-descriptor in ipmmu_domain_init_context()
504 if (!domain->mmu) in ipmmu_domain_destroy_context()
515 ipmmu_domain_free_context(domain->mmu->root, domain->context_id); in ipmmu_domain_destroy_context()
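
Lines 435-515 initialize and tear down a translation context. The io-pgtable allocator is tied to the root device, a context is allocated on the root, and on page-table allocation failure the context is handed back. A condensed sketch; the ARM_32_LPAE_S1 page-table format, the error values, and the summarized register programming are assumptions:

    static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
    {
            int ret;

            /* The hardware contexts (and page tables) belong to the root. */
            domain->cfg.iommu_dev = domain->mmu->root->dev;

            ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
            if (ret < 0)
                    return ret;

            domain->context_id = ret;

            domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                               domain);
            if (!domain->iop) {
                    ipmmu_domain_free_context(domain->mmu->root,
                                              domain->context_id);
                    return -EINVAL;
            }

            /*
             * What follows in the real function, per the fragments: program
             * TTBR and IMTTBCR (the SL0 field is two bits wide when
             * features->twobit_imttbcr_sl0 is set), MAIR0, IMBUSCR when
             * features->setup_imbuscr is set, then enable the MMU and
             * interrupt generation through IMCTR.
             */
            return 0;
    }
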
525 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq() local
545 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", in ipmmu_domain_irq()
548 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", in ipmmu_domain_irq()
560 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
563 dev_err_ratelimited(mmu->dev, in ipmmu_domain_irq()
572 struct ipmmu_vmsa_device *mmu = dev; in ipmmu_irq() local
577 spin_lock_irqsave(&mmu->lock, flags); in ipmmu_irq()
582 for (i = 0; i < mmu->num_ctx; i++) { in ipmmu_irq()
583 if (!mmu->domains[i]) in ipmmu_irq()
585 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) in ipmmu_irq()
589 spin_unlock_irqrestore(&mmu->lock, flags); in ipmmu_irq()
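
Lines 525-589 split fault handling in two: ipmmu_domain_irq() decodes a single context's fault status (multiple TLB hits, page table walk aborts, and translation faults reported upward via report_iommu_fault()), while ipmmu_irq() fans the top-level interrupt out to every active context under the device lock. The dispatcher, reconstructed from the fragments:

    static irqreturn_t ipmmu_irq(int irq, void *dev)
    {
            struct ipmmu_vmsa_device *mmu = dev;
            irqreturn_t status = IRQ_NONE;
            unsigned long flags;
            unsigned int i;

            spin_lock_irqsave(&mmu->lock, flags);

            /* Check interrupts for all active contexts. */
            for (i = 0; i < mmu->num_ctx; i++) {
                    if (!mmu->domains[i])
                            continue;
                    if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
                            status = IRQ_HANDLED;
            }

            spin_unlock_irqrestore(&mmu->lock, flags);

            return status;
    }
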
650 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_attach_device() local
655 if (!mmu) { in ipmmu_attach_device()
662 if (!domain->mmu) { in ipmmu_attach_device()
664 domain->mmu = mmu; in ipmmu_attach_device()
668 domain->mmu = NULL; in ipmmu_attach_device()
673 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
679 dev_name(mmu->dev), dev_name(domain->mmu->dev)); in ipmmu_attach_device()
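
Lines 650-679 encode the attach policy: the first attach lazily binds the domain to this IPMMU and initializes a hardware context; any later attach must come from a device behind the same IPMMU instance. A condensed sketch (the domain mutex and the to_vmsa_domain() helper are assumptions; the real function goes on to enable the master's microTLBs on success):

    static int ipmmu_attach_device(struct iommu_domain *io_domain,
                                   struct device *dev)
    {
            struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
            struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
            int ret = 0;

            if (!mmu) {
                    dev_err(dev, "Cannot attach to IPMMU\n");
                    return -ENXIO;
            }

            mutex_lock(&domain->mutex);

            if (!domain->mmu) {
                    /* The domain hasn't been used yet, initialize it. */
                    domain->mmu = mmu;
                    ret = ipmmu_domain_init_context(domain);
                    if (ret < 0)
                            domain->mmu = NULL;
            } else if (domain->mmu != mmu) {
                    /* Devices behind different IPMMUs cannot share a
                     * domain. */
                    dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                            dev_name(mmu->dev), dev_name(domain->mmu->dev));
                    ret = -EINVAL;
            }

            mutex_unlock(&domain->mutex);

            return ret;
    }
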
733 if (domain->mmu) in ipmmu_iotlb_sync()
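
The lone guard at line 733 means a TLB flush is only issued once the domain has been bound to hardware; before the first attach there is no context to invalidate. Roughly (ipmmu_tlb_invalidate() is an assumed helper name):

    static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
    {
            struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

            /* No hardware context yet: nothing to flush. */
            if (domain->mmu)
                    ipmmu_tlb_invalidate(domain);
    }
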
793 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_init_arm_mapping() local
821 if (!mmu->mapping) { in ipmmu_init_arm_mapping()
827 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); in ipmmu_init_arm_mapping()
832 mmu->mapping = mapping; in ipmmu_init_arm_mapping()
836 ret = arm_iommu_attach_device(dev, mmu->mapping); in ipmmu_init_arm_mapping()
846 if (mmu->mapping) in ipmmu_init_arm_mapping()
847 arm_iommu_release_mapping(mmu->mapping); in ipmmu_init_arm_mapping()
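
Lines 793-847 are the 32-bit ARM DMA path: one dma_iommu_mapping is created lazily per IPMMU instance and shared by every master behind it, then each device is attached to that mapping. A sketch; the SZ_1G base / SZ_2G size of the IOVA window is an assumption:

    static int ipmmu_init_arm_mapping(struct device *dev)
    {
            struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
            struct dma_iommu_mapping *mapping;
            int ret;

            /* Create the shared mapping on first use. */
            if (!mmu->mapping) {
                    mapping = arm_iommu_create_mapping(&platform_bus_type,
                                                       SZ_1G, SZ_2G);
                    if (IS_ERR(mapping)) {
                            dev_err(mmu->dev,
                                    "failed to create ARM IOMMU mapping\n");
                            ret = PTR_ERR(mapping);
                            goto error;
                    }
                    mmu->mapping = mapping;
            }

            ret = arm_iommu_attach_device(dev, mmu->mapping);
            if (ret < 0)
                    goto error;

            return 0;

    error:
            if (mmu->mapping)
                    arm_iommu_release_mapping(mmu->mapping);
            return ret;
    }
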
881 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_find_group() local
884 if (mmu->group) in ipmmu_find_group()
885 return iommu_group_ref_get(mmu->group); in ipmmu_find_group()
889 mmu->group = group; in ipmmu_find_group()
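
Lines 881-889: all masters behind one IPMMU instance share a single iommu_group, cached on the device and reference-counted on reuse. Reconstructed (the iommu_group_alloc() call filling the gap between the fragments is an assumption):

    static struct iommu_group *ipmmu_find_group(struct device *dev)
    {
            struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
            struct iommu_group *group;

            if (mmu->group)
                    return iommu_group_ref_get(mmu->group);

            group = iommu_group_alloc();
            if (!IS_ERR(group))
                    mmu->group = group;

            return group;
    }
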
915 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) in ipmmu_device_reset() argument
920 for (i = 0; i < mmu->num_ctx; ++i) in ipmmu_device_reset()
921 ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); in ipmmu_device_reset()
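
Lines 915-921: resetting the device simply disables translation on every hardware context by zeroing each context's IMCTR. Reconstructed:

    static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
    {
            unsigned int i;

            /* Disable all contexts. */
            for (i = 0; i < mmu->num_ctx; ++i)
                    ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
    }
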
970 struct ipmmu_vmsa_device *mmu; in ipmmu_probe() local
975 mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); in ipmmu_probe()
976 if (!mmu) { in ipmmu_probe()
981 mmu->dev = &pdev->dev; in ipmmu_probe()
982 mmu->num_utlbs = 48; in ipmmu_probe()
983 spin_lock_init(&mmu->lock); in ipmmu_probe()
984 bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); in ipmmu_probe()
985 mmu->features = of_device_get_match_data(&pdev->dev); in ipmmu_probe()
990 mmu->base = devm_ioremap_resource(&pdev->dev, res); in ipmmu_probe()
991 if (IS_ERR(mmu->base)) in ipmmu_probe()
992 return PTR_ERR(mmu->base); in ipmmu_probe()
1006 if (mmu->features->use_ns_alias_offset) in ipmmu_probe()
1007 mmu->base += IM_NS_ALIAS_OFFSET; in ipmmu_probe()
1009 mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX, in ipmmu_probe()
1010 mmu->features->number_of_contexts); in ipmmu_probe()
1018 if (!mmu->features->has_cache_leaf_nodes || in ipmmu_probe()
1020 mmu->root = mmu; in ipmmu_probe()
1022 mmu->root = ipmmu_find_root(); in ipmmu_probe()
1027 if (!mmu->root) in ipmmu_probe()
1031 if (ipmmu_is_root(mmu)) { in ipmmu_probe()
1038 dev_name(&pdev->dev), mmu); in ipmmu_probe()
1044 ipmmu_device_reset(mmu); in ipmmu_probe()
1046 if (mmu->features->reserved_context) { in ipmmu_probe()
1048 set_bit(0, mmu->ctx); in ipmmu_probe()
1057 if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { in ipmmu_probe()
1058 ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, in ipmmu_probe()
1063 iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); in ipmmu_probe()
1064 iommu_device_set_fwnode(&mmu->iommu, in ipmmu_probe()
1067 ret = iommu_device_register(&mmu->iommu); in ipmmu_probe()
1083 platform_set_drvdata(pdev, mmu); in ipmmu_probe()
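
Lines 970-1083 are the probe sequence: allocate and initialize the per-device structure, map the registers (applying the non-secure alias offset on SoCs that need it), clamp the context count to IPMMU_CTX_MAX, decide whether this instance is a root or a cache node, and, for roots, request the IRQ, reset the hardware and optionally reserve context 0; finally register with the IOMMU core. A condensed sketch stitched from the fragments; the "renesas,ipmmu-main" root-detection property, the -EPROBE_DEFER return, and the error paths are assumptions:

    static int ipmmu_probe(struct platform_device *pdev)
    {
            struct ipmmu_vmsa_device *mmu;
            struct resource *res;
            int irq, ret;

            mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
            if (!mmu)
                    return -ENOMEM;

            mmu->dev = &pdev->dev;
            mmu->num_utlbs = 48;
            spin_lock_init(&mmu->lock);
            bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
            mmu->features = of_device_get_match_data(&pdev->dev);

            /* Map the registers. */
            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            mmu->base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(mmu->base))
                    return PTR_ERR(mmu->base);

            /* Some SoCs are only reachable through the non-secure alias. */
            if (mmu->features->use_ns_alias_offset)
                    mmu->base += IM_NS_ALIAS_OFFSET;

            mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
                                 mmu->features->number_of_contexts);

            /* Root instance, or cache node pointing at a root? */
            if (!mmu->features->has_cache_leaf_nodes ||
                !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main",
                                  NULL))
                    mmu->root = mmu;
            else
                    mmu->root = ipmmu_find_root();
            if (!mmu->root)
                    return -EPROBE_DEFER;

            /* Root instances own the IRQ and the hardware contexts. */
            if (ipmmu_is_root(mmu)) {
                    irq = platform_get_irq(pdev, 0);
                    if (irq < 0)
                            return irq;

                    ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                                           dev_name(&pdev->dev), mmu);
                    if (ret < 0)
                            return ret;

                    ipmmu_device_reset(mmu);

                    if (mmu->features->reserved_context)
                            set_bit(0, mmu->ctx);
            }

            /* Register with the IOMMU core; on multi-node SoCs the root
             * itself stays internal and only cache nodes are registered. */
            if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
                    ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev,
                                                 NULL, dev_name(&pdev->dev));
                    if (ret)
                            return ret;

                    iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
                    iommu_device_set_fwnode(&mmu->iommu,
                                            &pdev->dev.of_node->fwnode);

                    ret = iommu_device_register(&mmu->iommu);
                    if (ret)
                            return ret;
            }

            platform_set_drvdata(pdev, mmu);

            return 0;
    }
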
1090 struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); in ipmmu_remove() local
1092 iommu_device_sysfs_remove(&mmu->iommu); in ipmmu_remove()
1093 iommu_device_unregister(&mmu->iommu); in ipmmu_remove()
1095 arm_iommu_release_mapping(mmu->mapping); in ipmmu_remove()
1097 ipmmu_device_reset(mmu); in ipmmu_remove()
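
Lines 1090-1097: teardown mirrors probe in reverse: unregister from the IOMMU core, release the shared ARM mapping if one was created (arm_iommu_release_mapping() tolerates a NULL mapping), and disable the hardware. Reconstructed:

    static int ipmmu_remove(struct platform_device *pdev)
    {
            struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

            iommu_device_sysfs_remove(&mmu->iommu);
            iommu_device_unregister(&mmu->iommu);

            arm_iommu_release_mapping(mmu->mapping);

            ipmmu_device_reset(mmu);

            return 0;
    }
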