Lines Matching +full:protection +full:- +full:domain
1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/dma-mapping.h>
25 #include <linux/dma-iommu.h>
39 #define SECT_MASK (~(SECT_SIZE - 1))
40 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
41 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
56  * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address spaces
63 static short PG_ENT_SHIFT = -1;
99 #define section_offs(iova) (iova & (SECT_SIZE - 1))
101 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
103 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
115 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
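The mask and index helpers above split an I/O virtual address into a level-1 (section) index, a level-2 (page) index and a byte offset. A minimal standalone sketch of that decomposition, assuming the usual exynos-iommu geometry of 1 MiB sections, 64 KiB large pages and 4 KiB small pages with 256 level-2 entries per section (the sizes themselves are not part of this match set):

/* Editor's illustrative sketch -- not part of exynos-iommu.c; sizes assumed. */
#include <stdio.h>

#define SECT_SIZE      (1u << 20)   /* 1 MiB section           */
#define SPAGE_SIZE     (1u << 12)   /* 4 KiB page              */
#define NUM_LV2ENTRIES 256u         /* 4 KiB pages per section */

int main(void)
{
	unsigned int iova = 0x12345678;
	unsigned int lv1  = iova / SECT_SIZE;                    /* level-1 (section) index */
	unsigned int lv2  = (iova >> 12) & (NUM_LV2ENTRIES - 1); /* level-2 (page) index    */
	unsigned int off  = iova & (SPAGE_SIZE - 1);             /* byte offset in the page */

	printf("iova %#x -> lv1 %u, lv2 %u, offset %#x\n", iova, lv1, lv2, off);
	return 0;
}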
148 #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
152 /* v1.x - v3.x registers */
206 { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
207 { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
209 { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
210 { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
211 { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
212 { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
218 { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
219 { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
220 { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
223 { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
224 { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
225 { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
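The two tables above associate each SYSMMU interrupt status bit with a fault name and the register that holds the faulting address; the interrupt handler further down takes the lowest set status bit and looks it up in the table for the detected hardware generation. A small standalone sketch of that lookup using the v5 bits quoted above (__builtin_ctz stands in for the kernel's __ffs()):

/* Editor's illustrative sketch -- not part of exynos-iommu.c. */
#include <stdio.h>

struct fault_info { int bit; const char *name; };

static const struct fault_info v5_faults[] = {
	{ 2,  "AR MULTI-HIT" },
	{ 3,  "AR ACCESS PROTECTION" },
	{ 4,  "AR SECURITY PROTECTION" },
	{ 18, "AW MULTI-HIT" },
	{ 19, "AW ACCESS PROTECTION" },
	{ 20, "AW SECURITY PROTECTION" },
};

int main(void)
{
	unsigned int status = 1u << 19;       /* pretend fault status register value   */
	int itype = __builtin_ctz(status);    /* index of lowest set bit, like __ffs() */
	size_t i;

	for (i = 0; i < sizeof(v5_faults) / sizeof(v5_faults[0]); i++)
		if (v5_faults[i].bit == itype)
			printf("fault type: %s\n", v5_faults[i].name);
	return 0;
}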
229 * This structure is attached to dev->iommu->priv of the master device
236 struct iommu_domain *domain; /* domain this device is attached to */ member
243 been attached to this domain and page tables of the I/O address space defined by
244 it. It is usually referenced by the 'domain' pointer.
252 struct iommu_domain domain; /* generic domain data structure */ member
272 struct exynos_iommu_domain *domain; /* domain we belong to */ member
273 struct list_head domain_node; /* node for domain clients list */
283 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
288 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in sysmmu_unblock()
295 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in sysmmu_block()
296 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) in sysmmu_block()
297 --i; in sysmmu_block()
299 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { in sysmmu_block()
309 if (MMU_MAJ_VER(data->version) < 5) in __sysmmu_tlb_invalidate()
310 writel(0x1, data->sfrbase + REG_MMU_FLUSH); in __sysmmu_tlb_invalidate()
312 writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL); in __sysmmu_tlb_invalidate()
320 if (MMU_MAJ_VER(data->version) < 5) { in __sysmmu_tlb_invalidate_entry()
323 data->sfrbase + REG_MMU_FLUSH_ENTRY); in __sysmmu_tlb_invalidate_entry()
329 data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); in __sysmmu_tlb_invalidate_entry()
332 data->sfrbase + REG_V5_MMU_FLUSH_START); in __sysmmu_tlb_invalidate_entry()
333 writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, in __sysmmu_tlb_invalidate_entry()
334 data->sfrbase + REG_V5_MMU_FLUSH_END); in __sysmmu_tlb_invalidate_entry()
335 writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE); in __sysmmu_tlb_invalidate_entry()
342 if (MMU_MAJ_VER(data->version) < 5) in __sysmmu_set_ptbase()
343 writel(pgd, data->sfrbase + REG_PT_BASE_ADDR); in __sysmmu_set_ptbase()
346 data->sfrbase + REG_V5_PT_BASE_PFN); in __sysmmu_set_ptbase()
353 BUG_ON(clk_prepare_enable(data->clk_master)); in __sysmmu_enable_clocks()
354 BUG_ON(clk_prepare_enable(data->clk)); in __sysmmu_enable_clocks()
355 BUG_ON(clk_prepare_enable(data->pclk)); in __sysmmu_enable_clocks()
356 BUG_ON(clk_prepare_enable(data->aclk)); in __sysmmu_enable_clocks()
361 clk_disable_unprepare(data->aclk); in __sysmmu_disable_clocks()
362 clk_disable_unprepare(data->pclk); in __sysmmu_disable_clocks()
363 clk_disable_unprepare(data->clk); in __sysmmu_disable_clocks()
364 clk_disable_unprepare(data->clk_master); in __sysmmu_disable_clocks()
373 ver = readl(data->sfrbase + REG_MMU_VERSION); in __sysmmu_get_version()
377 data->version = MAKE_MMU_VER(1, 0); in __sysmmu_get_version()
379 data->version = MMU_RAW_VER(ver); in __sysmmu_get_version()
381 dev_dbg(data->sysmmu, "hardware version: %d.%d\n", in __sysmmu_get_version()
382 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); in __sysmmu_get_version()
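MMU_RAW_VER() above takes the 11-bit version field from bits [31:21] of REG_MMU_VERSION; the major/minor split done by MMU_MAJ_VER()/MMU_MIN_VER() is not part of this match set. A hedged sketch of the decode, assuming the common 4-bit major / 7-bit minor packing:

/* Editor's illustrative sketch -- the major/minor packing is an assumption;
 * the real MAKE_MMU_VER()/MMU_MAJ_VER()/MMU_MIN_VER() macros are not shown here. */
#include <stdio.h>

#define RAW_VER(reg) (((reg) >> 21) & ((1u << 11) - 1))   /* as in MMU_RAW_VER() */
#define MAJ_VER(ver) ((ver) >> 7)                         /* assumed packing */
#define MIN_VER(ver) ((ver) & 0x7f)

int main(void)
{
	unsigned int reg = ((3u << 7) | 3u) << 21;   /* pretend readout: version 3.3 */
	unsigned int ver = RAW_VER(reg);

	printf("hardware version: %u.%u\n", MAJ_VER(ver), MIN_VER(ver));
	return 0;
}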
393 dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n", in show_fault_information()
394 dev_name(data->master), finfo->name, fault_addr); in show_fault_information()
395 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); in show_fault_information()
396 ent = section_entry(phys_to_virt(data->pgtable), fault_addr); in show_fault_information()
397 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); in show_fault_information()
400 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); in show_fault_information()
410 sysmmu_iova_t fault_addr = -1; in exynos_sysmmu_irq()
412 int ret = -ENOSYS; in exynos_sysmmu_irq()
414 WARN_ON(!data->active); in exynos_sysmmu_irq()
416 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_irq()
428 spin_lock(&data->lock); in exynos_sysmmu_irq()
430 clk_enable(data->clk_master); in exynos_sysmmu_irq()
432 itype = __ffs(readl(data->sfrbase + reg_status)); in exynos_sysmmu_irq()
434 if (finfo->bit == itype) in exynos_sysmmu_irq()
440 fault_addr = readl(data->sfrbase + finfo->addr_reg); in exynos_sysmmu_irq()
443 if (data->domain) in exynos_sysmmu_irq()
444 ret = report_iommu_fault(&data->domain->domain, in exynos_sysmmu_irq()
445 data->master, fault_addr, finfo->type); in exynos_sysmmu_irq()
449 writel(1 << itype, data->sfrbase + reg_clear); in exynos_sysmmu_irq()
453 clk_disable(data->clk_master); in exynos_sysmmu_irq()
455 spin_unlock(&data->lock); in exynos_sysmmu_irq()
464 clk_enable(data->clk_master); in __sysmmu_disable()
466 spin_lock_irqsave(&data->lock, flags); in __sysmmu_disable()
467 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_disable()
468 writel(0, data->sfrbase + REG_MMU_CFG); in __sysmmu_disable()
469 data->active = false; in __sysmmu_disable()
470 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_disable()
479 if (data->version <= MAKE_MMU_VER(3, 1)) in __sysmmu_init_config()
481 else if (data->version <= MAKE_MMU_VER(3, 2)) in __sysmmu_init_config()
486 cfg |= CFG_EAP; /* enable checking of the access protection bits */ in __sysmmu_init_config()
488 writel(cfg, data->sfrbase + REG_MMU_CFG); in __sysmmu_init_config()
497 spin_lock_irqsave(&data->lock, flags); in __sysmmu_enable()
498 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
500 __sysmmu_set_ptbase(data, data->pgtable); in __sysmmu_enable()
501 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
502 data->active = true; in __sysmmu_enable()
503 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_enable()
511 clk_disable(data->clk_master); in __sysmmu_enable()
519 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
520 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { in sysmmu_tlb_invalidate_flpdcache()
521 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
523 if (data->version >= MAKE_MMU_VER(5, 0)) in sysmmu_tlb_invalidate_flpdcache()
529 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
531 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
539 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
540 if (data->active) { in sysmmu_tlb_invalidate_entry()
543 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_entry()
550 * because it is a set-associative TLB in sysmmu_tlb_invalidate_entry()
551 * with 8 ways and 64 sets. in sysmmu_tlb_invalidate_entry()
555 if (MMU_MAJ_VER(data->version) == 2) in sysmmu_tlb_invalidate_entry()
562 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_entry()
564 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
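The comment above explains why per-entry flushing is bounded: with a set-associative TLB of 8 ways and 64 sets, even a 1 MiB mapping can occupy at most 64 sets, so 64 per-entry flushes are always enough. A hedged sketch of that consequence (the clamp for the v2 case is inferred from the comment, not quoted from the driver):

/* Editor's illustrative sketch -- not part of exynos-iommu.c. */
#define SPAGE_SIZE 4096u

static unsigned int entries_to_flush(unsigned int mmu_major, unsigned long size)
{
	unsigned int num_inv = size / SPAGE_SIZE;   /* one flush request per 4 KiB page */

	/* With only 64 sets there are never more than 64 distinct TLB lines
	 * to hit, so older (v2) hardware can stop after 64 per-entry flushes. */
	if (mmu_major == 2 && num_inv > 64)
		num_inv = 64;
	return num_inv;
}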
572 struct device *dev = &pdev->dev; in exynos_sysmmu_probe()
578 return -ENOMEM; in exynos_sysmmu_probe()
581 data->sfrbase = devm_ioremap_resource(dev, res); in exynos_sysmmu_probe()
582 if (IS_ERR(data->sfrbase)) in exynos_sysmmu_probe()
583 return PTR_ERR(data->sfrbase); in exynos_sysmmu_probe()
596 data->clk = devm_clk_get(dev, "sysmmu"); in exynos_sysmmu_probe()
597 if (PTR_ERR(data->clk) == -ENOENT) in exynos_sysmmu_probe()
598 data->clk = NULL; in exynos_sysmmu_probe()
599 else if (IS_ERR(data->clk)) in exynos_sysmmu_probe()
600 return PTR_ERR(data->clk); in exynos_sysmmu_probe()
602 data->aclk = devm_clk_get(dev, "aclk"); in exynos_sysmmu_probe()
603 if (PTR_ERR(data->aclk) == -ENOENT) in exynos_sysmmu_probe()
604 data->aclk = NULL; in exynos_sysmmu_probe()
605 else if (IS_ERR(data->aclk)) in exynos_sysmmu_probe()
606 return PTR_ERR(data->aclk); in exynos_sysmmu_probe()
608 data->pclk = devm_clk_get(dev, "pclk"); in exynos_sysmmu_probe()
609 if (PTR_ERR(data->pclk) == -ENOENT) in exynos_sysmmu_probe()
610 data->pclk = NULL; in exynos_sysmmu_probe()
611 else if (IS_ERR(data->pclk)) in exynos_sysmmu_probe()
612 return PTR_ERR(data->pclk); in exynos_sysmmu_probe()
614 if (!data->clk && (!data->aclk || !data->pclk)) { in exynos_sysmmu_probe()
616 return -ENOSYS; in exynos_sysmmu_probe()
619 data->clk_master = devm_clk_get(dev, "master"); in exynos_sysmmu_probe()
620 if (PTR_ERR(data->clk_master) == -ENOENT) in exynos_sysmmu_probe()
621 data->clk_master = NULL; in exynos_sysmmu_probe()
622 else if (IS_ERR(data->clk_master)) in exynos_sysmmu_probe()
623 return PTR_ERR(data->clk_master); in exynos_sysmmu_probe()
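All four clocks are optional: a devm_clk_get() result of -ENOENT is rewritten to NULL so the later clk_prepare_enable()/clk_enable() calls become no-ops, and the later check only insists that either "sysmmu" or both "aclk" and "pclk" are present. On kernels that provide it, devm_clk_get_optional() expresses the same pattern directly; a hedged sketch (the helper name sysmmu_get_clocks_sketch() is hypothetical):

/* Editor's illustrative sketch -- equivalent of the -ENOENT handling above,
 * assuming a kernel that provides devm_clk_get_optional(). */
static int sysmmu_get_clocks_sketch(struct device *dev, struct sysmmu_drvdata *data)
{
	data->clk = devm_clk_get_optional(dev, "sysmmu");
	if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);   /* NULL, not an error, when the clock is absent */

	data->aclk = devm_clk_get_optional(dev, "aclk");
	if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	return 0;
}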
625 data->sysmmu = dev; in exynos_sysmmu_probe()
626 spin_lock_init(&data->lock); in exynos_sysmmu_probe()
628 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, in exynos_sysmmu_probe()
629 dev_name(data->sysmmu)); in exynos_sysmmu_probe()
633 iommu_device_set_ops(&data->iommu, &exynos_iommu_ops); in exynos_sysmmu_probe()
634 iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode); in exynos_sysmmu_probe()
636 ret = iommu_device_register(&data->iommu); in exynos_sysmmu_probe()
644 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_probe()
660 dma_dev = &pdev->dev; in exynos_sysmmu_probe()
667 iommu_device_sysfs_remove(&data->iommu); in exynos_sysmmu_probe()
674 struct device *master = data->master; in exynos_sysmmu_suspend()
679 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_suspend()
680 if (data->domain) { in exynos_sysmmu_suspend()
681 dev_dbg(data->sysmmu, "saving state\n"); in exynos_sysmmu_suspend()
684 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_suspend()
692 struct device *master = data->master; in exynos_sysmmu_resume()
697 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_resume()
698 if (data->domain) { in exynos_sysmmu_resume()
699 dev_dbg(data->sysmmu, "restoring state\n"); in exynos_sysmmu_resume()
702 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_resume()
714 { .compatible = "samsung,exynos-sysmmu", },
721 .name = "exynos-sysmmu",
739 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc() local
746 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc()
747 if (!domain) in exynos_iommu_domain_alloc()
751 if (iommu_get_dma_cookie(&domain->domain) != 0) in exynos_iommu_domain_alloc()
757 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); in exynos_iommu_domain_alloc()
758 if (!domain->pgtable) in exynos_iommu_domain_alloc()
761 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); in exynos_iommu_domain_alloc()
762 if (!domain->lv2entcnt) in exynos_iommu_domain_alloc()
767 domain->pgtable[i] = ZERO_LV2LINK; in exynos_iommu_domain_alloc()
769 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, in exynos_iommu_domain_alloc()
772 BUG_ON(handle != virt_to_phys(domain->pgtable)); in exynos_iommu_domain_alloc()
776 spin_lock_init(&domain->lock); in exynos_iommu_domain_alloc()
777 spin_lock_init(&domain->pgtablelock); in exynos_iommu_domain_alloc()
778 INIT_LIST_HEAD(&domain->clients); in exynos_iommu_domain_alloc()
780 domain->domain.geometry.aperture_start = 0; in exynos_iommu_domain_alloc()
781 domain->domain.geometry.aperture_end = ~0UL; in exynos_iommu_domain_alloc()
782 domain->domain.geometry.force_aperture = true; in exynos_iommu_domain_alloc()
784 return &domain->domain; in exynos_iommu_domain_alloc()
787 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_alloc()
789 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_alloc()
792 iommu_put_dma_cookie(&domain->domain); in exynos_iommu_domain_alloc()
794 kfree(domain); in exynos_iommu_domain_alloc()
800 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_domain_free() local
805 WARN_ON(!list_empty(&domain->clients)); in exynos_iommu_domain_free()
807 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_domain_free()
809 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_domain_free()
810 spin_lock(&data->lock); in exynos_iommu_domain_free()
812 data->pgtable = 0; in exynos_iommu_domain_free()
813 data->domain = NULL; in exynos_iommu_domain_free()
814 list_del_init(&data->domain_node); in exynos_iommu_domain_free()
815 spin_unlock(&data->lock); in exynos_iommu_domain_free()
818 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_domain_free()
820 if (iommu_domain->type == IOMMU_DOMAIN_DMA) in exynos_iommu_domain_free()
823 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, in exynos_iommu_domain_free()
827 if (lv1ent_page(domain->pgtable + i)) { in exynos_iommu_domain_free()
828 phys_addr_t base = lv2table_base(domain->pgtable + i); in exynos_iommu_domain_free()
836 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_free()
837 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_free()
838 kfree(domain); in exynos_iommu_domain_free()
844 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_detach_device() local
846 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_detach_device()
850 if (!has_sysmmu(dev) || owner->domain != iommu_domain) in exynos_iommu_detach_device()
853 mutex_lock(&owner->rpm_lock); in exynos_iommu_detach_device()
855 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_detach_device()
856 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_detach_device()
857 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_detach_device()
859 pm_runtime_put(data->sysmmu); in exynos_iommu_detach_device()
862 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_detach_device()
863 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_detach_device()
864 spin_lock(&data->lock); in exynos_iommu_detach_device()
865 data->pgtable = 0; in exynos_iommu_detach_device()
866 data->domain = NULL; in exynos_iommu_detach_device()
867 list_del_init(&data->domain_node); in exynos_iommu_detach_device()
868 spin_unlock(&data->lock); in exynos_iommu_detach_device()
870 owner->domain = NULL; in exynos_iommu_detach_device()
871 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_detach_device()
873 mutex_unlock(&owner->rpm_lock); in exynos_iommu_detach_device()
882 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_attach_device() local
885 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_attach_device()
889 return -ENODEV; in exynos_iommu_attach_device()
891 if (owner->domain) in exynos_iommu_attach_device()
892 exynos_iommu_detach_device(owner->domain, dev); in exynos_iommu_attach_device()
894 mutex_lock(&owner->rpm_lock); in exynos_iommu_attach_device()
896 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_attach_device()
897 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
898 spin_lock(&data->lock); in exynos_iommu_attach_device()
899 data->pgtable = pagetable; in exynos_iommu_attach_device()
900 data->domain = domain; in exynos_iommu_attach_device()
901 list_add_tail(&data->domain_node, &domain->clients); in exynos_iommu_attach_device()
902 spin_unlock(&data->lock); in exynos_iommu_attach_device()
904 owner->domain = iommu_domain; in exynos_iommu_attach_device()
905 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_attach_device()
907 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
908 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_attach_device()
909 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_attach_device()
911 pm_runtime_put(data->sysmmu); in exynos_iommu_attach_device()
914 mutex_unlock(&owner->rpm_lock); in exynos_iommu_attach_device()
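Attach records the domain's page table in every SYSMMU controller bound to the master, then only acts on controllers that are already runtime-PM active. The path is normally reached through the generic IOMMU API; a hedged sketch of a consumer on the kernel generation this listing comes from (bus-based iommu_domain_alloc(), headers <linux/iommu.h> and <linux/platform_device.h> assumed):

/* Editor's illustrative sketch of a consumer -- not exynos-specific code. */
struct iommu_domain *dom;
int ret;

dom = iommu_domain_alloc(&platform_bus_type);   /* IOMMU_DOMAIN_UNMANAGED */
if (!dom)
	return -ENOMEM;

ret = iommu_attach_device(dom, dev);            /* ends up in exynos_iommu_attach_device() */
if (ret) {
	iommu_domain_free(dom);
	return ret;
}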
922 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, in alloc_lv2entry() argument
927 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
936 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); in alloc_lv2entry()
938 return ERR_PTR(-ENOMEM); in alloc_lv2entry()
947 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
951 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, in alloc_lv2entry()
970 spin_lock(&domain->lock); in alloc_lv2entry()
971 list_for_each_entry(data, &domain->clients, domain_node) in alloc_lv2entry()
973 spin_unlock(&domain->lock); in alloc_lv2entry()
980 static int lv1set_section(struct exynos_iommu_domain *domain, in lv1set_section() argument
987 return -EADDRINUSE; in lv1set_section()
994 return -EADDRINUSE; in lv1set_section()
1003 spin_lock(&domain->lock); in lv1set_section()
1010 list_for_each_entry(data, &domain->clients, domain_node) in lv1set_section()
1013 spin_unlock(&domain->lock); in lv1set_section()
1023 return -EADDRINUSE; in lv2set_page()
1026 *pgcnt -= 1; in lv2set_page()
1037 memset(pent - i, 0, sizeof(*pent) * i); in lv2set_page()
1038 return -EADDRINUSE; in lv2set_page()
1046 *pgcnt -= SPAGES_PER_LPAGE; in lv2set_page()
1053 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1074 * - Any two consecutive I/O virtual regions must have a hole of size larger
1076 * - Start address of an I/O virtual region must be aligned by 128KiB.
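The CAUTION fragment above imposes two constraints on whoever allocates I/O virtual regions for this IOMMU: region starts must be 128 KiB aligned, and consecutive regions must be separated by a hole of at least 128 KiB. A small standalone sketch of an allocator-side helper honouring both (names here are hypothetical):

/* Editor's illustrative sketch -- not part of exynos-iommu.c. */
#include <stdio.h>

#define EXYNOS_IOVA_ALIGN (128u * 1024)   /* 128 KiB */

static unsigned int next_region_start(unsigned int prev_region_end)
{
	/* leave a guard hole of at least 128 KiB, then round up to 128 KiB */
	unsigned int start = prev_region_end + EXYNOS_IOVA_ALIGN;

	return (start + EXYNOS_IOVA_ALIGN - 1) & ~(EXYNOS_IOVA_ALIGN - 1);
}

int main(void)
{
	printf("next region starts at %#x\n", next_region_start(0x20030000));
	return 0;
}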
1082 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_map() local
1086 int ret = -ENOMEM; in exynos_iommu_map()
1088 BUG_ON(domain->pgtable == NULL); in exynos_iommu_map()
1091 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_map()
1093 entry = section_entry(domain->pgtable, iova); in exynos_iommu_map()
1096 ret = lv1set_section(domain, entry, iova, paddr, prot, in exynos_iommu_map()
1097 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1101 pent = alloc_lv2entry(domain, entry, iova, in exynos_iommu_map()
1102 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1108 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1115 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_map()
1120 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, in exynos_iommu_tlb_invalidate_entry() argument
1126 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1128 list_for_each_entry(data, &domain->clients, domain_node) in exynos_iommu_tlb_invalidate_entry()
1131 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1138 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_unmap() local
1144 BUG_ON(domain->pgtable == NULL); in exynos_iommu_unmap()
1146 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1148 ent = section_entry(domain->pgtable, iova); in exynos_iommu_unmap()
1180 domain->lv2entcnt[lv1ent_offset(iova)] += 1; in exynos_iommu_unmap()
1198 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; in exynos_iommu_unmap()
1200 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1202 exynos_iommu_tlb_invalidate_entry(domain, iova, size); in exynos_iommu_unmap()
1206 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1217 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_iova_to_phys() local
1222 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1224 entry = section_entry(domain->pgtable, iova); in exynos_iommu_iova_to_phys()
1237 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1248 return ERR_PTR(-ENODEV); in exynos_iommu_probe_device()
1250 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_probe_device()
1256 data->link = device_link_add(dev, data->sysmmu, in exynos_iommu_probe_device()
1262 data = list_first_entry(&owner->controllers, in exynos_iommu_probe_device()
1265 return &data->iommu; in exynos_iommu_probe_device()
1276 if (owner->domain) { in exynos_iommu_release_device()
1280 WARN_ON(owner->domain != in exynos_iommu_release_device()
1282 exynos_iommu_detach_device(owner->domain, dev); in exynos_iommu_release_device()
1287 list_for_each_entry(data, &owner->controllers, owner_node) in exynos_iommu_release_device()
1288 device_link_del(data->link); in exynos_iommu_release_device()
1294 struct platform_device *sysmmu = of_find_device_by_node(spec->np); in exynos_iommu_of_xlate()
1299 return -ENODEV; in exynos_iommu_of_xlate()
1303 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1304 return -ENODEV; in exynos_iommu_of_xlate()
1310 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1311 return -ENOMEM; in exynos_iommu_of_xlate()
1314 INIT_LIST_HEAD(&owner->controllers); in exynos_iommu_of_xlate()
1315 mutex_init(&owner->rpm_lock); in exynos_iommu_of_xlate()
1319 list_for_each_entry(entry, &owner->controllers, owner_node) in exynos_iommu_of_xlate()
1323 list_add_tail(&data->owner_node, &owner->controllers); in exynos_iommu_of_xlate()
1324 data->master = dev; in exynos_iommu_of_xlate()
1355 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", in exynos_iommu_init()
1359 return -ENOMEM; in exynos_iommu_init()
1372 ret = -ENOMEM; in exynos_iommu_init()
1378 pr_err("%s: Failed to register exynos-iommu driver.\n", in exynos_iommu_init()