Lines matching full:domain in drivers/iommu/ipmmu-vmsa.c (each hit is shown with its source line number and enclosing function)
263 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_read_root() argument
266 return ipmmu_read(domain->mmu->root, in ipmmu_ctx_read_root()
267 domain->context_id * IM_CTX_SIZE + reg); in ipmmu_ctx_read_root()
270 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_write_root() argument
273 ipmmu_write(domain->mmu->root, in ipmmu_ctx_write_root()
274 domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write_root()
277 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, in ipmmu_ctx_write_all() argument
280 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
281 ipmmu_write(domain->mmu, in ipmmu_ctx_write_all()
282 domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write_all()
284 ipmmu_write(domain->mmu->root, in ipmmu_ctx_write_all()
285 domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write_all()
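
The three context-register helpers above can be pieced back together as follows. This is a sketch reconstructed from the matched lines; the parts not visible in the listing (parameter lists, braces) are assumptions based on the surrounding driver.

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
                               unsigned int reg)
{
        /* Context registers live on the root IPMMU instance. */
        return ipmmu_read(domain->mmu->root,
                          domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
                                 unsigned int reg, u32 data)
{
        ipmmu_write(domain->mmu->root,
                    domain->context_id * IM_CTX_SIZE + reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
                                unsigned int reg, u32 data)
{
        /* Mirror the write to the cache instance when it is not the root. */
        if (domain->mmu != domain->mmu->root)
                ipmmu_write(domain->mmu,
                            domain->context_id * IM_CTX_SIZE + reg, data);

        ipmmu_write(domain->mmu->root,
                    domain->context_id * IM_CTX_SIZE + reg, data);
}

The read_root/write_root vs. write_all split follows directly from the hits: most context registers are only accessed on the root instance, while write_all additionally mirrors the value to the cache IPMMU whenever domain->mmu is not the root.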
293 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_sync() argument
297 while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { in ipmmu_tlb_sync()
300 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
308 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_invalidate() argument
312 reg = ipmmu_ctx_read_root(domain, IMCTR); in ipmmu_tlb_invalidate()
314 ipmmu_ctx_write_all(domain, IMCTR, reg); in ipmmu_tlb_invalidate()
316 ipmmu_tlb_sync(domain); in ipmmu_tlb_invalidate()
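
A sketch of the TLB maintenance pair these hits come from; the loop bookkeeping (count, TLB_LOOP_TIMEOUT, udelay) and the exact error string are assumptions filled in around the matched lines.

static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
        unsigned int count = 0;

        /* Wait for the flush started via IMCTR to complete on the root. */
        while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
                cpu_relax();
                if (++count == TLB_LOOP_TIMEOUT) {
                        dev_err_ratelimited(domain->mmu->dev,
                                            "TLB sync timed out -- MMU may be deadlocked\n");
                        return;
                }
                udelay(1);
        }
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
        u32 reg;

        /* Set IMCTR.FLUSH on all instances, then poll the root for completion. */
        reg = ipmmu_ctx_read_root(domain, IMCTR);
        reg |= IMCTR_FLUSH;
        ipmmu_ctx_write_all(domain, IMCTR, reg);

        ipmmu_tlb_sync(domain);
}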
322 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, in ipmmu_utlb_enable() argument
325 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable()
336 IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | in ipmmu_utlb_enable()
343 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, in ipmmu_utlb_disable() argument
346 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_disable()
353 struct ipmmu_vmsa_domain *domain = cookie; in ipmmu_tlb_flush_all() local
355 ipmmu_tlb_invalidate(domain); in ipmmu_tlb_flush_all()
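
The micro-TLB hooks and the flush-all gather callback, again reconstructed around the matches. The IMUASID write and the zero IMUCTR write in the disable path are assumptions not shown in the listing.

static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                              unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        /* Program the micro-TLB: ASID 0, select this context, flush, enable. */
        ipmmu_write(mmu, IMUASID(utlb), 0);
        ipmmu_write(mmu, IMUCTR(utlb),
                    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
                    IMUCTR_MMUEN);
}

static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                               unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
        struct ipmmu_vmsa_domain *domain = cookie;

        ipmmu_tlb_invalidate(domain);
}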
371 * Domain/Context Management
375 struct ipmmu_vmsa_domain *domain) in ipmmu_domain_allocate_context() argument
384 mmu->domains[ret] = domain; in ipmmu_domain_allocate_context()
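
ipmmu_domain_allocate_context() hands out a free hardware context per device. Only the mmu->domains[ret] assignment appears in the hits; the bitmap fields (mmu->ctx, mmu->num_ctx) and the locking are assumptions based on the usual find_first_zero_bit()/set_bit() pattern.

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
                                         struct ipmmu_vmsa_domain *domain)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mmu->lock, flags);

        /* Grab the first free context index and record which domain owns it. */
        ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
        if (ret != mmu->num_ctx) {
                mmu->domains[ret] = domain;
                set_bit(ret, mmu->ctx);
        } else {
                ret = -EBUSY;
        }

        spin_unlock_irqrestore(&mmu->lock, flags);

        return ret;
}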
407 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_init_context() argument
424 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; in ipmmu_domain_init_context()
425 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; in ipmmu_domain_init_context()
426 domain->cfg.ias = 32; in ipmmu_domain_init_context()
427 domain->cfg.oas = 40; in ipmmu_domain_init_context()
428 domain->cfg.tlb = &ipmmu_gather_ops; in ipmmu_domain_init_context()
429 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); in ipmmu_domain_init_context()
430 domain->io_domain.geometry.force_aperture = true; in ipmmu_domain_init_context()
435 domain->cfg.iommu_dev = domain->mmu->root->dev; in ipmmu_domain_init_context()
440 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); in ipmmu_domain_init_context()
444 domain->context_id = ret; in ipmmu_domain_init_context()
446 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, in ipmmu_domain_init_context()
447 domain); in ipmmu_domain_init_context()
448 if (!domain->iop) { in ipmmu_domain_init_context()
449 ipmmu_domain_free_context(domain->mmu->root, in ipmmu_domain_init_context()
450 domain->context_id); in ipmmu_domain_init_context()
455 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; in ipmmu_domain_init_context()
456 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr); in ipmmu_domain_init_context()
457 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); in ipmmu_domain_init_context()
464 if (domain->mmu->features->twobit_imttbcr_sl0) in ipmmu_domain_init_context()
469 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | in ipmmu_domain_init_context()
474 ipmmu_ctx_write_root(domain, IMMAIR0, in ipmmu_domain_init_context()
475 domain->cfg.arm_lpae_s1_cfg.mair[0]); in ipmmu_domain_init_context()
478 if (domain->mmu->features->setup_imbuscr) in ipmmu_domain_init_context()
479 ipmmu_ctx_write_root(domain, IMBUSCR, in ipmmu_domain_init_context()
480 ipmmu_ctx_read_root(domain, IMBUSCR) & in ipmmu_domain_init_context()
487 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); in ipmmu_domain_init_context()
496 ipmmu_ctx_write_all(domain, IMCTR, in ipmmu_domain_init_context()
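
Putting the ipmmu_domain_init_context() hits in order gives roughly the sequence below. The comments, error codes, and the IMTTBCR SL0 constants are assumptions; everything else follows the matched lines.

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
        u64 ttbr;
        u32 tmp;
        int ret;

        /* Describe the page-table format to the io-pgtable library. */
        domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
        domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
        domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
        domain->io_domain.geometry.force_aperture = true;
        domain->cfg.iommu_dev = domain->mmu->root->dev;

        /* Find an unused hardware context on the root IPMMU. */
        ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
        if (ret < 0)
                return ret;

        domain->context_id = ret;

        domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                           domain);
        if (!domain->iop) {
                ipmmu_domain_free_context(domain->mmu->root,
                                          domain->context_id);
                return -EINVAL;
        }

        /* TTBR0 */
        ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
        ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

        /* TTBCR: long descriptors, inner-shareable WBWA walks, TTBR0 only. */
        if (domain->mmu->features->twobit_imttbcr_sl0)
                tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
        else
                tmp = IMTTBCR_SL0_LVL_1;

        ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
                             IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
                             IMTTBCR_IRGN0_WB_WA | tmp);

        /* MAIR0 */
        ipmmu_ctx_write_root(domain, IMMAIR0,
                             domain->cfg.arm_lpae_s1_cfg.mair[0]);

        /* IMBUSCR, only on parts that need it. */
        if (domain->mmu->features->setup_imbuscr)
                ipmmu_ctx_write_root(domain, IMBUSCR,
                                     ipmmu_ctx_read_root(domain, IMBUSCR) &
                                     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

        /* Clear pending faults, then enable the MMU with interrupts. */
        ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
        ipmmu_ctx_write_all(domain, IMCTR,
                            IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

        return 0;
}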
502 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_destroy_context() argument
504 if (!domain->mmu) in ipmmu_domain_destroy_context()
513 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH); in ipmmu_domain_destroy_context()
514 ipmmu_tlb_sync(domain); in ipmmu_domain_destroy_context()
515 ipmmu_domain_free_context(domain->mmu->root, domain->context_id); in ipmmu_domain_destroy_context()
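
The teardown path is short and is almost fully covered by the hits; only the early return and the comment are filled in here.

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
        if (!domain->mmu)
                return;

        /*
         * Disable the context and flush the TLB, then return the hardware
         * context to the allocator.
         */
        ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
        ipmmu_tlb_sync(domain);
        ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}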
522 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) in ipmmu_domain_irq() argument
525 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq()
529 status = ipmmu_ctx_read_root(domain, IMSTR); in ipmmu_domain_irq()
533 iova = ipmmu_ctx_read_root(domain, IMEAR); in ipmmu_domain_irq()
541 ipmmu_ctx_write_root(domain, IMSTR, 0); in ipmmu_domain_irq()
560 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
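
A condensed sketch of the fault handler around these hits. The IMSTR bit names (IMSTR_MHIT, IMSTR_ABORT, IMSTR_PF, IMSTR_TF), the per-error logging, and the message format are assumptions; the read/clear ordering and the report_iommu_fault() call are from the matches.

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
        const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
        struct ipmmu_vmsa_device *mmu = domain->mmu;
        u32 status;
        u32 iova;

        status = ipmmu_ctx_read_root(domain, IMSTR);
        if (!(status & err_mask))
                return IRQ_NONE;

        /* IMEAR must be read before IMSTR is cleared, or it reads as 0. */
        iova = ipmmu_ctx_read_root(domain, IMEAR);
        ipmmu_ctx_write_root(domain, IMSTR, 0);

        /* Hand page/translation faults to the registered fault handler. */
        if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
                return IRQ_HANDLED;

        dev_err_ratelimited(mmu->dev,
                            "Unhandled fault: status 0x%08x iova 0x%08x\n",
                            status, iova);
        return IRQ_HANDLED;
}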
600 struct ipmmu_vmsa_domain *domain; in __ipmmu_domain_alloc() local
602 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in __ipmmu_domain_alloc()
603 if (!domain) in __ipmmu_domain_alloc()
606 mutex_init(&domain->mutex); in __ipmmu_domain_alloc()
608 return &domain->io_domain; in __ipmmu_domain_alloc()
634 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_domain_free() local
637 * Free the domain resources. We assume that all devices have already in ipmmu_domain_free()
641 ipmmu_domain_destroy_context(domain); in ipmmu_domain_free()
642 free_io_pgtable_ops(domain->iop); in ipmmu_domain_free()
643 kfree(domain); in ipmmu_domain_free()
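
Domain allocation and free, reconstructed from the hits. Error returns and any DMA-cookie teardown for IOMMU_DOMAIN_DMA domains are omitted or assumed here.

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
        struct ipmmu_vmsa_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        mutex_init(&domain->mutex);

        return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /*
         * Free the domain resources. We assume that all devices have already
         * been detached.
         */
        ipmmu_domain_destroy_context(domain);
        free_io_pgtable_ops(domain->iop);
        kfree(domain);
}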
651 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_attach_device() local
660 mutex_lock(&domain->mutex); in ipmmu_attach_device()
662 if (!domain->mmu) { in ipmmu_attach_device()
663 /* The domain hasn't been used yet, initialize it. */ in ipmmu_attach_device()
664 domain->mmu = mmu; in ipmmu_attach_device()
665 ret = ipmmu_domain_init_context(domain); in ipmmu_attach_device()
668 domain->mmu = NULL; in ipmmu_attach_device()
671 domain->context_id); in ipmmu_attach_device()
673 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
676 * different IOMMUs to the same domain. in ipmmu_attach_device()
678 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", in ipmmu_attach_device()
679 dev_name(mmu->dev), dev_name(domain->mmu->dev)); in ipmmu_attach_device()
682 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); in ipmmu_attach_device()
684 mutex_unlock(&domain->mutex); in ipmmu_attach_device()
690 ipmmu_utlb_enable(domain, fwspec->ids[i]); in ipmmu_attach_device()
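
The attach path ties the pieces together: first attach initializes the context, later attaches may only reuse it on the same IPMMU. The to_ipmmu() helper, the dev->iommu_fwspec lookup (dev_iommu_fwspec_get() in newer kernels), and the exact messages are assumptions around the matched lines.

static int ipmmu_attach_device(struct iommu_domain *io_domain,
                               struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned int i;
        int ret = 0;

        if (!mmu) {
                dev_err(dev, "Cannot attach to IPMMU\n");
                return -ENXIO;
        }

        mutex_lock(&domain->mutex);

        if (!domain->mmu) {
                /* The domain hasn't been used yet, initialize it. */
                domain->mmu = mmu;
                ret = ipmmu_domain_init_context(domain);
                if (ret < 0) {
                        dev_err(dev, "Unable to initialize IPMMU context\n");
                        domain->mmu = NULL;
                } else {
                        dev_info(dev, "Using IPMMU context %u\n",
                                 domain->context_id);
                }
        } else if (domain->mmu != mmu) {
                /*
                 * Something is wrong, we can't attach two devices using
                 * different IOMMUs to the same domain.
                 */
                dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                        dev_name(mmu->dev), dev_name(domain->mmu->dev));
                ret = -EINVAL;
        } else {
                dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
        }

        mutex_unlock(&domain->mutex);

        if (ret < 0)
                return ret;

        /* Route every micro-TLB listed in the firmware spec to this context. */
        for (i = 0; i < fwspec->num_ids; ++i)
                ipmmu_utlb_enable(domain, fwspec->ids[i]);

        return 0;
}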
699 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_detach_device() local
703 ipmmu_utlb_disable(domain, fwspec->ids[i]); in ipmmu_detach_device()
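
Detach is the mirror image and is mostly covered by the hits; only the fwspec lookup is assumed.

static void ipmmu_detach_device(struct iommu_domain *io_domain,
                                struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned int i;

        /* Turn every micro-TLB used by this device back off. */
        for (i = 0; i < fwspec->num_ids; ++i)
                ipmmu_utlb_disable(domain, fwspec->ids[i]);
}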
713 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_map() local
715 if (!domain) in ipmmu_map()
718 return domain->iop->map(domain->iop, iova, paddr, size, prot); in ipmmu_map()
724 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_unmap() local
726 return domain->iop->unmap(domain->iop, iova, size); in ipmmu_unmap()
731 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_iotlb_sync() local
733 if (domain->mmu) in ipmmu_iotlb_sync()
734 ipmmu_tlb_flush_all(domain); in ipmmu_iotlb_sync()
740 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); in ipmmu_iova_to_phys() local
744 return domain->iop->iova_to_phys(domain->iop, iova); in ipmmu_iova_to_phys()
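
The map/unmap/sync/translate callbacks simply delegate to the io-pgtable ops allocated in ipmmu_domain_init_context(). The sketch below uses the pre-5.x io_pgtable_ops and iommu_ops signatures implied by the hits (no gfp argument to map, no iotlb_gather parameter); return types and prototypes are otherwise assumptions.

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        if (!domain)
                return -ENODEV;

        return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
                          size_t size)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /* Only flush if the domain has been attached to hardware. */
        if (domain->mmu)
                ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                                      dma_addr_t iova)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        return domain->iop->iova_to_phys(domain->iop, iova);
}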
814 * VAs. This will allocate a corresponding IOMMU domain. in ipmmu_init_arm_mapping()