Lines matching refs: smmu_domain (drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c)
773 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, in arm_smmu_sync_cd() argument
780 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_sync_cd()
791 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_sync_cd()
792 list_for_each_entry(master, &smmu_domain->devices, domain_head) { in arm_smmu_sync_cd()
798 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_sync_cd()
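
The devices_lock lines above (791-798) are the driver's standard pattern for walking every master attached to a domain. A minimal sketch of that pattern, assuming pared-down stand-ins for the driver structures (visit_master() is hypothetical; the real loop batches one CFGI_CD command per stream ID):

#include <linux/list.h>
#include <linux/spinlock.h>

struct master_sketch {
	struct list_head domain_head;	/* links the master into the domain */
};

struct domain_sketch {
	spinlock_t devices_lock;	/* protects the devices list */
	struct list_head devices;	/* all masters attached to this domain */
};

static void visit_master(struct master_sketch *master)
{
	/* stand-in for the per-SID command batching */
}

static void for_each_attached_master(struct domain_sketch *d)
{
	struct master_sketch *master;
	unsigned long flags;

	spin_lock_irqsave(&d->devices_lock, flags);
	list_for_each_entry(master, &d->devices, domain_head)
		visit_master(master);
	spin_unlock_irqrestore(&d->devices_lock, flags);
}
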
828 static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain, in arm_smmu_get_cd_ptr() argument
834 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_get_cd_ptr()
835 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; in arm_smmu_get_cd_ptr()
837 if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR) in arm_smmu_get_cd_ptr()
849 arm_smmu_sync_cd(smmu_domain, ssid, false); in arm_smmu_get_cd_ptr()
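
Line 837 is the linear/two-level split in arm_smmu_get_cd_ptr(): with a linear table the SSID indexes the CD array directly; otherwise the high SSID bits select a level-2 table that is allocated on demand and then published via the arm_smmu_sync_cd() call at line 849. A sketch of the indexing, with illustrative constants in place of the driver's CTXDESC_* macros:

#include <linux/types.h>

#define CD_DWORDS	8	/* one context descriptor = 8 x 64-bit words */
#define CD_SPLIT	10	/* SSID bits consumed by the level-2 index */

static __le64 *cd_slot_sketch(__le64 *linear_tab, __le64 **l2_tabs,
			      int ssid, bool linear)
{
	if (linear)
		return linear_tab + ssid * CD_DWORDS;

	/* High SSID bits pick the L2 table, low bits the slot within it. */
	return l2_tabs[ssid >> CD_SPLIT] +
	       (ssid & ((1 << CD_SPLIT) - 1)) * CD_DWORDS;
}
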
855 int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, in arm_smmu_write_ctx_desc() argument
873 if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax))) in arm_smmu_write_ctx_desc()
876 cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid); in arm_smmu_write_ctx_desc()
904 arm_smmu_sync_cd(smmu_domain, ssid, true); in arm_smmu_write_ctx_desc()
916 if (smmu_domain->stall_enabled) in arm_smmu_write_ctx_desc()
930 arm_smmu_sync_cd(smmu_domain, ssid, true); in arm_smmu_write_ctx_desc()
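
The arm_smmu_sync_cd() calls at lines 904 and 930 bracket the descriptor write so the SMMU never fetches a half-written CD: the payload words go out and are synced first, and only then is word 0, which carries the valid bit, published. A sketch of that ordering, with cd_sync() as a hypothetical stand-in for queueing CFGI_CD plus a CMD_SYNC:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void cd_sync(void)
{
	/* stand-in: issue CFGI_CD + SYNC on the command queue */
}

static void publish_cd_sketch(__le64 *cdptr, u64 val0, u64 val1)
{
	cdptr[1] = cpu_to_le64(val1);		  /* payload words first */
	cd_sync();				  /* flush any stale copy */
	WRITE_ONCE(cdptr[0], cpu_to_le64(val0));  /* then the valid word */
	cd_sync();				  /* make the SMMU refetch */
}
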
934 static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain) in arm_smmu_alloc_cd_tables() argument
939 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_alloc_cd_tables()
940 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_alloc_cd_tables()
983 static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain) in arm_smmu_free_cd_tables() argument
987 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_free_cd_tables()
988 struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg; in arm_smmu_free_cd_tables()
1068 struct arm_smmu_domain *smmu_domain = NULL; in arm_smmu_write_strtab_ent() local
1077 smmu_domain = master->domain; in arm_smmu_write_strtab_ent()
1081 if (smmu_domain) { in arm_smmu_write_strtab_ent()
1082 switch (smmu_domain->stage) { in arm_smmu_write_strtab_ent()
1084 s1_cfg = &smmu_domain->s1_cfg; in arm_smmu_write_strtab_ent()
1088 s2_cfg = &smmu_domain->s2_cfg; in arm_smmu_write_strtab_ent()
1115 if (!smmu_domain || !(s1_cfg || s2_cfg)) { in arm_smmu_write_strtab_ent()
1116 if (!smmu_domain && disable_bypass) in arm_smmu_write_strtab_ent()
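
Lines 1081-1116 decide what the stream table entry will point at: a stage-1 CD table, a stage-2 translation, or neither, in which case the STE becomes bypass or abort depending on disable_bypass. A condensed sketch of that decision, with local enums standing in for the driver's types:

#include <linux/types.h>

enum stage_sketch { STAGE_S1, STAGE_S2, STAGE_BYPASS };
enum ste_kind { STE_ABORT, STE_BYPASS, STE_S1, STE_S2 };

static enum ste_kind ste_kind_sketch(bool have_domain, enum stage_sketch stage,
				     bool disable_bypass)
{
	if (!have_domain)
		return disable_bypass ? STE_ABORT : STE_BYPASS;

	switch (stage) {
	case STAGE_S1:
		return STE_S1;	/* STE points at the CD table (s1_cfg) */
	case STAGE_S2:
		return STE_S2;	/* STE carries the VMID etc. (s2_cfg) */
	default:
		return STE_BYPASS;	/* ARM_SMMU_DOMAIN_BYPASS */
	}
}
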
1595 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, in arm_smmu_atc_inv_domain() argument
1604 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_atc_inv_domain()
1621 if (!atomic_read(&smmu_domain->nr_ats_masters)) in arm_smmu_atc_inv_domain()
1628 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
1629 list_for_each_entry(master, &smmu_domain->devices, domain_head) { in arm_smmu_atc_inv_domain()
1635 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
1638 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
1640 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); in arm_smmu_atc_inv_domain()
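
arm_smmu_atc_inv_domain() is built around two fast paths: return immediately when the SMMU lacks ATS (line 1604), and skip the device walk when no attached master has ATS enabled (the nr_ats_masters read at line 1621); only then does it batch one ATC_INV per master under devices_lock and submit once. A sketch of the fast-path structure:

#include <linux/atomic.h>
#include <linux/types.h>

static int atc_inv_fastpath_sketch(bool smmu_has_ats, atomic_t *nr_ats_masters)
{
	if (!smmu_has_ats)
		return 0;	/* no ATS on this SMMU: nothing to do */

	/*
	 * Pairs with the atomic_inc() on the ATS-enable path: a zero
	 * read means no master can hold ATC entries for this domain.
	 */
	if (!atomic_read(nr_ats_masters))
		return 0;

	/*
	 * Otherwise: walk the devices list under devices_lock, batch
	 * one ATC_INV command per master, submit the batch once.
	 */
	return 0;
}
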
1646 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_context() local
1647 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context()
1657 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_context()
1658 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid); in arm_smmu_tlb_inv_context()
1661 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_context()
1664 arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); in arm_smmu_tlb_inv_context()
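
arm_smmu_tlb_inv_context() splits on the stage: stage-1 domains are invalidated by ASID, stage-2 by VMID, and either way the ATCs are invalidated afterwards (line 1664), since ATS-capable endpoints cache translations independently of the SMMU TLB. A sketch of the split, with hypothetical TLBI helpers:

#include <linux/types.h>

static void tlbi_by_asid(u16 asid) { /* e.g. TLBI_NH_ASID + SYNC */ }
static void tlbi_by_vmid(u16 vmid) { /* e.g. TLBI_S12_VMALL + SYNC */ }

static void inv_context_sketch(bool stage1, u16 asid, u16 vmid)
{
	if (stage1)
		tlbi_by_asid(asid);	/* per-ASID, not a global TLBI */
	else
		tlbi_by_vmid(vmid);
	/* ...followed by an ATC invalidation over the whole domain */
}
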
1670 struct arm_smmu_domain *smmu_domain) in __arm_smmu_tlb_inv_range() argument
1672 struct arm_smmu_device *smmu = smmu_domain->smmu; in __arm_smmu_tlb_inv_range()
1682 tg = __ffs(smmu_domain->domain.pgsize_bitmap); in __arm_smmu_tlb_inv_range()
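
Line 1682 derives the translation granule for range-based invalidation from the domain's page-size bitmap: the lowest set bit is the leaf page size the tables were built with. A worked example:

#include <linux/bitops.h>

static unsigned long granule_shift_sketch(unsigned long pgsize_bitmap)
{
	/* e.g. bitmap 0x40201000 (4K | 2M | 1G) -> __ffs() = 12 -> 4K */
	return __ffs(pgsize_bitmap);
}
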
1740 struct arm_smmu_domain *smmu_domain) in arm_smmu_tlb_inv_range_domain() argument
1748 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_range_domain()
1749 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_domain()
1751 cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; in arm_smmu_tlb_inv_range_domain()
1754 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_range_domain()
1756 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_domain()
1762 arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size); in arm_smmu_tlb_inv_range_domain()
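
arm_smmu_tlb_inv_range_domain() picks the TLBI opcode by stage, using the EL2 variant for stage 1 when the SMMU shares the CPU's E2H (VHE) context, then follows the range TLBI with an ATC invalidation over the same range (line 1762). A sketch of the opcode selection, with a local enum in place of the driver's CMDQ_OP_* constants:

#include <linux/types.h>

enum tlbi_op_sketch {
	OP_TLBI_NH_VA,	/* stage-1 VA, non-VHE */
	OP_TLBI_EL2_VA,	/* stage-1 VA when FEAT_E2H is in use */
	OP_TLBI_S2_IPA,	/* stage-2, by IPA */
};

static enum tlbi_op_sketch range_opcode_sketch(bool stage1, bool feat_e2h)
{
	if (stage1)
		return feat_e2h ? OP_TLBI_EL2_VA : OP_TLBI_NH_VA;
	return OP_TLBI_S2_IPA;
}
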
1767 struct arm_smmu_domain *smmu_domain) in arm_smmu_tlb_inv_range_asid() argument
1770 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_asid()
1778 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_asid()
1785 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_page_nosync() local
1786 struct iommu_domain *domain = &smmu_domain->domain; in arm_smmu_tlb_inv_page_nosync()
1806 struct arm_smmu_domain *smmu_domain; in arm_smmu_domain_alloc() local
1821 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); in arm_smmu_domain_alloc()
1822 if (!smmu_domain) in arm_smmu_domain_alloc()
1825 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
1826 INIT_LIST_HEAD(&smmu_domain->devices); in arm_smmu_domain_alloc()
1827 spin_lock_init(&smmu_domain->devices_lock); in arm_smmu_domain_alloc()
1828 INIT_LIST_HEAD(&smmu_domain->mmu_notifiers); in arm_smmu_domain_alloc()
1830 return &smmu_domain->domain; in arm_smmu_domain_alloc()
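
arm_smmu_domain_alloc() is the usual embedded-struct pattern: the generic iommu_domain lives inside arm_smmu_domain, the wrapper is kzalloc()ed and its lists and locks initialised, and only the embedded member is returned; to_smmu_domain() later recovers the wrapper with container_of(). A self-contained sketch:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct inner_sketch { int placeholder; };	/* stands in for iommu_domain */

struct outer_sketch {
	struct mutex init_mutex;
	struct list_head devices;
	spinlock_t devices_lock;
	struct inner_sketch domain;	/* embedded, handed to the core */
};

static struct inner_sketch *alloc_domain_sketch(void)
{
	struct outer_sketch *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	mutex_init(&d->init_mutex);
	INIT_LIST_HEAD(&d->devices);
	spin_lock_init(&d->devices_lock);
	return &d->domain;
}

static struct outer_sketch *to_outer_sketch(struct inner_sketch *i)
{
	return container_of(i, struct outer_sketch, domain);
}
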
1835 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_domain_free() local
1836 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_free()
1838 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_domain_free()
1841 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_domain_free()
1842 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_domain_free()
1847 arm_smmu_free_cd_tables(smmu_domain); in arm_smmu_domain_free()
1851 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_free()
1856 kfree(smmu_domain); in arm_smmu_domain_free()
1859 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, in arm_smmu_domain_finalise_s1() argument
1865 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s1()
1866 struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; in arm_smmu_domain_finalise_s1()
1880 smmu_domain->stall_enabled = master->stall_enabled; in arm_smmu_domain_finalise_s1()
1882 ret = arm_smmu_alloc_cd_tables(smmu_domain); in arm_smmu_domain_finalise_s1()
1902 ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd); in arm_smmu_domain_finalise_s1()
1910 arm_smmu_free_cd_tables(smmu_domain); in arm_smmu_domain_finalise_s1()
1918 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, in arm_smmu_domain_finalise_s2() argument
1923 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise_s2()
1924 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_finalise_s2()
1957 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_domain_finalise() local
1958 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_finalise()
1961 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_domain_finalise()
1967 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_domain_finalise()
1969 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_finalise()
1971 switch (smmu_domain->stage) { in arm_smmu_domain_finalise()
1999 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); in arm_smmu_domain_finalise()
2007 ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg); in arm_smmu_domain_finalise()
2013 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_domain_finalise()
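
arm_smmu_domain_finalise() first clamps the stage to what the hardware supports (lines 1967-1969), then shares the io-pgtable allocation between both stages by dispatching through a finalise_stage_fn pointer (line 2007). A sketch of that dispatch shape, with hypothetical config types:

struct pgtbl_cfg_sketch { unsigned long pgsize_bitmap; };

typedef int (*finalise_fn_sketch)(struct pgtbl_cfg_sketch *cfg);

static int finalise_s1_sketch(struct pgtbl_cfg_sketch *cfg) { return 0; }
static int finalise_s2_sketch(struct pgtbl_cfg_sketch *cfg) { return 0; }

static int finalise_sketch(int want_stage1, struct pgtbl_cfg_sketch *cfg)
{
	finalise_fn_sketch fn = want_stage1 ? finalise_s1_sketch
					    : finalise_s2_sketch;

	/* ...alloc_io_pgtable_ops() fills *cfg here, common to both... */
	return fn(cfg);
}
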
2079 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_enable_ats() local
2089 atomic_inc(&smmu_domain->nr_ats_masters); in arm_smmu_enable_ats()
2090 arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); in arm_smmu_enable_ats()
2097 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_disable_ats() local
2109 atomic_dec(&smmu_domain->nr_ats_masters); in arm_smmu_disable_ats()
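
The nr_ats_masters counter ties the two paths together: arm_smmu_enable_ats() increments it before the initial flush (lines 2089-2090) so a concurrent arm_smmu_atc_inv_domain() cannot miss the new master, and arm_smmu_disable_ats() decrements it only once ATS is off. A sketch of the enable-side ordering:

#include <linux/atomic.h>

static void enable_ats_sketch(atomic_t *nr_ats_masters)
{
	/* ...enable ATS on the endpoint/STE first... */

	/*
	 * Publish the master before flushing: any invalidator that
	 * now reads a non-zero count will walk the device list.
	 */
	atomic_inc(nr_ats_masters);
	/* ...then flush the ATC once so the master starts clean */
}
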
2162 struct arm_smmu_domain *smmu_domain = master->domain; in arm_smmu_detach_dev() local
2164 if (!smmu_domain) in arm_smmu_detach_dev()
2169 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_detach_dev()
2171 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_detach_dev()
2184 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_attach_dev() local
2205 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2207 if (!smmu_domain->smmu) { in arm_smmu_attach_dev()
2208 smmu_domain->smmu = smmu; in arm_smmu_attach_dev()
2211 smmu_domain->smmu = NULL; in arm_smmu_attach_dev()
2214 } else if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
2217 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && in arm_smmu_attach_dev()
2218 master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) { in arm_smmu_attach_dev()
2221 } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && in arm_smmu_attach_dev()
2222 smmu_domain->stall_enabled != master->stall_enabled) { in arm_smmu_attach_dev()
2227 master->domain = smmu_domain; in arm_smmu_attach_dev()
2236 if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_attach_dev()
2241 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_attach_dev()
2242 list_add(&master->domain_head, &smmu_domain->devices); in arm_smmu_attach_dev()
2243 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_attach_dev()
2248 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
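
The attach path holds init_mutex while it either finalises the domain on first attach (lines 2207-2208, resetting smmu_domain->smmu to NULL on failure) or rejects an incompatible master: a different SMMU instance (2214), a mismatched SSID width for the already-sized CD table (2217-2218), or a mismatched stall setting (2221-2222). A condensed sketch of those compatibility checks (error codes are illustrative):

#include <linux/errno.h>
#include <linux/types.h>

struct attach_args_sketch {
	void *domain_smmu, *master_smmu;
	unsigned int domain_ssid_bits, master_ssid_bits;
	bool domain_stall, master_stall, stage1;
};

static int check_attach_sketch(const struct attach_args_sketch *a)
{
	if (a->domain_smmu != a->master_smmu)
		return -EINVAL;	/* domain bound to another SMMU instance */
	if (a->stage1 && a->master_ssid_bits != a->domain_ssid_bits)
		return -EINVAL;	/* CD table sized for another SSID width */
	if (a->stage1 && a->domain_stall != a->master_stall)
		return -EINVAL;	/* CDs written with the other stall mode */
	return 0;
}
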
2268 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_unmap_pages() local
2269 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_unmap_pages()
2279 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_flush_iotlb_all() local
2281 if (smmu_domain->smmu) in arm_smmu_flush_iotlb_all()
2282 arm_smmu_tlb_inv_context(smmu_domain); in arm_smmu_flush_iotlb_all()
2288 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_iotlb_sync() local
2295 gather->pgsize, true, smmu_domain); in arm_smmu_iotlb_sync()
2498 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_enable_nesting() local
2501 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
2502 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
2505 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_enable_nesting()
2506 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
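
arm_smmu_enable_nesting() only succeeds before the domain is finalised: once smmu_domain->smmu is set at first attach (line 2502), the stage is fixed and the request is refused. A sketch of that guard (the error code is illustrative):

#include <linux/errno.h>
#include <linux/mutex.h>

enum stage_pick_sketch { PICK_DEFAULT, PICK_NESTED };

static int enable_nesting_sketch(struct mutex *init_mutex, bool finalised,
				 enum stage_pick_sketch *stage)
{
	int ret = 0;

	mutex_lock(init_mutex);
	if (finalised)
		ret = -EPERM;	/* stage already chosen at first attach */
	else
		*stage = PICK_NESTED;
	mutex_unlock(init_mutex);
	return ret;
}
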