Lines Matching +full:smmu +full:- +full:v2

2  * IOMMU API for ARM architected SMMU implementations.
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
26 * - Context fault reporting
27 * - Extended Stream ID (16 bit)
30 #define pr_fmt(fmt) "arm-smmu: " fmt
36 #include <linux/dma-iommu.h>
37 #include <linux/dma-mapping.h>
41 #include <linux/io-64-nonatomic-hi-lo.h>
56 #include "io-pgtable.h"
57 #include "arm-smmu-regs.h"
60 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
66 #define QCOM_DUMMY_VAL -1
77 /* Maximum number of context banks per SMMU */
80 /* SMMU global address space */
81 #define ARM_SMMU_GR0(smmu) ((smmu)->base) argument
82 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift)) argument
85 * SMMU global address space with conditional offset to access secure
86 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
89 #define ARM_SMMU_GR0_NS(smmu) \ argument
90 ((smmu)->base + \
91 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
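
The three macros above derive register-space addresses from the single mapped region: GR0 sits at the base, GR1 one translation page above it, and GR0_NS adds a fixed 0x400 offset when the Calxeda secure-config-access workaround is in effect. A minimal user-space sketch of the same arithmetic follows; the struct, the function names, and the example base address are hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the parts of arm_smmu_device these macros touch */
struct fake_smmu {
	uintptr_t base;            /* start of the mapped register region */
	unsigned int pgshift;      /* 12 or 16, from the translation page size */
	bool secure_cfg_access;    /* mirrors ARM_SMMU_OPT_SECURE_CFG_ACCESS */
};

static uintptr_t gr0(const struct fake_smmu *s)
{
	return s->base;                           /* GR0 is at the base */
}

static uintptr_t gr1(const struct fake_smmu *s)
{
	return s->base + (1UL << s->pgshift);     /* GR1 is one page above GR0 */
}

static uintptr_t gr0_ns(const struct fake_smmu *s)
{
	/* Secure alias lives 0x400 above GR0 when the workaround is enabled */
	return s->base + (s->secure_cfg_access ? 0x400 : 0);
}

int main(void)
{
	struct fake_smmu s = { .base = 0x2b400000, .pgshift = 12, .secure_cfg_access = true };

	printf("GR0    = %#lx\n", (unsigned long)gr0(&s));
	printf("GR1    = %#lx\n", (unsigned long)gr1(&s));
	printf("GR0_NS = %#lx\n", (unsigned long)gr0_ns(&s));
	return 0;
}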
95 * Some 64-bit registers only make sense to write atomically, but in such
106 #define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift)) argument
114 …"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' f…
118 …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
159 struct arm_smmu_device *smmu; member
162 #define INVALID_SMENDX -1
163 #define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
164 #define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
166 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
168 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
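
The helpers above translate a device's firmware-described stream IDs (fwspec->ids[]) into the stream-map entry indices the driver allocated for it (smendx[]), with INVALID_SMENDX marking unallocated slots; the last line is the body of the iteration macro that walks the (index, entry) pairs. A self-contained sketch with simplified mock structures (the real iommu_fwspec and arm_smmu_master_cfg carry more state):

#include <stdio.h>

#define INVALID_SMENDX	-1
#define MAX_IDS		4

struct mock_master_cfg {
	int smendx[MAX_IDS];		/* stream-map entry allocated per stream ID */
};

struct mock_fwspec {
	unsigned int num_ids;
	unsigned int ids[MAX_IDS];
	struct mock_master_cfg *priv;	/* stands in for fw->iommu_priv */
};

static int fwspec_smendx(const struct mock_fwspec *fw, unsigned int i)
{
	return i >= fw->num_ids ? INVALID_SMENDX : fw->priv->smendx[i];
}

int main(void)
{
	struct mock_master_cfg cfg = { .smendx = { 3, 7, INVALID_SMENDX, INVALID_SMENDX } };
	struct mock_fwspec fw = { .num_ids = 2, .ids = { 0x840, 0x841 }, .priv = &cfg };
	unsigned int i;
	int idx;

	/* Equivalent of the for_each-style loop above: walk (i, idx) pairs */
	for (i = 0; idx = fwspec_smendx(&fw, i), i < fw.num_ids; ++i)
		printf("stream ID %#x -> SME index %d\n", fw.ids[i], idx);

	return 0;
}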
254 struct arm_smmu_device *smmu; member
259 struct mutex init_mutex; /* Protects smmu pointer */
274 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
283 static void parse_driver_options(struct arm_smmu_device *smmu) in parse_driver_options() argument
288 if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
290 smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
291 dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
300 struct pci_bus *bus = to_pci_dev(dev)->bus; in dev_get_dev_node()
303 bus = bus->parent; in dev_get_dev_node()
304 return of_node_get(bus->bridge->parent->of_node); in dev_get_dev_node()
307 return of_node_get(dev->of_node); in dev_get_dev_node()
319 struct device_node *np = it->node; in __find_legacy_master_phandle()
322 of_for_each_phandle(it, err, dev->of_node, "mmu-masters", in __find_legacy_master_phandle()
323 "#stream-id-cells", 0) in __find_legacy_master_phandle()
324 if (it->node == np) { in __find_legacy_master_phandle()
328 it->node = np; in __find_legacy_master_phandle()
329 return err == -ENOENT ? 0 : err; in __find_legacy_master_phandle()
336 struct arm_smmu_device **smmu) in arm_smmu_register_legacy_master() argument
347 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) { in arm_smmu_register_legacy_master()
349 return -ENODEV; in arm_smmu_register_legacy_master()
358 return -ENODEV; in arm_smmu_register_legacy_master()
363 /* "mmu-masters" assumes Stream ID == Requester ID */ in arm_smmu_register_legacy_master()
370 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, in arm_smmu_register_legacy_master()
377 return -ENOMEM; in arm_smmu_register_legacy_master()
379 *smmu = dev_get_drvdata(smmu_dev); in arm_smmu_register_legacy_master()
393 return -ENOSPC; in __arm_smmu_alloc_bitmap()
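
The allocator behind this return path scans a bitmap of context banks for a free bit, starting at a caller-supplied index so that stage-1 domains skip the banks reserved for stage 2, and reports -ENOSPC once every bank is taken. A rough user-space equivalent, assuming the kernel helpers (find_next_zero_bit()/test_and_set_bit() under a lock) are replaced by a plain unsigned long:

#include <errno.h>
#include <stdio.h>

static int alloc_bit(unsigned long *map, int start, int end)
{
	for (int i = start; i < end; i++) {
		if (!(*map & (1UL << i))) {
			*map |= 1UL << i;
			return i;	/* index of the bank we just claimed */
		}
	}
	return -ENOSPC;			/* every bank in [start, end) is in use */
}

int main(void)
{
	unsigned long context_map = 0;
	int num_s2_only_banks = 2, num_banks = 8;

	/* Stage-1 domains start allocating above the stage-2-only banks */
	printf("first stage-1 bank: %d\n", alloc_bit(&context_map, num_s2_only_banks, num_banks));
	/* Stage-2 domains may take any bank, including the low ones */
	printf("first stage-2 bank: %d\n", alloc_bit(&context_map, 0, num_banks));
	return 0;
}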
405 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, in __arm_smmu_tlb_sync() argument
412 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { in __arm_smmu_tlb_sync()
419 dev_err_ratelimited(smmu->dev, in __arm_smmu_tlb_sync()
420 "TLB sync timed out -- SMMU may be deadlocked\n"); in __arm_smmu_tlb_sync()
423 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) in arm_smmu_tlb_sync_global() argument
425 void __iomem *base = ARM_SMMU_GR0(smmu); in arm_smmu_tlb_sync_global()
428 spin_lock_irqsave(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
429 __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, in arm_smmu_tlb_sync_global()
431 spin_unlock_irqrestore(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
437 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context() local
438 void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); in arm_smmu_tlb_sync_context()
441 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
442 __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, in arm_smmu_tlb_sync_context()
444 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
451 arm_smmu_tlb_sync_global(smmu_domain->smmu); in arm_smmu_tlb_sync_vmid()
457 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_context_s1()
458 void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); in arm_smmu_tlb_inv_context_s1()
460 writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); in arm_smmu_tlb_inv_context_s1()
467 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2() local
468 void __iomem *base = ARM_SMMU_GR0(smmu); in arm_smmu_tlb_inv_context_s2()
470 writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); in arm_smmu_tlb_inv_context_s2()
471 arm_smmu_tlb_sync_global(smmu); in arm_smmu_tlb_inv_context_s2()
478 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_range_nosync()
479 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_tlb_inv_range_nosync()
480 void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); in arm_smmu_tlb_inv_range_nosync()
482 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_nosync()
488 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_tlb_inv_range_nosync()
490 iova |= cfg->asid; in arm_smmu_tlb_inv_range_nosync()
494 } while (size -= granule); in arm_smmu_tlb_inv_range_nosync()
497 iova |= (u64)cfg->asid << 48; in arm_smmu_tlb_inv_range_nosync()
501 } while (size -= granule); in arm_smmu_tlb_inv_range_nosync()
510 } while (size -= granule); in arm_smmu_tlb_inv_range_nosync()
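
In the stage-1 paths above, each per-granule write packs the page address and the context's ASID into one invalidate-by-VA operand. A sketch of the two encodings, assuming a 4 KiB granule: the 32-bit formats keep a page-aligned VA with the ASID in the low bits, while the AArch64 format shifts the VA down by 12 and places the ASID in bits [63:48], as in the lines above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t tlbiva_aarch32(uint32_t va, uint8_t asid)
{
	return (va & ~0xfffu) | asid;			/* page-aligned VA | ASID */
}

static uint64_t tlbiva_aarch64(uint64_t va, uint16_t asid)
{
	return (va >> 12) | ((uint64_t)asid << 48);	/* VA in pages, ASID on top */
}

int main(void)
{
	printf("aarch32 operand: %#" PRIx32 "\n", tlbiva_aarch32(0x12345000, 5));
	printf("aarch64 operand: %#" PRIx64 "\n", tlbiva_aarch64(0x12345000, 5));
	return 0;
}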
515 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
518 * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
524 void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); in arm_smmu_tlb_inv_vmid_nosync()
526 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_vmid_nosync()
529 writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); in arm_smmu_tlb_inv_vmid_nosync()
556 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_context_fault()
557 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault() local
560 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); in arm_smmu_context_fault()
569 dev_err_ratelimited(smmu->dev, in arm_smmu_context_fault()
571 fsr, iova, fsynr, cfg->cbndx); in arm_smmu_context_fault()
580 struct arm_smmu_device *smmu = dev; in arm_smmu_global_fault() local
581 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); in arm_smmu_global_fault()
591 dev_err_ratelimited(smmu->dev, in arm_smmu_global_fault()
593 dev_err_ratelimited(smmu->dev, in arm_smmu_global_fault()
604 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_context_bank()
605 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
606 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_init_context_bank()
608 cb->cfg = cfg; in arm_smmu_init_context_bank()
612 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
613 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; in arm_smmu_init_context_bank()
615 cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr; in arm_smmu_init_context_bank()
616 cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; in arm_smmu_init_context_bank()
617 cb->tcr[1] |= TTBCR2_SEP_UPSTREAM; in arm_smmu_init_context_bank()
618 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_init_context_bank()
619 cb->tcr[1] |= TTBCR2_AS; in arm_smmu_init_context_bank()
622 cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; in arm_smmu_init_context_bank()
627 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
628 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0]; in arm_smmu_init_context_bank()
629 cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1]; in arm_smmu_init_context_bank()
631 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; in arm_smmu_init_context_bank()
632 cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; in arm_smmu_init_context_bank()
633 cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; in arm_smmu_init_context_bank()
634 cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT; in arm_smmu_init_context_bank()
637 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_init_context_bank()
640 /* MAIRs (stage-1 only) */ in arm_smmu_init_context_bank()
642 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
643 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; in arm_smmu_init_context_bank()
644 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; in arm_smmu_init_context_bank()
646 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; in arm_smmu_init_context_bank()
647 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; in arm_smmu_init_context_bank()
652 static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_context_bank() argument
656 struct arm_smmu_cb *cb = &smmu->cbs[idx]; in arm_smmu_write_context_bank()
657 struct arm_smmu_cfg *cfg = cb->cfg; in arm_smmu_write_context_bank()
660 cb_base = ARM_SMMU_CB(smmu, idx); in arm_smmu_write_context_bank()
668 gr1_base = ARM_SMMU_GR1(smmu); in arm_smmu_write_context_bank()
669 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_write_context_bank()
672 if (smmu->version > ARM_SMMU_V1) { in arm_smmu_write_context_bank()
673 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_write_context_bank()
677 /* 16-bit VMIDs live in CBA2R */ in arm_smmu_write_context_bank()
678 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_write_context_bank()
679 reg |= cfg->vmid << CBA2R_VMID_SHIFT; in arm_smmu_write_context_bank()
685 reg = cfg->cbar; in arm_smmu_write_context_bank()
686 if (smmu->version < ARM_SMMU_V2) in arm_smmu_write_context_bank()
687 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT; in arm_smmu_write_context_bank()
696 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { in arm_smmu_write_context_bank()
697 /* 8-bit VMIDs live in CBAR */ in arm_smmu_write_context_bank()
698 reg |= cfg->vmid << CBAR_VMID_SHIFT; in arm_smmu_write_context_bank()
707 if (stage1 && smmu->version > ARM_SMMU_V1) in arm_smmu_write_context_bank()
708 writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2); in arm_smmu_write_context_bank()
709 writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR); in arm_smmu_write_context_bank()
712 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_write_context_bank()
713 writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR); in arm_smmu_write_context_bank()
714 writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); in arm_smmu_write_context_bank()
715 writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); in arm_smmu_write_context_bank()
717 writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0); in arm_smmu_write_context_bank()
719 writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1); in arm_smmu_write_context_bank()
722 /* MAIRs (stage-1 only) */ in arm_smmu_write_context_bank()
724 writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0); in arm_smmu_write_context_bank()
725 writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1); in arm_smmu_write_context_bank()
739 struct arm_smmu_device *smmu) in arm_smmu_init_domain_context() argument
747 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_domain_context()
749 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
750 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
753 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in arm_smmu_init_domain_context()
754 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_init_domain_context()
755 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
775 * Note that you can't actually request stage-2 mappings. in arm_smmu_init_domain_context()
777 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_init_domain_context()
778 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_init_domain_context()
779 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_init_domain_context()
780 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_init_domain_context()
785 * the decision into the io-pgtable code where it arguably belongs, in arm_smmu_init_domain_context()
790 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) in arm_smmu_init_domain_context()
791 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; in arm_smmu_init_domain_context()
794 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && in arm_smmu_init_domain_context()
795 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) in arm_smmu_init_domain_context()
796 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; in arm_smmu_init_domain_context()
797 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && in arm_smmu_init_domain_context()
798 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | in arm_smmu_init_domain_context()
801 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64; in arm_smmu_init_domain_context()
803 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) { in arm_smmu_init_domain_context()
804 ret = -EINVAL; in arm_smmu_init_domain_context()
808 switch (smmu_domain->stage) { in arm_smmu_init_domain_context()
810 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; in arm_smmu_init_domain_context()
811 start = smmu->num_s2_context_banks; in arm_smmu_init_domain_context()
812 ias = smmu->va_size; in arm_smmu_init_domain_context()
813 oas = smmu->ipa_size; in arm_smmu_init_domain_context()
814 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
816 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { in arm_smmu_init_domain_context()
825 smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops; in arm_smmu_init_domain_context()
833 cfg->cbar = CBAR_TYPE_S2_TRANS; in arm_smmu_init_domain_context()
835 ias = smmu->ipa_size; in arm_smmu_init_domain_context()
836 oas = smmu->pa_size; in arm_smmu_init_domain_context()
837 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
844 if (smmu->version == ARM_SMMU_V2) in arm_smmu_init_domain_context()
845 smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2; in arm_smmu_init_domain_context()
847 smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1; in arm_smmu_init_domain_context()
850 ret = -EINVAL; in arm_smmu_init_domain_context()
853 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, in arm_smmu_init_domain_context()
854 smmu->num_context_banks); in arm_smmu_init_domain_context()
858 cfg->cbndx = ret; in arm_smmu_init_domain_context()
859 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_init_domain_context()
860 cfg->irptndx = atomic_inc_return(&smmu->irptndx); in arm_smmu_init_domain_context()
861 cfg->irptndx %= smmu->num_context_irqs; in arm_smmu_init_domain_context()
863 cfg->irptndx = cfg->cbndx; in arm_smmu_init_domain_context()
866 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in arm_smmu_init_domain_context()
867 cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base; in arm_smmu_init_domain_context()
869 cfg->asid = cfg->cbndx + smmu->cavium_id_base; in arm_smmu_init_domain_context()
872 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_init_domain_context()
875 .tlb = smmu_domain->tlb_ops, in arm_smmu_init_domain_context()
876 .iommu_dev = smmu->dev, in arm_smmu_init_domain_context()
879 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_init_domain_context()
882 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
885 ret = -ENOMEM; in arm_smmu_init_domain_context()
890 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_init_domain_context()
891 domain->geometry.aperture_end = (1UL << ias) - 1; in arm_smmu_init_domain_context()
892 domain->geometry.force_aperture = true; in arm_smmu_init_domain_context()
896 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_init_domain_context()
900 * handler seeing a half-initialised domain state. in arm_smmu_init_domain_context()
902 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; in arm_smmu_init_domain_context()
903 ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault, in arm_smmu_init_domain_context()
904 IRQF_SHARED, "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
906 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", in arm_smmu_init_domain_context()
907 cfg->irptndx, irq); in arm_smmu_init_domain_context()
908 cfg->irptndx = INVALID_IRPTNDX; in arm_smmu_init_domain_context()
911 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
914 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_init_domain_context()
918 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
920 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
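
Each context bank receives a unique ASID (stage 1) or VMID (stage 2) derived from its bank index, offset by cavium_id_base so that the banks of multiple Cavium SMMUv2 instances, which share a TLB namespace (erratum 27704, handled in arm_smmu_device_cfg_probe() further down), never collide. A sketch of that numbering; claim_id_base() models the atomic counter and all names are illustrative.

#include <stdio.h>

static int global_banks;		/* stands in for the driver's atomic counter */

static int claim_id_base(int num_context_banks, int is_cavium)
{
	if (!is_cavium)
		return 0;		/* non-Cavium SMMUs keep a private namespace */
	int base = global_banks;	/* atomic_add_return(...) minus num_context_banks */
	global_banks += num_context_banks;
	return base;
}

int main(void)
{
	int base0 = claim_id_base(8, 1);	/* first Cavium SMMU: global banks 0..7 */
	int base1 = claim_id_base(8, 1);	/* second: global banks 8..15 */
	int cbndx = 3;

	/* ASID = cbndx + base; VMID = cbndx + 1 + base (VMID 0 stays reserved) */
	printf("SMMU0 bank %d: asid=%d vmid=%d\n", cbndx, cbndx + base0, cbndx + 1 + base0);
	printf("SMMU1 bank %d: asid=%d vmid=%d\n", cbndx, cbndx + base1, cbndx + 1 + base1);
	return 0;
}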
927 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context() local
928 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_destroy_domain_context()
931 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_destroy_domain_context()
938 smmu->cbs[cfg->cbndx].cfg = NULL; in arm_smmu_destroy_domain_context()
939 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_destroy_domain_context()
941 if (cfg->irptndx != INVALID_IRPTNDX) { in arm_smmu_destroy_domain_context()
942 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; in arm_smmu_destroy_domain_context()
943 devm_free_irq(smmu->dev, irq, domain); in arm_smmu_destroy_domain_context()
946 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_destroy_domain_context()
947 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_destroy_domain_context()
968 iommu_get_dma_cookie(&smmu_domain->domain))) { in arm_smmu_domain_alloc()
973 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
974 spin_lock_init(&smmu_domain->cb_lock); in arm_smmu_domain_alloc()
976 return &smmu_domain->domain; in arm_smmu_domain_alloc()
992 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_smr() argument
994 struct arm_smmu_smr *smr = smmu->smrs + idx; in arm_smmu_write_smr()
995 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; in arm_smmu_write_smr()
997 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) in arm_smmu_write_smr()
999 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); in arm_smmu_write_smr()
1002 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_s2cr() argument
1004 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; in arm_smmu_write_s2cr()
1005 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT | in arm_smmu_write_s2cr()
1006 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | in arm_smmu_write_s2cr()
1007 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; in arm_smmu_write_s2cr()
1009 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && in arm_smmu_write_s2cr()
1010 smmu->smrs[idx].valid) in arm_smmu_write_s2cr()
1012 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); in arm_smmu_write_s2cr()
1015 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) in arm_smmu_write_sme() argument
1017 arm_smmu_write_s2cr(smmu, idx); in arm_smmu_write_sme()
1018 if (smmu->smrs) in arm_smmu_write_sme()
1019 arm_smmu_write_smr(smmu, idx); in arm_smmu_write_sme()
1026 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) in arm_smmu_test_smr_masks() argument
1028 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); in arm_smmu_test_smr_masks()
1031 if (!smmu->smrs) in arm_smmu_test_smr_masks()
1039 smr = smmu->streamid_mask << SMR_ID_SHIFT; in arm_smmu_test_smr_masks()
1042 smmu->streamid_mask = smr >> SMR_ID_SHIFT; in arm_smmu_test_smr_masks()
1044 smr = smmu->streamid_mask << SMR_MASK_SHIFT; in arm_smmu_test_smr_masks()
1047 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; in arm_smmu_test_smr_masks()
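
arm_smmu_test_smr_masks() discovers how many stream-ID and mask bits the hardware implements by writing an all-ones field into an SMR and reading back what stuck; the resulting streamid_mask and smr_mask_mask later bound the IDs accepted in arm_smmu_add_device(). A small sketch of that probe plus the stream-matching rule it supports (a stream ID hits an SMR when the bits outside the SMR's mask agree with its ID); the 15-bit fake register and the bit positions are assumptions consistent with the driver's SMR layout.

#include <stdint.h>
#include <stdio.h>

#define SMR_ID_SHIFT	0
#define SMR_MASK_SHIFT	16

static uint32_t hw_smr;				/* fake SMR backing store */

static void smr_write(uint32_t v) { hw_smr = v & 0x7fff7fff; }	/* 15 writable bits each */
static uint32_t smr_read(void)    { return hw_smr; }

/* A stream ID matches an SMR when the bits not covered by the mask agree */
static int smr_matches(uint16_t sid, uint16_t smr_id, uint16_t smr_mask)
{
	return ((sid ^ smr_id) & ~smr_mask) == 0;
}

int main(void)
{
	uint16_t streamid_mask, smr_mask_mask;

	smr_write(0xffffu << SMR_ID_SHIFT);		/* probe implemented ID bits */
	streamid_mask = smr_read() >> SMR_ID_SHIFT;

	smr_write(0xffffu << SMR_MASK_SHIFT);		/* probe implemented mask bits */
	smr_mask_mask = smr_read() >> SMR_MASK_SHIFT;

	printf("streamid_mask=%#x smr_mask_mask=%#x\n", streamid_mask, smr_mask_mask);
	printf("0x1234 vs SMR(id=0x1230,mask=0xf): %s\n",
	       smr_matches(0x1234, 0x1230, 0xf) ? "match" : "no match");
	return 0;
}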
1050 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) in arm_smmu_find_sme() argument
1052 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_find_sme()
1053 int i, free_idx = -ENOSPC; in arm_smmu_find_sme()
1060 for (i = 0; i < smmu->num_mapping_groups; ++i) { in arm_smmu_find_sme()
1086 return -EINVAL; in arm_smmu_find_sme()
1092 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) in arm_smmu_free_sme() argument
1094 if (--smmu->s2crs[idx].count) in arm_smmu_free_sme()
1097 smmu->s2crs[idx] = s2cr_init_val; in arm_smmu_free_sme()
1098 if (smmu->smrs) in arm_smmu_free_sme()
1099 smmu->smrs[idx].valid = false; in arm_smmu_free_sme()
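
arm_smmu_find_sme() reuses an existing stream-map entry when the new (ID, mask) pair is entirely covered by one already programmed, remembers the first free slot as a fallback, and rejects partial overlaps that could alias a stream ID; arm_smmu_free_sme() is the matching reference-counted release. A user-space model of that policy with simplified structures (a sketch, not the driver's data layout):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sme {
	uint16_t id, mask;
	bool valid;
	int count;
};

#define NUM_SME 8
static struct sme smes[NUM_SME];

static int find_sme(uint16_t id, uint16_t mask)
{
	int free_idx = -ENOSPC;

	for (int i = 0; i < NUM_SME; i++) {
		if (!smes[i].valid) {
			if (free_idx < 0)
				free_idx = i;	/* remember the first free slot */
			continue;
		}
		/* New entry entirely covered by an existing one: reuse it */
		if ((mask & smes[i].mask) == mask && !((id ^ smes[i].id) & ~smes[i].mask))
			return i;
		/* Any other overlap could alias a stream ID: refuse */
		if (!((id ^ smes[i].id) & ~(smes[i].mask | mask)))
			return -EINVAL;
	}
	return free_idx;
}

static bool free_sme(int idx)
{
	if (--smes[idx].count)
		return false;		/* still referenced, keep it programmed */
	smes[idx].valid = false;
	return true;			/* caller should rewrite the now-invalid entry */
}

int main(void)
{
	int a = find_sme(0x100, 0);
	smes[a] = (struct sme){ .id = 0x100, .mask = 0, .valid = true, .count = 1 };

	printf("second user of 0x100 -> index %d\n", find_sme(0x100, 0));	/* reuses 'a' */
	printf("overlapping 0x100/0xff -> %d\n", find_sme(0x100, 0xff));	/* -EINVAL */
	printf("released: %s\n", free_sme(a) ? "yes" : "no");
	return 0;
}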
1106 struct iommu_fwspec *fwspec = dev->iommu_fwspec; in arm_smmu_master_alloc_smes()
1107 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; in arm_smmu_master_alloc_smes()
1108 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_alloc_smes() local
1109 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_master_alloc_smes()
1113 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1116 u16 sid = fwspec->ids[i]; in arm_smmu_master_alloc_smes()
1117 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; in arm_smmu_master_alloc_smes()
1120 ret = -EEXIST; in arm_smmu_master_alloc_smes()
1124 ret = arm_smmu_find_sme(smmu, sid, mask); in arm_smmu_master_alloc_smes()
1129 if (smrs && smmu->s2crs[idx].count == 0) { in arm_smmu_master_alloc_smes()
1134 smmu->s2crs[idx].count++; in arm_smmu_master_alloc_smes()
1135 cfg->smendx[i] = (s16)idx; in arm_smmu_master_alloc_smes()
1140 group = ERR_PTR(-ENOMEM); in arm_smmu_master_alloc_smes()
1149 arm_smmu_write_sme(smmu, idx); in arm_smmu_master_alloc_smes()
1150 smmu->s2crs[idx].group = group; in arm_smmu_master_alloc_smes()
1153 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1157 while (i--) { in arm_smmu_master_alloc_smes()
1158 arm_smmu_free_sme(smmu, cfg->smendx[i]); in arm_smmu_master_alloc_smes()
1159 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_alloc_smes()
1161 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1167 struct arm_smmu_device *smmu = fwspec_smmu(fwspec); in arm_smmu_master_free_smes() local
1168 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; in arm_smmu_master_free_smes()
1171 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1173 if (arm_smmu_free_sme(smmu, idx)) in arm_smmu_master_free_smes()
1174 arm_smmu_write_sme(smmu, idx); in arm_smmu_master_free_smes()
1175 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_free_smes()
1177 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1183 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_add_master() local
1184 struct arm_smmu_s2cr *s2cr = smmu->s2crs; in arm_smmu_domain_add_master()
1185 u8 cbndx = smmu_domain->cfg.cbndx; in arm_smmu_domain_add_master()
1189 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_domain_add_master()
1201 arm_smmu_write_s2cr(smmu, idx); in arm_smmu_domain_add_master()
1209 struct iommu_fwspec *fwspec = dev->iommu_fwspec; in arm_smmu_attach_dev()
1210 struct arm_smmu_device *smmu; in arm_smmu_attach_dev() local
1213 if (!fwspec || fwspec->ops != &arm_smmu_ops) { in arm_smmu_attach_dev()
1214 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); in arm_smmu_attach_dev()
1215 return -ENXIO; in arm_smmu_attach_dev()
1220 * domains between of_xlate() and add_device() - we have no way to cope in arm_smmu_attach_dev()
1225 if (!fwspec->iommu_priv) in arm_smmu_attach_dev()
1226 return -ENODEV; in arm_smmu_attach_dev()
1228 smmu = fwspec_smmu(fwspec); in arm_smmu_attach_dev()
1230 ret = arm_smmu_init_domain_context(domain, smmu); in arm_smmu_attach_dev()
1238 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1240 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", in arm_smmu_attach_dev()
1241 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); in arm_smmu_attach_dev()
1242 return -EINVAL; in arm_smmu_attach_dev()
1252 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map()
1255 return -ENODEV; in arm_smmu_map()
1257 return ops->map(ops, iova, paddr, size, prot); in arm_smmu_map()
1263 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_unmap()
1268 return ops->unmap(ops, iova, size); in arm_smmu_unmap()
1275 if (smmu_domain->tlb_ops) in arm_smmu_iotlb_sync()
1276 smmu_domain->tlb_ops->tlb_sync(smmu_domain); in arm_smmu_iotlb_sync()
1283 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard() local
1284 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_iova_to_phys_hard()
1285 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys_hard()
1286 struct device *dev = smmu->dev; in arm_smmu_iova_to_phys_hard()
1292 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); in arm_smmu_iova_to_phys_hard()
1294 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1297 if (smmu->version == ARM_SMMU_V2) in arm_smmu_iova_to_phys_hard()
1299 else /* Register is only 32-bit in v1 */ in arm_smmu_iova_to_phys_hard()
1304 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1308 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys_hard()
1312 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1326 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys()
1328 if (domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_iova_to_phys()
1334 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1335 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iova_to_phys()
1338 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
1346 * Return true here as the SMMU can always send out coherent in arm_smmu_capable()
1359 return dev->fwnode == data; in arm_smmu_match_node()
1373 struct arm_smmu_device *smmu; in arm_smmu_add_device() local
1375 struct iommu_fwspec *fwspec = dev->iommu_fwspec; in arm_smmu_add_device()
1379 ret = arm_smmu_register_legacy_master(dev, &smmu); in arm_smmu_add_device()
1382 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() in arm_smmu_add_device()
1386 fwspec = dev->iommu_fwspec; in arm_smmu_add_device()
1389 } else if (fwspec && fwspec->ops == &arm_smmu_ops) { in arm_smmu_add_device()
1390 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_add_device()
1392 return -ENODEV; in arm_smmu_add_device()
1395 ret = -EINVAL; in arm_smmu_add_device()
1396 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_add_device()
1397 u16 sid = fwspec->ids[i]; in arm_smmu_add_device()
1398 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; in arm_smmu_add_device()
1400 if (sid & ~smmu->streamid_mask) { in arm_smmu_add_device()
1401 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_add_device()
1402 sid, smmu->streamid_mask); in arm_smmu_add_device()
1405 if (mask & ~smmu->smr_mask_mask) { in arm_smmu_add_device()
1406 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", in arm_smmu_add_device()
1407 mask, smmu->smr_mask_mask); in arm_smmu_add_device()
1412 ret = -ENOMEM; in arm_smmu_add_device()
1418 cfg->smmu = smmu; in arm_smmu_add_device()
1419 fwspec->iommu_priv = cfg; in arm_smmu_add_device()
1420 while (i--) in arm_smmu_add_device()
1421 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_add_device()
1427 iommu_device_link(&smmu->iommu, dev); in arm_smmu_add_device()
1440 struct iommu_fwspec *fwspec = dev->iommu_fwspec; in arm_smmu_remove_device()
1442 struct arm_smmu_device *smmu; in arm_smmu_remove_device() local
1445 if (!fwspec || fwspec->ops != &arm_smmu_ops) in arm_smmu_remove_device()
1448 cfg = fwspec->iommu_priv; in arm_smmu_remove_device()
1449 smmu = cfg->smmu; in arm_smmu_remove_device()
1451 iommu_device_unlink(&smmu->iommu, dev); in arm_smmu_remove_device()
1454 kfree(fwspec->iommu_priv); in arm_smmu_remove_device()
1460 struct iommu_fwspec *fwspec = dev->iommu_fwspec; in arm_smmu_device_group()
1461 struct arm_smmu_device *smmu = fwspec_smmu(fwspec); in arm_smmu_device_group() local
1466 if (group && smmu->s2crs[idx].group && in arm_smmu_device_group()
1467 group != smmu->s2crs[idx].group) in arm_smmu_device_group()
1468 return ERR_PTR(-EINVAL); in arm_smmu_device_group()
1470 group = smmu->s2crs[idx].group; in arm_smmu_device_group()
1489 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in arm_smmu_domain_get_attr()
1490 return -EINVAL; in arm_smmu_domain_get_attr()
1494 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); in arm_smmu_domain_get_attr()
1497 return -ENODEV; in arm_smmu_domain_get_attr()
1507 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in arm_smmu_domain_set_attr()
1508 return -EINVAL; in arm_smmu_domain_set_attr()
1510 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_domain_set_attr()
1514 if (smmu_domain->smmu) { in arm_smmu_domain_set_attr()
1515 ret = -EPERM; in arm_smmu_domain_set_attr()
1520 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_domain_set_attr()
1522 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_set_attr()
1526 ret = -ENODEV; in arm_smmu_domain_set_attr()
1530 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_domain_set_attr()
1538 if (args->args_count > 0) in arm_smmu_of_xlate()
1539 fwid |= (u16)args->args[0]; in arm_smmu_of_xlate()
1541 if (args->args_count > 1) in arm_smmu_of_xlate()
1542 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; in arm_smmu_of_xlate()
1543 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) in arm_smmu_of_xlate()
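
arm_smmu_of_xlate() folds the devicetree cells, or the stream-match-mask property, into one 32-bit firmware ID: the stream ID in the low half and the SMR mask in the upper half, which arm_smmu_add_device() unpacks again with the same shift. A sketch of that packing, assuming SMR_MASK_SHIFT is 16:

#include <stdint.h>
#include <stdio.h>

#define SMR_MASK_SHIFT 16

static uint32_t pack_fwid(uint16_t sid, uint16_t mask)
{
	return (uint32_t)sid | ((uint32_t)mask << SMR_MASK_SHIFT);
}

int main(void)
{
	uint32_t fwid = pack_fwid(0x840, 0x7);

	/* The add_device() side recovers both halves with a truncation and a shift */
	printf("fwid=%#x -> sid=%#x mask=%#x\n",
	       fwid, fwid & 0xffffu, fwid >> SMR_MASK_SHIFT);
	return 0;
}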
1560 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
1592 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1595 static void arm_smmu_device_reset(struct arm_smmu_device *smmu) in arm_smmu_device_reset() argument
1597 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); in arm_smmu_device_reset()
1602 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); in arm_smmu_device_reset()
1603 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); in arm_smmu_device_reset()
1609 for (i = 0; i < smmu->num_mapping_groups; ++i) in arm_smmu_device_reset()
1610 arm_smmu_write_sme(smmu, i); in arm_smmu_device_reset()
1612 if (smmu->model == ARM_MMU500) { in arm_smmu_device_reset()
1616 * bit is only present in MMU-500r2 onwards. in arm_smmu_device_reset()
1632 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_smmu_device_reset()
1633 void __iomem *cb_base = ARM_SMMU_CB(smmu, i); in arm_smmu_device_reset()
1635 arm_smmu_write_context_bank(smmu, i); in arm_smmu_device_reset()
1638 * Disable MMU-500's not-particularly-beneficial next-page in arm_smmu_device_reset()
1641 if (smmu->model == ARM_MMU500) { in arm_smmu_device_reset()
1652 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); in arm_smmu_device_reset()
1673 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_device_reset()
1676 if (smmu->features & ARM_SMMU_FEAT_EXIDS) in arm_smmu_device_reset()
1680 arm_smmu_tlb_sync_global(smmu); in arm_smmu_device_reset()
1681 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); in arm_smmu_device_reset()
1703 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) in arm_smmu_device_cfg_probe() argument
1706 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); in arm_smmu_device_cfg_probe()
1708 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_cfg_probe()
1711 dev_notice(smmu->dev, "probing hardware configuration...\n"); in arm_smmu_device_cfg_probe()
1712 dev_notice(smmu->dev, "SMMUv%d with:\n", in arm_smmu_device_cfg_probe()
1713 smmu->version == ARM_SMMU_V2 ? 2 : 1); in arm_smmu_device_cfg_probe()
1725 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_cfg_probe()
1726 dev_notice(smmu->dev, "\tstage 1 translation\n"); in arm_smmu_device_cfg_probe()
1730 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_cfg_probe()
1731 dev_notice(smmu->dev, "\tstage 2 translation\n"); in arm_smmu_device_cfg_probe()
1735 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; in arm_smmu_device_cfg_probe()
1736 dev_notice(smmu->dev, "\tnested translation\n"); in arm_smmu_device_cfg_probe()
1739 if (!(smmu->features & in arm_smmu_device_cfg_probe()
1741 dev_err(smmu->dev, "\tno translation support!\n"); in arm_smmu_device_cfg_probe()
1742 return -ENODEV; in arm_smmu_device_cfg_probe()
1746 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) { in arm_smmu_device_cfg_probe()
1747 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; in arm_smmu_device_cfg_probe()
1748 dev_notice(smmu->dev, "\taddress translation ops\n"); in arm_smmu_device_cfg_probe()
1759 dev_notice(smmu->dev, "\t%scoherent table walk\n", in arm_smmu_device_cfg_probe()
1760 cttw_fw ? "" : "non-"); in arm_smmu_device_cfg_probe()
1762 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1766 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) { in arm_smmu_device_cfg_probe()
1767 smmu->features |= ARM_SMMU_FEAT_EXIDS; in arm_smmu_device_cfg_probe()
1772 smmu->streamid_mask = size - 1; in arm_smmu_device_cfg_probe()
1774 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; in arm_smmu_device_cfg_probe()
1777 dev_err(smmu->dev, in arm_smmu_device_cfg_probe()
1778 "stream-matching supported, but no SMRs present!\n"); in arm_smmu_device_cfg_probe()
1779 return -ENODEV; in arm_smmu_device_cfg_probe()
1782 /* Zero-initialised to mark as invalid */ in arm_smmu_device_cfg_probe()
1783 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), in arm_smmu_device_cfg_probe()
1785 if (!smmu->smrs) in arm_smmu_device_cfg_probe()
1786 return -ENOMEM; in arm_smmu_device_cfg_probe()
1788 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1791 /* s2cr->type == 0 means translation, so initialise explicitly */ in arm_smmu_device_cfg_probe()
1792 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), in arm_smmu_device_cfg_probe()
1794 if (!smmu->s2crs) in arm_smmu_device_cfg_probe()
1795 return -ENOMEM; in arm_smmu_device_cfg_probe()
1797 smmu->s2crs[i] = s2cr_init_val; in arm_smmu_device_cfg_probe()
1799 smmu->num_mapping_groups = size; in arm_smmu_device_cfg_probe()
1800 mutex_init(&smmu->stream_map_mutex); in arm_smmu_device_cfg_probe()
1801 spin_lock_init(&smmu->global_sync_lock); in arm_smmu_device_cfg_probe()
1803 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { in arm_smmu_device_cfg_probe()
1804 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; in arm_smmu_device_cfg_probe()
1806 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; in arm_smmu_device_cfg_probe()
1811 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; in arm_smmu_device_cfg_probe()
1813 /* Check for size mismatch of SMMU address space from mapped region */ in arm_smmu_device_cfg_probe()
1815 size <<= smmu->pgshift; in arm_smmu_device_cfg_probe()
1816 if (smmu->cb_base != gr0_base + size) in arm_smmu_device_cfg_probe()
1817 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1818 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n", in arm_smmu_device_cfg_probe()
1819 size * 2, (smmu->cb_base - gr0_base) * 2); in arm_smmu_device_cfg_probe()
1821 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; in arm_smmu_device_cfg_probe()
1822 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; in arm_smmu_device_cfg_probe()
1823 if (smmu->num_s2_context_banks > smmu->num_context_banks) { in arm_smmu_device_cfg_probe()
1824 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); in arm_smmu_device_cfg_probe()
1825 return -ENODEV; in arm_smmu_device_cfg_probe()
1827 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", in arm_smmu_device_cfg_probe()
1828 smmu->num_context_banks, smmu->num_s2_context_banks); in arm_smmu_device_cfg_probe()
1834 if (smmu->model == CAVIUM_SMMUV2) { in arm_smmu_device_cfg_probe()
1835 smmu->cavium_id_base = in arm_smmu_device_cfg_probe()
1836 atomic_add_return(smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1838 smmu->cavium_id_base -= smmu->num_context_banks; in arm_smmu_device_cfg_probe()
1839 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n"); in arm_smmu_device_cfg_probe()
1841 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1842 sizeof(*smmu->cbs), GFP_KERNEL); in arm_smmu_device_cfg_probe()
1843 if (!smmu->cbs) in arm_smmu_device_cfg_probe()
1844 return -ENOMEM; in arm_smmu_device_cfg_probe()
1849 smmu->ipa_size = size; in arm_smmu_device_cfg_probe()
1853 smmu->pa_size = size; in arm_smmu_device_cfg_probe()
1856 smmu->features |= ARM_SMMU_FEAT_VMID16; in arm_smmu_device_cfg_probe()
1863 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) in arm_smmu_device_cfg_probe()
1864 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1867 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_device_cfg_probe()
1868 smmu->va_size = smmu->ipa_size; in arm_smmu_device_cfg_probe()
1869 if (smmu->version == ARM_SMMU_V1_64K) in arm_smmu_device_cfg_probe()
1870 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1873 smmu->va_size = arm_smmu_id_size_to_bits(size); in arm_smmu_device_cfg_probe()
1875 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; in arm_smmu_device_cfg_probe()
1877 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; in arm_smmu_device_cfg_probe()
1879 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1883 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) in arm_smmu_device_cfg_probe()
1884 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; in arm_smmu_device_cfg_probe()
1885 if (smmu->features & in arm_smmu_device_cfg_probe()
1887 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_cfg_probe()
1888 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) in arm_smmu_device_cfg_probe()
1889 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_cfg_probe()
1890 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) in arm_smmu_device_cfg_probe()
1891 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_cfg_probe()
1893 if (arm_smmu_ops.pgsize_bitmap == -1UL) in arm_smmu_device_cfg_probe()
1894 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1896 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1897 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", in arm_smmu_device_cfg_probe()
1898 smmu->pgsize_bitmap); in arm_smmu_device_cfg_probe()
1901 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_device_cfg_probe()
1902 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", in arm_smmu_device_cfg_probe()
1903 smmu->va_size, smmu->ipa_size); in arm_smmu_device_cfg_probe()
1905 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) in arm_smmu_device_cfg_probe()
1906 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", in arm_smmu_device_cfg_probe()
1907 smmu->ipa_size, smmu->pa_size); in arm_smmu_device_cfg_probe()
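
Each supported context format contributes its granules and block sizes to a single bitmap of mappable page sizes, printed by the notice above. A small sketch that composes such a bitmap from the usual SZ_* powers of two and decodes it back into readable sizes; which bits a real SMMU sets depends on the feature tests above.

#include <stdio.h>

#define SZ_4K	0x00001000UL
#define SZ_64K	0x00010000UL
#define SZ_2M	0x00200000UL
#define SZ_512M	0x20000000UL

int main(void)
{
	unsigned long pgsize_bitmap = 0;

	pgsize_bitmap |= SZ_4K | SZ_2M;		/* e.g. an AArch64 4K-granule format */
	pgsize_bitmap |= SZ_64K | SZ_512M;	/* e.g. an AArch64 64K-granule format */

	printf("Supported page sizes: 0x%08lx\n", pgsize_bitmap);

	/* Each set bit is one mappable block size */
	for (int b = 0; b < 8 * (int)sizeof(pgsize_bitmap); b++)
		if (pgsize_bitmap & (1UL << b))
			printf("  %lu KiB\n", (1UL << b) / 1024);

	return 0;
}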
1928 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1929 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1930 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1931 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1932 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1933 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1934 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1940 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) in acpi_smmu_get_data() argument
1947 smmu->version = ARM_SMMU_V1; in acpi_smmu_get_data()
1948 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1951 smmu->version = ARM_SMMU_V1_64K; in acpi_smmu_get_data()
1952 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1955 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1956 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1959 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1960 smmu->model = ARM_MMU500; in acpi_smmu_get_data()
1963 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1964 smmu->model = CAVIUM_SMMUV2; in acpi_smmu_get_data()
1967 ret = -ENODEV; in acpi_smmu_get_data()
1974 struct arm_smmu_device *smmu) in arm_smmu_device_acpi_probe() argument
1976 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
1983 iort_smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_device_acpi_probe()
1985 ret = acpi_smmu_get_data(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
1990 smmu->num_global_irqs = 1; in arm_smmu_device_acpi_probe()
1992 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) in arm_smmu_device_acpi_probe()
1993 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_acpi_probe()
1999 struct arm_smmu_device *smmu) in arm_smmu_device_acpi_probe() argument
2001 return -ENODEV; in arm_smmu_device_acpi_probe()
2006 struct arm_smmu_device *smmu) in arm_smmu_device_dt_probe() argument
2009 struct device *dev = &pdev->dev; in arm_smmu_device_dt_probe()
2012 if (of_property_read_u32(dev->of_node, "#global-interrupts", in arm_smmu_device_dt_probe()
2013 &smmu->num_global_irqs)) { in arm_smmu_device_dt_probe()
2014 dev_err(dev, "missing #global-interrupts property\n"); in arm_smmu_device_dt_probe()
2015 return -ENODEV; in arm_smmu_device_dt_probe()
2019 smmu->version = data->version; in arm_smmu_device_dt_probe()
2020 smmu->model = data->model; in arm_smmu_device_dt_probe()
2022 parse_driver_options(smmu); in arm_smmu_device_dt_probe()
2024 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); in arm_smmu_device_dt_probe()
2027 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n"); in arm_smmu_device_dt_probe()
2033 return -ENODEV; in arm_smmu_device_dt_probe()
2036 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
2037 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_dt_probe()
2063 struct arm_smmu_device *smmu; in arm_smmu_device_probe() local
2064 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
2067 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); in arm_smmu_device_probe()
2068 if (!smmu) { in arm_smmu_device_probe()
2070 return -ENOMEM; in arm_smmu_device_probe()
2072 smmu->dev = dev; in arm_smmu_device_probe()
2074 if (dev->of_node) in arm_smmu_device_probe()
2075 err = arm_smmu_device_dt_probe(pdev, smmu); in arm_smmu_device_probe()
2077 err = arm_smmu_device_acpi_probe(pdev, smmu); in arm_smmu_device_probe()
2083 ioaddr = res->start; in arm_smmu_device_probe()
2084 smmu->base = devm_ioremap_resource(dev, res); in arm_smmu_device_probe()
2085 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
2086 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
2087 smmu->cb_base = smmu->base + resource_size(res) / 2; in arm_smmu_device_probe()
2092 if (num_irqs > smmu->num_global_irqs) in arm_smmu_device_probe()
2093 smmu->num_context_irqs++; in arm_smmu_device_probe()
2096 if (!smmu->num_context_irqs) { in arm_smmu_device_probe()
2098 num_irqs, smmu->num_global_irqs + 1); in arm_smmu_device_probe()
2099 return -ENODEV; in arm_smmu_device_probe()
2102 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs), in arm_smmu_device_probe()
2104 if (!smmu->irqs) { in arm_smmu_device_probe()
2106 return -ENOMEM; in arm_smmu_device_probe()
2114 return -ENODEV; in arm_smmu_device_probe()
2116 smmu->irqs[i] = irq; in arm_smmu_device_probe()
2119 err = arm_smmu_device_cfg_probe(smmu); in arm_smmu_device_probe()
2123 if (smmu->version == ARM_SMMU_V2) { in arm_smmu_device_probe()
2124 if (smmu->num_context_banks > smmu->num_context_irqs) { in arm_smmu_device_probe()
2127 smmu->num_context_irqs, smmu->num_context_banks); in arm_smmu_device_probe()
2128 return -ENODEV; in arm_smmu_device_probe()
2132 smmu->num_context_irqs = smmu->num_context_banks; in arm_smmu_device_probe()
2135 for (i = 0; i < smmu->num_global_irqs; ++i) { in arm_smmu_device_probe()
2136 err = devm_request_irq(smmu->dev, smmu->irqs[i], in arm_smmu_device_probe()
2139 "arm-smmu global fault", in arm_smmu_device_probe()
2140 smmu); in arm_smmu_device_probe()
2143 i, smmu->irqs[i]); in arm_smmu_device_probe()
2148 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, in arm_smmu_device_probe()
2149 "smmu.%pa", &ioaddr); in arm_smmu_device_probe()
2155 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); in arm_smmu_device_probe()
2156 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); in arm_smmu_device_probe()
2158 err = iommu_device_register(&smmu->iommu); in arm_smmu_device_probe()
2164 platform_set_drvdata(pdev, smmu); in arm_smmu_device_probe()
2165 arm_smmu_device_reset(smmu); in arm_smmu_device_probe()
2166 arm_smmu_test_smr_masks(smmu); in arm_smmu_device_probe()
2169 * For ACPI and generic DT bindings, an SMMU will be probed before in arm_smmu_device_probe()
2171 * ready to handle default domain setup as soon as any SMMU exists. in arm_smmu_device_probe()
2182 * delay setting bus ops until we're sure every possible SMMU is ready,
2195 struct arm_smmu_device *smmu = platform_get_drvdata(pdev); in arm_smmu_device_remove() local
2197 if (!smmu) in arm_smmu_device_remove()
2198 return -ENODEV; in arm_smmu_device_remove()
2200 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) in arm_smmu_device_remove()
2201 dev_err(&pdev->dev, "removing device with active domains!\n"); in arm_smmu_device_remove()
2204 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); in arm_smmu_device_remove()
2215 struct arm_smmu_device *smmu = dev_get_drvdata(dev); in arm_smmu_pm_resume() local
2217 arm_smmu_device_reset(smmu); in arm_smmu_pm_resume()
2225 .name = "arm-smmu",
2235 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2237 MODULE_LICENSE("GPL v2");