/drivers/iommu/arm/arm-smmu/
arm-smmu.c:
     73:  static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
     75:          if (pm_runtime_enabled(smmu->dev))
     76:                  return pm_runtime_resume_and_get(smmu->dev);
     81:  static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
     83:          if (pm_runtime_enabled(smmu->dev))
     84:                  pm_runtime_put_autosuspend(smmu->dev);
    134:          struct arm_smmu_device **smmu)    (in arm_smmu_register_legacy_master())
    177:          *smmu = dev_get_drvdata(smmu_dev);    (in arm_smmu_register_legacy_master())
    199:          struct arm_smmu_device **smmu)    (in arm_smmu_register_legacy_master())
    211:  static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
    [all …]
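Both wrappers above only touch runtime PM when it is enabled for the SMMU's struct device, so callers can bracket register access unconditionally. A minimal caller sketch, assuming an invented arm_smmu_do_maintenance() helper built around the two functions shown:

static int arm_smmu_do_maintenance(struct arm_smmu_device *smmu)
{
        int ret;

        /* Power the SMMU up; a no-op when runtime PM is disabled. */
        ret = arm_smmu_rpm_get(smmu);
        if (ret < 0)
                return ret;

        /* ... access SMMU registers while the device is guaranteed awake ... */

        /* Drop the reference; the device may autosuspend later. */
        arm_smmu_rpm_put(smmu);
        return 0;
}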
arm-smmu-impl.c:
     28:  static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
     33:          return readl_relaxed(arm_smmu_page(smmu, page) + offset);
     36:  static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
     41:          writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
     52:          struct arm_smmu_device smmu;    (member)
     56:  static int cavium_cfg_probe(struct arm_smmu_device *smmu)
     59:          struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
     65:          cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
     66:          dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
     74:          struct cavium_smmu *cs = container_of(smmu_domain->smmu,    (in cavium_init_context())
    [all …]
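Each integration file wraps the generic struct arm_smmu_device in a vendor structure and recovers it with container_of(), exactly as struct cavium_smmu does above. A minimal sketch of the embedding pattern, with invented names (example_smmu, vendor_quirks):

struct example_smmu {
        struct arm_smmu_device smmu;    /* embedded, never a pointer */
        u32 vendor_quirks;
};

static struct example_smmu *to_example_smmu(struct arm_smmu_device *smmu)
{
        /* Recover the wrapper from the generic device handed back by the core. */
        return container_of(smmu, struct example_smmu, smmu);
}

Keeping the generic structure embedded lets the core driver stay unaware of vendor data while the implementation hooks can still reach it at no extra cost.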
arm-smmu-qcom.c:
     14:          struct arm_smmu_device smmu;    (member)
     20:  static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
     22:          return container_of(smmu, struct qcom_smmu, smmu);
     25:  static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
     28:          struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
     39:          arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
     47:          struct arm_smmu_device *smmu = smmu_domain->smmu;    (in qcom_adreno_smmu_get_fault_info())
     49:          info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
     50:          info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
     51:          info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
    [all …]
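qcom_adreno_smmu_write_sctlr() above is the hook point for adjusting a context bank's SCTLR value before the generic register write. A hedged sketch of such a hook; the function name is invented and the HUPCF tweak is only an illustration, not the actual Qualcomm logic:

static void example_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
                                     u32 reg)
{
        /*
         * Example policy: keep client transactions flowing after a context
         * fault (HUPCF) instead of stalling them, then do the normal write.
         */
        reg |= ARM_SMMU_SCTLR_HUPCF;

        arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}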
arm-smmu-nvidia.c:
     35:          struct arm_smmu_device smmu;    (member)
     41:  static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
     43:          return container_of(smmu, struct nvidia_smmu, smmu);
     46:  static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
     51:          nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
     52:          return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
     55:  static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
     58:          void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
     63:  static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
     66:          struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
    [all …]
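nvidia_smmu_page() resolves the register page of one of several mirrored SMMU instances: reads are served from instance 0, while programming writes are generally replayed to every instance so they stay in sync. A rough sketch of that mirrored write, with NVIDIA_SMMU_INSTANCES as a placeholder for however many bases[] entries the driver really manages:

#define NVIDIA_SMMU_INSTANCES   2       /* placeholder count for this sketch */

static void example_nvidia_write_reg(struct arm_smmu_device *smmu,
                                     int page, int offset, u32 val)
{
        unsigned int inst;

        /* Keep the mirrored SMMU instances programmed identically. */
        for (inst = 0; inst < NVIDIA_SMMU_INSTANCES; inst++)
                writel_relaxed(val, nvidia_smmu_page(smmu, inst, page) + offset);
}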
arm-smmu.h:
    368:          struct arm_smmu_device *smmu;    (member)
    380:          struct arm_smmu_device *smmu;    (member)
    425:          u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
    426:          void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
    428:          u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
    429:          void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
    431:          int (*cfg_probe)(struct arm_smmu_device *smmu);
    432:          int (*reset)(struct arm_smmu_device *smmu);
    435:          void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
    441:          struct arm_smmu_device *smmu,
    [all …]
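The function pointers at lines 425-435 form the per-implementation hook table that the vendor files above fill in. A minimal sketch, assuming the table type is struct arm_smmu_impl as used in this directory; the quirk and both example_* names are invented:

static int example_cfg_probe(struct arm_smmu_device *smmu)
{
        /* Apply an integration-specific quirk once the ID registers are read. */
        dev_notice(smmu->dev, "applying example integration quirk\n");
        return 0;
}

static const struct arm_smmu_impl example_impl = {
        .cfg_probe = example_cfg_probe,
        /* Hooks left NULL fall back to the generic arm-smmu behaviour. */
};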
Makefile:
      4:  arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
      5:  arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
/drivers/iommu/
tegra-smmu.c:
     24:          struct tegra_smmu *smmu;    (member)
     54:          struct tegra_smmu *smmu;    (member)
     70:  static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
     73:          writel(value, smmu->regs + offset);
     76:  static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
     78:          return readl(smmu->regs + offset);
     87:  #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
     88:          ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
    166:  static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
    169:          return (addr & smmu->pfn_mask) == addr;
    [all …]
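smmu_dma_addr_valid() checks that a page-table address fits the SoC's addressable PFN range before it is handed to the hardware. A small sketch composing it with the smmu_writel() accessor above; example_program_pt_addr() and EXAMPLE_PT_REG are placeholders, not real tegra-smmu names:

#define EXAMPLE_PT_REG  0x0     /* placeholder register offset */

static int example_program_pt_addr(struct tegra_smmu *smmu, dma_addr_t addr)
{
        /* Refuse addresses the hardware cannot express. */
        if (!smmu_dma_addr_valid(smmu, addr))
                return -EINVAL;

        /* A real caller would encode the address as the register expects. */
        smmu_writel(smmu, lower_32_bits(addr), EXAMPLE_PT_REG);
        return 0;
}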
Makefile:
     24:  obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
Kconfig:
    355:  'arm-smmu.disable_bypass' will continue to override this
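This help text refers to the driver's boot-time override, the arm-smmu.disable_bypass module parameter. For example, booting with

    arm-smmu.disable_bypass=0

on the kernel command line allows transactions from devices with no attached domain to bypass the SMMU even when the Kconfig default would disable bypass.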
/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c:
     91:  static void parse_driver_options(struct arm_smmu_device *smmu)
     96:          if (of_property_read_bool(smmu->dev->of_node,
     98:                  smmu->options |= arm_smmu_options[i].opt;
     99:                  dev_notice(smmu->dev, "option %s\n",
    194:  static void queue_poll_init(struct arm_smmu_device *smmu,
    199:          qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
    350:  static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
    352:          return &smmu->cmdq;
    355:  static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
    366:          if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
    [all …]
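parse_driver_options() walks a table that maps device-tree properties onto bits in smmu->options. A hedged sketch of the same table-driven pattern; the structure, the property string and all example_* names are invented for illustration:

struct example_option {
        u32 opt;
        const char *prop;
};

static const struct example_option example_options[] = {
        { .opt = BIT(0), .prop = "example,combined-irq" },
        { 0, NULL },
};

static void example_parse_options(struct arm_smmu_device *smmu)
{
        int i;

        /* Latch an option bit for every boolean property present in the DT. */
        for (i = 0; example_options[i].prop; i++) {
                if (of_property_read_bool(smmu->dev->of_node,
                                          example_options[i].prop))
                        smmu->options |= example_options[i].opt;
        }
}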
arm-smmu-v3-sva.c:
     50:          struct arm_smmu_device *smmu;    (local in arm_smmu_share_asid())
     66:          smmu = smmu_domain->smmu;
     69:                          XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
     86:          arm_smmu_tlb_inv_asid(smmu, asid);
    212:          if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {    (in arm_smmu_mm_invalidate_range())
    217:          if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
    219:                  arm_smmu_tlb_inv_asid(smmu_domain->smmu,
    248:          arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);    (in arm_smmu_mm_release())
    335:          arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);    (in arm_smmu_mmu_notifier_put())
    430:  bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
    [all …]
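Line 69 shows ASIDs being handed out from an xarray bounded by the SMMU's asid_bits. A minimal sketch of that allocation step; the asid_xa pointer, the stored entry and the helper name are assumptions for illustration:

static int example_alloc_asid(struct arm_smmu_device *smmu,
                              struct xarray *asid_xa, void *entry, u32 *asid)
{
        /* ASID 0 stays reserved; the upper bound comes from the hardware. */
        return xa_alloc(asid_xa, asid, entry,
                        XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
}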
Makefile:
      3:  arm_smmu_v3-objs-y += arm-smmu-v3.o
      4:  arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
arm-smmu-v3.h:
    696:          struct arm_smmu_device *smmu;    (member)
    719:          struct arm_smmu_device *smmu;    (member)
    751:  void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
    760:  bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
    772:  static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
    [all …]
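The header declares arm_smmu_sva_supported() at line 760 and provides a static inline stub at line 772 for builds without SVA. A sketch of that compile-out pattern; the stub returning false is an assumption consistent with what such a stub is for:

#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
#else
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
        return false;   /* SVA compiled out: never report support */
}
#endif /* CONFIG_ARM_SMMU_V3_SVA */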
/drivers/memory/tegra/
tegra210.c:
    .smmu = { ... } initializers at lines 20, 36, 52, 68, 84, 100, 116, 132, 148, 164, [all …]
tegra114.c:
    .smmu = { ... } initializers at lines 31, 47, 63, 79, 95, 111, 127, 143, 159, 175, [all …]
tegra124.c:
    .smmu = { ... } initializers at lines 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, [all …]
tegra30.c:
    .smmu = { ... } initializers at lines 54, 71, 88, 105, 122, 139, 156, 173, 190, 207, [all …]
mc.c:
    797:          if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {    (in tegra_mc_probe())
    798:                  mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
    799:                  if (IS_ERR(mc->smmu)) {
    801:                                  PTR_ERR(mc->smmu));
    802:                          mc->smmu = NULL;
    806:          if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
/drivers/acpi/arm64/
iort.c:
    407:          struct acpi_iort_smmu_v3 *smmu;    (local in iort_get_id_mapping_index())
    419:          smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
    424:          if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
    425:              && smmu->sync_gsiv)
    428:          if (smmu->id_mapping_index >= node->mapping_count) {
    434:          return smmu->id_mapping_index;
    799:          struct acpi_iort_smmu_v3 *smmu;    (local in iort_get_msi_resv_iommu())
    801:          smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
    802:          if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
   1126:          struct acpi_iort_smmu_v3 *smmu;    (local in arm_smmu_v3_count_resources())
    [all …]
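The IORT code repeatedly casts node->node_data to the SMMUv3-specific payload before looking at its GSIV and ID-mapping fields. A small sketch of that access pattern; the helper name and the "all four interrupts wired" reading of lines 424-425 are assumptions for illustration:

static bool example_smmu_v3_has_all_gsivs(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        /* The SMMUv3 payload follows the generic IORT node header. */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        return smmu->event_gsiv && smmu->pri_gsiv &&
               smmu->gerr_gsiv && smmu->sync_gsiv;
}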
/drivers/iommu/arm/
Makefile:
      2:  obj-y += arm-smmu/ arm-smmu-v3/