
Searched refs:iommu (Results 1 – 25 of 65) sorted by relevance


/drivers/iommu/
amd_iommu_init.c
259 bool translation_pre_enabled(struct amd_iommu *iommu) in translation_pre_enabled() argument
261 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
265 static void clear_translation_pre_enabled(struct amd_iommu *iommu) in clear_translation_pre_enabled() argument
267 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
270 static void init_translation_status(struct amd_iommu *iommu) in init_translation_status() argument
274 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
276 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
300 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) in iommu_read_l1() argument
304 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
305 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
[all …]
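
The three helpers above form a common idiom: sample the hardware enable bit once at init, cache it as a driver flag, and let later code test the flag instead of re-reading MMIO (the intel-iommu.c and intel_irq_remapping.c results further down have the same shape, used to detect translation left enabled by firmware or a kdump kernel). A minimal sketch of the idiom, with illustrative register offsets and flag bits standing in for the driver's real constants:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_FLAG_TRANS_PRE_ENABLED	BIT(0)	/* illustrative flag bit */
#define EXAMPLE_MMIO_CONTROL_OFFSET	0x0018	/* illustrative offset */
#define EXAMPLE_CTRL_EN			BIT(0)	/* illustrative enable bit */

struct example_iommu {
	void __iomem *mmio_base;
	u32 flags;
};

/* Read the control register once at init and remember whether translation
 * was already enabled before this kernel took ownership of the hardware. */
static void example_init_translation_status(struct example_iommu *iommu)
{
	u64 ctrl = readq(iommu->mmio_base + EXAMPLE_MMIO_CONTROL_OFFSET);

	if (ctrl & EXAMPLE_CTRL_EN)
		iommu->flags |= EXAMPLE_FLAG_TRANS_PRE_ENABLED;
}

/* Later code asks the cached flag, never the hardware. */
static bool example_translation_pre_enabled(struct example_iommu *iommu)
{
	return iommu->flags & EXAMPLE_FLAG_TRANS_PRE_ENABLED;
}
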
rockchip-iommu.c
107 struct iommu_device iommu; member
115 struct rk_iommu *iommu; member
285 static void rk_iommu_command(struct rk_iommu *iommu, u32 command) in rk_iommu_command() argument
289 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_command()
290 writel(command, iommu->bases[i] + RK_MMU_COMMAND); in rk_iommu_command()
297 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, in rk_iommu_zap_lines() argument
306 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_zap_lines()
310 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
314 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) in rk_iommu_is_stall_active() argument
319 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_stall_active()
[all …]
msm_iommu.c
55 static int __enable_clocks(struct msm_iommu_dev *iommu) in __enable_clocks() argument
59 ret = clk_enable(iommu->pclk); in __enable_clocks()
63 if (iommu->clk) { in __enable_clocks()
64 ret = clk_enable(iommu->clk); in __enable_clocks()
66 clk_disable(iommu->pclk); in __enable_clocks()
72 static void __disable_clocks(struct msm_iommu_dev *iommu) in __disable_clocks() argument
74 if (iommu->clk) in __disable_clocks()
75 clk_disable(iommu->clk); in __disable_clocks()
76 clk_disable(iommu->pclk); in __disable_clocks()
121 struct msm_iommu_dev *iommu = NULL; in __flush_iotlb() local
[all …]
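
The __enable_clocks()/__disable_clocks() pair above is the standard two-clock pattern: enable the mandatory clock first, the optional one second, and unwind the first if the second fails. A hedged sketch of that shape (the parameter names are mine, not the driver's):

#include <linux/clk.h>

static int example_enable_clocks(struct clk *pclk, struct clk *clk)
{
	int ret;

	ret = clk_enable(pclk);
	if (ret)
		return ret;

	if (clk) {			/* the second clock may be absent */
		ret = clk_enable(clk);
		if (ret)
			clk_disable(pclk);	/* unwind the first on failure */
	}

	return ret;
}
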
intel_irq_remapping.c
32 struct intel_iommu *iommu; member
39 struct intel_iommu *iommu; member
46 struct intel_iommu *iommu; member
82 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
85 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
87 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
90 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
92 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
95 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
99 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
[all …]
dmar.c
64 static void free_iommu(struct intel_iommu *iommu);
431 if (dmaru->iommu) in dmar_free_drhd()
432 free_iommu(dmaru->iommu); in dmar_free_drhd()
470 drhd->iommu->node = node; in dmar_parse_one_rhsa()
899 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
911 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
913 iounmap(iommu->reg); in unmap_iommu()
914 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
925 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) in map_iommu() argument
929 iommu->reg_phys = phys_addr; in map_iommu()
[all …]
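
map_iommu()/unmap_iommu() above pair MMIO region reservation with mapping and release them in strict reverse order. A minimal sketch of that pairing, under made-up names; the real map_iommu() also reads capability registers and may remap at a larger size, which is elided here:

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map(phys_addr_t phys, size_t size)
{
	void __iomem *reg;

	/* Claim the physical range first so nothing else can map it. */
	if (!request_mem_region(phys, size, "example-iommu"))
		return NULL;

	reg = ioremap(phys, size);
	if (!reg)
		release_mem_region(phys, size);	/* undo the reservation */
	return reg;
}

static void example_unmap(void __iomem *reg, phys_addr_t phys, size_t size)
{
	iounmap(reg);
	release_mem_region(phys, size);
}
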
intel-iommu.c
344 static void domain_context_clear(struct intel_iommu *iommu,
347 struct intel_iommu *iommu);
412 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
414 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
417 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
419 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
422 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
426 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
428 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
484 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
[all …]
iommu-sysfs.c
54 int iommu_device_sysfs_add(struct iommu_device *iommu, in iommu_device_sysfs_add() argument
62 iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL); in iommu_device_sysfs_add()
63 if (!iommu->dev) in iommu_device_sysfs_add()
66 device_initialize(iommu->dev); in iommu_device_sysfs_add()
68 iommu->dev->class = &iommu_class; in iommu_device_sysfs_add()
69 iommu->dev->parent = parent; in iommu_device_sysfs_add()
70 iommu->dev->groups = groups; in iommu_device_sysfs_add()
73 ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs); in iommu_device_sysfs_add()
78 ret = device_add(iommu->dev); in iommu_device_sysfs_add()
82 dev_set_drvdata(iommu->dev, iommu); in iommu_device_sysfs_add()
[all …]
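
iommu_device_sysfs_add() above is the producer side of the sysfs class; several drivers in these results (tegra-gart, tegra-smmu, amd) call it at probe time. A hedged sketch of a driver's registration sequence under this era's API, with hypothetical example_* names:

#include <linux/iommu.h>

struct example_iommu {
	struct iommu_device iommu;	/* core handle, as the drivers above embed */
};

static const struct iommu_ops example_iommu_ops;	/* callbacks elided */

static int example_iommu_register(struct example_iommu *ex, struct device *dev)
{
	int ret;

	/* Allocates the class device and creates /sys/class/iommu/<name>. */
	ret = iommu_device_sysfs_add(&ex->iommu, dev, NULL, "example-iommu");
	if (ret)
		return ret;

	iommu_device_set_ops(&ex->iommu, &example_iommu_ops);
	iommu_device_set_fwnode(&ex->iommu, dev->fwnode);

	ret = iommu_device_register(&ex->iommu);
	if (ret)
		iommu_device_sysfs_remove(&ex->iommu);

	return ret;
}
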
intel-svm.c
26 int intel_svm_init(struct intel_iommu *iommu) in intel_svm_init() argument
29 !cap_fl1gp_support(iommu->cap)) in intel_svm_init()
33 !cap_5lp_support(iommu->cap)) in intel_svm_init()
41 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
49 iommu->name); in intel_svm_enable_prq()
52 iommu->prq = page_address(pages); in intel_svm_enable_prq()
54 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
57 iommu->name); in intel_svm_enable_prq()
60 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
61 iommu->prq = NULL; in intel_svm_enable_prq()
[all …]
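
intel_svm_enable_prq() above allocates the page-request queue from node-local, zeroed pages and frees it with free_pages() on any later failure. A sketch of just that allocation pattern, with an illustrative order constant in place of the driver's PRQ_ORDER:

#include <linux/gfp.h>
#include <linux/mm.h>

#define EXAMPLE_QUEUE_ORDER	0	/* illustrative; the driver uses PRQ_ORDER */

static void *example_alloc_queue(int node)
{
	struct page *pages;

	/* Node-local and zeroed, so the hardware queue starts out empty. */
	pages = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
				 EXAMPLE_QUEUE_ORDER);
	return pages ? page_address(pages) : NULL;
}

static void example_free_queue(void *queue)
{
	free_pages((unsigned long)queue, EXAMPLE_QUEUE_ORDER);
}
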
intel-pasid.c
96 if (info->iommu->segment == data->segment && in search_pasid_table()
133 info = dev->archdata.iommu; in intel_pasid_alloc_table()
155 pages = alloc_pages_node(info->iommu->node, in intel_pasid_alloc_table()
180 info = dev->archdata.iommu; in intel_pasid_free_table()
206 info = dev->archdata.iommu; in intel_pasid_get_table()
217 info = dev->archdata.iommu; in intel_pasid_get_dev_max_id()
238 info = dev->archdata.iommu; in intel_pasid_get_entry()
245 entries = alloc_pgtable_page(info->iommu->node); in intel_pasid_get_entry()
399 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu, in pasid_cache_invalidation_with_pasid() argument
409 qi_submit_sync(&desc, iommu); in pasid_cache_invalidation_with_pasid()
[all …]
intel-iommu-debugfs.c
108 struct intel_iommu *iommu; in iommu_regset_show() local
114 for_each_active_iommu(iommu, drhd) { in iommu_regset_show()
122 iommu->name, drhd->reg_base_addr); in iommu_regset_show()
128 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_regset_show()
130 value = dmar_readq(iommu->reg + iommu_regs[i].offset); in iommu_regset_show()
135 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_regset_show()
203 static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus) in ctx_tbl_walk() argument
225 context = iommu_context_addr(iommu, bus, devfn, 0); in ctx_tbl_walk()
234 tbl_wlk.rt_entry = &iommu->root_entry[bus]; in ctx_tbl_walk()
238 if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) { in ctx_tbl_walk()
[all …]
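
iommu_regset_show() above is a classic seq_file register dump: take the register lock, read each offset, print one row per register. A minimal sketch of how such a show routine is typically wired into debugfs; DEFINE_SHOW_ATTRIBUTE() generates the example_regs_fops used below, and all example_* names are hypothetical:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_regs_show(struct seq_file *m, void *unused)
{
	/* Under the real driver's register lock: read each register and
	 * seq_printf() one row per offset, as iommu_regset_show() does. */
	seq_puts(m, "NAME\t\t\tOFFSET\t\tCONTENTS\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_regs);

static void example_debugfs_init(struct dentry *parent)
{
	debugfs_create_file("example_regs", 0444, parent, NULL,
			    &example_regs_fops);
}
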
amd_iommu.c
289 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in find_dev_data() local
298 if (translation_pre_enabled(iommu)) in find_dev_data()
307 return dev->archdata.iommu; in get_dev_data()
405 struct amd_iommu *iommu; in iommu_init_device() local
408 if (dev->archdata.iommu) in iommu_init_device()
415 iommu = amd_iommu_rlookup_table[devid]; in iommu_init_device()
431 struct amd_iommu *iommu; in iommu_init_device() local
433 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
434 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
437 dev->archdata.iommu = dev_data; in iommu_init_device()
[all …]
Makefile
2 obj-$(CONFIG_IOMMU_API) += iommu.o
3 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
4 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
5 obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
6 obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
20 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
22 obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
28 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
29 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
30 obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
[all …]
amd_iommu_proto.h
18 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
25 void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
27 static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} in amd_iommu_debugfs_setup() argument
55 extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
57 static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
76 static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) in iommu_feature() argument
78 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) in iommu_feature()
81 return !!(iommu->features & f); in iommu_feature()
94 extern bool translation_pre_enabled(struct amd_iommu *iommu);
omap-iommu.c
85 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; in omap_iommu_save_ctx()
115 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; in omap_iommu_restore_ctx()
964 struct omap_iommu_device *iommu; in omap_iommu_domain_deactivate() local
971 iommu = omap_domain->iommus; in omap_iommu_domain_deactivate()
972 iommu += (omap_domain->num_iommus - 1); in omap_iommu_domain_deactivate()
973 for (i = 0; i < omap_domain->num_iommus; i++, iommu--) { in omap_iommu_domain_deactivate()
974 oiommu = iommu->iommu_dev; in omap_iommu_domain_deactivate()
993 struct omap_iommu_device *iommu; in omap_iommu_domain_activate() local
1000 iommu = omap_domain->iommus; in omap_iommu_domain_activate()
1001 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { in omap_iommu_domain_activate()
[all …]
tegra-gart.c
45 struct iommu_device iommu; /* IOMMU Core handle */ member
116 } else if (dev->archdata.iommu != domain) { in gart_iommu_attach_dev()
117 dev->archdata.iommu = domain; in gart_iommu_attach_dev()
134 if (dev->archdata.iommu == domain) { in gart_iommu_detach_dev()
135 dev->archdata.iommu = NULL; in gart_iommu_detach_dev()
259 iommu_device_link(&gart_handle->iommu, dev); in gart_iommu_add_device()
267 iommu_device_unlink(&gart_handle->iommu, dev); in gart_iommu_remove_device()
361 err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart"); in tegra_gart_probe()
365 iommu_device_set_ops(&gart->iommu, &gart_iommu_ops); in tegra_gart_probe()
366 iommu_device_set_fwnode(&gart->iommu, dev->fwnode); in tegra_gart_probe()
[all …]
qcom_iommu.c
44 struct iommu_device iommu; member
67 struct qcom_iommu_dev *iommu; member
234 if (qcom_domain->iommu) in qcom_iommu_init_domain()
245 qcom_domain->iommu = qcom_iommu; in qcom_iommu_init_domain()
311 qcom_domain->iommu = NULL; in qcom_iommu_init_domain()
348 if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */ in qcom_iommu_domain_free()
358 pm_runtime_get_sync(qcom_domain->iommu->dev); in qcom_iommu_domain_free()
362 pm_runtime_put_sync(qcom_domain->iommu->dev); in qcom_iommu_domain_free()
390 if (qcom_domain->iommu != qcom_iommu) { in qcom_iommu_attach_dev()
393 dev_name(qcom_domain->iommu->dev), in qcom_iommu_attach_dev()
[all …]
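
The pm_runtime_get_sync()/pm_runtime_put_sync() bracketing above is how qcom_iommu keeps the IOMMU powered only while its registers are touched. The shape, sketched with an illustrative body:

#include <linux/pm_runtime.h>

/* Power the IOMMU up for the duration of a register-touching operation. */
static void example_with_iommu_powered(struct device *iommu_dev)
{
	pm_runtime_get_sync(iommu_dev);

	/* ... reset context banks, write registers, etc. ... */

	pm_runtime_put_sync(iommu_dev);
}
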
intel-pasid.h
87 int intel_pasid_setup_first_level(struct intel_iommu *iommu,
90 int intel_pasid_setup_second_level(struct intel_iommu *iommu,
93 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
96 void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
amd_iommu_debugfs.c
21 void amd_iommu_debugfs_setup(struct amd_iommu *iommu) in amd_iommu_debugfs_setup() argument
31 snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index); in amd_iommu_debugfs_setup()
32 iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs); in amd_iommu_debugfs_setup()
tegra-smmu.c
45 struct iommu_device iommu; /* IOMMU Core code handle */ member
463 struct tegra_smmu *smmu = dev->archdata.iommu; in tegra_smmu_attach_dev()
736 const struct iommu_ops *ops = smmu->iommu.ops; in tegra_smmu_configure()
779 dev->archdata.iommu = smmu; in tegra_smmu_add_device()
781 iommu_device_link(&smmu->iommu, dev); in tegra_smmu_add_device()
804 struct tegra_smmu *smmu = dev->archdata.iommu; in tegra_smmu_remove_device()
807 iommu_device_unlink(&smmu->iommu, dev); in tegra_smmu_remove_device()
809 dev->archdata.iommu = NULL; in tegra_smmu_remove_device()
869 struct tegra_smmu *smmu = dev->archdata.iommu; in tegra_smmu_device_group()
1061 err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev)); in tegra_smmu_probe()
[all …]
amd_iommu_types.h
429 #define for_each_iommu(iommu) \ argument
430 list_for_each_entry((iommu), &amd_iommu_list, list)
431 #define for_each_iommu_safe(iommu, next) \ argument
432 list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
564 struct iommu_device iommu; member
607 struct iommu_device *iommu = dev_to_iommu_device(dev); in dev_to_amd_iommu() local
609 return container_of(iommu, struct amd_iommu, iommu); in dev_to_amd_iommu()
759 extern void iommu_flush_all_caches(struct amd_iommu *iommu);
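for_each_iommu() above is a thin wrapper over list_for_each_entry() on the global amd_iommu_list; the _safe variant additionally tolerates removal while iterating. A one-line usage sketch pairing it with the flush helper declared on the last line (the surrounding function is hypothetical):

static void example_flush_all(void)
{
	struct amd_iommu *iommu;

	/* Expands to list_for_each_entry(iommu, &amd_iommu_list, list). */
	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);
}
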
/drivers/gpu/drm/msm/
msm_iommu.c
19 struct msm_iommu *iommu = arg; in msm_fault_handler() local
20 if (iommu->base.handler) in msm_fault_handler()
21 return iommu->base.handler(iommu->base.arg, iova, flags); in msm_fault_handler()
29 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_attach() local
31 return iommu_attach_device(iommu->domain, mmu->dev); in msm_iommu_attach()
37 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_detach() local
39 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
45 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_map() local
48 ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); in msm_iommu_map()
56 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_unmap() local
[all …]
/drivers/vfio/
vfio_iommu_type1.c
124 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ argument
125 (!list_empty(&iommu->domain_list))
134 static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, in vfio_find_dma() argument
137 struct rb_node *node = iommu->dma_list.rb_node; in vfio_find_dma()
153 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) in vfio_link_dma() argument
155 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; in vfio_link_dma()
169 rb_insert_color(&new->node, &iommu->dma_list); in vfio_link_dma()
172 static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old) in vfio_unlink_dma() argument
174 rb_erase(&old->node, &iommu->dma_list); in vfio_unlink_dma()
546 struct vfio_iommu *iommu = iommu_data; in vfio_iommu_type1_pin_pages() local
[all …]
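
vfio_find_dma() and vfio_link_dma() above are the two halves of the standard <linux/rbtree.h> idiom: a search that descends by comparing [iova, iova+size) ranges, and an insert that walks to an empty leaf link, then calls rb_link_node() and rb_insert_color(). A self-contained sketch with simplified stand-in types:

#include <linux/rbtree.h>
#include <linux/types.h>

struct example_dma {		/* simplified stand-in for struct vfio_dma */
	struct rb_node node;
	dma_addr_t iova;
	size_t size;
};

/* Search: descend until a tracked range overlaps [start, start + size). */
static struct example_dma *example_find_dma(struct rb_root *root,
					    dma_addr_t start, size_t size)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct example_dma *dma = rb_entry(node, struct example_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}
	return NULL;
}

/* Insert: find the empty link for the new range, link it, and rebalance. */
static void example_link_dma(struct rb_root *root, struct example_dma *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct example_dma *dma = rb_entry(*link, struct example_dma, node);

		parent = *link;
		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
}
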
/drivers/gpu/drm/nouveau/nvkm/engine/device/
tegra.c
124 mutex_init(&tdev->iommu.mutex); in nvkm_device_tegra_probe_iommu()
127 tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); in nvkm_device_tegra_probe_iommu()
128 if (!tdev->iommu.domain) in nvkm_device_tegra_probe_iommu()
136 pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; in nvkm_device_tegra_probe_iommu()
138 tdev->iommu.pgshift = PAGE_SHIFT; in nvkm_device_tegra_probe_iommu()
140 tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK); in nvkm_device_tegra_probe_iommu()
141 if (tdev->iommu.pgshift == 0) { in nvkm_device_tegra_probe_iommu()
145 tdev->iommu.pgshift -= 1; in nvkm_device_tegra_probe_iommu()
148 ret = iommu_attach_device(tdev->iommu.domain, dev); in nvkm_device_tegra_probe_iommu()
152 ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0, in nvkm_device_tegra_probe_iommu()
[all …]
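
nvkm_device_tegra_probe_iommu() above shows the consumer-side sequence: allocate an unmanaged domain on the platform bus, sanity-check the page sizes the IOMMU advertises via the ops' pgsize_bitmap, then attach the device. A condensed sketch under this era's API (the original's pgshift bookkeeping and error paths are abbreviated; names are mine):

#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/platform_device.h>

static int example_probe_iommu(struct device *dev, struct iommu_domain **out)
{
	struct iommu_domain *domain;
	unsigned long pgsize_bitmap;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	/* pgsize_bitmap advertises the page sizes this IOMMU can map; bail
	 * out if neither PAGE_SIZE nor anything smaller is supported. */
	pgsize_bitmap = domain->ops->pgsize_bitmap;
	if (!(pgsize_bitmap & PAGE_SIZE) && !(pgsize_bitmap & ~PAGE_MASK)) {
		iommu_domain_free(domain);
		return -EINVAL;
	}

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	*out = domain;
	return 0;
}
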
/drivers/media/platform/qcom/venus/
firmware.c
127 struct iommu_domain *iommu; in venus_boot_no_tz() local
135 iommu = core->fw.iommu_domain; in venus_boot_no_tz()
138 ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size, in venus_boot_no_tz()
153 struct iommu_domain *iommu; in venus_shutdown_no_tz() local
167 iommu = core->fw.iommu_domain; in venus_shutdown_no_tz()
169 unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped); in venus_shutdown_no_tz()
278 struct iommu_domain *iommu; in venus_firmware_deinit() local
283 iommu = core->fw.iommu_domain; in venus_firmware_deinit()
285 iommu_detach_device(iommu, core->fw.dev); in venus_firmware_deinit()
286 iommu_domain_free(iommu); in venus_firmware_deinit()
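venus_boot_no_tz()/venus_shutdown_no_tz() above pair every iommu_map() of the firmware region with an iommu_unmap() of exactly the size that was mapped. A sketch of that pairing; the IOVA constant and names are illustrative, and the prot flags mirror what a privileged firmware mapping typically requests:

#include <linux/bug.h>
#include <linux/iommu.h>

#define EXAMPLE_FW_IOVA	0x0	/* illustrative; venus uses VENUS_FW_START_ADDR */

static int example_fw_map(struct iommu_domain *domain,
			  phys_addr_t mem_phys, size_t mem_size)
{
	return iommu_map(domain, EXAMPLE_FW_IOVA, mem_phys, mem_size,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
}

static void example_fw_unmap(struct iommu_domain *domain, size_t mapped)
{
	/* iommu_unmap() returns how much it actually tore down. */
	size_t unmapped = iommu_unmap(domain, EXAMPLE_FW_IOVA, mapped);

	WARN_ON(unmapped != mapped);
}
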
/drivers/of/
device.c
95 const struct iommu_ops *iommu; in of_dma_configure() local
162 iommu = of_iommu_configure(dev, np); in of_dma_configure()
163 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER) in of_dma_configure()
167 iommu ? " " : " not "); in of_dma_configure()
169 arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); in of_dma_configure()
