Searched refs:iommu (Results 1 – 25 of 81) sorted by relevance


/drivers/iommu/amd/
init.c
267 bool translation_pre_enabled(struct amd_iommu *iommu) in translation_pre_enabled() argument
269 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
272 static void clear_translation_pre_enabled(struct amd_iommu *iommu) in clear_translation_pre_enabled() argument
274 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
277 static void init_translation_status(struct amd_iommu *iommu) in init_translation_status() argument
281 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
283 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
309 struct amd_iommu *iommu; in check_feature_on_all_iommus() local
311 for_each_iommu(iommu) { in check_feature_on_all_iommus()
312 ret = iommu_feature(iommu, mask); in check_feature_on_all_iommus()
[all …]
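
The init.c hits show the driver latching into a software flag, once at init, whether firmware left translation enabled, by reading the MMIO control register. A minimal sketch of that pattern, with illustrative offsets and a demo_iommu stand-in for struct amd_iommu:

#include <linux/io.h>
#include <linux/bits.h>

#define DEMO_MMIO_CONTROL_OFFSET        0x0018          /* illustrative offset */
#define DEMO_CONTROL_IOMMU_EN           BIT_ULL(0)      /* illustrative enable bit */
#define DEMO_FLAG_TRANS_PRE_ENABLED     BIT(0)

struct demo_iommu {                     /* stand-in for struct amd_iommu */
        void __iomem *mmio_base;
        u32 flags;
};

/* Record once, at init, whether firmware already turned translation on. */
static void demo_init_translation_status(struct demo_iommu *iommu)
{
        u64 ctrl = readq(iommu->mmio_base + DEMO_MMIO_CONTROL_OFFSET);

        if (ctrl & DEMO_CONTROL_IOMMU_EN)
                iommu->flags |= DEMO_FLAG_TRANS_PRE_ENABLED;
}

static bool demo_translation_pre_enabled(struct demo_iommu *iommu)
{
        return iommu->flags & DEMO_FLAG_TRANS_PRE_ENABLED;
}
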
iommu.c
231 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in find_dev_data() local
240 if (translation_pre_enabled(iommu)) in find_dev_data()
343 struct amd_iommu *iommu; in iommu_init_device() local
345 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
346 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
502 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
504 struct device *dev = iommu->iommu.dev; in iommu_print_event()
591 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
595 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
596 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
[all …]
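
iommu_poll_events() drains a ring buffer whose tail the hardware advances and whose head software advances; the modulo keeps the head inside the buffer. A sketch under the same layout assumptions (the entry size, buffer size, and evt_buf field are illustrative):

#include <linux/io.h>

#define DEMO_EVT_HEAD_OFFSET    0x2010  /* illustrative MMIO offsets */
#define DEMO_EVT_TAIL_OFFSET    0x2018
#define DEMO_EVENT_ENTRY_SIZE   16
#define DEMO_EVT_BUFFER_SIZE    8192

struct demo_evt_iommu {
        void __iomem *mmio_base;
        u8 *evt_buf;                    /* CPU-visible event log */
};

static void demo_handle_event(struct demo_evt_iommu *iommu, void *evt) { }

static void demo_poll_events(struct demo_evt_iommu *iommu)
{
        u32 head = readl(iommu->mmio_base + DEMO_EVT_HEAD_OFFSET);
        u32 tail = readl(iommu->mmio_base + DEMO_EVT_TAIL_OFFSET);

        while (head != tail) {
                demo_handle_event(iommu, iommu->evt_buf + head);
                head = (head + DEMO_EVENT_ENTRY_SIZE) % DEMO_EVT_BUFFER_SIZE;
        }

        /* Tell the hardware how far we consumed. */
        writel(head, iommu->mmio_base + DEMO_EVT_HEAD_OFFSET);
}
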
amd_iommu.h
17 extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
18 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
25 void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
27 static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} in amd_iommu_debugfs_setup() argument
47 extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
49 extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
68 extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
70 static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
89 static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask) in iommu_feature() argument
91 return !!(iommu->features & mask); in iommu_feature()
[all …]
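
iommu_feature() reduces capability queries to a mask test against a feature word cached at probe time, so hot paths never re-read the hardware register. Self-contained sketch:

#include <linux/types.h>

struct demo_feat_iommu {
        u64 features;           /* cached copy of the feature register */
};

static inline bool demo_iommu_feature(struct demo_feat_iommu *iommu, u64 mask)
{
        return !!(iommu->features & mask);
}

check_feature_on_all_iommus() in init.c then only has to loop over the instances with for_each_iommu() and AND the per-IOMMU results together.
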
/drivers/iommu/intel/
irq_remapping.c
33 struct intel_iommu *iommu; member
40 struct intel_iommu *iommu; member
47 struct intel_iommu *iommu; member
83 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
86 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
88 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
91 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
93 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
96 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
100 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
[all …]
iommu.c
241 static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in context_copied() argument
243 if (!iommu->copied_tables) in context_copied()
246 return test_bit(((long)bus << 8) | devfn, iommu->copied_tables); in context_copied()
250 set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in set_context_copied() argument
252 set_bit(((long)bus << 8) | devfn, iommu->copied_tables); in set_context_copied()
256 clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_copied() argument
258 clear_bit(((long)bus << 8) | devfn, iommu->copied_tables); in clear_context_copied()
295 struct intel_iommu *iommu; /* the corresponding iommu */ member
380 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
382 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
[all …]
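
context_copied() and its set/clear helpers key a bitmap by (bus << 8) | devfn, one bit per possible PCI function, to remember which context entries were inherited from firmware. A self-contained sketch of that indexing:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_NR_FUNCS   (256 * 256)     /* 256 buses x 256 devfns */

static unsigned long *demo_copied_tables;

static int demo_copied_init(void)
{
        demo_copied_tables = bitmap_zalloc(DEMO_NR_FUNCS, GFP_KERNEL);
        return demo_copied_tables ? 0 : -ENOMEM;
}

static bool demo_context_copied(u8 bus, u8 devfn)
{
        if (!demo_copied_tables)
                return false;
        return test_bit(((long)bus << 8) | devfn, demo_copied_tables);
}

static void demo_set_context_copied(u8 bus, u8 devfn)
{
        set_bit(((long)bus << 8) | devfn, demo_copied_tables);
}
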
dmar.c
67 static void free_iommu(struct intel_iommu *iommu);
462 if (dmaru->iommu) in dmar_free_drhd()
463 free_iommu(dmaru->iommu); in dmar_free_drhd()
502 drhd->iommu->node = node; in dmar_parse_one_rhsa()
939 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
954 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
956 iounmap(iommu->reg); in unmap_iommu()
957 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
968 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) in map_iommu() argument
972 iommu->reg_phys = phys_addr; in map_iommu()
[all …]
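
map_iommu()/unmap_iommu() pair request_mem_region() with ioremap() and release both in reverse on teardown or on a failed map. A condensed sketch (the window size is illustrative):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/sizes.h>

struct demo_dmar {
        void __iomem *reg;
        u64 reg_phys;
        u64 reg_size;
};

static int demo_map_iommu(struct demo_dmar *iommu, u64 phys_addr)
{
        iommu->reg_phys = phys_addr;
        iommu->reg_size = SZ_4K;        /* illustrative register window */

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, "demo-dmar"))
                return -EBUSY;

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                return -ENOMEM;
        }
        return 0;
}

static void demo_unmap_iommu(struct demo_dmar *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}
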
cap_audit.c
75 static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type) in cap_audit_hotplug() argument
82 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK); in cap_audit_hotplug()
83 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK); in cap_audit_hotplug()
87 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK); in cap_audit_hotplug()
88 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK); in cap_audit_hotplug()
89 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK); in cap_audit_hotplug()
90 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK); in cap_audit_hotplug()
91 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK); in cap_audit_hotplug()
92 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK); in cap_audit_hotplug()
93 CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK); in cap_audit_hotplug()
[all …]
svm.c
85 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
94 iommu->name); in intel_svm_enable_prq()
97 iommu->prq = page_address(pages); in intel_svm_enable_prq()
99 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
102 iommu->name); in intel_svm_enable_prq()
106 iommu->pr_irq = irq; in intel_svm_enable_prq()
108 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
109 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
110 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
112 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
[all …]
debugfs.c
117 struct intel_iommu *iommu; in iommu_regset_show() local
123 for_each_active_iommu(iommu, drhd) { in iommu_regset_show()
131 iommu->name, drhd->reg_base_addr); in iommu_regset_show()
137 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_regset_show()
139 value = dmar_readl(iommu->reg + iommu_regs_32[i].offset); in iommu_regset_show()
145 value = dmar_readq(iommu->reg + iommu_regs_64[i].offset); in iommu_regset_show()
150 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_regset_show()
218 static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus) in ctx_tbl_walk() argument
240 context = iommu_context_addr(iommu, bus, devfn, 0); in ctx_tbl_walk()
249 tbl_wlk.rt_entry = &iommu->root_entry[bus]; in ctx_tbl_walk()
[all …]
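
iommu_regset_show() takes register_lock with interrupts off for the whole dump so the snapshot is self-consistent. A sketch of that bracketing (the register table and its field names are illustrative):

#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct demo_reg {
        const char *name;
        unsigned int offset;
};

struct demo_dbg_iommu {
        void __iomem *reg;
        raw_spinlock_t register_lock;
};

static void demo_dump_regs(struct seq_file *m, struct demo_dbg_iommu *iommu,
                           const struct demo_reg *regs, int n)
{
        unsigned long flag;
        int i;

        /* IRQs off + raw lock: the reads form one coherent snapshot. */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        for (i = 0; i < n; i++)
                seq_printf(m, "%-16s\t0x%08x\n", regs[i].name,
                           readl(iommu->reg + regs[i].offset));
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
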
pasid.c
29 int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid) in vcmd_alloc_pasid() argument
36 raw_spin_lock_irqsave(&iommu->register_lock, flags); in vcmd_alloc_pasid()
37 dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC); in vcmd_alloc_pasid()
38 IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq, in vcmd_alloc_pasid()
40 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in vcmd_alloc_pasid()
48 pr_info("IOMMU: %s: No PASID available\n", iommu->name); in vcmd_alloc_pasid()
54 iommu->name, status_code); in vcmd_alloc_pasid()
60 void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid) in vcmd_free_pasid() argument
66 raw_spin_lock_irqsave(&iommu->register_lock, flags); in vcmd_free_pasid()
67 dmar_writeq(iommu->reg + DMAR_VCMD_REG, in vcmd_free_pasid()
[all …]
perf.c
18 bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type) in dmar_latency_enabled() argument
20 struct latency_statistic *lstat = iommu->perf_statistic; in dmar_latency_enabled()
25 int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type) in dmar_latency_enable() argument
31 if (dmar_latency_enabled(iommu, type)) in dmar_latency_enable()
35 if (!iommu->perf_statistic) { in dmar_latency_enable()
36 iommu->perf_statistic = kzalloc(sizeof(*lstat) * DMAR_LATENCY_NUM, in dmar_latency_enable()
38 if (!iommu->perf_statistic) { in dmar_latency_enable()
44 lstat = iommu->perf_statistic; in dmar_latency_enable()
57 void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type) in dmar_latency_disable() argument
59 struct latency_statistic *lstat = iommu->perf_statistic; in dmar_latency_disable()
[all …]
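
dmar_latency_enable() allocates the per-IOMMU statistics array only when tracing is first turned on, so untraced IOMMUs cost nothing. Sketch of that lazy path (kcalloc() is the idiomatic spelling of the kzalloc(sizeof(*lstat) * N) above; struct fields are illustrative):

#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_LATENCY_NUM        4       /* stands in for DMAR_LATENCY_NUM */

struct demo_latency_statistic {
        bool enabled;
};

struct demo_perf_iommu {
        struct demo_latency_statistic *perf_statistic;
};

static int demo_latency_enable(struct demo_perf_iommu *iommu, int type)
{
        /* Allocate lazily, on the first enable for this IOMMU. */
        if (!iommu->perf_statistic) {
                iommu->perf_statistic = kcalloc(DEMO_LATENCY_NUM,
                                                sizeof(*iommu->perf_statistic),
                                                GFP_KERNEL);
                if (!iommu->perf_statistic)
                        return -ENOMEM;
        }

        iommu->perf_statistic[type].enabled = true;
        return 0;
}
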
perf.h
39 int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type);
40 void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
41 bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
42 void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type,
44 int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
47 dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type) in dmar_latency_enable() argument
53 dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type) in dmar_latency_disable() argument
58 dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type) in dmar_latency_enabled() argument
64 dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 latency) in dmar_latency_update() argument
69 dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size) in dmar_latency_snapshot() argument
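
perf.h shows the standard kernel header pattern: real prototypes when the feature is built in, empty static inline stubs otherwise, so call sites compile unchanged with no #ifdef. Reconstructed from the lines above (the guard symbol is assumed to be CONFIG_DMAR_PERF):

#ifdef CONFIG_DMAR_PERF
int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type);
void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
#else
static inline int
dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
{
        return -EINVAL;
}

static inline void
dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type)
{
}

static inline bool
dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type)
{
        return false;
}
#endif
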
pasid.h
112 int intel_pasid_setup_first_level(struct intel_iommu *iommu,
115 int intel_pasid_setup_second_level(struct intel_iommu *iommu,
118 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
121 int intel_pasid_setup_nested(struct intel_iommu *iommu,
125 void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
128 int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
129 void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
cap_audit.h
85 #define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \ argument
88 min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
93 #define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \ argument
96 (cap##_##feature((iommu)->cap))) \
99 (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
131 int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
/drivers/iommu/
sun50i-iommu.c
97 struct iommu_device iommu; member
122 struct sun50i_iommu *iommu; member
135 static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset) in iommu_read() argument
137 return readl(iommu->base + offset); in iommu_read()
140 static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value) in iommu_write() argument
142 writel(value, iommu->base + offset); in iommu_write()
291 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_table_flush() local
295 dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE); in sun50i_table_flush()
298 static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu) in sun50i_iommu_flush_all_tlb() argument
303 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_flush_all_tlb()
[all …]
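
sun50i_iommu_flush_all_tlb() does not take iommu_lock itself; it asserts the caller already holds it, documenting the locking contract and letting lockdep enforce it. Sketch with illustrative register details:

#include <linux/io.h>
#include <linux/spinlock.h>

#define DEMO_TLB_FLUSH_REG      0x0080  /* illustrative offset and value below */

struct demo_sun50i {
        void __iomem *base;
        spinlock_t iommu_lock;
};

/* Precondition: caller holds iommu_lock. We assert, never acquire. */
static void demo_flush_all_tlb(struct demo_sun50i *iommu)
{
        assert_spin_locked(&iommu->iommu_lock);
        writel(0x3f, iommu->base + DEMO_TLB_FLUSH_REG);
}
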
rockchip-iommu.c
112 struct iommu_device iommu; member
120 struct rk_iommu *iommu; member
343 static void rk_iommu_command(struct rk_iommu *iommu, u32 command) in rk_iommu_command() argument
347 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_command()
348 writel(command, iommu->bases[i] + RK_MMU_COMMAND); in rk_iommu_command()
355 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, in rk_iommu_zap_lines() argument
364 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_zap_lines()
368 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
372 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) in rk_iommu_is_stall_active() argument
377 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_stall_active()
[all …]
msm_iommu.c
54 static int __enable_clocks(struct msm_iommu_dev *iommu) in __enable_clocks() argument
58 ret = clk_enable(iommu->pclk); in __enable_clocks()
62 if (iommu->clk) { in __enable_clocks()
63 ret = clk_enable(iommu->clk); in __enable_clocks()
65 clk_disable(iommu->pclk); in __enable_clocks()
71 static void __disable_clocks(struct msm_iommu_dev *iommu) in __disable_clocks() argument
73 if (iommu->clk) in __disable_clocks()
74 clk_disable(iommu->clk); in __disable_clocks()
75 clk_disable(iommu->pclk); in __disable_clocks()
120 struct msm_iommu_dev *iommu = NULL; in __flush_iotlb() local
[all …]
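
__enable_clocks()/__disable_clocks() order the optional core clock after the mandatory interface clock and unwind on failure; disable runs in reverse. Sketch:

#include <linux/clk.h>

struct demo_msm_iommu {
        struct clk *pclk;       /* mandatory interface clock */
        struct clk *clk;        /* optional core clock, may be NULL */
};

static int demo_enable_clocks(struct demo_msm_iommu *iommu)
{
        int ret = clk_enable(iommu->pclk);

        if (ret)
                return ret;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);       /* unwind the first clock */
        }
        return ret;
}

static void demo_disable_clocks(struct demo_msm_iommu *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);        /* reverse order of enable */
        clk_disable(iommu->pclk);
}
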
iommu-sysfs.c
54 int iommu_device_sysfs_add(struct iommu_device *iommu, in iommu_device_sysfs_add() argument
62 iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL); in iommu_device_sysfs_add()
63 if (!iommu->dev) in iommu_device_sysfs_add()
66 device_initialize(iommu->dev); in iommu_device_sysfs_add()
68 iommu->dev->class = &iommu_class; in iommu_device_sysfs_add()
69 iommu->dev->parent = parent; in iommu_device_sysfs_add()
70 iommu->dev->groups = groups; in iommu_device_sysfs_add()
73 ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs); in iommu_device_sysfs_add()
78 ret = device_add(iommu->dev); in iommu_device_sysfs_add()
82 dev_set_drvdata(iommu->dev, iommu); in iommu_device_sysfs_add()
[all …]
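
iommu_device_sysfs_add() allocates and registers a class device for the IOMMU instance; the omap-iommu.c hit below shows a real call site. A hedged usage sketch (the driver struct and format string are illustrative):

#include <linux/device.h>
#include <linux/iommu.h>

struct demo_drv {
        struct iommu_device iommu;      /* embedded, as in the drivers above */
        struct device *dev;
};

static int demo_register_sysfs(struct demo_drv *d)
{
        /* Pairs with iommu_device_sysfs_remove() on the teardown path. */
        return iommu_device_sysfs_add(&d->iommu, d->dev, NULL, "demo-iommu.%s",
                                      dev_name(d->dev));
}
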
Makefile
3 obj-$(CONFIG_IOMMU_API) += iommu.o
4 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
5 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
6 obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
7 obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
19 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
20 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
21 obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
22 obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
25 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
[all …]
omap-iommu.c
952 struct omap_iommu_device *iommu; in omap_iommu_domain_deactivate() local
959 iommu = omap_domain->iommus; in omap_iommu_domain_deactivate()
960 iommu += (omap_domain->num_iommus - 1); in omap_iommu_domain_deactivate()
961 for (i = 0; i < omap_domain->num_iommus; i++, iommu--) { in omap_iommu_domain_deactivate()
962 oiommu = iommu->iommu_dev; in omap_iommu_domain_deactivate()
981 struct omap_iommu_device *iommu; in omap_iommu_domain_activate() local
988 iommu = omap_domain->iommus; in omap_iommu_domain_activate()
989 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { in omap_iommu_domain_activate()
990 oiommu = iommu->iommu_dev; in omap_iommu_domain_activate()
1232 err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, in omap_iommu_probe()
[all …]
/drivers/vfio/
vfio_iommu_type1.c
145 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ argument
146 (!list_empty(&iommu->domain_list))
166 vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
174 static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, in vfio_find_dma() argument
177 struct rb_node *node = iommu->dma_list.rb_node; in vfio_find_dma()
193 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, in vfio_find_dma_first_node() argument
197 struct rb_node *node = iommu->dma_list.rb_node; in vfio_find_dma_first_node()
218 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) in vfio_link_dma() argument
220 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; in vfio_link_dma()
234 rb_insert_color(&new->node, &iommu->dma_list); in vfio_link_dma()
[all …]
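
vfio_find_dma() walks a red-black tree of IOVA ranges, descending left or right while the query range falls entirely before or after a node and stopping at the first overlap. Sketch mirroring that lookup:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_dma {                       /* mirrors the shape of struct vfio_dma */
        struct rb_node node;
        dma_addr_t iova;
        size_t size;
};

static struct demo_dma *demo_find_dma(struct rb_root *root,
                                      dma_addr_t start, size_t size)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct demo_dma *dma = rb_entry(node, struct demo_dma, node);

                if (start + size <= dma->iova)
                        node = node->rb_left;   /* query ends before this range */
                else if (start >= dma->iova + dma->size)
                        node = node->rb_right;  /* query starts after this range */
                else
                        return dma;             /* ranges overlap */
        }
        return NULL;
}
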
/drivers/gpu/drm/msm/
msm_iommu.c
85 struct msm_iommu *iommu = to_msm_iommu(pagetable->parent); in msm_iommu_pagetable_destroy() local
93 if (atomic_dec_return(&iommu->pagetables) == 0) in msm_iommu_pagetable_destroy()
151 struct msm_iommu *iommu = to_msm_iommu(parent); in msm_iommu_pagetable_create() local
173 iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu); in msm_iommu_pagetable_create()
190 &ttbr0_cfg, iommu->domain); in msm_iommu_pagetable_create()
201 if (atomic_inc_return(&iommu->pagetables) == 1) { in msm_iommu_pagetable_create()
231 struct msm_iommu *iommu = arg; in msm_fault_handler() local
232 struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev); in msm_fault_handler()
240 if (iommu->base.handler) in msm_fault_handler()
241 return iommu->base.handler(iommu->base.arg, iova, flags, ptr); in msm_fault_handler()
[all …]
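
msm_iommu_pagetable_create()/destroy() gate one-time hardware setup on an atomic counter: the first pagetable programs the SMMU, the last one tears the state back down. Sketch with illustrative enable/disable helpers:

#include <linux/atomic.h>

struct demo_gpu_iommu {
        atomic_t pagetables;
};

static void demo_hw_enable(struct demo_gpu_iommu *iommu) { /* illustrative */ }
static void demo_hw_disable(struct demo_gpu_iommu *iommu) { /* illustrative */ }

static void demo_pagetable_create(struct demo_gpu_iommu *iommu)
{
        /* First pagetable in: program the hardware exactly once. */
        if (atomic_inc_return(&iommu->pagetables) == 1)
                demo_hw_enable(iommu);
}

static void demo_pagetable_destroy(struct demo_gpu_iommu *iommu)
{
        /* Last pagetable out: restore the original hardware state. */
        if (atomic_dec_return(&iommu->pagetables) == 0)
                demo_hw_disable(iommu);
}
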
/drivers/gpu/drm/nouveau/nvkm/engine/device/
tegra.c
124 mutex_init(&tdev->iommu.mutex); in nvkm_device_tegra_probe_iommu()
127 tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); in nvkm_device_tegra_probe_iommu()
128 if (!tdev->iommu.domain) in nvkm_device_tegra_probe_iommu()
136 pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; in nvkm_device_tegra_probe_iommu()
138 tdev->iommu.pgshift = PAGE_SHIFT; in nvkm_device_tegra_probe_iommu()
140 tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK); in nvkm_device_tegra_probe_iommu()
141 if (tdev->iommu.pgshift == 0) { in nvkm_device_tegra_probe_iommu()
145 tdev->iommu.pgshift -= 1; in nvkm_device_tegra_probe_iommu()
148 ret = iommu_attach_device(tdev->iommu.domain, dev); in nvkm_device_tegra_probe_iommu()
152 ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0, in nvkm_device_tegra_probe_iommu()
[all …]
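
The tegra.c probe derives a page shift from the domain's pgsize_bitmap: if the CPU page size is supported directly it uses PAGE_SHIFT; otherwise it takes the largest supported size below it, and since fls() is 1-based the result needs a final decrement. Sketch:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int demo_pick_pgshift(unsigned long pgsize_bitmap, unsigned int *pgshift)
{
        if (pgsize_bitmap & PAGE_SIZE) {
                *pgshift = PAGE_SHIFT;  /* CPU page size works as-is */
                return 0;
        }

        /* Highest supported size below PAGE_SIZE; fls(BIT(n)) == n + 1. */
        *pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
        if (*pgshift == 0)
                return -EINVAL;         /* no usable page size at all */
        *pgshift -= 1;
        return 0;
}
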
/drivers/media/platform/qcom/venus/
firmware.c
134 struct iommu_domain *iommu; in venus_boot_no_tz() local
142 iommu = core->fw.iommu_domain; in venus_boot_no_tz()
145 ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size, in venus_boot_no_tz()
160 struct iommu_domain *iommu; in venus_shutdown_no_tz() local
174 iommu = core->fw.iommu_domain; in venus_shutdown_no_tz()
176 if (core->fw.mapped_mem_size && iommu) { in venus_shutdown_no_tz()
177 unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped); in venus_shutdown_no_tz()
316 struct iommu_domain *iommu; in venus_firmware_deinit() local
321 iommu = core->fw.iommu_domain; in venus_firmware_deinit()
323 iommu_detach_device(iommu, core->fw.dev); in venus_firmware_deinit()
[all …]
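
venus_boot_no_tz()/venus_shutdown_no_tz() bracket the firmware region with iommu_map()/iommu_unmap() at a fixed IOVA. A hedged sketch (the prot flags and the fixed address are assumptions, and iommu_map() gained a gfp argument in later kernels; this uses the classic five-argument form):

#include <linux/iommu.h>

#define DEMO_FW_IOVA    0x0     /* stands in for VENUS_FW_START_ADDR */

static int demo_map_fw(struct iommu_domain *domain, phys_addr_t mem_phys,
                       size_t mem_size)
{
        return iommu_map(domain, DEMO_FW_IOVA, mem_phys, mem_size,
                         IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
}

static size_t demo_unmap_fw(struct iommu_domain *domain, size_t mapped)
{
        /* Returns how many bytes were actually unmapped. */
        return iommu_unmap(domain, DEMO_FW_IOVA, mapped);
}
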
/drivers/iommu/arm/arm-smmu/
qcom_iommu.c
49 struct iommu_device iommu; member
71 struct qcom_iommu_dev *iommu; member
94 struct qcom_iommu_dev *qcom_iommu = d->iommu; in to_ctx()
238 if (qcom_domain->iommu) in qcom_iommu_init_domain()
249 qcom_domain->iommu = qcom_iommu; in qcom_iommu_init_domain()
323 qcom_domain->iommu = NULL; in qcom_iommu_init_domain()
354 if (qcom_domain->iommu) { in qcom_iommu_domain_free()
361 pm_runtime_get_sync(qcom_domain->iommu->dev); in qcom_iommu_domain_free()
363 pm_runtime_put_sync(qcom_domain->iommu->dev); in qcom_iommu_domain_free()
391 if (qcom_domain->iommu != qcom_iommu) { in qcom_iommu_attach_dev()
[all …]
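
qcom_iommu_domain_free() brackets register access with pm_runtime_get_sync()/pm_runtime_put_sync(), since the IOMMU block may be power-gated between operations. Sketch:

#include <linux/pm_runtime.h>

static void demo_domain_free(struct device *iommu_dev)
{
        pm_runtime_get_sync(iommu_dev);         /* make sure the block is powered */
        /* ... free page tables, reset context banks ... */
        pm_runtime_put_sync(iommu_dev);
}
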
