Lines matching refs:iommu (identifier cross-reference hits from the Intel VT-d interrupt-remapping driver; each entry gives the source line number, the matched code, and the enclosing function or context)
32 struct intel_iommu *iommu; member
39 struct intel_iommu *iommu; member
46 struct intel_iommu *iommu; member
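The three "member" hits at source lines 32, 39, and 46 are the back-pointers that tie each tracked object to the IOMMU that remaps its interrupts. In the mainline file these most likely belong to ioapic_scope, hpet_scope, and irq_2_iommu, in that order; a sketch of their layout (field details vary by kernel version):

    struct ioapic_scope {
        struct intel_iommu *iommu;  /* IOMMU covering this IO-APIC */
        unsigned int id;            /* IO-APIC id */
        unsigned int bus;           /* PCI bus number */
        unsigned int devfn;         /* PCI dev/fn number */
    };

    struct hpet_scope {
        struct intel_iommu *iommu;  /* IOMMU covering this HPET block */
        u8 id;
        unsigned int bus;
        unsigned int devfn;
    };

    struct irq_2_iommu {
        struct intel_iommu *iommu;  /* IOMMU whose table holds the IRTE */
        u16 irte_index;             /* base index into ir_table */
        u16 sub_handle;             /* offset within a multi-MSI block */
        u8  irte_mask;              /* log2 of the allocated region size */
        enum irq_mode mode;         /* IRQ_REMAPPING or IRQ_POSTING */
    };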
82 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
85 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
87 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
90 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
92 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
95 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
99 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
101 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in init_ir_status()
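Reconstructed from the fragments above (mainline has the same shape), init_ir_status() latches whether firmware or a previous kernel left remapping enabled, by checking the IRES bit of the global status register:

    static void init_ir_status(struct intel_iommu *iommu)
    {
        u32 gsts;

        gsts = readl(iommu->reg + DMAR_GSTS_REG);
        if (gsts & DMA_GSTS_IRES)
            iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
    }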
104 static int alloc_irte(struct intel_iommu *iommu, in alloc_irte() argument
107 struct ir_table *table = iommu->ir_table; in alloc_irte()
120 if (mask > ecap_max_handle_mask(iommu->ecap)) { in alloc_irte()
123 ecap_max_handle_mask(iommu->ecap)); in alloc_irte()
131 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); in alloc_irte()
133 irq_iommu->iommu = iommu; in alloc_irte()
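A condensed sketch of the allocation path the lines above are sampled from, assuming the mainline locking (irq_2_ir_lock) and bitmap allocator. Multi-MSI requests are rounded up to a power of two, and the resulting subhandle mask is bounded by the MHMV field of the extended capability register:

    static int alloc_irte(struct intel_iommu *iommu,
                          struct irq_2_iommu *irq_iommu, u16 count)
    {
        struct ir_table *table = iommu->ir_table;
        unsigned int mask = 0;
        unsigned long flags;
        int index;

        if (!count || !irq_iommu || !table)
            return -1;

        if (count > 1) {
            count = __roundup_pow_of_two(count);
            mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
            pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
                   mask, ecap_max_handle_mask(iommu->ecap));
            return -1;
        }

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
        index = bitmap_find_free_region(table->bitmap,
                                        INTR_REMAP_TABLE_ENTRIES, mask);
        if (index < 0) {
            pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
        } else {
            irq_iommu->iommu = iommu;
            irq_iommu->irte_index = index;
            irq_iommu->sub_handle = 0;
            irq_iommu->irte_mask = mask;
            irq_iommu->mode = IRQ_REMAPPING;
        }
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
    }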
144 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) in qi_flush_iec() argument
154 return qi_submit_sync(&desc, iommu); in qi_flush_iec()
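qi_flush_iec() invalidates the hardware's Interrupt Entry Cache through the queued-invalidation interface. A sketch using the 256-bit descriptor layout of newer kernels (older kernels fill desc.low/desc.high instead); the QI_IEC_* helpers come from the intel-iommu header:

    static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
    {
        struct qi_desc desc;

        desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                 | QI_IEC_SELECTIVE;
        desc.qw1 = 0;
        desc.qw2 = 0;
        desc.qw3 = 0;

        /* Blocks until the accompanying wait descriptor completes. */
        return qi_submit_sync(&desc, iommu);
    }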
160 struct intel_iommu *iommu; in modify_irte() local
170 iommu = irq_iommu->iommu; in modify_irte()
173 irte = &iommu->ir_table->base[index]; in modify_irte()
195 __iommu_flush_cache(iommu, irte, sizeof(*irte)); in modify_irte()
197 rc = qi_flush_iec(iommu, index, 0); in modify_irte()
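modify_irte() rewrites an entry in place under irq_2_ir_lock and then invalidates the cached copy. A sketch of the common path; mainline additionally takes a cmpxchg_double() branch when either the old or the new entry is in posted format, so the hardware never observes a torn 128-bit update:

    static int modify_irte(struct irq_2_iommu *irq_iommu,
                           struct irte *irte_modified)
    {
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
            return -1;

        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        /* Publish the new entry, then knock it out of the IEC. */
        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
    }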
211 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) in map_hpet_to_ir()
212 return ir_hpet[i].iommu; in map_hpet_to_ir()
221 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) in map_ioapic_to_ir()
222 return ir_ioapic[i].iommu; in map_ioapic_to_ir()
234 return drhd->iommu; in map_dev_to_ir()
240 struct intel_iommu *iommu; in clear_entries() local
246 iommu = irq_iommu->iommu; in clear_entries()
249 start = iommu->ir_table->base + index; in clear_entries()
256 bitmap_release_region(iommu->ir_table->bitmap, index, in clear_entries()
259 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); in clear_entries()
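clear_entries() is the teardown counterpart: it zeroes every IRTE in the allocated region (clearing the present bit), returns the region to the bitmap, and flushes the whole block from the IEC in one shot via the mask argument. A sketch following the mainline shape:

    static int clear_entries(struct irq_2_iommu *irq_iommu)
    {
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)  /* only the base handle frees the block */
            return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++)
            set_64bit(&entry->low, 0);  /* clears 'present' */

        bitmap_release_region(iommu->ir_table->bitmap, index,
                              irq_iommu->irte_mask);

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
    }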
319 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { in set_ioapic_sid()
346 if (ir_hpet[i].iommu && ir_hpet[i].id == id) { in set_hpet_sid()
432 static int iommu_load_old_irte(struct intel_iommu *iommu) in iommu_load_old_irte() argument
441 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); in iommu_load_old_irte()
455 memcpy(iommu->ir_table->base, old_ir_table, size); in iommu_load_old_irte()
457 __iommu_flush_cache(iommu, iommu->ir_table->base, size); in iommu_load_old_irte()
464 if (iommu->ir_table->base[i].present) in iommu_load_old_irte()
465 bitmap_set(iommu->ir_table->bitmap, i, 1); in iommu_load_old_irte()
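iommu_load_old_irte() is the kexec/kdump inheritance path: rather than install an empty table while the previous kernel's interrupt state may still be live, the new kernel copies the old table and marks its in-use entries as allocated. A sketch, assuming mainline's memremap-based copy:

    static int iommu_load_old_irte(struct intel_iommu *iommu)
    {
        struct irte *old_ir_table;
        phys_addr_t irt_phys;
        unsigned int i;
        size_t size;
        u64 irta;

        /* The old table must have the size we expect. */
        irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
        if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
             != INTR_REMAP_TABLE_REG_SIZE)
            return -EINVAL;

        irt_phys = irta & VTD_PAGE_MASK;
        size = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

        old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
        if (!old_ir_table)
            return -ENOMEM;

        memcpy(iommu->ir_table->base, old_ir_table, size);
        __iommu_flush_cache(iommu, iommu->ir_table->base, size);

        /* Reserve every inherited entry in the allocation bitmap. */
        for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++)
            if (iommu->ir_table->base[i].present)
                bitmap_set(iommu->ir_table->bitmap, i, 1);

        memunmap(old_ir_table);
        return 0;
    }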
474 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) in iommu_set_irq_remapping() argument
480 addr = virt_to_phys((void *)iommu->ir_table->base); in iommu_set_irq_remapping()
482 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
484 dmar_writeq(iommu->reg + DMAR_IRTA_REG, in iommu_set_irq_remapping()
488 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
490 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
492 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
498 qi_global_iec(iommu); in iommu_set_irq_remapping()
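iommu_set_irq_remapping() points the hardware at the table: it programs IRTA with the table's physical address, size, and EIM (x2APIC) mode, latches it with the SIRTP command, then globally invalidates the IEC. A sketch of the function body with the fields the listing elides:

    addr = virt_to_phys((void *)iommu->ir_table->base);

    raw_spin_lock_irqsave(&iommu->register_lock, flags);

    dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                addr | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

    /* Set Interrupt Remap Table Pointer */
    writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                  readl, (sts & DMA_GSTS_IRTPS), sts);
    raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

    /* Make sure the hardware drops any stale cached entries. */
    qi_global_iec(iommu);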
501 static void iommu_enable_irq_remapping(struct intel_iommu *iommu) in iommu_enable_irq_remapping() argument
506 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
509 iommu->gcmd |= DMA_GCMD_IRE; in iommu_enable_irq_remapping()
510 iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ in iommu_enable_irq_remapping()
511 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
513 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
526 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
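iommu_enable_irq_remapping() flips IRE on and CFI off in a single GCMD write, waits for the status bit, and (in mainline) warns if compatibility-format interrupts somehow remain enabled, since those bypass remapping entirely and permit interrupt injection. A sketch of the locked region:

    raw_spin_lock_irqsave(&iommu->register_lock, flags);

    /* Enable interrupt-remapping */
    iommu->gcmd |= DMA_GCMD_IRE;
    iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
    writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                  readl, (sts & DMA_GSTS_IRES), sts);

    /* Compatibility-format interrupts skip the IRTE checks entirely. */
    if (sts & DMA_GSTS_CFIS)
        WARN(1, "Compatibility-format IRQs enabled despite intr remapping;\n"
                "you are vulnerable to IRQ injection.\n");

    raw_spin_unlock_irqrestore(&iommu->register_lock, flags);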
529 static int intel_setup_irq_remapping(struct intel_iommu *iommu) in intel_setup_irq_remapping() argument
536 if (iommu->ir_table) in intel_setup_irq_remapping()
543 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, in intel_setup_irq_remapping()
547 iommu->seq_id, INTR_REMAP_PAGE_ORDER); in intel_setup_irq_remapping()
553 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); in intel_setup_irq_remapping()
557 fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); in intel_setup_irq_remapping()
561 iommu->ir_domain = in intel_setup_irq_remapping()
565 iommu); in intel_setup_irq_remapping()
567 if (!iommu->ir_domain) { in intel_setup_irq_remapping()
568 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); in intel_setup_irq_remapping()
571 iommu->ir_msi_domain = in intel_setup_irq_remapping()
572 arch_create_remap_msi_irq_domain(iommu->ir_domain, in intel_setup_irq_remapping()
574 iommu->seq_id); in intel_setup_irq_remapping()
578 iommu->ir_table = ir_table; in intel_setup_irq_remapping()
584 if (!iommu->qi) { in intel_setup_irq_remapping()
588 dmar_fault(-1, iommu); in intel_setup_irq_remapping()
589 dmar_disable_qi(iommu); in intel_setup_irq_remapping()
591 if (dmar_enable_qi(iommu)) { in intel_setup_irq_remapping()
597 init_ir_status(iommu); in intel_setup_irq_remapping()
599 if (ir_pre_enabled(iommu)) { in intel_setup_irq_remapping()
602 iommu->name); in intel_setup_irq_remapping()
603 clear_ir_pre_enabled(iommu); in intel_setup_irq_remapping()
604 iommu_disable_irq_remapping(iommu); in intel_setup_irq_remapping()
605 } else if (iommu_load_old_irte(iommu)) in intel_setup_irq_remapping()
607 iommu->name); in intel_setup_irq_remapping()
610 iommu->name); in intel_setup_irq_remapping()
613 iommu_set_irq_remapping(iommu, eim_mode); in intel_setup_irq_remapping()
624 iommu->ir_table = NULL; in intel_setup_irq_remapping()
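The intel_setup_irq_remapping() fragments above skip the resource allocation between them. A condensed sketch of that part, assuming mainline names (INTR_REMAP_PAGE_ORDER pages for the table itself, one bitmap bit per IRTE):

    struct ir_table *ir_table;
    struct page *pages;
    unsigned long *bitmap;

    ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
    if (!ir_table)
        return -ENOMEM;

    pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
                             INTR_REMAP_PAGE_ORDER);
    if (!pages) {
        pr_err("IR%d: failed to allocate pages of order %d\n",
               iommu->seq_id, INTR_REMAP_PAGE_ORDER);
        goto out_free_table;
    }

    bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
    if (!bitmap) {
        pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
        goto out_free_pages;
    }

    ir_table->base = page_address(pages);
    ir_table->bitmap = bitmap;
    iommu->ir_table = ir_table;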
629 static void intel_teardown_irq_remapping(struct intel_iommu *iommu) in intel_teardown_irq_remapping() argument
631 if (iommu && iommu->ir_table) { in intel_teardown_irq_remapping()
632 if (iommu->ir_msi_domain) { in intel_teardown_irq_remapping()
633 irq_domain_remove(iommu->ir_msi_domain); in intel_teardown_irq_remapping()
634 iommu->ir_msi_domain = NULL; in intel_teardown_irq_remapping()
636 if (iommu->ir_domain) { in intel_teardown_irq_remapping()
637 irq_domain_remove(iommu->ir_domain); in intel_teardown_irq_remapping()
638 iommu->ir_domain = NULL; in intel_teardown_irq_remapping()
640 free_pages((unsigned long)iommu->ir_table->base, in intel_teardown_irq_remapping()
642 bitmap_free(iommu->ir_table->bitmap); in intel_teardown_irq_remapping()
643 kfree(iommu->ir_table); in intel_teardown_irq_remapping()
644 iommu->ir_table = NULL; in intel_teardown_irq_remapping()
651 static void iommu_disable_irq_remapping(struct intel_iommu *iommu) in iommu_disable_irq_remapping() argument
656 if (!ecap_ir_support(iommu->ecap)) in iommu_disable_irq_remapping()
663 qi_global_iec(iommu); in iommu_disable_irq_remapping()
665 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
667 sts = readl(iommu->reg + DMAR_GSTS_REG); in iommu_disable_irq_remapping()
671 iommu->gcmd &= ~DMA_GCMD_IRE; in iommu_disable_irq_remapping()
672 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_irq_remapping()
674 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_irq_remapping()
678 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
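The disable path is the mirror image of enable; the listing elides the early exit when remapping is already off and the inverted wait condition. A sketch of the middle of the locked region:

    sts = readl(iommu->reg + DMAR_GSTS_REG);
    if (!(sts & DMA_GSTS_IRES))
        goto end;  /* hardware already has IRE clear, nothing to do */

    iommu->gcmd &= ~DMA_GCMD_IRE;
    writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

    IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                  readl, !(sts & DMA_GSTS_IRES), sts);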
693 struct intel_iommu *iommu; in intel_cleanup_irq_remapping() local
695 for_each_iommu(iommu, drhd) { in intel_cleanup_irq_remapping()
696 if (ecap_ir_support(iommu->ecap)) { in intel_cleanup_irq_remapping()
697 iommu_disable_irq_remapping(iommu); in intel_cleanup_irq_remapping()
698 intel_teardown_irq_remapping(iommu); in intel_cleanup_irq_remapping()
709 struct intel_iommu *iommu; in intel_prepare_irq_remapping() local
734 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
735 if (!ecap_ir_support(iommu->ecap)) in intel_prepare_irq_remapping()
747 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
748 if (eim && !ecap_eim_support(iommu->ecap)) { in intel_prepare_irq_remapping()
749 pr_info("%s does not support EIM\n", iommu->name); in intel_prepare_irq_remapping()
759 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
760 if (intel_setup_irq_remapping(iommu)) { in intel_prepare_irq_remapping()
762 iommu->name); in intel_prepare_irq_remapping()
780 struct intel_iommu *iommu; in set_irq_posting_cap() local
794 for_each_iommu(iommu, drhd) in set_irq_posting_cap()
795 if (!cap_pi_support(iommu->cap)) { in set_irq_posting_cap()
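set_irq_posting_cap() advertises posted-interrupt support only if every IOMMU in the system has the capability; a single unit without PI support clears the flag for all. A sketch based on the fragments above (mainline additionally gates on X86_FEATURE_CX16, since a posted-format IRTE spans a 64-bit boundary and needs cmpxchg16b for atomic updates):

    static void set_irq_posting_cap(void)
    {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        if (!disable_irq_post) {
            /* Posted IRTEs require atomic 128-bit updates. */
            if (boot_cpu_has(X86_FEATURE_CX16))
                intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

            for_each_iommu(iommu, drhd)
                if (!cap_pi_support(iommu->cap)) {
                    intel_irq_remap_ops.capability &=
                            ~(1 << IRQ_POSTING_CAP);
                    break;
                }
        }
    }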
806 struct intel_iommu *iommu; in intel_enable_irq_remapping() local
812 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
813 if (!ir_pre_enabled(iommu)) in intel_enable_irq_remapping()
814 iommu_enable_irq_remapping(iommu); in intel_enable_irq_remapping()
835 struct intel_iommu *iommu, in ir_parse_one_hpet_scope() argument
858 if (ir_hpet[count].iommu == iommu && in ir_parse_one_hpet_scope()
861 else if (ir_hpet[count].iommu == NULL && free == -1) in ir_parse_one_hpet_scope()
869 ir_hpet[free].iommu = iommu; in ir_parse_one_hpet_scope()
880 struct intel_iommu *iommu, in ir_parse_one_ioapic_scope() argument
903 if (ir_ioapic[count].iommu == iommu && in ir_parse_one_ioapic_scope()
906 else if (ir_ioapic[count].iommu == NULL && free == -1) in ir_parse_one_ioapic_scope()
916 ir_ioapic[free].iommu = iommu; in ir_parse_one_ioapic_scope()
919 scope->enumeration_id, drhd->address, iommu->seq_id); in ir_parse_one_ioapic_scope()
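ir_parse_one_ioapic_scope() records which IOMMU covers an IO-APIC enumerated in a DMAR device-scope entry. The part elided above walks the ACPI PCI path with raw config-space reads, because the PCI core is not initialized yet at this point; a sketch under mainline assumptions:

    struct acpi_dmar_pci_path *path;
    int count;
    u32 bus = scope->bus;

    path = (struct acpi_dmar_pci_path *)(scope + 1);
    count = (scope->length - sizeof(struct acpi_dmar_device_scope))
            / sizeof(struct acpi_dmar_pci_path);

    while (--count > 0) {
        /* PCI subsystem isn't up yet; read config space directly. */
        bus = read_pci_config_byte(bus, path->device, path->function,
                                   PCI_SECONDARY_BUS);
        path++;
    }

    /* The matching/free-slot scan shown above then fills ir_ioapic[],
     * recording bus/devfn so set_ioapic_sid() can build the source-id. */
    ir_ioapic[free].bus = bus;
    ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);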
925 struct intel_iommu *iommu) in ir_parse_ioapic_hpet_scope() argument
939 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
941 ret = ir_parse_one_hpet_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
948 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) in ir_remove_ioapic_hpet_scope() argument
953 if (ir_hpet[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
954 ir_hpet[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
957 if (ir_ioapic[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
958 ir_ioapic[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
968 struct intel_iommu *iommu; in parse_ioapics_under_ir() local
972 for_each_iommu(iommu, drhd) { in parse_ioapics_under_ir()
975 if (!ecap_ir_support(iommu->ecap)) in parse_ioapics_under_ir()
978 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu); in parse_ioapics_under_ir()
1019 struct intel_iommu *iommu = NULL; in disable_irq_remapping() local
1024 for_each_iommu(iommu, drhd) { in disable_irq_remapping()
1025 if (!ecap_ir_support(iommu->ecap)) in disable_irq_remapping()
1028 iommu_disable_irq_remapping(iommu); in disable_irq_remapping()
1042 struct intel_iommu *iommu = NULL; in reenable_irq_remapping() local
1044 for_each_iommu(iommu, drhd) in reenable_irq_remapping()
1045 if (iommu->qi) in reenable_irq_remapping()
1046 dmar_reenable_qi(iommu); in reenable_irq_remapping()
1051 for_each_iommu(iommu, drhd) { in reenable_irq_remapping()
1052 if (!ecap_ir_support(iommu->ecap)) in reenable_irq_remapping()
1056 iommu_set_irq_remapping(iommu, eim); in reenable_irq_remapping()
1057 iommu_enable_irq_remapping(iommu); in reenable_irq_remapping()
1097 struct intel_iommu *iommu = NULL; in intel_get_ir_irq_domain() local
1104 iommu = map_ioapic_to_ir(info->ioapic_id); in intel_get_ir_irq_domain()
1107 iommu = map_hpet_to_ir(info->hpet_id); in intel_get_ir_irq_domain()
1111 iommu = map_dev_to_ir(info->msi_dev); in intel_get_ir_irq_domain()
1118 return iommu ? iommu->ir_domain : NULL; in intel_get_ir_irq_domain()
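intel_get_ir_irq_domain() picks the remapping irqdomain for an allocation request by dispatching on the source type. A sketch of the switch the listing samples (enum names from arch/x86; this hook exists in pre-5.10 kernels):

    switch (info->type) {
    case X86_IRQ_ALLOC_TYPE_IOAPIC:
        iommu = map_ioapic_to_ir(info->ioapic_id);
        break;
    case X86_IRQ_ALLOC_TYPE_HPET:
        iommu = map_hpet_to_ir(info->hpet_id);
        break;
    case X86_IRQ_ALLOC_TYPE_MSI:
    case X86_IRQ_ALLOC_TYPE_MSIX:
        iommu = map_dev_to_ir(info->msi_dev);
        break;
    default:
        BUG_ON(1);
        break;
    }

    return iommu ? iommu->ir_domain : NULL;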
1123 struct intel_iommu *iommu; in intel_get_irq_domain() local
1131 iommu = map_dev_to_ir(info->msi_dev); in intel_get_irq_domain()
1132 if (iommu) in intel_get_irq_domain()
1133 return iommu->ir_msi_domain; in intel_get_irq_domain()
1347 struct intel_iommu *iommu = domain->host_data; in intel_irq_remapping_alloc() local
1354 if (!info || !iommu) in intel_irq_remapping_alloc()
1377 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs); in intel_irq_remapping_alloc()
1453 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) in dmar_ir_add() argument
1458 if (eim && !ecap_eim_support(iommu->ecap)) { in dmar_ir_add()
1460 iommu->reg_phys, iommu->ecap); in dmar_ir_add()
1464 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { in dmar_ir_add()
1466 iommu->reg_phys); in dmar_ir_add()
1473 ret = intel_setup_irq_remapping(iommu); in dmar_ir_add()
1476 iommu->name); in dmar_ir_add()
1477 intel_teardown_irq_remapping(iommu); in dmar_ir_add()
1478 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
1480 iommu_enable_irq_remapping(iommu); in dmar_ir_add()
1489 struct intel_iommu *iommu = dmaru->iommu; in dmar_ir_hotplug() local
1493 if (iommu == NULL) in dmar_ir_hotplug()
1495 if (!ecap_ir_support(iommu->ecap)) in dmar_ir_hotplug()
1498 !cap_pi_support(iommu->cap)) in dmar_ir_hotplug()
1502 if (!iommu->ir_table) in dmar_ir_hotplug()
1503 ret = dmar_ir_add(dmaru, iommu); in dmar_ir_hotplug()
1505 if (iommu->ir_table) { in dmar_ir_hotplug()
1506 if (!bitmap_empty(iommu->ir_table->bitmap, in dmar_ir_hotplug()
1510 iommu_disable_irq_remapping(iommu); in dmar_ir_hotplug()
1511 intel_teardown_irq_remapping(iommu); in dmar_ir_hotplug()
1512 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_hotplug()
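dmar_ir_hotplug() is the DRHD hotplug entry point: on insert it sets up remapping for a unit that has no table yet; on removal it refuses while any IRTE is still allocated, and otherwise tears everything down. A sketch of the decision the last lines come from, following the mainline shape:

    if (insert) {
        if (!iommu->ir_table)
            ret = dmar_ir_add(dmaru, iommu);
    } else {
        if (iommu->ir_table) {
            if (!bitmap_empty(iommu->ir_table->bitmap,
                              INTR_REMAP_TABLE_ENTRIES)) {
                ret = -EBUSY;  /* live IRTEs, can't remove the unit */
            } else {
                iommu_disable_irq_remapping(iommu);
                intel_teardown_irq_remapping(iommu);
                ir_remove_ioapic_hpet_scope(iommu);
            }
        }
    }

    return ret;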