Lines matching refs: iommu
33 struct intel_iommu *iommu; member
40 struct intel_iommu *iommu; member
47 struct intel_iommu *iommu; member
83 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
86 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
88 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
91 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
93 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
96 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
100 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
102 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in init_ir_status()
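The three helpers above latch hardware state into a software flag: init_ir_status() samples the global status register once at probe, and ir_pre_enabled()/clear_ir_pre_enabled() test and drop the resulting VTD_FLAG_IRQ_REMAP_PRE_ENABLED bit. A minimal user-space model of that latch, with the register read stubbed and the bit positions illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define GSTS_IRES           (1u << 25)  /* illustrative status bit */
    #define FLAG_IR_PRE_ENABLED (1u << 0)

    struct fake_iommu {
        uint32_t gsts;   /* stands in for readl(reg + DMAR_GSTS_REG) */
        uint32_t flags;
    };

    static void init_ir_status(struct fake_iommu *iommu)
    {
        /* hardware (or a prior kernel) already enabled remapping? */
        if (iommu->gsts & GSTS_IRES)
            iommu->flags |= FLAG_IR_PRE_ENABLED;
    }

    static int ir_pre_enabled(struct fake_iommu *iommu)
    {
        return iommu->flags & FLAG_IR_PRE_ENABLED;
    }

    int main(void)
    {
        struct fake_iommu iommu = { .gsts = GSTS_IRES, .flags = 0 };

        init_ir_status(&iommu);
        printf("pre-enabled: %d\n", ir_pre_enabled(&iommu));
        return 0;
    }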
105 static int alloc_irte(struct intel_iommu *iommu, in alloc_irte() argument
108 struct ir_table *table = iommu->ir_table; in alloc_irte()
121 if (mask > ecap_max_handle_mask(iommu->ecap)) { in alloc_irte()
124 ecap_max_handle_mask(iommu->ecap)); in alloc_irte()
132 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); in alloc_irte()
134 irq_iommu->iommu = iommu; in alloc_irte()
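alloc_irte() above hands out a naturally aligned block of 2^mask IRTEs from the table's bitmap, after checking mask against the hardware limit ecap_max_handle_mask(). A sketch of the underlying region allocation in plain C, standing in for the kernel's bitmap_find_free_region(); the table size and helper name are made up for illustration:

    #include <stdio.h>

    #define IRTE_COUNT 64                    /* toy table size */
    static unsigned char bitmap[IRTE_COUNT]; /* 1 = entry in use */

    /* Find a naturally aligned run of 2^order free entries, mark it
     * used, and return its index; -1 if the table is exhausted. */
    static int alloc_irte_block(int order)
    {
        int count = 1 << order;

        for (int base = 0; base + count <= IRTE_COUNT; base += count) {
            int busy = 0;

            for (int i = 0; i < count; i++)
                busy |= bitmap[base + i];
            if (!busy) {
                for (int i = 0; i < count; i++)
                    bitmap[base + i] = 1;
                return base;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("order-0 -> %d\n", alloc_irte_block(0)); /* index 0 */
        printf("order-2 -> %d\n", alloc_irte_block(2)); /* aligned block of 4 */
        return 0;
    }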
145 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) in qi_flush_iec() argument
155 return qi_submit_sync(iommu, &desc, 1, 0); in qi_flush_iec()
161 struct intel_iommu *iommu; in modify_irte() local
171 iommu = irq_iommu->iommu; in modify_irte()
174 irte = &iommu->ir_table->base[index]; in modify_irte()
196 __iommu_flush_cache(iommu, irte, sizeof(*irte)); in modify_irte()
198 rc = qi_flush_iec(iommu, index, 0); in modify_irte()
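modify_irte() above follows a strict ordering: rewrite the entry in memory, flush it out of the CPU cache so the hardware walker sees it, then queue an interrupt-entry-cache invalidation via qi_flush_iec() and wait for it synchronously through qi_submit_sync(). A user-space sketch of that ordering with the hardware steps stubbed out (stub names are mine, not the driver's):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct irte { uint64_t low, high; };

    /* Stubs for __iommu_flush_cache() and qi_flush_iec(): in the driver
     * these make the write visible to the IOMMU and then drop its
     * cached copy of the entry. */
    static void flush_cache(void *p, size_t len) { (void)p; (void)len; }
    static int  flush_iec(int index) { printf("IEC inval %d\n", index); return 0; }

    static int modify_irte(struct irte *table, int index, const struct irte *new)
    {
        struct irte *irte = &table[index];

        memcpy(irte, new, sizeof(*irte));  /* 1. update the entry */
        flush_cache(irte, sizeof(*irte));  /* 2. push it to memory */
        return flush_iec(index);           /* 3. invalidate, wait */
    }

    int main(void)
    {
        struct irte table[16] = {0}, e = { .low = 1 };

        return modify_irte(table, 3, &e);
    }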
212 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) in map_hpet_to_iommu()
213 return ir_hpet[i].iommu; in map_hpet_to_iommu()
223 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) in map_ioapic_to_iommu()
224 return ir_ioapic[i].iommu; in map_ioapic_to_iommu()
233 return drhd ? drhd->iommu->ir_msi_domain : NULL; in map_dev_to_ir()
239 struct intel_iommu *iommu; in clear_entries() local
245 iommu = irq_iommu->iommu; in clear_entries()
248 start = iommu->ir_table->base + index; in clear_entries()
255 bitmap_release_region(iommu->ir_table->bitmap, index, in clear_entries()
258 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); in clear_entries()
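clear_entries() is the inverse of alloc_irte(): zero the 2^order entries behind an IRQ, release the same region of the bitmap, and finish with a single IEC invalidation spanning the block (the irte_mask argument). A compact model, with the flush step left as a comment:

    #include <stdio.h>
    #include <string.h>

    struct irte { unsigned long long low, high; };

    /* Mirror of clear_entries(): wipe the 2^order IRTEs, return the
     * block to the allocation bitmap; the driver then issues one
     * qi_flush_iec() covering the same region. */
    static void clear_entries(struct irte *table, unsigned char *bitmap,
                              int index, int order)
    {
        int count = 1 << order;

        memset(&table[index], 0, count * sizeof(*table));
        memset(&bitmap[index], 0, count);
    }

    int main(void)
    {
        struct irte table[8] = { [4] = { .low = 1 } };
        unsigned char bitmap[8] = { [4] = 1, [5] = 1 };

        clear_entries(table, bitmap, 4, 1);  /* free entries 4 and 5 */
        printf("entry 4 low=%llu used=%d\n", table[4].low, bitmap[4]);
        return 0;
    }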
318 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { in set_ioapic_sid()
345 if (ir_hpet[i].iommu && ir_hpet[i].id == id) { in set_hpet_sid()
431 static int iommu_load_old_irte(struct intel_iommu *iommu) in iommu_load_old_irte() argument
440 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); in iommu_load_old_irte()
454 memcpy(iommu->ir_table->base, old_ir_table, size); in iommu_load_old_irte()
456 __iommu_flush_cache(iommu, iommu->ir_table->base, size); in iommu_load_old_irte()
463 if (iommu->ir_table->base[i].present) in iommu_load_old_irte()
464 bitmap_set(iommu->ir_table->bitmap, i, 1); in iommu_load_old_irte()
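iommu_load_old_irte() above implements the kdump handover: read DMAR_IRTA_REG to locate the previous kernel's table, copy it into the new one, flush, then rebuild the allocation bitmap from the entries' present bits so interrupts that devices still point at keep working. A model of the bitmap rebuild; the entry layout is reduced to the present bit, which sits at bit 0 of an IRTE per the VT-d spec:

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES      16
    #define IRTE_PRESENT 1ull   /* present = bit 0 of the low word */

    struct irte { uint64_t low, high; };

    /* Any IRTE that arrived from the old kernel with its present bit
     * set is still owned by a device and must never be re-issued. */
    static void rebuild_bitmap(const struct irte *table, unsigned char *bitmap)
    {
        for (int i = 0; i < ENTRIES; i++)
            if (table[i].low & IRTE_PRESENT)
                bitmap[i] = 1;
    }

    int main(void)
    {
        struct irte old[ENTRIES] = { [2] = { .low = IRTE_PRESENT } };
        unsigned char bitmap[ENTRIES] = {0};

        rebuild_bitmap(old, bitmap);
        printf("entry 2 still in use: %d\n", bitmap[2]);
        return 0;
    }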
473 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) in iommu_set_irq_remapping() argument
479 addr = virt_to_phys((void *)iommu->ir_table->base); in iommu_set_irq_remapping()
481 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
483 dmar_writeq(iommu->reg + DMAR_IRTA_REG, in iommu_set_irq_remapping()
487 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
489 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
491 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
497 qi_global_iec(iommu); in iommu_set_irq_remapping()
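iommu_set_irq_remapping() programs DMAR_IRTA_REG with the table's physical address, the extended-interrupt-mode (EIM) flag, and the table size, then sets DMA_GCMD_SIRTP and waits for the hardware to latch the pointer. A sketch of how those three fields pack into one register value; the field positions follow the VT-d spec (address in the upper bits, EIME at bit 11, size S in bits 3:0 meaning 2^(S+1) entries) but should be treated as illustrative here:

    #include <stdint.h>
    #include <stdio.h>

    #define IRTA_EIME    (1ull << 11)
    #define IRTA_SIZE(s) ((uint64_t)(s) & 0xf)  /* table holds 2^(s+1) IRTEs */

    static uint64_t make_irta(uint64_t table_phys, int eim, int size_order)
    {
        /* table must be 4K-aligned; low bits carry mode and size */
        return (table_phys & ~0xfffull) |
               (eim ? IRTA_EIME : 0) |
               IRTA_SIZE(size_order);
    }

    int main(void)
    {
        /* size field 0xf -> 65536 entries, matching INTR_REMAP_TABLE_ENTRIES */
        printf("IRTA = %#llx\n",
               (unsigned long long)make_irta(0x12345000ull, 1, 0xf));
        return 0;
    }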
500 static void iommu_enable_irq_remapping(struct intel_iommu *iommu) in iommu_enable_irq_remapping() argument
505 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
508 iommu->gcmd |= DMA_GCMD_IRE; in iommu_enable_irq_remapping()
509 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
510 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
515 iommu->gcmd &= ~DMA_GCMD_CFI; in iommu_enable_irq_remapping()
516 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
517 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
531 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
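iommu_enable_irq_remapping() shows the command/status handshake used throughout this file (the IOMMU_WAIT_OP() calls): set a bit in DMAR_GCMD_REG, then spin on DMAR_GSTS_REG until the hardware reflects it; the same dance then clears DMA_GCMD_CFI so compatibility-format interrupts are blocked. A toy model where a fake device acknowledges instantly:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t gcmd, gsts;

    static void write_gcmd(uint32_t v) { gcmd = v; gsts = v; /* fake hw ack */ }
    static uint32_t read_gsts(void)    { return gsts; }

    /* Model of IOMMU_WAIT_OP(): poll status until the bit is set or
     * cleared as requested (cpu_relax() in the kernel loop body). */
    static void wait_op(uint32_t bit, int set)
    {
        while (set ? !(read_gsts() & bit) : (read_gsts() & bit))
            ;
    }

    int main(void)
    {
        const uint32_t IRE = 1u << 25;   /* illustrative bit position */

        write_gcmd(gcmd | IRE);          /* request: enable remapping */
        wait_op(IRE, 1);                 /* poll until GSTS shows it */
        puts("interrupt remapping enabled");

        write_gcmd(gcmd & ~IRE);         /* disable path: same handshake */
        wait_op(IRE, 0);
        puts("interrupt remapping disabled");
        return 0;
    }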
534 static int intel_setup_irq_remapping(struct intel_iommu *iommu) in intel_setup_irq_remapping() argument
541 if (iommu->ir_table) in intel_setup_irq_remapping()
548 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, in intel_setup_irq_remapping()
552 iommu->seq_id, INTR_REMAP_PAGE_ORDER); in intel_setup_irq_remapping()
558 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); in intel_setup_irq_remapping()
562 fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); in intel_setup_irq_remapping()
566 iommu->ir_domain = in intel_setup_irq_remapping()
570 iommu); in intel_setup_irq_remapping()
571 if (!iommu->ir_domain) { in intel_setup_irq_remapping()
572 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); in intel_setup_irq_remapping()
575 iommu->ir_msi_domain = in intel_setup_irq_remapping()
576 arch_create_remap_msi_irq_domain(iommu->ir_domain, in intel_setup_irq_remapping()
578 iommu->seq_id); in intel_setup_irq_remapping()
582 iommu->ir_table = ir_table; in intel_setup_irq_remapping()
588 if (!iommu->qi) { in intel_setup_irq_remapping()
592 dmar_fault(-1, iommu); in intel_setup_irq_remapping()
593 dmar_disable_qi(iommu); in intel_setup_irq_remapping()
595 if (dmar_enable_qi(iommu)) { in intel_setup_irq_remapping()
601 init_ir_status(iommu); in intel_setup_irq_remapping()
603 if (ir_pre_enabled(iommu)) { in intel_setup_irq_remapping()
606 iommu->name); in intel_setup_irq_remapping()
607 clear_ir_pre_enabled(iommu); in intel_setup_irq_remapping()
608 iommu_disable_irq_remapping(iommu); in intel_setup_irq_remapping()
609 } else if (iommu_load_old_irte(iommu)) in intel_setup_irq_remapping()
611 iommu->name); in intel_setup_irq_remapping()
614 iommu->name); in intel_setup_irq_remapping()
617 iommu_set_irq_remapping(iommu, eim_mode); in intel_setup_irq_remapping()
622 if (iommu->ir_msi_domain) in intel_setup_irq_remapping()
623 irq_domain_remove(iommu->ir_msi_domain); in intel_setup_irq_remapping()
624 iommu->ir_msi_domain = NULL; in intel_setup_irq_remapping()
625 irq_domain_remove(iommu->ir_domain); in intel_setup_irq_remapping()
626 iommu->ir_domain = NULL; in intel_setup_irq_remapping()
636 iommu->ir_table = NULL; in intel_setup_irq_remapping()
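intel_setup_irq_remapping() acquires its resources in sequence (the ir_table struct, the page array holding the IRTEs, the allocation bitmap, the fwnode, and the irq domains) and unwinds them in reverse on any failure. A generic sketch of that goto-unwind shape; the resources here are placeholders for the driver's, not its actual allocations:

    #include <stdio.h>
    #include <stdlib.h>

    /* One goto label per acquired resource, in reverse order, so every
     * failure path frees exactly what was taken so far. */
    static int setup(void)
    {
        void *table, *pages, *bitmap;

        table = malloc(64);
        if (!table)
            goto out;
        pages = malloc(4096);        /* alloc_pages_node() in the driver */
        if (!pages)
            goto out_free_table;
        bitmap = calloc(1, 128);     /* bitmap_zalloc() in the driver */
        if (!bitmap)
            goto out_free_pages;

        printf("setup complete\n");
        free(bitmap); free(pages); free(table);  /* normal teardown */
        return 0;

    out_free_pages:
        free(pages);
    out_free_table:
        free(table);
    out:
        return -1;
    }

    int main(void) { return setup(); }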
641 static void intel_teardown_irq_remapping(struct intel_iommu *iommu) in intel_teardown_irq_remapping() argument
645 if (iommu && iommu->ir_table) { in intel_teardown_irq_remapping()
646 if (iommu->ir_msi_domain) { in intel_teardown_irq_remapping()
647 fn = iommu->ir_msi_domain->fwnode; in intel_teardown_irq_remapping()
649 irq_domain_remove(iommu->ir_msi_domain); in intel_teardown_irq_remapping()
651 iommu->ir_msi_domain = NULL; in intel_teardown_irq_remapping()
653 if (iommu->ir_domain) { in intel_teardown_irq_remapping()
654 fn = iommu->ir_domain->fwnode; in intel_teardown_irq_remapping()
656 irq_domain_remove(iommu->ir_domain); in intel_teardown_irq_remapping()
658 iommu->ir_domain = NULL; in intel_teardown_irq_remapping()
660 free_pages((unsigned long)iommu->ir_table->base, in intel_teardown_irq_remapping()
662 bitmap_free(iommu->ir_table->bitmap); in intel_teardown_irq_remapping()
663 kfree(iommu->ir_table); in intel_teardown_irq_remapping()
664 iommu->ir_table = NULL; in intel_teardown_irq_remapping()
671 static void iommu_disable_irq_remapping(struct intel_iommu *iommu) in iommu_disable_irq_remapping() argument
676 if (!ecap_ir_support(iommu->ecap)) in iommu_disable_irq_remapping()
683 qi_global_iec(iommu); in iommu_disable_irq_remapping()
685 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
687 sts = readl(iommu->reg + DMAR_GSTS_REG); in iommu_disable_irq_remapping()
691 iommu->gcmd &= ~DMA_GCMD_IRE; in iommu_disable_irq_remapping()
692 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_irq_remapping()
694 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_irq_remapping()
698 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
713 struct intel_iommu *iommu; in intel_cleanup_irq_remapping() local
715 for_each_iommu(iommu, drhd) { in intel_cleanup_irq_remapping()
716 if (ecap_ir_support(iommu->ecap)) { in intel_cleanup_irq_remapping()
717 iommu_disable_irq_remapping(iommu); in intel_cleanup_irq_remapping()
718 intel_teardown_irq_remapping(iommu); in intel_cleanup_irq_remapping()
729 struct intel_iommu *iommu; in intel_prepare_irq_remapping() local
757 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
758 if (!ecap_ir_support(iommu->ecap)) in intel_prepare_irq_remapping()
770 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
771 if (eim && !ecap_eim_support(iommu->ecap)) { in intel_prepare_irq_remapping()
772 pr_info("%s does not support EIM\n", iommu->name); in intel_prepare_irq_remapping()
782 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
783 if (intel_setup_irq_remapping(iommu)) { in intel_prepare_irq_remapping()
785 iommu->name); in intel_prepare_irq_remapping()
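intel_prepare_irq_remapping() treats the platform capability as the AND across every DRHD unit: any unit without IR support vetoes the feature outright, and any unit without EIM support (the message above) forces the weaker xAPIC mode for all. A toy model of that conjunction:

    #include <stdio.h>

    struct unit { const char *name; int ir; int eim; };

    int main(void)
    {
        struct unit units[] = { { "dmar0", 1, 1 }, { "dmar1", 1, 0 } };
        int ir = 1, eim = 1;

        for (unsigned i = 0; i < sizeof(units) / sizeof(units[0]); i++) {
            ir &= units[i].ir;              /* one holdout disables IR */
            if (!units[i].eim) {
                printf("%s does not support EIM\n", units[i].name);
                eim = 0;                    /* downgrade to xAPIC mode */
            }
        }
        printf("remap: %s, mode: %s\n",
               ir ? "on" : "off", eim ? "x2apic" : "xapic");
        return 0;
    }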
803 struct intel_iommu *iommu; in set_irq_posting_cap() local
817 for_each_iommu(iommu, drhd) in set_irq_posting_cap()
818 if (!cap_pi_support(iommu->cap)) { in set_irq_posting_cap()
829 struct intel_iommu *iommu; in intel_enable_irq_remapping() local
835 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
836 if (!ir_pre_enabled(iommu)) in intel_enable_irq_remapping()
837 iommu_enable_irq_remapping(iommu); in intel_enable_irq_remapping()
858 struct intel_iommu *iommu, in ir_parse_one_hpet_scope() argument
881 if (ir_hpet[count].iommu == iommu && in ir_parse_one_hpet_scope()
884 else if (ir_hpet[count].iommu == NULL && free == -1) in ir_parse_one_hpet_scope()
892 ir_hpet[free].iommu = iommu; in ir_parse_one_hpet_scope()
903 struct intel_iommu *iommu, in ir_parse_one_ioapic_scope() argument
926 if (ir_ioapic[count].iommu == iommu && in ir_parse_one_ioapic_scope()
929 else if (ir_ioapic[count].iommu == NULL && free == -1) in ir_parse_one_ioapic_scope()
939 ir_ioapic[free].iommu = iommu; in ir_parse_one_ioapic_scope()
942 scope->enumeration_id, drhd->address, iommu->seq_id); in ir_parse_one_ioapic_scope()
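ir_parse_one_hpet_scope() and ir_parse_one_ioapic_scope() above share a single-pass find-or-insert scan: remember both an existing (iommu, id) match and the first free slot, and register into the free slot only if no match was seen, so re-parsing the same scope is idempotent. A self-contained version of that scan, with the array size and field names simplified:

    #include <stdio.h>

    #define MAX_SCOPES 8

    struct scope { int id; void *iommu; };
    static struct scope ir_ioapic[MAX_SCOPES];

    static int register_scope(void *iommu, int id)
    {
        int free = -1;

        for (int i = 0; i < MAX_SCOPES; i++) {
            if (ir_ioapic[i].iommu == iommu && ir_ioapic[i].id == id)
                return 0;                  /* already registered */
            if (ir_ioapic[i].iommu == NULL && free == -1)
                free = i;                  /* remember first free slot */
        }
        if (free == -1)
            return -1;                     /* table exhausted */
        ir_ioapic[free].iommu = iommu;
        ir_ioapic[free].id = id;
        return 0;
    }

    int main(void)
    {
        int dummy;

        register_scope(&dummy, 2);
        /* second pass finds the match and adds nothing */
        printf("re-register: %d\n", register_scope(&dummy, 2));
        return 0;
    }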
948 struct intel_iommu *iommu) in ir_parse_ioapic_hpet_scope() argument
962 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
964 ret = ir_parse_one_hpet_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
971 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) in ir_remove_ioapic_hpet_scope() argument
976 if (ir_hpet[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
977 ir_hpet[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
980 if (ir_ioapic[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
981 ir_ioapic[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
991 struct intel_iommu *iommu; in parse_ioapics_under_ir() local
995 for_each_iommu(iommu, drhd) { in parse_ioapics_under_ir()
998 if (!ecap_ir_support(iommu->ecap)) in parse_ioapics_under_ir()
1001 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu); in parse_ioapics_under_ir()
1042 struct intel_iommu *iommu = NULL; in disable_irq_remapping() local
1047 for_each_iommu(iommu, drhd) { in disable_irq_remapping()
1048 if (!ecap_ir_support(iommu->ecap)) in disable_irq_remapping()
1051 iommu_disable_irq_remapping(iommu); in disable_irq_remapping()
1065 struct intel_iommu *iommu = NULL; in reenable_irq_remapping() local
1067 for_each_iommu(iommu, drhd) in reenable_irq_remapping()
1068 if (iommu->qi) in reenable_irq_remapping()
1069 dmar_reenable_qi(iommu); in reenable_irq_remapping()
1074 for_each_iommu(iommu, drhd) { in reenable_irq_remapping()
1075 if (!ecap_ir_support(iommu->ecap)) in reenable_irq_remapping()
1079 iommu_set_irq_remapping(iommu, eim); in reenable_irq_remapping()
1080 iommu_enable_irq_remapping(iommu); in reenable_irq_remapping()
1326 struct intel_iommu *iommu = domain->host_data; in intel_irq_remapping_alloc() local
1333 if (!info || !iommu) in intel_irq_remapping_alloc()
1356 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs); in intel_irq_remapping_alloc()
1428 struct intel_iommu *iommu = NULL; in intel_irq_remapping_select() local
1431 iommu = map_ioapic_to_iommu(fwspec->param[0]); in intel_irq_remapping_select()
1433 iommu = map_hpet_to_iommu(fwspec->param[0]); in intel_irq_remapping_select()
1435 return iommu && d == iommu->ir_domain; in intel_irq_remapping_select()
1449 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) in dmar_ir_add() argument
1454 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu); in dmar_ir_add()
1458 if (eim && !ecap_eim_support(iommu->ecap)) { in dmar_ir_add()
1460 iommu->reg_phys, iommu->ecap); in dmar_ir_add()
1464 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { in dmar_ir_add()
1466 iommu->reg_phys); in dmar_ir_add()
1473 ret = intel_setup_irq_remapping(iommu); in dmar_ir_add()
1476 iommu->name); in dmar_ir_add()
1477 intel_teardown_irq_remapping(iommu); in dmar_ir_add()
1478 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
1480 iommu_enable_irq_remapping(iommu); in dmar_ir_add()
1489 struct intel_iommu *iommu = dmaru->iommu; in dmar_ir_hotplug() local
1493 if (iommu == NULL) in dmar_ir_hotplug()
1495 if (!ecap_ir_support(iommu->ecap)) in dmar_ir_hotplug()
1498 !cap_pi_support(iommu->cap)) in dmar_ir_hotplug()
1502 if (!iommu->ir_table) in dmar_ir_hotplug()
1503 ret = dmar_ir_add(dmaru, iommu); in dmar_ir_hotplug()
1505 if (iommu->ir_table) { in dmar_ir_hotplug()
1506 if (!bitmap_empty(iommu->ir_table->bitmap, in dmar_ir_hotplug()
1510 iommu_disable_irq_remapping(iommu); in dmar_ir_hotplug()
1511 intel_teardown_irq_remapping(iommu); in dmar_ir_hotplug()
1512 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_hotplug()
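dmar_ir_hotplug() above gates hot-remove on the allocation bitmap: teardown proceeds only once bitmap_empty() confirms no interrupt still owns an IRTE, otherwise the request is refused as busy. A minimal model of that guard:

    #include <stdio.h>

    #define ENTRIES 16

    /* A unit can only be torn down once its IRTE bitmap is empty;
     * otherwise report busy, as the hot-remove path does. */
    static int try_remove(const unsigned char *bitmap)
    {
        for (int i = 0; i < ENTRIES; i++)
            if (bitmap[i])
                return -16;  /* -EBUSY: IRTEs still in use */
        /* here the driver runs iommu_disable_irq_remapping(),
         * intel_teardown_irq_remapping() and drops the scope entries */
        return 0;
    }

    int main(void)
    {
        unsigned char bitmap[ENTRIES] = {0};

        bitmap[3] = 1;
        printf("remove while busy: %d\n", try_remove(bitmap));
        bitmap[3] = 0;
        printf("remove when idle:  %d\n", try_remove(bitmap));
        return 0;
    }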