Lines matching refs: iommu — cross-reference hits for the identifier in the AMD IOMMU driver (drivers/iommu/amd/iommu.c). Each hit lists the source line number, the matching line, and the enclosing function; "local" and "argument" mark declarations.
231 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in find_dev_data() local
240 if (translation_pre_enabled(iommu)) in find_dev_data()
343 struct amd_iommu *iommu; in iommu_init_device() local
345 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
346 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
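The hits above show the driver's core lookup: a 16-bit PCI device ID indexes the global amd_iommu_rlookup_table to find the owning IOMMU. A minimal sketch of the pattern; lookup_iommu is a hypothetical helper name, while PCI_DEVID is the standard macro from include/linux/pci.h:

/* Hypothetical helper: resolve a PCI device to its AMD IOMMU, as
 * find_dev_data() and iommu_init_device() do via dev_data->devid.
 * amd_iommu_rlookup_table[] is driver-internal, indexed by devid. */
static struct amd_iommu *lookup_iommu(struct pci_dev *pdev)
{
        u16 devid = PCI_DEVID(pdev->bus->number, pdev->devfn);

        return amd_iommu_rlookup_table[devid];  /* may be NULL */
}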
502 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
504 struct device *dev = iommu->iommu.dev; in iommu_print_event()
591 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
595 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
596 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
599 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
603 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
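iommu_poll_events() drains the hardware event log, a ring buffer whose head and tail pointers live in MMIO registers. A condensed sketch of the loop (EVENT_ENTRY_SIZE and EVT_BUFFER_SIZE are driver-internal constants):

/* Sketch of the event-log drain in iommu_poll_events(). */
u32 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
u32 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

while (head != tail) {
        iommu_print_event(iommu, iommu->evt_buf + head);
        /* fixed-size entries; wrap at the end of the ring */
        head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
}

/* tell the hardware how far the driver has consumed */
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);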
606 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
624 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
628 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
631 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
632 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
639 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
664 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
667 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
670 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
671 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
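The PPR (peripheral page request) log uses the same ring discipline but with 128-bit entries. Each iteration copies the entry out, clears the slot, and advances the head before handling it, so the hardware regains the slot while the handler runs; the real function also spins briefly until the entry reads back as valid. A sketch of one iteration (PPR_ENTRY_SIZE and PPR_LOG_SIZE are driver-internal):

/* Sketch of one iteration of iommu_poll_ppr_log(). */
volatile u64 *raw = (u64 *)(iommu->ppr_log + head);
u64 entry[2];

entry[0] = raw[0];              /* copy the 128-bit entry out */
entry[1] = raw[1];
raw[0] = raw[1] = 0UL;          /* reset the slot in the log */

/* advance the head before handling, freeing the slot early */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

iommu_handle_ppr_entry(iommu, entry);

/* the handler may sleep; refresh both ring pointers */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);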
686 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
690 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
693 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
694 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
700 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
708 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
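The GA (guest virtual APIC) log follows the same pattern with single-u64 entries describing guest interrupt requests. A sketch of the loop, assuming the driver-internal GA_* accessors and the file-static iommu_ga_log_notifier callback pointer:

/* Sketch of the GA-log loop in iommu_poll_ga_log(). */
while (head != tail) {
        u64 log_entry = *(volatile u64 *)(iommu->ga_log + head);

        head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
        writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

        /* forward guest-vAPIC requests to the registered notifier */
        if (GA_REQ_TYPE(log_entry) == GA_GUEST_NR && iommu_ga_log_notifier)
                iommu_ga_log_notifier(GA_TAG(log_entry));
}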
730 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
736 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
741 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
752 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
753 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
758 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
762 iommu_poll_events(iommu); in amd_iommu_int_thread()
767 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
773 iommu_poll_ga_log(iommu); in amd_iommu_int_thread()
779 amd_iommu_restart_event_logging(iommu); in amd_iommu_int_thread()
795 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
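amd_iommu_int_thread() is the threaded interrupt handler tying the three pollers together: it acknowledges the interrupt sources, drains whichever logs the status register flags, and re-reads the status before exiting (the driver comments this re-read as the workaround for hardware erratum ERBT1312). A sketch, assuming the AMD_IOMMU_INT_MASK aggregate defined alongside the handler:

/* Sketch of the dispatch loop in amd_iommu_int_thread(). */
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

while (status & AMD_IOMMU_INT_MASK) {
        /* write-1-to-clear re-enables the interrupt sources */
        writel(AMD_IOMMU_INT_MASK,
               iommu->mmio_base + MMIO_STATUS_OFFSET);

        if (status & MMIO_STATUS_EVT_INT_MASK)
                iommu_poll_events(iommu);
        if (status & MMIO_STATUS_PPR_INT_MASK)
                iommu_poll_ppr_log(iommu);
        if (status & MMIO_STATUS_GALOG_INT_MASK)
                iommu_poll_ga_log(iommu);

        /* re-read: hardware may have raised a bit again (ERBT1312) */
        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
}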
811 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
815 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
828 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
835 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
836 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
840 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
843 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
847 struct amd_iommu *iommu, in build_completion_wait() argument
850 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
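build_completion_wait() and wait_on_sem() form a pair: the COMPLETION_WAIT command instructs the IOMMU to store a 64-bit sequence value to the cmd_sem semaphore once all prior commands have finished, and wait_on_sem() busy-polls (udelay, bounded by LOOP_TIMEOUT) until that store lands. A sketch of the command layout, per the driver's CMD_* macros:

/* Sketch of build_completion_wait(): COMPLETION_WAIT with a store
 * of 'data' to the physical address of iommu->cmd_sem. */
u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);

memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
cmd->data[2] = lower_32_bits(data);
cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);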
1008 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1015 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1017 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1031 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1037 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1040 iommu->need_sync = sync; in __iommu_queue_command_sync()
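__iommu_queue_command_sync() computes free space with unsigned modular arithmetic: head and next_tail are byte offsets into a power-of-two ring, so (head - next_tail) % CMD_BUFFER_SIZE yields the remaining room even across wrap-around. When the ring is nearly full, the driver re-reads the hardware head pointer until the IOMMU has consumed enough entries. A simplified sketch (the real function bounds the wait with LOOP_TIMEOUT and skips the first udelay):

/* Sketch of the free-space check in __iommu_queue_command_sync(). */
u32 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
u32 left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;

while (left <= 0x20) {          /* keep a small safety margin */
        udelay(1);
        /* the hardware consumes entries; re-read its head pointer */
        iommu->cmd_buf_head = readl(iommu->mmio_base +
                                    MMIO_CMD_HEAD_OFFSET);
        left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
}

copy_cmd_to_buffer(iommu, cmd); /* write entry, ring the doorbell */
iommu->need_sync = sync;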
1045 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1052 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1053 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1054 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1059 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1061 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
1068 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1075 if (!iommu->need_sync) in iommu_completion_wait()
1078 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1080 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1081 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1083 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1087 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1090 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
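iommu_completion_wait() puts the pieces together: it bails out early when need_sync is clear (no command queued since the last sync), otherwise it bumps the per-IOMMU sequence value under iommu->lock, queues the COMPLETION_WAIT unsynced, and polls the semaphore. Reassembled from the hits above:

/* Sketch of the body of iommu_completion_wait(). */
if (!iommu->need_sync)
        return 0;

raw_spin_lock_irqsave(&iommu->lock, flags);
data = ++iommu->cmd_sem_val;            /* fresh sequence value */
build_completion_wait(&cmd, iommu, data);
ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (!ret)
        ret = wait_on_sem(iommu, data); /* spin until HW stores it */
raw_spin_unlock_irqrestore(&iommu->lock, flags);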
1095 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1101 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1104 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1109 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1111 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1118 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1126 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1129 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1132 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1138 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1140 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1143 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1149 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1150 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1153 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1159 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1162 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1167 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1169 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1172 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1174 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1175 amd_iommu_flush_all(iommu); in iommu_flush_all_caches()
1177 amd_iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1178 amd_iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1179 amd_iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
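iommu_flush_all_caches() picks the cheapest safe flush: hardware advertising the INVALIDATE_ALL feature takes a single command, otherwise the driver walks the device table, the interrupt remap tables, and the per-domain TLBs. Reassembled from the hits above:

/* Sketch of iommu_flush_all_caches(). */
if (iommu_feature(iommu, FEATURE_IA)) {
        amd_iommu_flush_all(iommu);      /* one INVALIDATE_ALL command */
} else {
        amd_iommu_flush_dte_all(iommu);  /* all device table entries */
        amd_iommu_flush_irt_all(iommu);  /* all interrupt remap tables */
        amd_iommu_flush_tlb_all(iommu);  /* IOTLB of every domain ID */
}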
1189 struct amd_iommu *iommu; in device_flush_iotlb() local
1194 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1198 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1203 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1205 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1213 struct amd_iommu *iommu; in device_flush_dte() local
1217 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1221 device_flush_dte_alias, iommu); in device_flush_dte()
1223 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1229 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
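device_flush_dte() must invalidate not just the device's own DTE but every requester ID the device may emit: PCI DMA aliases, enumerated by pci_for_each_dma_alias() with device_flush_dte_alias as the per-alias callback shown above, plus any firmware-reported alias recorded in amd_iommu_alias_table. A simplified sketch with error handling trimmed:

/* Sketch of the alias handling in device_flush_dte(). */
if (pdev)
        ret = pci_for_each_dma_alias(pdev, device_flush_dte_alias,
                                     iommu);
else
        ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
        return ret;

/* also flush the IVRS-reported alias, if distinct */
alias = amd_iommu_alias_table[dev_data->devid];
if (alias != dev_data->devid)
        ret = iommu_flush_dte(iommu, alias);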
1463 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1465 if (iommu_feature(iommu, FEATURE_EPHSUP)) in set_dte_entry()
1508 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1510 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1526 struct amd_iommu *iommu; in do_attach() local
1529 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1537 domain->dev_iommu[iommu->index] += 1; in do_attach()
1551 struct amd_iommu *iommu; in do_detach() local
1553 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1571 domain->dev_iommu[iommu->index] -= 1; in do_detach()
1732 struct amd_iommu *iommu; in amd_iommu_probe_device() local
1739 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_probe_device()
1742 return &iommu->iommu; in amd_iommu_probe_device()
1751 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
1752 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
1755 iommu_completion_wait(iommu); in amd_iommu_probe_device()
1770 struct amd_iommu *iommu; in amd_iommu_release_device() local
1775 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_release_device()
1778 iommu_completion_wait(iommu); in amd_iommu_release_device()
1989 struct amd_iommu *iommu; in amd_iommu_detach_device() local
1997 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
1998 if (!iommu) in amd_iommu_detach_device()
2007 iommu_completion_wait(iommu); in amd_iommu_detach_device()
2015 struct amd_iommu *iommu; in amd_iommu_attach_device() local
2024 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2025 if (!iommu) in amd_iommu_attach_device()
2042 iommu_completion_wait(iommu); in amd_iommu_attach_device()
2382 struct amd_iommu *iommu; in __flush_pasid() local
2393 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2398 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
2552 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
2556 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
2561 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
2671 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
2676 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
2696 struct amd_iommu *iommu; in alloc_irq_table() local
2702 iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_table()
2703 if (!iommu) in alloc_irq_table()
2713 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2731 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2742 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2745 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
2748 iommu_completion_wait(iommu); in alloc_irq_table()
2766 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_index() local
2768 if (!iommu) in alloc_irq_index()
2783 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
2793 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
2815 struct amd_iommu *iommu; in modify_irte_ga() local
2819 iommu = amd_iommu_rlookup_table[devid]; in modify_irte_ga()
2820 if (iommu == NULL) in modify_irte_ga()
2848 iommu_flush_irt(iommu, devid); in modify_irte_ga()
2849 iommu_completion_wait(iommu); in modify_irte_ga()
2857 struct amd_iommu *iommu; in modify_irte() local
2860 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
2861 if (iommu == NULL) in modify_irte()
2872 iommu_flush_irt(iommu, devid); in modify_irte()
2873 iommu_completion_wait(iommu); in modify_irte()
2881 struct amd_iommu *iommu; in free_irte() local
2884 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
2885 if (iommu == NULL) in free_irte()
2893 iommu->irte_ops->clear_allocated(table, index); in free_irte()
2896 iommu_flush_irt(iommu, devid); in free_irte()
2897 iommu_completion_wait(iommu); in free_irte()
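modify_irte(), modify_irte_ga(), and free_irte() all end with the same invalidate-and-wait tail: change the in-memory IRTE under table->lock, then flush the IOMMU's cached copy and wait for the flush to complete. A sketch of the 32-bit (non-GA) variant, assuming the driver's struct irq_remap_table and union irte:

/* Sketch of the common update pattern in modify_irte(). */
raw_spin_lock_irqsave(&table->lock, flags);
entry = (u32 *)table->table;            /* 32-bit IRTE format */
entry[index] = irte->val;
raw_spin_unlock_irqrestore(&table->lock, flags);

iommu_flush_irt(iommu, devid);          /* drop the cached IRTE */
iommu_completion_wait(iommu);           /* make it globally visible */

(The GA variant in modify_irte_ga() updates its 128-bit entry atomically rather than through a plain store.)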
3071 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_prepare_irte() local
3073 if (!iommu) in irq_remapping_prepare_irte()
3078 iommu->irte_ops->prepare(data->entry, apic->delivery_mode, in irq_remapping_prepare_irte()
3149 struct amd_iommu *iommu; in irq_remapping_alloc() local
3159 iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_alloc()
3161 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3250 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3260 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_activate() local
3263 if (!iommu) in irq_remapping_activate()
3266 iommu->irte_ops->activate(data->entry, irte_info->devid, in irq_remapping_activate()
3268 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3277 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_deactivate() local
3279 if (iommu) in irq_remapping_deactivate()
3280 iommu->irte_ops->deactivate(data->entry, irte_info->devid, in irq_remapping_deactivate()
3287 struct amd_iommu *iommu; in irq_remapping_select() local
3301 iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_select()
3302 return iommu && iommu->ir_domain == d; in irq_remapping_select()
3372 struct amd_iommu *iommu; in amd_ir_set_vcpu_affinity() local
3399 iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_vcpu_affinity()
3400 if (iommu == NULL) in amd_ir_set_vcpu_affinity()
3426 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3436 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3448 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_affinity() local
3451 if (!iommu) in amd_ir_set_affinity()
3458 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3484 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3488 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3491 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3492 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3497 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
3498 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
3500 iommu->index); in amd_iommu_create_irq_domain()
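amd_iommu_create_irq_domain() builds the per-IOMMU remapping hierarchy: a named fwnode identifies the domain, the tree-based ir_domain hangs off the architecture's parent domain, and an MSI domain is stacked on top so amd_iommu_set_pci_msi_domain() (line 730 above) can route device MSIs through the remapper. A sketch following the calls shown above; the MSI-domain name string is an assumption here:

/* Sketch of amd_iommu_create_irq_domain(). */
fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
if (!fn)
        return -ENOMEM;

iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
if (!iommu->ir_domain) {
        irq_domain_free_fwnode(fn);
        return -ENOMEM;
}

iommu->ir_domain->parent = arch_get_ir_parent_domain();
iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
                                                     "AMD-IR-MSI",
                                                     iommu->index);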
3507 struct amd_iommu *iommu; in amd_iommu_update_ga() local
3518 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_update_ga()
3519 if (!iommu) in amd_iommu_update_ga()
3541 iommu_flush_irt(iommu, devid); in amd_iommu_update_ga()
3542 iommu_completion_wait(iommu); in amd_iommu_update_ga()