Lines Matching +full:unmapped +full:- +full:event +full:- +full:sources

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
16 #include <linux/pci-ats.h>
21 #include <linux/dma-map-ops.h>
22 #include <linux/dma-direct.h>
23 #include <linux/dma-iommu.h>
24 #include <linux/iommu-helper.h>
26 #include <linux/amd-iommu.h>
47 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
83 * Domain for untranslated devices - only allocated
89 int amd_iommu_max_glx_val = -1;
125 return -ENODEV; in get_acpihid_device_id()
128 if (acpi_dev_hid_uid_match(adev, p->hid, in get_acpihid_device_id()
129 p->uid[0] ? p->uid : NULL)) { in get_acpihid_device_id()
132 return p->devid; in get_acpihid_device_id()
135 return -EINVAL; in get_acpihid_device_id()
158 u64 pt_root = atomic64_read(&domain->pt_root); in amd_iommu_domain_get_pgtable()
160 pgtable->root = (u64 *)(pt_root & PAGE_MASK); in amd_iommu_domain_get_pgtable()
161 pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */ in amd_iommu_domain_get_pgtable()
166 atomic64_set(&domain->pt_root, root); in amd_iommu_domain_set_pt_root()
194 spin_lock_init(&dev_data->lock); in alloc_dev_data()
195 dev_data->devid = devid; in alloc_dev_data()
196 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
198 llist_add(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
212 if (dev_data->devid == devid) in search_dev_data()
265 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) in setup_aliases()
286 dev_data->defer_attach = true; in find_dev_data()
305 if ((devid == p->devid) && p->group) in acpihid_device_group()
306 entry->group = p->group; in acpihid_device_group()
309 if (!entry->group) in acpihid_device_group()
310 entry->group = generic_device_group(dev); in acpihid_device_group()
312 iommu_group_ref_get(entry->group); in acpihid_device_group()
314 return entry->group; in acpihid_device_group()
341 dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_pri_erratum()
343 return dev_data->errata & (1 << erratum) ? true : false; in pdev_pri_erratum()
385 return -ENOMEM; in iommu_init_device()
387 dev_data->pdev = setup_aliases(dev); in iommu_init_device()
399 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
400 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
430 if (dev_data->domain) in amd_iommu_uninit_device()
437 * device is re-plugged - not doing so would introduce a ton of races. in amd_iommu_uninit_device()
452 pte_mask = ~((cnt << 3) - 1); in first_pte_l7()
485 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]); in dump_command()
488 static void amd_iommu_report_rmp_hw_error(volatile u32 *event) in amd_iommu_report_rmp_hw_error() argument
495 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in amd_iommu_report_rmp_hw_error()
496 vmg_tag = (event[1]) & 0xFFFF; in amd_iommu_report_rmp_hw_error()
497 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; in amd_iommu_report_rmp_hw_error()
498 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8); in amd_iommu_report_rmp_hw_error()
503 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
505 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
506 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n", in amd_iommu_report_rmp_hw_error()
509 …pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, fl… in amd_iommu_report_rmp_hw_error()
518 static void amd_iommu_report_rmp_fault(volatile u32 *event) in amd_iommu_report_rmp_fault() argument
525 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in amd_iommu_report_rmp_fault()
526 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF; in amd_iommu_report_rmp_fault()
527 vmg_tag = (event[1]) & 0xFFFF; in amd_iommu_report_rmp_fault()
528 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; in amd_iommu_report_rmp_fault()
529 gpa = ((u64)event[3] << 32) | event[2]; in amd_iommu_report_rmp_fault()
534 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
536 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
537 …pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x… in amd_iommu_report_rmp_fault()
540 …pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, … in amd_iommu_report_rmp_fault()
558 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
560 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
561 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n", in amd_iommu_report_page_fault()
564 …pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]… in amd_iommu_report_page_fault()
575 struct device *dev = iommu->iommu.dev; in iommu_print_event()
577 volatile u32 *event = __evt; in iommu_print_event() local
583 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; in iommu_print_event()
584 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in iommu_print_event()
585 pasid = (event[0] & EVENT_DOMID_MASK_HI) | in iommu_print_event()
586 (event[1] & EVENT_DOMID_MASK_LO); in iommu_print_event()
587 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; in iommu_print_event()
588 address = (u64)(((u64)event[3]) << 32) | event[2]; in iommu_print_event()
593 pr_err("No event written to event log\n"); in iommu_print_event()
607 …dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%ll… in iommu_print_event()
613 dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " in iommu_print_event()
619 …dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%ll… in iommu_print_event()
624 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address); in iommu_print_event()
628 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n", in iommu_print_event()
632 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n", in iommu_print_event()
637 …dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx… in iommu_print_event()
642 amd_iommu_report_rmp_fault(event); in iommu_print_event()
645 amd_iommu_report_rmp_hw_error(event); in iommu_print_event()
649 tag = event[1] & 0x03FF; in iommu_print_event()
650 …dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx fl… in iommu_print_event()
655 …dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08… in iommu_print_event()
656 event[0], event[1], event[2], event[3]); in iommu_print_event()
666 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
667 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
670 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
674 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
699 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
702 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
703 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
710 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
723 /* Avoid memcpy function-call overhead */ in iommu_poll_ppr_log()
733 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ppr_log()
735 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
740 /* Refresh ring-buffer information */ in iommu_poll_ppr_log()
741 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
742 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
761 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
764 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
765 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
771 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
774 /* Avoid memcpy function-call overhead */ in iommu_poll_ga_log()
777 /* Update head pointer of hardware ring-buffer */ in iommu_poll_ga_log()
779 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
807 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
824 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
827 /* Enable interrupt sources again */ in amd_iommu_int_thread()
829 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
832 pr_devel("Processing IOMMU Event Log\n"); in amd_iommu_int_thread()
849 pr_info_ratelimited("IOMMU event log overflow\n"); in amd_iommu_int_thread()
855 * When re-enabling interrupt (by writing 1 in amd_iommu_int_thread()
857 * the interrupt bit in the event status register. in amd_iommu_int_thread()
864 * again and re-clear the bits in amd_iommu_int_thread()
866 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
886 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
892 pr_alert("Completion-Wait loop timed out\n"); in wait_on_sem()
893 return -EIO; in wait_on_sem()
906 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
907 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
911 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
914 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
921 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
924 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; in build_completion_wait()
925 cmd->data[1] = upper_32_bits(paddr); in build_completion_wait()
926 cmd->data[2] = lower_32_bits(data); in build_completion_wait()
927 cmd->data[3] = upper_32_bits(data); in build_completion_wait()
934 cmd->data[0] = devid; in build_inv_dte()
959 cmd->data[1] |= domid; in build_inv_iommu_pages()
960 cmd->data[2] = lower_32_bits(address); in build_inv_iommu_pages()
961 cmd->data[3] = upper_32_bits(address); in build_inv_iommu_pages()
963 if (s) /* size bit - we flush more than one 4kb page */ in build_inv_iommu_pages()
964 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iommu_pages()
965 if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ in build_inv_iommu_pages()
966 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; in build_inv_iommu_pages()
990 cmd->data[0] = devid; in build_inv_iotlb_pages()
991 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pages()
992 cmd->data[1] = devid; in build_inv_iotlb_pages()
993 cmd->data[2] = lower_32_bits(address); in build_inv_iotlb_pages()
994 cmd->data[3] = upper_32_bits(address); in build_inv_iotlb_pages()
997 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iotlb_pages()
1007 cmd->data[0] = pasid; in build_inv_iommu_pasid()
1008 cmd->data[1] = domid; in build_inv_iommu_pasid()
1009 cmd->data[2] = lower_32_bits(address); in build_inv_iommu_pasid()
1010 cmd->data[3] = upper_32_bits(address); in build_inv_iommu_pasid()
1011 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; in build_inv_iommu_pasid()
1012 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iommu_pasid()
1014 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iommu_pasid()
1025 cmd->data[0] = devid; in build_inv_iotlb_pasid()
1026 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; in build_inv_iotlb_pasid()
1027 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pasid()
1028 cmd->data[1] = devid; in build_inv_iotlb_pasid()
1029 cmd->data[1] |= (pasid & 0xff) << 16; in build_inv_iotlb_pasid()
1030 cmd->data[2] = lower_32_bits(address); in build_inv_iotlb_pasid()
1031 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; in build_inv_iotlb_pasid()
1032 cmd->data[3] = upper_32_bits(address); in build_inv_iotlb_pasid()
1034 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; in build_inv_iotlb_pasid()
1043 cmd->data[0] = devid; in build_complete_ppr()
1045 cmd->data[1] = pasid; in build_complete_ppr()
1046 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; in build_complete_ppr()
1048 cmd->data[3] = tag & 0x1ff; in build_complete_ppr()
1049 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT; in build_complete_ppr()
1063 cmd->data[0] = devid; in build_inv_irt()
1078 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1080 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1087 return -EIO; in __iommu_queue_command_sync()
1094 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1103 iommu->need_sync = sync; in __iommu_queue_command_sync()
1115 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1117 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1138 if (!iommu->need_sync) in iommu_completion_wait()
1141 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1143 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1153 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
1247 * Command send function for flushing on-device TLB
1256 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1257 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1259 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1280 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1282 if (dev_data->pdev) in device_flush_dte()
1283 ret = pci_for_each_dma_alias(dev_data->pdev, in device_flush_dte()
1286 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1290 alias = amd_iommu_alias_table[dev_data->devid]; in device_flush_dte()
1291 if (alias != dev_data->devid) { in device_flush_dte()
1297 if (dev_data->ats.enabled) in device_flush_dte()
1315 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1318 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1328 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1330 if (!dev_data->ats.enabled) in __domain_flush_pages()
1345 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1356 if (domain && !domain->dev_iommu[i]) in domain_flush_complete()
1374 spin_lock_irqsave(&domain->lock, flags); in domain_flush_np_cache()
1377 spin_unlock_irqrestore(&domain->lock, flags); in domain_flush_np_cache()
1389 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1404 freelist = freelist->freelist; in free_page_list()
1413 p->freelist = freelist; in free_pt_page()
1487 if (pgtable->mode == PAGE_MODE_NONE) in free_pagetable()
1490 BUG_ON(pgtable->mode < PAGE_MODE_NONE || in free_pagetable()
1491 pgtable->mode > PAGE_MODE_6_LEVEL); in free_pagetable()
1493 root = (unsigned long)pgtable->root; in free_pagetable()
1494 freelist = free_sub_pt(root, pgtable->mode, freelist); in free_pagetable()
1517 spin_lock_irqsave(&domain->lock, flags); in increase_address_space()
1545 spin_unlock_irqrestore(&domain->lock, flags); in increase_address_space()
1569 * page-table. in alloc_pte()
1579 level = pgtable.mode - 1; in alloc_pte()
1635 level -= 1; in alloc_pte()
1667 level = pgtable.mode - 1; in fetch_pte()
1686 level -= 1; in fetch_pte()
1710 pr_warn("AMD-Vi: IOMMU pte changed since we read it\n"); in free_clear_pte()
1745 ret = -EINVAL; in iommu_map_page()
1752 ret = -ENOMEM; in iommu_map_page()
1782 spin_lock_irqsave(&dom->lock, flags); in iommu_map_page()
1784 * Flush domain TLB(s) and wait for completion. Any Device-Table in iommu_map_page()
1790 spin_unlock_irqrestore(&dom->lock, flags); in iommu_map_page()
1803 unsigned long long unmapped; in iommu_unmap_page() local
1809 unmapped = 0; in iommu_unmap_page()
1811 while (unmapped < page_size) { in iommu_unmap_page()
1823 bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; in iommu_unmap_page()
1824 unmapped += unmap_size; in iommu_unmap_page()
1827 BUG_ON(unmapped && !is_power_of_2(unmapped)); in iommu_unmap_page()
1829 return unmapped; in iommu_unmap_page()
1898 if (domain->glx == 2) in free_gcr3_table()
1899 free_gcr3_tbl_level2(domain->gcr3_tbl); in free_gcr3_table()
1900 else if (domain->glx == 1) in free_gcr3_table()
1901 free_gcr3_tbl_level1(domain->gcr3_tbl); in free_gcr3_table()
1903 BUG_ON(domain->glx != 0); in free_gcr3_table()
1905 free_page((unsigned long)domain->gcr3_tbl); in free_gcr3_table()
1916 if (pgtable->mode != PAGE_MODE_NONE) in set_dte_entry()
1917 pte_root = iommu_virt_to_phys(pgtable->root); in set_dte_entry()
1919 pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
1935 if (domain->flags & PD_IOMMUV2_MASK) { in set_dte_entry()
1936 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); in set_dte_entry()
1937 u64 glx = domain->glx; in set_dte_entry()
1962 flags |= domain->id; in set_dte_entry()
1970 * the previous kernel--if so, it needs to flush the translation cache in set_dte_entry()
1996 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1997 ats = dev_data->ats.enabled; in do_attach()
2000 dev_data->domain = domain; in do_attach()
2001 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2004 domain->dev_iommu[iommu->index] += 1; in do_attach()
2005 domain->dev_cnt += 1; in do_attach()
2009 set_dte_entry(dev_data->devid, domain, &pgtable, in do_attach()
2010 ats, dev_data->iommu_v2); in do_attach()
2011 clone_aliases(dev_data->pdev); in do_attach()
2018 struct protection_domain *domain = dev_data->domain; in do_detach()
2021 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2024 dev_data->domain = NULL; in do_detach()
2025 list_del(&dev_data->list); in do_detach()
2026 clear_dte_entry(dev_data->devid); in do_detach()
2027 clone_aliases(dev_data->pdev); in do_detach()
2038 /* decrease reference counters - needs to happen after the flushes */ in do_detach()
2039 domain->dev_iommu[iommu->index] -= 1; in do_detach()
2040 domain->dev_cnt -= 1; in do_detach()
2050 /* FIXME: Change generic reset-function to do the same */
2058 return -EINVAL; in pri_reset_while_enabled()
2078 /* Only allow access to user-accessible pages */ in pdev_iommuv2_enable()
2124 spin_lock_irqsave(&domain->lock, flags); in attach_device()
2128 spin_lock(&dev_data->lock); in attach_device()
2130 ret = -EBUSY; in attach_device()
2131 if (dev_data->domain != NULL) in attach_device()
2138 if (domain->flags & PD_IOMMUV2_MASK) { in attach_device()
2141 ret = -EINVAL; in attach_device()
2142 if (def_domain->type != IOMMU_DOMAIN_IDENTITY) in attach_device()
2145 if (dev_data->iommu_v2) { in attach_device()
2149 dev_data->ats.enabled = true; in attach_device()
2150 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2151 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
2155 dev_data->ats.enabled = true; in attach_device()
2156 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2165 * We might boot into a crash-kernel here. The crashed kernel in attach_device()
2174 spin_unlock(&dev_data->lock); in attach_device()
2176 spin_unlock_irqrestore(&domain->lock, flags); in attach_device()
2191 domain = dev_data->domain; in detach_device()
2193 spin_lock_irqsave(&domain->lock, flags); in detach_device()
2195 spin_lock(&dev_data->lock); in detach_device()
2203 if (WARN_ON(!dev_data->domain)) in detach_device()
2211 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2213 else if (dev_data->ats.enabled) in detach_device()
2216 dev_data->ats.enabled = false; in detach_device()
2219 spin_unlock(&dev_data->lock); in detach_device()
2221 spin_unlock_irqrestore(&domain->lock, flags); in detach_device()
2231 return ERR_PTR(-ENODEV); in amd_iommu_probe_device()
2240 return &iommu->iommu; in amd_iommu_probe_device()
2244 if (ret != -ENOTSUPP) in amd_iommu_probe_device()
2245 dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); in amd_iommu_probe_device()
2250 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
2262 /* Domains are initialized for this device - have a look what we ended up with */ in amd_iommu_probe_finalize()
2264 if (domain->type == IOMMU_DOMAIN_DMA) in amd_iommu_probe_finalize()
2293 switch (domain->type) { in amd_iommu_domain_get_attr()
2295 return -ENODEV; in amd_iommu_domain_get_attr()
2302 return -ENODEV; in amd_iommu_domain_get_attr()
2306 return -EINVAL; in amd_iommu_domain_get_attr()
2321 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2322 set_dte_entry(dev_data->devid, domain, pgtable, in update_device_table()
2323 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
2324 clone_aliases(dev_data->pdev); in update_device_table()
2399 spin_lock_irqsave(&domain->lock, flags); in cleanup_domain()
2401 while (!list_empty(&domain->dev_list)) { in cleanup_domain()
2402 entry = list_first_entry(&domain->dev_list, in cleanup_domain()
2404 BUG_ON(!entry->domain); in cleanup_domain()
2408 spin_unlock_irqrestore(&domain->lock, flags); in cleanup_domain()
2418 if (domain->id) in protection_domain_free()
2419 domain_id_free(domain->id); in protection_domain_free()
2434 spin_lock_init(&domain->lock); in protection_domain_init()
2435 domain->id = domain_id_alloc(); in protection_domain_init()
2436 if (!domain->id) in protection_domain_init()
2437 return -ENOMEM; in protection_domain_init()
2438 INIT_LIST_HEAD(&domain->dev_list); in protection_domain_init()
2443 return -ENOMEM; in protection_domain_init()
2482 domain->domain.geometry.aperture_start = 0; in amd_iommu_domain_alloc()
2483 domain->domain.geometry.aperture_end = ~0ULL; in amd_iommu_domain_alloc()
2484 domain->domain.geometry.force_aperture = true; in amd_iommu_domain_alloc()
2487 iommu_get_dma_cookie(&domain->domain) == -ENOMEM) in amd_iommu_domain_alloc()
2490 return &domain->domain; in amd_iommu_domain_alloc()
2504 if (domain->dev_cnt > 0) in amd_iommu_domain_free()
2507 BUG_ON(domain->dev_cnt != 0); in amd_iommu_domain_free()
2512 if (dom->type == IOMMU_DOMAIN_DMA) in amd_iommu_domain_free()
2513 iommu_put_dma_cookie(&domain->domain); in amd_iommu_domain_free()
2515 if (domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_free()
2535 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2544 (dom->type == IOMMU_DOMAIN_UNMANAGED)) in amd_iommu_detach_device()
2545 dev_data->use_vapic = 0; in amd_iommu_detach_device()
2560 return -EINVAL; in amd_iommu_attach_device()
2563 dev_data->defer_attach = false; in amd_iommu_attach_device()
2565 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2567 return -EINVAL; in amd_iommu_attach_device()
2569 if (dev_data->domain) in amd_iommu_attach_device()
2576 if (dom->type == IOMMU_DOMAIN_UNMANAGED) in amd_iommu_attach_device()
2577 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2579 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2599 return -EINVAL; in amd_iommu_map()
2644 offset_mask = pte_pgsize - 1; in amd_iommu_iova_to_phys()
2681 if (devid < entry->devid_start || devid > entry->devid_end) in amd_iommu_get_resv_regions()
2685 length = entry->address_end - entry->address_start; in amd_iommu_get_resv_regions()
2686 if (entry->prot & IOMMU_PROT_IR) in amd_iommu_get_resv_regions()
2688 if (entry->prot & IOMMU_PROT_IW) in amd_iommu_get_resv_regions()
2690 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) in amd_iommu_get_resv_regions()
2694 region = iommu_alloc_resv_region(entry->address_start, in amd_iommu_get_resv_regions()
2697 dev_err(dev, "Out of memory allocating dm-regions\n"); in amd_iommu_get_resv_regions()
2700 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2704 MSI_RANGE_END - MSI_RANGE_START + 1, in amd_iommu_get_resv_regions()
2708 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2711 HT_RANGE_END - HT_RANGE_START + 1, in amd_iommu_get_resv_regions()
2715 list_add_tail(&region->list, head); in amd_iommu_get_resv_regions()
2723 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2732 spin_lock_irqsave(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2735 spin_unlock_irqrestore(&dom->lock, flags); in amd_iommu_flush_iotlb_all()
2755 * encryption bit in their DMA-mask and require remapping. in amd_iommu_def_domain_type()
2757 if (!mem_encrypt_active() && dev_data->iommu_v2) in amd_iommu_def_domain_type()
2792 * DMA-API translation.
2815 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_direct_map()
2820 /* Remove page-table from domain */ in amd_iommu_domain_direct_map()
2826 /* Page-table is not visible to IOMMU anymore, so free it */ in amd_iommu_domain_direct_map()
2829 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_direct_map()
2840 return -EINVAL; in amd_iommu_domain_enable_v2()
2843 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9) in amd_iommu_domain_enable_v2()
2847 return -EINVAL; in amd_iommu_domain_enable_v2()
2849 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2856 ret = -EBUSY; in amd_iommu_domain_enable_v2()
2857 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) in amd_iommu_domain_enable_v2()
2860 ret = -ENOMEM; in amd_iommu_domain_enable_v2()
2861 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); in amd_iommu_domain_enable_v2()
2862 if (domain->gcr3_tbl == NULL) in amd_iommu_domain_enable_v2()
2865 domain->glx = levels; in amd_iommu_domain_enable_v2()
2866 domain->flags |= PD_IOMMUV2_MASK; in amd_iommu_domain_enable_v2()
2873 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_enable_v2()
2886 if (!(domain->flags & PD_IOMMUV2_MASK)) in __flush_pasid()
2887 return -EINVAL; in __flush_pasid()
2889 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); in __flush_pasid()
2896 if (domain->dev_iommu[i] == 0) in __flush_pasid()
2908 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2913 There might be non-IOMMUv2 capable devices in an IOMMUv2 in __flush_pasid()
2916 if (!dev_data->ats.enabled) in __flush_pasid()
2919 qdep = dev_data->ats.qdep; in __flush_pasid()
2920 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2922 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
2953 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_page()
2955 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_page()
2973 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_flush_tlb()
2975 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_flush_tlb()
3007 level -= 1; in __get_gcr3_pte()
3021 return -EINVAL; in __set_gcr3()
3023 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); in __set_gcr3()
3025 return -ENOMEM; in __set_gcr3()
3039 return -EINVAL; in __clear_gcr3()
3041 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); in __clear_gcr3()
3057 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3059 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_set_gcr3()
3071 spin_lock_irqsave(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3073 spin_unlock_irqrestore(&domain->lock, flags); in amd_iommu_domain_clear_gcr3()
3086 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
3087 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3089 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3090 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
3100 struct device *dev = &pdev->dev; in amd_iommu_get_v2_domain()
3106 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_get_v2_domain()
3107 pdomain = dev_data->domain; in amd_iommu_get_v2_domain()
3110 if (pdomain == NULL && dev_data->defer_attach) { in amd_iommu_get_v2_domain()
3111 dev_data->defer_attach = false; in amd_iommu_get_v2_domain()
3119 if (io_domain->type != IOMMU_DOMAIN_DMA) in amd_iommu_get_v2_domain()
3123 if (!(pdomain->flags & PD_IOMMUV2_MASK)) in amd_iommu_get_v2_domain()
3126 return &pdomain->domain; in amd_iommu_get_v2_domain()
3137 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_enable_device_erratum()
3138 dev_data->errata |= (1 << erratum); in amd_iommu_enable_device_erratum()
3149 return -EINVAL; in amd_iommu_device_info()
3152 return -EINVAL; in amd_iommu_device_info()
3157 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; in amd_iommu_device_info()
3161 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP; in amd_iommu_device_info()
3170 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP; in amd_iommu_device_info()
3171 info->max_pasids = min(pci_max_pasids(pdev), max_pasids); in amd_iommu_device_info()
3175 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP; in amd_iommu_device_info()
3177 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP; in amd_iommu_device_info()
3201 dte |= iommu_virt_to_phys(table->table); in set_dte_irq_entry()
3232 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL); in __alloc_irq_table()
3233 if (!table->table) { in __alloc_irq_table()
3237 raw_spin_lock_init(&table->lock); in __alloc_irq_table()
3240 memset(table->table, 0, in __alloc_irq_table()
3243 memset(table->table, 0, in __alloc_irq_table()
3331 kmem_cache_free(amd_iommu_irq_cache, new_table->table); in alloc_irq_table()
3346 return -ENODEV; in alloc_irq_index()
3350 return -ENODEV; in alloc_irq_index()
3355 raw_spin_lock_irqsave(&table->lock, flags); in alloc_irq_index()
3358 for (index = ALIGN(table->min_index, alignment), c = 0; in alloc_irq_index()
3360 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3369 for (; c != 0; --c) in alloc_irq_index()
3370 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
3372 index -= count - 1; in alloc_irq_index()
3379 index = -ENOSPC; in alloc_irq_index()
3382 raw_spin_unlock_irqrestore(&table->lock, flags); in alloc_irq_index()
3398 return -EINVAL; in modify_irte_ga()
3402 return -ENOMEM; in modify_irte_ga()
3404 raw_spin_lock_irqsave(&table->lock, flags); in modify_irte_ga()
3406 entry = (struct irte_ga *)table->table; in modify_irte_ga()
3409 ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, in modify_irte_ga()
3410 entry->lo.val, entry->hi.val, in modify_irte_ga()
3411 irte->lo.val, irte->hi.val); in modify_irte_ga()
3413 * We use cmpxchg16 to atomically update the 128-bit IRTE, in modify_irte_ga()
3421 data->ref = entry; in modify_irte_ga()
3423 raw_spin_unlock_irqrestore(&table->lock, flags); in modify_irte_ga()
3439 return -EINVAL; in modify_irte()
3443 return -ENOMEM; in modify_irte()
3445 raw_spin_lock_irqsave(&table->lock, flags); in modify_irte()
3446 table->table[index] = irte->val; in modify_irte()
3447 raw_spin_unlock_irqrestore(&table->lock, flags); in modify_irte()
3469 raw_spin_lock_irqsave(&table->lock, flags); in free_irte()
3470 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3471 raw_spin_unlock_irqrestore(&table->lock, flags); in free_irte()
3483 irte->val = 0; in irte_prepare()
3484 irte->fields.vector = vector; in irte_prepare()
3485 irte->fields.int_type = delivery_mode; in irte_prepare()
3486 irte->fields.destination = dest_apicid; in irte_prepare()
3487 irte->fields.dm = dest_mode; in irte_prepare()
3488 irte->fields.valid = 1; in irte_prepare()
3497 irte->lo.val = 0; in irte_ga_prepare()
3498 irte->hi.val = 0; in irte_ga_prepare()
3499 irte->lo.fields_remap.int_type = delivery_mode; in irte_ga_prepare()
3500 irte->lo.fields_remap.dm = dest_mode; in irte_ga_prepare()
3501 irte->hi.fields.vector = vector; in irte_ga_prepare()
3502 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid); in irte_ga_prepare()
3503 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid); in irte_ga_prepare()
3504 irte->lo.fields_remap.valid = 1; in irte_ga_prepare()
3511 irte->fields.valid = 1; in irte_activate()
3519 irte->lo.fields_remap.valid = 1; in irte_ga_activate()
3527 irte->fields.valid = 0; in irte_deactivate()
3535 irte->lo.fields_remap.valid = 0; in irte_ga_deactivate()
3544 irte->fields.vector = vector; in irte_set_affinity()
3545 irte->fields.destination = dest_apicid; in irte_set_affinity()
3554 if (!irte->lo.fields_remap.guest_mode) { in irte_ga_set_affinity()
3555 irte->hi.fields.vector = vector; in irte_ga_set_affinity()
3556 irte->lo.fields_remap.destination = in irte_ga_set_affinity()
3558 irte->hi.fields.destination = in irte_ga_set_affinity()
3567 table->table[index] = IRTE_ALLOCATED; in irte_set_allocated()
3572 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_set_allocated()
3575 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3576 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3577 irte->hi.fields.vector = 0xff; in irte_ga_set_allocated()
3582 union irte *ptr = (union irte *)table->table; in irte_is_allocated()
3585 return irte->val != 0; in irte_is_allocated()
3590 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_is_allocated()
3593 return irte->hi.fields.vector != 0; in irte_ga_is_allocated()
3598 table->table[index] = 0; in irte_clear_allocated()
3603 struct irte_ga *ptr = (struct irte_ga *)table->table; in irte_ga_clear_allocated()
3606 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3607 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3612 switch (info->type) { in get_devid()
3615 return get_ioapic_devid(info->devid); in get_devid()
3618 return get_hpet_devid(info->devid); in get_devid()
3621 return get_device_id(msi_desc_to_dev(info->desc)); in get_devid()
3624 return -1; in get_devid()
3636 switch (info->type) { in get_irq_domain_for_devid()
3639 return iommu->ir_domain; in get_irq_domain_for_devid()
3673 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_prepare_irte()
3674 struct msi_msg *msg = &data->msi_entry; in irq_remapping_prepare_irte()
3681 data->irq_2_irte.devid = devid; in irq_remapping_prepare_irte()
3682 data->irq_2_irte.index = index + sub_handle; in irq_remapping_prepare_irte()
3683 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode, in irq_remapping_prepare_irte()
3684 apic->irq_dest_mode, irq_cfg->vector, in irq_remapping_prepare_irte()
3685 irq_cfg->dest_apicid, devid); in irq_remapping_prepare_irte()
3687 switch (info->type) { in irq_remapping_prepare_irte()
3690 entry = info->ioapic.entry; in irq_remapping_prepare_irte()
3691 info->ioapic.entry = NULL; in irq_remapping_prepare_irte()
3693 entry->vector = index; in irq_remapping_prepare_irte()
3694 entry->mask = 0; in irq_remapping_prepare_irte()
3695 entry->trigger = info->ioapic.trigger; in irq_remapping_prepare_irte()
3696 entry->polarity = info->ioapic.polarity; in irq_remapping_prepare_irte()
3698 if (info->ioapic.trigger) in irq_remapping_prepare_irte()
3699 entry->mask = 1; in irq_remapping_prepare_irte()
3705 msg->address_hi = MSI_ADDR_BASE_HI; in irq_remapping_prepare_irte()
3706 msg->address_lo = MSI_ADDR_BASE_LO; in irq_remapping_prepare_irte()
3707 msg->data = irte_info->index; in irq_remapping_prepare_irte()
3747 return -EINVAL; in irq_remapping_alloc()
3748 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI && in irq_remapping_alloc()
3749 info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX) in irq_remapping_alloc()
3750 return -EINVAL; in irq_remapping_alloc()
3756 if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) in irq_remapping_alloc()
3757 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; in irq_remapping_alloc()
3761 return -EINVAL; in irq_remapping_alloc()
3767 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { in irq_remapping_alloc()
3773 if (!table->min_index) { in irq_remapping_alloc()
3778 table->min_index = 32; in irq_remapping_alloc()
3781 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3783 WARN_ON(table->min_index != 32); in irq_remapping_alloc()
3784 index = info->ioapic.pin; in irq_remapping_alloc()
3786 index = -ENOMEM; in irq_remapping_alloc()
3788 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || in irq_remapping_alloc()
3789 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { in irq_remapping_alloc()
3790 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); in irq_remapping_alloc()
3793 msi_desc_to_pci_dev(info->desc)); in irq_remapping_alloc()
3808 ret = -EINVAL; in irq_remapping_alloc()
3812 ret = -ENOMEM; in irq_remapping_alloc()
3818 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); in irq_remapping_alloc()
3820 data->entry = kzalloc(sizeof(struct irte_ga), in irq_remapping_alloc()
3822 if (!data->entry) { in irq_remapping_alloc()
3827 irq_data->hwirq = (devid << 16) + i; in irq_remapping_alloc()
3828 irq_data->chip_data = data; in irq_remapping_alloc()
3829 irq_data->chip = &amd_ir_chip; in irq_remapping_alloc()
3837 for (i--; i >= 0; i--) { in irq_remapping_alloc()
3840 kfree(irq_data->chip_data); in irq_remapping_alloc()
3859 if (irq_data && irq_data->chip_data) { in irq_remapping_free()
3860 data = irq_data->chip_data; in irq_remapping_free()
3861 irte_info = &data->irq_2_irte; in irq_remapping_free()
3862 free_irte(irte_info->devid, irte_info->index); in irq_remapping_free()
3863 kfree(data->entry); in irq_remapping_free()
3878 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_activate()
3879 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_activate()
3880 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_activate()
3886 iommu->irte_ops->activate(data->entry, irte_info->devid, in irq_remapping_activate()
3887 irte_info->index); in irq_remapping_activate()
3895 struct amd_ir_data *data = irq_data->chip_data; in irq_remapping_deactivate()
3896 struct irq_2_irte *irte_info = &data->irq_2_irte; in irq_remapping_deactivate()
3897 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_deactivate()
3900 iommu->irte_ops->deactivate(data->entry, irte_info->devid, in irq_remapping_deactivate()
3901 irte_info->index); in irq_remapping_deactivate()
3914 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_activate_guest_mode()
3920 valid = entry->lo.fields_vapic.valid; in amd_iommu_activate_guest_mode()
3922 entry->lo.val = 0; in amd_iommu_activate_guest_mode()
3923 entry->hi.val = 0; in amd_iommu_activate_guest_mode()
3925 entry->lo.fields_vapic.valid = valid; in amd_iommu_activate_guest_mode()
3926 entry->lo.fields_vapic.guest_mode = 1; in amd_iommu_activate_guest_mode()
3927 entry->lo.fields_vapic.ga_log_intr = 1; in amd_iommu_activate_guest_mode()
3928 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; in amd_iommu_activate_guest_mode()
3929 entry->hi.fields.vector = ir_data->ga_vector; in amd_iommu_activate_guest_mode()
3930 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; in amd_iommu_activate_guest_mode()
3932 return modify_irte_ga(ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3933 ir_data->irq_2_irte.index, entry, ir_data); in amd_iommu_activate_guest_mode()
3940 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_deactivate_guest_mode()
3941 struct irq_cfg *cfg = ir_data->cfg; in amd_iommu_deactivate_guest_mode()
3945 !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_deactivate_guest_mode()
3948 valid = entry->lo.fields_remap.valid; in amd_iommu_deactivate_guest_mode()
3950 entry->lo.val = 0; in amd_iommu_deactivate_guest_mode()
3951 entry->hi.val = 0; in amd_iommu_deactivate_guest_mode()
3953 entry->lo.fields_remap.valid = valid; in amd_iommu_deactivate_guest_mode()
3954 entry->lo.fields_remap.dm = apic->irq_dest_mode; in amd_iommu_deactivate_guest_mode()
3955 entry->lo.fields_remap.int_type = apic->irq_delivery_mode; in amd_iommu_deactivate_guest_mode()
3956 entry->hi.fields.vector = cfg->vector; in amd_iommu_deactivate_guest_mode()
3957 entry->lo.fields_remap.destination = in amd_iommu_deactivate_guest_mode()
3958 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3959 entry->hi.fields.destination = in amd_iommu_deactivate_guest_mode()
3960 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); in amd_iommu_deactivate_guest_mode()
3962 return modify_irte_ga(ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3963 ir_data->irq_2_irte.index, entry, ir_data); in amd_iommu_deactivate_guest_mode()
3972 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; in amd_ir_set_vcpu_affinity()
3973 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_vcpu_affinity()
3974 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_vcpu_affinity()
3975 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); in amd_ir_set_vcpu_affinity()
3981 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
3984 ir_data->cfg = irqd_cfg(data); in amd_ir_set_vcpu_affinity()
3985 pi_data->ir_data = ir_data; in amd_ir_set_vcpu_affinity()
3994 pi_data->is_guest_mode = false; in amd_ir_set_vcpu_affinity()
3997 iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_vcpu_affinity()
3999 return -EINVAL; in amd_ir_set_vcpu_affinity()
4001 pi_data->prev_ga_tag = ir_data->cached_ga_tag; in amd_ir_set_vcpu_affinity()
4002 if (pi_data->is_guest_mode) { in amd_ir_set_vcpu_affinity()
4003 ir_data->ga_root_ptr = (pi_data->base >> 12); in amd_ir_set_vcpu_affinity()
4004 ir_data->ga_vector = vcpu_pi_info->vector; in amd_ir_set_vcpu_affinity()
4005 ir_data->ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
4008 ir_data->cached_ga_tag = pi_data->ga_tag; in amd_ir_set_vcpu_affinity()
4017 ir_data->cached_ga_tag = 0; in amd_ir_set_vcpu_affinity()
4034 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, in amd_ir_update_irte()
4035 irte_info->index, cfg->vector, in amd_ir_update_irte()
4036 cfg->dest_apicid); in amd_ir_update_irte()
4042 struct amd_ir_data *ir_data = data->chip_data; in amd_ir_set_affinity()
4043 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; in amd_ir_set_affinity()
4045 struct irq_data *parent = data->parent_data; in amd_ir_set_affinity()
4046 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_affinity()
4050 return -ENODEV; in amd_ir_set_affinity()
4052 ret = parent->chip->irq_set_affinity(parent, mask, force); in amd_ir_set_affinity()
4069 struct amd_ir_data *ir_data = irq_data->chip_data; in ir_compose_msi_msg()
4071 *msg = ir_data->msi_entry; in ir_compose_msi_msg()
4075 .name = "AMD-IR",
4086 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
4088 return -ENOMEM; in amd_iommu_create_irq_domain()
4089 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
4090 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
4092 return -ENOMEM; in amd_iommu_create_irq_domain()
4095 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
4096 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
4097 "AMD-IR-MSI", in amd_iommu_create_irq_domain()
4098 iommu->index); in amd_iommu_create_irq_domain()
4108 int devid = ir_data->irq_2_irte.devid; in amd_iommu_update_ga()
4109 struct irte_ga *entry = (struct irte_ga *) ir_data->entry; in amd_iommu_update_ga()
4110 struct irte_ga *ref = (struct irte_ga *) ir_data->ref; in amd_iommu_update_ga()
4113 !ref || !entry || !entry->lo.fields_vapic.guest_mode) in amd_iommu_update_ga()
4118 return -ENODEV; in amd_iommu_update_ga()
4122 return -ENODEV; in amd_iommu_update_ga()
4124 raw_spin_lock_irqsave(&table->lock, flags); in amd_iommu_update_ga()
4126 if (ref->lo.fields_vapic.guest_mode) { in amd_iommu_update_ga()
4128 ref->lo.fields_vapic.destination = in amd_iommu_update_ga()
4130 ref->hi.fields.destination = in amd_iommu_update_ga()
4133 ref->lo.fields_vapic.is_run = is_run; in amd_iommu_update_ga()
4137 raw_spin_unlock_irqrestore(&table->lock, flags); in amd_iommu_update_ga()