Lines Matching full:iommu

19 #include <linux/amd-iommu.h>
25 #include <asm/iommu.h>
102 * structure describing one IOMMU in the ACPI table. Typically followed by one
122 * A device entry describing which devices a specific IOMMU translates and
138 * An AMD IOMMU memory definition structure. It defines things like exclusion
203 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
209 * The rlookup table is used to find the IOMMU which is responsible
222 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
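The comment above notes that the AMD IOMMU supports up to 2^16 protection domains tracked in a bitmap. As a hedged illustration only (not the kernel's exact allocator; the names below are assumptions), a domain-ID allocator over such a bitmap could look like this:

/* Illustrative sketch: a domain-ID allocator over a 2^16-bit bitmap,
 * in the spirit of the bitmap described above. Names are assumptions. */
#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define MAX_DOMAIN_IDS (1 << 16)

static DECLARE_BITMAP(example_pd_bitmap, MAX_DOMAIN_IDS);
static DEFINE_SPINLOCK(example_pd_lock);

static int example_domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&example_pd_lock, flags);
	/* Domain ID 0 is assumed reserved, so search from 1. */
	id = find_next_zero_bit(example_pd_bitmap, MAX_DOMAIN_IDS, 1);
	if (id < MAX_DOMAIN_IDS)
		__set_bit(id, example_pd_bitmap);
	else
		id = -1;
	spin_unlock_irqrestore(&example_pd_lock, flags);

	return id;
}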
267 bool translation_pre_enabled(struct amd_iommu *iommu) in translation_pre_enabled() argument
269 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
273 static void clear_translation_pre_enabled(struct amd_iommu *iommu) in clear_translation_pre_enabled() argument
275 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
278 static void init_translation_status(struct amd_iommu *iommu) in init_translation_status() argument
282 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in init_translation_status()
284 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
310 struct amd_iommu *iommu; in check_feature_on_all_iommus() local
312 for_each_iommu(iommu) { in check_feature_on_all_iommus()
313 ret = iommu_feature(iommu, mask); in check_feature_on_all_iommus()
327 static void __init early_iommu_features_init(struct amd_iommu *iommu, in early_iommu_features_init() argument
331 iommu->features = h->efr_reg; in early_iommu_features_init()
336 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) in iommu_read_l1() argument
340 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_read_l1()
341 pci_read_config_dword(iommu->dev, 0xfc, &val); in iommu_read_l1()
345 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) in iommu_write_l1() argument
347 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); in iommu_write_l1()
348 pci_write_config_dword(iommu->dev, 0xfc, val); in iommu_write_l1()
349 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); in iommu_write_l1()
352 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) in iommu_read_l2() argument
356 pci_write_config_dword(iommu->dev, 0xf0, address); in iommu_read_l2()
357 pci_read_config_dword(iommu->dev, 0xf4, &val); in iommu_read_l2()
361 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) in iommu_write_l2() argument
363 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); in iommu_write_l2()
364 pci_write_config_dword(iommu->dev, 0xf4, val); in iommu_write_l2()
369 * AMD IOMMU MMIO register space handling functions
371 * These functions are used to program the IOMMU device registers in
377 * This function sets the exclusion range in the IOMMU. DMA accesses to the
380 static void iommu_set_exclusion_range(struct amd_iommu *iommu) in iommu_set_exclusion_range() argument
382 u64 start = iommu->exclusion_start & PAGE_MASK; in iommu_set_exclusion_range()
383 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; in iommu_set_exclusion_range()
386 if (!iommu->exclusion_start) in iommu_set_exclusion_range()
390 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_exclusion_range()
394 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_exclusion_range()
398 static void iommu_set_cwwb_range(struct amd_iommu *iommu) in iommu_set_cwwb_range() argument
400 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); in iommu_set_cwwb_range()
403 if (!iommu_feature(iommu, FEATURE_SNP)) in iommu_set_cwwb_range()
410 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, in iommu_set_cwwb_range()
417 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, in iommu_set_cwwb_range()
421 /* Programs the physical address of the device table into the IOMMU hardware */
422 static void iommu_set_device_table(struct amd_iommu *iommu) in iommu_set_device_table() argument
426 BUG_ON(iommu->mmio_base == NULL); in iommu_set_device_table()
430 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, in iommu_set_device_table()
434 /* Generic functions to enable/disable certain features of the IOMMU. */
435 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) in iommu_feature_enable() argument
439 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
441 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_enable()
444 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) in iommu_feature_disable() argument
448 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
450 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_feature_disable()
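Only lines containing "iommu" are listed, so the bit-manipulation step between the readq() and writeq() in iommu_feature_enable()/iommu_feature_disable() is elided. A hedged reconstruction of the usual read-modify-write pattern on the control register:

/* Sketch of the elided read-modify-write on the MMIO control register.
 * Only the readq()/writeq() lines appear in the listing above; the
 * set/clear line is inferred. */
static void example_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);		/* set the feature bit */
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void example_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);		/* clear the feature bit */
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}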
453 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) in iommu_set_inv_tlb_timeout() argument
457 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
460 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); in iommu_set_inv_tlb_timeout()
464 static void iommu_enable(struct amd_iommu *iommu) in iommu_enable() argument
466 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); in iommu_enable()
469 static void iommu_disable(struct amd_iommu *iommu) in iommu_disable() argument
471 if (!iommu->mmio_base) in iommu_disable()
475 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable()
478 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); in iommu_disable()
479 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable()
481 /* Disable IOMMU GA_LOG */ in iommu_disable()
482 iommu_feature_disable(iommu, CONTROL_GALOG_EN); in iommu_disable()
483 iommu_feature_disable(iommu, CONTROL_GAINT_EN); in iommu_disable()
485 /* Disable IOMMU hardware itself */ in iommu_disable()
486 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); in iommu_disable()
490 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
505 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) in iommu_unmap_mmio_space() argument
507 if (iommu->mmio_base) in iommu_unmap_mmio_space()
508 iounmap(iommu->mmio_base); in iommu_unmap_mmio_space()
509 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); in iommu_unmap_mmio_space()
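The mapping counterpart, iommu_map_mmio_space(), does not contain the search term and so does not appear above. A minimal sketch of the mapping side, assuming the request_mem_region()/ioremap() pairing implied by the iounmap()/release_mem_region() calls in the unmap path:

/* Hedged sketch of the mapping counterpart to iommu_unmap_mmio_space().
 * The resource name and error handling are assumptions; only the
 * request/ioremap pairing is implied by the unmap code above. */
static u8 __iomem * __init example_map_mmio_space(u64 phys, u64 size)
{
	if (!request_mem_region(phys, size, "amd_iommu"))
		return NULL;

	return (u8 __iomem *)ioremap(phys, size);
}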
530 * The functions below belong to the first pass of AMD IOMMU ACPI table
554 * After reading the highest device id from the IOMMU PCI capability header
644 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
651 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
652 * write commands to that buffer later and the IOMMU will execute them
655 static int __init alloc_command_buffer(struct amd_iommu *iommu) in alloc_command_buffer() argument
657 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in alloc_command_buffer()
660 return iommu->cmd_buf ? 0 : -ENOMEM; in alloc_command_buffer()
664 * This function restarts event logging in case the IOMMU experienced
667 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) in amd_iommu_restart_event_logging() argument
669 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in amd_iommu_restart_event_logging()
670 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in amd_iommu_restart_event_logging()
674 * This function resets the command buffer if the IOMMU stopped fetching
677 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) in amd_iommu_reset_cmd_buffer() argument
679 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
681 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in amd_iommu_reset_cmd_buffer()
682 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in amd_iommu_reset_cmd_buffer()
683 iommu->cmd_buf_head = 0; in amd_iommu_reset_cmd_buffer()
684 iommu->cmd_buf_tail = 0; in amd_iommu_reset_cmd_buffer()
686 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); in amd_iommu_reset_cmd_buffer()
693 static void iommu_enable_command_buffer(struct amd_iommu *iommu) in iommu_enable_command_buffer() argument
697 BUG_ON(iommu->cmd_buf == NULL); in iommu_enable_command_buffer()
699 entry = iommu_virt_to_phys(iommu->cmd_buf); in iommu_enable_command_buffer()
702 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, in iommu_enable_command_buffer()
705 amd_iommu_reset_cmd_buffer(iommu); in iommu_enable_command_buffer()
711 static void iommu_disable_command_buffer(struct amd_iommu *iommu) in iommu_disable_command_buffer() argument
713 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); in iommu_disable_command_buffer()
716 static void __init free_command_buffer(struct amd_iommu *iommu) in free_command_buffer() argument
718 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); in free_command_buffer()
721 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, in iommu_alloc_4k_pages() argument
728 iommu_feature(iommu, FEATURE_SNP) && in iommu_alloc_4k_pages()
737 /* allocates the memory where the IOMMU will log its events to */
738 static int __init alloc_event_buffer(struct amd_iommu *iommu) in alloc_event_buffer() argument
740 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, in alloc_event_buffer()
743 return iommu->evt_buf ? 0 : -ENOMEM; in alloc_event_buffer()
746 static void iommu_enable_event_buffer(struct amd_iommu *iommu) in iommu_enable_event_buffer() argument
750 BUG_ON(iommu->evt_buf == NULL); in iommu_enable_event_buffer()
752 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; in iommu_enable_event_buffer()
754 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, in iommu_enable_event_buffer()
758 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_enable_event_buffer()
759 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_enable_event_buffer()
761 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); in iommu_enable_event_buffer()
767 static void iommu_disable_event_buffer(struct amd_iommu *iommu) in iommu_disable_event_buffer() argument
769 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); in iommu_disable_event_buffer()
772 static void __init free_event_buffer(struct amd_iommu *iommu) in free_event_buffer() argument
774 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); in free_event_buffer()
777 /* allocates the memory where the IOMMU will log peripheral page requests to */
778 static int __init alloc_ppr_log(struct amd_iommu *iommu) in alloc_ppr_log() argument
780 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, in alloc_ppr_log()
783 return iommu->ppr_log ? 0 : -ENOMEM; in alloc_ppr_log()
786 static void iommu_enable_ppr_log(struct amd_iommu *iommu) in iommu_enable_ppr_log() argument
790 if (iommu->ppr_log == NULL) in iommu_enable_ppr_log()
793 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; in iommu_enable_ppr_log()
795 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, in iommu_enable_ppr_log()
799 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_enable_ppr_log()
800 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_enable_ppr_log()
802 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); in iommu_enable_ppr_log()
803 iommu_feature_enable(iommu, CONTROL_PPR_EN); in iommu_enable_ppr_log()
806 static void __init free_ppr_log(struct amd_iommu *iommu) in free_ppr_log() argument
808 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); in free_ppr_log()
811 static void free_ga_log(struct amd_iommu *iommu) in free_ga_log() argument
814 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); in free_ga_log()
815 free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); in free_ga_log()
819 static int iommu_ga_log_enable(struct amd_iommu *iommu) in iommu_ga_log_enable() argument
825 if (!iommu->ga_log) in iommu_ga_log_enable()
829 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_ga_log_enable()
833 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; in iommu_ga_log_enable()
834 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, in iommu_ga_log_enable()
836 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & in iommu_ga_log_enable()
838 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, in iommu_ga_log_enable()
840 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_ga_log_enable()
841 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_ga_log_enable()
844 iommu_feature_enable(iommu, CONTROL_GAINT_EN); in iommu_ga_log_enable()
845 iommu_feature_enable(iommu, CONTROL_GALOG_EN); in iommu_ga_log_enable()
848 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in iommu_ga_log_enable()
860 static int iommu_init_ga_log(struct amd_iommu *iommu) in iommu_init_ga_log() argument
866 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in iommu_init_ga_log()
868 if (!iommu->ga_log) in iommu_init_ga_log()
871 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in iommu_init_ga_log()
873 if (!iommu->ga_log_tail) in iommu_init_ga_log()
878 free_ga_log(iommu); in iommu_init_ga_log()
885 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) in alloc_cwwb_sem() argument
887 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1); in alloc_cwwb_sem()
889 return iommu->cmd_sem ? 0 : -ENOMEM; in alloc_cwwb_sem()
892 static void __init free_cwwb_sem(struct amd_iommu *iommu) in free_cwwb_sem() argument
894 if (iommu->cmd_sem) in free_cwwb_sem()
895 free_page((unsigned long)iommu->cmd_sem); in free_cwwb_sem()
898 static void iommu_enable_xt(struct amd_iommu *iommu) in iommu_enable_xt() argument
907 iommu_feature_enable(iommu, CONTROL_XT_EN); in iommu_enable_xt()
911 static void iommu_enable_gt(struct amd_iommu *iommu) in iommu_enable_gt() argument
913 if (!iommu_feature(iommu, FEATURE_GT)) in iommu_enable_gt()
916 iommu_feature_enable(iommu, CONTROL_GT_EN); in iommu_enable_gt()
943 struct amd_iommu *iommu; in copy_device_table() local
952 for_each_iommu(iommu) { in copy_device_table()
954 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); in copy_device_table()
955 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); in copy_device_table()
958 pr_err("IOMMU:%d should use the same dev table as others!\n", in copy_device_table()
959 iommu->index); in copy_device_table()
966 pr_err("The device table size of IOMMU:%d is not expected!\n", in copy_device_table()
967 iommu->index); in copy_device_table()
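The comparison that triggers the "should use the same dev table as others" error is not visible above because it does not contain the search term. A hedged sketch of the check it hints at: each IOMMU's device-table base register is read back and compared against the base recorded from the first IOMMU. The helper name and old_devtb_phys are assumptions for illustration.

/* Hedged sketch: verify an IOMMU's device-table base matches the base
 * seen on the first IOMMU (old_devtb_phys is assumed to hold it). */
static bool example_devtb_base_matches(struct amd_iommu *iommu,
				       u64 old_devtb_phys)
{
	u32 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
	u32 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
	u64 entry = (((u64)hi) << 32) | lo;

	return old_devtb_phys == (entry & PAGE_MASK);
}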
1048 /* Writes the specific IOMMU for a device into the rlookup table */
1049 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) in set_iommu_for_device() argument
1051 amd_iommu_rlookup_table[devid] = iommu; in set_iommu_for_device()
1058 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, in set_dev_entry_from_acpi() argument
1078 set_iommu_for_device(iommu, devid); in set_dev_entry_from_acpi()
1189 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1192 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, in init_iommu_from_acpi() argument
1214 iommu->acpi_flags = h->flags; in init_iommu_from_acpi()
1238 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); in init_iommu_from_acpi()
1250 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1280 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); in init_iommu_from_acpi()
1281 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); in init_iommu_from_acpi()
1313 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1341 set_dev_entry_from_acpi(iommu, in init_iommu_from_acpi()
1344 set_dev_entry_from_acpi(iommu, dev_i, in init_iommu_from_acpi()
1380 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1445 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1459 static void __init free_iommu_one(struct amd_iommu *iommu) in free_iommu_one() argument
1461 free_cwwb_sem(iommu); in free_iommu_one()
1462 free_command_buffer(iommu); in free_iommu_one()
1463 free_event_buffer(iommu); in free_iommu_one()
1464 free_ppr_log(iommu); in free_iommu_one()
1465 free_ga_log(iommu); in free_iommu_one()
1466 iommu_unmap_mmio_space(iommu); in free_iommu_one()
1471 struct amd_iommu *iommu, *next; in free_iommu_all() local
1473 for_each_iommu_safe(iommu, next) { in free_iommu_all()
1474 list_del(&iommu->list); in free_iommu_all()
1475 free_iommu_one(iommu); in free_iommu_all()
1476 kfree(iommu); in free_iommu_all()
1481 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1486 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) in amd_iommu_erratum_746_workaround() argument
1495 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1496 pci_read_config_dword(iommu->dev, 0xf4, &value); in amd_iommu_erratum_746_workaround()
1502 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); in amd_iommu_erratum_746_workaround()
1504 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); in amd_iommu_erratum_746_workaround()
1505 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); in amd_iommu_erratum_746_workaround()
1508 pci_write_config_dword(iommu->dev, 0xf0, 0x90); in amd_iommu_erratum_746_workaround()
1512 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1517 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) in amd_iommu_ats_write_check_workaround() argument
1527 value = iommu_read_l2(iommu, 0x47); in amd_iommu_ats_write_check_workaround()
1533 iommu_write_l2(iommu, 0x47, value | BIT(0)); in amd_iommu_ats_write_check_workaround()
1535 pci_info(iommu->dev, "Applying ATS write check workaround\n"); in amd_iommu_ats_write_check_workaround()
1539 * This function glues the initialization function for one IOMMU
1541 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1543 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) in init_iommu_one() argument
1547 raw_spin_lock_init(&iommu->lock); in init_iommu_one()
1548 iommu->cmd_sem_val = 0; in init_iommu_one()
1550 /* Add IOMMU to internal data structures */ in init_iommu_one()
1551 list_add_tail(&iommu->list, &amd_iommu_list); in init_iommu_one()
1552 iommu->index = amd_iommus_present++; in init_iommu_one()
1554 if (unlikely(iommu->index >= MAX_IOMMUS)) { in init_iommu_one()
1559 /* Index is fine - add IOMMU to the array */ in init_iommu_one()
1560 amd_iommus[iommu->index] = iommu; in init_iommu_one()
1563 * Copy data from ACPI table entry to the iommu struct in init_iommu_one()
1565 iommu->devid = h->devid; in init_iommu_one()
1566 iommu->cap_ptr = h->cap_ptr; in init_iommu_one()
1567 iommu->pci_seg = h->pci_seg; in init_iommu_one()
1568 iommu->mmio_phys = h->mmio_phys; in init_iommu_one()
1576 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1578 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1592 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; in init_iommu_one()
1594 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; in init_iommu_one()
1609 * the IOMMU MMIO access to MSI capability block registers in init_iommu_one()
1617 early_iommu_features_init(iommu, h); in init_iommu_one()
1624 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, in init_iommu_one()
1625 iommu->mmio_phys_end); in init_iommu_one()
1626 if (!iommu->mmio_base) in init_iommu_one()
1629 if (alloc_cwwb_sem(iommu)) in init_iommu_one()
1632 if (alloc_command_buffer(iommu)) in init_iommu_one()
1635 if (alloc_event_buffer(iommu)) in init_iommu_one()
1638 iommu->int_enabled = false; in init_iommu_one()
1640 init_translation_status(iommu); in init_iommu_one()
1641 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_iommu_one()
1642 iommu_disable(iommu); in init_iommu_one()
1643 clear_translation_pre_enabled(iommu); in init_iommu_one()
1644 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", in init_iommu_one()
1645 iommu->index); in init_iommu_one()
1648 amd_iommu_pre_enabled = translation_pre_enabled(iommu); in init_iommu_one()
1650 ret = init_iommu_from_acpi(iommu, h); in init_iommu_one()
1654 ret = amd_iommu_create_irq_domain(iommu); in init_iommu_one()
1659 * Make sure IOMMU is not considered to translate itself. The IVRS in init_iommu_one()
1662 amd_iommu_rlookup_table[iommu->devid] = NULL; in init_iommu_one()
1694 * Iterates over all IOMMU entries in the ACPI table, allocates the
1695 * IOMMU structure and initializes it with init_iommu_one()
1701 struct amd_iommu *iommu; in init_iommu_all() local
1719 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); in init_iommu_all()
1720 if (iommu == NULL) in init_iommu_all()
1723 ret = init_iommu_one(iommu, h); in init_iommu_all()
1735 static void init_iommu_perf_ctr(struct amd_iommu *iommu) in init_iommu_perf_ctr() argument
1738 struct pci_dev *pdev = iommu->dev; in init_iommu_perf_ctr()
1740 if (!iommu_feature(iommu, FEATURE_PC)) in init_iommu_perf_ctr()
1745 pci_info(pdev, "IOMMU performance counters supported\n"); in init_iommu_perf_ctr()
1747 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); in init_iommu_perf_ctr()
1748 iommu->max_banks = (u8) ((val >> 12) & 0x3f); in init_iommu_perf_ctr()
1749 iommu->max_counters = (u8) ((val >> 7) & 0xf); in init_iommu_perf_ctr()
1758 struct amd_iommu *iommu = dev_to_amd_iommu(dev); in amd_iommu_show_cap() local
1759 return sprintf(buf, "%x\n", iommu->cap); in amd_iommu_show_cap()
1767 struct amd_iommu *iommu = dev_to_amd_iommu(dev); in amd_iommu_show_features() local
1768 return sprintf(buf, "%llx\n", iommu->features); in amd_iommu_show_features()
1779 .name = "amd-iommu",
1790 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1793 static void __init late_iommu_features_init(struct amd_iommu *iommu) in late_iommu_features_init() argument
1797 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) in late_iommu_features_init()
1801 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); in late_iommu_features_init()
1803 if (!iommu->features) { in late_iommu_features_init()
1804 iommu->features = features; in late_iommu_features_init()
1812 if (features != iommu->features) in late_iommu_features_init()
1814 features, iommu->features); in late_iommu_features_init()
1817 static int __init iommu_init_pci(struct amd_iommu *iommu) in iommu_init_pci() argument
1819 int cap_ptr = iommu->cap_ptr; in iommu_init_pci()
1822 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
1823 iommu->devid & 0xff); in iommu_init_pci()
1824 if (!iommu->dev) in iommu_init_pci()
1827 /* Prevent binding other PCI device drivers to IOMMU devices */ in iommu_init_pci()
1828 iommu->dev->match_driver = false; in iommu_init_pci()
1830 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, in iommu_init_pci()
1831 &iommu->cap); in iommu_init_pci()
1833 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) in iommu_init_pci()
1836 late_iommu_features_init(iommu); in iommu_init_pci()
1838 if (iommu_feature(iommu, FEATURE_GT)) { in iommu_init_pci()
1843 pasmax = iommu->features & FEATURE_PASID_MASK; in iommu_init_pci()
1851 glxval = iommu->features & FEATURE_GLXVAL_MASK; in iommu_init_pci()
1860 if (iommu_feature(iommu, FEATURE_GT) && in iommu_init_pci()
1861 iommu_feature(iommu, FEATURE_PPR)) { in iommu_init_pci()
1862 iommu->is_iommu_v2 = true; in iommu_init_pci()
1866 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) in iommu_init_pci()
1869 ret = iommu_init_ga_log(iommu); in iommu_init_pci()
1873 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) in iommu_init_pci()
1876 init_iommu_perf_ctr(iommu); in iommu_init_pci()
1878 if (is_rd890_iommu(iommu->dev)) { in iommu_init_pci()
1881 iommu->root_pdev = in iommu_init_pci()
1882 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number, in iommu_init_pci()
1890 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_init_pci()
1891 &iommu->stored_addr_lo); in iommu_init_pci()
1892 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_init_pci()
1893 &iommu->stored_addr_hi); in iommu_init_pci()
1896 iommu->stored_addr_lo &= ~1; in iommu_init_pci()
1900 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); in iommu_init_pci()
1903 iommu->stored_l2[i] = iommu_read_l2(iommu, i); in iommu_init_pci()
1906 amd_iommu_erratum_746_workaround(iommu); in iommu_init_pci()
1907 amd_iommu_ats_write_check_workaround(iommu); in iommu_init_pci()
1909 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, in iommu_init_pci()
1910 amd_iommu_groups, "ivhd%d", iommu->index); in iommu_init_pci()
1911 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops); in iommu_init_pci()
1912 iommu_device_register(&iommu->iommu); in iommu_init_pci()
1914 return pci_enable_device(iommu->dev); in iommu_init_pci()
1923 struct amd_iommu *iommu; in print_iommu_info() local
1925 for_each_iommu(iommu) { in print_iommu_info()
1926 struct pci_dev *pdev = iommu->dev; in print_iommu_info()
1929 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr); in print_iommu_info()
1931 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { in print_iommu_info()
1932 pr_info("Extended features (%#llx):", iommu->features); in print_iommu_info()
1935 if (iommu_feature(iommu, (1ULL << i))) in print_iommu_info()
1939 if (iommu->features & FEATURE_GAM_VAPIC) in print_iommu_info()
1956 struct amd_iommu *iommu; in amd_iommu_init_pci() local
1959 for_each_iommu(iommu) { in amd_iommu_init_pci()
1960 ret = iommu_init_pci(iommu); in amd_iommu_init_pci()
1965 iommu_set_cwwb_range(iommu); in amd_iommu_init_pci()
1982 for_each_iommu(iommu) in amd_iommu_init_pci()
1983 iommu_flush_all_caches(iommu); in amd_iommu_init_pci()
2000 static int iommu_setup_msi(struct amd_iommu *iommu) in iommu_setup_msi() argument
2004 r = pci_enable_msi(iommu->dev); in iommu_setup_msi()
2008 r = request_threaded_irq(iommu->dev->irq, in iommu_setup_msi()
2012 iommu); in iommu_setup_msi()
2015 pci_disable_msi(iommu->dev); in iommu_setup_msi()
2019 iommu->int_enabled = true; in iommu_setup_msi()
2034 static void iommu_update_intcapxt(struct amd_iommu *iommu) in iommu_update_intcapxt() argument
2037 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET); in iommu_update_intcapxt()
2038 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET); in iommu_update_intcapxt()
2039 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET); in iommu_update_intcapxt()
2052 * Current IOMMU implementation uses the same IRQ for all in iommu_update_intcapxt()
2053 * 3 IOMMU interrupts. in iommu_update_intcapxt()
2055 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); in iommu_update_intcapxt()
2056 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); in iommu_update_intcapxt()
2057 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); in iommu_update_intcapxt()
2063 struct amd_iommu *iommu; in _irq_notifier_notify() local
2065 for_each_iommu(iommu) { in _irq_notifier_notify()
2066 if (iommu->dev->irq == notify->irq) { in _irq_notifier_notify()
2067 iommu_update_intcapxt(iommu); in _irq_notifier_notify()
2077 static int iommu_init_intcapxt(struct amd_iommu *iommu) in iommu_init_intcapxt() argument
2080 struct irq_affinity_notify *notify = &iommu->intcapxt_notify; in iommu_init_intcapxt()
2093 notify->irq = iommu->dev->irq; in iommu_init_intcapxt()
2096 ret = irq_set_affinity_notifier(iommu->dev->irq, notify); in iommu_init_intcapxt()
2099 iommu->devid, iommu->dev->irq); in iommu_init_intcapxt()
2103 iommu_update_intcapxt(iommu); in iommu_init_intcapxt()
2104 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); in iommu_init_intcapxt()
2108 static int iommu_init_msi(struct amd_iommu *iommu) in iommu_init_msi() argument
2112 if (iommu->int_enabled) in iommu_init_msi()
2115 if (iommu->dev->msi_cap) in iommu_init_msi()
2116 ret = iommu_setup_msi(iommu); in iommu_init_msi()
2124 ret = iommu_init_intcapxt(iommu); in iommu_init_msi()
2128 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); in iommu_init_msi()
2130 if (iommu->ppr_log != NULL) in iommu_init_msi()
2131 iommu_feature_enable(iommu, CONTROL_PPRINT_EN); in iommu_init_msi()
2133 iommu_ga_log_enable(iommu); in iommu_init_msi()
2265 static void iommu_init_flags(struct amd_iommu *iommu) in iommu_init_flags() argument
2267 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? in iommu_init_flags()
2268 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : in iommu_init_flags()
2269 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); in iommu_init_flags()
2271 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? in iommu_init_flags()
2272 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : in iommu_init_flags()
2273 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); in iommu_init_flags()
2275 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? in iommu_init_flags()
2276 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : in iommu_init_flags()
2277 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); in iommu_init_flags()
2279 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? in iommu_init_flags()
2280 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : in iommu_init_flags()
2281 iommu_feature_disable(iommu, CONTROL_ISOC_EN); in iommu_init_flags()
2284 * make IOMMU memory accesses cache coherent in iommu_init_flags()
2286 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); in iommu_init_flags()
2289 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); in iommu_init_flags()
2292 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) in iommu_apply_resume_quirks() argument
2296 struct pci_dev *pdev = iommu->root_pdev; in iommu_apply_resume_quirks()
2298 /* RD890 BIOSes may not have completely reconfigured the iommu */ in iommu_apply_resume_quirks()
2299 if (!is_rd890_iommu(iommu->dev) || !pdev) in iommu_apply_resume_quirks()
2303 * First, we need to ensure that the iommu is enabled. This is in iommu_apply_resume_quirks()
2311 /* Enable the iommu */ in iommu_apply_resume_quirks()
2315 /* Restore the iommu BAR */ in iommu_apply_resume_quirks()
2316 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2317 iommu->stored_addr_lo); in iommu_apply_resume_quirks()
2318 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, in iommu_apply_resume_quirks()
2319 iommu->stored_addr_hi); in iommu_apply_resume_quirks()
2324 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); in iommu_apply_resume_quirks()
2328 iommu_write_l2(iommu, i, iommu->stored_l2[i]); in iommu_apply_resume_quirks()
2331 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, in iommu_apply_resume_quirks()
2332 iommu->stored_addr_lo | 1); in iommu_apply_resume_quirks()
2335 static void iommu_enable_ga(struct amd_iommu *iommu) in iommu_enable_ga() argument
2340 iommu_feature_enable(iommu, CONTROL_GAM_EN); in iommu_enable_ga()
2343 iommu_feature_enable(iommu, CONTROL_GA_EN); in iommu_enable_ga()
2344 iommu->irte_ops = &irte_128_ops; in iommu_enable_ga()
2347 iommu->irte_ops = &irte_32_ops; in iommu_enable_ga()
2353 static void early_enable_iommu(struct amd_iommu *iommu) in early_enable_iommu() argument
2355 iommu_disable(iommu); in early_enable_iommu()
2356 iommu_init_flags(iommu); in early_enable_iommu()
2357 iommu_set_device_table(iommu); in early_enable_iommu()
2358 iommu_enable_command_buffer(iommu); in early_enable_iommu()
2359 iommu_enable_event_buffer(iommu); in early_enable_iommu()
2360 iommu_set_exclusion_range(iommu); in early_enable_iommu()
2361 iommu_enable_ga(iommu); in early_enable_iommu()
2362 iommu_enable_xt(iommu); in early_enable_iommu()
2363 iommu_enable(iommu); in early_enable_iommu()
2364 iommu_flush_all_caches(iommu); in early_enable_iommu()
2377 struct amd_iommu *iommu; in early_enable_iommus() local
2392 for_each_iommu(iommu) { in early_enable_iommus()
2393 clear_translation_pre_enabled(iommu); in early_enable_iommus()
2394 early_enable_iommu(iommu); in early_enable_iommus()
2401 for_each_iommu(iommu) { in early_enable_iommus()
2402 iommu_disable_command_buffer(iommu); in early_enable_iommus()
2403 iommu_disable_event_buffer(iommu); in early_enable_iommus()
2404 iommu_enable_command_buffer(iommu); in early_enable_iommus()
2405 iommu_enable_event_buffer(iommu); in early_enable_iommus()
2406 iommu_enable_ga(iommu); in early_enable_iommus()
2407 iommu_enable_xt(iommu); in early_enable_iommus()
2408 iommu_set_device_table(iommu); in early_enable_iommus()
2409 iommu_flush_all_caches(iommu); in early_enable_iommus()
2429 struct amd_iommu *iommu; in enable_iommus_v2() local
2431 for_each_iommu(iommu) { in enable_iommus_v2()
2432 iommu_enable_ppr_log(iommu); in enable_iommus_v2()
2433 iommu_enable_gt(iommu); in enable_iommus_v2()
2446 struct amd_iommu *iommu; in disable_iommus() local
2448 for_each_iommu(iommu) in disable_iommus()
2449 iommu_disable(iommu); in disable_iommus()
2464 struct amd_iommu *iommu; in amd_iommu_resume() local
2466 for_each_iommu(iommu) in amd_iommu_resume()
2467 iommu_apply_resume_quirks(iommu); in amd_iommu_resume()
2580 * This is the hardware init function for AMD IOMMU in the system.
2584 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2661 * IOMMU sees for that device in early_amd_iommu_init()
2668 /* IOMMU rlookup table - find the IOMMU for a specific device */ in early_amd_iommu_init()
2701 /* Disable IOMMU if there's Stoney Ridge graphics */ in early_amd_iommu_init()
2705 pr_info("Disable IOMMU on Stoney Ridge\n"); in early_amd_iommu_init()
2761 struct amd_iommu *iommu; in amd_iommu_enable_interrupts() local
2764 for_each_iommu(iommu) { in amd_iommu_enable_interrupts()
2765 ret = iommu_init_msi(iommu); in amd_iommu_enable_interrupts()
2798 * AMD IOMMU Initialization State Machine
2819 pr_info("AMD IOMMU disabled\n"); in state_next()
2866 struct amd_iommu *iommu; in state_next() local
2869 for_each_iommu(iommu) in state_next()
2870 iommu_flush_all_caches(iommu); in state_next()
2936 * This is the core init function for AMD IOMMU hardware in the system.
2942 struct amd_iommu *iommu; in amd_iommu_init() local
2949 * We failed to initialize the AMD IOMMU - try fallback in amd_iommu_init()
2956 for_each_iommu(iommu) in amd_iommu_init()
2957 amd_iommu_debugfs_setup(iommu); in amd_iommu_init()
2975 pr_notice("IOMMU not currently supported when SME is active\n"); in amd_iommu_sme_check()
2982 * Early detect code. This code runs at IOMMU detection time in the DMA
3003 x86_init.iommu.iommu_init = amd_iommu_init; in amd_iommu_detect()
3010 * Parsing functions for the AMD IOMMU specific kernel command line
3220 struct amd_iommu *iommu; in get_amd_iommu() local
3222 for_each_iommu(iommu) in get_amd_iommu()
3224 return iommu; in get_amd_iommu()
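Only the loop and return statements of get_amd_iommu() match the search term; the index comparison in between is elided. A hedged reconstruction of what the visible lines imply:

/* Hedged reconstruction of get_amd_iommu(): walk the global IOMMU list
 * and return the idx'th entry, or NULL if idx is out of range. The
 * counter line is inferred; only the loop and return are listed above. */
static struct amd_iommu *example_get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;

	return NULL;
}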
3231 * IOMMU EFR Performance Counter support functionality. This code allows
3232 * access to the IOMMU PC functionality.
3238 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_banks() local
3240 if (iommu) in amd_iommu_pc_get_max_banks()
3241 return iommu->max_banks; in amd_iommu_pc_get_max_banks()
3255 struct amd_iommu *iommu = get_amd_iommu(idx); in amd_iommu_pc_get_max_counters() local
3257 if (iommu) in amd_iommu_pc_get_max_counters()
3258 return iommu->max_counters; in amd_iommu_pc_get_max_counters()
3264 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, in iommu_pc_get_set_reg() argument
3270 /* Make sure the IOMMU PC resource is available */ in iommu_pc_get_set_reg()
3274 /* Check for valid iommu and pc register indexing */ in iommu_pc_get_set_reg()
3275 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) in iommu_pc_get_set_reg()
3281 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | in iommu_pc_get_set_reg()
3282 (iommu->max_counters << 8) | 0x28); in iommu_pc_get_set_reg()
3290 writel((u32)val, iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3291 writel((val >> 32), iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3293 *value = readl(iommu->mmio_base + offset + 4); in iommu_pc_get_set_reg()
3295 *value |= readl(iommu->mmio_base + offset); in iommu_pc_get_set_reg()
3302 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_get_reg() argument
3304 if (!iommu) in amd_iommu_pc_get_reg()
3307 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); in amd_iommu_pc_get_reg()
3311 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) in amd_iommu_pc_set_reg() argument
3313 if (!iommu) in amd_iommu_pc_set_reg()
3316 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); in amd_iommu_pc_set_reg()
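Taken together, the performance-counter helpers above suggest a usage pattern like the following. This is a hedged example: the bank, counter, and function-register indices are arbitrary (fxn must be a multiple of 8 and at most 0x28, per the WARN_ON check above), and a zero return from amd_iommu_pc_get_reg() is assumed to indicate success.

/* Illustrative use of the IOMMU perf-counter API listed above. Indices
 * are arbitrary examples; success semantics are assumed. */
static void example_read_pc(void)
{
	struct amd_iommu *iommu = get_amd_iommu(0);
	u64 value = 0;

	if (!iommu || !amd_iommu_pc_get_max_banks(0))
		return;

	/* Read function register 0x10 of bank 0, counter 0. */
	if (!amd_iommu_pc_get_reg(iommu, 0, 0, 0x10, &value))
		pr_info("IOMMU PC[0][0] = 0x%llx\n", value);
}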