
Lines Matching +full:msi +full:- +full:x

1 // SPDX-License-Identifier: GPL-2.0
3 * PCI Message Signaled Interrupt (MSI)
5 * Copyright (C) 2003-2004 Intel
14 #include "msi.h"
20 * pci_msi_supported - check whether MSI may be enabled on a device
21 * @dev: pointer to the pci_dev data structure of MSI device function
25 * to determine if MSI/MSI-X are supported for the device. If MSI/MSI-X is
32 /* MSI must be globally enabled and supported by the device */ in pci_msi_supported()
36 if (!dev || dev->no_msi) in pci_msi_supported()
48 * Any bridge which does NOT route MSI transactions from its in pci_msi_supported()
53 * - arch-specific PCI host bus controller drivers (deprecated) in pci_msi_supported()
54 * - quirks for specific PCI bridges in pci_msi_supported()
56 * or indirectly by platform-specific PCI host bridge drivers by in pci_msi_supported()
58 * the NO_MSI flag when no MSI domain is found for this bridge in pci_msi_supported()
61 for (bus = dev->bus; bus; bus = bus->parent) in pci_msi_supported()
62 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) in pci_msi_supported()
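The checks above sit behind the public vector-allocation API rather than being called by drivers directly. A minimal driver-side sketch (hypothetical demo_* names, not part of this file) of the path that eventually consults pci_msi_supported():

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical handler and probe fragment */
static irqreturn_t demo_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_setup_irq(struct pci_dev *pdev)
{
        int nvec;

        /* Tries MSI-X, then MSI, then INTx; the MSI/MSI-X paths apply the checks above */
        nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
        if (nvec < 0)
                return nvec;

        /* pci_irq_vector() maps vector 0 to its Linux IRQ number */
        return request_irq(pci_irq_vector(pdev, 0), demo_handler, 0,
                           "demo", pdev);
}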
72 dev->is_msi_managed = false; in pcim_msi_release()
78 * vs. msi_device_data_release() in the MSI core code.
84 if (!pci_is_managed(dev) || dev->is_msi_managed) in pcim_setup_msi_release()
87 ret = devm_add_action(&dev->dev, pcim_msi_release, dev); in pcim_setup_msi_release()
89 dev->is_msi_managed = true; in pcim_setup_msi_release()
94 * Ordering vs. devres: msi device data has to be installed first so that
99 int ret = msi_setup_device_data(&dev->dev); in pci_setup_msi_context()
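The managed (pcim_) variant above ties MSI teardown to devres with devm_add_action(). A hedged sketch of the same pattern in a driver, with hypothetical demo_* names and a placeholder release action:

#include <linux/device.h>
#include <linux/pci.h>

/* Hypothetical teardown callback, mirroring pcim_msi_release() above */
static void demo_release(void *data)
{
        struct pci_dev *pdev = data;

        pci_info(pdev, "demo teardown\n");      /* placeholder for real cleanup */
}

static int demo_setup(struct pci_dev *pdev)
{
        /* demo_release(pdev) runs automatically when the device is unbound */
        return devm_add_action_or_reset(&pdev->dev, demo_release, pdev);
}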
107 * Helper functions for mask/unmask and MSI message handling
112 raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock; in pci_msi_update_mask()
115 if (!desc->pci.msi_attrib.can_mask) in pci_msi_update_mask()
119 desc->pci.msi_mask &= ~clear; in pci_msi_update_mask()
120 desc->pci.msi_mask |= set; in pci_msi_update_mask()
121 pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos, in pci_msi_update_mask()
122 desc->pci.msi_mask); in pci_msi_update_mask()
127 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
134 __pci_msi_mask_desc(desc, BIT(data->irq - desc->irq)); in pci_msi_mask_irq()
139 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
146 __pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq)); in pci_msi_unmask_irq()
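With multi-MSI the device owns a contiguous block of Linux IRQs starting at desc->irq, so the callbacks above derive the per-vector mask bit from the interrupt's offset inside that block. An illustrative helper (not part of this file) computing the same bit:

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/msi.h>

/* Illustrative only: which MSI mask bit belongs to this irq_data? */
static u32 demo_msi_mask_bit(struct irq_data *data)
{
        struct msi_desc *desc = irq_data_get_msi_desc(data);

        /* vector 0 -> bit 0, vector 1 -> bit 1, ... */
        return BIT(data->irq - desc->irq);
}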
154 BUG_ON(dev->current_state != PCI_D0); in __pci_read_msi_msg()
156 if (entry->pci.msi_attrib.is_msix) { in __pci_read_msi_msg()
159 if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual)) in __pci_read_msi_msg()
162 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); in __pci_read_msi_msg()
163 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); in __pci_read_msi_msg()
164 msg->data = readl(base + PCI_MSIX_ENTRY_DATA); in __pci_read_msi_msg()
166 int pos = dev->msi_cap; in __pci_read_msi_msg()
170 &msg->address_lo); in __pci_read_msi_msg()
171 if (entry->pci.msi_attrib.is_64) { in __pci_read_msi_msg()
173 &msg->address_hi); in __pci_read_msi_msg()
176 msg->address_hi = 0; in __pci_read_msi_msg()
179 msg->data = data; in __pci_read_msi_msg()
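The readl() offsets above follow the fixed MSI-X table layout from the PCIe spec: 16 bytes per entry, holding lower address, upper address, data and vector control. A small sketch (hypothetical helper name) of how a table slot's MMIO address falls out of that layout:

#include <linux/io.h>
#include <linux/pci.h>

/*
 * Illustrative: MMIO address of MSI-X table slot 'index'. Each slot is
 * PCI_MSIX_ENTRY_SIZE (16) bytes:
 *   +0x0 PCI_MSIX_ENTRY_LOWER_ADDR
 *   +0x4 PCI_MSIX_ENTRY_UPPER_ADDR
 *   +0x8 PCI_MSIX_ENTRY_DATA
 *   +0xc PCI_MSIX_ENTRY_VECTOR_CTRL (bit 0 is the per-vector mask)
 */
static void __iomem *demo_msix_slot(void __iomem *table_base, unsigned int index)
{
        return table_base + index * PCI_MSIX_ENTRY_SIZE;
}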
186 int pos = dev->msi_cap; in pci_write_msg_msi()
191 msgctl |= desc->pci.msi_attrib.multiple << 4; in pci_write_msg_msi()
194 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo); in pci_write_msg_msi()
195 if (desc->pci.msi_attrib.is_64) { in pci_write_msg_msi()
196 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi); in pci_write_msg_msi()
197 pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data); in pci_write_msg_msi()
199 pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data); in pci_write_msg_msi()
208 u32 ctrl = desc->pci.msix_ctrl; in pci_write_msg_msix()
211 if (desc->pci.msi_attrib.is_virtual) in pci_write_msg_msix()
224 writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); in pci_write_msg_msix()
225 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); in pci_write_msg_msix()
226 writel(msg->data, base + PCI_MSIX_ENTRY_DATA); in pci_write_msg_msix()
239 if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { in __pci_write_msi_msg()
241 } else if (entry->pci.msi_attrib.is_msix) { in __pci_write_msi_msg()
247 entry->msg = *msg; in __pci_write_msi_msg()
249 if (entry->write_msi_msg) in __pci_write_msi_msg()
250 entry->write_msi_msg(entry, entry->write_msi_msg_data); in __pci_write_msi_msg()
262 /* PCI/MSI specific functionality */
266 if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) in pci_intx_for_msi()
274 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); in pci_msi_set_enable()
278 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); in pci_msi_set_enable()
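The elided body between the two config accesses above flips the MSI Enable bit. A standalone sketch of that read-modify-write pattern (hypothetical name, not the file's exact body):

#include <linux/pci.h>

/* Sketch: toggle MSI Enable (bit 0 of the MSI Message Control register) */
static void demo_msi_set_enable(struct pci_dev *dev, int enable)
{
        u16 control;

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
        control &= ~PCI_MSI_FLAGS_ENABLE;
        if (enable)
                control |= PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}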
287 /* MSI Entry Initialization */ in msi_setup_msi_desc()
290 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); in msi_setup_msi_desc()
292 if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) in msi_setup_msi_desc()
301 desc.pci.msi_attrib.default_irq = dev->irq; in msi_setup_msi_desc()
307 desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64; in msi_setup_msi_desc()
309 desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32; in msi_setup_msi_desc()
315 return msi_insert_msi_desc(&dev->dev, &desc); in msi_setup_msi_desc()
322 if (!dev->no_64bit_msi) in msi_verify_entries()
325 msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { in msi_verify_entries()
326 if (entry->msg.address_hi) { in msi_verify_entries()
327 pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n", in msi_verify_entries()
328 entry->msg.address_hi, entry->msg.address_lo); in msi_verify_entries()
332 return !entry ? 0 : -EIO; in msi_verify_entries()
336 * msi_capability_init - configure device's MSI capability structure
337 * @dev: pointer to the pci_dev data structure of MSI device function
341 * Set up the MSI capability structure of the device with the requested
343 * setup of an entry with the new MSI IRQ. A negative return value indicates
354 /* Reject multi-MSI early on irq domain enabled architectures */ in msi_capability_init()
359 * Disable MSI during setup in the hardware, but mark it enabled in msi_capability_init()
363 dev->msi_enabled = 1; in msi_capability_init()
368 msi_lock_descs(&dev->dev); in msi_capability_init()
374 entry = msi_first_desc(&dev->dev, MSI_DESC_ALL); in msi_capability_init()
377 * Copy the MSI descriptor for the error path because in msi_capability_init()
383 /* Configure MSI capability structure */ in msi_capability_init()
392 /* Set MSI enabled bits */ in msi_capability_init()
397 dev->irq = entry->irq; in msi_capability_init()
404 dev->msi_enabled = 0; in msi_capability_init()
406 msi_unlock_descs(&dev->dev); in msi_capability_init()
417 if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) in __pci_enable_msi_range()
418 return -EINVAL; in __pci_enable_msi_range()
420 /* Check whether driver already requested MSI-X IRQs */ in __pci_enable_msi_range()
421 if (dev->msix_enabled) { in __pci_enable_msi_range()
422 pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); in __pci_enable_msi_range()
423 return -EINVAL; in __pci_enable_msi_range()
427 return -ERANGE; in __pci_enable_msi_range()
429 if (WARN_ON_ONCE(dev->msi_enabled)) in __pci_enable_msi_range()
430 return -EINVAL; in __pci_enable_msi_range()
432 /* Test for the availability of MSI support */ in __pci_enable_msi_range()
434 return -ENOTSUPP; in __pci_enable_msi_range()
440 return -ENOSPC; in __pci_enable_msi_range()
450 return -ENODEV; in __pci_enable_msi_range()
456 return -ENOSPC; in __pci_enable_msi_range()
466 return -ENOSPC; in __pci_enable_msi_range()
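The minvec/maxvec handling above is what pci_alloc_irq_vectors() exposes to drivers: the count shrinks toward minvec, and the call fails with -ENOSPC only when even minvec cannot be satisfied. A driver-side sketch (hypothetical name) that also asks the core to spread vectors across CPUs:

#include <linux/pci.h>

/* Hypothetical: request 1..16 MSI vectors with automatic CPU affinity */
static int demo_enable_msi(struct pci_dev *pdev)
{
        int nvec = pci_alloc_irq_vectors(pdev, 1, 16,
                                         PCI_IRQ_MSI | PCI_IRQ_AFFINITY);

        /* On success nvec is in [1, 16]; -ENOSPC means not even 1 vector */
        return nvec;
}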
473 * pci_msi_vec_count - Return the number of MSI vectors a device can send
476 * This function returns the number of MSI vectors a device requested via
478 * device is not capable of sending MSI interrupts. Otherwise, the call succeeds
480 * MSI specification.
487 if (!dev->msi_cap) in pci_msi_vec_count()
488 return -EINVAL; in pci_msi_vec_count()
490 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); in pci_msi_vec_count()
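The value read above encodes the Multiple Message Capable field (Message Control bits 3:1) as a power-of-two exponent. A sketch of that spec-defined decode, with a hypothetical name:

#include <linux/pci.h>

/* Sketch: decode Multiple Message Capable -> 1, 2, 4, ... 32 vectors */
static int demo_msi_vec_count(struct pci_dev *dev)
{
        u16 msgctl;

        if (!dev->msi_cap)
                return -EINVAL;

        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
        return 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
}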
498 * Architecture override returns true when the PCI MSI message should be
511 if (!dev->msi_enabled) in __pci_restore_msi_state()
514 entry = irq_get_msi_desc(dev->irq); in __pci_restore_msi_state()
519 __pci_write_msi_msg(entry, &entry->msg); in __pci_restore_msi_state()
521 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); in __pci_restore_msi_state()
524 control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; in __pci_restore_msi_state()
525 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); in __pci_restore_msi_state()
532 if (!pci_msi_enable || !dev || !dev->msi_enabled) in pci_msi_shutdown()
537 dev->msi_enabled = 0; in pci_msi_shutdown()
539 /* Return the device with MSI unmasked, as in its initial state */ in pci_msi_shutdown()
540 desc = msi_first_desc(&dev->dev, MSI_DESC_ALL); in pci_msi_shutdown()
544 /* Restore dev->irq to its default pin-assertion IRQ */ in pci_msi_shutdown()
545 dev->irq = desc->pci.msi_attrib.default_irq; in pci_msi_shutdown()
549 /* PCI/MSI-X specific functionality */
555 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); in pci_msix_clear_and_set_ctrl()
558 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); in pci_msix_clear_and_set_ctrl()
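This clear/set helper is how the MSI-X Message Control bits (Enable, Function Mask) are flipped throughout the file. A standalone sketch of the pattern plus a typical call, with a hypothetical name:

#include <linux/pci.h>

/* Sketch: read-modify-write of the MSI-X Message Control word */
static void demo_msix_clear_and_set(struct pci_dev *dev, u16 clear, u16 set)
{
        u16 ctrl;

        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
        ctrl &= ~clear;
        ctrl |= set;
        pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

/* Typical setup-time use: enable MSI-X with all vectors function-masked */
/* demo_msix_clear_and_set(dev, 0, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); */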
569 pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, in msix_map_region()
583 * msix_prepare_msi_desc - Prepare a half initialized MSI descriptor for operation
585 * @desc: The MSI descriptor for preparation
588 * allocations for MSI-X after initial enablement.
590 * Ideally the whole MSI-X setup would work that way, but there is no way to
602 desc->nvec_used = 1; in msix_prepare_msi_desc()
603 desc->pci.msi_attrib.is_msix = 1; in msix_prepare_msi_desc()
604 desc->pci.msi_attrib.is_64 = 1; in msix_prepare_msi_desc()
605 desc->pci.msi_attrib.default_irq = dev->irq; in msix_prepare_msi_desc()
606 desc->pci.mask_base = dev->msix_base; in msix_prepare_msi_desc()
607 desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask && in msix_prepare_msi_desc()
608 !desc->pci.msi_attrib.is_virtual; in msix_prepare_msi_desc()
610 if (desc->pci.msi_attrib.can_mask) { in msix_prepare_msi_desc()
613 desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); in msix_prepare_msi_desc()
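These half-initialized descriptors serve the dynamic MSI-X allocation path mentioned in the comment above, i.e. adding vectors after MSI-X is already enabled. A hedged usage sketch (hypothetical demo_* name, assuming a kernel that provides pci_msix_alloc_irq_at()):

#include <linux/msi.h>
#include <linux/pci.h>

/* Hypothetical: allocate one extra MSI-X vector at runtime */
static int demo_add_vector(struct pci_dev *pdev)
{
        struct msi_map map;

        /* MSI_ANY_INDEX lets the core pick a free MSI-X table slot */
        map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
        if (map.index < 0)
                return map.index;

        return map.virq;        /* Linux IRQ number of the new vector */
}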
633 ret = msi_insert_msi_desc(&dev->dev, &desc); in msix_setup_msi_descs()
645 msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { in msix_update_entries()
646 entries->vector = desc->irq; in msix_update_entries()
673 msi_lock_descs(&dev->dev); in msix_setup_interrupts()
682 /* Check if all MSI entries honor device restrictions */ in msix_setup_interrupts()
693 msi_unlock_descs(&dev->dev); in msix_setup_interrupts()
699 * msix_capability_init - configure device's MSI-X capability
700 * @dev: pointer to the pci_dev data structure of MSI-X device function
705 * Set up the MSI-X capability structure of the device function with a
706 * single MSI-X IRQ. A return of zero indicates the successful setup of
707 * the requested MSI-X entries with allocated IRQs, or non-zero otherwise.
716 * Some devices require MSI-X to be enabled before the MSI-X in msix_capability_init()
724 dev->msix_enabled = 1; in msix_capability_init()
726 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); in msix_capability_init()
727 /* Request & Map MSI-X table region */ in msix_capability_init()
729 dev->msix_base = msix_map_region(dev, tsize); in msix_capability_init()
730 if (!dev->msix_base) { in msix_capability_init()
731 ret = -ENOMEM; in msix_capability_init()
747 * which takes the MSI-X mask bits into account even in msix_capability_init()
748 * when MSI-X is disabled, which prevents MSI delivery. in msix_capability_init()
750 msix_mask_all(dev->msix_base, tsize); in msix_capability_init()
757 dev->msix_enabled = 0; in msix_capability_init()
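msix_mask_all() itself is not shown in this excerpt; per the PCIe spec, masking every entry amounts to setting bit 0 of each slot's Vector Control dword. A hedged sketch of such a loop (assumed shape, hypothetical name, not the file's exact body):

#include <linux/io.h>
#include <linux/pci.h>

/* Sketch: set the per-vector mask bit of every MSI-X table entry */
static void demo_msix_mask_all(void __iomem *base, int tsize)
{
        int i;

        for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
                writel(PCI_MSIX_ENTRY_CTRL_MASKBIT,
                       base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}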
792 return -ERANGE; in __pci_enable_msix_range()
794 if (dev->msi_enabled) { in __pci_enable_msix_range()
795 pci_info(dev, "can't enable MSI-X (MSI already enabled)\n"); in __pci_enable_msix_range()
796 return -EINVAL; in __pci_enable_msix_range()
799 if (WARN_ON_ONCE(dev->msix_enabled)) in __pci_enable_msix_range()
800 return -EINVAL; in __pci_enable_msix_range()
802 /* Check MSI-X early on irq domain enabled architectures */ in __pci_enable_msix_range()
804 return -ENOTSUPP; in __pci_enable_msix_range()
806 if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0) in __pci_enable_msix_range()
807 return -EINVAL; in __pci_enable_msix_range()
814 return -EINVAL; in __pci_enable_msix_range()
825 return -ENOSPC; in __pci_enable_msix_range()
832 return -ENODEV; in __pci_enable_msix_range()
838 return -ENOSPC; in __pci_enable_msix_range()
848 return -ENOSPC; in __pci_enable_msix_range()
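__pci_enable_msix_range() backs exported entry points such as pci_enable_msix_range(). A minimal driver-side sketch of that explicit-entry API (hypothetical demo_* name), whose entries[].vector fields are filled in by msix_update_entries() above:

#include <linux/pci.h>

/* Hypothetical: accept anywhere from 2 to 4 MSI-X vectors */
static int demo_enable_msix(struct pci_dev *pdev)
{
        struct msix_entry entries[4];
        int i, nvec;

        for (i = 0; i < 4; i++)
                entries[i].entry = i;   /* MSI-X table slots to request */

        nvec = pci_enable_msix_range(pdev, entries, 2, 4);
        if (nvec < 0)
                return nvec;            /* e.g. -ENOSPC below 2 vectors */

        /* entries[i].vector now holds the Linux IRQ of each granted slot */
        return nvec;
}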
859 if (!dev->msix_enabled) in __pci_restore_msix_state()
869 msi_lock_descs(&dev->dev); in __pci_restore_msix_state()
870 msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { in __pci_restore_msix_state()
872 __pci_write_msi_msg(entry, &entry->msg); in __pci_restore_msix_state()
873 pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); in __pci_restore_msix_state()
875 msi_unlock_descs(&dev->dev); in __pci_restore_msix_state()
884 if (!pci_msi_enable || !dev || !dev->msix_enabled) in pci_msix_shutdown()
888 dev->msix_enabled = 0; in pci_msix_shutdown()
892 /* Return the device with MSI-X masked, as in its initial state */ in pci_msix_shutdown()
893 msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) in pci_msix_shutdown()
898 dev->msix_enabled = 0; in pci_msix_shutdown()
908 if (dev->msix_base) { in pci_free_msi_irqs()
909 iounmap(dev->msix_base); in pci_free_msi_irqs()
910 dev->msix_base = NULL; in pci_free_msi_irqs()
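On the driver side the matching cleanup is pci_free_irq_vectors(), which disables MSI/MSI-X and ends up releasing the resources freed above. A short sketch (hypothetical names; assumes a single vector was requested with request_irq()):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical remove-path fragment */
static void demo_teardown_irq(struct pci_dev *pdev, void *ctx)
{
        free_irq(pci_irq_vector(pdev, 0), ctx); /* undo request_irq() */
        pci_free_irq_vectors(pdev);             /* disable MSI/MSI-X, free vectors */
}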
918 return to_pci_dev(desc->dev); in msi_desc_to_pci_dev()