
Lines Matching +full:pci +full:- +full:domain

1 // SPDX-License-Identifier: GPL-2.0
3 * PCI Message Signaled Interrupt (MSI)
5 * Copyright (C) 2003-2004 Intel
16 #include <linux/pci.h>
27 #include "pci.h"
37 struct irq_domain *domain; in pci_msi_setup_msi_irqs() local
39 domain = dev_get_msi_domain(&dev->dev); in pci_msi_setup_msi_irqs()
40 if (domain && irq_domain_is_hierarchy(domain)) in pci_msi_setup_msi_irqs()
41 return msi_domain_alloc_irqs(domain, &dev->dev, nvec); in pci_msi_setup_msi_irqs()
48 struct irq_domain *domain; in pci_msi_teardown_msi_irqs() local
50 domain = dev_get_msi_domain(&dev->dev); in pci_msi_teardown_msi_irqs()
51 if (domain && irq_domain_is_hierarchy(domain)) in pci_msi_teardown_msi_irqs()
52 msi_domain_free_irqs(domain, &dev->dev); in pci_msi_teardown_msi_irqs()
65 struct msi_controller *chip = dev->bus->msi; in arch_setup_msi_irq()
68 if (!chip || !chip->setup_irq) in arch_setup_msi_irq()
69 return -EINVAL; in arch_setup_msi_irq()
71 err = chip->setup_irq(chip, dev, desc); in arch_setup_msi_irq()
75 irq_set_chip_data(desc->irq, chip); in arch_setup_msi_irq()
84 if (!chip || !chip->teardown_irq) in arch_teardown_msi_irq()
87 chip->teardown_irq(chip, irq); in arch_teardown_msi_irq()
92 struct msi_controller *chip = dev->bus->msi; in arch_setup_msi_irqs()
96 if (chip && chip->setup_irqs) in arch_setup_msi_irqs()
97 return chip->setup_irqs(chip, dev, nvec, type); in arch_setup_msi_irqs()
110 return -ENOSPC; in arch_setup_msi_irqs()
117 * We have a default implementation available as a separate non-weak
118 * function, as it is used by the Xen x86 PCI code
126 if (entry->irq) in default_teardown_msi_irqs()
127 for (i = 0; i < entry->nvec_used; i++) in default_teardown_msi_irqs()
128 arch_teardown_msi_irq(entry->irq + i); in default_teardown_msi_irqs()
142 if (dev->msix_enabled) { in default_restore_msi_irq()
144 if (irq == entry->irq) in default_restore_msi_irq()
147 } else if (dev->msi_enabled) { in default_restore_msi_irq()
152 __pci_write_msi_msg(entry, &entry->msg); in default_restore_msi_irq()
165 return (1 << (1 << x)) - 1; in msi_mask()
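
The msi_mask() helper above turns the log2-encoded multi-message capability into a contiguous bitmask covering 2^x vectors. A worked example, assuming a device whose multi_cap field reads 2 (four vectors): msi_mask(2) = (1 << (1 << 2)) - 1 = (1 << 4) - 1 = 0xf, i.e. the low four per-vector mask bits. The full function (not part of this match) also clamps large values so the shift never reaches the width of the type.
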
169 * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
176 raw_spinlock_t *lock = &desc->dev->msi_lock; in __pci_msi_desc_mask_irq()
179 if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) in __pci_msi_desc_mask_irq()
183 desc->masked &= ~mask; in __pci_msi_desc_mask_irq()
184 desc->masked |= flag; in __pci_msi_desc_mask_irq()
185 pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, in __pci_msi_desc_mask_irq()
186 desc->masked); in __pci_msi_desc_mask_irq()
197 if (desc->msi_attrib.is_virtual) in pci_msix_desc_addr()
200 return desc->mask_base + in pci_msix_desc_addr()
201 desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; in pci_msix_desc_addr()
205 * This internal function does not flush PCI writes to the device.
209 * of MSI-X interrupts.
213 u32 mask_bits = desc->masked; in __pci_msix_desc_mask_irq()
234 desc->masked = __pci_msix_desc_mask_irq(desc, flag); in msix_mask_irq()
241 if (desc->msi_attrib.is_msix) { in msi_set_mask_bit()
243 readl(desc->mask_base); /* Flush write to device */ in msi_set_mask_bit()
245 unsigned offset = data->irq - desc->irq; in msi_set_mask_bit()
251 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
261 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
275 default_restore_msi_irq(dev, entry->irq); in default_restore_msi_irqs()
282 BUG_ON(dev->current_state != PCI_D0); in __pci_read_msi_msg()
284 if (entry->msi_attrib.is_msix) { in __pci_read_msi_msg()
292 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); in __pci_read_msi_msg()
293 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); in __pci_read_msi_msg()
294 msg->data = readl(base + PCI_MSIX_ENTRY_DATA); in __pci_read_msi_msg()
296 int pos = dev->msi_cap; in __pci_read_msi_msg()
300 &msg->address_lo); in __pci_read_msi_msg()
301 if (entry->msi_attrib.is_64) { in __pci_read_msi_msg()
303 &msg->address_hi); in __pci_read_msi_msg()
306 msg->address_hi = 0; in __pci_read_msi_msg()
309 msg->data = data; in __pci_read_msi_msg()
317 if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { in __pci_write_msi_msg()
319 } else if (entry->msi_attrib.is_msix) { in __pci_write_msi_msg()
321 bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); in __pci_write_msi_msg()
337 writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); in __pci_write_msi_msg()
338 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); in __pci_write_msi_msg()
339 writel(msg->data, base + PCI_MSIX_ENTRY_DATA); in __pci_write_msi_msg()
347 int pos = dev->msi_cap; in __pci_write_msi_msg()
352 msgctl |= entry->msi_attrib.multiple << 4; in __pci_write_msi_msg()
356 msg->address_lo); in __pci_write_msi_msg()
357 if (entry->msi_attrib.is_64) { in __pci_write_msi_msg()
359 msg->address_hi); in __pci_write_msi_msg()
361 msg->data); in __pci_write_msi_msg()
364 msg->data); in __pci_write_msi_msg()
371 entry->msg = *msg; in __pci_write_msi_msg()
373 if (entry->write_msi_msg) in __pci_write_msi_msg()
374 entry->write_msi_msg(entry, entry->write_msi_msg_data); in __pci_write_msi_msg()
388 struct list_head *msi_list = dev_to_msi_list(&dev->dev); in free_msi_irqs()
395 if (entry->irq) in free_msi_irqs()
396 for (i = 0; i < entry->nvec_used; i++) in free_msi_irqs()
397 BUG_ON(irq_has_action(entry->irq + i)); in free_msi_irqs()
399 if (dev->msi_irq_groups) { in free_msi_irqs()
400 sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups); in free_msi_irqs()
401 msi_attrs = dev->msi_irq_groups[0]->attrs; in free_msi_irqs()
405 kfree(dev_attr->attr.name); in free_msi_irqs()
410 kfree(dev->msi_irq_groups[0]); in free_msi_irqs()
411 kfree(dev->msi_irq_groups); in free_msi_irqs()
412 dev->msi_irq_groups = NULL; in free_msi_irqs()
418 if (entry->msi_attrib.is_msix) { in free_msi_irqs()
419 if (list_is_last(&entry->list, msi_list)) in free_msi_irqs()
420 iounmap(entry->mask_base); in free_msi_irqs()
423 list_del(&entry->list); in free_msi_irqs()
430 if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) in pci_intx_for_msi()
439 if (!dev->msi_enabled) in __pci_restore_msi_state()
442 entry = irq_get_msi_desc(dev->irq); in __pci_restore_msi_state()
448 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); in __pci_restore_msi_state()
449 msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap), in __pci_restore_msi_state()
450 entry->masked); in __pci_restore_msi_state()
452 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; in __pci_restore_msi_state()
453 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); in __pci_restore_msi_state()
460 if (!dev->msix_enabled) in __pci_restore_msix_state()
462 BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); in __pci_restore_msix_state()
471 msix_mask_irq(entry, entry->masked); in __pci_restore_msix_state()
490 retval = kstrtoul(attr->attr.name, 10, &irq); in msi_mode_show()
497 entry->msi_attrib.is_msix ? "msix" : "msi"); in msi_mode_show()
499 return -ENODEV; in msi_mode_show()
510 int ret = -ENOMEM; in populate_msi_sysfs()
517 num_msi += entry->nvec_used; in populate_msi_sysfs()
521 /* Dynamically create the MSI attributes for the PCI device */ in populate_msi_sysfs()
524 return -ENOMEM; in populate_msi_sysfs()
526 for (i = 0; i < entry->nvec_used; i++) { in populate_msi_sysfs()
530 msi_attrs[count] = &msi_dev_attr->attr; in populate_msi_sysfs()
532 sysfs_attr_init(&msi_dev_attr->attr); in populate_msi_sysfs()
533 msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d", in populate_msi_sysfs()
534 entry->irq + i); in populate_msi_sysfs()
535 if (!msi_dev_attr->attr.name) in populate_msi_sysfs()
537 msi_dev_attr->attr.mode = S_IRUGO; in populate_msi_sysfs()
538 msi_dev_attr->show = msi_mode_show; in populate_msi_sysfs()
546 msi_irq_group->name = "msi_irqs"; in populate_msi_sysfs()
547 msi_irq_group->attrs = msi_attrs; in populate_msi_sysfs()
554 ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups); in populate_msi_sysfs()
557 pdev->msi_irq_groups = msi_irq_groups; in populate_msi_sysfs()
570 kfree(msi_attr->name); in populate_msi_sysfs()
590 entry = alloc_msi_entry(&dev->dev, nvec, masks); in msi_setup_entry()
594 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); in msi_setup_entry()
596 if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) in msi_setup_entry()
599 entry->msi_attrib.is_msix = 0; in msi_setup_entry()
600 entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); in msi_setup_entry()
601 entry->msi_attrib.is_virtual = 0; in msi_setup_entry()
602 entry->msi_attrib.entry_nr = 0; in msi_setup_entry()
603 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); in msi_setup_entry()
604 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ in msi_setup_entry()
605 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; in msi_setup_entry()
606 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); in msi_setup_entry()
609 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; in msi_setup_entry()
611 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; in msi_setup_entry()
614 if (entry->msi_attrib.maskbit) in msi_setup_entry()
615 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); in msi_setup_entry()
627 if (!dev->no_64bit_msi || !entry->msg.address_hi) in msi_verify_entries()
629 pci_err(dev, "Device has broken 64-bit MSI but arch" in msi_verify_entries()
631 return -EIO; in msi_verify_entries()
637 * msi_capability_init - configure device's MSI capability structure
659 return -ENOMEM; in msi_capability_init()
662 mask = msi_mask(entry->msi_attrib.multi_cap); in msi_capability_init()
665 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); in msi_capability_init()
692 dev->msi_enabled = 1; in msi_capability_init()
695 dev->irq = entry->irq; in msi_capability_init()
706 pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, in msix_map_region()
733 entry = alloc_msi_entry(&dev->dev, 1, curmsk); in msix_setup_entries()
740 ret = -ENOMEM; in msix_setup_entries()
744 entry->msi_attrib.is_msix = 1; in msix_setup_entries()
745 entry->msi_attrib.is_64 = 1; in msix_setup_entries()
748 entry->msi_attrib.entry_nr = entries[i].entry; in msix_setup_entries()
750 entry->msi_attrib.entry_nr = i; in msix_setup_entries()
752 entry->msi_attrib.is_virtual = in msix_setup_entries()
753 entry->msi_attrib.entry_nr >= vec_count; in msix_setup_entries()
755 entry->msi_attrib.default_irq = dev->irq; in msix_setup_entries()
756 entry->mask_base = base; in msix_setup_entries()
760 entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); in msix_setup_entries()
762 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); in msix_setup_entries()
778 entries->vector = entry->irq; in msix_update_entries()
797 * msix_capability_init - configure device's MSI-X capability
798 * @dev: pointer to the pci_dev data structure of MSI-X device function
803 * Setup the MSI-X capability structure of device function with a
804 * single MSI-X IRQ. A return of zero indicates the successful setup of
805 * requested MSI-X entries with allocated IRQs or non-zero otherwise. in msix_capability_init()
815 * Some devices require MSI-X to be enabled before the MSI-X in msix_capability_init()
822 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); in msix_capability_init()
823 /* Request & Map MSI-X table region */ in msix_capability_init()
827 ret = -ENOMEM; in msix_capability_init()
850 /* Set MSI-X enabled bits and unmask the function */ in msix_capability_init()
852 dev->msix_enabled = 1; in msix_capability_init()
859 * which takes the MSI-X mask bits into account even in msix_capability_init()
860 * when MSI-X is disabled, which prevents MSI delivery. in msix_capability_init()
878 if (entry->irq != 0) in msix_capability_init()
895 * pci_msi_supported - check whether MSI may be enabled on a device
900 * to determine if MSI/-X are supported for the device. If MSI/-X is
911 if (!dev || dev->no_msi) in pci_msi_supported()
926 * We expect only arch-specific PCI host bus controller driver in pci_msi_supported()
927 * or quirks for specific PCI bridges to be setting NO_MSI. in pci_msi_supported()
929 for (bus = dev->bus; bus; bus = bus->parent) in pci_msi_supported()
930 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) in pci_msi_supported()
937 * pci_msi_vec_count - Return the number of MSI vectors a device can send
951 if (!dev->msi_cap) in pci_msi_vec_count()
952 return -EINVAL; in pci_msi_vec_count()
954 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); in pci_msi_vec_count()
966 if (!pci_msi_enable || !dev || !dev->msi_enabled) in pci_msi_shutdown()
969 BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); in pci_msi_shutdown()
974 dev->msi_enabled = 0; in pci_msi_shutdown()
977 mask = msi_mask(desc->msi_attrib.multi_cap); in pci_msi_shutdown()
980 /* Restore dev->irq to its default pin-assertion IRQ */ in pci_msi_shutdown()
981 dev->irq = desc->msi_attrib.default_irq; in pci_msi_shutdown()
987 if (!pci_msi_enable || !dev || !dev->msi_enabled) in pci_disable_msi()
996 * pci_msix_vec_count - return the number of device's MSI-X table entries
997 * @dev: pointer to the pci_dev data structure of MSI-X device function
998 * This function returns the number of the device's MSI-X table entries and in pci_msix_vec_count()
999 * therefore the number of MSI-X vectors the device is capable of sending. in pci_msix_vec_count()
1000 * It returns a negative errno if the device is not capable of sending MSI-X
1007 if (!dev->msix_cap) in pci_msix_vec_count()
1008 return -EINVAL; in pci_msix_vec_count()
1010 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); in pci_msix_vec_count()
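
Both vector-count helpers only read the capability registers and may be called before any vectors are allocated. A minimal sketch of querying them from a driver probe path, assuming <linux/pci.h> is included and pdev is a hypothetical struct pci_dev * for the device being probed:

    int nr_msi  = pci_msi_vec_count(pdev);   /* 1..32, or negative errno if no MSI capability */
    int nr_msix = pci_msix_vec_count(pdev);  /* MSI-X table size, or negative errno if no MSI-X capability */

    if (nr_msix > 0)
        dev_info(&pdev->dev, "device advertises %d MSI-X vectors\n", nr_msix);
    else if (nr_msi > 0)
        dev_info(&pdev->dev, "device advertises %d MSI vectors\n", nr_msi);
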
1021 if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0) in __pci_enable_msix()
1022 return -EINVAL; in __pci_enable_msix()
1034 return -EINVAL; /* invalid entry */ in __pci_enable_msix()
1037 return -EINVAL; /* duplicate entry */ in __pci_enable_msix()
1043 if (dev->msi_enabled) { in __pci_enable_msix()
1044 pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); in __pci_enable_msix()
1045 return -EINVAL; in __pci_enable_msix()
1054 if (!pci_msi_enable || !dev || !dev->msix_enabled) in pci_msix_shutdown()
1058 dev->msix_enabled = 0; in pci_msix_shutdown()
1062 /* Return the device with MSI-X masked as initial states */ in pci_msix_shutdown()
1068 dev->msix_enabled = 0; in pci_msix_shutdown()
1074 if (!pci_msi_enable || !dev || !dev->msix_enabled) in pci_disable_msix()
1088 * pci_msi_enabled - is MSI enabled?
1090 * Returns true if MSI has not been disabled by the command-line option
1091 * pci=nomsi.
1105 if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) in __pci_enable_msi_range()
1106 return -EINVAL; in __pci_enable_msi_range()
1108 /* Check whether driver already requested MSI-X IRQs */ in __pci_enable_msi_range()
1109 if (dev->msix_enabled) { in __pci_enable_msi_range()
1110 pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); in __pci_enable_msi_range()
1111 return -EINVAL; in __pci_enable_msi_range()
1115 return -ERANGE; in __pci_enable_msi_range()
1117 if (WARN_ON_ONCE(dev->msi_enabled)) in __pci_enable_msi_range()
1118 return -EINVAL; in __pci_enable_msi_range()
1124 return -ENOSPC; in __pci_enable_msi_range()
1133 return -ENOSPC; in __pci_enable_msi_range()
1143 return -ENOSPC; in __pci_enable_msi_range()
1167 return -ERANGE; in __pci_enable_msix_range()
1169 if (WARN_ON_ONCE(dev->msix_enabled)) in __pci_enable_msix_range()
1170 return -EINVAL; in __pci_enable_msix_range()
1176 return -ENOSPC; in __pci_enable_msix_range()
1186 return -ENOSPC; in __pci_enable_msix_range()
1193 * pci_enable_msix_range - configure device's MSI-X capability structure
1194 * @dev: pointer to the pci_dev data structure of MSI-X device function
1195 * @entries: pointer to an array of MSI-X entries
1196 * @minvec: minimum number of MSI-X IRQs requested
1197 * @maxvec: maximum number of MSI-X IRQs requested
1199 * Setup the MSI-X capability structure of device function with a maximum
1201 * upon its software driver call to request for MSI-X mode enabled on its
1204 * indicates the successful configuration of MSI-X capability structure
1205 * with new allocated MSI-X interrupts.
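
A hedged sketch of the entry-array style interface documented above, again assuming a hypothetical pdev in a probe path; the array size of 4 is arbitrary:

    struct msix_entry entries[4];
    int i, nvec;

    for (i = 0; i < ARRAY_SIZE(entries); i++)
        entries[i].entry = i;                 /* requested MSI-X table slots */

    nvec = pci_enable_msix_range(pdev, entries, 2, ARRAY_SIZE(entries));
    if (nvec < 0)
        return nvec;                          /* could not get even 2 vectors */
    /* entries[0..nvec-1].vector now hold the allocated Linux IRQ numbers */

New code is generally steered toward the pci_alloc_irq_vectors*() family described next, which hides the msix_entry bookkeeping.
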
1215 * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
1216 * @dev: PCI device to operate on
1222 * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
1227 * available for @dev the function will fail with -ENOSPC.
1237 int nvecs = -ENOSPC; in pci_alloc_irq_vectors_affinity()
1262 if (min_vecs == 1 && dev->irq) { in pci_alloc_irq_vectors_affinity()
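
A hedged sketch of the allocation call documented above, from a hypothetical probe path; the vector counts, flags and the affinity descriptor are illustrative only:

    struct irq_affinity affd = { .pre_vectors = 1 };  /* e.g. reserve one non-queue vector */
    int nvecs;

    /* try MSI-X, then MSI, then legacy INTx; spread the rest across CPUs */
    nvecs = pci_alloc_irq_vectors_affinity(pdev, 1, 8,
                                           PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY,
                                           &affd);
    if (nvecs < 0)
        return nvecs;

With min_vecs == 1 and PCI_IRQ_LEGACY included in the flags, the function can fall back to the device's legacy INTx line, which is the dev->irq path visible in the matched body lines above.
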
1280 * pci_free_irq_vectors - free previously allocated IRQs for a device
1281 * @dev: PCI device to operate on
1293 * pci_irq_vector - return Linux IRQ number of a device vector
1294 * @dev: PCI device to operate on
1295 * @nr: Interrupt vector index (0-based)
1298 * MSI-X: The index in the MSI-X vector table
1302 * Return: The Linux interrupt number or -EINVAL if @nr is out of range. in pci_irq_vector()
1306 if (dev->msix_enabled) { in pci_irq_vector()
1310 if (entry->msi_attrib.entry_nr == nr) in pci_irq_vector()
1311 return entry->irq; in pci_irq_vector()
1314 return -EINVAL; in pci_irq_vector()
1317 if (dev->msi_enabled) { in pci_irq_vector()
1320 if (WARN_ON_ONCE(nr >= entry->nvec_used)) in pci_irq_vector()
1321 return -EINVAL; in pci_irq_vector()
1324 return -EINVAL; in pci_irq_vector()
1327 return dev->irq + nr; in pci_irq_vector()
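
A hedged sketch of requesting handlers for the allocated vectors, assuming <linux/interrupt.h> is included, nvecs comes from the allocation sketch above, and my_handler()/my_data are hypothetical driver names:

    int i, irq, ret;

    for (i = 0; i < nvecs; i++) {
        irq = pci_irq_vector(pdev, i);        /* Linux IRQ number for vector index i */
        if (irq < 0)
            return irq;
        ret = request_irq(irq, my_handler, 0, "my-device", my_data);
        if (ret)
            return ret;                       /* real code would unwind earlier vectors */
    }
    /* teardown: free_irq() each vector, then pci_free_irq_vectors(pdev) */
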
1332 * pci_irq_get_affinity - return the affinity of a particular MSI vector
1333 * @dev: PCI device to operate on
1334 * @nr: device-relative interrupt vector index (0-based).
1337 * MSI-X: The index in the MSI-X vector table
1345 if (dev->msix_enabled) { in pci_irq_get_affinity()
1349 if (entry->msi_attrib.entry_nr == nr) in pci_irq_get_affinity()
1350 return &entry->affinity->mask; in pci_irq_get_affinity()
1354 } else if (dev->msi_enabled) { in pci_irq_get_affinity()
1357 if (WARN_ON_ONCE(!entry || !entry->affinity || in pci_irq_get_affinity()
1358 nr >= entry->nvec_used)) in pci_irq_get_affinity()
1361 return &entry->affinity[nr].mask; in pci_irq_get_affinity()
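
A hedged sketch of the affinity query; the returned mask is only meaningful when the vectors were allocated with PCI_IRQ_AFFINITY, and i is the same device-relative index used above:

    const struct cpumask *mask = pci_irq_get_affinity(pdev, i);

    if (mask)
        dev_dbg(&pdev->dev, "vector %d is spread over CPU(s) %*pbl\n",
                i, cpumask_pr_args(mask));
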
1370 return to_pci_dev(desc->dev); in msi_desc_to_pci_dev()
1378 return dev->bus->sysdata; in msi_desc_to_pci_sysdata()
1384 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
1393 * For MSI-X desc->irq is always equal to irq_data->irq. For in pci_msi_domain_write_msg()
1396 if (desc->irq == irq_data->irq) in pci_msi_domain_write_msg()
1401 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
1410 return (irq_hw_number_t)desc->msi_attrib.entry_nr | in pci_msi_domain_calc_hwirq()
1412 (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; in pci_msi_domain_calc_hwirq()
1417 return !desc->msi_attrib.is_msix && desc->nvec_used > 1; in pci_msi_desc_is_multi_msi()
1421 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
1423 * @domain: The interrupt domain to check
1424 * @info: The domain info for verification
1429 * 1 if Multi MSI is requested, but the domain does not support it
1430 * -ENOTSUPP otherwise
1432 int pci_msi_domain_check_cap(struct irq_domain *domain, in pci_msi_domain_check_cap() argument
1439 !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) in pci_msi_domain_check_cap()
1441 else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX)) in pci_msi_domain_check_cap()
1442 return -ENOTSUPP; in pci_msi_domain_check_cap()
1447 static int pci_msi_domain_handle_error(struct irq_domain *domain, in pci_msi_domain_handle_error() argument
1451 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) in pci_msi_domain_handle_error()
1460 arg->desc = desc; in pci_msi_domain_set_desc()
1461 arg->hwirq = pci_msi_domain_calc_hwirq(desc); in pci_msi_domain_set_desc()
1472 struct msi_domain_ops *ops = info->ops; in pci_msi_domain_update_dom_ops()
1475 info->ops = &pci_msi_domain_ops_default; in pci_msi_domain_update_dom_ops()
1477 if (ops->set_desc == NULL) in pci_msi_domain_update_dom_ops()
1478 ops->set_desc = pci_msi_domain_set_desc; in pci_msi_domain_update_dom_ops()
1479 if (ops->msi_check == NULL) in pci_msi_domain_update_dom_ops()
1480 ops->msi_check = pci_msi_domain_check_cap; in pci_msi_domain_update_dom_ops()
1481 if (ops->handle_error == NULL) in pci_msi_domain_update_dom_ops()
1482 ops->handle_error = pci_msi_domain_handle_error; in pci_msi_domain_update_dom_ops()
1488 struct irq_chip *chip = info->chip; in pci_msi_domain_update_chip_ops()
1491 if (!chip->irq_write_msi_msg) in pci_msi_domain_update_chip_ops()
1492 chip->irq_write_msi_msg = pci_msi_domain_write_msg; in pci_msi_domain_update_chip_ops()
1493 if (!chip->irq_mask) in pci_msi_domain_update_chip_ops()
1494 chip->irq_mask = pci_msi_mask_irq; in pci_msi_domain_update_chip_ops()
1495 if (!chip->irq_unmask) in pci_msi_domain_update_chip_ops()
1496 chip->irq_unmask = pci_msi_unmask_irq; in pci_msi_domain_update_chip_ops()
1500 * pci_msi_create_irq_domain - Create a MSI interrupt domain
1502 * @info: MSI domain info
1503 * @parent: Parent irq domain
1505 * Updates the domain and chip ops and creates a MSI interrupt domain.
1508 * A domain pointer or NULL in case of failure.
1514 struct irq_domain *domain; in pci_msi_create_irq_domain() local
1516 if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE)) in pci_msi_create_irq_domain()
1517 info->flags &= ~MSI_FLAG_LEVEL_CAPABLE; in pci_msi_create_irq_domain()
1519 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) in pci_msi_create_irq_domain()
1521 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) in pci_msi_create_irq_domain()
1524 info->flags |= MSI_FLAG_ACTIVATE_EARLY; in pci_msi_create_irq_domain()
1526 info->flags |= MSI_FLAG_MUST_REACTIVATE; in pci_msi_create_irq_domain()
1528 /* PCI-MSI is oneshot-safe */ in pci_msi_create_irq_domain()
1529 info->chip->flags |= IRQCHIP_ONESHOT_SAFE; in pci_msi_create_irq_domain()
1531 domain = msi_create_irq_domain(fwnode, info, parent); in pci_msi_create_irq_domain()
1532 if (!domain) in pci_msi_create_irq_domain()
1535 irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI); in pci_msi_create_irq_domain()
1536 return domain; in pci_msi_create_irq_domain()
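
A skeletal, hedged sketch of how an interrupt-controller driver typically uses this constructor; my_msi_chip, my_msi_info, fwnode and parent are placeholders, and a real driver adds hardware-specific chip callbacks (e.g. .irq_eoi for its parent domain) on top of the defaults that pci_msi_domain_update_chip_ops() fills in above:

    static struct irq_chip my_msi_chip = {
        .name = "my-msi",
        /* .irq_mask/.irq_unmask default to pci_msi_mask_irq/pci_msi_unmask_irq
         * via MSI_FLAG_USE_DEF_CHIP_OPS */
    };

    static struct msi_domain_info my_msi_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_PCI_MSIX,
        .chip  = &my_msi_chip,
    };

    /* in the controller's init path: fwnode identifies the MSI controller,
     * parent is the underlying (e.g. wired-IRQ) irq domain */
    msi_domain = pci_msi_create_irq_domain(fwnode, &my_msi_info, parent);
    if (!msi_domain)
        return -ENOMEM;
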
1542 * so with DMA aliases we have to pick the least-worst compromise. Devices with
1548 * case is that of PCI->PCIe so we should always use the alias RID. This echoes
1550 * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
1558 if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) in get_msi_id_cb()
1565 * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
1566 * @domain: The interrupt domain
1567 * @pdev: The PCI device.
1574 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) in pci_msi_domain_get_msi_rid() argument
1581 of_node = irq_domain_get_of_node(domain); in pci_msi_domain_get_msi_rid()
1582 rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) : in pci_msi_domain_get_msi_rid()
1583 iort_msi_map_id(&pdev->dev, rid); in pci_msi_domain_get_msi_rid()
1589 * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
1590 * @pdev: The PCI device
1592 * Use the firmware data to find a device-specific MSI domain
1595 * Returns: The corresponding MSI domain or NULL if none has been found.
1603 dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); in pci_msi_get_device_domain()
1605 dom = iort_get_device_domain(&pdev->dev, rid, in pci_msi_get_device_domain()
1611 * pci_dev_has_special_msi_domain - Check whether the device is handled by
1612 * a non-standard PCI-MSI domain
1613 * @pdev: The PCI device to check.
1616 * non-standard PCI/MSI.
1620 struct irq_domain *dom = dev_get_msi_domain(&pdev->dev); in pci_dev_has_special_msi_domain()
1623 dom = dev_get_msi_domain(&pdev->bus->dev); in pci_dev_has_special_msi_domain()
1628 return dom->bus_token != DOMAIN_BUS_PCI_MSI; in pci_dev_has_special_msi_domain()