Lines matching refs:entry — uses of the struct msi_desc *entry local/argument in the PCI MSI core (drivers/pci/msi.c, pre-5.17 layout)
93 struct msi_desc *entry; in arch_setup_msi_irqs() local
105 for_each_pci_msi_entry(entry, dev) { in arch_setup_msi_irqs()
106 ret = arch_setup_msi_irq(dev, entry); in arch_setup_msi_irqs()
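For context, a minimal reconstruction of the loop these two matches come from, assuming the pre-5.17 drivers/pci/msi.c layout (a positive return from the arch hook means "retry with fewer vectors" and is turned into -ENOSPC):

        struct msi_desc *entry;
        int ret;

        /* Walk every MSI descriptor of the device and set each one up. */
        for_each_pci_msi_entry(entry, dev) {
                ret = arch_setup_msi_irq(dev, entry);
                if (ret < 0)
                        return ret;     /* hard failure */
                if (ret > 0)
                        return -ENOSPC; /* arch wants fewer vectors */
        }
        return 0;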
123 struct msi_desc *entry; in default_teardown_msi_irqs() local
125 for_each_pci_msi_entry(entry, dev) in default_teardown_msi_irqs()
126 if (entry->irq) in default_teardown_msi_irqs()
127 for (i = 0; i < entry->nvec_used; i++) in default_teardown_msi_irqs()
128 arch_teardown_msi_irq(entry->irq + i); in default_teardown_msi_irqs()
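Note the inner loop: with multi-MSI a single msi_desc covers nvec_used consecutive Linux IRQs, so teardown has to walk entry->irq, entry->irq + 1, ..., entry->irq + nvec_used - 1 individually.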
138 struct msi_desc *entry; in default_restore_msi_irq() local
140 entry = NULL; in default_restore_msi_irq()
142 for_each_pci_msi_entry(entry, dev) { in default_restore_msi_irq()
143 if (irq == entry->irq) in default_restore_msi_irq()
147 entry = irq_get_msi_desc(irq); in default_restore_msi_irq()
150 if (entry) in default_restore_msi_irq()
151 __pci_write_msi_msg(entry, &entry->msg); in default_restore_msi_irq()
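A sketch of the surrounding control flow in this vintage of the code: the MSI-X side walks the list because several descriptors exist per device, while the single-descriptor MSI side resolves the descriptor straight from the IRQ; either way the cached message is replayed to the hardware.

        struct msi_desc *entry = NULL;

        if (dev->msix_enabled) {
                for_each_pci_msi_entry(entry, dev)
                        if (irq == entry->irq)
                                break;
        } else if (dev->msi_enabled) {
                entry = irq_get_msi_desc(irq);
        }

        if (entry)
                __pci_write_msi_msg(entry, &entry->msg);        /* replay cached msg */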
261 struct msi_desc *entry; in default_restore_msi_irqs() local
263 for_each_pci_msi_entry(entry, dev) in default_restore_msi_irqs()
264 default_restore_msi_irq(dev, entry->irq); in default_restore_msi_irqs()
267 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) in __pci_read_msi_msg() argument
269 struct pci_dev *dev = msi_desc_to_pci_dev(entry); in __pci_read_msi_msg()
273 if (entry->msi_attrib.is_msix) { in __pci_read_msi_msg()
274 void __iomem *base = pci_msix_desc_addr(entry); in __pci_read_msi_msg()
285 if (entry->msi_attrib.is_64) { in __pci_read_msi_msg()
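The MSI-X branch at 273-274 continues with three MMIO reads from the mapped table slot, roughly:

        void __iomem *base = pci_msix_desc_addr(entry);

        msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
        msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
        msg->data       = readl(base + PCI_MSIX_ENTRY_DATA);

The plain-MSI branch at 285 does the equivalent config-space reads, fetching address_hi and the data word at PCI_MSI_DATA_64 only when the capability is 64-bit, else at PCI_MSI_DATA_32.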
297 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) in __pci_write_msi_msg() argument
299 struct pci_dev *dev = msi_desc_to_pci_dev(entry); in __pci_write_msi_msg()
303 } else if (entry->msi_attrib.is_msix) { in __pci_write_msi_msg()
304 void __iomem *base = pci_msix_desc_addr(entry); in __pci_write_msi_msg()
315 msgctl |= entry->msi_attrib.multiple << 4; in __pci_write_msi_msg()
320 if (entry->msi_attrib.is_64) { in __pci_write_msi_msg()
330 entry->msg = *msg; in __pci_write_msi_msg()
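A hedged reconstruction of the plain-MSI write branch around lines 315-330: the QSIZE field of the control word is refreshed from msi_attrib.multiple (log2 of the enabled vectors), the message is written to config space, and the result is cached in the descriptor so the restore paths above can replay it.

        int pos = dev->msi_cap;
        u16 msgctl;

        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
        msgctl &= ~PCI_MSI_FLAGS_QSIZE;
        msgctl |= entry->msi_attrib.multiple << 4;      /* log2(nvec) into QSIZE */
        pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

        pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo);
        if (entry->msi_attrib.is_64) {
                pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi);
                pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data);
        } else {
                pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data);
        }

        entry->msg = *msg;      /* cache for suspend/resume restore */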
335 struct msi_desc *entry = irq_get_msi_desc(irq); in pci_write_msi_msg() local
337 __pci_write_msi_msg(entry, msg); in pci_write_msi_msg()
344 struct msi_desc *entry, *tmp; in free_msi_irqs() local
349 for_each_pci_msi_entry(entry, dev) in free_msi_irqs()
350 if (entry->irq) in free_msi_irqs()
351 for (i = 0; i < entry->nvec_used; i++) in free_msi_irqs()
352 BUG_ON(irq_has_action(entry->irq + i)); in free_msi_irqs()
356 list_for_each_entry_safe(entry, tmp, msi_list, list) { in free_msi_irqs()
357 if (entry->msi_attrib.is_msix) { in free_msi_irqs()
358 if (list_is_last(&entry->list, msi_list)) in free_msi_irqs()
359 iounmap(entry->mask_base); in free_msi_irqs()
362 list_del(&entry->list); in free_msi_irqs()
363 free_msi_entry(entry); in free_msi_irqs()
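Free path in outline: the BUG_ON at 352 asserts no handler is still attached to any of the descriptor's IRQs, then the list is torn down. All MSI-X descriptors share one ioremap of the vector table, so it is unmapped exactly once, at the last list entry; a sketch (msi_list here is dev_to_msi_list(&dev->dev)):

        list_for_each_entry_safe(entry, tmp, msi_list, list) {
                if (entry->msi_attrib.is_msix &&
                    list_is_last(&entry->list, msi_list))
                        iounmap(entry->mask_base);      /* one mapping per table */

                list_del(&entry->list);
                free_msi_entry(entry);
        }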
392 struct msi_desc *entry; in __pci_restore_msi_state() local
397 entry = irq_get_msi_desc(dev->irq); in __pci_restore_msi_state()
404 msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap), in __pci_restore_msi_state()
405 entry->masked); in __pci_restore_msi_state()
407 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; in __pci_restore_msi_state()
413 struct msi_desc *entry; in __pci_restore_msix_state() local
425 for_each_pci_msi_entry(entry, dev) in __pci_restore_msix_state()
426 msix_mask_irq(entry, entry->masked); in __pci_restore_msix_state()
441 struct msi_desc *entry; in msi_mode_show() local
449 entry = irq_get_msi_desc(irq); in msi_mode_show()
450 if (entry) in msi_mode_show()
452 entry->msi_attrib.is_msix ? "msix" : "msi"); in msi_mode_show()
464 struct msi_desc *entry; in populate_msi_sysfs() local
471 for_each_pci_msi_entry(entry, pdev) in populate_msi_sysfs()
472 num_msi += entry->nvec_used; in populate_msi_sysfs()
480 for_each_pci_msi_entry(entry, pdev) { in populate_msi_sysfs()
481 for (i = 0; i < entry->nvec_used; i++) { in populate_msi_sysfs()
489 entry->irq + i); in populate_msi_sysfs()
538 struct msi_desc *entry; in msi_setup_entry() local
546 entry = alloc_msi_entry(&dev->dev, nvec, masks); in msi_setup_entry()
547 if (!entry) in msi_setup_entry()
552 entry->msi_attrib.is_msix = 0; in msi_setup_entry()
553 entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); in msi_setup_entry()
554 entry->msi_attrib.entry_nr = 0; in msi_setup_entry()
555 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); in msi_setup_entry()
556 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ in msi_setup_entry()
557 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; in msi_setup_entry()
558 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); in msi_setup_entry()
561 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; in msi_setup_entry()
563 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; in msi_setup_entry()
566 if (entry->msi_attrib.maskbit) in msi_setup_entry()
567 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); in msi_setup_entry()
571 return entry; in msi_setup_entry()
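Two easily confused log2 fields are filled in here: multi_cap (557) is what the device advertises in the Multiple Message Capable field of the control word, while multiple (558) is rounded up from the nvec the caller actually got; __pci_write_msi_msg() later copies multiple into QSIZE, and msi_mask() sizes the per-vector mask from multi_cap. The mask register offset (561/563) likewise depends on whether the capability is 64-bit.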
576 struct msi_desc *entry; in msi_verify_entries() local
578 for_each_pci_msi_entry(entry, dev) { in msi_verify_entries()
579 if (!dev->no_64bit_msi || !entry->msg.address_hi) in msi_verify_entries()
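Read the condition at 579 inverted: the loop continues (entry passes) unless the device is flagged no_64bit_msi and the composed message has address_hi set, in which case the function reports the broken assignment and fails with -EIO.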
603 struct msi_desc *entry; in msi_capability_init() local
609 entry = msi_setup_entry(dev, nvec, affd); in msi_capability_init()
610 if (!entry) in msi_capability_init()
614 mask = msi_mask(entry->msi_attrib.multi_cap); in msi_capability_init()
615 msi_mask_irq(entry, mask, mask); in msi_capability_init()
617 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); in msi_capability_init()
622 msi_mask_irq(entry, mask, ~mask); in msi_capability_init()
629 msi_mask_irq(entry, mask, ~mask); in msi_capability_init()
636 msi_mask_irq(entry, mask, ~mask); in msi_capability_init()
647 dev->irq = entry->irq; in msi_capability_init()
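The mask choreography in this function: 614-615 mask every advertised vector up front (flag == mask sets the bits); the three msi_mask_irq(entry, mask, ~mask) calls at 622, 629 and 636 sit on the error-unwind paths and clear those mask bits again before bailing out; 647 finally publishes the first allocated IRQ as dev->irq on success.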
676 struct msi_desc *entry; in msix_setup_entries() local
683 entry = alloc_msi_entry(&dev->dev, 1, curmsk); in msix_setup_entries()
684 if (!entry) { in msix_setup_entries()
694 entry->msi_attrib.is_msix = 1; in msix_setup_entries()
695 entry->msi_attrib.is_64 = 1; in msix_setup_entries()
697 entry->msi_attrib.entry_nr = entries[i].entry; in msix_setup_entries()
699 entry->msi_attrib.entry_nr = i; in msix_setup_entries()
700 entry->msi_attrib.default_irq = dev->irq; in msix_setup_entries()
701 entry->mask_base = base; in msix_setup_entries()
703 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); in msix_setup_entries()
716 struct msi_desc *entry; in msix_program_entries() local
719 for_each_pci_msi_entry(entry, dev) { in msix_program_entries()
721 entries[i++].vector = entry->irq; in msix_program_entries()
722 entry->masked = readl(pci_msix_desc_addr(entry) + in msix_program_entries()
724 msix_mask_irq(entry, 1); in msix_program_entries()
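Reconstruction of the whole loop: the caller's msix_entry array (if any) gets the allocated IRQ numbers back, the current Vector Control word is latched as the descriptor's initial masked state, and each entry is left masked during setup:

        struct msi_desc *entry;
        int i = 0;

        for_each_pci_msi_entry(entry, dev) {
                if (entries)
                        entries[i++].vector = entry->irq;
                entry->masked = readl(pci_msix_desc_addr(entry) +
                                      PCI_MSIX_ENTRY_VECTOR_CTRL);
                msix_mask_irq(entry, 1);
        }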
796 struct msi_desc *entry; in msix_capability_init() local
799 for_each_pci_msi_entry(entry, dev) { in msix_capability_init()
800 if (entry->irq != 0) in msix_capability_init()
953 if (entries[i].entry >= nr_entries) in __pci_enable_msix()
956 if (entries[i].entry == entries[j].entry) in __pci_enable_msix()
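These two tests validate the caller-supplied msix_entry array; in context (i, j and nvec are the surrounding loop variables):

        for (i = 0; i < nvec; i++) {
                if (entries[i].entry >= nr_entries)
                        return -EINVAL;         /* index past the MSI-X table */
                for (j = i + 1; j < nvec; j++)
                        if (entries[i].entry == entries[j].entry)
                                return -EINVAL; /* duplicate table index */
        }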
972 struct msi_desc *entry; in pci_msix_shutdown() local
983 for_each_pci_msi_entry(entry, dev) { in pci_msix_shutdown()
985 __pci_msix_desc_mask_irq(entry, 1); in pci_msix_shutdown()
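Shutdown masks every vector in place via __pci_msix_desc_mask_irq(entry, 1), which writes the Vector Control word but deliberately leaves the cached entry->masked untouched, so a later restore still knows each vector's pre-shutdown state.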
1219 struct msi_desc *entry; in pci_irq_vector() local
1222 for_each_pci_msi_entry(entry, dev) { in pci_irq_vector()
1224 return entry->irq; in pci_irq_vector()
1232 struct msi_desc *entry = first_pci_msi_entry(dev); in pci_irq_vector() local
1234 if (WARN_ON_ONCE(nr >= entry->nvec_used)) in pci_irq_vector()
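Taken together: for MSI-X the loop at 1222 walks to the nr-th descriptor and returns its entry->irq, while for multi-MSI there is a single descriptor and the result is dev->irq + nr, guarded by the nvec_used bound check at 1234. A hypothetical consumer (mydev_handler and priv are illustrative names) built on the modern pci_alloc_irq_vectors() API:

        int nvec, i, irq;

        nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                irq = pci_irq_vector(pdev, i);  /* vector index -> Linux IRQ */
                if (request_irq(irq, mydev_handler, 0, "mydev", priv))
                        goto err_free_vectors;  /* unwind elided */
        }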
1253 struct msi_desc *entry; in pci_irq_get_affinity() local
1256 for_each_pci_msi_entry(entry, dev) { in pci_irq_get_affinity()
1258 return entry->affinity; in pci_irq_get_affinity()
1264 struct msi_desc *entry = first_pci_msi_entry(dev); in pci_irq_get_affinity() local
1266 if (WARN_ON_ONCE(!entry || !entry->affinity || in pci_irq_get_affinity()
1267 nr >= entry->nvec_used)) in pci_irq_get_affinity()
1270 return &entry->affinity[nr]; in pci_irq_get_affinity()
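pci_irq_get_affinity() is the read side of the PCI_IRQ_AFFINITY spreading done at allocation time: for MSI-X it walks to the nr-th descriptor's affinity mask, for multi-MSI it indexes into the first descriptor's affinity array, and the WARN_ON_ONCE covers callers asking for a vector that carries no mask. Typical use, with i as a hypothetical vector index:

        const struct cpumask *mask = pci_irq_get_affinity(pdev, i);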