/drivers/scsi/snic/
D  snic_isr.c
      94  for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {  in snic_free_intr()
      95  if (snic->msix[i].requested) {  in snic_free_intr()
      97  snic->msix[i].devid);  in snic_free_intr()
     118  sprintf(snic->msix[SNIC_MSIX_WQ].devname,  in snic_request_intr()
     121  snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;  in snic_request_intr()
     122  snic->msix[SNIC_MSIX_WQ].devid = snic;  in snic_request_intr()
     124  sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,  in snic_request_intr()
     127  snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;  in snic_request_intr()
     128  snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;  in snic_request_intr()
     130  sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,  in snic_request_intr()
    [all …]
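
Taken together, the snic hits above (and the fnic hits further down) show one bookkeeping pattern: each MSI-X slot is given a devname, a handler, and a devid cookie before request_irq(), and a requested flag gates the matching free_irq(). A minimal sketch of that shape, assuming a hypothetical vec_slot struct and pci_irq_vector()-style lookup rather than either driver's own vector storage:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct vec_slot {
    char devname[32];   /* name shown in /proc/interrupts */
    irq_handler_t isr;  /* per-queue handler */
    void *devid;        /* cookie handed back to the ISR */
    bool requested;     /* set once request_irq() succeeded */
};

static int example_request_intr(struct pci_dev *pdev,
                                struct vec_slot *slot, int nvec)
{
    int i, err;

    for (i = 0; i < nvec; i++) {
        /* assumes pci_alloc_irq_vectors() ran earlier */
        err = request_irq(pci_irq_vector(pdev, i), slot[i].isr,
                          0, slot[i].devname, slot[i].devid);
        if (err)
            return err;
        slot[i].requested = true;
    }
    return 0;
}

static void example_free_intr(struct pci_dev *pdev,
                              struct vec_slot *slot, int nvec)
{
    int i;

    /* teardown may follow a partial setup, hence the flag check */
    for (i = 0; i < nvec; i++)
        if (slot[i].requested)
            free_irq(pci_irq_vector(pdev, i), slot[i].devid);
}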

/drivers/infiniband/hw/hfi1/
D  affinity.c
     336  static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)  in hfi1_update_sdma_affinity() argument
     338  struct sdma_engine *sde = msix->arg;  in hfi1_update_sdma_affinity()
     354  cpumask_clear(&msix->mask);  in hfi1_update_sdma_affinity()
     355  cpumask_set_cpu(cpu, &msix->mask);  in hfi1_update_sdma_affinity()
     357  msix->irq, irq_type_names[msix->type],  in hfi1_update_sdma_affinity()
     359  irq_set_affinity_hint(msix->irq, &msix->mask);  in hfi1_update_sdma_affinity()
     372  if (other_msix->type != IRQ_SDMA || other_msix == msix)  in hfi1_update_sdma_affinity()
     388  struct hfi1_msix_entry *msix = container_of(notify,  in hfi1_irq_notifier_notify() local
     393  hfi1_update_sdma_affinity(msix, cpu);  in hfi1_irq_notifier_notify()
     404  static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)  in hfi1_setup_sdma_notifier() argument
    [all …]
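
These affinity.c hits are the IRQ affinity-notifier pattern: when the vector migrates to another CPU, the notify callback rebuilds a single-CPU hint mask and republishes it through irq_set_affinity_hint(). A condensed sketch under assumed types; example_msix_entry and its fields are illustrative, not hfi1's layout:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kref.h>

struct example_msix_entry {
    int irq;
    struct cpumask mask;                /* published hint mask */
    struct irq_affinity_notify notify;  /* embedded notifier */
};

static void example_notify(struct irq_affinity_notify *notify,
                           const cpumask_t *mask)
{
    /* recover the containing entry, as the hit at line 388 does */
    struct example_msix_entry *me =
        container_of(notify, struct example_msix_entry, notify);
    int cpu = cpumask_first(mask);

    cpumask_clear(&me->mask);
    cpumask_set_cpu(cpu, &me->mask);
    irq_set_affinity_hint(me->irq, &me->mask);
}

static void example_release(struct kref *ref)
{
    /* the entry is embedded in a larger struct: nothing to free */
}

static int example_setup_notifier(struct example_msix_entry *me)
{
    me->notify.notify = example_notify;
    me->notify.release = example_release;  /* mandatory kref hook */
    return irq_set_affinity_notifier(me->irq, &me->notify);
}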

D  affinity.h
      84  struct hfi1_msix_entry *msix);
      90  struct hfi1_msix_entry *msix);

/drivers/scsi/fnic/
D  fnic_isr.c
     161  for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)  in fnic_free_intr()
     162  if (fnic->msix[i].requested)  in fnic_free_intr()
     164  fnic->msix[i].devid);  in fnic_free_intr()
     191  sprintf(fnic->msix[FNIC_MSIX_RQ].devname,  in fnic_request_intr()
     193  fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;  in fnic_request_intr()
     194  fnic->msix[FNIC_MSIX_RQ].devid = fnic;  in fnic_request_intr()
     196  sprintf(fnic->msix[FNIC_MSIX_WQ].devname,  in fnic_request_intr()
     198  fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;  in fnic_request_intr()
     199  fnic->msix[FNIC_MSIX_WQ].devid = fnic;  in fnic_request_intr()
     201  sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,  in fnic_request_intr()
    [all …]

/drivers/scsi/bfa/
D  bfa_hw_ct.c
     118  bfa->msix.nvecs = nvecs;  in bfa_hwct_msix_init()
     125  if (bfa->msix.nvecs == 0)  in bfa_hwct_msix_ctrl_install()
     128  if (bfa->msix.nvecs == 1)  in bfa_hwct_msix_ctrl_install()
     129  bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;  in bfa_hwct_msix_ctrl_install()
     131  bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;  in bfa_hwct_msix_ctrl_install()
     139  if (bfa->msix.nvecs == 0)  in bfa_hwct_msix_queue_install()
     142  if (bfa->msix.nvecs == 1) {  in bfa_hwct_msix_queue_install()
     144  bfa->msix.handler[i] = bfa_msix_all;  in bfa_hwct_msix_queue_install()
     149  bfa->msix.handler[i] = bfa_msix_reqq;  in bfa_hwct_msix_queue_install()
     152  bfa->msix.handler[i] = bfa_msix_rspq;  in bfa_hwct_msix_queue_install()
    [all …]

D  bfa_hw_cb.c
     120  bfa->msix.nvecs = nvecs;  in bfa_hwcb_msix_init()
     129  if (bfa->msix.nvecs == 0)  in bfa_hwcb_msix_ctrl_install()
     132  if (bfa->msix.nvecs == 1) {  in bfa_hwcb_msix_ctrl_install()
     134  bfa->msix.handler[i] = bfa_msix_all;  in bfa_hwcb_msix_ctrl_install()
     139  bfa->msix.handler[i] = bfa_msix_lpu_err;  in bfa_hwcb_msix_ctrl_install()
     147  if (bfa->msix.nvecs == 0)  in bfa_hwcb_msix_queue_install()
     150  if (bfa->msix.nvecs == 1) {  in bfa_hwcb_msix_queue_install()
     152  bfa->msix.handler[i] = bfa_msix_all;  in bfa_hwcb_msix_queue_install()
     157  bfa->msix.handler[i] = bfa_msix_reqq;  in bfa_hwcb_msix_queue_install()
     160  bfa->msix.handler[i] = bfa_msix_rspq;  in bfa_hwcb_msix_queue_install()
    [all …]
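
bfa_hw_ct.c and bfa_hw_cb.c fill the same indirection table: zero vectors (INTx) installs nothing, a single vector funnels every source into a catch-all, and a full complement gives each queue its own handler; the bfa.h hit at 297 then dispatches with a plain table lookup. A sketch of that scheme (the ex_* names are placeholders, not bfa's):

enum { EX_MAX_VEC = 8 };

struct ex_bfa;
typedef void (*ex_msix_handler_t)(struct ex_bfa *bfa, int vec);

struct ex_bfa {
    struct {
        int nvecs;
        ex_msix_handler_t handler[EX_MAX_VEC];
    } msix;
};

static void ex_msix_all(struct ex_bfa *bfa, int vec)
{
    /* single-vector mode: poll every queue */
}

static void ex_msix_rspq(struct ex_bfa *bfa, int vec)
{
    /* full mode: service only the queue that owns this vector */
}

static void ex_msix_queue_install(struct ex_bfa *bfa, int first, int last)
{
    int i;

    if (bfa->msix.nvecs == 0)
        return;  /* INTx: the table is never consulted */

    for (i = first; i <= last; i++)
        bfa->msix.handler[i] = (bfa->msix.nvecs == 1) ?
                               ex_msix_all : ex_msix_rspq;
}

/* dispatch then reduces to: bfa->msix.handler[vec](bfa, vec); */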

D  bfa.h
     193  void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
     297  ((__bfa)->msix.handler[__vec](__bfa, __vec))
     327  void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
     341  void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);

D  bfa_ioc_cb.c
      35  static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
     216  bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)  in bfa_ioc_cb_isr_mode_set() argument

D  bfad.c
    1137  bfa_msix(&bfad->bfa, vec->msix.entry);  in bfad_msix()
    1164  bfad->msix_tab[bfad->nvec].msix.entry = i;  in bfad_init_msix_entry()
    1186  error = request_irq(bfad->msix_tab[i].msix.vector,  in bfad_install_msix_handler()
    1190  bfa_trc(bfad, bfad->msix_tab[i].msix.vector);  in bfad_install_msix_handler()
    1195  free_irq(bfad->msix_tab[j].msix.vector,  in bfad_install_msix_handler()
    1259  bfad->msix_tab[i].msix.vector = msix_entries[i].vector;  in bfad_setup_intr()
    1287  free_irq(bfad->msix_tab[i].msix.vector,  in bfad_remove_intr()

/drivers/crypto/cavium/nitrox/
D  nitrox_isr.c
     281  ndev->msix.entries = entries;  in nitrox_enable_msix()
     282  ndev->msix.names = names;  in nitrox_enable_msix()
     283  ndev->msix.nr_entries = nr_entries;  in nitrox_enable_msix()
     285  ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,  in nitrox_enable_msix()
     286  ndev->msix.nr_entries);  in nitrox_enable_msix()
     349  struct msix_entry *msix_ent = ndev->msix.entries;  in nitrox_request_irqs()
     367  name = *(ndev->msix.names + i);  in nitrox_request_irqs()
     382  set_bit(i, ndev->msix.irqs);  in nitrox_request_irqs()
     387  name = *(ndev->msix.names + i);  in nitrox_request_irqs()
     395  set_bit(i, ndev->msix.irqs);  in nitrox_request_irqs()
    [all …]
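
nitrox sizes its table up front and calls pci_enable_msix_exact(), the all-or-nothing variant: it either grants every requested vector or fails, unlike the pci_enable_msix_range() calls in the ntb entries below. It also keeps a bitmap of vectors that actually hold an irq, mirroring the set_bit() hits. A sketch with invented names and a fixed vector count:

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#define EX_NR_VEC 4

struct ex_msix {
    struct msix_entry entries[EX_NR_VEC];
    char names[EX_NR_VEC][32];
    DECLARE_BITMAP(irqs, EX_NR_VEC);  /* vectors holding an irq */
};

static irqreturn_t ex_isr(int irq, void *data)
{
    return IRQ_HANDLED;
}

static int ex_enable_msix(struct pci_dev *pdev, struct ex_msix *mx)
{
    int i;

    for (i = 0; i < EX_NR_VEC; i++) {
        mx->entries[i].entry = i;
        snprintf(mx->names[i], sizeof(mx->names[i]), "ex-dev-vec%d", i);
    }
    /* all-or-nothing: fails unless every vector is granted */
    return pci_enable_msix_exact(pdev, mx->entries, EX_NR_VEC);
}

static int ex_request_irqs(struct pci_dev *pdev, struct ex_msix *mx)
{
    int i, ret;

    for (i = 0; i < EX_NR_VEC; i++) {
        ret = request_irq(mx->entries[i].vector, ex_isr, 0,
                          mx->names[i], mx);
        if (ret)
            return ret;
        set_bit(i, mx->irqs);  /* remembered for teardown */
    }
    return 0;
}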

D  nitrox_dev.h
     143  struct nitrox_msix msix;  member

/drivers/vfio/pci/
D  vfio_pci_intrs.c
     250  static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)  in vfio_msi_enable() argument
     253  unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;  in vfio_msi_enable()
     273  vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :  in vfio_msi_enable()
     276  if (!msix) {  in vfio_msi_enable()
     288  int vector, int fd, bool msix)  in vfio_msi_set_vector_signal() argument
     311  msix ? "x" : "", vector,  in vfio_msi_set_vector_signal()
     329  if (msix) {  in vfio_msi_set_vector_signal()
     358  unsigned count, int32_t *fds, bool msix)  in vfio_msi_set_block() argument
     367  ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);  in vfio_msi_set_block()
     372  vfio_msi_set_vector_signal(vdev, j, -1, msix);  in vfio_msi_set_block()
    [all …]
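
vfio folds MSI and MSI-X into one code path by deriving a PCI_IRQ_* flag from a bool, which the modern pci_alloc_irq_vectors() API accepts directly. Roughly, with ex_msi_enable() as a hypothetical stand-in:

#include <linux/pci.h>

static int ex_msi_enable(struct pci_dev *pdev, int nvec, bool msix)
{
    unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
    int ret;

    /* ask for exactly nvec vectors of the requested flavour */
    ret = pci_alloc_irq_vectors(pdev, nvec, nvec, flag);
    if (ret < 0)
        return ret;

    /* pci_irq_vector(pdev, i) now yields the Linux irq of slot i */
    return 0;
}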

/drivers/net/ethernet/emulex/benet/
D  be_roce.c
      69  dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);  in _be_roce_dev_add()
      74  dev_info.msix.start_vector = adapter->num_evt_qs;  in _be_roce_dev_add()
      75  for (i = 0; i < dev_info.msix.num_vectors; i++) {  in _be_roce_dev_add()
      76  dev_info.msix.vector_list[i] =  in _be_roce_dev_add()
      80  dev_info.msix.num_vectors = 0;  in _be_roce_dev_add()

D  be_roce.h
      51  } msix;  member

/drivers/rapidio/devices/
D  tsi721.c
     629  mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;  in tsi721_omsg_msix()
     646  mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;  in tsi721_imsg_msix()
     710  err = request_irq(priv->msix[TSI721_VECT_IDB].vector,  in tsi721_request_msix()
     712  priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);  in tsi721_request_msix()
     716  err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,  in tsi721_request_msix()
     718  priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);  in tsi721_request_msix()
     720  free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);  in tsi721_request_msix()
     784  priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;  in tsi721_enable_msix()
     785  snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,  in tsi721_enable_msix()
     787  priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;  in tsi721_enable_msix()
    [all …]
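
The two ISRs at 629 and 646 recover which mailbox fired from nothing but the irq number, relying on the per-mailbox vectors having been registered at known offsets from a base slot. The arithmetic in isolation, with assumed ex_* names:

#define EX_MAX_MBOX 4

struct ex_vec {
    int vector;  /* Linux irq number of this slot */
};

/* mailbox vectors are laid out at fixed strides from the base slot,
 * so the irq offset folded by the mailbox count names the queue */
static int ex_mbox_from_irq(int irq, const struct ex_vec *base)
{
    return (irq - base->vector) % EX_MAX_MBOX;
}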

D  tsi721_dma.c
     164  rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,  in tsi721_bdma_ch_init()
     165  priv->msix[idx].irq_name, (void *)bdma_chan);  in tsi721_bdma_ch_init()
     176  rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,  in tsi721_bdma_ch_init()
     177  priv->msix[idx].irq_name, (void *)bdma_chan);  in tsi721_bdma_ch_init()
     184  priv->msix[TSI721_VECT_DMA0_DONE +  in tsi721_bdma_ch_init()
     238  free_irq(priv->msix[TSI721_VECT_DMA0_DONE +  in tsi721_bdma_ch_free()
     240  free_irq(priv->msix[TSI721_VECT_DMA0_INT +  in tsi721_bdma_ch_free()
     787  synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +  in tsi721_sync_dma_irq()
     789  synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +  in tsi721_sync_dma_irq()

/drivers/net/ethernet/cisco/enic/
D  enic_main.c
     122  (cpumask_available(enic->msix[i].affinity_mask) &&  in enic_init_affinity_hint()
     123  !cpumask_empty(enic->msix[i].affinity_mask)))  in enic_init_affinity_hint()
     125  if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,  in enic_init_affinity_hint()
     128  enic->msix[i].affinity_mask);  in enic_init_affinity_hint()
     139  free_cpumask_var(enic->msix[i].affinity_mask);  in enic_free_affinity_hint()
     151  !cpumask_available(enic->msix[i].affinity_mask) ||  in enic_set_affinity_hint()
     152  cpumask_empty(enic->msix[i].affinity_mask))  in enic_set_affinity_hint()
     155  enic->msix[i].affinity_mask);  in enic_set_affinity_hint()
     164  if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&  in enic_set_affinity_hint()
     165  !cpumask_empty(enic->msix[wq_intr].affinity_mask))  in enic_set_affinity_hint()
    [all …]
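
enic keeps one cpumask_var_t per vector. With CONFIG_CPUMASK_OFFSTACK that type is a pointer which may never have been allocated, hence the cpumask_available() guard before every cpumask_empty() dereference. A sketch of the allocate/hint/free trio (struct ex_vec is invented):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct ex_vec {
    int irq;
    cpumask_var_t affinity_mask;  /* may be unallocated: check first */
};

static int ex_init_affinity_hint(struct ex_vec *vec, int preferred_cpu)
{
    if (!zalloc_cpumask_var(&vec->affinity_mask, GFP_KERNEL))
        return -ENOMEM;
    cpumask_set_cpu(preferred_cpu, vec->affinity_mask);
    return 0;
}

static void ex_set_affinity_hint(struct ex_vec *vec)
{
    /* guard the dereference: the var is a pointer when OFFSTACK */
    if (!cpumask_available(vec->affinity_mask) ||
        cpumask_empty(vec->affinity_mask))
        return;
    irq_set_affinity_hint(vec->irq, vec->affinity_mask);
}

static void ex_free_affinity_hint(struct ex_vec *vec)
{
    irq_set_affinity_hint(vec->irq, NULL);  /* drop the hint first */
    free_cpumask_var(vec->affinity_mask);
}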

/drivers/irqchip/
D  irq-gic-v3-its-pci-msi.c
      46  int msi, msix, *count = data;  in its_pci_msi_vec_count() local
      49  msix = max(pci_msix_vec_count(pdev), 0);  in its_pci_msi_vec_count()
      50  *count += max(msi, msix);  in its_pci_msi_vec_count()
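
The counting callback sizes the ITS allocation by the larger of the device's MSI and MSI-X capabilities, clamping the helpers' negative "no capability" returns to zero. In isolation:

#include <linux/pci.h>

static int ex_pci_msi_vec_count(struct pci_dev *pdev)
{
    /* each helper returns the capability size or a negative errno */
    int msi  = max(pci_msi_vec_count(pdev), 0);
    int msix = max(pci_msix_vec_count(pdev), 0);

    return max(msi, msix);
}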

/drivers/edac/
D  thunderx_edac.c
    1098  struct msix_entry *msix = irq_id;  in thunderx_ocx_com_isr() local
    1099  struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,  in thunderx_ocx_com_isr()
    1100  msix_ent[msix->entry]);  in thunderx_ocx_com_isr()
    1127  struct msix_entry *msix = irq_id;  in thunderx_ocx_com_threaded_isr() local
    1128  struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,  in thunderx_ocx_com_threaded_isr()
    1129  msix_ent[msix->entry]);  in thunderx_ocx_com_threaded_isr()
    1191  struct msix_entry *msix = irq_id;  in thunderx_ocx_lnk_isr() local
    1192  struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,  in thunderx_ocx_lnk_isr()
    1193  msix_ent[msix->entry]);  in thunderx_ocx_lnk_isr()
    1198  ctx->link = msix->entry;  in thunderx_ocx_lnk_isr()
    [all …]
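
thunderx registers each vector with its own struct msix_entry as the dev_id. Inside the handler that one pointer carries everything: ->entry names the vector, and container_of() on the indexed array member walks back to the owning device. A sketch with a hypothetical ex_dev:

#include <linux/interrupt.h>
#include <linux/pci.h>

#define EX_NR_VEC 4

struct ex_dev {
    struct msix_entry msix_ent[EX_NR_VEC];
    /* other device state */
};

static irqreturn_t ex_isr(int irq, void *irq_id)
{
    struct msix_entry *msix = irq_id;
    /* subtract the slot's offset to recover the device */
    struct ex_dev *dev = container_of(msix, struct ex_dev,
                                      msix_ent[msix->entry]);

    /* service vector msix->entry on dev */
    return IRQ_HANDLED;
}

Registration would pass &dev->msix_ent[i] as the dev_id for vector i, so both the index and the device travel with the interrupt.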

/drivers/ntb/hw/amd/
D  ntb_hw_amd.c
     600  ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),  in ndev_init_isr()
     602  if (!ndev->msix)  in ndev_init_isr()
     606  ndev->msix[i].entry = i;  in ndev_init_isr()
     608  msix_count = pci_enable_msix_range(pdev, ndev->msix,  in ndev_init_isr()
     624  rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,  in ndev_init_isr()
     637  free_irq(ndev->msix[i].vector, &ndev->vec[i]);  in ndev_init_isr()
     640  kfree(ndev->msix);  in ndev_init_isr()
     644  ndev->msix = NULL;  in ndev_init_isr()
     695  if (ndev->msix) {  in ndev_deinit_isr()
     698  free_irq(ndev->msix[i].vector, &ndev->vec[i]);  in ndev_deinit_isr()
    [all …]
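
This entry (and the near-identical intel one at the end of the listing) walks the full lifecycle: allocate the entry table node-locally, let pci_enable_msix_range() trim the vector count, request one irq per granted vector, and unwind everything in reverse on failure. Condensed into one hypothetical helper, with vec_ctx standing in for the per-vector contexts:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

static irqreturn_t ex_vec_isr(int irq, void *dev)
{
    return IRQ_HANDLED;
}

static struct msix_entry *ex_init_isr(struct pci_dev *pdev, int msix_max,
                                      void **vec_ctx)
{
    struct msix_entry *msix;
    int i, rc, msix_count;

    msix = kzalloc_node(msix_max * sizeof(*msix), GFP_KERNEL,
                        dev_to_node(&pdev->dev));
    if (!msix)
        return NULL;

    for (i = 0; i < msix_max; i++)
        msix[i].entry = i;

    /* accept anything between 1 and msix_max vectors */
    msix_count = pci_enable_msix_range(pdev, msix, 1, msix_max);
    if (msix_count < 0)
        goto err_free;

    for (i = 0; i < msix_count; i++) {
        rc = request_irq(msix[i].vector, ex_vec_isr, 0,
                         "ex_vec_isr", vec_ctx[i]);
        if (rc)
            goto err_unwind;
    }
    return msix;

err_unwind:
    while (i-- > 0)
        free_irq(msix[i].vector, vec_ctx[i]);
    pci_disable_msix(pdev);
err_free:
    kfree(msix);
    return NULL;
}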

/drivers/scsi/qla2xxx/
D  qla_mid.c
     560  if (rsp->msix && rsp->msix->have_irq) {  in qla25xx_free_rsp_que()
     561  free_irq(rsp->msix->vector, rsp->msix->handle);  in qla25xx_free_rsp_que()
     562  rsp->msix->have_irq = 0;  in qla25xx_free_rsp_que()
     563  rsp->msix->in_use = 0;  in qla25xx_free_rsp_que()
     564  rsp->msix->handle = NULL;  in qla25xx_free_rsp_que()
     835  rsp->msix = qpair->msix;  in qla25xx_create_rsp_que()
     873  ret = qla25xx_request_irq(ha, qpair, qpair->msix,  in qla25xx_create_rsp_que()

/drivers/misc/mic/host/
D  mic_intr.c
     439  struct msix_entry *msix = NULL;  in mic_request_threaded_irq() local
     455  msix = mic_get_available_vector(mdev);  in mic_request_threaded_irq()
     456  if (!msix) {  in mic_request_threaded_irq()
     463  rc = request_threaded_irq(msix->vector, handler, thread_fn,  in mic_request_threaded_irq()
     470  entry = msix->entry;  in mic_request_threaded_irq()
     476  msix->vector, intr_src);  in mic_request_threaded_irq()
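
mic hands each claimed vector to request_threaded_irq(), splitting the work between a hard handler that only quiesces the source and a thread function that is allowed to sleep. The skeleton, with ex_* names assumed:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t ex_hard_isr(int irq, void *data)
{
    /* atomic context: ack/mask the device, then defer */
    return IRQ_WAKE_THREAD;
}

static irqreturn_t ex_thread_fn(int irq, void *data)
{
    /* process context: the heavy lifting may sleep here */
    return IRQ_HANDLED;
}

static int ex_request_threaded(struct msix_entry *msix, void *data)
{
    return request_threaded_irq(msix->vector, ex_hard_isr,
                                ex_thread_fn, 0, "ex-threaded-vec", data);
}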

/drivers/dma/ioat/
D  init.c
     417  struct msix_entry *msix;  in ioat_dma_setup_interrupts() local
     423  goto msix;  in ioat_dma_setup_interrupts()
     431  msix:  in ioat_dma_setup_interrupts()
     442  msix = &ioat_dma->msix_entries[i];  in ioat_dma_setup_interrupts()
     444  err = devm_request_irq(dev, msix->vector,  in ioat_dma_setup_interrupts()
     449  msix = &ioat_dma->msix_entries[j];  in ioat_dma_setup_interrupts()
     451  devm_free_irq(dev, msix->vector, ioat_chan);  in ioat_dma_setup_interrupts()
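
ioat requests its vectors through devm_request_irq(), which would normally be released with the device, yet a mid-loop failure is still unwound by hand with devm_free_irq() so the subsequent fallback to plain MSI or INTx starts from a clean slate. A sketch (ex_setup_msix is hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t ex_chan_isr(int irq, void *chan)
{
    return IRQ_HANDLED;
}

static int ex_setup_msix(struct device *dev, struct msix_entry *ents,
                         void **chans, int n)
{
    int i, j, err;

    for (i = 0; i < n; i++) {
        err = devm_request_irq(dev, ents[i].vector, ex_chan_isr,
                               0, "ex-chan", chans[i]);
        if (err) {
            /* release the vectors already bound, so the caller
             * can retry with MSI or INTx */
            for (j = 0; j < i; j++)
                devm_free_irq(dev, ents[j].vector, chans[j]);
            return err;
        }
    }
    return 0;
}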

/drivers/net/ethernet/brocade/bna/
D  bfa_ioc_ct.c
      43  static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
     403  bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)  in bfa_ioc_ct_isr_mode_set() argument
     416  if ((!msix && mode) || (msix && !mode))  in bfa_ioc_ct_isr_mode_set()
     419  if (msix)  in bfa_ioc_ct_isr_mode_set()
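
The guard at 416 compares the requested mode against what the hardware already reports and bails out when they agree; with mode holding the INTx-status bit, "(!msix && mode) || (msix && !mode)" simply means "already in the desired mode". A sketch with invented register names:

#include <linux/io.h>

#define EX_FN_PERSONALITY  0x0c8  /* hypothetical register offset */
#define EX_INTX_STATUS     0x01   /* hypothetical status mask */

static void ex_isr_mode_set(void __iomem *rb, bool msix)
{
    /* nonzero while the function signals legacy INTx */
    u32 mode = readl(rb + EX_FN_PERSONALITY) & EX_INTX_STATUS;

    /* already in the desired mode: do not touch the hardware */
    if ((!msix && mode) || (msix && !mode))
        return;

    /* otherwise flip the personality bits and write them back */
}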

/drivers/ntb/hw/intel/
D  ntb_hw_intel.c
     471  ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),  in ndev_init_isr()
     473  if (!ndev->msix)  in ndev_init_isr()
     477  ndev->msix[i].entry = i;  in ndev_init_isr()
     479  msix_count = pci_enable_msix_range(pdev, ndev->msix,  in ndev_init_isr()
     487  rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,  in ndev_init_isr()
     500  free_irq(ndev->msix[i].vector, &ndev->vec[i]);  in ndev_init_isr()
     503  kfree(ndev->msix);  in ndev_init_isr()
     507  ndev->msix = NULL;  in ndev_init_isr()
     561  if (ndev->msix) {  in ndev_deinit_isr()
     564  free_irq(ndev->msix[i].vector, &ndev->vec[i]);  in ndev_deinit_isr()
    [all …]