
Searched for refs:msix (Results 1 – 25 of 72), sorted by relevance


/drivers/scsi/snic/
snic_isr.c
94 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { in snic_free_intr()
95 if (snic->msix[i].requested) { in snic_free_intr()
97 snic->msix[i].devid); in snic_free_intr()
118 sprintf(snic->msix[SNIC_MSIX_WQ].devname, in snic_request_intr()
121 snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq; in snic_request_intr()
122 snic->msix[SNIC_MSIX_WQ].devid = snic; in snic_request_intr()
124 sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname, in snic_request_intr()
127 snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl; in snic_request_intr()
128 snic->msix[SNIC_MSIX_IO_CMPL].devid = snic; in snic_request_intr()
130 sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname, in snic_request_intr()
[all …]
/drivers/scsi/fnic/
fnic_isr.c
161 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) in fnic_free_intr()
162 if (fnic->msix[i].requested) in fnic_free_intr()
164 fnic->msix[i].devid); in fnic_free_intr()
191 sprintf(fnic->msix[FNIC_MSIX_RQ].devname, in fnic_request_intr()
193 fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq; in fnic_request_intr()
194 fnic->msix[FNIC_MSIX_RQ].devid = fnic; in fnic_request_intr()
196 sprintf(fnic->msix[FNIC_MSIX_WQ].devname, in fnic_request_intr()
198 fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq; in fnic_request_intr()
199 fnic->msix[FNIC_MSIX_WQ].devid = fnic; in fnic_request_intr()
201 sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname, in fnic_request_intr()
[all …]
/drivers/scsi/bfa/
bfa_hw_ct.c
108 bfa->msix.nvecs = nvecs; in bfa_hwct_msix_init()
115 if (bfa->msix.nvecs == 0) in bfa_hwct_msix_ctrl_install()
118 if (bfa->msix.nvecs == 1) in bfa_hwct_msix_ctrl_install()
119 bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all; in bfa_hwct_msix_ctrl_install()
121 bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err; in bfa_hwct_msix_ctrl_install()
129 if (bfa->msix.nvecs == 0) in bfa_hwct_msix_queue_install()
132 if (bfa->msix.nvecs == 1) { in bfa_hwct_msix_queue_install()
134 bfa->msix.handler[i] = bfa_msix_all; in bfa_hwct_msix_queue_install()
139 bfa->msix.handler[i] = bfa_msix_reqq; in bfa_hwct_msix_queue_install()
142 bfa->msix.handler[i] = bfa_msix_rspq; in bfa_hwct_msix_queue_install()
[all …]
bfa_hw_cb.c
110 bfa->msix.nvecs = nvecs; in bfa_hwcb_msix_init()
119 if (bfa->msix.nvecs == 0) in bfa_hwcb_msix_ctrl_install()
122 if (bfa->msix.nvecs == 1) { in bfa_hwcb_msix_ctrl_install()
124 bfa->msix.handler[i] = bfa_msix_all; in bfa_hwcb_msix_ctrl_install()
129 bfa->msix.handler[i] = bfa_msix_lpu_err; in bfa_hwcb_msix_ctrl_install()
137 if (bfa->msix.nvecs == 0) in bfa_hwcb_msix_queue_install()
140 if (bfa->msix.nvecs == 1) { in bfa_hwcb_msix_queue_install()
142 bfa->msix.handler[i] = bfa_msix_all; in bfa_hwcb_msix_queue_install()
147 bfa->msix.handler[i] = bfa_msix_reqq; in bfa_hwcb_msix_queue_install()
150 bfa->msix.handler[i] = bfa_msix_rspq; in bfa_hwcb_msix_queue_install()
[all …]
bfa.h
184 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
288 ((__bfa)->msix.handler[__vec](__bfa, __vec))
318 void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
332 void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
/drivers/infiniband/hw/hfi1/
affinity.c
742 static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu) in hfi1_update_sdma_affinity() argument
744 struct sdma_engine *sde = msix->arg; in hfi1_update_sdma_affinity()
760 cpumask_clear(&msix->mask); in hfi1_update_sdma_affinity()
761 cpumask_set_cpu(cpu, &msix->mask); in hfi1_update_sdma_affinity()
763 msix->irq, irq_type_names[msix->type], in hfi1_update_sdma_affinity()
765 irq_set_affinity_hint(msix->irq, &msix->mask); in hfi1_update_sdma_affinity()
778 if (other_msix->type != IRQ_SDMA || other_msix == msix) in hfi1_update_sdma_affinity()
794 struct hfi1_msix_entry *msix = container_of(notify, in hfi1_irq_notifier_notify() local
799 hfi1_update_sdma_affinity(msix, cpu); in hfi1_irq_notifier_notify()
810 static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix) in hfi1_setup_sdma_notifier() argument
[all …]
affinity.h
44 struct hfi1_msix_entry *msix);
50 struct hfi1_msix_entry *msix);
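The hfi1 affinity.c lines above pin an SDMA interrupt to a CPU by rewriting the vector's cpumask and publishing it as an affinity hint. Below is a minimal sketch of that pattern, assuming a made-up example_msix_entry structure in place of hfi1_msix_entry; only cpumask_clear(), cpumask_set_cpu(), and irq_set_affinity_hint() are the real kernel APIs.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical stand-in for hfi1_msix_entry; fields named for clarity. */
struct example_msix_entry {
        int irq;                 /* Linux IRQ number of this MSI-X vector */
        struct cpumask mask;     /* CPUs the vector should be steered to */
};

static void example_update_affinity(struct example_msix_entry *msix, int cpu)
{
        cpumask_clear(&msix->mask);
        cpumask_set_cpu(cpu, &msix->mask);
        /* Ask the IRQ core to route this vector to the chosen CPU. */
        irq_set_affinity_hint(msix->irq, &msix->mask);
}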
/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_tc_mqprio.c
155 int ret, msix = 0; in cxgb4_mqprio_alloc_hw_resources() local
180 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_mqprio_alloc_hw_resources()
187 if (msix >= 0) { in cxgb4_mqprio_alloc_hw_resources()
188 msix = cxgb4_get_msix_idx_from_bmap(adap); in cxgb4_mqprio_alloc_hw_resources()
189 if (msix < 0) { in cxgb4_mqprio_alloc_hw_resources()
190 ret = msix; in cxgb4_mqprio_alloc_hw_resources()
194 eorxq->msix = &adap->msix_info[msix]; in cxgb4_mqprio_alloc_hw_resources()
195 snprintf(eorxq->msix->desc, in cxgb4_mqprio_alloc_hw_resources()
196 sizeof(eorxq->msix->desc), in cxgb4_mqprio_alloc_hw_resources()
209 dev, msix, &eorxq->fl, in cxgb4_mqprio_alloc_hw_resources()
[all …]
cxgb4_main.c
735 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
750 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
766 minfo = s->ethrxq[i].msix; in free_msix_queue_irqs()
985 int msix; in setup_non_data_intr() local
992 msix = cxgb4_get_msix_idx_from_bmap(adap); in setup_non_data_intr()
993 if (msix < 0) in setup_non_data_intr()
996 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
997 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
1000 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1007 int msix, err = 0; in setup_fw_sge_queues() local
[all …]
cxgb4_uld.c
144 q->msix = &adap->msix_info[msi_idx]; in alloc_uld_rxqs()
167 if (q->msix) in alloc_uld_rxqs()
168 cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx); in alloc_uld_rxqs()
332 minfo = rxq_info->uldrxq[idx].msix; in request_msix_queue_irqs_uld()
347 minfo = rxq_info->uldrxq[idx].msix; in request_msix_queue_irqs_uld()
363 minfo = rxq_info->uldrxq[idx].msix; in free_msix_queue_irqs_uld()
/drivers/vfio/pci/
vfio_pci_intrs.c
247 static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix) in vfio_msi_enable() argument
250 unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; in vfio_msi_enable()
274 vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : in vfio_msi_enable()
277 if (!msix) { in vfio_msi_enable()
289 int vector, int fd, bool msix) in vfio_msi_set_vector_signal() argument
317 msix ? "x" : "", vector, in vfio_msi_set_vector_signal()
336 if (msix) { in vfio_msi_set_vector_signal()
368 unsigned count, int32_t *fds, bool msix) in vfio_msi_set_block() argument
377 ret = vfio_msi_set_vector_signal(vdev, j, fd, msix); in vfio_msi_set_block()
382 vfio_msi_set_vector_signal(vdev, j, -1, msix); in vfio_msi_set_block()
[all …]
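The vfio_pci_intrs.c lines above choose PCI_IRQ_MSIX or PCI_IRQ_MSI from a bool and then enable that many vectors. A hedged sketch of that enable path, with error handling trimmed and the function name invented for illustration:

#include <linux/pci.h>

static int example_msi_enable(struct pci_dev *pdev, int nvec, bool msix)
{
        unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
        int ret;

        /* Allocate between 1 and nvec vectors of the chosen type. */
        ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
        if (ret < 0)
                return ret;

        /* pci_irq_vector() maps a vector index to its Linux IRQ number. */
        return pci_irq_vector(pdev, 0);
}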
/drivers/net/ethernet/emulex/benet/
be_roce.c
65 dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS); in _be_roce_dev_add()
70 dev_info.msix.start_vector = adapter->num_evt_qs; in _be_roce_dev_add()
71 for (i = 0; i < dev_info.msix.num_vectors; i++) { in _be_roce_dev_add()
72 dev_info.msix.vector_list[i] = in _be_roce_dev_add()
76 dev_info.msix.num_vectors = 0; in _be_roce_dev_add()
be_roce.h
47 } msix; member
/drivers/rapidio/devices/
tsi721.c
616 mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; in tsi721_omsg_msix()
633 mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; in tsi721_imsg_msix()
697 err = request_irq(priv->msix[TSI721_VECT_IDB].vector, in tsi721_request_msix()
699 priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv); in tsi721_request_msix()
703 err = request_irq(priv->msix[TSI721_VECT_PWRX].vector, in tsi721_request_msix()
705 priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv); in tsi721_request_msix()
707 free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); in tsi721_request_msix()
771 priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector; in tsi721_enable_msix()
772 snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX, in tsi721_enable_msix()
774 priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector; in tsi721_enable_msix()
[all …]
tsi721_dma.c
152 rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, in tsi721_bdma_ch_init()
153 priv->msix[idx].irq_name, (void *)bdma_chan); in tsi721_bdma_ch_init()
164 rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, in tsi721_bdma_ch_init()
165 priv->msix[idx].irq_name, (void *)bdma_chan); in tsi721_bdma_ch_init()
172 priv->msix[TSI721_VECT_DMA0_DONE + in tsi721_bdma_ch_init()
226 free_irq(priv->msix[TSI721_VECT_DMA0_DONE + in tsi721_bdma_ch_free()
228 free_irq(priv->msix[TSI721_VECT_DMA0_INT + in tsi721_bdma_ch_free()
770 synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + in tsi721_sync_dma_irq()
772 synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + in tsi721_sync_dma_irq()
/drivers/net/ethernet/cisco/enic/
enic_main.c
121 (cpumask_available(enic->msix[i].affinity_mask) && in enic_init_affinity_hint()
122 !cpumask_empty(enic->msix[i].affinity_mask))) in enic_init_affinity_hint()
124 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, in enic_init_affinity_hint()
127 enic->msix[i].affinity_mask); in enic_init_affinity_hint()
138 free_cpumask_var(enic->msix[i].affinity_mask); in enic_free_affinity_hint()
150 !cpumask_available(enic->msix[i].affinity_mask) || in enic_set_affinity_hint()
151 cpumask_empty(enic->msix[i].affinity_mask)) in enic_set_affinity_hint()
154 enic->msix[i].affinity_mask); in enic_set_affinity_hint()
163 if (cpumask_available(enic->msix[wq_intr].affinity_mask) && in enic_set_affinity_hint()
164 !cpumask_empty(enic->msix[wq_intr].affinity_mask)) in enic_set_affinity_hint()
[all …]
/drivers/edac/
thunderx_edac.c
1077 struct msix_entry *msix = irq_id; in thunderx_ocx_com_isr() local
1078 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx, in thunderx_ocx_com_isr()
1079 msix_ent[msix->entry]); in thunderx_ocx_com_isr()
1106 struct msix_entry *msix = irq_id; in thunderx_ocx_com_threaded_isr() local
1107 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx, in thunderx_ocx_com_threaded_isr()
1108 msix_ent[msix->entry]); in thunderx_ocx_com_threaded_isr()
1170 struct msix_entry *msix = irq_id; in thunderx_ocx_lnk_isr() local
1171 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx, in thunderx_ocx_lnk_isr()
1172 msix_ent[msix->entry]); in thunderx_ocx_lnk_isr()
1177 ctx->link = msix->entry; in thunderx_ocx_lnk_isr()
[all …]
/drivers/irqchip/
irq-gic-v3-its-pci-msi.c
36 int msi, msix, *count = data; in its_pci_msi_vec_count() local
39 msix = max(pci_msix_vec_count(pdev), 0); in its_pci_msi_vec_count()
40 *count += max(msi, msix); in its_pci_msi_vec_count()
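For reference, the its_pci_msi_vec_count() lines above amount to taking the larger of a device's MSI and MSI-X capacities; a small sketch of that computation (function name hypothetical):

#include <linux/kernel.h>
#include <linux/pci.h>

static int example_max_vec_count(struct pci_dev *pdev)
{
        /* Both helpers return a negative errno if the capability is absent. */
        int msi  = max(pci_msi_vec_count(pdev), 0);
        int msix = max(pci_msix_vec_count(pdev), 0);

        return max(msi, msix);
}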
/drivers/ntb/hw/amd/
ntb_hw_amd.c
727 ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix), in ndev_init_isr()
729 if (!ndev->msix) in ndev_init_isr()
733 ndev->msix[i].entry = i; in ndev_init_isr()
735 msix_count = pci_enable_msix_range(pdev, ndev->msix, in ndev_init_isr()
751 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, in ndev_init_isr()
764 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_init_isr()
767 kfree(ndev->msix); in ndev_init_isr()
771 ndev->msix = NULL; in ndev_init_isr()
822 if (ndev->msix) { in ndev_deinit_isr()
825 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_deinit_isr()
[all …]
/drivers/scsi/qla2xxx/
qla_mid.c
607 if (rsp->msix && rsp->msix->have_irq) { in qla25xx_free_rsp_que()
608 free_irq(rsp->msix->vector, rsp->msix->handle); in qla25xx_free_rsp_que()
609 rsp->msix->have_irq = 0; in qla25xx_free_rsp_que()
610 rsp->msix->in_use = 0; in qla25xx_free_rsp_que()
611 rsp->msix->handle = NULL; in qla25xx_free_rsp_que()
874 rsp->msix = qpair->msix; in qla25xx_create_rsp_que()
912 ret = qla25xx_request_irq(ha, qpair, qpair->msix, in qla25xx_create_rsp_que()
/drivers/crypto/cavium/nitrox/
nitrox_isr.c
395 vec = ndev->iov.msix.vector; in nitrox_sriov_unregister_interrupts()
419 ndev->iov.msix.entry = NON_RING_MSIX_BASE; in nitrox_sriov_register_interupts()
420 ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS); in nitrox_sriov_register_interupts()
439 vec = ndev->iov.msix.vector; in nitrox_sriov_register_interupts()
/drivers/ntb/hw/intel/
ntb_hw_gen1.c
387 ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix), in ndev_init_isr()
389 if (!ndev->msix) in ndev_init_isr()
393 ndev->msix[i].entry = i; in ndev_init_isr()
395 msix_count = pci_enable_msix_range(pdev, ndev->msix, in ndev_init_isr()
403 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, in ndev_init_isr()
416 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_init_isr()
419 kfree(ndev->msix); in ndev_init_isr()
423 ndev->msix = NULL; in ndev_init_isr()
477 if (ndev->msix) { in ndev_deinit_isr()
480 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_deinit_isr()
[all …]
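Both NTB groups above (ntb_hw_amd.c and ntb_hw_gen1.c) follow the classic msix_entry flow: fill in the .entry indices, enable a range of vectors, then request one IRQ per vector granted. A minimal sketch of that flow, assuming the caller supplies the msix array and handler; everything except the PCI/IRQ APIs is illustrative:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_init_isr(struct pci_dev *pdev, struct msix_entry *msix,
                            int msix_max, irq_handler_t handler, void *ctx)
{
        int i, count, rc;

        for (i = 0; i < msix_max; i++)
                msix[i].entry = i;

        /* May grant fewer vectors than requested; negative errno on failure. */
        count = pci_enable_msix_range(pdev, msix, 1, msix_max);
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                rc = request_irq(msix[i].vector, handler, 0, "example", ctx);
                if (rc)
                        goto err_free;
        }
        return count;

err_free:
        while (--i >= 0)
                free_irq(msix[i].vector, ctx);
        pci_disable_msix(pdev);
        return rc;
}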
/drivers/net/ethernet/marvell/octeontx2/af/
rvu.c
620 pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; in rvu_setup_msix_resources()
621 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); in rvu_setup_msix_resources()
624 err = rvu_alloc_bitmap(&pfvf->msix); in rvu_setup_msix_resources()
629 pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, in rvu_setup_msix_resources()
647 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); in rvu_setup_msix_resources()
657 pfvf->msix.max = (cfg & 0xFFF) + 1; in rvu_setup_msix_resources()
658 rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); in rvu_setup_msix_resources()
661 err = rvu_alloc_bitmap(&pfvf->msix); in rvu_setup_msix_resources()
666 devm_kcalloc(rvu->dev, pfvf->msix.max, in rvu_setup_msix_resources()
679 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); in rvu_setup_msix_resources()
[all …]
/drivers/net/ethernet/brocade/bna/
bfa_ioc_ct.c
35 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
395 bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) in bfa_ioc_ct_isr_mode_set() argument
408 if ((!msix && mode) || (msix && !mode)) in bfa_ioc_ct_isr_mode_set()
411 if (msix) in bfa_ioc_ct_isr_mode_set()
/drivers/dma/ioat/
init.c
408 struct msix_entry *msix; in ioat_dma_setup_interrupts() local
414 goto msix; in ioat_dma_setup_interrupts()
422 msix: in ioat_dma_setup_interrupts()
433 msix = &ioat_dma->msix_entries[i]; in ioat_dma_setup_interrupts()
435 err = devm_request_irq(dev, msix->vector, in ioat_dma_setup_interrupts()
440 msix = &ioat_dma->msix_entries[j]; in ioat_dma_setup_interrupts()
442 devm_free_irq(dev, msix->vector, ioat_chan); in ioat_dma_setup_interrupts()
