/drivers/net/ethernet/mellanox/mlx4/ |
D | eq.c |
    118  struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);  in next_eqe_sw() local
    119  return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;  in next_eqe_sw()
    124  struct mlx4_eqe *eqe =  in next_slave_event_eqe() local
    126  return (!!(eqe->owner & 0x80) ^  in next_slave_event_eqe()
    128  eqe : NULL;  in next_slave_event_eqe()
    141  struct mlx4_eqe *eqe;  in mlx4_gen_slave_eqe() local
    145  for (eqe = next_slave_event_eqe(slave_eq); eqe;  in mlx4_gen_slave_eqe()
    146  eqe = next_slave_event_eqe(slave_eq)) {  in mlx4_gen_slave_eqe()
    147  slave = eqe->slave_id;  in mlx4_gen_slave_eqe()
    154  if (mlx4_GEN_EQE(dev, i, eqe))  in mlx4_gen_slave_eqe()
    [all …]
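
The next_eqe_sw() hits above all encode the same ownership rule: an entry may be consumed only when the wrap parity carried in the top bit of eqe->owner matches the wrap parity of the software consumer index. A minimal stand-alone sketch of that test follows; the fake_* types and the power-of-two masking are invented stand-ins, not the driver's real structures:

    #include <stdint.h>

    /* Simplified stand-ins; the real driver structures carry far more state. */
    struct fake_eqe {
        uint8_t data[31];
        uint8_t owner;          /* bit 7 carries the hardware's wrap parity */
    };

    struct fake_eq {
        struct fake_eqe *entries;
        uint32_t nent;          /* number of entries, a power of two */
        uint32_t cons_index;    /* free-running software consumer index */
    };

    /*
     * Return the next entry if software owns it, else NULL.  The XOR compares
     * the owner bit's wrap parity with the consumer index's wrap parity
     * (cons_index & nent); the entry is ready only when the two agree.
     */
    struct fake_eqe *fake_next_eqe_sw(struct fake_eq *eq)
    {
        struct fake_eqe *eqe = &eq->entries[eq->cons_index & (eq->nent - 1)];

        return (!!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
    }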
|
D | resource_tracker.c |
    2408  int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)  in mlx4_GEN_EQE() argument
    2421  event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];  in mlx4_GEN_EQE()
    2444  if (eqe->type == MLX4_EVENT_TYPE_CMD) {  in mlx4_GEN_EQE()
    2446  eqe->event.cmd.token = cpu_to_be16(event_eq->token);  in mlx4_GEN_EQE()
    2449  memcpy(mailbox->buf, (u8 *) eqe, 28);  in mlx4_GEN_EQE()
|
D | mlx4.h | 1044 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
|
/drivers/infiniband/hw/ehca/ |
D | ehca_irq.c |
    206  static void qp_event_callback(struct ehca_shca *shca, u64 eqe,  in qp_event_callback() argument
    210  u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);  in qp_event_callback()
    241  u64 eqe)  in cq_event_callback() argument
    244  u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);  in cq_event_callback()
    263  static void parse_identifier(struct ehca_shca *shca, u64 eqe)  in parse_identifier() argument
    265  u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);  in parse_identifier()
    269  qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);  in parse_identifier()
    272  qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);  in parse_identifier()
    275  qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);  in parse_identifier()
    279  qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);  in parse_identifier()
    [all …]
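
The ehca handlers treat the EQE as a plain 64-bit word: EHCA_BMASK_GET() pulls the event identifier and the QP/CQ token out of it, and parse_identifier() fans out per identifier. A hedged sketch of that shape with a plain shift-and-mask helper; the field positions and identifier values below are hypothetical, not ehca's real EQE layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: bits 63..56 = event identifier, bits 31..0 = QP token. */
    #define FAKE_EQE_ID_SHIFT   56
    #define FAKE_EQE_ID_MASK    0xffull
    #define FAKE_EQE_TOKEN_MASK 0xffffffffull

    static uint8_t fake_eqe_identifier(uint64_t eqe)
    {
        return (uint8_t)((eqe >> FAKE_EQE_ID_SHIFT) & FAKE_EQE_ID_MASK);
    }

    static uint32_t fake_eqe_qp_token(uint64_t eqe)
    {
        return (uint32_t)(eqe & FAKE_EQE_TOKEN_MASK);
    }

    /* Dispatch on the identifier, mirroring the parse_identifier() shape above. */
    static void fake_parse_identifier(uint64_t eqe)
    {
        switch (fake_eqe_identifier(eqe)) {
        case 0x01:
            printf("path migrated, qp token %u\n",
                   (unsigned)fake_eqe_qp_token(eqe));
            break;
        case 0x02:
            printf("communication established, qp token %u\n",
                   (unsigned)fake_eqe_qp_token(eqe));
            break;
        default:
            printf("unhandled event 0x%02x\n", fake_eqe_identifier(eqe));
            break;
        }
    }

    int main(void)
    {
        fake_parse_identifier(((uint64_t)0x01 << FAKE_EQE_ID_SHIFT) | 42);
        return 0;
    }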
|
D | ehca_eq.c |
    158  void *eqe;  in ehca_poll_eq() local
    161  eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);  in ehca_poll_eq()
    164  return eqe;  in ehca_poll_eq()
|
D | ehca_classes.h | 72 struct ehca_eqe *eqe; member
|
/drivers/infiniband/hw/mthca/ |
D | mthca_eq.c |
    236  struct mthca_eqe *eqe;  in next_eqe_sw() local
    237  eqe = get_eqe(eq, eq->cons_index);  in next_eqe_sw()
    238  return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;  in next_eqe_sw()
    241  static inline void set_eqe_hw(struct mthca_eqe *eqe)  in set_eqe_hw() argument
    243  eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;  in set_eqe_hw()
    262  struct mthca_eqe *eqe;  in mthca_eq_int() local
    267  while ((eqe = next_eqe_sw(eq))) {  in mthca_eq_int()
    274  switch (eqe->type) {  in mthca_eq_int()
    276  disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;  in mthca_eq_int()
    282  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,  in mthca_eq_int()
    [all …]
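
mthca's ownership test is simpler than mlx4's: a single "owned by hardware" flag. next_eqe_sw() returns NULL while MTHCA_EQ_ENTRY_OWNER_HW is set, and set_eqe_hw() gives the consumed slot back by setting the flag again. A small sketch of that handshake with invented names; the real driver also folds in consumer-index and doorbell handling:

    #include <stdint.h>

    #define FAKE_EQ_ENTRY_OWNER_HW 0x80

    struct fake_eqe {
        uint8_t owner;      /* FAKE_EQ_ENTRY_OWNER_HW while hardware owns the slot */
        uint8_t type;
        uint8_t rsvd[30];
    };

    struct fake_eq {
        struct fake_eqe *entries;
        uint32_t nent;          /* power of two */
        uint32_t cons_index;
    };

    /* NULL while the hardware still owns the slot, i.e. nothing to process yet. */
    struct fake_eqe *fake_next_eqe_sw(struct fake_eq *eq)
    {
        struct fake_eqe *eqe = &eq->entries[eq->cons_index & (eq->nent - 1)];

        return (eqe->owner & FAKE_EQ_ENTRY_OWNER_HW) ? NULL : eqe;
    }

    /* Return the slot to the hardware once its event has been handled. */
    void fake_set_eqe_hw(struct fake_eqe *eqe)
    {
        eqe->owner = FAKE_EQ_ENTRY_OWNER_HW;
    }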
|
/drivers/infiniband/hw/mlx4/ |
D | mad.c |
    60  #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.bl…  argument
    61  #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_e…  argument
    884  struct mlx4_eqe *eqe)  in propagate_pkey_ev() argument
    886  __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),  in propagate_pkey_ev()
    887  GET_MASK_FROM_EQE(eqe));  in propagate_pkey_ev()
    947  struct mlx4_eqe *eqe = &(ew->ib_eqe);  in handle_port_mgmt_change_event() local
    948  u8 port = eqe->event.port_mgmt_change.port;  in handle_port_mgmt_change_event()
    953  switch (eqe->subtype) {  in handle_port_mgmt_change_event()
    955  changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);  in handle_port_mgmt_change_event()
    960  u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);  in handle_port_mgmt_change_event()
    [all …]
|
D | main.c |
    1632  struct mlx4_eqe *eqe = NULL;  in mlx4_ib_event() local
    1637  eqe = (struct mlx4_eqe *)param;  in mlx4_ib_event()
    1672  memcpy(&ew->ib_eqe, eqe, sizeof *eqe);  in mlx4_ib_event()
|
/drivers/net/ethernet/ibm/ehea/ |
D | ehea_main.c |
    936  struct ehea_eqe *eqe;  in ehea_qp_aff_irq_handler() local
    942  eqe = ehea_poll_eq(port->qp_eq);  in ehea_qp_aff_irq_handler()
    944  while (eqe) {  in ehea_qp_aff_irq_handler()
    945  qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);  in ehea_qp_aff_irq_handler()
    947  eqe->entry, qp_token);  in ehea_qp_aff_irq_handler()
    961  eqe = ehea_poll_eq(port->qp_eq);  in ehea_qp_aff_irq_handler()
    1149  static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)  in ehea_parse_eqe() argument
    1157  ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);  in ehea_parse_eqe()
    1158  portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);  in ehea_parse_eqe()
    1170  if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {  in ehea_parse_eqe()
    [all …]
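
ehea_qp_aff_irq_handler() drains the event queue in a loop: poll once, and while an entry comes back, pull the QP token out of the 64-bit entry word and poll again. A compressed, runnable sketch of that loop shape; the poll function, token mask, and canned entries are stand-ins, not ehea's real queue code:

    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_EQE_QP_TOKEN_MASK 0xffffull   /* hypothetical token field */

    struct fake_eqe {
        uint64_t entry;
    };

    /* Stand-in poll: serve a few canned entries, then report the queue empty. */
    struct fake_eqe *fake_poll_eq(void)
    {
        static struct fake_eqe queue[] = { { 0x10001 }, { 0x10002 } };
        static unsigned next;

        return next < 2 ? &queue[next++] : NULL;
    }

    /* Drain the EQ, handling one affiliated-error event per entry. */
    void fake_qp_aff_irq_handler(void)
    {
        struct fake_eqe *eqe = fake_poll_eq();

        while (eqe) {
            uint32_t qp_token = (uint32_t)(eqe->entry & FAKE_EQE_QP_TOKEN_MASK);

            printf("QP aff_err event, entry=0x%llx token=%u\n",
                   (unsigned long long)eqe->entry, (unsigned)qp_token);
            /* ...look up the QP by token and recover it here... */

            eqe = fake_poll_eq();
        }
    }

    int main(void)
    {
        fake_qp_aff_irq_handler();
        return 0;
    }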
|
D | ehea_qmr.c |
    323  struct ehea_eqe *eqe;  in ehea_poll_eq() local
    327  eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);  in ehea_poll_eq()
    330  return eqe;  in ehea_poll_eq()
|
/drivers/scsi/be2iscsi/ |
D | be_main.c |
    754  struct be_eq_entry *eqe = NULL;  in be_isr_mcc() local
    765  eqe = queue_tail_node(eq);  in be_isr_mcc()
    769  while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]  in be_isr_mcc()
    771  if (((eqe->dw[offsetof(struct amap_eq_entry,  in be_isr_mcc()
    778  AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);  in be_isr_mcc()
    780  eqe = queue_tail_node(eq);  in be_isr_mcc()
    799  struct be_eq_entry *eqe = NULL;  in be_isr_msix() local
    809  eqe = queue_tail_node(eq);  in be_isr_msix()
    814  while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]  in be_isr_msix()
    819  AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);  in be_isr_msix()
    [all …]
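
Both Emulex drivers (be2iscsi here and benet further down) mark each entry with a "valid" flag: the ISR walks the queue tail, counts entries while the flag is set, clears the flag so the slot can be reused, and stops at the first invalid entry. A simplified sketch of that walk with an explicit flag bit instead of the drivers' AMAP accessors:

    #include <stdint.h>

    struct fake_eq_entry {
        uint32_t evt;              /* low bit doubles as the 'valid' flag */
    };

    struct fake_eq {
        struct fake_eq_entry *entries;
        uint32_t len;              /* number of entries in the ring */
        uint32_t tail;             /* next entry to inspect */
    };

    /* Count and retire all valid entries, advancing the software tail. */
    unsigned int fake_events_get(struct fake_eq *eq)
    {
        unsigned int num = 0;
        struct fake_eq_entry *eqe = &eq->entries[eq->tail];

        while (eqe->evt & 1) {
            eqe->evt = 0;                          /* give the slot back */
            eq->tail = (eq->tail + 1) % eq->len;   /* wrap the ring */
            eqe = &eq->entries[eq->tail];
            num++;
        }
        return num;                /* typically written to the EQ doorbell later */
    }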
|
/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_hw.c |
    903  struct ocrdma_eqe eqe;  in ocrdma_irq_handler() local
    909  eqe = *ptr;  in ocrdma_irq_handler()
    910  ocrdma_le32_to_cpu(&eqe, sizeof(eqe));  in ocrdma_irq_handler()
    911  if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)  in ocrdma_irq_handler()
    916  if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {  in ocrdma_irq_handler()
    917  cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;  in ocrdma_irq_handler()
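
ocrdma copies each entry out of the queue, byte-swaps it with ocrdma_le32_to_cpu(), stops at the first entry whose valid bit is clear, and otherwise shifts the CQ id out of the same id_valid word after testing a second flag bit. A sketch of that decode; the bit positions and the meaning of the second flag below are made up, not the OCRDMA_EQE_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: bit 0 = valid, bit 1 = not a CQ event,
     * bits 31..16 = resource (CQ) id. */
    #define FAKE_EQE_VALID_MASK        0x1u
    #define FAKE_EQE_NOT_CQ_MASK       0x2u
    #define FAKE_EQE_RESOURCE_ID_SHIFT 16

    void fake_handle_eqe(uint32_t id_valid)
    {
        /* the real handler runs a le32_to_cpu conversion on the copied entry first */
        if ((id_valid & FAKE_EQE_VALID_MASK) == 0)
            return;                            /* stale entry, stop polling */

        if ((id_valid & FAKE_EQE_NOT_CQ_MASK) == 0) {
            uint16_t cq_id = (uint16_t)(id_valid >> FAKE_EQE_RESOURCE_ID_SHIFT);

            printf("completion event for CQ %u\n", (unsigned)cq_id);
        }
    }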
|
/drivers/scsi/lpfc/ |
D | lpfc_sli.c |
    249  struct lpfc_eqe *eqe;  in lpfc_sli4_eq_get() local
    255  eqe = q->qe[q->hba_index].eqe;  in lpfc_sli4_eq_get()
    258  if (!bf_get_le32(lpfc_eqe_valid, eqe))  in lpfc_sli4_eq_get()
    266  return eqe;  in lpfc_sli4_eq_get()
    316  temp_eqe = q->qe[q->host_index].eqe;  in lpfc_sli4_eq_release()
    8549  struct lpfc_eqe *eqe;  in lpfc_sli_issue_iocb() local
    8582  while ((eqe = lpfc_sli4_eq_get(fpeq))) {  in lpfc_sli_issue_iocb()
    8584  eqe, idx);  in lpfc_sli_issue_iocb()
    11596  lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,  in lpfc_sli4_sp_handle_eqe() argument
    11606  cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);  in lpfc_sli4_sp_handle_eqe()
    [all …]
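
lpfc pulls valid entries off the EQ one at a time (lpfc_sli4_eq_get) and, once a batch has been handled, releases them back through lpfc_sli4_eq_release, which in this driver amounts to telling the hardware how many entries were consumed and whether to re-arm the queue. A rough sketch of that consume-then-release cycle; the structure layout, doorbell encoding, and helper names are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct fake_eqe {
        uint32_t word0;            /* bit 0 = valid, bits 31..16 = CQ id (made up) */
    };

    struct fake_queue {
        struct fake_eqe *qe;
        uint32_t entry_count;
        uint32_t host_index;       /* next entry the host will consume */
        volatile uint32_t *doorbell;
    };

    /* Return the next valid entry, or NULL once the hardware has produced none. */
    struct fake_eqe *fake_eq_get(struct fake_queue *q)
    {
        struct fake_eqe *eqe = &q->qe[q->host_index];

        if (!(eqe->word0 & 1))
            return NULL;

        q->host_index = (q->host_index + 1) % q->entry_count;
        return eqe;
    }

    /* Hand 'released' entries back to the hardware and optionally re-arm the EQ. */
    void fake_eq_release(struct fake_queue *q, uint32_t released, int rearm)
    {
        uint32_t db = released << 16;          /* invented doorbell encoding */

        if (rearm)
            db |= 1;
        *q->doorbell = db;
    }

    /* Consume everything currently valid, then release the batch in one write. */
    void fake_process_eq(struct fake_queue *q)
    {
        struct fake_eqe *eqe;
        uint32_t released = 0;

        while ((eqe = fake_eq_get(q)) != NULL) {
            uint16_t cqid = (uint16_t)(eqe->word0 >> 16);

            printf("event for CQ %u\n", (unsigned)cqid);
            eqe->word0 &= ~1u;                 /* mark the slot consumed */
            released++;
        }
        fake_eq_release(q, released, 1);
    }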
|
D | lpfc_sli4.h | 111 struct lpfc_eqe *eqe; member
|
/drivers/net/ethernet/emulex/benet/ |
D | be_main.c |
    1771  struct be_eq_entry *eqe;  in events_get() local
    1775  eqe = queue_tail_node(&eqo->q);  in events_get()
    1776  if (eqe->evt == 0)  in events_get()
    1780  eqe->evt = 0;  in events_get()
|