/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c |
  174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)  in cqe_sw() argument
  176  return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;  in cqe_sw()
  181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));  in next_cqe_sw()
  184  static inline void set_cqe_hw(struct mthca_cqe *cqe)  in set_cqe_hw() argument
  186  cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;  in set_cqe_hw()
  191  __be32 *cqe = cqe_ptr;  in dump_cqe() local
  193  (void) cqe; /* avoid warning if mthca_dbg compiled away... */  in dump_cqe()
  195  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),  in dump_cqe()
  196  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),  in dump_cqe()
  197  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));  in dump_cqe()
  [all …]
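The mthca lines above show the classic consumer/hardware ownership handshake: a CQE may be reaped only while the hardware-owner bit is clear, and is handed back to hardware by setting it again. A minimal userspace sketch of that handshake follows; MY_OWNER_HW and struct my_cqe are invented stand-ins for the mthca definitions.

/* Minimal sketch of the mthca-style ownership handshake. */
#include <stddef.h>
#include <stdint.h>

#define MY_OWNER_HW 0x80                /* top bit: hardware owns the entry */

struct my_cqe {
        uint8_t owner;
        uint8_t data[31];
};

/* Return the entry only if software may consume it. */
static struct my_cqe *my_cqe_sw(struct my_cqe *cqe)
{
        return (cqe->owner & MY_OWNER_HW) ? NULL : cqe;
}

/* Hand a consumed entry back to (simulated) hardware. */
static void my_set_cqe_hw(struct my_cqe *cqe)
{
        cqe->owner = MY_OWNER_HW;
}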
|
/drivers/infiniband/hw/mlx4/ |
D | cq.c |
  81   struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe() local
  82   struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);  in get_sw_cqe()
  85   !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;  in get_sw_cqe()
  133  static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)  in mlx4_ib_free_cq_buf() argument
  135  mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);  in mlx4_ib_free_cq_buf()
  140  struct ib_umem **umem, u64 buf_addr, int cqe)  in mlx4_ib_get_cq_umem() argument
  147  *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
  178  int entries = attr->cqe;  in mlx4_ib_create_cq()
  195  cq->ibcq.cqe = entries - 1;  in mlx4_ib_create_cq()
  281  mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);  in mlx4_ib_create_cq()
  [all …]
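The validity test in get_sw_cqe() XORs the CQE owner bit against the wrap parity of the consumer counter: ibcq.cqe is depth − 1, so n & (depth) flips each time the ring wraps. A sketch of just that parity test, with invented names and a single ownership bit:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MY_OWNER_BIT 0x1

struct my_ring_cqe {
        uint8_t op_own;
};

/* mask is depth - 1 (depth a power of two), so n & (mask + 1) is the
 * pass parity of consumer counter n: it flips on every wrap. */
static struct my_ring_cqe *my_get_sw_cqe(struct my_ring_cqe *ring,
                                         uint32_t n, uint32_t mask)
{
        struct my_ring_cqe *cqe = &ring[n & mask];
        bool hw = cqe->op_own & MY_OWNER_BIT;
        bool sw = !!(n & (mask + 1));

        /* Valid when the owner bit matches our pass parity. */
        return (hw == sw) ? cqe : NULL;
}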
|
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_cq.c |
  83   cq->ibcq.cqe, &head);  in pvrdma_req_notify_cq()
  105  int entries = attr->cqe;  in pvrdma_create_cq()
  132  cq->ibcq.cqe = entries;  in pvrdma_create_cq()
  186  cmd->cqe = entries;  in pvrdma_create_cq()
  195  cq->ibcq.cqe = resp->cqe;  in pvrdma_create_cq()
  288  cq->ibcq.cqe, &head);  in _pvrdma_flush_cqe()
  293  cq->ibcq.cqe);  in _pvrdma_flush_cqe()
  294  struct pvrdma_cqe *cqe;  in _pvrdma_flush_cqe() local
  298  (cq->ibcq.cqe - head + tail);  in _pvrdma_flush_cqe()
  302  curr = cq->ibcq.cqe - 1;  in _pvrdma_flush_cqe()
  [all …]
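_pvrdma_flush_cqe() computes how many entries sit between head and tail of the circular queue; when the tail has wrapped past the head, the count is size − head + tail. A one-function sketch (names invented):

#include <stdint.h>

/* Entries occupied in a circular CQ of 'size' slots. */
static uint32_t my_cq_occupied(uint32_t head, uint32_t tail, uint32_t size)
{
        return (tail >= head) ? tail - head : size - head + tail;
}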
|
/drivers/infiniband/sw/siw/ |
D | siw_cq.c |
  50   struct siw_cqe *cqe;  in siw_reap_cqe() local
  55   cqe = &cq->queue[cq->cq_get % cq->num_cqe];  in siw_reap_cqe()
  56   if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {  in siw_reap_cqe()
  58   wc->wr_id = cqe->id;  in siw_reap_cqe()
  59   wc->byte_len = cqe->bytes;  in siw_reap_cqe()
  67   if (cqe->flags & SIW_WQE_REM_INVAL) {  in siw_reap_cqe()
  68   wc->ex.invalidate_rkey = cqe->inval_stag;  in siw_reap_cqe()
  71   wc->qp = cqe->base_qp;  in siw_reap_cqe()
  72   wc->opcode = map_wc_opcode[cqe->opcode];  in siw_reap_cqe()
  73   wc->status = map_cqe_status[cqe->status].ib;  in siw_reap_cqe()
  [all …]
|
D | siw_qp.c |
  1068  struct siw_cqe *cqe;  in siw_sqe_complete() local
  1075  cqe = &cq->queue[idx];  in siw_sqe_complete()
  1077  if (!READ_ONCE(cqe->flags)) {  in siw_sqe_complete()
  1080  cqe->id = sqe->id;  in siw_sqe_complete()
  1081  cqe->opcode = sqe->opcode;  in siw_sqe_complete()
  1082  cqe->status = status;  in siw_sqe_complete()
  1083  cqe->imm_data = 0;  in siw_sqe_complete()
  1084  cqe->bytes = bytes;  in siw_sqe_complete()
  1087  cqe->base_qp = &qp->base_qp;  in siw_sqe_complete()
  1089  cqe->qp_id = qp_id(qp);  in siw_sqe_complete()
  [all …]
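Together with siw_reap_cqe() above, these lines form a produce/consume protocol on cqe->flags: the producer claims a slot only if flags reads zero, fills the body, and publishes the VALID bit last; the consumer checks the flag before touching the body. A userspace sketch using C11 atomics in place of the kernel's READ_ONCE and barriers; all field names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MY_WQE_VALID 0x1u

struct my_cqe {
        uint64_t id;
        uint32_t bytes;
        _Atomic uint32_t flags;
};

/* Producer side: claim a free slot, fill the body, publish last. */
static bool my_cqe_post(struct my_cqe *cqe, uint64_t id, uint32_t bytes)
{
        if (atomic_load_explicit(&cqe->flags, memory_order_acquire))
                return false;           /* consumer has not reaped it yet */
        cqe->id = id;
        cqe->bytes = bytes;
        /* Release: the body becomes visible before the flag does. */
        atomic_store_explicit(&cqe->flags, MY_WQE_VALID,
                              memory_order_release);
        return true;
}

/* Consumer side: check the flag first, then read the body. */
static bool my_cqe_reap(struct my_cqe *cqe, uint64_t *id, uint32_t *bytes)
{
        if (!(atomic_load_explicit(&cqe->flags, memory_order_acquire) &
              MY_WQE_VALID))
                return false;
        *id = cqe->id;
        *bytes = cqe->bytes;
        atomic_store_explicit(&cqe->flags, 0, memory_order_release);
        return true;
}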
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rx.c |
  63   static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  64   static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  114  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);  in mlx5e_cqes_update_owner() local
  116  cqe->op_own = op_own;  in mlx5e_cqes_update_owner()
  122  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);  in mlx5e_cqes_update_owner() local
  124  cqe->op_own = op_own;  in mlx5e_cqes_update_owner()
  636  struct mlx5_cqe64 *cqe;  in mlx5e_poll_ico_cq() local
  643  cqe = mlx5_cqwq_get_cqe(&cq->wq);  in mlx5e_poll_ico_cq()
  644  if (likely(!cqe))  in mlx5e_poll_ico_cq()
  659  wqe_counter = be16_to_cpu(cqe->wqe_counter);  in mlx5e_poll_ico_cq()
  [all …]
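mlx5e_cqes_update_owner() re-stamps op_own over a range of entries, split into two linear passes where the ring wraps (hence the repeated loop body at lines 114–124). The sketch below collapses the two passes into one masked loop; the helper and layout are invented:

#include <stdint.h>

struct my_cqe {
        uint8_t op_own;
};

/* Stamp op_own over n entries starting at consumer counter cc; the
 * driver splits this where the ring wraps, a masked index does not. */
static void my_cqes_update_owner(struct my_cqe *ring, uint32_t size,
                                 uint32_t cc, uint32_t n, uint8_t op_own)
{
        uint32_t i;

        for (i = 0; i < n; i++)
                ring[(cc + i) & (size - 1)].op_own = op_own;
}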
|
/drivers/infiniband/sw/rxe/ |
D | rxe_cq.c |
  12  int cqe, int comp_vector)  in rxe_cq_chk_attr() argument
  16  if (cqe <= 0) {  in rxe_cq_chk_attr()
  17  pr_warn("cqe(%d) <= 0\n", cqe);  in rxe_cq_chk_attr()
  21  if (cqe > rxe->attr.max_cqe) {  in rxe_cq_chk_attr()
  23  cqe, rxe->attr.max_cqe);  in rxe_cq_chk_attr()
  29  if (cqe < count) {  in rxe_cq_chk_attr()
  31  cqe, count);  in rxe_cq_chk_attr()
  57  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,  in rxe_cq_from_init() argument
  65  cq->queue = rxe_queue_init(rxe, &cqe,  in rxe_cq_from_init()
  88  cq->ibcq.cqe = cqe;  in rxe_cq_from_init()
  [all …]
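rxe_cq_chk_attr() performs three bounds checks: the requested depth must be positive, within the device maximum, and (on resize) no smaller than the number of entries currently queued. A compact userspace sketch of the same checks; the signature is invented:

#include <stdio.h>

static int my_cq_chk_attr(int cqe, int max_cqe, int in_use)
{
        if (cqe <= 0) {
                fprintf(stderr, "cqe(%d) <= 0\n", cqe);
                return -1;
        }
        if (cqe > max_cqe) {
                fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
                return -1;
        }
        if (cqe < in_use) {
                fprintf(stderr, "cqe(%d) < in use(%d)\n", cqe, in_use);
                return -1;
        }
        return 0;
}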
|
/drivers/infiniband/hw/cxgb4/ |
D | cq.c |
  186  struct t4_cqe cqe;  in insert_recv_cqe() local
  190  memset(&cqe, 0, sizeof(cqe));  in insert_recv_cqe()
  191  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |  in insert_recv_cqe()
  196  cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));  in insert_recv_cqe()
  198  cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);  in insert_recv_cqe()
  199  cq->sw_queue[cq->sw_pidx] = cqe;  in insert_recv_cqe()
  220  struct t4_cqe cqe;  in insert_sq_cqe() local
  224  memset(&cqe, 0, sizeof(cqe));  in insert_sq_cqe()
  225  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |  in insert_sq_cqe()
  230  CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;  in insert_sq_cqe()
  [all …]
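insert_recv_cqe()/insert_sq_cqe() fabricate a software completion carrying the SWFLUSH status and the current generation bit, then append it to the software queue. A rough userspace sketch; the structures, shifts, and bit positions here are invented for illustration, not the real t4_cqe encoding:

#include <endian.h>
#include <stdint.h>
#include <string.h>

#define MY_ERR_SWFLUSH  0x1f            /* invented status code */
#define MY_SW_QUEUE_LEN 64

struct my_t4_cqe {
        uint32_t header;
        uint64_t bits_type_ts;
};

struct my_t4_cq {
        struct my_t4_cqe sw_queue[MY_SW_QUEUE_LEN];
        uint32_t sw_pidx;
        uint8_t gen;
};

static void my_insert_flush_cqe(struct my_t4_cq *cq)
{
        struct my_t4_cqe cqe;

        memset(&cqe, 0, sizeof(cqe));
        cqe.header = htobe32((uint32_t)MY_ERR_SWFLUSH << 5); /* invented shift */
        cqe.bits_type_ts = htobe64((uint64_t)cq->gen << 63); /* invented bit */
        cq->sw_queue[cq->sw_pidx] = cqe;
        cq->sw_pidx = (cq->sw_pidx + 1) % MY_SW_QUEUE_LEN;
}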
|
/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci_hw.h |
  114  static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
  119  return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
  121  return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
  123  return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
  127  char *cqe, u32 val) \
  132  mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
  135  mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
  138  mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
  156  MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
  167  MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
  [all …]
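pci_hw.h generates one accessor per CQE layout revision plus a wrapper that dispatches on the version enum. The same shape written out without the token-pasting macros; the field offsets below are invented, not the real layouts:

#include <stdint.h>
#include <string.h>

enum my_cqe_v { MY_CQE_V0, MY_CQE_V1 };

/* One getter per layout revision. */
static uint16_t my_cqe0_wqe_counter_get(const char *cqe)
{
        uint16_t v;

        memcpy(&v, cqe + 4, sizeof(v));
        return v;
}

static uint16_t my_cqe1_wqe_counter_get(const char *cqe)
{
        uint16_t v;

        memcpy(&v, cqe + 8, sizeof(v));
        return v;
}

/* The wrapper callers use: dispatch on the hardware revision. */
static uint16_t my_cqe_wqe_counter_get(enum my_cqe_v v, const char *cqe)
{
        switch (v) {
        case MY_CQE_V0:
                return my_cqe0_wqe_counter_get(cqe);
        case MY_CQE_V1:
        default:
                return my_cqe1_wqe_counter_get(cqe);
        }
}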
|
/drivers/infiniband/hw/mlx5/ |
D | cq.c |
  81   void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe() local
  84   cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;  in get_sw_cqe()
  87   !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {  in get_sw_cqe()
  88   return cqe;  in get_sw_cqe()
  117  static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_good_req() argument
  121  switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {  in handle_good_req()
  137  wc->byte_len = be32_to_cpu(cqe->byte_cnt);  in handle_good_req()
  166  static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_responder() argument
  182  msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));  in handle_responder()
  189  wqe_ctr = be16_to_cpu(cqe->wqe_counter);  in handle_responder()
  [all …]
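As in mlx4, get_sw_cqe() here handles two strides: with 128-byte entries the hardware-written mlx5_cqe64 occupies the second half of the entry. A sketch of just that addressing step, with invented types:

#include <stdint.h>

struct my_cqe64 {
        uint8_t raw[63];
        uint8_t op_own;
};

/* With a 128-byte stride the hardware-written 64 bytes are the second
 * half of the entry. */
static struct my_cqe64 *my_cqe64_of(void *entry, int entry_sz)
{
        return (entry_sz == 64) ? (struct my_cqe64 *)entry
                                : (struct my_cqe64 *)((char *)entry + 64);
}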
|
/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
  1465  struct cq_req *cqe = (struct cq_req *)hw_cqe;  in __clean_cq() local
  1467  if (qp == le64_to_cpu(cqe->qp_handle))  in __clean_cq()
  1468  cqe->qp_handle = 0;  in __clean_cq()
  1475  struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;  in __clean_cq() local
  1477  if (qp == le64_to_cpu(cqe->qp_handle))  in __clean_cq()
  1478  cqe->qp_handle = 0;  in __clean_cq()
  2154  struct bnxt_qplib_cqe *cqe;  in __flush_sq() local
  2160  cqe = *pcqe;  in __flush_sq()
  2170  memset(cqe, 0, sizeof(*cqe));  in __flush_sq()
  2171  cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;  in __flush_sq()
  [all …]
|
/drivers/infiniband/sw/rdmavt/ |
D | cq.c |
  54   if (head >= (unsigned)cq->ibcq.cqe) {  in rvt_cq_enter()
  55   head = cq->ibcq.cqe;  in rvt_cq_enter()
  167  unsigned int entries = attr->cqe;  in rvt_create_cq()
  249  cq->ibcq.cqe = entries;  in rvt_create_cq()
  338  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  in rvt_resize_cq() argument
  350  if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)  in rvt_resize_cq()
  357  sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);  in rvt_resize_cq()
  363  sz = sizeof(struct ib_wc) * (cqe + 1);  in rvt_resize_cq()
  393  if (head > (u32)cq->ibcq.cqe)  in rvt_resize_cq()
  394  head = (u32)cq->ibcq.cqe;  in rvt_resize_cq()
  [all …]
|
/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | otx2_txrx.c |
  104  struct nix_cqe_tx_s *cqe,  in otx2_snd_pkt_handler() argument
  107  struct nix_send_comp_s *snd_comp = &cqe->comp;  in otx2_snd_pkt_handler()
  190  struct nix_cqe_rx_s *cqe, struct sk_buff *skb)  in otx2_set_rxhash() argument
  206  hash = cqe->hdr.flow_tag;  in otx2_set_rxhash()
  211  static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,  in otx2_free_rcv_seg() argument
  214  struct nix_rx_sg_s *sg = &cqe->sg;  in otx2_free_rcv_seg()
  220  end = start + ((cqe->parse.desc_sizem1 + 1) * 16);  in otx2_free_rcv_seg()
  232  struct nix_cqe_rx_s *cqe, int qidx)  in otx2_check_rcv_errors() argument
  235  struct nix_rx_parse_s *parse = &cqe->parse;  in otx2_check_rcv_errors()
  290  if (cqe->sg.segs)  in otx2_check_rcv_errors()
  [all …]
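otx2_free_rcv_seg() derives the descriptor span from a size-minus-one field encoded in 16-byte units (line 220). The arithmetic in isolation, with an invented helper name:

#include <stdint.h>

/* desc_sizem1 stores (16-byte units - 1), so the byte span is
 * (desc_sizem1 + 1) * 16. */
static uint32_t my_desc_span(uint8_t desc_sizem1)
{
        return ((uint32_t)desc_sizem1 + 1) * 16;
}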
|
/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
  651  struct eth_fast_path_rx_tpa_start_cqe *cqe)  in qede_set_gro_params() argument
  653  u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);  in qede_set_gro_params()
  661  skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -  in qede_set_gro_params()
  662  cqe->header_len;  in qede_set_gro_params()
  834  struct eth_fast_path_rx_tpa_start_cqe *cqe)  in qede_tpa_start() argument
  836  struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];  in qede_tpa_start()
  841  pad = cqe->placement_offset + rxq->rx_headroom;  in qede_tpa_start()
  844  le16_to_cpu(cqe->len_on_first_bd),  in qede_tpa_start()
  865  if ((le16_to_cpu(cqe->pars_flags.flags) >>  in qede_tpa_start()
  868  tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);  in qede_tpa_start()
  [all …]
|
/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c |
  324  cqe_size = wq->q_depth * sizeof(*rq->cqe);  in alloc_rq_cqe()
  325  rq->cqe = vzalloc(cqe_size);  in alloc_rq_cqe()
  326  if (!rq->cqe)  in alloc_rq_cqe()
  335  rq->cqe[i] = dma_alloc_coherent(&pdev->dev,  in alloc_rq_cqe()
  336  sizeof(*rq->cqe[i]),  in alloc_rq_cqe()
  338  if (!rq->cqe[i])  in alloc_rq_cqe()
  346  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],  in alloc_rq_cqe()
  352  vfree(rq->cqe);  in alloc_rq_cqe()
  368  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],  in free_rq_cqe()
  372  vfree(rq->cqe);  in free_rq_cqe()
  [all …]
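alloc_rq_cqe() shows a two-level allocation with backward unwind: a vzalloc'd pointer table whose entries are individually DMA-allocated, freeing entries 0..i−1 if entry i fails. A userspace sketch with calloc/free standing in for vzalloc and the DMA-coherent calls; structures are invented:

#include <stdlib.h>

struct my_cqe_ent {
        unsigned char raw[32];
};

struct my_rq {
        int depth;
        struct my_cqe_ent **cqe;        /* table of per-entry buffers */
};

static int my_alloc_rq_cqe(struct my_rq *rq)
{
        int i, j;

        rq->cqe = calloc(rq->depth, sizeof(*rq->cqe));
        if (!rq->cqe)
                return -1;

        for (i = 0; i < rq->depth; i++) {
                rq->cqe[i] = calloc(1, sizeof(*rq->cqe[i]));
                if (!rq->cqe[i])
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* Free only what was successfully allocated, then the table. */
        for (j = 0; j < i; j++)
                free(rq->cqe[j]);
        free(rq->cqe);
        return -1;
}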
|
/drivers/infiniband/ulp/iser/ |
D | iscsi_iser.h |
  248  struct ib_cqe cqe;  member
  275  struct ib_cqe cqe;  member
  295  struct ib_cqe cqe;  member
  575  iser_rx(struct ib_cqe *cqe)  in iser_rx() argument
  577  return container_of(cqe, struct iser_rx_desc, cqe);  in iser_rx()
  581  iser_tx(struct ib_cqe *cqe)  in iser_tx() argument
  583  return container_of(cqe, struct iser_tx_desc, cqe);  in iser_tx()
  587  iser_login(struct ib_cqe *cqe)  in iser_login() argument
  589  return container_of(cqe, struct iser_login_desc, cqe);  in iser_login()
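Each iser descriptor embeds a struct ib_cqe, and the completion path recovers the enclosing descriptor with container_of(). The pattern in isolation, with invented structures and a local macro mirroring the kernel's container_of:

#include <stddef.h>

#define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct my_cqe {
        void (*done)(struct my_cqe *cqe);
};

struct my_rx_desc {
        char data[128];
        struct my_cqe cqe;              /* embedded completion handle */
};

/* The completion callback receives &desc->cqe; recover the descriptor. */
static struct my_rx_desc *my_rx(struct my_cqe *cqe)
{
        return my_container_of(cqe, struct my_rx_desc, cqe);
}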
|
/drivers/scsi/qedi/ |
D | qedi_fw.c |
  31   union iscsi_cqe *cqe,  in qedi_process_logout_resp() argument
  42   cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;  in qedi_process_logout_resp()
  50   resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);  in qedi_process_logout_resp()
  82   union iscsi_cqe *cqe,  in qedi_process_text_resp() argument
  97   cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;  in qedi_process_text_resp()
  109  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,  in qedi_process_text_resp()
  178  union iscsi_cqe *cqe,  in qedi_process_tmf_resp() argument
  190  cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;  in qedi_process_tmf_resp()
  214  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,  in qedi_process_tmf_resp()
  258  union iscsi_cqe *cqe,  in qedi_process_login_resp() argument
  [all …]
|
/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c |
  967   int entries = attr->cqe;  in ocrdma_create_cq()
  1021  ibcq->cqe = new_cnt;  in ocrdma_resize_cq()
  1032  struct ocrdma_cqe *cqe = NULL;  in ocrdma_flush_cq() local
  1034  cqe = cq->va;  in ocrdma_flush_cq()
  1042  if (is_cqe_valid(cq, cqe))  in ocrdma_flush_cq()
  1044  cqe++;  in ocrdma_flush_cq()
  1592  struct ocrdma_cqe *cqe;  in ocrdma_discard_cqes() local
  1611  cqe = cq->va + cur_getp;  in ocrdma_discard_cqes()
  1616  qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;  in ocrdma_discard_cqes()
  1622  if (is_cqe_for_sq(cqe)) {  in ocrdma_discard_cqes()
  [all …]
|
D | ocrdma.h |
  496  static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)  in is_cqe_valid() argument
  499  cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;  in is_cqe_valid()
  503  static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)  in is_cqe_for_sq() argument
  505  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_for_sq()
  509  static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)  in is_cqe_invalidated() argument
  511  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_invalidated()
  515  static inline int is_cqe_imm(struct ocrdma_cqe *cqe)  in is_cqe_imm() argument
  517  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_imm()
  521  static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)  in is_cqe_wr_imm() argument
  523  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_wr_imm()
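All the ocrdma predicates test single bits of one little-endian word after byte-swapping it. A sketch with glibc's le32toh in place of le32_to_cpu; the bit positions below are invented:

#include <endian.h>
#include <stdint.h>

#define MY_CQE_VALID   (1u << 31)       /* invented bit positions */
#define MY_CQE_FOR_SQ  (1u << 30)

struct my_cqe {
        uint32_t flags_status_srcqpn;   /* stored little-endian */
};

static inline int my_is_cqe_valid(const struct my_cqe *cqe)
{
        return !!(le32toh(cqe->flags_status_srcqpn) & MY_CQE_VALID);
}

static inline int my_is_cqe_for_sq(const struct my_cqe *cqe)
{
        return !!(le32toh(cqe->flags_status_srcqpn) & MY_CQE_FOR_SQ);
}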
|
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
D | tls_rxtx.h |
  68  struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)  in mlx5e_tls_handle_rx_skb() argument
  70  if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */  in mlx5e_tls_handle_rx_skb()
  71  return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);  in mlx5e_tls_handle_rx_skb()
  80  mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }  in mlx5e_accel_is_tls() argument
  83  struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}  in mlx5e_tls_handle_rx_skb() argument
|
D | ipsec_rxtx.h |
  72   struct mlx5_cqe64 *cqe);
  78   static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)  in mlx5_ipsec_is_rx_flow() argument
  80   return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));  in mlx5_ipsec_is_rx_flow()
  156  struct mlx5_cqe64 *cqe)  in mlx5e_ipsec_offload_handle_rx_skb() argument
  164  static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }  in mlx5_ipsec_is_rx_flow() argument
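Both accel headers above use the compiled-out stub idiom: when the feature is configured out, the same function names collapse to static inline no-ops, so call sites need no #ifdef. The shape of that idiom with invented names and config symbol:

#include <stdbool.h>

struct my_cqe64;
struct my_skb;

#ifdef CONFIG_MY_IPSEC
bool my_ipsec_is_rx_flow(struct my_cqe64 *cqe);
void my_ipsec_handle_rx_skb(struct my_skb *skb, struct my_cqe64 *cqe);
#else
/* Feature compiled out: same names, zero cost, no #ifdef at call sites. */
static inline bool my_ipsec_is_rx_flow(struct my_cqe64 *cqe)
{
        return false;
}
static inline void my_ipsec_handle_rx_skb(struct my_skb *skb,
                                          struct my_cqe64 *cqe)
{
}
#endif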
|
/drivers/nvme/target/ |
D | fabrics-cmd.c |
  81   req->cqe->result.u64 = cpu_to_le64(val);  in nvmet_execute_prop_get()
  118  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);  in nvmet_install_queue()
  133  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);  in nvmet_install_queue()
  150  req->cqe->sq_head = cpu_to_le16(0xffff);  in nvmet_install_queue()
  191  req->cqe->result.u32 = 0;  in nvmet_execute_admin_connect()
  205  req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);  in nvmet_execute_admin_connect()
  229  req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);  in nvmet_execute_admin_connect()
  259  req->cqe->result.u32 = 0;  in nvmet_execute_io_connect()
  280  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);  in nvmet_execute_io_connect()
  289  req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);  in nvmet_execute_io_connect()
|
/drivers/scsi/qedf/ |
D | qedf.h |
  250  struct fcoe_cqe cqe;  member
  486  extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
  489  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
  491  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
  496  extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
  509  extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
  516  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
  518  extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
  520  extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
  529  struct fcoe_cqe *cqe);
  [all …]
|
/drivers/scsi/bnx2i/ |
D | bnx2i.h |
  506  struct cqe {  struct
  650  struct cqe *cq_virt;
  654  struct cqe *cq_prod_qe;
  655  struct cqe *cq_cons_qe;
  656  struct cqe *cq_first_qe;
  657  struct cqe *cq_last_qe;
  774  struct cqe cqe;  member
  881  struct cqe *cqe);
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | en_rx.c |
  625  static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,  in check_csum() argument
  643  hw_checksum = csum_unfold((__force __sum16)cqe->checksum);  in check_csum()
  645  if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&  in check_csum()
  652  if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))  in check_csum()
  672  struct mlx4_cqe *cqe;  in mlx4_en_process_rx_cq() local
  690  cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;  in mlx4_en_process_rx_cq()
  693  while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,  in mlx4_en_process_rx_cq()
  712  if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==  in mlx4_en_process_rx_cq()
  715  ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,  in mlx4_en_process_rx_cq()
  716  ((struct mlx4_err_cqe *)cqe)->syndrome);  in mlx4_en_process_rx_cq()
  [all …]
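The polling loop in mlx4_en_process_rx_cq() keeps consuming while the CQE owner bit XNORs with the pass parity of the consumer index — the same wrap-parity idea as in the IB drivers above, spelled as a macro. A sketch of the test; the mask value is invented:

#include <stdbool.h>
#include <stdint.h>

#define MY_XNOR(x, y)   (!(x) == !(y))
#define MY_OWNER_MASK   0x80            /* invented mask value */

/* ring_size is a power of two, so cons_index & ring_size is the pass
 * parity of the consumer index. */
static bool my_cqe_is_ours(uint8_t owner_sr_opcode, uint32_t cons_index,
                           uint32_t ring_size)
{
        return MY_XNOR(owner_sr_opcode & MY_OWNER_MASK,
                       cons_index & ring_size);
}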
|