/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c |
    174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)  in cqe_sw() argument
    176  return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;  in cqe_sw()
    181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));  in next_cqe_sw()
    184  static inline void set_cqe_hw(struct mthca_cqe *cqe)  in set_cqe_hw() argument
    186  cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;  in set_cqe_hw()
    191  __be32 *cqe = cqe_ptr;  in dump_cqe() local
    193  (void) cqe; /* avoid warning if mthca_dbg compiled away... */  in dump_cqe()
    195  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),  in dump_cqe()
    196  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),  in dump_cqe()
    197  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));  in dump_cqe()
    [all …]
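
The mthca lines above show the ownership-bit convention for this CQ: an entry whose owner byte has MTHCA_CQ_ENTRY_OWNER_HW set still belongs to the adapter, so cqe_sw() yields NULL until hardware clears the bit, and set_cqe_hw() hands the slot back once software has consumed it. A minimal standalone sketch of that handshake (names and the 0x80 value are illustrative, not taken from the driver; real code also needs memory barriers around the ownership flip):

    #include <stddef.h>
    #include <stdint.h>

    #define CQ_ENTRY_OWNER_HW 0x80    /* top bit of the owner byte: HW still owns it */

    struct cq_entry {
        uint8_t owner;
        /* ... rest of the completion entry ... */
    };

    /* Return the entry if software may read it, NULL while hardware owns it. */
    static struct cq_entry *cqe_if_sw_owned(struct cq_entry *cqe)
    {
        return (cqe->owner & CQ_ENTRY_OWNER_HW) ? NULL : cqe;
    }

    /* After consuming an entry, give it back to the hardware. */
    static void cqe_return_to_hw(struct cq_entry *cqe)
    {
        cqe->owner = CQ_ENTRY_OWNER_HW;
    }
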
|
/drivers/infiniband/hw/mlx4/ |
D | cq.c |
    80   struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe() local
    81   struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);  in get_sw_cqe()
    84   !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;  in get_sw_cqe()
    132  static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)  in mlx4_ib_free_cq_buf() argument
    134  mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);  in mlx4_ib_free_cq_buf()
    139  u64 buf_addr, int cqe)  in mlx4_ib_get_cq_umem() argument
    144  *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
    175  int entries = attr->cqe;  in mlx4_ib_create_cq()
    193  cq->ibcq.cqe = entries - 1;  in mlx4_ib_create_cq()
    276  mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);  in mlx4_ib_create_cq()
    [all …]
|
/drivers/infiniband/hw/cxgb3/ |
D | iwch_ev.c |
    52   qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));  in post_qp_event()
    56   __func__, CQE_STATUS(rsp_msg->cqe),  in post_qp_event()
    57   CQE_QPID(rsp_msg->cqe));  in post_qp_event()
    67   CQE_STATUS(rsp_msg->cqe));  in post_qp_event()
    74   CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),  in post_qp_event()
    75   CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),  in post_qp_event()
    76   CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));  in post_qp_event()
    119  qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));  in iwch_ev_dispatch()
    122  cqid, CQE_QPID(rsp_msg->cqe),  in iwch_ev_dispatch()
    123  CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),  in iwch_ev_dispatch()
    [all …]
|
D | iwch_cq.c |
    48   struct t3_cqe cqe, *rd_cqe;  in iwch_poll_cq_one() local
    67   ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,  in iwch_poll_cq_one()
    83   wc->vendor_err = CQE_STATUS(cqe);  in iwch_poll_cq_one()
    88   CQE_QPID(cqe), CQE_TYPE(cqe),  in iwch_poll_cq_one()
    89   CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),  in iwch_poll_cq_one()
    90   CQE_WRID_LOW(cqe), (unsigned long long)cookie);  in iwch_poll_cq_one()
    92   if (CQE_TYPE(cqe) == 0) {  in iwch_poll_cq_one()
    93   if (!CQE_STATUS(cqe))  in iwch_poll_cq_one()
    94   wc->byte_len = CQE_LEN(cqe);  in iwch_poll_cq_one()
    98   if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||  in iwch_poll_cq_one()
    [all …]
|
D | cxio_hal.c |
    75   struct t3_cqe *cqe;  in cxio_hal_cq_op() local
    109  cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);  in cxio_hal_cq_op()
    110  while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {  in cxio_hal_cq_op()
    350  struct t3_cqe cqe;  in insert_recv_cqe() local
    354  memset(&cqe, 0, sizeof(cqe));  in insert_recv_cqe()
    355  cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |  in insert_recv_cqe()
    362  *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;  in insert_recv_cqe()
    387  struct t3_cqe cqe;  in insert_sq_cqe() local
    391  memset(&cqe, 0, sizeof(cqe));  in insert_sq_cqe()
    392  cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |  in insert_sq_cqe()
    [all …]
|
D | cxio_wr.h |
    675  struct t3_cqe cqe;  member
    728  #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \  argument
    729  CQE_GENBIT(*cqe))
    769  struct t3_cqe *cqe;  in cxio_next_hw_cqe() local
    771  cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));  in cxio_next_hw_cqe()
    772  if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))  in cxio_next_hw_cqe()
    773  return cqe;  in cxio_next_hw_cqe()
    779  struct t3_cqe *cqe;  in cxio_next_sw_cqe() local
    782  cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));  in cxio_next_sw_cqe()
    783  return cqe;  in cxio_next_sw_cqe()
    [all …]
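
CQ_VLD_ENTRY() above compares the generation bit implied by the consumer's read pointer with the generation bit stored in the CQE: hardware flips the bit it writes each time it wraps the ring, so entries left over from the previous pass fail the test. A rough standalone equivalent for a power-of-two ring (helper names are ours, not cxio's):

    #include <stdbool.h>
    #include <stdint.h>

    /* Index within a ring of 2^size_log2 entries. */
    static inline uint32_t ring_idx(uint32_t ptr, unsigned int size_log2)
    {
        return ptr & ((1U << size_log2) - 1U);
    }

    /* Generation bit flips every time the pointer wraps the ring. */
    static inline uint32_t ring_genbit(uint32_t ptr, unsigned int size_log2)
    {
        return (ptr >> size_log2) & 1U;
    }

    /* An entry is valid when its stored genbit matches the consumer's genbit. */
    static inline bool cqe_is_valid(uint32_t rptr, unsigned int size_log2,
                                    uint32_t cqe_genbit)
    {
        return ring_genbit(rptr, size_log2) == cqe_genbit;
    }
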
|
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_cq.c |
    82   cq->ibcq.cqe, &head);  in pvrdma_req_notify_cq()
    107  int entries = attr->cqe;  in pvrdma_create_cq()
    135  cq->ibcq.cqe = entries;  in pvrdma_create_cq()
    191  cmd->cqe = entries;  in pvrdma_create_cq()
    200  cq->ibcq.cqe = resp->cqe;  in pvrdma_create_cq()
    312  cq->ibcq.cqe, &head);  in _pvrdma_flush_cqe()
    317  cq->ibcq.cqe);  in _pvrdma_flush_cqe()
    318  struct pvrdma_cqe *cqe;  in _pvrdma_flush_cqe() local
    322  (cq->ibcq.cqe - head + tail);  in _pvrdma_flush_cqe()
    326  curr = cq->ibcq.cqe - 1;  in _pvrdma_flush_cqe()
    [all …]
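
The _pvrdma_flush_cqe() arithmetic above (cq->ibcq.cqe - head + tail) is the wrap-around branch of counting how many entries sit between the consumer and producer indices of a circular queue. As a generic sketch (not the pvrdma code itself):

    /* Number of used slots in a ring of `size` entries, given a head
     * (consumer) and tail (producer) index that wrap modulo size.
     * Illustrative helper, names are ours. */
    static inline unsigned int ring_used(unsigned int head, unsigned int tail,
                                         unsigned int size)
    {
        return (tail >= head) ? (tail - head) : (size - head + tail);
    }
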
|
/drivers/infiniband/sw/rxe/ |
D | rxe_cq.c |
    39   int cqe, int comp_vector, struct ib_udata *udata)  in rxe_cq_chk_attr() argument
    43   if (cqe <= 0) {  in rxe_cq_chk_attr()
    44   pr_warn("cqe(%d) <= 0\n", cqe);  in rxe_cq_chk_attr()
    48   if (cqe > rxe->attr.max_cqe) {  in rxe_cq_chk_attr()
    50   cqe, rxe->attr.max_cqe);  in rxe_cq_chk_attr()
    56   if (cqe < count) {  in rxe_cq_chk_attr()
    58   cqe, count);  in rxe_cq_chk_attr()
    84   int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,  in rxe_cq_from_init() argument
    90   cq->queue = rxe_queue_init(rxe, &cqe,  in rxe_cq_from_init()
    113  cq->ibcq.cqe = cqe;  in rxe_cq_from_init()
    [all …]
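
rxe_cq_chk_attr() above rejects a requested CQ depth that is non-positive, above the device's max_cqe, or, on resize, smaller than the number of completions already queued. A condensed sketch of that validation order (hypothetical helper, plain fprintf in place of pr_warn):

    #include <stdio.h>

    /* Depth must be positive, within the device limit, and not smaller than
     * what is already queued when resizing an existing CQ. */
    static int cq_depth_ok(int cqe, int max_cqe, int queued)
    {
        if (cqe <= 0) {
            fprintf(stderr, "cqe(%d) <= 0\n", cqe);
            return -1;
        }
        if (cqe > max_cqe) {
            fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
            return -1;
        }
        if (cqe < queued) {
            fprintf(stderr, "cqe(%d) < current count(%d)\n", cqe, queued);
            return -1;
        }
        return 0;
    }
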
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rx.c |
    83   struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);  in mlx5e_cqes_update_owner() local
    85   cqe->op_own = op_own;  in mlx5e_cqes_update_owner()
    91   struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);  in mlx5e_cqes_update_owner() local
    93   cqe->op_own = op_own;  in mlx5e_cqes_update_owner()
    489  struct mlx5_cqe64 *cqe)  in mlx5e_poll_ico_single_cqe() argument
    492  u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;  in mlx5e_poll_ico_single_cqe()
    497  if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {  in mlx5e_poll_ico_single_cqe()
    499  cqe->op_own);  in mlx5e_poll_ico_single_cqe()
    517  struct mlx5_cqe64 *cqe;  in mlx5e_poll_ico_cq() local
    522  cqe = mlx5_cqwq_get_cqe(&cq->wq);  in mlx5e_poll_ico_cq()
    [all …]
|
/drivers/infiniband/hw/cxgb4/ |
D | cq.c |
    179  struct t4_cqe cqe;  in insert_recv_cqe() local
    183  memset(&cqe, 0, sizeof(cqe));  in insert_recv_cqe()
    184  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |  in insert_recv_cqe()
    189  cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));  in insert_recv_cqe()
    190  cq->sw_queue[cq->sw_pidx] = cqe;  in insert_recv_cqe()
    212  struct t4_cqe cqe;  in insert_sq_cqe() local
    216  memset(&cqe, 0, sizeof(cqe));  in insert_sq_cqe()
    217  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |  in insert_sq_cqe()
    222  CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;  in insert_sq_cqe()
    223  cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));  in insert_sq_cqe()
    [all …]
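
insert_recv_cqe()/insert_sq_cqe() above build a synthetic completion with status T4_ERR_SWFLUSH, tag it with the CQ's current generation bit, and store it in the software queue so that outstanding work requests complete with a flush error when the QP is torn down. The shape of that operation, reduced to a standalone sketch (structure layout, field names, and the status value are placeholders, not the cxgb4 definitions):

    #include <stdint.h>
    #include <string.h>

    #define ERR_SWFLUSH 0x14U   /* placeholder "flushed by software" status */

    struct sw_cqe {
        uint32_t header;        /* status, opcode, qpid ... */
        uint64_t bits_type_ts;  /* carries the generation bit */
    };

    struct sw_cq {
        struct sw_cqe queue[256];
        unsigned int pidx;      /* software producer index */
        unsigned int gen;       /* current generation bit */
    };

    /* Post a software "flushed" completion for a work request that will
     * never be completed by hardware. */
    static void insert_flush_cqe(struct sw_cq *cq, uint32_t qpid)
    {
        struct sw_cqe cqe;

        memset(&cqe, 0, sizeof(cqe));
        cqe.header = (ERR_SWFLUSH << 24) | qpid;      /* status + QP id */
        cqe.bits_type_ts = (uint64_t)cq->gen << 63;   /* tag with genbit */
        cq->queue[cq->pidx] = cqe;
        cq->pidx = (cq->pidx + 1) % 256;
    }
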
|
/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci_hw.h |
    132  MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
    140  MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
    141  MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
    142  MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
    147  MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
    154  MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
    159  MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
    165  MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
    170  MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
    176  MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
    [all …]
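
Each MLXSW_ITEM32() line above declares a CQE field by byte offset, bit shift, and bit width — e.g. byte_count is 14 bits starting at bit 0 of the 32-bit word at offset 0x04. A generic getter built on the same (offset, shift, width) idea, assuming big-endian words as the mlxsw item helpers use (this is not the MLXSW_ITEM32 macro itself):

    #include <stdint.h>

    /* Read `width` bits starting at `shift` from the big-endian 32-bit word
     * located `offset` bytes into the CQE.  Helper name is ours. */
    static uint32_t cqe_get_field(const uint8_t *cqe, unsigned int offset,
                                  unsigned int shift, unsigned int width)
    {
        uint32_t word = ((uint32_t)cqe[offset] << 24) |
                        ((uint32_t)cqe[offset + 1] << 16) |
                        ((uint32_t)cqe[offset + 2] << 8) |
                        (uint32_t)cqe[offset + 3];
        uint32_t mask = (width == 32) ? 0xffffffffU : ((1U << width) - 1U);

        return (word >> shift) & mask;
    }

    /* Example: byte_count is declared as (pci, cqe, byte_count, 0x04, 0, 14),
     * so:  uint32_t byte_count = cqe_get_field(cqe, 0x04, 0, 14);           */
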
|
/drivers/infiniband/hw/mlx5/ |
D | cq.c |
    84   void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe() local
    87   cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;  in get_sw_cqe()
    90   !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {  in get_sw_cqe()
    91   return cqe;  in get_sw_cqe()
    120  static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_good_req() argument
    124  switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {  in handle_good_req()
    138  wc->byte_len = be32_to_cpu(cqe->byte_cnt);  in handle_good_req()
    167  static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_responder() argument
    184  be32_to_cpu(cqe->srqn));  in handle_responder()
    190  wqe_ctr = be16_to_cpu(cqe->wqe_counter);  in handle_responder()
    [all …]
|
/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
    1125  struct cq_req *cqe = (struct cq_req *)hw_cqe;  in __clean_cq() local
    1127  if (qp == le64_to_cpu(cqe->qp_handle))  in __clean_cq()
    1128  cqe->qp_handle = 0;  in __clean_cq()
    1135  struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;  in __clean_cq() local
    1137  if (qp == le64_to_cpu(cqe->qp_handle))  in __clean_cq()
    1138  cqe->qp_handle = 0;  in __clean_cq()
    1759  struct bnxt_qplib_cqe *cqe;  in __flush_sq() local
    1764  cqe = *pcqe;  in __flush_sq()
    1775  memset(cqe, 0, sizeof(*cqe));  in __flush_sq()
    1776  cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;  in __flush_sq()
    [all …]
|
/drivers/infiniband/sw/rdmavt/ |
D | cq.c |
    78   if (head >= (unsigned)cq->ibcq.cqe) {  in rvt_cq_enter()
    79   head = cq->ibcq.cqe;  in rvt_cq_enter()
    195  unsigned int entries = attr->cqe;  in rvt_create_cq()
    271  cq->ibcq.cqe = entries;  in rvt_create_cq()
    356  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  in rvt_resize_cq() argument
    366  if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)  in rvt_resize_cq()
    374  sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);  in rvt_resize_cq()
    376  sz += sizeof(struct ib_wc) * (cqe + 1);  in rvt_resize_cq()
    399  if (head > (u32)cq->ibcq.cqe)  in rvt_resize_cq()
    400  head = (u32)cq->ibcq.cqe;  in rvt_resize_cq()
    [all …]
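
rvt_create_cq()/rvt_resize_cq() above size the completion buffer for cqe + 1 entries: with plain head/tail indices, a ring of N + 1 slots holds N completions while keeping "full" and "empty" distinguishable. A minimal illustration of that sizing rule (names are ours):

    #include <stdbool.h>

    /* Ring with head/tail indices over (depth + 1) slots: it can hold `depth`
     * entries, and full vs. empty stays unambiguous without a separate count. */
    struct wc_ring {
        unsigned int head;   /* next slot to fill (producer) */
        unsigned int tail;   /* next slot to drain (consumer) */
        unsigned int slots;  /* depth + 1 */
    };

    static bool ring_empty(const struct wc_ring *r)
    {
        return r->head == r->tail;
    }

    static bool ring_full(const struct wc_ring *r)
    {
        return (r->head + 1) % r->slots == r->tail;
    }
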
|
/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
    635  struct eth_fast_path_rx_tpa_start_cqe *cqe)  in qede_set_gro_params() argument
    637  u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);  in qede_set_gro_params()
    645  skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -  in qede_set_gro_params()
    646  cqe->header_len;  in qede_set_gro_params()
    726  struct eth_fast_path_rx_tpa_start_cqe *cqe)  in qede_tpa_start() argument
    728  struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];  in qede_tpa_start()
    767  le16_to_cpu(cqe->len_on_first_bd));  in qede_tpa_start()
    775  skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));  in qede_tpa_start()
    780  tpa_info->start_cqe_placement_offset = cqe->placement_offset;  in qede_tpa_start()
    781  tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);  in qede_tpa_start()
    [all …]
|
/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c |
    330  cqe_size = wq->q_depth * sizeof(*rq->cqe);  in alloc_rq_cqe()
    331  rq->cqe = vzalloc(cqe_size);  in alloc_rq_cqe()
    332  if (!rq->cqe)  in alloc_rq_cqe()
    341  rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,  in alloc_rq_cqe()
    342  sizeof(*rq->cqe[i]),  in alloc_rq_cqe()
    344  if (!rq->cqe[i])  in alloc_rq_cqe()
    352  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],  in alloc_rq_cqe()
    358  vfree(rq->cqe);  in alloc_rq_cqe()
    374  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],  in free_rq_cqe()
    378  vfree(rq->cqe);  in free_rq_cqe()
    [all …]
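
alloc_rq_cqe() above vzalloc()s an array of per-slot pointers, then allocates one DMA-coherent completion entry per RQ slot, unwinding everything already allocated if any single allocation fails; free_rq_cqe() releases them the same way. The same alloc-or-unwind shape in a user-space sketch (malloc/free stand in for the DMA-coherent allocator):

    #include <stdlib.h>

    struct rq_cqe { unsigned int data[8]; };   /* placeholder entry layout */

    /* Allocate one completion entry per queue slot, freeing everything on
     * failure so the caller never sees a half-built array. */
    static struct rq_cqe **alloc_rq_cqes(unsigned int q_depth)
    {
        struct rq_cqe **cqes = calloc(q_depth, sizeof(*cqes));
        unsigned int i, j;

        if (!cqes)
            return NULL;

        for (i = 0; i < q_depth; i++) {
            cqes[i] = calloc(1, sizeof(*cqes[i]));
            if (!cqes[i])
                goto err_unwind;
        }
        return cqes;

    err_unwind:
        for (j = 0; j < i; j++)
            free(cqes[j]);
        free(cqes);
        return NULL;
    }
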
|
/drivers/scsi/qedi/ |
D | qedi_fw.c |
    34   union iscsi_cqe *cqe,  in qedi_process_logout_resp() argument
    45   cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;  in qedi_process_logout_resp()
    53   resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);  in qedi_process_logout_resp()
    84   union iscsi_cqe *cqe,  in qedi_process_text_resp() argument
    100  cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;  in qedi_process_text_resp()
    113  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,  in qedi_process_text_resp()
    188  union iscsi_cqe *cqe,  in qedi_process_tmf_resp() argument
    201  cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;  in qedi_process_tmf_resp()
    226  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,  in qedi_process_tmf_resp()
    261  union iscsi_cqe *cqe,  in qedi_process_login_resp() argument
    [all …]
|
/drivers/infiniband/ulp/iser/ |
D | iscsi_iser.h |
    261  struct ib_cqe cqe;  member
    293  struct ib_cqe cqe;  member
    313  struct ib_cqe cqe;  member
    701  iser_rx(struct ib_cqe *cqe)  in iser_rx() argument
    703  return container_of(cqe, struct iser_rx_desc, cqe);  in iser_rx()
    707  iser_tx(struct ib_cqe *cqe)  in iser_tx() argument
    709  return container_of(cqe, struct iser_tx_desc, cqe);  in iser_tx()
    713  iser_login(struct ib_cqe *cqe)  in iser_login() argument
    715  return container_of(cqe, struct iser_login_desc, cqe);  in iser_login()
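
Each iser descriptor embeds a struct ib_cqe, and the completion path recovers the enclosing descriptor with container_of(); iser_rx(), iser_tx() and iser_login() above differ only in the outer type. A freestanding illustration of that pattern (the cqe-like struct here is a stand-in, not the real ib_cqe definition):

    #include <stddef.h>

    #ifndef container_of
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #endif

    struct ib_cqe_like { void (*done)(struct ib_cqe_like *cqe); };

    struct rx_desc {
        char buf[128];
        struct ib_cqe_like cqe;   /* embedded completion cookie */
    };

    /* Completion callback: map the embedded cqe back to its descriptor. */
    static void rx_done(struct ib_cqe_like *cqe)
    {
        struct rx_desc *desc = container_of(cqe, struct rx_desc, cqe);
        (void)desc;   /* process the received descriptor here */
    }
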
|
/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c |
    1072  int entries = attr->cqe;  in ocrdma_create_cq()
    1132  ibcq->cqe = new_cnt;  in ocrdma_resize_cq()
    1143  struct ocrdma_cqe *cqe = NULL;  in ocrdma_flush_cq() local
    1145  cqe = cq->va;  in ocrdma_flush_cq()
    1153  if (is_cqe_valid(cq, cqe))  in ocrdma_flush_cq()
    1155  cqe++;  in ocrdma_flush_cq()
    1706  struct ocrdma_cqe *cqe;  in ocrdma_discard_cqes() local
    1725  cqe = cq->va + cur_getp;  in ocrdma_discard_cqes()
    1730  qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;  in ocrdma_discard_cqes()
    1736  if (is_cqe_for_sq(cqe)) {  in ocrdma_discard_cqes()
    [all …]
|
D | ocrdma.h |
    498  static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)  in is_cqe_valid() argument
    501  cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;  in is_cqe_valid()
    505  static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)  in is_cqe_for_sq() argument
    507  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_for_sq()
    511  static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)  in is_cqe_invalidated() argument
    513  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_invalidated()
    517  static inline int is_cqe_imm(struct ocrdma_cqe *cqe)  in is_cqe_imm() argument
    519  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_imm()
    523  static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)  in is_cqe_wr_imm() argument
    525  return (le32_to_cpu(cqe->flags_status_srcqpn) &  in is_cqe_wr_imm()
|
/drivers/scsi/qedf/ |
D | qedf.h |
    230  struct fcoe_cqe cqe;  member
    448  extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    451  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    453  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    458  extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    471  extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    478  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    480  extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    482  extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
    491  struct fcoe_cqe *cqe);
    [all …]
|
D | qedf_io.c |
    1065  void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,  in qedf_scsi_completion() argument
    1079  if (!cqe)  in qedf_scsi_completion()
    1085  fcp_rsp = &cqe->cqe_info.rsp_info;  in qedf_scsi_completion()
    1132  fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,  in qedf_scsi_completion()
    1139  cqe->cqe_info.rsp_info.fw_residual);  in qedf_scsi_completion()
    1282  void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,  in qedf_process_warning_compl() argument
    1290  if (!cqe)  in qedf_process_warning_compl()
    1297  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),  in qedf_process_warning_compl()
    1298  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));  in qedf_process_warning_compl()
    1301  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),  in qedf_process_warning_compl()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | en_rx.c |
    629  static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,  in check_csum() argument
    647  hw_checksum = csum_unfold((__force __sum16)cqe->checksum);  in check_csum()
    649  if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&  in check_csum()
    655  if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))  in check_csum()
    658  if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))  in check_csum()
    672  struct mlx4_cqe *cqe;  in mlx4_en_process_rx_cq() local
    693  cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;  in mlx4_en_process_rx_cq()
    696  while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,  in mlx4_en_process_rx_cq()
    715  if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==  in mlx4_en_process_rx_cq()
    718  ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,  in mlx4_en_process_rx_cq()
    [all …]
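
The mlx4_en_process_rx_cq() loop above polls completions by comparing the CQE owner bit with the parity of the consumer index (the XNOR test): the bit hardware writes flips on every wrap of the ring, and cons_index & ring_size tracks the same parity on the software side. A small sketch of that readiness test (constant and names are illustrative, ring_size a power of two):

    #include <stdbool.h>
    #include <stdint.h>

    #define CQE_OWNER_BIT 0x80   /* placeholder owner bit in the opcode byte */

    /* A CQE is ready for software when its owner bit matches the pass parity
     * implied by the consumer index. */
    static bool cqe_ready(uint8_t owner_sr_opcode, uint32_t cons_index,
                          uint32_t ring_size)
    {
        bool hw_bit = !!(owner_sr_opcode & CQE_OWNER_BIT);
        bool sw_bit = !!(cons_index & ring_size);

        return hw_bit == sw_bit;   /* the XNOR test in the loop above */
    }
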
|
/drivers/scsi/bnx2i/ |
D | bnx2i.h |
    506  struct cqe {  struct
    650  struct cqe *cq_virt;
    654  struct cqe *cq_prod_qe;
    655  struct cqe *cq_cons_qe;
    656  struct cqe *cq_first_qe;
    657  struct cqe *cq_last_qe;
    774  struct cqe cqe;  member
    881  struct cqe *cqe);
|
/drivers/net/ethernet/ibm/ehea/ |
D | ehea_main.c |
    542  static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)  in ehea_check_cqe() argument
    544  *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;  in ehea_check_cqe()
    545  if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)  in ehea_check_cqe()
    547  if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&  in ehea_check_cqe()
    548  (cqe->header_length == 0))  in ehea_check_cqe()
    554  struct sk_buff *skb, struct ehea_cqe *cqe,  in ehea_fill_skb() argument
    557  int length = cqe->num_bytes_transfered - 4; /*remove CRC */  in ehea_fill_skb()
    564  if (cqe->status & EHEA_CQE_BLIND_CKSUM) {  in ehea_fill_skb()
    566  skb->csum = csum_unfold(~cqe->inet_checksum_value);  in ehea_fill_skb()
    575  struct ehea_cqe *cqe)  in get_skb_by_index() argument
    [all …]
|