/drivers/infiniband/hw/cxgb4/

cq.c
    179  struct t4_cqe cqe;  in insert_recv_cqe()  [local]
    212  struct t4_cqe cqe;  in insert_sq_cqe()  [local]
    251  struct t4_cqe *cqe = NULL, *swcqe;  in c4iw_flush_hw_cq()  [local]
    268  static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)  in cqe_completes_wr()
    286  struct t4_cqe *cqe;  in c4iw_count_scqes()  [local]
    305  struct t4_cqe *cqe;  in c4iw_count_rcqes()  [local]
    402  static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,  in poll_cq()
    568  struct t4_cqe cqe = {0, 0}, *rd_cqe;  in c4iw_poll_cq_one()  [local]
    879  int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)  in c4iw_resize_cq()

t4.h
    266  struct t4_cqe cqe;  [member]
    536  static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)  in t4_valid_cqe()
    541  static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)  in t4_next_hw_cqe()
    570  static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)  in t4_next_cqe()
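
The cxgb4 helpers listed above (t4_valid_cqe(), t4_next_hw_cqe(), t4_next_cqe()) follow the usual generation-bit handshake for hardware-owned completion rings. Below is a minimal, self-contained sketch of that pattern, using made-up structures (demo_cq, demo_cqe) and field names; it illustrates the idea only and is not the actual t4.h code.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative layouts only; the real t4_cqe/t4_cq structures differ. */
struct demo_cqe {
	uint8_t  genbit;	/* flipped by hardware on every pass over the ring */
	uint32_t payload;
};

struct demo_cq {
	struct demo_cqe *queue;
	uint32_t size;		/* number of entries in the ring */
	uint32_t cidx;		/* software consumer index */
	uint8_t  gen;		/* generation software expects to see next */
};

/* A CQE is valid once its generation bit matches the expected generation. */
static bool demo_valid_cqe(struct demo_cq *cq, struct demo_cqe *cqe)
{
	return cqe->genbit == cq->gen;
}

/*
 * Return the next hardware-written CQE, or NULL if the ring is empty.
 * A real driver would issue a read barrier after the validity check and
 * before touching the rest of the entry.
 */
static struct demo_cqe *demo_next_hw_cqe(struct demo_cq *cq)
{
	struct demo_cqe *cqe = &cq->queue[cq->cidx];

	if (!demo_valid_cqe(cq, cqe))
		return NULL;

	if (++cq->cidx == cq->size) {	/* wrap: the expected generation flips */
		cq->cidx = 0;
		cq->gen ^= 1;
	}
	return cqe;
}

The same idea appears in the cxgb3 entries further down (CQ_VLD_ENTRY() built on Q_GENBIT(), and cxio_next_hw_cqe()).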

/drivers/infiniband/hw/mlx4/

cq.c
     80  struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe()  [local]
    132  static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)  in mlx4_ib_free_cq_buf()
    139  u64 buf_addr, int cqe)  in mlx4_ib_get_cq_umem()
    335  struct mlx4_cqe *cqe, *new_cqe;  in mlx4_ib_cq_resize_copy_cqes()  [local]
    476  static void dump_cqe(void *cqe)  in dump_cqe()
    486  static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,  in mlx4_ib_handle_error_cqe()
    562  unsigned tail, struct mlx4_cqe *cqe)  in use_tunnel_data()
    585  struct mlx4_cqe *cqe;  in mlx4_ib_poll_one()  [local]
    842  struct mlx4_cqe *cqe, *dest;  in __mlx4_ib_cq_clean()  [local]
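
mlx4_ib_poll_one() above is called from the standard ib_poll_cq() loop shape shared by most drivers in this list: poll at most num_entries completions under the CQ lock, then publish the new consumer index. The skeleton below is a hedged sketch of that shape; every demo_* name is a placeholder, and the stubs stand in for driver-specific logic such as mlx4_cq_set_ci().

struct demo_wc {			/* stand-in for struct ib_wc */
	int status;
};

struct demo_cq {			/* lock, ring pointers, consumer index ... elided */
	int dummy;
};

/* Stub: consume one valid CQE into *wc; return 0 on success, nonzero if empty. */
static int demo_poll_one(struct demo_cq *cq, struct demo_wc *wc)
{
	(void)cq;
	(void)wc;
	return 1;			/* pretend the queue is empty */
}

/* Stub: tell hardware how far software has consumed (e.g. mlx4_cq_set_ci()). */
static void demo_update_ci(struct demo_cq *cq)
{
	(void)cq;
}

/* Typical shape of an ib_poll_cq() implementation. */
static int demo_poll_cq(struct demo_cq *cq, int num_entries, struct demo_wc *wc)
{
	int npolled;

	/* real drivers take cq->lock (spin_lock_irqsave) here */
	for (npolled = 0; npolled < num_entries; npolled++) {
		if (demo_poll_one(cq, wc + npolled))
			break;		/* no more valid CQEs */
	}
	if (npolled)
		demo_update_ci(cq);
	/* ... and release the lock before returning */

	return npolled;
}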

/drivers/scsi/bnx2i/

bnx2i_hwi.c
   1353  struct cqe *cqe)  in bnx2i_process_scsi_cmd_resp()
   1450  struct cqe *cqe)  in bnx2i_process_login_resp()
   1518  struct cqe *cqe)  in bnx2i_process_text_resp()
   1579  struct cqe *cqe)  in bnx2i_process_tmf_resp()
   1618  struct cqe *cqe)  in bnx2i_process_logout_resp()
   1664  struct cqe *cqe)  in bnx2i_process_nopin_local_cmpl()
   1705  struct cqe *cqe)  in bnx2i_process_nopin_mesg()
   1757  struct cqe *cqe)  in bnx2i_process_async_mesg()
   1807  struct cqe *cqe)  in bnx2i_process_reject_mesg()
   1844  struct cqe *cqe)  in bnx2i_process_cmd_cleanup_resp()
   [all …]

bnx2i.h
    504  struct cqe {  [struct]
    505  u8 cqe_byte[BNX2I_CQE_SIZE];  [argument]
    772  struct cqe cqe;  [member]
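
In bnx2i the CQE type defined in bnx2i.h is deliberately opaque: struct cqe is just a BNX2I_CQE_SIZE byte array, and the bnx2i_process_*() handlers in bnx2i_hwi.c reinterpret it according to the opcode carried inside. A rough, hypothetical sketch of that dispatch-on-opcode idea (with invented opcode values and layout) might look like this:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CQE_SIZE 64		/* placeholder; bnx2i uses BNX2I_CQE_SIZE */

/* Opaque CQE: the opcode embedded in the entry selects the real layout. */
struct demo_cqe {
	uint8_t cqe_byte[DEMO_CQE_SIZE];
};

/* Invented opcode values, purely for illustration. */
enum {
	DEMO_OP_SCSI_RSP  = 0x21,
	DEMO_OP_LOGIN_RSP = 0x23,
};

static void demo_process_cqe(struct demo_cqe *cqe)
{
	uint8_t opcode = cqe->cqe_byte[0];	/* assume the opcode is the first byte */

	switch (opcode) {
	case DEMO_OP_SCSI_RSP:
		/* cast to the SCSI-response layout and complete the command */
		printf("SCSI command response\n");
		break;
	case DEMO_OP_LOGIN_RSP:
		/* cast to the login-response layout and pass it up to the iSCSI layer */
		printf("login response\n");
		break;
	default:
		printf("unhandled opcode 0x%02x\n", opcode);
		break;
	}
}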

/drivers/infiniband/hw/ocrdma/

ocrdma_verbs.c
   1352  struct ocrdma_cqe *cqe;  in ocrdma_discard_cqes()  [local]
   2104  struct ocrdma_cqe *cqe)  in ocrdma_set_cqe_status_flushed()
   2135  static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,  in ocrdma_update_err_cqe()
   2157  static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,  in ocrdma_update_err_rcqe()
   2167  static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,  in ocrdma_update_err_scqe()
   2178  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,  in ocrdma_poll_err_scqe()
   2214  struct ocrdma_cqe *cqe,  in ocrdma_poll_success_scqe()
   2238  static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,  in ocrdma_poll_scqe()
   2254  static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)  in ocrdma_update_ud_rcqe()
   2271  struct ocrdma_cqe *cqe,  in ocrdma_update_free_srq_cqe()
   [all …]

ocrdma_hw.c
    107  struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)  in ocrdma_get_mcqe()  [local]
    674  struct ocrdma_ae_mcqe *cqe)  in ocrdma_dispatch_ibevent()
    771  struct ocrdma_ae_mcqe *cqe = ae_cqe;  in ocrdma_process_acqe()  [local]
    782  static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)  in ocrdma_process_mcqe()
    800  struct ocrdma_mcqe *cqe;  in ocrdma_mq_cq_handler()  [local]

ocrdma_sli.h
   1553  #define is_cqe_valid(cq, cqe) \  [argument]
   1556  #define is_cqe_for_sq(cqe) \  [argument]
   1558  #define is_cqe_for_rq(cqe) \  [argument]
   1560  #define is_cqe_invalidated(cqe) \  [argument]
   1563  #define is_cqe_imm(cqe) \  [argument]
   1565  #define is_cqe_wr_imm(cqe) \  [argument]

/drivers/infiniband/hw/cxgb3/

cxio_hal.c
     75  struct t3_cqe *cqe;  in cxio_hal_cq_op()  [local]
    352  struct t3_cqe cqe;  in insert_recv_cqe()  [local]
    389  struct t3_cqe cqe;  in insert_sq_cqe()  [local]
    430  struct t3_cqe *cqe, *swcqe;  in cxio_flush_hw_cq()  [local]
    446  static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)  in cqe_completes_wr()
    466  struct t3_cqe *cqe;  in cxio_count_scqes()  [local]
    484  struct t3_cqe *cqe;  in cxio_count_rcqes()  [local]
   1149  int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,  in cxio_poll_cq()

iwch_cq.c
     48  struct t3_cqe cqe, *rd_cqe;  in iwch_poll_cq_one()  [local]

cxio_wr.h
    675  struct t3_cqe cqe;  [member]
    728  #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \  [argument]
    769  struct t3_cqe *cqe;  in cxio_next_hw_cqe()  [local]
    779  struct t3_cqe *cqe;  in cxio_next_sw_cqe()  [local]
    790  struct t3_cqe *cqe;  in cxio_next_cqe()  [local]

cxio_hal.h
    146  struct t3_cqe cqe;  /* flits 2-3 */  [member]

/drivers/infiniband/hw/mthca/

mthca_cq.c
    174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)  in cqe_sw()
    184  static inline void set_cqe_hw(struct mthca_cqe *cqe)  in set_cqe_hw()
    191  __be32 *cqe = cqe_ptr;  in dump_cqe()  [local]
    269  static inline int is_recv_cqe(struct mthca_cqe *cqe)  in is_recv_cqe()
    281  struct mthca_cqe *cqe;  in mthca_cq_clean()  [local]
    371  void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)  in mthca_free_cq_buf()
    379  struct mthca_err_cqe *cqe,  in handle_error_cqe()
    490  struct mthca_cqe *cqe;  in mthca_poll_one()  [local]

/drivers/infiniband/hw/ehca/

ehca_cq.c
    116  struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,  in ehca_create_cq()
    368  int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)  in ehca_resize_cq()

ipz_pt_fn.h
    144  struct ehca_cqe *cqe = ipz_qeit_get(queue);  in ipz_qeit_is_valid()  [local]

ehca_reqs.c
    629  struct ehca_cqe *cqe;  in ehca_poll_cq_one()  [local]

/drivers/net/ethernet/mellanox/mlx4/

en_clock.c
     91  u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)  in mlx4_en_get_cqe_ts()

en_tx.c
    319  struct mlx4_cqe *cqe;  in mlx4_en_process_tx_cq()  [local]

en_rx.c
    560  struct mlx4_cqe *cqe;  in mlx4_en_process_rx_cq()  [local]

/drivers/net/ethernet/ibm/ehea/

ehea_main.c
    531  static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)  in ehea_check_cqe()
    543  struct sk_buff *skb, struct ehea_cqe *cqe,  in ehea_fill_skb()
    564  struct ehea_cqe *cqe)  in get_skb_by_index()
    617  struct ehea_cqe *cqe, int *processed_rq2,  in ehea_treat_poll_error()
    658  struct ehea_cqe *cqe;  in ehea_proc_rwqes()  [local]
    805  struct ehea_cqe *cqe;  in ehea_proc_cqes()  [local]
    882  struct ehea_cqe *cqe;  in ehea_poll()  [local]

/drivers/block/

nvme-core.c
    169  struct nvme_completion *cqe)  in special_completion()
    312  struct nvme_completion *cqe)  in bio_completion()
    741  struct nvme_completion cqe = nvmeq->cqes[head];  in nvme_process_cq()  [local]
    783  struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];  in nvme_irq_check()  [local]
    803  struct nvme_completion *cqe)  in sync_completion()
    975  static struct nvme_completion cqe = {  in nvme_cancel_ios()  [local]
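
nvme_process_cq() and nvme_irq_check() above rely on the NVMe completion-queue phase tag: bit 0 of the status field flips on every wrap of the queue, so software can tell freshly written entries from stale ones. The sketch below shows that pattern in a simplified, userspace-style form; the struct and field names are illustrative, and the real struct nvme_completion carries more fields.

#include <stdint.h>

/* Simplified completion entry; the real struct nvme_completion has more fields. */
struct demo_completion {
	uint16_t status;		/* bit 0 is the phase tag written by the controller */
	uint16_t command_id;
};

struct demo_nvme_queue {
	struct demo_completion *cqes;	/* DMA-coherent ring in the real driver */
	uint16_t q_depth;
	uint16_t cq_head;
	uint8_t  cq_phase;		/* phase value expected for new entries */
};

/* Consume completions until the phase tag stops matching (queue drained). */
static int demo_process_cq(struct demo_nvme_queue *nvmeq)
{
	int found = 0;

	for (;;) {
		struct demo_completion cqe = nvmeq->cqes[nvmeq->cq_head];

		if ((cqe.status & 1) != nvmeq->cq_phase)
			break;			/* not yet written by the controller */

		if (++nvmeq->cq_head == nvmeq->q_depth) {
			nvmeq->cq_head = 0;
			nvmeq->cq_phase ^= 1;	/* expected phase flips on every wrap */
		}
		/* look up and complete the request identified by cqe.command_id */
		found++;
	}
	/* the real driver now writes cq_head to the completion-queue doorbell */
	return found;
}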

/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_cmn.c
    295  struct eth_end_agg_rx_cqe *cqe)  in bnx2x_update_sge_prod()
    350  const struct eth_fast_path_rx_cqe *cqe,  in bnx2x_get_rxhash()
    369  struct eth_fast_path_rx_cqe *cqe)  in bnx2x_tpa_start()
    521  struct eth_end_agg_rx_cqe *cqe,  in bnx2x_fill_frag_skb()
    679  struct eth_end_agg_rx_cqe *cqe,  in bnx2x_tpa_stop()
    781  void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,  in bnx2x_csum_validate()
    840  union eth_rx_cqe *cqe;  in bnx2x_rx_int()  [local]

/drivers/infiniband/hw/qib/

qib_cq.c
    378  int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  in qib_resize_cq()

/drivers/infiniband/hw/ipath/

ipath_cq.c
    371  int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  in ipath_resize_cq()

/drivers/infiniband/core/

verbs.c
    911  void *cq_context, int cqe, int comp_vector)  in ib_create_cq()
    946  int ib_resize_cq(struct ib_cq *cq, int cqe)  in ib_resize_cq()
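
Finally, verbs.c carries the consumer-facing entry points. A minimal usage sketch against the pre-4.2 ib_create_cq() prototype visible above (no struct ib_cq_init_attr yet) could look like the following; demo_comp_handler() and demo_setup_cq() are hypothetical, and only the ib_* calls are real kernel API.

#include <linux/err.h>
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical completion handler: just re-arm completion notifications. */
static void demo_comp_handler(struct ib_cq *cq, void *cq_context)
{
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

/* Create a CQ with room for nr_cqe entries, then try to grow it. */
static struct ib_cq *demo_setup_cq(struct ib_device *device, int nr_cqe)
{
	struct ib_cq *cq;
	int ret;

	cq = ib_create_cq(device, demo_comp_handler, NULL /* event handler */,
			  NULL /* cq_context */, nr_cqe, 0 /* comp_vector */);
	if (IS_ERR(cq))
		return cq;

	/* Providers may round the size up; cq->cqe reports what was allocated. */
	ret = ib_resize_cq(cq, nr_cqe * 2);
	if (ret)
		pr_warn("demo: ib_resize_cq failed: %d\n", ret);

	return cq;
}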