/kernel/linux/linux-5.10/drivers/infiniband/ulp/iser/ |
D | iscsi_iser.h |
    517  void iser_err_comp(struct ib_wc *wc, const char *type);
    518  void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
    519  void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
    520  void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
    521  void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
    522  void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
    523  void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
|
D | iser_initiator.c |
    562  void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)  in iser_login_rsp()
    612  struct ib_wc *wc,  in iser_check_remote_inv()
    654  void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)  in iser_task_rsp()
    706  void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_cmd_comp()
    712  void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_ctrl_comp()
    728  void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_dataout_comp()
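These iser prototypes all share the void handler(struct ib_cq *cq, struct ib_wc *wc) shape because they are per-WR completion callbacks: the posted work request carries a struct ib_cqe whose .done pointer the core CQ code invokes through wc->wr_cqe. A minimal sketch of that pattern follows; my_rx_desc and my_rx_done are hypothetical names, while struct ib_cqe, container_of() and ib_wc_status_msg() are real kernel interfaces.

    #include <rdma/ib_verbs.h>

    /* A receive descriptor with an embedded ib_cqe; hypothetical example. */
    struct my_rx_desc {
            struct ib_cqe cqe;   /* .done points at my_rx_done below */
            void *buf;
    };

    static void my_rx_done(struct ib_cq *cq, struct ib_wc *wc)
    {
            /* The core loop calls wc->wr_cqe->done(cq, wc); container_of()
             * recovers the descriptor the ib_cqe was embedded in. */
            struct my_rx_desc *desc =
                    container_of(wc->wr_cqe, struct my_rx_desc, cqe);

            if (unlikely(wc->status != IB_WC_SUCCESS)) {
                    pr_err("recv failed: %s\n", ib_wc_status_msg(wc->status));
                    return;
            }
            /* wc->byte_len bytes of payload are now valid in desc->buf. */
    }

When the receive WR is posted, the descriptor sets desc->cqe.done = my_rx_done and points the WR's wr_cqe at &desc->cqe, which is how the completion finds its way back to this handler.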
|
/kernel/linux/linux-5.10/include/rdma/ |
D | rdmavt_cq.h |
    40  struct ib_wc kqueue[];
    65  bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
|
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/ |
D | trace_cq.h |
    112  TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
    152  TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
    157  TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
|
D | cq.c |
    67   bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)  in rvt_cq_enter()
    70   struct ib_wc *kqueue = NULL;  in rvt_cq_enter()
    238  sz = sizeof(struct ib_wc) * (entries + 1);  in rvt_create_cq()
    405  sz = sizeof(struct ib_wc) * (cqe + 1);  in rvt_resize_cq()
    518  int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)  in rvt_poll_cq()
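rvt_cq_enter() copies a driver-supplied struct ib_wc into the kqueue[] ring that rvt_poll_cq() later drains on behalf of ib_poll_cq(). As a hedged illustration of what the provider-side callers hand to it, the sketch below fills the consumer-visible fields of a receive completion; the my_ helper and its values are made up, but the field names are the real struct ib_wc members.

    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: build the ib_wc a driver would queue via rvt_cq_enter(). */
    static void my_fill_recv_completion(struct ib_wc *wc, u64 wr_id, u32 len,
                                        struct ib_qp *qp)
    {
            memset(wc, 0, sizeof(*wc));
            wc->wr_id    = wr_id;          /* identifies the posted recv WR */
            wc->status   = IB_WC_SUCCESS;  /* or an IB_WC_*_ERR status code */
            wc->opcode   = IB_WC_RECV;
            wc->byte_len = len;            /* bytes actually received */
            wc->qp       = qp;             /* QP the completion belongs to */
    }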
|
D | mad.h | 54 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
|
D | cq.h | 59 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
|
D | mad.c | 71 const struct ib_wc *in_wc, const struct ib_grh *in_grh, in rvt_process_mad()
|
/kernel/linux/linux-5.10/net/rds/ |
D | ib.h |
    154  struct ib_wc i_send_wc[RDS_IB_WC_MAX];
    155  struct ib_wc i_recv_wc[RDS_IB_WC_MAX];
    392  void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
    403  void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
    430  void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/ipoib/ |
D | ipoib.h |
    281  struct ib_wc ibwc[IPOIB_NUM_WC];
    388  struct ib_wc send_wc[MAX_SEND_CQE];
    393  struct ib_wc ibwc[IPOIB_NUM_WC];
    677  void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
    678  void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
    775  static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)  in ipoib_cm_handle_rx_wc()
    779  static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)  in ipoib_cm_handle_tx_wc()
|
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/ |
D | siw_cq.c |
    48   int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)  in siw_reap_cqe()
    118  struct ib_wc wc;  in siw_cq_flush()
|
/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/ |
D | frwr_ops.c |
    360  static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)  in frwr_wc_fastreg()
    430  static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)  in __frwr_release_mr()
    444  static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)  in frwr_wc_localinv()
    465  static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)  in frwr_wc_localinv_wake()
    568  static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)  in frwr_wc_localinv_done()
|
D | svc_rdma_rw.c |
    17   static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
    18   static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
    238  static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)  in svc_rdma_write_done()
    296  static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)  in svc_rdma_wc_read_done()
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/qedr/ |
D | verbs.h |
    89  int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
    95  u8 port_num, const struct ib_wc *in_wc,
|
D | qedr_roce_cm.h | 48 int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
/kernel/linux/linux-5.10/drivers/infiniband/core/ |
D | cq.c |
    84   static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)  in __poll_cq()
    93   static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,  in __ib_process_cq()
    108  struct ib_wc *wc = &wcs[i];  in __ib_process_cq()
    141  struct ib_wc wcs[IB_POLL_BATCH_DIRECT];  in ib_process_cq_direct()
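__ib_process_cq() here is the shared loop behind the IB_POLL_SOFTIRQ and IB_POLL_WORKQUEUE contexts of ib_alloc_cq(): it polls a batch of struct ib_wc and hands each one to its wc->wr_cqe->done() callback. ib_process_cq_direct() runs the same loop for IB_POLL_DIRECT CQs. A small sketch of a consumer driving such a CQ directly; the device pointer, CQE count, budget and my_ names are illustrative assumptions, not values taken from this file.

    #include <rdma/ib_verbs.h>

    /* Hypothetical setup: ibdev would come from the consumer's client callback. */
    static struct ib_cq *my_setup_direct_cq(struct ib_device *ibdev)
    {
            /* 256 CQEs on completion vector 0; IB_POLL_DIRECT means the
             * caller must drain the CQ itself via ib_process_cq_direct(). */
            return ib_alloc_cq(ibdev, NULL, 256, 0, IB_POLL_DIRECT);
    }

    static void my_poll_direct(struct ib_cq *cq)
    {
            /* Handles up to 64 completions, each dispatched through
             * wc->wr_cqe->done(cq, wc) by the core loop. */
            int completed = ib_process_cq_direct(cq, 64);

            if (completed)
                    pr_debug("handled %d completions\n", completed);
    }

The CQ allocated this way is released with ib_free_cq() when the consumer tears down.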
|
D | agent.h | 48 const struct ib_wc *wc, const struct ib_device *device,
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_ah.h | 60 u8 port_num, const struct ib_wc *in_wc,
|
D | ocrdma_verbs.h | 51 int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
|
D | ocrdma_verbs.c |
    2401  static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,  in ocrdma_update_wc()
    2471  static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,  in ocrdma_update_err_cqe()
    2493  static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,  in ocrdma_update_err_rcqe()
    2503  static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,  in ocrdma_update_err_scqe()
    2514  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,  in ocrdma_poll_err_scqe()
    2559  struct ib_wc *ibwc, bool *polled)  in ocrdma_poll_success_scqe()
    2584  struct ib_wc *ibwc, bool *polled, bool *stop)  in ocrdma_poll_scqe()
    2599  static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,  in ocrdma_update_ud_rcqe()
    2626  static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,  in ocrdma_update_free_srq_cqe()
    2647  struct ib_wc *ibwc, bool *polled, bool *stop,  in ocrdma_poll_err_rcqe()
    [all …]
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
D | gsi.c |
    37   struct ib_wc wc;
    68   static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)  in handle_single_completion()
    389  struct ib_ud_wr *wr, struct ib_wc *wc)  in mlx5_ib_add_outstanding_wr()
    422  struct ib_wc wc = {  in mlx5_ib_gsi_silent_drop()
|
D | cq.c |
    117   static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_good_req()
    166   static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_responder()
    278   struct ib_wc *wc)  in mlx5_handle_error_cqe()
    388   static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,  in sw_comp()
    422   struct ib_wc *wc, int *npolled)  in mlx5_ib_poll_sw_comp()
    443   struct ib_wc *wc)  in mlx5_poll_one()
    572   struct ib_wc *wc, bool is_fatal_err)  in poll_soft_wc()
    597   int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)  in mlx5_ib_poll_cq()
    1388  int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)  in mlx5_ib_generate_wc()
|
/kernel/linux/linux-5.10/net/smc/ |
D | smc_wr.c |
    81   static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)  in smc_wr_tx_process_cqe()
    124  struct ib_wc wc[SMC_WR_MAX_POLL_CQE];  in smc_wr_tx_tasklet_fn()
    365  static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)  in smc_wr_rx_demultiplex()
    384  static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)  in smc_wr_rx_process_cqes()
    414  struct ib_wc wc[SMC_WR_MAX_POLL_CQE];  in smc_wr_rx_tasklet_fn()
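The SMC tasklets poll a batch of up to SMC_WR_MAX_POLL_CQE completions into a struct ib_wc array and then re-arm the CQ. A sketch of the usual drain-then-rearm shape of such a loop, with placeholder names (my_handle_wc and MY_POLL_BATCH are assumptions; ib_poll_cq() and ib_req_notify_cq() are the real verbs):

    #include <rdma/ib_verbs.h>

    #define MY_POLL_BATCH 16  /* illustrative; SMC sizes this as SMC_WR_MAX_POLL_CQE */

    static void my_handle_wc(struct ib_wc *wc)
    {
            /* Placeholder per-completion work, e.g. demultiplexing on wc->wr_id. */
    }

    static void my_cq_drain_and_rearm(struct ib_cq *cq)
    {
            struct ib_wc wc[MY_POLL_BATCH];
            int n, i, again;

            do {
                    while ((n = ib_poll_cq(cq, MY_POLL_BATCH, wc)) > 0)
                            for (i = 0; i < n; i++)
                                    my_handle_wc(&wc[i]);
                    /* Re-arm the CQ; a positive return with
                     * IB_CQ_REPORT_MISSED_EVENTS means completions may have
                     * slipped in while re-arming, so poll one more round. */
                    again = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                                 IB_CQ_REPORT_MISSED_EVENTS);
            } while (again > 0);
    }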
|
D | smc_wr.h | 45 void (*handler)(struct ib_wc *, void *);
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
D | cq.c |
    511  struct ib_wc *wc)  in mlx4_ib_handle_error_cqe()
    580  static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,  in use_tunnel_data()
    608  struct ib_wc *wc, int *npolled, int is_send)  in mlx4_ib_qp_sw_comp()
    632  struct ib_wc *wc, int *npolled)  in mlx4_ib_poll_sw_comp()
    658  struct ib_wc *wc)  in mlx4_ib_poll_one()
    877  int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)  in mlx4_ib_poll_cq()
|