/drivers/net/ethernet/intel/ice/
  ice_controlq.c
      36  struct ice_ctl_q_info *cq = &hw->adminq;  in ice_adminq_init_regs()  local
      49  struct ice_ctl_q_info *cq = &hw->mailboxq;  in ice_mailbox_init_regs()  local
      62  struct ice_ctl_q_info *cq = &hw->sbq;  in ice_sb_init_regs()  local
      74  bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_check_sq_alive()
      91  ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_alloc_ctrlq_sq_ring()
     122  ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_alloc_ctrlq_rq_ring()
     158  ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_alloc_rq_bufs()
     230  ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_alloc_sq_bufs()
     299  ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_cfg_sq_regs()
     312  ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)  in ice_cfg_rq_regs()
     [all …]
/drivers/infiniband/core/
  cq.c
      43  struct ib_cq *cq = dim->priv;  in ib_cq_rdma_dim_work()  local
      54  static void rdma_dim_init(struct ib_cq *cq)  in rdma_dim_init()
      75  static void rdma_dim_destroy(struct ib_cq *cq)  in rdma_dim_destroy()
      84  static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)  in __poll_cq()
      93  static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,  in __ib_process_cq()
     139  int ib_process_cq_direct(struct ib_cq *cq, int budget)  in ib_process_cq_direct()
     147  static void ib_cq_completion_direct(struct ib_cq *cq, void *private)  in ib_cq_completion_direct()
     154  struct ib_cq *cq = container_of(iop, struct ib_cq, iop);  in ib_poll_handler()  local
     173  static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)  in ib_cq_completion_softirq()
     181  struct ib_cq *cq = container_of(work, struct ib_cq, work);  in ib_cq_poll_work()  local
     [all …]
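The drivers/infiniband/core/cq.c entries above are the kernel's shared CQ layer (ib_alloc_cq(), ib_process_cq_direct(), the per-cqe done callbacks). As a rough, hedged sketch only — the device pointer, queue depth, budget, and the my_recv_done()/my_cqe names are invented placeholders, not taken from the listing — a consumer of that API might look like this:

/* Illustrative sketch, assuming a valid struct ib_device *dev. */
#include <linux/err.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		pr_err("recv completion failed: %s\n",
		       ib_wc_status_msg(wc->status));
	/* normally: recycle the buffer reached via wc->wr_cqe */
}

static struct ib_cqe my_cqe = { .done = my_recv_done };

static int my_cq_example(struct ib_device *dev)
{
	struct ib_cq *cq;

	/*
	 * IB_POLL_DIRECT: the core does no interrupt-driven polling;
	 * the caller reaps completions itself via ib_process_cq_direct().
	 */
	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_DIRECT);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... post receive WRs whose wr_cqe points at &my_cqe ... */

	ib_process_cq_direct(cq, 16);	/* budget of 16 completions */

	ib_free_cq(cq);
	return 0;
}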
/drivers/infiniband/hw/mlx4/
  cq.c
      43  static void mlx4_ib_cq_comp(struct mlx4_cq *cq)  in mlx4_ib_cq_comp()
      49  static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)  in mlx4_ib_cq_event()
      74  static void *get_cqe(struct mlx4_ib_cq *cq, int n)  in get_cqe()
      79  static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)  in get_sw_cqe()
      88  static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)  in next_cqe_sw()
      93  int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)  in mlx4_ib_modify_cq()
     181  struct mlx4_ib_cq *cq = to_mcq(ibcq);  in mlx4_ib_create_cq()  local
     290  static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,  in mlx4_alloc_resize_buf()
     314  static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,  in mlx4_alloc_resize_umem()
     343  static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)  in mlx4_ib_get_outstanding_cqes()
     [all …]
/drivers/infiniband/hw/mlx5/
  cq.c
      41  static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)  in mlx5_ib_cq_comp()
      50  struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);  in mlx5_ib_cq_event()  local
      69  static void *get_cqe(struct mlx5_ib_cq *cq, int n)  in get_cqe()
      79  static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)  in get_sw_cqe()
      94  static void *next_cqe_sw(struct mlx5_ib_cq *cq)  in next_cqe_sw()
     421  static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,  in mlx5_ib_poll_sw_comp()
     441  static int mlx5_poll_one(struct mlx5_ib_cq *cq,  in mlx5_poll_one()
     571  static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,  in poll_soft_wc()
     599  struct mlx5_ib_cq *cq = to_mcq(ibcq);  in mlx5_ib_poll_cq()  local
     637  struct mlx5_ib_cq *cq = to_mcq(ibcq);  in mlx5_ib_arm_cq()  local
     [all …]
/drivers/net/ethernet/mellanox/mlx4/
  en_cq.c
      40  static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)  in mlx4_en_cq_event()
      52  struct mlx4_en_cq *cq;  in mlx4_en_create_cq()  local
      89  int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,  in mlx4_en_activate_cq()
     177  struct mlx4_en_cq *cq = *pcq;  in mlx4_en_destroy_cq()  local
     190  void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)  in mlx4_en_deactivate_cq()
     201  int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)  in mlx4_en_set_cq_moder()
     207  void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)  in mlx4_en_arm_cq()
  cq.c
      82  static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)  in mlx4_add_cq_to_tasklet()
     106  struct mlx4_cq *cq;  in mlx4_cq_completion()  local
     129  struct mlx4_cq *cq;  in mlx4_cq_event()  local
     169  int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,  in mlx4_cq_modify()
     191  int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,  in mlx4_cq_resize()
     343  struct mlx4_cq *cq, unsigned vector, int collapsed,  in mlx4_cq_alloc()
     435  void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)  in mlx4_cq_free()
/drivers/infiniband/sw/rxe/
  rxe_cq.c
      11  int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,  in rxe_cq_chk_attr()
      44  struct rxe_cq *cq = from_tasklet(cq, t, comp_task);  in rxe_send_complete()  local
      57  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,  in rxe_cq_from_init()
      92  int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,  in rxe_cq_resize_queue()
     107  int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)  in rxe_cq_post()
     145  void rxe_cq_disable(struct rxe_cq *cq)  in rxe_cq_disable()
     156  struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);  in rxe_cq_cleanup()  local
/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_cq.c
      67  struct pvrdma_cq *cq = to_vcq(ibcq);  in pvrdma_req_notify_cq()  local
     107  struct pvrdma_cq *cq = to_vcq(ibcq);  in pvrdma_create_cq()  local
     225  static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)  in pvrdma_free_cq()
     241  int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)  in pvrdma_destroy_cq()
     270  static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)  in get_cqe()
     278  void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)  in _pvrdma_flush_cqe()
     322  static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,  in pvrdma_poll_one()
     388  struct pvrdma_cq *cq = to_vcq(ibcq);  in pvrdma_poll_cq()  local
/drivers/net/ethernet/mellanox/mlx5/core/
  cq.c
      70  static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,  in mlx5_add_cq_to_tasklet()
      89  int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,  in mlx5_core_create_cq()
     153  int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)  in mlx5_core_destroy_cq()
     178  int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,  in mlx5_core_query_cq()
     189  int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,  in mlx5_core_modify_cq()
     201  struct mlx5_core_cq *cq,  in mlx5_core_modify_cq_moderation()
/drivers/infiniband/sw/rdmavt/
  cq.c
      25  bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)  in rvt_cq_enter()
     121  struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);  in send_complete()  local
     163  struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);  in rvt_create_cq()  local
     278  struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);  in rvt_destroy_cq()  local
     304  struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);  in rvt_req_notify_cq()  local
     340  struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);  in rvt_resize_cq()  local
     478  struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);  in rvt_poll_cq()  local
/drivers/infiniband/hw/mthca/
  mthca_cq.c
     169  static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)  in get_cqe()
     179  static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)  in next_cqe_sw()
     204  static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,  in update_cons_index()
     219  struct mthca_cq *cq;  in mthca_cq_completion()  local
     236  struct mthca_cq *cq;  in mthca_cq_event()  local
     273  void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,  in mthca_cq_clean()
     325  void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)  in mthca_cq_resize_copy_cqes()
     372  static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,  in handle_error_cqe()
     479  struct mthca_cq *cq,  in mthca_poll_one()
     658  struct mthca_cq *cq = to_mcq(ibcq);  in mthca_poll_cq()  local
     [all …]
/drivers/scsi/snic/
  vnic_cq.c
      24  void svnic_cq_free(struct vnic_cq *cq)  in svnic_cq_free()
      31  int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,  in svnic_cq_alloc()
      47  void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,  in svnic_cq_init()
      70  void svnic_cq_clean(struct vnic_cq *cq)  in svnic_cq_clean()
/drivers/net/ethernet/cisco/enic/
  vnic_cq.c
      29  void vnic_cq_free(struct vnic_cq *cq)  in vnic_cq_free()
      36  int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,  in vnic_cq_alloc()
      51  void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,  in vnic_cq_init()
      76  void vnic_cq_clean(struct vnic_cq *cq)  in vnic_cq_clean()
/drivers/scsi/fnic/
  vnic_cq.c
      24  void vnic_cq_free(struct vnic_cq *cq)  in vnic_cq_free()
      31  int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,  in vnic_cq_alloc()
      52  void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,  in vnic_cq_init()
      75  void vnic_cq_clean(struct vnic_cq *cq)  in vnic_cq_clean()
/drivers/scsi/elx/efct/
  efct_hw_queues.c
      15  struct hw_cq *cq = NULL;  in efct_hw_init_queues()  local
     162  struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);  in efct_hw_new_cq()  local
     198  struct hw_cq *cq = NULL;  in efct_hw_new_cq_set()  local
     244  efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)  in efct_hw_new_mq()
     275  efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)  in efct_hw_new_wq()
     390  struct hw_cq *cq;  in efct_hw_del_eq()  local
     404  efct_hw_del_cq(struct hw_cq *cq)  in efct_hw_del_cq()
     521  efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,  in efct_hw_rqpair_process_rq()
/drivers/infiniband/hw/cxgb4/
  cq.c
      37  static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,  in destroy_cq()
      70  static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,  in create_cq()
     184  static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)  in insert_recv_cqe()
     203  int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)  in c4iw_flush_rq()
     217  static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,  in insert_sq_cqe()
     243  struct t4_cq *cq = &chp->cq;  in c4iw_flush_sq()  local
     267  static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)  in flush_completed_wrs()
     443  void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)  in c4iw_count_rcqes()
     544  static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,  in poll_cq()
  restrack.c
     271  static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)  in fill_cq()
     333  static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,  in fill_hwcqes()
     350  static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,  in fill_swcqes()
     378  struct t4_cq cq;  in c4iw_fill_res_cq_entry()  local
/drivers/net/ethernet/marvell/octeontx2/nic/
  otx2_txrx.c
      22  struct otx2_cq_queue *cq)  in otx2_nix_cq_op_status()
      46  static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)  in otx2_get_next_cqe()
     102  struct otx2_cq_queue *cq,  in otx2_snd_pkt_handler()
     297  struct otx2_cq_queue *cq,  in otx2_rcv_pkt_handler()
     341  struct otx2_cq_queue *cq, int budget)  in otx2_rx_napi_handler()
     384  void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)  in otx2_refill_pool_ptrs()
     398  struct otx2_cq_queue *cq, int budget)  in otx2_tx_napi_handler()
     448  struct otx2_cq_queue *cq;  in otx2_napi_handler()  local
     972  void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)  in otx2_cleanup_rx_cqes()
    1003  void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)  in otx2_cleanup_tx_cqes()
/drivers/infiniband/hw/bnxt_re/
  qplib_fp.c
     157  struct bnxt_qplib_cq *cq = nq_work->cq;  in bnxt_qpn_cqn_sched_task()  local
     234  static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)  in clean_nq()
     286  static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)  in __wait_for_all_nqes()
     304  struct bnxt_qplib_cq *cq;  in bnxt_qplib_service_nq()  local
    1446  static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)  in __clean_cq()
    2061  int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)  in bnxt_qplib_create_cq()
    2129  int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)  in bnxt_qplib_destroy_cq()
    2253  static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,  in do_wa9060()
    2351  static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,  in bnxt_qplib_cq_process_req()
    2458  static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,  in bnxt_qplib_cq_process_res_rc()
     [all …]
/drivers/infiniband/hw/qedr/
  verbs.c
     702  struct qedr_cq *cq, struct ib_udata *udata,  in qedr_copy_cq_uresp()
     723  static void consume_cqe(struct qedr_cq *cq)  in consume_cqe()
     839  static inline void qedr_init_cq_params(struct qedr_cq *cq,  in qedr_init_cq_params()
     858  static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)  in doorbell_cq()
     867  struct qedr_cq *cq = get_qedr_cq(ibcq);  in qedr_arm_cq()  local
     920  struct qedr_cq *cq = get_qedr_cq(ibcq);  in qedr_create_cq()  local
    1058  struct qedr_cq *cq = get_qedr_cq(ibcq);  in qedr_resize_cq()  local
    1073  struct qedr_cq *cq = get_qedr_cq(ibcq);  in qedr_destroy_cq()  local
    1622  struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);  in qedr_create_srq()  local
    4016  static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)  in is_valid_cqe()
     [all …]
/drivers/infiniband/ulp/iser/
  iser_initiator.c
     562  void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)  in iser_login_rsp()
     657  void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)  in iser_task_rsp()
     709  void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_cmd_comp()
     715  void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_ctrl_comp()
     731  void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)  in iser_dataout_comp()
/drivers/infiniband/sw/siw/
  siw_cq.c
      48  int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)  in siw_reap_cqe()
     116  void siw_cq_flush(struct siw_cq *cq)  in siw_cq_flush()
/drivers/net/ethernet/microsoft/mana/
  mana_en.c
     141  struct mana_cq *cq;  in mana_start_xmit()  local
     781  static void mana_poll_tx_cq(struct mana_cq *cq)  in mana_poll_tx_cq()
     965  static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,  in mana_process_rx_cqe()
    1054  static void mana_poll_rx_cq(struct mana_cq *cq)  in mana_poll_rx_cq()
    1076  struct mana_cq *cq = context;  in mana_cq_handler()  local
    1103  struct mana_cq *cq = container_of(napi, struct mana_cq, napi);  in mana_poll()  local
    1116  struct mana_cq *cq = context;  in mana_schedule_napi()  local
    1121  static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)  in mana_deinit_cq()
    1176  struct mana_cq *cq;  in mana_create_txq()  local
    1410  struct mana_cq *cq = NULL;  in mana_create_rxq()  local
/drivers/isdn/mISDN/
  dsp_core.c
     190  struct mISDN_ctrl_req cq;  in dsp_rx_off_member()  local
     257  struct mISDN_ctrl_req cq;  in dsp_fill_empty()  local
     629  struct mISDN_ctrl_req cq;  in get_features()  local
  dsp_hwec.c
      38  struct mISDN_ctrl_req cq;  in dsp_hwec_enable()  local
      94  struct mISDN_ctrl_req cq;  in dsp_hwec_disable()  local