
Searched refs:ib_cq (Results 1 – 25 of 46) sorted by relevance


/drivers/infiniband/hw/ehca/
ehca_cq.c 65 ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x", in ehca_cq_assign_qp()
84 ehca_dbg(cq->ib_cq.device, in ehca_cq_unassign_qp()
93 ehca_err(cq->ib_cq.device, in ehca_cq_unassign_qp()
116 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, in ehca_create_cq()
121 struct ib_cq *cq; in ehca_create_cq()
161 cq = &my_cq->ib_cq; in ehca_create_cq()
274 my_cq->ib_cq.cqe = my_cq->nr_of_entries = in ehca_create_cq()
326 int ehca_destroy_cq(struct ib_cq *cq) in ehca_destroy_cq()
329 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); in ehca_destroy_cq()
381 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) in ehca_resize_cq()
ehca_uverbs.c 168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number); in ehca_mmap_cq()
171 ehca_err(cq->ib_cq.device, in ehca_mmap_cq()
179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number); in ehca_mmap_cq()
182 ehca_err(cq->ib_cq.device, in ehca_mmap_cq()
190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x", in ehca_mmap_cq()
269 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) in ehca_mmap()
274 ehca_err(cq->ib_cq.device, in ehca_mmap()
ehca_iverbs.h 129 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
133 int ehca_destroy_cq(struct ib_cq *cq);
135 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
137 int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
139 int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
141 int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
ehca_reqs.c 625 static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) in ehca_poll_cq_one()
628 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); in ehca_poll_cq_one()
798 static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq, in generate_flush_cqes()
872 int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) in ehca_poll_cq()
874 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); in ehca_poll_cq()
929 int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags) in ehca_req_notify_cq()
931 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); in ehca_req_notify_cq()
ehca_classes.h 97 struct ib_cq *ibcq_aqp1;
238 struct ib_cq ib_cq; member
ehca_irq.c 78 if (!cq->ib_cq.comp_handler) in comp_event_callback()
82 cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context); in comp_event_callback()
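The ehca hits above illustrate the provider-side pattern shared by the drivers in this list: each driver embeds a struct ib_cq inside its own CQ object (ehca_classes.h 238), recovers the wrapper with container_of() in its verbs entry points, and fires the consumer's comp_handler from its interrupt path (ehca_irq.c 78-82). A minimal sketch of that pattern, assuming hypothetical names such as my_drv_cq rather than any one driver's:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_drv_cq {
	struct ib_cq	ib_cq;		/* embedded core CQ, as in ehca_classes.h */
	u32		cq_number;	/* driver-private state (illustrative) */
};

static inline struct my_drv_cq *to_my_cq(struct ib_cq *cq)
{
	/* recover the driver wrapper from the core pointer handed in by verbs */
	return container_of(cq, struct my_drv_cq, ib_cq);
}

/* Called from the driver's completion interrupt, cf. comp_event_callback() in ehca_irq.c */
static void my_drv_comp_event(struct my_drv_cq *cq)
{
	if (cq->ib_cq.comp_handler)
		cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
}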
/drivers/infiniband/hw/mlx4/
mlx4_ib.h 63 struct ib_cq *cq;
77 struct ib_cq ibcq;
227 static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq()
293 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
294 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
295 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
298 int mlx4_ib_destroy_cq(struct ib_cq *cq);
299 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
300 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
cq.c 43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp()
50 struct ib_cq *ibcq; in mlx4_ib_cq_event()
90 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx4_ib_modify_cq()
166 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, in mlx4_ib_create_cq()
345 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) in mlx4_ib_resize_cq()
441 int mlx4_ib_destroy_cq(struct ib_cq *cq) in mlx4_ib_destroy_cq()
735 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in mlx4_ib_poll_cq()
761 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) in mlx4_ib_arm_cq()
/drivers/infiniband/hw/qib/
qib_cq.c 128 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) in qib_poll_cq()
207 struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, in qib_create_cq()
214 struct ib_cq *ret; in qib_create_cq()
321 int qib_destroy_cq(struct ib_cq *ibcq) in qib_destroy_cq()
349 int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) in qib_req_notify_cq()
378 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in qib_resize_cq()
qib_verbs.h 268 struct ib_cq ibcq;
780 static inline struct qib_cq *to_icq(struct ib_cq *ibcq) in to_icq()
955 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
957 struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
961 int qib_destroy_cq(struct ib_cq *ibcq);
963 int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
965 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
/drivers/infiniband/hw/ipath/
ipath_cq.c 129 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) in ipath_poll_cq()
200 struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, in ipath_create_cq()
207 struct ib_cq *ret; in ipath_create_cq()
314 int ipath_destroy_cq(struct ib_cq *ibcq) in ipath_destroy_cq()
342 int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) in ipath_req_notify_cq()
371 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in ipath_resize_cq()
ipath_verbs.h 215 struct ib_cq ibcq;
670 static inline struct ipath_cq *to_icq(struct ib_cq *ibcq) in to_icq()
808 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
810 struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
814 int ipath_destroy_cq(struct ib_cq *ibcq);
816 int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
818 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
/drivers/infiniband/hw/cxgb4/
iw_cxgb4.h 308 struct ib_cq ibcq;
317 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) in to_c4iw_cq()
722 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
759 int c4iw_destroy_cq(struct ib_cq *ib_cq);
760 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
764 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
765 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
cq.c 709 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in c4iw_poll_cq()
730 int c4iw_destroy_cq(struct ib_cq *ib_cq) in c4iw_destroy_cq() argument
735 PDBG("%s ib_cq %p\n", __func__, ib_cq); in c4iw_destroy_cq()
736 chp = to_c4iw_cq(ib_cq); in c4iw_destroy_cq()
742 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context) in c4iw_destroy_cq()
750 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, in c4iw_create_cq()
879 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) in c4iw_resize_cq()
884 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) in c4iw_arm_cq()
/drivers/infiniband/hw/amso1100/
c2_provider.h 92 struct ib_cq ibcq;
153 static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq) in to_c2cq()
c2_provider.c 289 static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector, in c2_create_cq()
312 static int c2_destroy_cq(struct ib_cq *ib_cq) in c2_destroy_cq() argument
314 struct c2_cq *cq = to_c2cq(ib_cq); in c2_destroy_cq()
318 c2_free_cq(to_c2dev(ib_cq->device), cq); in c2_destroy_cq()
c2.h 518 extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
519 extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
/drivers/infiniband/ulp/srp/
ib_srp.h 135 struct ib_cq *send_cq ____cacheline_aligned_in_smp;
136 struct ib_cq *recv_cq;
/drivers/infiniband/hw/cxgb3/
iwch_provider.h 102 struct ib_cq ibcq;
112 static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq) in to_iwch_cq()
334 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/drivers/infiniband/ulp/ipoib/
ipoib.h 301 struct ib_cq *recv_cq;
302 struct ib_cq *send_cq;
418 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
419 void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
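The ipoib, srp and iser hits show the consumer side: a ULP keeps its own ib_cq pointers and registers completion handlers such as ipoib_ib_completion(). A hedged sketch of the usual handler shape, re-arm with ib_req_notify_cq() and then drain with ib_poll_cq(); the handler name and drain loop here are illustrative, not ipoib's exact code (ipoib, for instance, defers receive polling to NAPI):

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void my_ulp_comp_handler(struct ib_cq *cq, void *ctx)
{
	struct ib_wc wc;

	/* Request the next completion event before draining, so a completion
	 * that arrives while we poll still re-triggers this handler. */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			pr_warn("failed WR id %llu status %d\n",
				(unsigned long long)wc.wr_id, wc.status);
		/* ... complete the work request identified by wc.wr_id ... */
	}
}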
/drivers/infiniband/hw/mthca/
Dmthca_provider.h202 struct ib_cq ibcq;
324 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) in to_mcq()
mthca_dev.h 494 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
496 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
497 int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
/drivers/infiniband/core/
verbs.c 318 struct ib_cq *uninitialized_var(cq); in ib_destroy_srq()
873 struct ib_cq *scq, *rcq; in ib_destroy_qp()
906 struct ib_cq *ib_create_cq(struct ib_device *device, in ib_create_cq()
911 struct ib_cq *cq; in ib_create_cq()
928 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in ib_modify_cq()
935 int ib_destroy_cq(struct ib_cq *cq) in ib_destroy_cq()
944 int ib_resize_cq(struct ib_cq *cq, int cqe) in ib_resize_cq()
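The core entries are the drivers/infiniband/core/verbs.c dispatchers that ULPs actually call. A hedged usage sketch against the six-argument ib_create_cq() shown at verbs.c 906 (later kernels fold the last two arguments into struct ib_cq_init_attr); my_ulp_setup_cq and the depth/vector values are illustrative, not taken from any listed driver:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static void my_ulp_comp_handler(struct ib_cq *cq, void *ctx);	/* e.g. the handler sketched above */

static struct ib_cq *my_ulp_setup_cq(struct ib_device *device, void *ctx)
{
	struct ib_cq *cq;

	cq = ib_create_cq(device, my_ulp_comp_handler, NULL /* event handler */,
			  ctx, 256 /* cqe */, 0 /* comp_vector */);
	if (IS_ERR(cq))
		return cq;

	/* Arm the CQ once up front; the handler re-arms it on each event. */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP)) {
		ib_destroy_cq(cq);
		return ERR_PTR(-EIO);
	}
	return cq;
}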
/drivers/infiniband/ulp/iser/
iscsi_iser.h 232 struct ib_cq *rx_cq;
233 struct ib_cq *tx_cq;
iser_verbs.c 45 static void iser_cq_callback(struct ib_cq *cq, void *cq_context); in iser_cq_callback()
795 struct ib_cq *cq = device->tx_cq; in iser_drain_tx_cq()
825 struct ib_cq *cq = device->rx_cq; in iser_cq_tasklet_fn()
863 static void iser_cq_callback(struct ib_cq *cq, void *cq_context) in iser_cq_callback()
