Lines Matching refs:cq

39 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)  in mlx5_ib_cq_comp()  argument
41 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
48 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
49 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
50 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
62 event.element.cq = ibcq; in mlx5_ib_cq_event()
72 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
74 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); in get_cqe()
82 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
97 static void *next_cqe_sw(struct mlx5_ib_cq *cq) in next_cqe_sw() argument
99 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
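
The test at file line 90 above is the heart of get_sw_cqe(): a CQE is software-owned only when its owner bit matches the lap parity of the index, and masking with cq->ibcq.cqe + 1 (the power-of-two ring size) extracts exactly that parity bit, which flips each time the consumer index wraps. A minimal standalone model of the check; RING_ENTRIES, model_cqe, and sw_owns() are invented names, and the MLX5_CQE_INVALID opcode test that get_sw_cqe() also performs is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8          /* must be a power of two */
#define OWNER_MASK   0x1        /* bit 0 of op_own carries ownership */

struct model_cqe {
        uint8_t op_own;
};

static struct model_cqe ring[RING_ENTRIES];

/* Software owns entry n when its owner bit equals the lap parity of n. */
static bool sw_owns(uint32_t n)
{
        const struct model_cqe *cqe = &ring[n & (RING_ENTRIES - 1)];
        uint8_t lap_parity = !!(n & RING_ENTRIES);

        return (cqe->op_own & OWNER_MASK) == lap_parity;
}

int main(void)
{
        ring[0].op_own = OWNER_MASK;    /* freshly initialized: hw-owned */
        printf("lap 0: sw-owned=%d\n", sw_owns(0));             /* 0 */
        ring[0].op_own = 0;             /* hardware posts a CQE on lap 0 */
        printf("lap 0: sw-owned=%d\n", sw_owns(0));             /* 1 */
        printf("lap 1: sw-owned=%d\n", sw_owns(RING_ENTRIES));  /* 0 */
        return 0;
}
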
496 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, in mlx5_ib_poll_sw_comp() argument
503 list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { in mlx5_ib_poll_sw_comp()
509 list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { in mlx5_ib_poll_sw_comp()
516 static int mlx5_poll_one(struct mlx5_ib_cq *cq, in mlx5_poll_one() argument
520 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
535 cqe = next_cqe_sw(cq); in mlx5_poll_one()
539 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
541 ++cq->mcq.cons_index; in mlx5_poll_one()
550 if (likely(cq->resize_buf)) { in mlx5_poll_one()
551 free_cq_buf(dev, &cq->buf); in mlx5_poll_one()
552 cq->buf = *cq->resize_buf; in mlx5_poll_one()
553 kfree(cq->resize_buf); in mlx5_poll_one()
554 cq->resize_buf = NULL; in mlx5_poll_one()
598 "Requestor" : "Responder", cq->mcq.cqn); in mlx5_poll_one()
634 cq->mcq.cqn, mr->sig->err_item.key, in mlx5_poll_one()
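
get_sw_cqe(), mlx5_poll_one(), and the clean and resize paths all repeat the pattern cqe64 = (cqe_sz == 64) ? cqe : cqe + 64: when the CQ is configured for 128-byte CQEs, the 64-byte base CQE lives in the second half of each slot. A standalone illustration of that addressing; every name in it is invented for the sketch:

#include <stdint.h>
#include <stdio.h>

struct cqe64_model {
        uint8_t data[63];
        uint8_t op_own;         /* last byte of the 64-byte base CQE */
};

int main(void)
{
        uint8_t buf[2][128] = { { 0 } };        /* two 128-byte CQE slots */
        int cqe_sz = 128;
        void *cqe = &buf[1][0];                 /* slot 1, as get_cqe() returns */
        struct cqe64_model *cqe64 = (cqe_sz == 64) ?
                cqe : (void *)((uint8_t *)cqe + 64);

        printf("base CQE is %zu bytes at offset %td within its slot\n",
               sizeof(*cqe64), (uint8_t *)cqe64 - (uint8_t *)cqe);
        return 0;
}
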
647 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, in poll_soft_wc() argument
650 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in poll_soft_wc()
654 list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) { in poll_soft_wc()
659 cq->mcq.cqn); in poll_soft_wc()
671 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_poll_cq() local
673 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_poll_cq()
679 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_poll_cq()
681 mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled); in mlx5_ib_poll_cq()
685 if (unlikely(!list_empty(&cq->wc_list))) in mlx5_ib_poll_cq()
686 soft_polled = poll_soft_wc(cq, num_entries, wc); in mlx5_ib_poll_cq()
689 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) in mlx5_ib_poll_cq()
694 mlx5_cq_set_ci(&cq->mcq); in mlx5_ib_poll_cq()
696 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_poll_cq()
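
mlx5_ib_poll_cq() drains driver-generated soft completions first, then hardware CQEs up to num_entries, and publishes the consumer index once per batch through mlx5_cq_set_ci() rather than per CQE. A standalone sketch of that ordering under invented model_* types, with a printf standing in for the doorbell write:

#include <stdint.h>
#include <stdio.h>

struct model_wc { uint32_t wr_id; };

struct model_cq {
        uint32_t cons_index;
        int hw_pending;         /* CQEs "hardware" has posted */
        int soft_pending;       /* soft WCs queued by the driver */
};

static int poll_soft(struct model_cq *cq, int max, struct model_wc *wc)
{
        int n = 0;

        while (n < max && cq->soft_pending)
                wc[n++].wr_id = 0x5000 + cq->soft_pending--;
        return n;
}

static int model_poll_cq(struct model_cq *cq, int num_entries,
                         struct model_wc *wc)
{
        int soft = poll_soft(cq, num_entries, wc);
        int npolled;

        for (npolled = 0; npolled < num_entries - soft; npolled++) {
                if (!cq->hw_pending)
                        break;  /* no software-owned CQE left */
                wc[soft + npolled].wr_id = 0x1000 + cq->cons_index;
                cq->cons_index++;
                cq->hw_pending--;
        }
        /* One doorbell update for the whole batch, as mlx5_cq_set_ci(). */
        printf("publish cons_index=%u\n", cq->cons_index);
        return soft + npolled;
}

int main(void)
{
        struct model_cq cq = { .cons_index = 0, .hw_pending = 3,
                               .soft_pending = 2 };
        struct model_wc wc[8];

        printf("polled %d completions\n", model_poll_cq(&cq, 8, wc));
        return 0;
}
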
704 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_arm_cq() local
709 spin_lock_irqsave(&cq->lock, irq_flags); in mlx5_ib_arm_cq()
710 if (cq->notify_flags != IB_CQ_NEXT_COMP) in mlx5_ib_arm_cq()
711 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK; in mlx5_ib_arm_cq()
713 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list)) in mlx5_ib_arm_cq()
715 spin_unlock_irqrestore(&cq->lock, irq_flags); in mlx5_ib_arm_cq()
717 mlx5_cq_arm(&cq->mcq, in mlx5_ib_arm_cq()
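
mlx5_ib_arm_cq() records the requested notification type under cq->lock, taking care not to downgrade an already-pending IB_CQ_NEXT_COMP to IB_CQ_SOLICITED, and returns 1 when IB_CQ_REPORT_MISSED_EVENTS is set and soft completions are already queued. A sketch with invented M_* constants (their values do not match the IB verbs enums); the actual doorbell write via mlx5_cq_arm() is left out:

#include <stdio.h>

#define M_SOLICITED      0x1
#define M_NEXT_COMP      0x2
#define M_SOLICITED_MASK (M_SOLICITED | M_NEXT_COMP)
#define M_MISSED_EVENTS  0x4

struct model_cq { int notify_flags; int soft_wc_pending; };

static int model_arm_cq(struct model_cq *cq, int flags)
{
        /* Never downgrade a pending next-completion request. */
        if (cq->notify_flags != M_NEXT_COMP)
                cq->notify_flags = flags & M_SOLICITED_MASK;

        /* Report a missed event if soft completions already wait. */
        return (flags & M_MISSED_EVENTS) && cq->soft_wc_pending;
}

int main(void)
{
        struct model_cq cq = { .notify_flags = 0, .soft_wc_pending = 1 };

        printf("missed=%d armed_flags=%d\n",
               model_arm_cq(&cq, M_SOLICITED | M_MISSED_EVENTS),
               cq.notify_flags);
        return 0;
}
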
743 struct ib_ucontext *context, struct mlx5_ib_cq *cq, in create_cq_user() argument
773 cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, in create_cq_user()
776 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
777 err = PTR_ERR(cq->buf.umem); in create_cq_user()
782 &cq->db); in create_cq_user()
786 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
800 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0); in create_cq_user()
811 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); in create_cq_user()
814 ib_umem_release(cq->buf.umem); in create_cq_user()
818 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) in destroy_cq_user() argument
820 mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); in destroy_cq_user()
821 ib_umem_release(cq->buf.umem); in destroy_cq_user()
824 static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) in init_cq_buf() argument
837 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in create_cq_kernel() argument
845 err = mlx5_db_alloc(dev->mdev, &cq->db); in create_cq_kernel()
849 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
850 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
851 cq->mcq.cqe_sz = cqe_size; in create_cq_kernel()
853 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); in create_cq_kernel()
857 init_cq_buf(cq, &cq->buf); in create_cq_kernel()
860 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; in create_cq_kernel()
868 mlx5_fill_page_array(&cq->buf.buf, pas); in create_cq_kernel()
872 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in create_cq_kernel()
879 free_cq_buf(dev, &cq->buf); in create_cq_kernel()
882 mlx5_db_free(dev->mdev, &cq->db); in create_cq_kernel()
886 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in destroy_cq_kernel() argument
888 free_cq_buf(dev, &cq->buf); in destroy_cq_kernel()
889 mlx5_db_free(dev->mdev, &cq->db); in destroy_cq_kernel()
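
create_cq_kernel() points set_ci_db at the first 32-bit word of the doorbell record and arm_db at the word immediately after it (cq->db.db + 1). A minimal model of that two-word layout, with a plain array standing in for the DMA-coherent page that mlx5_db_alloc() provides; note the real driver stores these values big-endian and packs more than a bare flag into the arm word:

#include <stdint.h>
#include <stdio.h>

struct model_db {
        uint32_t rec[2];        /* [0] = consumer index, [1] = arm state */
};

int main(void)
{
        struct model_db db = { { 0, 0 } };
        uint32_t *set_ci_db = &db.rec[0];
        uint32_t *arm_db    = set_ci_db + 1;    /* cq->db.db + 1 */

        *set_ci_db = 42;        /* publish consumer index 42 */
        *arm_db    = 1;         /* record an arm request */
        printf("ci=%u arm=%u\n", db.rec[0], db.rec[1]);
        return 0;
}
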
894 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, in notify_soft_wc_handler() local
897 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
908 struct mlx5_ib_cq *cq; in mlx5_ib_create_cq() local
929 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in mlx5_ib_create_cq()
930 if (!cq) in mlx5_ib_create_cq()
933 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
934 mutex_init(&cq->resize_mutex); in mlx5_ib_create_cq()
935 spin_lock_init(&cq->lock); in mlx5_ib_create_cq()
936 cq->resize_buf = NULL; in mlx5_ib_create_cq()
937 cq->resize_umem = NULL; in mlx5_ib_create_cq()
938 cq->create_flags = attr->flags; in mlx5_ib_create_cq()
939 INIT_LIST_HEAD(&cq->list_send_qp); in mlx5_ib_create_cq()
940 INIT_LIST_HEAD(&cq->list_recv_qp); in mlx5_ib_create_cq()
943 err = create_cq_user(dev, udata, context, cq, entries, in mlx5_ib_create_cq()
949 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, in mlx5_ib_create_cq()
954 INIT_WORK(&cq->notify_work, notify_soft_wc_handler); in mlx5_ib_create_cq()
961 cq->cqe_size = cqe_size; in mlx5_ib_create_cq()
968 MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma); in mlx5_ib_create_cq()
969 if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN) in mlx5_ib_create_cq()
972 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); in mlx5_ib_create_cq()
976 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); in mlx5_ib_create_cq()
977 cq->mcq.irqn = irqn; in mlx5_ib_create_cq()
979 cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
981 cq->mcq.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
982 cq->mcq.event = mlx5_ib_cq_event; in mlx5_ib_create_cq()
984 INIT_LIST_HEAD(&cq->wc_list); in mlx5_ib_create_cq()
987 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { in mlx5_ib_create_cq()
994 return &cq->ibcq; in mlx5_ib_create_cq()
997 mlx5_core_destroy_cq(dev->mdev, &cq->mcq); in mlx5_ib_create_cq()
1002 destroy_cq_user(cq, context); in mlx5_ib_create_cq()
1004 destroy_cq_kernel(dev, cq); in mlx5_ib_create_cq()
1007 kfree(cq); in mlx5_ib_create_cq()
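
mlx5_ib_create_cq() stores entries - 1 in ibcq.cqe, and that value doubles as the index mask used by get_sw_cqe() and the clean path, so the entry count must be a power of two. A small sketch of the convention; round_up_pow2() is an illustrative helper, not the kernel's roundup_pow_of_two():

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_pow2(uint32_t v)
{
        uint32_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        uint32_t entries = round_up_pow2(100);  /* 128 */
        uint32_t cqe_mask = entries - 1;        /* what ibcq.cqe holds */

        printf("entries=%u, index 130 maps to slot %u\n",
               entries, 130 & cqe_mask);        /* wraps to slot 2 */
        return 0;
}
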
1013 int mlx5_ib_destroy_cq(struct ib_cq *cq) in mlx5_ib_destroy_cq() argument
1015 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_destroy_cq()
1016 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_destroy_cq()
1019 if (cq->uobject) in mlx5_ib_destroy_cq()
1020 context = cq->uobject->context; in mlx5_ib_destroy_cq()
1038 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) in __mlx5_ib_cq_clean() argument
1046 if (!cq) in __mlx5_ib_cq_clean()
1055 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) in __mlx5_ib_cq_clean()
1056 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
1062 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { in __mlx5_ib_cq_clean()
1063 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1064 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
1070 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1071 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; in __mlx5_ib_cq_clean()
1073 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
1080 cq->mcq.cons_index += nfreed; in __mlx5_ib_cq_clean()
1085 mlx5_cq_set_ci(&cq->mcq); in __mlx5_ib_cq_clean()
1089 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
1091 if (!cq) in mlx5_ib_cq_clean()
1094 spin_lock_irq(&cq->lock); in mlx5_ib_cq_clean()
1095 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
1096 spin_unlock_irq(&cq->lock); in mlx5_ib_cq_clean()
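
__mlx5_ib_cq_clean() walks backward from the producer to the consumer, drops CQEs whose QP/SRQ number matches the resource being destroyed (rsn), and slides the survivors toward the producer so the ring stays dense, finally advancing cons_index by the number freed. A standalone model of that compaction; the ownership-bit fixup and SRQ WQE recycling done by the real function are omitted, and each ring entry here holds just a qpn:

#include <stdint.h>
#include <stdio.h>

#define NENT 8          /* power-of-two ring size */

static uint32_t ring[NENT];

static void model_cq_clean(uint32_t *cons, uint32_t prod, uint32_t rsn)
{
        uint32_t nfreed = 0;

        while ((int)--prod - (int)*cons >= 0) {
                if (ring[prod & (NENT - 1)] == rsn) {
                        nfreed++;       /* drop: belongs to the dying QP */
                } else if (nfreed) {
                        /* slide the survivor toward the producer */
                        ring[(prod + nfreed) & (NENT - 1)] =
                                ring[prod & (NENT - 1)];
                }
        }
        *cons += nfreed;        /* consumer skips over the freed slots */
}

int main(void)
{
        uint32_t cons = 0, i;
        uint32_t qpns[] = { 7, 9, 7, 9 };

        for (i = 0; i < 4; i++)
                ring[i] = qpns[i];
        model_cq_clean(&cons, 4, 9);    /* QP 9 is being destroyed */
        for (i = cons; i < 4; i++)
                printf("slot %u: qpn %u\n", i, ring[i & (NENT - 1)]);
        return 0;
}
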
1099 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx5_ib_modify_cq() argument
1101 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_modify_cq()
1102 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_modify_cq()
1116 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_user() argument
1124 struct ib_ucontext *context = cq->buf.umem->context; in resize_user()
1148 cq->resize_umem = umem; in resize_user()
1154 static void un_resize_user(struct mlx5_ib_cq *cq) in un_resize_user() argument
1156 ib_umem_release(cq->resize_umem); in un_resize_user()
1159 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_kernel() argument
1164 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); in resize_kernel()
1165 if (!cq->resize_buf) in resize_kernel()
1168 err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); in resize_kernel()
1172 init_cq_buf(cq, cq->resize_buf); in resize_kernel()
1177 kfree(cq->resize_buf); in resize_kernel()
1181 static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in un_resize_kernel() argument
1183 free_cq_buf(dev, cq->resize_buf); in un_resize_kernel()
1184 cq->resize_buf = NULL; in un_resize_kernel()
1187 static int copy_resize_cqes(struct mlx5_ib_cq *cq) in copy_resize_cqes() argument
1189 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in copy_resize_cqes()
1200 ssize = cq->buf.cqe_size; in copy_resize_cqes()
1201 dsize = cq->resize_buf->cqe_size; in copy_resize_cqes()
1207 i = cq->mcq.cons_index; in copy_resize_cqes()
1208 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1217 dcqe = get_cqe_from_buf(cq->resize_buf, in copy_resize_cqes()
1218 (i + 1) & (cq->resize_buf->nent), in copy_resize_cqes()
1221 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
1226 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1235 cq->mcq.cqn); in copy_resize_cqes()
1239 ++cq->mcq.cons_index; in copy_resize_cqes()
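
copy_resize_cqes() cannot simply copy unpolled CQEs byte-for-byte into the new buffer: the lap parity of a given consumer index depends on the ring size, so each migrated CQE gets its owner bit recomputed via sw_ownership_bit(i + 1, cq->resize_buf->nent). The helper's body below is my reading of that parity rule, shown standalone:

#include <stdint.h>
#include <stdio.h>

/* Owner bit software expects at index i in a ring of nent entries. */
static uint8_t sw_ownership_bit(uint32_t i, uint32_t nent)
{
        return (i & nent) ? 1 : 0;
}

int main(void)
{
        uint32_t i = 100;

        /* The same absolute index sits on different laps in each ring. */
        printf("nent=64:  owner=%u\n", sw_ownership_bit(i, 64));   /* 1 */
        printf("nent=128: owner=%u\n", sw_ownership_bit(i, 128));  /* 0 */
        return 0;
}
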
1246 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_resize_cq() local
1277 mutex_lock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1279 err = resize_user(dev, cq, entries, udata, &npas, &page_shift, in mlx5_ib_resize_cq()
1283 err = resize_kernel(dev, cq, entries, cqe_size); in mlx5_ib_resize_cq()
1285 npas = cq->resize_buf->buf.npages; in mlx5_ib_resize_cq()
1286 page_shift = cq->resize_buf->buf.page_shift; in mlx5_ib_resize_cq()
1304 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, in mlx5_ib_resize_cq()
1307 mlx5_fill_page_array(&cq->resize_buf->buf, pas); in mlx5_ib_resize_cq()
1323 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn); in mlx5_ib_resize_cq()
1325 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); in mlx5_ib_resize_cq()
1330 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1331 ib_umem_release(cq->buf.umem); in mlx5_ib_resize_cq()
1332 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
1333 cq->resize_umem = NULL; in mlx5_ib_resize_cq()
1338 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_resize_cq()
1339 if (cq->resize_buf) { in mlx5_ib_resize_cq()
1340 err = copy_resize_cqes(cq); in mlx5_ib_resize_cq()
1342 tbuf = cq->buf; in mlx5_ib_resize_cq()
1343 cq->buf = *cq->resize_buf; in mlx5_ib_resize_cq()
1344 kfree(cq->resize_buf); in mlx5_ib_resize_cq()
1345 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1349 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1350 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_resize_cq()
1354 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1364 un_resize_user(cq); in mlx5_ib_resize_cq()
1366 un_resize_kernel(dev, cq); in mlx5_ib_resize_cq()
1368 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
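
The resize path stages the new buffer in cq->resize_buf, asks firmware to switch via mlx5_core_modify_cq(), and only then adopts it: user CQs just swap umems, while kernel CQs first migrate unpolled CQEs under cq->lock (copy_resize_cqes() above). A simplified single-threaded model of the kernel-side ordering; every helper here is invented, and the lock and CQE migration are elided:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct model_buf { uint32_t *cqes; uint32_t nent; };
struct model_cq  { struct model_buf buf; struct model_buf *resize_buf; };

static int firmware_modify_cq(void) { return 0; }      /* pretend success */

static int model_resize_kernel(struct model_cq *cq, uint32_t entries)
{
        struct model_buf *nb = calloc(1, sizeof(*nb));

        if (!nb)
                return -1;
        nb->cqes = calloc(entries, sizeof(*nb->cqes));
        if (!nb->cqes) {
                free(nb);
                return -1;
        }
        nb->nent = entries;
        cq->resize_buf = nb;                    /* as resize_kernel() */

        if (firmware_modify_cq()) {             /* mlx5_core_modify_cq() */
                free(nb->cqes);                 /* as un_resize_kernel() */
                free(nb);
                cq->resize_buf = NULL;
                return -1;
        }
        /* Success: migrate unpolled CQEs (elided), then adopt the buffer. */
        free(cq->buf.cqes);
        cq->buf = *cq->resize_buf;
        free(cq->resize_buf);
        cq->resize_buf = NULL;
        return 0;
}

int main(void)
{
        struct model_cq cq = { .buf = { calloc(4, sizeof(uint32_t)), 4 } };

        if (!model_resize_kernel(&cq, 8))
                printf("resized to %u entries\n", cq.buf.nent);
        free(cq.buf.cqes);
        return 0;
}
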
1374 struct mlx5_ib_cq *cq; in mlx5_ib_get_cqe_size() local
1379 cq = to_mcq(ibcq); in mlx5_ib_get_cqe_size()
1380 return cq->cqe_size; in mlx5_ib_get_cqe_size()
1387 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_generate_wc() local
1395 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_generate_wc()
1396 list_add_tail(&soft_wc->list, &cq->wc_list); in mlx5_ib_generate_wc()
1397 if (cq->notify_flags == IB_CQ_NEXT_COMP || in mlx5_ib_generate_wc()
1399 cq->notify_flags = 0; in mlx5_ib_generate_wc()
1400 schedule_work(&cq->notify_work); in mlx5_ib_generate_wc()
1402 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_generate_wc()
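
mlx5_ib_generate_wc() queues a software completion on cq->wc_list and kicks notify_work only if the CQ is currently armed, consuming the arm in the process; poll_soft_wc() then delivers the queued entries ahead of hardware CQEs. A sketch of that one-shot arming with invented names; note the listing elides the second half of the if-condition, so only the visible IB_CQ_NEXT_COMP check is modeled:

#include <stdio.h>

#define M_NEXT_COMP 0x2

struct model_cq { int notify_flags; int queued; };

static void kick_handler(void) { printf("notify_work scheduled\n"); }

static void model_generate_wc(struct model_cq *cq)
{
        cq->queued++;   /* list_add_tail(&soft_wc->list, &cq->wc_list) */
        /* The listing elides a second condition that can also force a kick. */
        if (cq->notify_flags == M_NEXT_COMP) {
                cq->notify_flags = 0;   /* arming is consumed (one-shot) */
                kick_handler();         /* schedule_work(&cq->notify_work) */
        }
}

int main(void)
{
        struct model_cq cq = { .notify_flags = M_NEXT_COMP, .queued = 0 };

        model_generate_wc(&cq);         /* fires: CQ armed for next comp */
        model_generate_wc(&cq);         /* silent: arm already consumed */
        return 0;
}
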