Searched refs:cqn (Results 1 – 25 of 25) sorted by relevance

/drivers/net/ethernet/mellanox/mlx4/
cq.c
56 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) in mlx4_cq_completion() argument
61 cqn & (dev->caps.num_cqs - 1)); in mlx4_cq_completion()
63 mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); in mlx4_cq_completion()
72 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) in mlx4_cq_event() argument
79 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); in mlx4_cq_event()
86 mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); in mlx4_cq_event()
136 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); in mlx4_cq_modify()
164 err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); in mlx4_cq_resize()
171 int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) in __mlx4_cq_alloc_icm() argument
177 *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); in __mlx4_cq_alloc_icm()
[all …]
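
An aside on the lookup pattern above: mlx4 reduces the CQ number to a
radix-tree key with cqn & (dev->caps.num_cqs - 1), which assumes num_cqs is a
power of two. A minimal stand-alone sketch of the same idea, using a flat
array in place of the radix tree (all names here are illustrative, not the
driver's own):

    #include <stdio.h>

    #define NUM_CQS 16  /* must be a power of two, like dev->caps.num_cqs */

    struct cq { unsigned int cqn; };
    static struct cq *cq_table[NUM_CQS];

    static void cq_completion(unsigned int cqn)
    {
            /* Low bits index the table; the driver feeds the same masked
             * value to radix_tree_lookup(). */
            struct cq *cq = cq_table[cqn & (NUM_CQS - 1)];

            /* A flat array can alias two CQNs to one slot, so verify both;
             * the driver's radix tree only needs the NULL check. */
            if (!cq || cq->cqn != cqn) {
                    fprintf(stderr, "Completion event for bogus CQ %08x\n", cqn);
                    return;
            }
            /* ... invoke the CQ's completion handler ... */
    }
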
en_resources.c
41 int is_tx, int rss, int qpn, int cqn, in mlx4_en_fill_qp_context() argument
61 context->cqn_send = cpu_to_be32(cqn); in mlx4_en_fill_qp_context()
62 context->cqn_recv = cpu_to_be32(cqn); in mlx4_en_fill_qp_context()
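
The two stores above point both directions of a QP at the same CQ. A hedged
sketch of that context fill, with illustrative types and htonl standing in
for cpu_to_be32:

    #include <arpa/inet.h>
    #include <stdint.h>

    struct qp_context_sketch {
            uint32_t cqn_send;      /* big-endian in the real hardware context */
            uint32_t cqn_recv;
    };

    static void fill_qp_context(struct qp_context_sketch *ctx, uint32_t cqn)
    {
            /* Send and receive completions land on the same CQ. */
            ctx->cqn_send = htonl(cqn);
            ctx->cqn_recv = htonl(cqn);
    }
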
resource_tracker.c
876 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, in cq_res_start_move_to() argument
885 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); in cq_res_start_move_to()
1137 int cqn; in cq_alloc_res() local
1142 err = __mlx4_cq_alloc_icm(dev, &cqn); in cq_alloc_res()
1146 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); in cq_alloc_res()
1148 __mlx4_cq_free_icm(dev, cqn); in cq_alloc_res()
1152 set_param_l(out_param, cqn); in cq_alloc_res()
1427 int cqn; in cq_free_res() local
1432 cqn = get_param_l(&in_param); in cq_free_res()
1433 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); in cq_free_res()
[all …]
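
cq_alloc_res() above follows the usual allocate-then-track shape: reserve the
ICM entry, register the range for the requesting slave, and roll the ICM
allocation back if tracking fails. A compressed sketch of that control flow;
the three helpers are stand-ins, not driver functions:

    int alloc_icm(int *cqn);             /* stands in for __mlx4_cq_alloc_icm() */
    int track_range(int slave, int cqn); /* stands in for add_res_range() */
    void free_icm(int cqn);              /* stands in for __mlx4_cq_free_icm() */

    static int cq_alloc_sketch(int slave, int *out_cqn)
    {
            int cqn, err;

            err = alloc_icm(&cqn);
            if (err)
                    return err;

            err = track_range(slave, cqn);
            if (err) {
                    free_icm(cqn);      /* undo, as cq_alloc_res() does */
                    return err;
            }

            *out_cqn = cqn;             /* cq_alloc_res() uses set_param_l() */
            return 0;
    }
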
eq.c
235 int cqn; in mlx4_eq_int() local
254 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; in mlx4_eq_int()
255 mlx4_cq_completion(dev, cqn); in mlx4_eq_int()
376 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); in mlx4_eq_int()
380 be32_to_cpu(eqe->event.cq_err.cqn) in mlx4_eq_int()
397 be32_to_cpu(eqe->event.cq_err.cqn) in mlx4_eq_int()
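
Both the completion and the CQ-error paths above recover the CQ number the
same way: byte-swap the 32-bit EQE field, then mask to 24 bits, because mlx4
CQ numbers occupy only the low three bytes. A stand-alone sketch, with ntohl
playing the role of be32_to_cpu:

    #include <arpa/inet.h>
    #include <stdint.h>

    static uint32_t eqe_to_cqn(uint32_t eqe_field_be)
    {
            /* The top byte carries unrelated bits; keep only the 24-bit CQN. */
            return ntohl(eqe_field_be) & 0xffffff;
    }
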
mlx4.h
324 __be32 cqn; member
341 __be32 cqn; member
800 int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
801 void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
977 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
978 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
mlx4_en.h
235 u16 cqn; /* index of port CQ associated with this ring */ member
272 u16 cqn; /* index of port CQ associated with this ring */ member
543 int is_tx, int rss, int qpn, int cqn,
srq.c
165 int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, in mlx4_srq_alloc() argument
197 srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff); in mlx4_srq_alloc()
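
pg_offset_cqn is a packed field: the snippet above stores the 24-bit CQ number
and leaves the remaining high bits, presumably the page offset the name
suggests, at zero. A sketch of packing both halves, under the assumption that
the offset sits in the top byte:

    #include <arpa/inet.h>
    #include <stdint.h>

    static uint32_t pack_pg_offset_cqn(uint8_t pg_offset, uint32_t cqn)
    {
            /* CQN is masked to 24 bits, as in mlx4_srq_alloc(). */
            return htonl(((uint32_t)pg_offset << 24) | (cqn & 0xffffff));
    }
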
en_rx.c
329 ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; in mlx4_en_activate_rx_rings()
826 qpn, ring->cqn, context); in mlx4_en_config_rss_qp()
893 priv->rx_ring[0].cqn, &context); in mlx4_en_config_rss_steer()
en_netdev.c
613 priv->rx_ring[i].cqn = cq->mcq.cqn; in mlx4_en_start_port()
653 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); in mlx4_en_start_port()
en_tx.c
164 ring->cqn = cq; in mlx4_en_activate_tx_ring()
177 ring->cqn, &ring->context); in mlx4_en_activate_tx_ring()
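
Across en_rx.c, en_netdev.c and en_tx.c the wiring is the same: each ring
caches the hardware CQ number of its companion CQ (cq->mcq.cqn) so the QP
context can later be filled without chasing pointers. A reduced sketch with
illustrative types:

    struct cq_sketch   { unsigned int cqn; };
    struct ring_sketch { unsigned int cqn; };

    static void activate_ring(struct ring_sketch *ring, const struct cq_sketch *cq)
    {
            /* Remember which CQ this ring completes into. */
            ring->cqn = cq->cqn;
    }
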
/drivers/infiniband/hw/mthca/
mthca_cq.c
76 __be32 cqn; member
211 mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, in update_cons_index()
222 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn) in mthca_cq_completion() argument
226 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion()
229 mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn); in mthca_cq_completion()
238 void mthca_cq_event(struct mthca_dev *dev, u32 cqn, in mthca_cq_event() argument
246 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_event()
253 mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn); in mthca_cq_event()
302 qpn, cq->cqn, cq->cons_index, prod_index); in mthca_cq_clean()
389 cq->cqn, cq->cons_index); in handle_error_cqe()
[all …]
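
update_cons_index() above shows how Tavor doorbells address a CQ: the command
constant is OR-ed with cq->cqn in the first doorbell word, and the payload
(the consumer-index increment minus one) rides in the second. A sketch of that
encoding; the command bit is made up, and packing the two words into one
64-bit value only models what mthca_write64() does:

    #include <stdint.h>

    #define DB_INC_CI (1u << 24)    /* illustrative, not the real constant */

    static uint64_t cq_doorbell(uint32_t cqn, uint32_t incr)
    {
            /* Word 0: command | CQN; word 1: increment - 1. */
            return ((uint64_t)(DB_INC_CI | cqn) << 32) | (incr - 1);
    }
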
mthca_eq.c
132 __be32 cqn; member
149 __be32 cqn; member
219 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) in disarm_cq() argument
222 mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn, in disarm_cq()
276 disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; in mthca_eq_int()
343 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); in mthca_eq_int()
344 mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), in mthca_eq_int()
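
disarm_cq() uses the same two-word doorbell but with the roles swapped: the
command is OR-ed with the EQ number and the CQ number travels in the payload
word. A matching sketch, again with an invented command bit:

    #include <stdint.h>

    #define DB_DISARM_CQ (2u << 24) /* illustrative, not the real constant */

    static uint64_t disarm_doorbell(uint32_t eqn, uint32_t cqn)
    {
            /* Word 0: command | EQN; word 1: the CQ to disarm. */
            return ((uint64_t)(DB_DISARM_CQ | eqn) << 32) | cqn;
    }
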
mthca_user.h
83 __u32 cqn; member
mthca_provider.h
205 int cqn; member
mthca_dev.h
503 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
504 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
mthca_qp.c
717 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); in __mthca_modify_qp()
756 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); in __mthca_modify_qp()
1316 } else if (send_cq->cqn < recv_cq->cqn) { in mthca_lock_cqs()
1331 } else if (send_cq->cqn < recv_cq->cqn) { in mthca_unlock_cqs()
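 
mthca_lock_cqs()/mthca_unlock_cqs() above order the two CQ locks by CQ number,
so that two paths sharing the same pair of CQs can never take the locks in
opposite orders (the classic AB-BA deadlock). A stand-alone sketch of the same
discipline, with pthread mutexes standing in for the driver's spinlocks:

    #include <pthread.h>

    struct cq_sketch {
            int cqn;
            pthread_mutex_t lock;
    };

    static void lock_cqs(struct cq_sketch *send_cq, struct cq_sketch *recv_cq)
    {
            if (send_cq == recv_cq) {
                    pthread_mutex_lock(&send_cq->lock);  /* one CQ, one lock */
            } else if (send_cq->cqn < recv_cq->cqn) {
                    pthread_mutex_lock(&send_cq->lock);  /* lower CQN first */
                    pthread_mutex_lock(&recv_cq->lock);
            } else {
                    pthread_mutex_lock(&recv_cq->lock);
                    pthread_mutex_lock(&send_cq->lock);
            }
    }
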
mthca_provider.c
696 if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { in mthca_create_cq()
798 ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries)); in mthca_resize_cq()
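
mthca_create_cq() hands the new CQ number back to userspace through the udata
response; the __u32 cqn member in mthca_user.h above is the other half of that
ABI. A sketch of the shape of that handoff, with an illustrative response
struct and memcpy standing in for ib_copy_to_udata():

    #include <stdint.h>
    #include <string.h>

    struct create_cq_resp { uint32_t cqn; };    /* mirrors the udata layout */

    static int copy_resp(void *udata_buf, size_t len, uint32_t cqn)
    {
            struct create_cq_resp resp = { .cqn = cqn };

            if (len < sizeof(resp))
                    return -1;  /* userspace buffer too small */
            memcpy(udata_buf, &resp, sizeof(resp));
            return 0;
    }
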
/drivers/infiniband/hw/amso1100/
c2_cq.c
46 static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn) in c2_cq_get() argument
52 cq = c2dev->qptr_array[cqn]; in c2_cq_get()
369 cq->cqn = cq->mq.index; in c2_init_cq()
370 c2dev->qptr_array[cq->cqn] = cq; in c2_init_cq()
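
amso1100 keeps the mapping simpler than mlx4 or mthca: the CQ number is just
the message-queue index, and lookup is a direct array dereference with no mask
or radix tree. A sketch of that one-step mapping with illustrative names:

    #define QPTR_ARRAY_SIZE 64  /* illustrative size */

    struct c2_cq_sketch { int cqn; };
    static struct c2_cq_sketch *qptr_array[QPTR_ARRAY_SIZE];

    static struct c2_cq_sketch *cq_get(int cqn)
    {
            /* cqn doubles as the array index, as c2_cq_get() relies on. */
            return qptr_array[cqn];
    }
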
c2_user.h
69 __u32 cqn; member
c2_provider.h
95 int cqn; member
c2_qp.c
622 c2_cq_clean(c2dev, qp, send_cq->cqn); in c2_free_qp()
624 c2_cq_clean(c2dev, qp, recv_cq->cqn); in c2_free_qp()
/drivers/infiniband/hw/mlx4/
srq.c
79 u32 cqn; in mlx4_ib_create_srq() local
179 cqn = (init_attr->srq_type == IB_SRQT_XRC) ? in mlx4_ib_create_srq()
180 to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0; in mlx4_ib_create_srq()
184 err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt, in mlx4_ib_create_srq()
user.h
70 __u32 cqn; member
Dcq.c54 "on CQ %06x\n", type, cq->cqn); in mlx4_ib_cq_event()
234 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { in mlx4_ib_create_cq()
610 cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); in mlx4_ib_poll_one()
qp.c
649 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx4_ib_lock_cqs()
664 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx4_ib_unlock_cqs()
1139 context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); in __mlx4_ib_modify_qp()
1140 context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); in __mlx4_ib_modify_qp()