Searched refs:mqp (Results 1 – 12 of 12) sorted by relevance

/drivers/staging/rdma/ipath/
ipath_verbs_mcast.c
54 struct ipath_mcast_qp *mqp; in ipath_mcast_qp_alloc() local
56 mqp = kmalloc(sizeof *mqp, GFP_KERNEL); in ipath_mcast_qp_alloc()
57 if (!mqp) in ipath_mcast_qp_alloc()
60 mqp->qp = qp; in ipath_mcast_qp_alloc()
64 return mqp; in ipath_mcast_qp_alloc()
67 static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp) in ipath_mcast_qp_free() argument
69 struct ipath_qp *qp = mqp->qp; in ipath_mcast_qp_free()
75 kfree(mqp); in ipath_mcast_qp_free()
164 struct ipath_mcast_qp *mqp) in ipath_mcast_add() argument
192 if (p->qp == mqp->qp) { in ipath_mcast_add()
[all …]
/drivers/staging/rdma/hfi1/
verbs_mcast.c
61 struct hfi1_mcast_qp *mqp; in mcast_qp_alloc() local
63 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); in mcast_qp_alloc()
64 if (!mqp) in mcast_qp_alloc()
67 mqp->qp = qp; in mcast_qp_alloc()
71 return mqp; in mcast_qp_alloc()
74 static void mcast_qp_free(struct hfi1_mcast_qp *mqp) in mcast_qp_free() argument
76 struct hfi1_qp *qp = mqp->qp; in mcast_qp_free()
82 kfree(mqp); in mcast_qp_free()
171 struct hfi1_mcast *mcast, struct hfi1_mcast_qp *mqp) in mcast_add() argument
199 if (p->qp == mqp->qp) { in mcast_add()
[all …]
/drivers/infiniband/hw/qib/
qib_verbs_mcast.c
44 struct qib_mcast_qp *mqp; in qib_mcast_qp_alloc() local
46 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); in qib_mcast_qp_alloc()
47 if (!mqp) in qib_mcast_qp_alloc()
50 mqp->qp = qp; in qib_mcast_qp_alloc()
54 return mqp; in qib_mcast_qp_alloc()
57 static void qib_mcast_qp_free(struct qib_mcast_qp *mqp) in qib_mcast_qp_free() argument
59 struct qib_qp *qp = mqp->qp; in qib_mcast_qp_free()
65 kfree(mqp); in qib_mcast_qp_free()
154 struct qib_mcast *mcast, struct qib_mcast_qp *mqp) in qib_mcast_add() argument
182 if (p->qp == mqp->qp) { in qib_mcast_add()
[all …]
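
The ipath, hfi1, and qib hits above are three near-identical copies of the same multicast-attach bookkeeping: *_mcast_qp_alloc() kmallocs a small wrapper, NULL-checks it, points it at the QP, and *_mcast_add() later rejects a duplicate attach by comparing p->qp == mqp->qp. Below is a minimal standalone sketch of that shape; it collapses the drivers' rbtree of groups and their reference counting into one linked list, and the struct and function names are illustrative rather than taken from any of the three drivers.

#include <stdio.h>
#include <stdlib.h>

struct qp { int qpn; };                 /* stand-in for the driver's QP struct */

struct mcast_qp {                       /* per-attach wrapper, as in the drivers */
        struct qp *qp;
        struct mcast_qp *next;
};

/* Allocate a wrapper recording one QP's membership in a multicast group. */
static struct mcast_qp *mcast_qp_alloc(struct qp *qp)
{
        struct mcast_qp *mqp = malloc(sizeof(*mqp));

        if (!mqp)
                return NULL;            /* the kernel code jumps to a bail label */
        mqp->qp = qp;
        mqp->next = NULL;
        return mqp;
}

/* Reject a duplicate attach the way the drivers' *_mcast_add() loops do. */
static int mcast_add(struct mcast_qp **head, struct mcast_qp *mqp)
{
        for (struct mcast_qp *p = *head; p; p = p->next)
                if (p->qp == mqp->qp)
                        return -1;      /* this QP is already attached */
        mqp->next = *head;
        *head = mqp;
        return 0;
}

int main(void)
{
        struct qp qp = { .qpn = 7 };
        struct mcast_qp *head = NULL;
        struct mcast_qp *a = mcast_qp_alloc(&qp);
        struct mcast_qp *b = mcast_qp_alloc(&qp);

        if (!a || !b)
                return 1;
        printf("first attach:  %d\n", mcast_add(&head, a));   /* 0 */
        printf("second attach: %d\n", mcast_add(&head, b));   /* -1: duplicate */
        free(b);
        free(a);
        return 0;
}
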
/drivers/scsi/arm/
msgqueue.c
124 struct msgqueue_entry **mqp; in msgqueue_addmsg() local
136 mqp = &msgq->qe; in msgqueue_addmsg()
137 while (*mqp) in msgqueue_addmsg()
138 mqp = &(*mqp)->next; in msgqueue_addmsg()
140 *mqp = mq; in msgqueue_addmsg()
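
msgqueue_addmsg() appends with the classic pointer-to-pointer walk: mqp starts at the address of the queue head, steps through next fields, and ends up pointing at the NULL slot where the new entry belongs, so an empty queue needs no special case. A runnable standalone rendering of the idiom (the entry layout is illustrative):

#include <stdio.h>
#include <stdlib.h>

struct entry {
        int msg;
        struct entry *next;
};

/* Append without special-casing an empty list: walk pointers-to-pointers
 * until *mqp is the NULL slot at the tail, then store the new node there. */
static void append(struct entry **head, struct entry *mq)
{
        struct entry **mqp = head;

        while (*mqp)
                mqp = &(*mqp)->next;
        *mqp = mq;
}

int main(void)
{
        struct entry *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct entry *e = malloc(sizeof(*e));

                if (!e)
                        return 1;
                e->msg = i;
                e->next = NULL;
                append(&head, e);
        }
        for (struct entry *p = head; p; p = p->next)
                printf("%d\n", p->msg);         /* 0 1 2: insertion order kept */
        return 0;
}
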
/drivers/infiniband/hw/mlx4/
main.c
1238 struct mlx4_ib_qp *mqp = to_mqp(ibqp); in add_gid_entry() local
1247 if (mlx4_ib_add_mc(mdev, mqp, gid)) { in add_gid_entry()
1248 ge->port = mqp->port; in add_gid_entry()
1252 mutex_lock(&mqp->mutex); in add_gid_entry()
1253 list_add_tail(&ge->list, &mqp->gid_list); in add_gid_entry()
1254 mutex_unlock(&mqp->mutex); in add_gid_entry()
1275 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, in mlx4_ib_add_mc() argument
1281 if (!mqp->port) in mlx4_ib_add_mc()
1285 ndev = mdev->iboe.netdevs[mqp->port - 1]; in mlx4_ib_add_mc()
1718 struct mlx4_ib_qp *mqp = to_mqp(ibqp); in mlx4_ib_mcg_attach() local
[all …]
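
add_gid_entry() records each attached multicast GID on a per-QP list, holding mqp->mutex around the list_add_tail() so concurrent attach and detach calls cannot corrupt gid_list. A userspace sketch of that locking shape, with pthreads standing in for the kernel mutex and a hand-rolled list for list_add_tail() (all names illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct gid_entry {
        unsigned char gid[16];
        struct gid_entry *next;
};

struct my_qp {
        pthread_mutex_t mutex;          /* plays the role of mqp->mutex */
        struct gid_entry *gid_list;
};

static int add_gid_entry(struct my_qp *qp, const unsigned char gid[16])
{
        struct gid_entry *ge = calloc(1, sizeof(*ge));

        if (!ge)
                return -1;
        memcpy(ge->gid, gid, 16);

        pthread_mutex_lock(&qp->mutex);
        ge->next = qp->gid_list;        /* the driver appends with
                                         * list_add_tail(); a head insert
                                         * keeps this sketch short */
        qp->gid_list = ge;
        pthread_mutex_unlock(&qp->mutex);
        return 0;
}

int main(void)
{
        struct my_qp qp = { .mutex = PTHREAD_MUTEX_INITIALIZER, .gid_list = NULL };
        unsigned char gid[16] = { 0xff, 0x0e }; /* IB multicast GIDs start 0xff */

        return add_gid_entry(&qp, gid);
}
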
qp.c
121 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) in to_msqp() argument
123 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
131 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
132 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
143 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
144 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
150 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
151 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
[all …]
mlx4_ib.h
288 struct mlx4_qp mqp; member
665 static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp) in to_mibqp() argument
667 return container_of(mqp, struct mlx4_ib_qp, mqp); in to_mibqp()
804 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
862 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
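
mlx4_ib.h embeds the core struct mlx4_qp as the mqp member of struct mlx4_ib_qp, and to_mibqp() recovers the outer IB structure from a core pointer via container_of(); to_msqp() in qp.c above and mlx5_ib.h further down use the exact same trick. A runnable userspace rendering of the idiom, with container_of() defined the (simplified) way the kernel defines it and illustrative struct bodies:

#include <stddef.h>
#include <stdio.h>

/* Simplified kernel definition: subtract the member's offset from the
 * member's address to recover the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mlx4_qp { unsigned int qpn; };   /* core, hardware-facing state */

struct mlx4_ib_qp {                     /* IB-layer wrapper */
        int port;
        struct mlx4_qp mqp;             /* embedded core QP: the member */
};

static struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
        return container_of(mqp, struct mlx4_ib_qp, mqp);
}

int main(void)
{
        struct mlx4_ib_qp qp = { .port = 1, .mqp = { .qpn = 0x40 } };
        struct mlx4_qp *core = &qp.mqp; /* what the core layer hands back */

        printf("port %d qpn 0x%x\n", to_mibqp(core)->port, core->qpn);
        return 0;
}
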
cq.c
664 struct mlx4_qp *mqp; in mlx4_ib_poll_one() local
718 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { in mlx4_ib_poll_one()
724 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
726 if (unlikely(!mqp)) { in mlx4_ib_poll_one()
732 *cur_qp = to_mibqp(mqp); in mlx4_ib_poll_one()
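
mlx4_ib_poll_one() caches the QP of the previous completion in *cur_qp and only calls __mlx4_qp_lookup() when the QPN carried in the current CQE differs, since consecutive CQEs frequently belong to the same queue; mlx5_poll_one() at the end of this listing uses the same scheme. A sketch of that caching, with a toy array standing in for the core's QPN lookup (names illustrative):

#include <stdio.h>

struct qp { unsigned int qpn; };

static struct qp qp_table[4];           /* stand-in for __mlx4_qp_lookup() */

static struct qp *qp_lookup(unsigned int qpn)
{
        return qpn < 4 ? &qp_table[qpn] : NULL;
}

/* Resolve the QP for one completion, reusing the previous CQE's QP when
 * the QPN matches: the common case of back-to-back completions. */
static int poll_one(unsigned int cqe_qpn, struct qp **cur_qp)
{
        if (!*cur_qp || (*cur_qp)->qpn != cqe_qpn) {
                struct qp *qp = qp_lookup(cqe_qpn);

                if (!qp)
                        return -1;      /* stale CQE for a destroyed QP */
                *cur_qp = qp;
        }
        return 0;
}

int main(void)
{
        struct qp *cur = NULL;

        for (unsigned int i = 0; i < 4; i++)
                qp_table[i].qpn = i;
        poll_one(2, &cur);              /* miss: looks up QPN 2 */
        poll_one(2, &cur);              /* hit: reuses the cached QP */
        printf("cached qpn %u\n", cur->qpn);
        return 0;
}
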
/drivers/infiniband/hw/mlx5/
odp.c
158 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, in mlx5_ib_page_fault_resume()
163 qp->mqp.qpn); in mlx5_ib_page_fault_resume()
404 wqe_index, qp->mqp.qpn); in mlx5_ib_mr_initiator_pfault_handler()
414 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
421 if (qp->mqp.qpn != ctrl_qpn) { in mlx5_ib_mr_initiator_pfault_handler()
423 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
552 -ret, wqe_index, qp->mqp.qpn); in mlx5_ib_mr_wqe_pfault_handler()
589 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); in mlx5_ib_mr_wqe_pfault_handler()
756 qp->mqp.pfault_handler = mlx5_ib_pfault_handler; in mlx5_ib_odp_create_qp()
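
The ODP code wires a per-QP callback at create time (qp->mqp.pfault_handler = mlx5_ib_pfault_handler, line 756) and, after servicing a fault, calls mlx5_core_page_fault_resume() with the faulting qpn and a resume-with-error flag. A generic, runnable sketch of that register-then-dispatch shape; none of these names are the mlx5 API:

#include <stdio.h>

struct core_qp;
typedef void (*pfault_handler_t)(struct core_qp *qp, unsigned long addr);

struct core_qp {
        unsigned int qpn;
        pfault_handler_t pfault_handler;        /* set by the upper layer */
};

/* Upper-layer handler: resolve the fault, then resume the queue.
 * resume_with_error would make the device complete the WQE in error. */
static void my_pfault_handler(struct core_qp *qp, unsigned long addr)
{
        int resume_with_error = 0;              /* set if the address is bad */

        printf("fault at 0x%lx on QP 0x%x, resume_with_error=%d\n",
               addr, qp->qpn, resume_with_error);
}

/* Core layer: dispatch a fault event to whoever registered. */
static void core_dispatch_pfault(struct core_qp *qp, unsigned long addr)
{
        if (qp->pfault_handler)
                qp->pfault_handler(qp, addr);
}

int main(void)
{
        struct core_qp qp = { .qpn = 0x53 };

        qp.pfault_handler = my_pfault_handler;  /* as in mlx5_ib_odp_create_qp() */
        core_dispatch_pfault(&qp, 0x7f00);
        return 0;
}
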
qp.c
1029 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); in create_qp_common()
1040 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1042 qp->mqp.event = mlx5_ib_qp_event; in create_qp_common()
1166 MLX5_QP_STATE_RST, in, 0, &qp->mqp)) in destroy_qp_common()
1168 qp->mqp.qpn); in destroy_qp_common()
1175 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1178 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1182 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp); in destroy_qp_common()
1184 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); in destroy_qp_common()
1283 qp->ibqp.qp_num = qp->mqp.qpn; in mlx5_ib_create_qp()
[all …]
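
destroy_qp_common() tears a QP down in a fixed order: modify it to RST (warning on failure), scrub its stale CQEs out of the receive and send CQs with __mlx5_ib_cq_clean(), and only then release the QPN with mlx5_core_destroy_qp(), so a recycled QPN can never match a completion still sitting in a CQ. A runnable sketch of that ordering with illustrative stand-in types:

#include <stdio.h>

struct cq { int id; };
struct qp { unsigned int qpn; struct cq *send_cq, *recv_cq; };

static int modify_qp_to_reset(struct qp *qp)
{
        printf("QP 0x%x -> RST\n", qp->qpn);    /* step 1: quiesce the queue */
        return 0;
}

static void cq_clean(struct cq *cq, unsigned int qpn)
{
        printf("scrub CQ %d of CQEs for QPN 0x%x\n", cq->id, qpn);
}

static void destroy_qp_core(struct qp *qp)
{
        printf("release QPN 0x%x\n", qp->qpn);  /* step 3: free the QPN */
}

/* Teardown in the order destroy_qp_common() uses: reset, clean both CQs
 * (once if send and receive share a CQ), then destroy. */
static void destroy_qp(struct qp *qp)
{
        if (modify_qp_to_reset(qp))
                fprintf(stderr, "failed to reset QP 0x%x\n", qp->qpn);
        cq_clean(qp->recv_cq, qp->qpn);         /* step 2 */
        if (qp->send_cq != qp->recv_cq)
                cq_clean(qp->send_cq, qp->qpn);
        destroy_qp_core(qp);
}

int main(void)
{
        struct cq scq = { .id = 0 }, rcq = { .id = 1 };
        struct qp qp = { .qpn = 0x2a, .send_cq = &scq, .recv_cq = &rcq };

        destroy_qp(&qp);
        return 0;
}
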
mlx5_ib.h
176 struct mlx5_core_qp mqp; member
456 static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) in to_mibqp() argument
458 return container_of(mqp, struct mlx5_ib_qp, mqp); in to_mibqp()
cq.c
417 struct mlx5_core_qp *mqp; in mlx5_poll_one() local
461 mqp = __mlx5_qp_lookup(dev->mdev, qpn); in mlx5_poll_one()
462 if (unlikely(!mqp)) { in mlx5_poll_one()
468 *cur_qp = to_mibqp(mqp); in mlx5_poll_one()