/drivers/infiniband/hw/hfi1/

  qp.c
    163  struct ib_qp *ibqp = &qp->ibqp;  in hfi1_check_modify_qp() local
    164  struct hfi1_ibdev *dev = to_idev(ibqp->device);  in hfi1_check_modify_qp()
    169  sc = ah_to_sc(ibqp->device, &attr->ah_attr);  in hfi1_check_modify_qp()
    182  sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);  in hfi1_check_modify_qp()
    209  hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);  in qp_set_16b()
    217  ibp = to_iport(qp->ibqp.device, qp->port_num);  in qp_set_16b()
    225  struct ib_qp *ibqp = &qp->ibqp;  in hfi1_modify_qp() local
    229  priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);  in hfi1_modify_qp()
    239  priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);  in hfi1_modify_qp()
    265  struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);  in hfi1_setup_wqe()
    [all …]

  ud.c
    33  struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);  in ud_loopback()
    47  qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,  in ud_loopback()
    55  sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?  in ud_loopback()
    56  IB_QPT_UD : sqp->ibqp.qp_type;  in ud_loopback()
    57  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?  in ud_loopback()
    58  IB_QPT_UD : qp->ibqp.qp_type;  in ud_loopback()
    69  if (qp->ibqp.qp_num > 1) {  in ud_loopback()
    82  sqp->ibqp.qp_num, qp->ibqp.qp_num,  in ud_loopback()
    93  if (qp->ibqp.qp_num) {  in ud_loopback()
    131  if (qp->ibqp.qp_num == 0)  in ud_loopback()
    [all …]

  trace_tid.h
    198  DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
    207  DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
    208  __entry->qpn = qp->ibqp.qp_num;
    258  DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
    265  DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
    266  __entry->qpn = qp->ibqp.qp_num;
    305  DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
    317  DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
    318  __entry->qpn = qp->ibqp.qp_num;
    360  __entry->qpn = qp ? qp->ibqp.qp_num : 0;
    [all …]

  trace_rc.h
    21  DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
    32  DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
    33  __entry->qpn = qp->ibqp.qp_num;
    83  DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
    92  DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
    93  __entry->qpn = qp->ibqp.qp_num;
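The ud_loopback() hits above (and qib_ud_loopback() further down) normalize IB_QPT_GSI to IB_QPT_UD before comparing the source and destination QP types, since a GSI QP carries UD traffic. A minimal sketch of that check, assuming a hypothetical helper name and working directly on struct ib_qp rather than the drivers' rvt_qp wrappers:

    #include <rdma/ib_verbs.h>

    /* GSI QPs speak the UD protocol, so fold them into IB_QPT_UD
     * before checking that sender and receiver QP types agree. */
    static bool demo_ud_types_match(const struct ib_qp *sqp,
                                    const struct ib_qp *qp)
    {
        enum ib_qp_type sqptype = sqp->qp_type == IB_QPT_GSI ?
                                  IB_QPT_UD : sqp->qp_type;
        enum ib_qp_type dqptype = qp->qp_type == IB_QPT_GSI ?
                                  IB_QPT_UD : qp->qp_type;

        return sqptype == dqptype;
    }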
/drivers/infiniband/sw/rdmavt/

  qp.c
    426  struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);  in rvt_free_qp_cb()
    429  rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);  in rvt_free_qp_cb()
    582  struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);  in rvt_clear_mr_refs()
    666  struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);  in rvt_qp_acks_has_lkey()
    692  if (qp->ibqp.qp_type == IB_QPT_SMI ||  in rvt_qp_mr_clean()
    693  qp->ibqp.qp_type == IB_QPT_GSI)  in rvt_qp_mr_clean()
    714  ev.device = qp->ibqp.device;  in rvt_qp_mr_clean()
    715  ev.element.qp = &qp->ibqp;  in rvt_qp_mr_clean()
    717  qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);  in rvt_qp_mr_clean()
    732  u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);  in rvt_remove_qp()
    [all …]

  qp.h
    13  int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
    15  int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    17  int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
    18  int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    20  int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    22  int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,

  trace_qp.h
    21  RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
    26  RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
    27  __entry->qpn = qp->ibqp.qp_num;
    51  RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
    58  RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
    59  __entry->qpn = qp->ibqp.qp_num;

  trace_tx.h
    52  RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
    72  RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
    75  __entry->qpn = qp->ibqp.qp_num;
    76  __entry->qpt = qp->ibqp.qp_type;
    119  RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
    131  RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
    134  __entry->qpn = qp->ibqp.qp_num;
    135  __entry->qpt = qp->ibqp.qp_type;

  mcast.c
    238  int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in rvt_attach_mcast() argument
    240  struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);  in rvt_attach_mcast()
    241  struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);  in rvt_attach_mcast()
    247  if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)  in rvt_attach_mcast()
    301  int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in rvt_detach_mcast() argument
    303  struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);  in rvt_detach_mcast()
    304  struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);  in rvt_detach_mcast()
    312  if (ibqp->qp_num <= 1)  in rvt_detach_mcast()
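The rvt_remove_qp() hit (line 732 of qp.c above) buckets a QP by hashing qp->ibqp.qp_num with hash_32(). A kernel-context sketch of that lookup idiom; the demo_qp_table struct is hypothetical and only stands in for the real table in rdi->qp_dev:

    #include <linux/hash.h>
    #include <linux/types.h>

    /* Hypothetical, simplified QP table: an array of list heads indexed
     * by a hash of the QP number (the real rdmavt table is RCU-managed). */
    struct demo_qp_table {
        unsigned int qp_table_bits;
        struct hlist_head *buckets;    /* 1 << qp_table_bits entries */
    };

    static struct hlist_head *demo_qp_bucket(struct demo_qp_table *tbl, u32 qpn)
    {
        /* hash_32() spreads the QPN over the table size */
        u32 n = hash_32(qpn, tbl->qp_table_bits);

        return &tbl->buckets[n];
    }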
/drivers/infiniband/hw/mlx5/

  wr.h
    104  int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
    106  int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    109  static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,  in mlx5_ib_post_send_nodrain() argument
    113  return mlx5_ib_post_send(ibqp, wr, bad_wr, false);  in mlx5_ib_post_send_nodrain()
    116  static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,  in mlx5_ib_post_send_drain() argument
    120  return mlx5_ib_post_send(ibqp, wr, bad_wr, true);  in mlx5_ib_post_send_drain()
    123  static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,  in mlx5_ib_post_recv_nodrain() argument
    127  return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);  in mlx5_ib_post_recv_nodrain()
    130  static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,  in mlx5_ib_post_recv_drain() argument
    134  return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);  in mlx5_ib_post_recv_drain()
/drivers/infiniband/hw/mana/

  qp.c
    95  static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,  in mana_ib_create_qp_rss() argument
    99  struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);  in mana_ib_create_qp_rss()
    256  static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,  in mana_ib_create_qp_raw() argument
    261  struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);  in mana_ib_create_qp_raw()
    407  int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,  in mana_ib_create_qp() argument
    414  return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,  in mana_ib_create_qp()
    417  return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);  in mana_ib_create_qp()
    420  ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",  in mana_ib_create_qp()
    427  int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,  in mana_ib_modify_qp() argument
    439  container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);  in mana_ib_destroy_qp_rss()
    [all …]
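mana_ib_create_qp() above fans out by QP type, handing RSS and raw-packet QPs to the two helpers shown and rejecting anything else after an ibdev_dbg(). A hedged sketch of that dispatch; the demo_* names are stand-ins, the RSS-vs-raw test on attr->rwq_ind_tbl is an assumption inferred from the helpers' names, and the exact errno is illustrative:

    #include <rdma/ib_verbs.h>

    /* Stand-ins for the two per-type helpers shown in the listing
     * (mana_ib_create_qp_rss() / mana_ib_create_qp_raw()). */
    int demo_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
                           struct ib_qp_init_attr *attr, struct ib_udata *udata);
    int demo_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *pd,
                           struct ib_qp_init_attr *attr, struct ib_udata *udata);

    int demo_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                       struct ib_udata *udata)
    {
        switch (attr->qp_type) {
        case IB_QPT_RAW_PACKET:
            /* an indirection table suggests the RSS flavour (assumption) */
            if (attr->rwq_ind_tbl)
                return demo_create_qp_rss(ibqp, ibqp->pd, attr, udata);
            return demo_create_qp_raw(ibqp, ibqp->pd, attr, udata);
        default:
            ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
                      attr->qp_type);
            return -EOPNOTSUPP;    /* exact errno is illustrative */
        }
    }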
/drivers/infiniband/hw/qib/

  qib_ud.c
    53  struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);  in qib_ud_loopback()
    73  sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?  in qib_ud_loopback()
    74  IB_QPT_UD : sqp->ibqp.qp_type;  in qib_ud_loopback()
    75  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?  in qib_ud_loopback()
    76  IB_QPT_UD : qp->ibqp.qp_type;  in qib_ud_loopback()
    87  if (qp->ibqp.qp_num > 1) {  in qib_ud_loopback()
    99  sqp->ibqp.qp_num, qp->ibqp.qp_num,  in qib_ud_loopback()
    111  if (qp->ibqp.qp_num) {  in qib_ud_loopback()
    149  if (qp->ibqp.qp_num == 0)  in qib_ud_loopback()
    204  wc.qp = &qp->ibqp;  in qib_ud_loopback()
    [all …]

  qib_ruc.c
    53  ev.device = qp->ibqp.device;  in qib_migrate_qp()
    54  ev.element.qp = &qp->ibqp;  in qib_migrate_qp()
    56  qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);  in qib_migrate_qp()
    114  0, qp->ibqp.qp_num,  in qib_ruc_check_hdr()
    153  0, qp->ibqp.qp_num,  in qib_ruc_check_hdr()
    210  struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);  in qib_make_ruc_header()
    266  struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);  in qib_do_send()
    271  if ((qp->ibqp.qp_type == IB_QPT_RC ||  in qib_do_send()
    272  qp->ibqp.qp_type == IB_QPT_UC) &&  in qib_do_send()
    279  if (qp->ibqp.qp_type == IB_QPT_RC)  in qib_do_send()
    [all …]
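qib_migrate_qp() above, like mlx4_ib_handle_qp_event(), hns_roce_ib_qp_event() and cxgb4's post_qp_event() later in the listing, reports an asynchronous QP event by filling a struct ib_event around the embedded ibqp and calling the consumer's registered handler. A minimal sketch of that pattern; the surrounding demo_qp struct is hypothetical:

    #include <rdma/ib_verbs.h>

    /* Hypothetical driver QP with the usual embedded ib_qp. */
    struct demo_qp {
        struct ib_qp ibqp;
    };

    /* Deliver an async event (e.g. IB_EVENT_PATH_MIG) to whoever owns the QP. */
    static void demo_qp_notify(struct demo_qp *qp, enum ib_event_type type)
    {
        struct ib_event ev;

        if (!qp->ibqp.event_handler)
            return;

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = type;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
    }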
/drivers/infiniband/hw/vmw_pvrdma/

  pvrdma_qp.c
    61  *send_cq = to_vcq(qp->ibqp.send_cq);  in get_cqs()
    62  *recv_cq = to_vcq(qp->ibqp.recv_cq);  in get_cqs()
    191  int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,  in pvrdma_create_qp() argument
    194  struct pvrdma_qp *qp = to_vqp(ibqp);  in pvrdma_create_qp()
    195  struct pvrdma_dev *dev = to_vdev(ibqp->device);  in pvrdma_create_qp()
    234  init_attr->port_num > ibqp->device->phys_port_cnt) {  in pvrdma_create_qp()
    271  qp->rumem = ib_umem_get(ibqp->device,  in pvrdma_create_qp()
    284  qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,  in pvrdma_create_qp()
    302  ret = pvrdma_set_sq_size(to_vdev(ibqp->device),  in pvrdma_create_qp()
    307  ret = pvrdma_set_rq_size(to_vdev(ibqp->device),  in pvrdma_create_qp()
    [all …]
/drivers/infiniband/hw/mthca/

  mthca_qp.c
    261  event.element.qp = &qp->ibqp;  in mthca_qp_event()
    262  if (qp->ibqp.event_handler)  in mthca_qp_event()
    263  qp->ibqp.event_handler(&event, qp->ibqp.qp_context);  in mthca_qp_event()
    430  int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,  in mthca_query_qp() argument
    433  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_query_qp()
    434  struct mthca_qp *qp = to_mqp(ibqp);  in mthca_query_qp()
    556  static int __mthca_modify_qp(struct ib_qp *ibqp,  in __mthca_modify_qp() argument
    562  struct mthca_dev *dev = to_mdev(ibqp->device);  in __mthca_modify_qp()
    563  struct mthca_qp *qp = to_mqp(ibqp);  in __mthca_modify_qp()
    626  if (qp->ibqp.uobject)  in __mthca_modify_qp()
    [all …]

  mthca_mcg.c
    120  int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mthca_multicast_attach() argument
    122  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_multicast_attach()
    165  if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) {  in mthca_multicast_attach()
    167  ibqp->qp_num);  in mthca_multicast_attach()
    171  mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31));  in mthca_multicast_attach()
    214  int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)  in mthca_multicast_detach() argument
    216  struct mthca_dev *dev = to_mdev(ibqp->device);  in mthca_multicast_detach()
    242  if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31)))  in mthca_multicast_detach()
    249  mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num);  in mthca_multicast_detach()
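mthca_multicast_attach()/detach() above match group members against cpu_to_be32(ibqp->qp_num | (1 << 31)); judging only from the listed lines, the MGM entry stores QPNs big-endian with bit 31 apparently marking a slot as occupied. A userspace sketch of that encoding, with htobe32() standing in for the kernel's cpu_to_be32() and the valid-bit interpretation treated as an assumption:

    #include <endian.h>
    #include <stdint.h>

    #define MGM_QP_VALID (1u << 31)    /* top bit: slot holds a member (assumed) */

    /* Encode a QPN the way the listed mthca code writes it into mgm->qp[]. */
    static inline uint32_t mgm_encode_qpn(uint32_t qpn)
    {
        return htobe32(qpn | MGM_QP_VALID);
    }

    /* Scan an MGM entry's qp[] array for a QPN; returns the slot or -1. */
    static int mgm_find_qpn(const uint32_t *slots, int nslots, uint32_t qpn)
    {
        uint32_t needle = mgm_encode_qpn(qpn);

        for (int i = 0; i < nslots; i++)
            if (slots[i] == needle)
                return i;
        return -1;
    }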
/drivers/infiniband/hw/erdma/

  erdma_verbs.h
    218  struct ib_qp ibqp;  member
    271  #define QP_ID(qp) ((qp)->ibqp.qp_num)
    307  return container_of(qp, struct erdma_qp, ibqp);  in to_eqp()
    335  int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
    337  int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
    339  int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
    341  int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
    350  void erdma_qp_get_ref(struct ib_qp *ibqp);
    351  void erdma_qp_put_ref(struct ib_qp *ibqp);
    353  int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
    [all …]
/drivers/infiniband/hw/mlx4/

  qp.c
    215  struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;  in mlx4_ib_handle_qp_event() local
    218  event.device = ibqp->device;  in mlx4_ib_handle_qp_event()
    219  event.element.qp = ibqp;  in mlx4_ib_handle_qp_event()
    252  ibqp->event_handler(&event, ibqp->qp_context);  in mlx4_ib_handle_qp_event()
    261  struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;  in mlx4_ib_qp_event() local
    267  if (!ibqp->event_handler)  in mlx4_ib_qp_event()
    774  qp->ibqp.qp_num = qp->mqp.qpn;  in _mlx4_ib_create_qp_rss()
    1344  if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)  in get_pd()
    1345  return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);  in get_pd()
    1347  return to_mpd(qp->ibqp.pd);  in get_pd()
    [all …]
/drivers/infiniband/hw/hns/

  hns_roce_hw_v2.c
    194  struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;  in fill_ext_sge_inl_data()
    270  struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in check_inl_data_len()
    287  struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in set_rc_inl()
    328  static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,  in set_rwqe_data_seg() argument
    335  struct hns_roce_qp *qp = to_hr_qp(ibqp);  in set_rwqe_data_seg()
    491  hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);  in set_ud_wqe()
    573  struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in set_rc_wqe()
    603  ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,  in set_rc_wqe()
    693  static int hns_roce_v2_post_send(struct ib_qp *ibqp,  in hns_roce_v2_post_send() argument
    697  struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);  in hns_roce_v2_post_send()
    [all …]

  hns_roce_qp.c
    57  ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);  in flush_work_handle()
    131  struct ib_qp *ibqp = &hr_qp->ibqp;  in hns_roce_ib_qp_event() local
    134  if (ibqp->event_handler) {  in hns_roce_ib_qp_event()
    135  event.device = ibqp->device;  in hns_roce_ib_qp_event()
    136  event.element.qp = ibqp;  in hns_roce_ib_qp_event()
    165  dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",  in hns_roce_ib_qp_event()
    169  ibqp->event_handler(&event, ibqp->qp_context);  in hns_roce_ib_qp_event()
    235  if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {  in alloc_qpn()
    374  if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)  in hns_roce_qp_remove()
    377  if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&  in hns_roce_qp_remove()
    [all …]
/drivers/infiniband/hw/usnic/

  usnic_ib_qp_grp.h
    49  struct ib_qp ibqp;  member
    105  struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp)  in to_uqp_grp() argument
    107  return container_of(ibqp, struct usnic_ib_qp_grp, ibqp);  in to_uqp_grp()
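to_uqp_grp() above, like erdma's to_eqp() and the container_of() calls in the mana and hns hits, is how these drivers get from the core's struct ib_qp pointer back to their private QP object: embed a struct ib_qp member and convert with container_of(). A self-contained userspace sketch of the idiom; ib_qp_stub and demo_driver_qp are illustrative stand-ins, and container_of() is spelled out by hand:

    #include <stddef.h>

    /* Userspace stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_qp_stub {            /* stands in for struct ib_qp */
        unsigned int qp_num;
    };

    struct demo_driver_qp {        /* driver-private QP with ib_qp embedded */
        int hw_qpn;
        struct ib_qp_stub ibqp;
    };

    /* Recover the driver QP from the ib_qp pointer passed into a verb. */
    static struct demo_driver_qp *to_demo_qp(struct ib_qp_stub *ibqp)
    {
        return container_of(ibqp, struct demo_driver_qp, ibqp);
    }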
/drivers/infiniband/sw/rxe/

  rxe_comp.c
    230  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in check_ack()
    413  wc->qp = &qp->ibqp;  in make_send_cqe()
    417  uwc->qp_num = qp->ibqp.qp_num;  in make_send_cqe()
    451  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in do_complete()
    493  if (qp->ibqp.event_handler) {  in comp_check_sq_drain_done()
    496  ev.device = qp->ibqp.device;  in comp_check_sq_drain_done()
    497  ev.element.qp = &qp->ibqp;  in comp_check_sq_drain_done()
    499  qp->ibqp.event_handler(&ev,  in comp_check_sq_drain_done()
    500  qp->ibqp.qp_context);  in comp_check_sq_drain_done()
    561  ib_device_put(qp->ibqp.device);  in drain_resp_pkts()
    [all …]

  rxe_loc.h
    38  int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
    39  int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
    118  return qp->ibqp.qp_num;  in qp_num()
    123  return qp->ibqp.qp_type;  in qp_type()
    133  if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)  in qp_mtu()
    180  return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];  in wr_opcode_mask()

  rxe_mcast.c
    297  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in __rxe_init_mca()
    433  int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)  in rxe_attach_mcast() argument
    436  struct rxe_dev *rxe = to_rdev(ibqp->device);  in rxe_attach_mcast()
    437  struct rxe_qp *qp = to_rqp(ibqp);  in rxe_attach_mcast()
    464  int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)  in rxe_detach_mcast() argument
    466  struct rxe_dev *rxe = to_rdev(ibqp->device);  in rxe_detach_mcast()
    467  struct rxe_qp *qp = to_rqp(ibqp);  in rxe_detach_mcast()
/drivers/infiniband/hw/cxgb4/

  ev.c
    109  event.element.qp = &qhp->ibqp;  in post_qp_event()
    110  if (qhp->ibqp.event_handler)  in post_qp_event()
    111  (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);  in post_qp_event()
    153  c4iw_qp_add_ref(&qhp->ibqp);  in c4iw_ev_dispatch()
    217  c4iw_qp_rem_ref(&qhp->ibqp);  in c4iw_ev_dispatch()