/drivers/net/mlx4/ |
D | qp.c |
     44  void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) in mlx4_qp_event() argument
     51  qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
     58  mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); in mlx4_qp_event()
    121  return mlx4_cmd(dev, 0, qp->qpn, 2, in mlx4_qp_modify()
    139  cpu_to_be32(qp->qpn); in mlx4_qp_modify()
    141  ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), in mlx4_qp_modify()
    154  int qpn; in mlx4_qp_reserve_range() local
    156  qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); in mlx4_qp_reserve_range()
    157  if (qpn == -1) in mlx4_qp_reserve_range()
    160  *base = qpn; in mlx4_qp_reserve_range()
    [all …]
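The mlx4_qp_reserve_range() hits at lines 154-160 hand QPNs out of a driver-wide bitmap (mlx4_bitmap_alloc_range()) and return the base through a pointer. Below is a minimal userspace sketch of that reservation pattern; the qpn_bitmap type and helper names are hypothetical stand-ins, not the driver's mlx4_bitmap API.

#include <string.h>

#define QPN_POOL_SIZE 256

struct qpn_bitmap {
	unsigned char used[QPN_POOL_SIZE];   /* 1 = QPN already handed out */
};

/* Find `cnt` consecutive free QPNs starting at an `align`-aligned base. */
static int qpn_bitmap_alloc_range(struct qpn_bitmap *bm, int cnt, int align)
{
	int base, i;

	for (base = 0; base + cnt <= QPN_POOL_SIZE; base += align) {
		for (i = 0; i < cnt && !bm->used[base + i]; i++)
			;
		if (i == cnt) {
			memset(&bm->used[base], 1, cnt);
			return base;
		}
	}
	return -1;
}

/* Mirrors the shape of mlx4_qp_reserve_range(): -1 from the bitmap means failure. */
static int qpn_reserve_range(struct qpn_bitmap *bm, int cnt, int align, int *base)
{
	int qpn = qpn_bitmap_alloc_range(bm, cnt, align);

	if (qpn == -1)
		return -1;
	*base = qpn;
	return 0;
}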
|
D | mcg.c |
    200  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { in mlx4_multicast_attach()
    201  mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); in mlx4_multicast_attach()
    207  mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | in mlx4_multicast_attach()
    210  mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); in mlx4_multicast_attach()
    277  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) in mlx4_multicast_detach()
    281  mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); in mlx4_multicast_detach()
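The multicast attach/detach hits compare only the masked low bits of each mgm->qp[] slot against qp->qpn before adding or reporting a member. A hedged sketch of that membership scan follows; the 24-bit mask value, the entry layout, and the helper names are assumptions modeled on the MGM_QPN_MASK usage above.

#include <stdint.h>

#define MGM_QPN_MASK_SKETCH 0x00ffffff   /* assumed: QPN lives in the low 24 bits */
#define MGM_MAX_QPS         8

struct mgm_entry {
	uint32_t qp[MGM_MAX_QPS];   /* host byte order here for simplicity */
	int members_count;
};

/* Return the member slot holding `qpn`, or -1 if the QP is not attached. */
static int mgm_find_member(const struct mgm_entry *mgm, uint32_t qpn)
{
	int i;

	for (i = 0; i < mgm->members_count; i++)
		if ((mgm->qp[i] & MGM_QPN_MASK_SKETCH) == (qpn & MGM_QPN_MASK_SKETCH))
			return i;
	return -1;
}

/* Attach: append the QPN unless it is already a member of the group. */
static int mgm_attach(struct mgm_entry *mgm, uint32_t qpn)
{
	if (mgm_find_member(mgm, qpn) >= 0)
		return 0;                    /* already a member */
	if (mgm->members_count == MGM_MAX_QPS)
		return -1;
	mgm->qp[mgm->members_count++] = qpn & MGM_QPN_MASK_SKETCH;
	return 0;
}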
|
D | en_resources.c |
     40  int is_tx, int rss, int qpn, int cqn, int srqn, in mlx4_en_fill_qp_context() argument
     55  context->local_qpn = cpu_to_be32(qpn); in mlx4_en_fill_qp_context()
|
D | en_tx.c |
    104  err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); in mlx4_en_create_tx_ring()
    110  err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); in mlx4_en_create_tx_ring()
    112  mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
    119  mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
    137  mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
    141  mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
    167  ring->doorbell_qpn = swab32(ring->qp.qpn << 8); in mlx4_en_activate_tx_ring()
    169  mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
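These en_tx.c hits trace a TX ring's QPN lifetime: reserve one number, allocate the QP on it, release the range again on error or teardown, and precompute doorbell_qpn as the byte-swapped qpn << 8 used by later doorbell writes. The sketch below compresses that ordering; the qp_* helpers and swab32_sketch() are stand-ins for the mlx4 core calls, not the real API.

#include <stdint.h>

struct tx_ring {
	int       qpn;
	void     *qp;
	uint32_t  doorbell_qpn;
};

/* Trivial stand-ins for mlx4_qp_reserve_range()/mlx4_qp_alloc()/mlx4_qp_release_range(). */
static int next_qpn = 64;

static int qp_reserve_range(int cnt, int align, int *base)
{
	(void)align;
	*base = next_qpn;
	next_qpn += cnt;
	return 0;
}

static int qp_alloc(int qpn, void **qp_out)
{
	static int dummy_qp;

	(void)qpn;
	*qp_out = &dummy_qp;
	return 0;
}

static void qp_release_range(int base, int cnt)
{
	(void)base;
	(void)cnt;
}

static uint32_t swab32_sketch(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 24);
}

static int tx_ring_create(struct tx_ring *ring)
{
	int err;

	err = qp_reserve_range(1, 1, &ring->qpn);   /* one QPN, no alignment */
	if (err)
		return err;

	err = qp_alloc(ring->qpn, &ring->qp);
	if (err) {
		qp_release_range(ring->qpn, 1);     /* undo the reservation on failure */
		return err;
	}

	/* The QPN sits in bits 8..31 of the doorbell word, stored byte-swapped. */
	ring->doorbell_qpn = swab32_sketch((uint32_t)ring->qpn << 8);
	return 0;
}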
|
D | en_rx.c |
    935  int qpn, int srqn, int cqn, in mlx4_en_config_rss_qp() argument
    949  err = mlx4_qp_alloc(mdev->dev, qpn, qp); in mlx4_en_config_rss_qp()
    951  mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); in mlx4_en_config_rss_qp()
    958  mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context); in mlx4_en_config_rss_qp()
    980  int i, srqn, qpn, cqn; in mlx4_en_config_rss_steer() local
    996  qpn = rss_map->base_qpn + i; in mlx4_en_config_rss_steer()
    997  err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn, in mlx4_en_config_rss_steer()
|
/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    197  return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
    198  qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
    203  return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
    204  qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
    237  void mthca_qp_event(struct mthca_dev *dev, u32 qpn, in mthca_qp_event() argument
    244  qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
    250  mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); in mthca_qp_event()
    451  err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); in mthca_query_qp()
    620  qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
    761  ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
    [all …]
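is_sqp() and is_qp0() classify a QP purely by where its number falls relative to qp_table.sqp_start: the first two reserved numbers are QP0 (one per port) and the first four cover QP0 plus QP1. A small sketch of those predicates follows, with the table struct reduced to the one field the comparisons use.

#include <stdbool.h>
#include <stdint.h>

struct qp_table_sketch {
	uint32_t sqp_start;   /* first of the four reserved special QPNs */
};

/* Special QPs: sqp_start..sqp_start+1 are QP0, sqp_start+2..+3 are QP1 (one per port). */
static bool is_sqp_sketch(const struct qp_table_sketch *t, uint32_t qpn)
{
	return qpn >= t->sqp_start && qpn <= t->sqp_start + 3;
}

static bool is_qp0_sketch(const struct qp_table_sketch *t, uint32_t qpn)
{
	return qpn >= t->sqp_start && qpn <= t->sqp_start + 1;
}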
|
D | mthca_eq.c |
    142  __be32 qpn; member
    281  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    286  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    291  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    296  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    306  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    311  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    316  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    321  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
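Every mthca_eq_int() hit decodes the QP number from the event queue entry the same way: byte-swap the __be32 field and keep the low 24 bits, since IB QPNs are 24 bits wide. A reduced sketch of that decode; ntohl() stands in for be32_to_cpu() and the eqe struct is trimmed to the field being read.

#include <stdint.h>
#include <arpa/inet.h>   /* ntohl() as a stand-in for be32_to_cpu() */

struct qp_eqe_sketch {
	uint32_t qpn_be;     /* big-endian on the wire, like eqe->event.qp.qpn */
};

/* QPNs are 24 bits wide; the top byte of the dword carries other information. */
static uint32_t eqe_qpn(const struct qp_eqe_sketch *eqe)
{
	return ntohl(eqe->qpn_be) & 0xffffff;
}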
|
D | mthca_mad.c |
    160  int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
    162  struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
    167  send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
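forward_trap() picks the MAD agent by turning the management class into an index: 0 (the QP0/SMI agent) for LID-routed subnet management, 1 (the QP1/GSI agent) for everything else. A sketch of that selection; the class constant is the IB-spec value and the agent table is reduced to the two slots actually indexed.

#include <stdint.h>

#define MGMT_CLASS_SUBN_LID_ROUTED 0x01   /* LID-routed SMP class from the IB spec */

struct mad_agent_sketch { int id; };

/* send_agent[port][0] is the SMI (QP0) agent, [port][1] the GSI (QP1) agent. */
static struct mad_agent_sketch *
pick_agent(struct mad_agent_sketch *send_agent[][2], int port_num, uint8_t mgmt_class)
{
	int qpn = mgmt_class != MGMT_CLASS_SUBN_LID_ROUTED;   /* 0 or 1 */

	return send_agent[port_num - 1][qpn];
}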
|
D | mthca_dev.h |
    504  void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
    525  void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
    555  int qpn,
|
/drivers/infiniband/hw/ipath/ |
D | ipath_qp.c |
    105  u32 i, offset, max_scan, qpn; in alloc_qpn() local
    130  qpn = qpt->last + 1; in alloc_qpn()
    131  if (qpn >= QPN_MAX) in alloc_qpn()
    132  qpn = 2; in alloc_qpn()
    133  offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
    134  map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
    146  qpt->last = qpn; in alloc_qpn()
    147  ret = qpn; in alloc_qpn()
    151  qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
    160  } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); in alloc_qpn()
    [all …]
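alloc_qpn() walks a paged free-bitmap: a candidate QPN is split into a page index (qpn / BITS_PER_PAGE) and a bit offset inside that page, and mk_qpn() rebuilds the number from the page pointer plus offset. The sketch below shows just that index arithmetic; the page size and table sizes are arbitrary assumptions, not the ipath values.

#include <stdint.h>

#define BITS_PER_PAGE_SKETCH       (4096 * 8)              /* assumed bits per page */
#define BITS_PER_PAGE_MASK_SKETCH  (BITS_PER_PAGE_SKETCH - 1)

struct qpn_map_sketch {
	unsigned long bits[BITS_PER_PAGE_SKETCH / (8 * sizeof(unsigned long))];
};

struct qpn_table_sketch {
	struct qpn_map_sketch map[8];   /* a few pages is enough for a sketch */
	uint32_t last;                  /* last QPN handed out */
};

/* Rebuild a QPN from the page that holds it and the bit offset inside it. */
static uint32_t mk_qpn_sketch(const struct qpn_table_sketch *qpt,
			      const struct qpn_map_sketch *map, uint32_t off)
{
	return (uint32_t)(map - qpt->map) * BITS_PER_PAGE_SKETCH + off;
}

/* Split a candidate QPN into (page, offset), as at lines 133-134 above. */
static const struct qpn_map_sketch *
qpn_page(const struct qpn_table_sketch *qpt, uint32_t qpn, uint32_t *off)
{
	*off = qpn & BITS_PER_PAGE_MASK_SKETCH;
	return &qpt->map[qpn / BITS_PER_PAGE_SKETCH];
}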
|
/drivers/infiniband/core/ |
D | cm_msgs.h |
    127  static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) in cm_req_set_local_qpn() argument
    129  req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_req_set_local_qpn()
    524  static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) in cm_rep_set_local_qpn() argument
    526  rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_rep_set_local_qpn()
    627  static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) in cm_dreq_set_remote_qpn() argument
    629  dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_dreq_set_remote_qpn()
    676  static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) in cm_lap_set_remote_qpn() argument
    678  lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_lap_set_remote_qpn()
    812  __be32 qpn) in cm_sidr_rep_set_qpn() argument
    814  sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_sidr_rep_set_qpn()
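All of the cm_*_set_*_qpn() helpers share one encoding: the 24-bit QPN occupies the top three bytes of a 32-bit big-endian word and the low byte belongs to another field that must be preserved. A hedged pack/unpack sketch follows; it takes the QPN in host order and uses a generic field name rather than the offsetNN members above.

#include <stdint.h>
#include <arpa/inet.h>   /* htonl/ntohl standing in for cpu_to_be32/be32_to_cpu */

/* Store a QPN into the top 24 bits of a big-endian dword, keeping the low byte. */
static uint32_t cm_pack_qpn(uint32_t field_be, uint32_t qpn)
{
	uint32_t low = ntohl(field_be) & 0xff;     /* bits shared with another field */

	return htonl((qpn << 8) | low);
}

/* Inverse: pull the 24-bit QPN back out of the packed dword. */
static uint32_t cm_unpack_qpn(uint32_t field_be)
{
	return ntohl(field_be) >> 8;
}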
|
D | agent.c |
     83  int port_num, int qpn) in agent_send_response() argument
    101  agent = port_priv->agent[qpn]; in agent_send_response()
|
D | agent.h | 49 int port_num, int qpn);
|
D | mad.c |
    198  int ret2, qpn; in ib_register_mad_agent() local
    203  qpn = get_spl_qp_index(qp_type); in ib_register_mad_agent()
    204  if (qpn == -1) in ib_register_mad_agent()
    277  mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, in ib_register_mad_agent()
    295  mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
    302  mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
    439  int qpn; in ib_register_mad_snoop() local
    447  qpn = get_spl_qp_index(qp_type); in ib_register_mad_snoop()
    448  if (qpn == -1) { in ib_register_mad_snoop()
    465  mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
    [all …]
|
D | user_mad.c |
    232  packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
    522  be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
    630  if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent()
    661  ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent()
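ib_umad_reg_agent() accepts only qpn 0 or 1 from userspace and maps that directly to the special QP type (SMI for 0, GSI for 1). A sketch of that validation step; the enum values are stand-ins for the kernel's IB_QPT_SMI/IB_QPT_GSI.

#include <errno.h>
#include <stdint.h>

enum qp_type_sketch { QPT_SMI_SKETCH, QPT_GSI_SKETCH };

/* Only the two special QPs may be registered for userspace MAD agents. */
static int umad_qpn_to_qp_type(uint32_t qpn, enum qp_type_sketch *out)
{
	if (qpn != 0 && qpn != 1)
		return -EINVAL;
	*out = qpn ? QPT_GSI_SKETCH : QPT_SMI_SKETCH;
	return 0;
}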
|
/drivers/infiniband/hw/mlx4/ |
D | qp.c |
     94  return qp->mqp.qpn >= dev->dev->caps.sqp_start && in is_sqp()
     95  qp->mqp.qpn <= dev->dev->caps.sqp_start + 3; in is_sqp()
    100  return qp->mqp.qpn >= dev->dev->caps.sqp_start && in is_qp0()
    101  qp->mqp.qpn <= dev->dev->caps.sqp_start + 1; in is_qp0()
    244  "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
    454  int qpn; in create_qp_common() local
    550  qpn = sqpn; in create_qp_common()
    552  err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn); in create_qp_common()
    557  err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
    566  qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
    [all …]
|
D | mad.c |
    200  int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
    202  struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
    206  send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
|
D | cq.c |
    592  (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { in mlx4_ib_poll_one()
    752  void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in __mlx4_ib_cq_clean() argument
    776  if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) { in __mlx4_ib_cq_clean()
    800  void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in mlx4_ib_cq_clean() argument
    803  __mlx4_ib_cq_clean(cq, qpn, srq); in mlx4_ib_cq_clean()
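__mlx4_ib_cq_clean() walks the CQ and discards (or retires to the SRQ) every completion whose vlan_my_qpn matches the QP being destroyed, comparing only the masked 24-bit QPN. The sketch below reduces that to a filtering pass over an array of CQE qpn words; consumer-index fixup and SRQ handling are omitted, and the mask value is an assumption.

#include <stdint.h>
#include <arpa/inet.h>   /* ntohl standing in for be32_to_cpu */

#define CQE_QPN_MASK_SKETCH 0x00ffffff   /* assumed: low 24 bits of vlan_my_qpn */

/*
 * Compact the CQE array in place, keeping only entries that do NOT belong
 * to the QP being cleaned. Returns the new number of valid entries.
 */
static int cq_clean_sketch(uint32_t *cqe_qpn_be, int ncqe, uint32_t qpn)
{
	int i, kept = 0;

	for (i = 0; i < ncqe; i++)
		if ((ntohl(cqe_qpn_be[i]) & CQE_QPN_MASK_SKETCH) != qpn)
			cqe_qpn_be[kept++] = cqe_qpn_be[i];
	return kept;
}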
|
D | mlx4_ib.h |
    268  void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
    269  void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
|
/drivers/infiniband/hw/amso1100/ |
D | c2_qp.c |
    387  c2dev->qp_table.last++, &qp->qpn); in c2_alloc_qpn()
    394  static void c2_free_qpn(struct c2_dev *c2dev, int qpn) in c2_free_qpn() argument
    397  idr_remove(&c2dev->qp_table.idr, qpn); in c2_free_qpn()
    401  struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn) in c2_find_qpn() argument
    407  qp = idr_find(&c2dev->qp_table.idr, qpn); in c2_find_qpn()
    429  qp->ibqp.qp_num = qp->qpn; in c2_alloc_qp()
    567  c2_free_qpn(c2dev, qp->qpn); in c2_alloc_qp()
    610  c2_free_qpn(c2dev, qp->qpn); in c2_free_qp()
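The amso1100 driver takes a different route: QPNs come from an IDR rather than a bitmap, so c2_alloc_qpn(), c2_find_qpn() and c2_free_qpn() are thin wrappers around the kernel IDR (idr_find()/idr_remove(), with allocation seeded from qp_table.last). A userspace sketch of the same pattern using a small fixed-size slot table instead of the IDR.

#include <stddef.h>

#define MAX_QPS 64

struct qp_sketch { int qpn; };

struct qp_table_idr_sketch {
	struct qp_sketch *slot[MAX_QPS];   /* index == QPN, NULL == free */
	int last;                          /* hint: where to start searching */
};

/* Allocate the next free QPN at or after `last` and bind `qp` to it. */
static int qpn_alloc(struct qp_table_idr_sketch *t, struct qp_sketch *qp)
{
	int i;

	for (i = 0; i < MAX_QPS; i++) {
		int qpn = (t->last + i) % MAX_QPS;

		if (!t->slot[qpn]) {
			t->slot[qpn] = qp;
			t->last = qpn + 1;
			qp->qpn = qpn;
			return 0;
		}
	}
	return -1;
}

static struct qp_sketch *qpn_find(struct qp_table_idr_sketch *t, int qpn)
{
	return (qpn >= 0 && qpn < MAX_QPS) ? t->slot[qpn] : NULL;
}

static void qpn_free(struct qp_table_idr_sketch *t, int qpn)
{
	if (qpn >= 0 && qpn < MAX_QPS)
		t->slot[qpn] = NULL;
}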
|
D | c2_cm.c |
     48  ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_connect()
    291  ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_accept()
|
D | c2_provider.h | 113 int qpn; member
|
D | c2.h |
    494  extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
    506  extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
|
/drivers/infiniband/ulp/ipoib/ |
D | ipoib_cm.c |
    426  data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep()
   1031  u32 qpn, in ipoib_cm_send_req() argument
   1038  data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_req()
   1043  req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn); in ipoib_cm_send_req()
   1091  static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, in ipoib_cm_tx_init() argument
   1125  ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); in ipoib_cm_tx_init()
   1132  p->qp->qp_num, pathrec->dgid.raw, qpn); in ipoib_cm_tx_init()
   1296  u32 qpn; in ipoib_cm_tx_start() local
   1305  qpn = IPOIB_QPN(neigh->neighbour->ha); in ipoib_cm_tx_start()
   1311  ret = ipoib_cm_tx_init(p, qpn, &pathrec); in ipoib_cm_tx_start()
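In the connected-mode path the destination QPN is recovered from the neighbour's hardware address (IPOIB_QPN()) and then folded into the CM service ID beneath the fixed IETF prefix. The sketch below shows both steps; the 20-byte IPoIB hardware-address layout (flags plus a 24-bit QPN in the first four bytes) and the prefix value are assumptions inferred from the identifiers above.

#include <stdint.h>

#define IPOIB_CM_IETF_ID_SKETCH 0x1000000000000000ULL   /* assumed IETF prefix; see ipoib.h */

/* IPoIB hardware address: byte 0 holds flags, bytes 1-3 hold the 24-bit QPN. */
static uint32_t ipoib_qpn_sketch(const uint8_t *hwaddr)
{
	return ((uint32_t)hwaddr[1] << 16) |
	       ((uint32_t)hwaddr[2] << 8)  |
	        (uint32_t)hwaddr[3];
}

/* CM service ID used for the connect request: IETF prefix | destination QPN. */
static uint64_t ipoib_cm_service_id(uint32_t qpn)
{
	return IPOIB_CM_IETF_ID_SKETCH | qpn;
}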
|
D | ipoib_ib.c |
    491  struct ib_ah *address, u32 qpn, in post_send() argument
    515  priv->tx_wr.wr.ud.remote_qpn = qpn; in post_send()
    530  struct ipoib_ah *address, u32 qpn) in ipoib_send() argument
    561  skb->len, address, qpn); in ipoib_send()
    591  address->ah, qpn, tx_req, phead, hlen))) { in ipoib_send()
|