/drivers/net/ethernet/mellanox/mlx4/ |
D | mcg.c |
    125  u32 qpn) in get_promisc_qp() argument
    136  if (pqp->qpn == qpn) in get_promisc_qp()
    149  unsigned int index, u32 qpn) in new_steering_entry() argument
    176  pqp = get_promisc_qp(dev, port, steer, qpn); in new_steering_entry()
    183  dqp->qpn = qpn; in new_steering_entry()
    209  if (pqp->qpn == qpn) in new_steering_entry()
    218  mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry()
    241  unsigned int index, u32 qpn) in existing_steering_entry() argument
    253  pqp = get_promisc_qp(dev, port, steer, qpn); in existing_steering_entry()
    272  if (qpn == dqp->qpn) in existing_steering_entry()
    [all …]
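Note: the new_steering_entry() fragments show the pattern of appending a promiscuous QP to the multicast group (MGM) member list as a big-endian word with only the QPN bits kept, skipping QPs that are already members. A minimal user-space sketch of that pattern follows; the MGM_QPN_MASK value, structure layout, and capacity are assumptions, not the mlx4 definitions.

    /* Sketch of the member-list update suggested by the fragments above:
     * duplicates (matching qpn) are skipped, new members are stored as
     * big-endian words masked to the QPN bits. */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define MGM_QPN_MASK 0x00ffffffu	/* assumed: QPN lives in the low 24 bits */
    #define MGM_MAX_QPS  8		/* toy member-list capacity */

    struct mgm_sketch { uint32_t qp[MGM_MAX_QPS]; unsigned members_count; };

    static int mgm_add_qp(struct mgm_sketch *mgm, uint32_t qpn)
    {
    	for (unsigned i = 0; i < mgm->members_count; i++)
    		if ((ntohl(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
    			return 0;		/* already a member, nothing to do */

    	if (mgm->members_count == MGM_MAX_QPS)
    		return -1;			/* entry full */

    	mgm->qp[mgm->members_count++] = htonl(qpn & MGM_QPN_MASK);
    	return 0;
    }

    int main(void)
    {
    	struct mgm_sketch mgm = { .members_count = 0 };

    	mgm_add_qp(&mgm, 0x123);
    	mgm_add_qp(&mgm, 0x123);		/* duplicate is skipped */
    	printf("members=%u first=0x%x\n", mgm.members_count, ntohl(mgm.qp[0]));
    	return 0;				/* members=1 first=0x123 */
    }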
|
D | qp.c |
    49   void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) in mlx4_qp_event() argument
    56   qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
    63   mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); in mlx4_qp_event()
    79   *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0()
    81   *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && in is_master_qp0()
    82   qp->qpn <= dev->phys_caps.base_sqpn + 1; in is_master_qp0()
    145  ret = mlx4_cmd(dev, 0, qp->qpn, 2, in __mlx4_qp_modify()
    150  port = (qp->qpn & 1) + 1; in __mlx4_qp_modify()
    174  cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn)); in __mlx4_qp_modify()
    180  cpu_to_be32(qp->qpn); in __mlx4_qp_modify()
    [all …]
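Note: mlx4_qp_event() resolves an asynchronous event to a QP purely by its number and only logs a warning when no QP is registered under that number. A toy sketch of that lookup-and-warn flow, with a flat table standing in for __mlx4_qp_lookup() and the driver's real QP structures:

    /* Lookup-and-warn sketch: the event carries only a QPN, so the handler
     * looks it up and drops the event with a log line if nothing matches. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_QPS 64				/* toy table size */

    struct qp { uint32_t qpn; int event_count; };

    static struct qp *qp_table[NUM_QPS];	/* index = qpn % NUM_QPS */

    static struct qp *qp_lookup(uint32_t qpn)
    {
    	struct qp *qp = qp_table[qpn % NUM_QPS];

    	return (qp && qp->qpn == qpn) ? qp : NULL;
    }

    static void qp_event(uint32_t qpn, int event_type)
    {
    	struct qp *qp = qp_lookup(qpn);

    	if (!qp) {
    		fprintf(stderr, "Async event %d for nonexistent QP %08x\n",
    			event_type, qpn);
    		return;
    	}
    	qp->event_count++;			/* deliver to the QP's handler */
    }

    int main(void)
    {
    	static struct qp q = { .qpn = 0x41 };

    	qp_table[q.qpn % NUM_QPS] = &q;
    	qp_event(0x41, 1);			/* found */
    	qp_event(0x42, 1);			/* logs the warning */
    	printf("events on QP 0x41: %d\n", q.event_count);
    	return 0;
    }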
|
D | en_resources.c |
    41   int is_tx, int rss, int qpn, int cqn, in mlx4_en_fill_qp_context() argument
    63   context->local_qpn = cpu_to_be32(qpn); in mlx4_en_fill_qp_context()
    88   en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); in mlx4_en_fill_qp_context()
    103  ret = mlx4_update_qp(priv->mdev->dev, qp->qpn, in mlx4_en_change_mcast_lb()
|
D | resource_tracker.c |
    224   int qpn; member
    739   u8 slave, u32 qpn) in update_vport_qp_param() argument
    760   if (mlx4_is_qp_reserved(dev, qpn)) in update_vport_qp_param()
    774   err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); in update_vport_qp_param()
    1120  static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) in alloc_fs_rule_tr() argument
    1130  ret->qpn = qpn; in alloc_fs_rule_tr()
    1452  static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, in qp_res_start_move_to() argument
    1462  r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); in qp_res_start_move_to()
    1723  static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) in valid_reserved() argument
    1725  return mlx4_is_qp_reserved(dev, qpn) && in valid_reserved()
    [all …]
|
D | en_rx.c |
    1221  static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, in mlx4_en_config_rss_qp() argument
    1234  err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); in mlx4_en_config_rss_qp()
    1236  en_err(priv, "Failed to allocate qp #%x\n", qpn); in mlx4_en_config_rss_qp()
    1243  qpn, ring->cqn, -1, context); in mlx4_en_config_rss_qp()
    1270  u32 qpn; in mlx4_en_create_drop_qp() local
    1272  err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, in mlx4_en_create_drop_qp()
    1278  err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); in mlx4_en_create_drop_qp()
    1281  mlx4_qp_release_range(priv->mdev->dev, qpn, 1); in mlx4_en_create_drop_qp()
    1290  u32 qpn; in mlx4_en_destroy_drop_qp() local
    1292  qpn = priv->drop_qp.qpn; in mlx4_en_destroy_drop_qp()
    [all …]
|
D | en_netdev.c |
    187  rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; in mlx4_en_filter_work()
    476  int qpn, u64 *reg_id) in mlx4_en_tunnel_steer_add() argument
    484  err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, in mlx4_en_tunnel_steer_add()
    496  unsigned char *mac, int *qpn, u64 *reg_id) in mlx4_en_uc_steer_add() argument
    507  qp.qpn = *qpn; in mlx4_en_uc_steer_add()
    527  rule.qpn = *qpn; in mlx4_en_uc_steer_add()
    548  unsigned char *mac, int qpn, u64 reg_id) in mlx4_en_uc_steer_release() argument
    558  qp.qpn = qpn; in mlx4_en_uc_steer_release()
    580  int *qpn = &priv->base_qpn; in mlx4_en_get_qp() local
    595  *qpn = base_qpn + index; in mlx4_en_get_qp()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/ |
D | qp.c |
    150  int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; in mlx5_eq_pagefault() local
    151  struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); in mlx5_eq_pagefault()
    158  qpn); in mlx5_eq_pagefault()
    185  qpn, pfault.rdma.r_key); in mlx5_eq_pagefault()
    205  qpn, pfault.wqe.wqe_index); in mlx5_eq_pagefault()
    214  eqe->sub_type, qpn); in mlx5_eq_pagefault()
    225  qpn); in mlx5_eq_pagefault()
    245  qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN), in create_qprqsq_common()
    266  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN)); in destroy_qprqsq_common()
    287  qp->qpn = MLX5_GET(create_qp_out, out, qpn); in mlx5_core_create_qp()
    [all …]
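Note: the mlx5_eq_pagefault() fragment extracts the 24-bit QPN from the big-endian flags_qpn word of the event queue entry, and create_qprqsq_common() files the QP under a key of the form (type << index-bits) | qpn. A sketch of both packings, with the mask and shift widths assumed rather than taken from the mlx5 headers:

    /* Sketch of the two packings visible above: flags and QPN share one
     * big-endian word, and resource keys OR a type id above the QPN. */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>	/* ntohl/htonl stand in for be32_to_cpu/cpu_to_be32 */

    #define QPN_MASK	0x00ffffffu	/* assumed 24-bit QPN mask */
    #define USER_INDEX_BITS	24		/* assumed width of the index field */

    int main(void)
    {
    	uint32_t flags_qpn_be = htonl(0xa1000123);	/* flags in the top byte */
    	uint32_t qpn = ntohl(flags_qpn_be) & QPN_MASK;

    	/* key used to file the QP in the resource table, type ORed above it */
    	uint32_t rsc_type = 2;				/* hypothetical type id */
    	uint32_t rsc_key = qpn | (rsc_type << USER_INDEX_BITS);

    	printf("qpn=0x%06x key=0x%08x\n", qpn, rsc_key);	/* 0x000123, 0x02000123 */
    	return 0;
    }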
|
D | mcg.c |
    40  int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_attach_mcg() argument
    47  MLX5_SET(attach_to_mcg_in, in, qpn, qpn); in mlx5_core_attach_mcg()
    54  int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_detach_mcg() argument
    61  MLX5_SET(detach_from_mcg_in, in, qpn, qpn); in mlx5_core_detach_mcg()
|
/drivers/infiniband/hw/hns/ |
D | hns_roce_eq.c |
    65   struct hns_roce_aeqe *aeqe, int qpn) in hns_roce_wq_catas_err_handle() argument
    73   dev_warn(dev, "QP %d, QPC error.\n", qpn); in hns_roce_wq_catas_err_handle()
    76   dev_warn(dev, "QP %d, MTU error.\n", qpn); in hns_roce_wq_catas_err_handle()
    79   dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); in hns_roce_wq_catas_err_handle()
    82   dev_warn(dev, "QP %d, WQE addr error.\n", qpn); in hns_roce_wq_catas_err_handle()
    85   dev_warn(dev, "QP %d, WQE shift error\n", qpn); in hns_roce_wq_catas_err_handle()
    88   dev_warn(dev, "QP %d, SL error.\n", qpn); in hns_roce_wq_catas_err_handle()
    91   dev_warn(dev, "QP %d, port error.\n", qpn); in hns_roce_wq_catas_err_handle()
    100  int qpn) in hns_roce_local_wq_access_err_handle() argument
    108  dev_warn(dev, "QP %d, R_key violation.\n", qpn); in hns_roce_local_wq_access_err_handle()
    [all …]
|
D | hns_roce_qp.c |
    44   void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) in hns_roce_qp_event() argument
    52   qp = __hns_roce_qp_lookup(hr_dev, qpn); in hns_roce_qp_event()
    59   dev_warn(dev, "Async event for bogus QP %08x\n", qpn); in hns_roce_qp_event()
    105  type, hr_qp->qpn); in hns_roce_ib_qp_event()
    140  static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, in hns_roce_gsi_qp_alloc() argument
    146  if (!qpn) in hns_roce_gsi_qp_alloc()
    149  hr_qp->qpn = qpn; in hns_roce_gsi_qp_alloc()
    153  hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); in hns_roce_gsi_qp_alloc()
    170  static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, in hns_roce_qp_alloc() argument
    177  if (!qpn) in hns_roce_qp_alloc()
    [all …]
|
D | hns_roce_hw_v1.c |
    384   RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); in hns_roce_v1_post_recv()
    1282  static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, in __hns_roce_v1_cq_clean() argument
    1304  HNS_ROCE_CQE_QPN_MASK) == qpn) { in __hns_roce_v1_cq_clean()
    1330  static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, in hns_roce_v1_cq_clean() argument
    1334  __hns_roce_v1_cq_clean(hr_cq, qpn, srq); in hns_roce_v1_cq_clean()
    1449  int qpn; in hns_roce_v1_poll_one() local
    1475  qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M, in hns_roce_v1_poll_one()
    1481  qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, in hns_roce_v1_poll_one()
    1485  if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) { in hns_roce_v1_poll_one()
    1486  hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); in hns_roce_v1_poll_one()
    [all …]
|
/drivers/infiniband/sw/rxe/ |
D | rxe_recv.c |
    102  u32 qpn, struct rxe_qp *qp) in check_keys() argument
    111  if (qpn == 1) { in check_keys()
    125  } else if (qpn != 0) { in check_keys()
    137  qpn != 0 && pkt->mask) { in check_keys()
    138  u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey; in check_keys()
    142  deth_qkey(pkt), qkey, qpn); in check_keys()
    219  u32 qpn = bth_qpn(pkt); in hdr_check() local
    228  if (qpn != IB_MULTICAST_QPN) { in hdr_check()
    229  index = (qpn == 0) ? port->qp_smi_index : in hdr_check()
    230  ((qpn == 1) ? port->qp_gsi_index : qpn); in hdr_check()
    [all …]
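Note: check_keys() compares the packet Q_Key against the well-known GSI Q_Key when the destination is QPN 1 and against the QP's configured Q_Key otherwise, with QPN 0 (the SMI) exempt. A self-contained sketch of that check; the struct and error reporting are illustrative, not the rxe types:

    /* Q_Key check sketch: GSI traffic (QPN 1) uses the fixed well-known key,
     * other UD QPs use the key programmed on the destination QP. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define GSI_QKEY 0x80010000u	/* well-known GSI Q_Key */

    struct qp_attr { uint32_t qkey; };

    static bool qkey_ok(uint32_t qpn, uint32_t pkt_qkey, const struct qp_attr *attr)
    {
    	/* QPN 0 (SMI) is not Q_Key checked; QPN 1 uses the fixed GSI key */
    	uint32_t expected = (qpn == 1) ? GSI_QKEY : attr->qkey;

    	if (qpn != 0 && pkt_qkey != expected) {
    		fprintf(stderr, "bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
    			pkt_qkey, expected, qpn);
    		return false;
    	}
    	return true;
    }

    int main(void)
    {
    	struct qp_attr attr = { .qkey = 0x1234 };

    	printf("%d %d\n", qkey_ok(1, GSI_QKEY, &attr),	/* 1: GSI key matches */
    			  qkey_ok(5, 0x9999, &attr));	/* 0: wrong user Q_Key */
    	return 0;
    }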
|
D | rxe_hdr.h |
    85   __be32 qpn; member
    200  return BTH_QPN_MASK & be32_to_cpu(bth->qpn); in __bth_qpn()
    203  static inline void __bth_set_qpn(void *arg, u32 qpn) in __bth_set_qpn() argument
    206  u32 resvqpn = be32_to_cpu(bth->qpn); in __bth_set_qpn()
    208  bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) | in __bth_set_qpn()
    216  return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn); in __bth_fecn()
    224  bth->qpn |= cpu_to_be32(BTH_FECN_MASK); in __bth_set_fecn()
    226  bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK); in __bth_set_fecn()
    233  return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn); in __bth_becn()
    241  bth->qpn |= cpu_to_be32(BTH_BECN_MASK); in __bth_set_becn()
    [all …]
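Note: __bth_qpn()/__bth_set_qpn() show that the 24-bit QPN shares one big-endian BTH word with the FECN/BECN bits, so the setter re-reads the word and preserves everything outside the QPN mask. A user-space sketch of the same accessors, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpu and the mask values assumed, not copied from rxe_hdr.h:

    /* BTH qpn accessors sketch: the setter must keep the reserved and
     * FECN/BECN bits that live in the same 32-bit word as the QPN. */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define QPN_MASK   0x00ffffffu	/* assumed: low 24 bits carry the QPN */
    #define FECN_MASK  0x80000000u	/* assumed: FECN flag in the same word */

    struct bth_sketch { uint32_t qpn_be; };	/* word stored in network order */

    static uint32_t bth_get_qpn(const struct bth_sketch *bth)
    {
    	return QPN_MASK & ntohl(bth->qpn_be);
    }

    static void bth_set_qpn(struct bth_sketch *bth, uint32_t qpn)
    {
    	uint32_t resvqpn = ntohl(bth->qpn_be);	/* current word, host order */

    	/* replace only the QPN bits, keep reserved/FECN/BECN bits */
    	bth->qpn_be = htonl((QPN_MASK & qpn) | (~QPN_MASK & resvqpn));
    }

    int main(void)
    {
    	struct bth_sketch bth = { .qpn_be = htonl(FECN_MASK | 0x000001) };

    	bth_set_qpn(&bth, 0xabcdef);
    	printf("qpn=0x%06x fecn=%d\n", bth_get_qpn(&bth),
    	       !!(ntohl(bth.qpn_be) & FECN_MASK));	/* qpn=0xabcdef fecn=1 */
    	return 0;
    }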
|
/drivers/infiniband/hw/qib/ |
D | qib_qp.c |
    168  u32 i, offset, max_scan, qpn; in qib_alloc_qpn() local
    190  qpn = qpt->last + 2; in qib_alloc_qpn()
    191  if (qpn >= RVT_QPN_MAX) in qib_alloc_qpn()
    192  qpn = 2; in qib_alloc_qpn()
    193  if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues) in qib_alloc_qpn()
    194  qpn = (qpn | qpt_mask) + 2; in qib_alloc_qpn()
    195  offset = qpn & RVT_BITS_PER_PAGE_MASK; in qib_alloc_qpn()
    196  map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; in qib_alloc_qpn()
    206  qpt->last = qpn; in qib_alloc_qpn()
    207  ret = qpn; in qib_alloc_qpn()
    [all …]
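Note: qib_alloc_qpn() scans a bitmap for a free QP number, starting just past the last one handed out and wrapping back above the reserved QPNs 0 and 1. A simplified sketch with a single flat bitmap; the +2 stride, kernel-receive-queue masking, and per-page map structure of the original are omitted:

    /* QPN allocator sketch: skip QPNs 0 and 1 (the special QPs), resume
     * after the last allocation, wrap at the maximum, claim the first
     * clear bit. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QPN_MAX 1024			/* toy upper bound */

    struct qpn_table {
    	uint8_t map[QPN_MAX / 8];		/* one bit per QPN */
    	uint32_t last;				/* last QPN allocated */
    };

    static int qpn_alloc(struct qpn_table *qpt)
    {
    	uint32_t qpn = qpt->last + 1;

    	for (uint32_t scanned = 0; scanned < QPN_MAX; scanned++, qpn++) {
    		if (qpn >= QPN_MAX)
    			qpn = 2;		/* wrap, skipping QPN 0 and 1 */
    		if (!(qpt->map[qpn / 8] & (1u << (qpn % 8)))) {
    			qpt->map[qpn / 8] |= 1u << (qpn % 8);
    			qpt->last = qpn;
    			return (int)qpn;
    		}
    	}
    	return -1;				/* table exhausted */
    }

    int main(void)
    {
    	struct qpn_table qpt;

    	memset(&qpt, 0, sizeof(qpt));
    	qpt.last = 1;
    	printf("%d %d %d\n", qpn_alloc(&qpt), qpn_alloc(&qpt), qpn_alloc(&qpt));
    	return 0;				/* prints: 2 3 4 */
    }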
|
/drivers/infiniband/sw/rdmavt/ |
D | trace.h |
    88   __field(u32, qpn)
    93   __entry->qpn = qp->ibqp.qp_num;
    99   __entry->qpn,
    142  __field(u32, qpn)
    155  __entry->qpn = qp->ibqp.qp_num;
    169  __entry->qpn,
|
D | qp.c |
    305  u32 i, offset, max_scan, qpn; in alloc_qpn() local
    326  qpn = qpt->last + qpt->incr; in alloc_qpn()
    327  if (qpn >= RVT_QPN_MAX) in alloc_qpn()
    328  qpn = qpt->incr | ((qpt->last & 1) ^ 1); in alloc_qpn()
    330  offset = qpn & RVT_BITS_PER_PAGE_MASK; in alloc_qpn()
    331  map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; in alloc_qpn()
    341  qpt->last = qpn; in alloc_qpn()
    342  ret = qpn; in alloc_qpn()
    350  qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
    351  } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); in alloc_qpn()
    [all …]
|
/drivers/infiniband/hw/mlx5/ |
D | odp.c |
    159  u32 qpn = qp->trans_qp.base.mqp.qpn; in mlx5_ib_page_fault_resume() local
    161  qpn, in mlx5_ib_page_fault_resume()
    165  pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn); in mlx5_ib_page_fault_resume()
    396  u32 qpn = qp->trans_qp.base.mqp.qpn; in mlx5_ib_mr_initiator_pfault_handler() local
    407  wqe_index, qpn); in mlx5_ib_mr_initiator_pfault_handler()
    417  wqe_index, qpn, in mlx5_ib_mr_initiator_pfault_handler()
    424  if (qpn != ctrl_qpn) { in mlx5_ib_mr_initiator_pfault_handler()
    426  wqe_index, qpn, in mlx5_ib_mr_initiator_pfault_handler()
    543  u32 qpn = qp->trans_qp.base.mqp.qpn; in mlx5_ib_mr_wqe_pfault_handler() local
    556  -ret, wqe_index, qpn); in mlx5_ib_mr_wqe_pfault_handler()
    [all …]
|
/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    197  return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
    198  qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
    203  return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
    204  qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
    237  void mthca_qp_event(struct mthca_dev *dev, u32 qpn, in mthca_qp_event() argument
    244  qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
    251  event_type, qpn); in mthca_qp_event()
    448  err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); in mthca_query_qp()
    614  qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
    755  ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
    [all …]
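Note: is_sqp()/is_qp0() classify a QP purely by checking whether its number falls in the contiguous block of four special QPs starting at sqp_start, the first two slots (one per port) holding QP0. A sketch of those range tests with a made-up sqp_start value:

    /* Special-QP classification sketch: the four special QPs are laid out
     * contiguously, so the tests reduce to range checks on the QPN. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t sqp_start = 0x1f0;	/* hypothetical base of the block */

    static bool is_sqp(uint32_t qpn)		/* any of the four special QPs */
    {
    	return qpn >= sqp_start && qpn <= sqp_start + 3;
    }

    static bool is_qp0(uint32_t qpn)		/* first two slots are QP0 */
    {
    	return qpn >= sqp_start && qpn <= sqp_start + 1;
    }

    int main(void)
    {
    	for (uint32_t qpn = sqp_start; qpn < sqp_start + 5; qpn++)
    		printf("qpn 0x%x: sqp=%d qp0=%d\n", qpn, is_sqp(qpn), is_qp0(qpn));
    	return 0;
    }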
|
D | mthca_eq.c |
    143  __be32 qpn; member
    282  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    287  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    292  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    297  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    307  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    312  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    317  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
    322  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
|
D | mthca_mad.c |
    166  int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
    168  struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
    173  send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
|
/drivers/infiniband/core/ |
D | cm_msgs.h |
    115  static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) in cm_req_set_local_qpn() argument
    117  req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_req_set_local_qpn()
    523  static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) in cm_rep_set_local_qpn() argument
    525  rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_rep_set_local_qpn()
    643  static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) in cm_dreq_set_remote_qpn() argument
    645  dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_dreq_set_remote_qpn()
    692  static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) in cm_lap_set_remote_qpn() argument
    694  lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_lap_set_remote_qpn()
    829  __be32 qpn) in cm_sidr_rep_set_qpn() argument
    831  sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_sidr_rep_set_qpn()
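Note: every cm_*_set_*_qpn() helper shown here packs the 24-bit QPN into the top three bytes of a 32-bit big-endian message word while leaving the bottom byte, which belongs to neighbouring fields, untouched. A user-space sketch of that pack/unpack, taking the QPN in host order for simplicity; the field and function names are hypothetical:

    /* CM-style QPN packing sketch: QPN in bits 31:8 of a big-endian word,
     * the low byte re-read and preserved. */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    static uint32_t cm_pack_qpn(uint32_t offset_be, uint32_t qpn)
    {
    	/* shift the QPN into bits 31:8, keep bits 7:0 of the existing word */
    	return htonl((qpn << 8) | (ntohl(offset_be) & 0x000000ffu));
    }

    static uint32_t cm_unpack_qpn(uint32_t offset_be)
    {
    	return ntohl(offset_be) >> 8;		/* recover the 24-bit QPN */
    }

    int main(void)
    {
    	uint32_t offset_be = htonl(0x00000055);	/* low byte already in use */

    	offset_be = cm_pack_qpn(offset_be, 0x123456);
    	printf("qpn=0x%06x low_byte=0x%02x\n",
    	       cm_unpack_qpn(offset_be), ntohl(offset_be) & 0xff);
    	return 0;				/* qpn=0x123456 low_byte=0x55 */
    }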
|
D | user_mad.c |
    231  packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
    527  be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
    646  if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent()
    649  ureq.qpn); in ib_umad_reg_agent()
    684  ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent()
    748  if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent2()
    751  ureq.qpn); in ib_umad_reg_agent2()
    800  ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent2()
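Note: ib_umad_reg_agent()/_agent2() accept only QPN 0 or 1 from userspace and use the value to pick the SMI or GSI QP type. A sketch of that validation; the enum is a stand-in for the kernel's ib_qp_type values:

    /* umad registration check sketch: reject anything other than QPN 0
     * (SMI) or QPN 1 (GSI), then map the value to a QP type. */
    #include <stdint.h>
    #include <stdio.h>

    enum qp_type { QPT_SMI, QPT_GSI };

    static int umad_check_qpn(uint32_t qpn, enum qp_type *type)
    {
    	if (qpn != 0 && qpn != 1) {
    		fprintf(stderr, "invalid QPN %u specified\n", qpn);
    		return -1;			/* the kernel returns -EINVAL here */
    	}
    	*type = qpn ? QPT_GSI : QPT_SMI;
    	return 0;
    }

    int main(void)
    {
    	enum qp_type t;

    	printf("%d\n", umad_check_qpn(1, &t));	/* 0: registers on the GSI */
    	printf("%d\n", umad_check_qpn(7, &t));	/* -1: rejected */
    	return 0;
    }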
|
/drivers/infiniband/hw/hfi1/ |
D | trace_rc.h |
    63  __field(u32, qpn)
    74  __entry->qpn = qp->ibqp.qp_num;
    86  __entry->qpn,
|
D | trace_ibhdrs.h |
    98   __field(u32, qpn)
    147  __entry->qpn =
    177  __entry->qpn,
|
/drivers/infiniband/hw/mlx4/ |
D | qp.c |
    133  return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
    134  qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
    145  qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
    146  qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
    152  if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
    153  qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
    173  qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
    174  qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
    180  if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
    329  "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
    [all …]
|