
Searched refs:qpn (Results 1 – 25 of 76) sorted by relevance


/drivers/net/ethernet/mellanox/mlx4/
mcg.c
127 u32 qpn) in get_promisc_qp() argument
138 if (pqp->qpn == qpn) in get_promisc_qp()
151 unsigned int index, u32 qpn) in new_steering_entry() argument
178 pqp = get_promisc_qp(dev, port, steer, qpn); in new_steering_entry()
185 dqp->qpn = qpn; in new_steering_entry()
211 if (pqp->qpn == qpn) in new_steering_entry()
220 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry()
243 unsigned int index, u32 qpn) in existing_steering_entry() argument
255 pqp = get_promisc_qp(dev, port, steer, qpn); in existing_steering_entry()
274 if (qpn == dqp->qpn) in existing_steering_entry()
[all …]
qp.c
49 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) in mlx4_qp_event() argument
56 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
63 mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); in mlx4_qp_event()
79 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0()
81 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && in is_master_qp0()
82 qp->qpn <= dev->phys_caps.base_sqpn + 1; in is_master_qp0()
145 ret = mlx4_cmd(dev, 0, qp->qpn, 2, in __mlx4_qp_modify()
150 port = (qp->qpn & 1) + 1; in __mlx4_qp_modify()
174 cpu_to_be32(qp->qpn); in __mlx4_qp_modify()
177 qp->qpn | (!!sqd_event << 31), in __mlx4_qp_modify()
[all …]
resource_tracker.c
224 int qpn; member
742 u8 slave, u32 qpn) in update_vport_qp_param() argument
763 if (mlx4_is_qp_reserved(dev, qpn)) in update_vport_qp_param()
777 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); in update_vport_qp_param()
1107 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) in alloc_fs_rule_tr() argument
1117 ret->qpn = qpn; in alloc_fs_rule_tr()
1439 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, in qp_res_start_move_to() argument
1449 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); in qp_res_start_move_to()
1709 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) in valid_reserved() argument
1711 return mlx4_is_qp_reserved(dev, qpn) && in valid_reserved()
[all …]
en_resources.c
41 int is_tx, int rss, int qpn, int cqn, in mlx4_en_fill_qp_context() argument
62 context->local_qpn = cpu_to_be32(qpn); in mlx4_en_fill_qp_context()
87 en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); in mlx4_en_fill_qp_context()
102 ret = mlx4_update_qp(priv->mdev->dev, qp->qpn, in mlx4_en_change_mcast_lb()
en_rx.c
1150 static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, in mlx4_en_config_rss_qp() argument
1163 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); in mlx4_en_config_rss_qp()
1165 en_err(priv, "Failed to allocate qp #%x\n", qpn); in mlx4_en_config_rss_qp()
1172 qpn, ring->cqn, -1, context); in mlx4_en_config_rss_qp()
1199 u32 qpn; in mlx4_en_create_drop_qp() local
1201 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, in mlx4_en_create_drop_qp()
1207 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); in mlx4_en_create_drop_qp()
1210 mlx4_qp_release_range(priv->mdev->dev, qpn, 1); in mlx4_en_create_drop_qp()
1219 u32 qpn; in mlx4_en_destroy_drop_qp() local
1221 qpn = priv->drop_qp.qpn; in mlx4_en_destroy_drop_qp()
[all …]
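
The mlx4_en_create_drop_qp() lines above show a common unwind pattern: reserve one QPN, allocate the QP, and release the reserved range again if allocation fails. A minimal userspace sketch of that flow follows; the demo_* functions are hypothetical stand-ins, not the real mlx4_qp_reserve_range()/mlx4_qp_alloc()/mlx4_qp_release_range() API.

/* Sketch of the reserve -> alloc -> release-on-failure flow. */
#include <stdint.h>
#include <stdio.h>

static uint32_t next_qpn = 0x100;          /* hypothetical allocator state */

static int demo_qp_reserve_range(int cnt, uint32_t *base)
{
	*base = next_qpn;                  /* hand out cnt consecutive QPNs */
	next_qpn += cnt;
	return 0;                          /* 0 on success, negative on error */
}

static int demo_qp_alloc(uint32_t qpn)
{
	/* pretend the firmware command can fail for odd QPNs */
	return (qpn & 1) ? -12 : 0;
}

static void demo_qp_release_range(uint32_t base, int cnt)
{
	printf("released %d QPN(s) starting at 0x%x\n", cnt, base);
}

static int create_drop_qp(uint32_t *out_qpn)
{
	uint32_t qpn;
	int err;

	err = demo_qp_reserve_range(1, &qpn);
	if (err)
		return err;

	err = demo_qp_alloc(qpn);
	if (err) {
		/* undo the reservation, as the driver does on this path */
		demo_qp_release_range(qpn, 1);
		return err;
	}

	*out_qpn = qpn;
	return 0;
}

int main(void)
{
	uint32_t qpn;

	if (!create_drop_qp(&qpn))
		printf("drop QP uses qpn 0x%x\n", qpn);
	return 0;
}

The only point of the sketch is the error path: whatever was reserved before the failing step is handed back before returning.
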
en_netdev.c
189 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; in mlx4_en_filter_work()
481 int qpn, u64 *reg_id) in mlx4_en_tunnel_steer_add() argument
489 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, in mlx4_en_tunnel_steer_add()
501 unsigned char *mac, int *qpn, u64 *reg_id) in mlx4_en_uc_steer_add() argument
512 qp.qpn = *qpn; in mlx4_en_uc_steer_add()
532 rule.qpn = *qpn; in mlx4_en_uc_steer_add()
553 unsigned char *mac, int qpn, u64 reg_id) in mlx4_en_uc_steer_release() argument
563 qp.qpn = qpn; in mlx4_en_uc_steer_release()
585 int *qpn = &priv->base_qpn; in mlx4_en_get_qp() local
600 *qpn = base_qpn + index; in mlx4_en_get_qp()
[all …]
en_tx.c
116 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
123 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); in mlx4_en_create_tx_ring()
125 en_err(priv, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
156 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
178 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
184 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
210 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); in mlx4_en_activate_tx_ring()
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
/drivers/net/ethernet/mellanox/mlx5/core/
qp.c
95 int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; in mlx5_eq_pagefault() local
96 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); in mlx5_eq_pagefault()
103 qpn); in mlx5_eq_pagefault()
130 qpn, pfault.rdma.r_key); in mlx5_eq_pagefault()
150 qpn, pfault.wqe.wqe_index); in mlx5_eq_pagefault()
159 eqe->sub_type, qpn); in mlx5_eq_pagefault()
170 qpn); in mlx5_eq_pagefault()
213 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; in mlx5_core_create_qp()
214 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); in mlx5_core_create_qp()
218 err = radix_tree_insert(&table->tree, qp->qpn, qp); in mlx5_core_create_qp()
[all …]
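
The mlx5_core_create_qp() lines above mask the firmware reply with 0xffffff because QPNs are 24-bit values, then file the QP in a qpn-indexed lookup structure (a radix tree in the driver). A rough userspace sketch of that idea, using a plain direct-mapped array and hypothetical names, might look like:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

#define QPN_MASK        0xffffffu
#define DEMO_TABLE_SIZE 1024u

struct demo_qp {
	uint32_t qpn;
};

static struct demo_qp *demo_table[DEMO_TABLE_SIZE];

static int register_qp(struct demo_qp *qp, uint32_t fw_reply_be32)
{
	/* QPNs are 24 bits wide, so drop the top byte of the reply */
	qp->qpn = ntohl(fw_reply_be32) & QPN_MASK;

	/* hypothetical direct-mapped table; the driver's radix tree has no such limit */
	if (qp->qpn >= DEMO_TABLE_SIZE)
		return -1;
	demo_table[qp->qpn] = qp;
	return 0;
}

int main(void)
{
	struct demo_qp qp;

	if (!register_qp(&qp, htonl(0xAB000123)))   /* the 0xAB byte is not part of the QPN */
		printf("qpn = 0x%x\n", qp.qpn);     /* prints 0x123 */
	return 0;
}
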
mcg.c
42 __be32 qpn; member
54 __be32 qpn; member
64 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_attach_mcg() argument
74 in.qpn = cpu_to_be32(qpn); in mlx5_core_attach_mcg()
86 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_detach_mcg() argument
96 in.qpn = cpu_to_be32(qpn); in mlx5_core_detach_mcg()
/drivers/staging/rdma/hfi1/
qp.h
88 static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn) in qpn_hash() argument
90 return hash_32(qpn, dev->qp_table_bits); in qpn_hash()
102 u32 qpn) __must_hold(RCU) in hfi1_lookup_qpn() argument
106 if (unlikely(qpn <= 1)) { in hfi1_lookup_qpn()
107 qp = rcu_dereference(ibp->qp[qpn]); in hfi1_lookup_qpn()
110 u32 n = qpn_hash(dev->qp_dev, qpn); in hfi1_lookup_qpn()
114 if (qp->ibqp.qp_num == qpn) in hfi1_lookup_qpn()
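
qpn_hash() and hfi1_lookup_qpn() above resolve a QPN to a QP by hashing it into a table and walking a bucket, with QP0/QP1 special-cased through per-port pointers. The sketch below shows the general shape with a multiplicative hash in the spirit of the kernel's hash_32() (the exact constant has varied across kernel versions); the table size and struct names are illustrative, not the hfi1 definitions.

#include <stdint.h>
#include <stdio.h>

#define DEMO_GOLDEN_RATIO_32 0x61C88647u  /* multiplicative hash constant, illustrative */
#define TABLE_BITS           8            /* hypothetical dev->qp_table_bits */

struct demo_qp {
	uint32_t qp_num;
	struct demo_qp *next;             /* hash-chain link */
};

static struct demo_qp *qp_table[1u << TABLE_BITS];

static uint32_t demo_hash_32(uint32_t val, unsigned int bits)
{
	return (val * DEMO_GOLDEN_RATIO_32) >> (32 - bits);
}

/* Walk one bucket comparing QPNs, as hfi1_lookup_qpn() does for qpn > 1
 * (QP0/QP1 are resolved through per-port pointers instead). */
static struct demo_qp *lookup_qpn(uint32_t qpn)
{
	struct demo_qp *qp = qp_table[demo_hash_32(qpn, TABLE_BITS)];

	for (; qp; qp = qp->next)
		if (qp->qp_num == qpn)
			return qp;
	return NULL;
}

int main(void)
{
	struct demo_qp a = { .qp_num = 0x1234, .next = NULL };

	qp_table[demo_hash_32(a.qp_num, TABLE_BITS)] = &a;
	printf("found qpn 0x%x: %s\n", a.qp_num,
	       lookup_qpn(0x1234) ? "yes" : "no");
	return 0;
}
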
qp.c
144 u32 i, offset, max_scan, qpn; in alloc_qpn() local
162 qpn = qpt->last + qpt->incr; in alloc_qpn()
163 if (qpn >= QPN_MAX) in alloc_qpn()
164 qpn = qpt->incr | ((qpt->last & 1) ^ 1); in alloc_qpn()
166 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
167 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
177 qpt->last = qpn; in alloc_qpn()
178 ret = qpn; in alloc_qpn()
186 qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
187 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); in alloc_qpn()
[all …]
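
The alloc_qpn() hits above (and the qib/ipath variants further down) follow the same bitmap-allocator shape: start scanning just past the last QPN handed out, wrap before QPN_MAX, and claim the first free bit. A compact userspace sketch of that pattern, with made-up sizes and without the per-page locking and RC/UC spacing the drivers need, is:

#include <stdint.h>
#include <stdio.h>

#define DEMO_QPN_MAX 4096u                 /* hypothetical ceiling */

static uint8_t  qpn_map[DEMO_QPN_MAX / 8]; /* one bit per QPN */
static uint32_t qpn_last = 1;              /* QPN 0 and 1 stay reserved */

static int test_and_set(uint32_t qpn)
{
	uint8_t mask = 1u << (qpn & 7);

	if (qpn_map[qpn >> 3] & mask)
		return 1;                  /* already in use */
	qpn_map[qpn >> 3] |= mask;
	return 0;
}

static int alloc_qpn(void)
{
	uint32_t qpn = qpn_last + 1;
	uint32_t scanned;

	for (scanned = 0; scanned < DEMO_QPN_MAX; scanned++, qpn++) {
		if (qpn >= DEMO_QPN_MAX)
			qpn = 2;           /* wrap, skipping QP0/QP1 */
		if (!test_and_set(qpn)) {
			qpn_last = qpn;    /* remember where to resume next time */
			return (int)qpn;
		}
	}
	return -1;                         /* number space exhausted */
}

int main(void)
{
	int a = alloc_qpn();
	int b = alloc_qpn();
	int c = alloc_qpn();

	printf("first QPNs: %d %d %d\n", a, b, c);
	return 0;
}
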
trace.h
301 __field(u32, qpn)
308 __entry->qpn = qp->ibqp.qp_num;
314 __entry->qpn,
335 __field(u32, qpn)
340 __entry->qpn = qp->ibqp.qp_num;
346 __entry->qpn,
452 __field(u32, qpn)
501 __entry->qpn =
533 __entry->qpn,
570 __field(u32, qpn)
[all …]
/drivers/infiniband/hw/mthca/
mthca_qp.c
197 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
198 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
203 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
204 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
237 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, in mthca_qp_event() argument
244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
251 event_type, qpn); in mthca_qp_event()
448 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); in mthca_query_qp()
614 qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
755 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
[all …]
mthca_eq.c
143 __be32 qpn; member
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
mthca_mad.c
165 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
167 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
172 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
/drivers/infiniband/hw/qib/
qib_qp.c
127 u32 i, offset, max_scan, qpn; in alloc_qpn() local
145 qpn = qpt->last + 2; in alloc_qpn()
146 if (qpn >= QPN_MAX) in alloc_qpn()
147 qpn = 2; in alloc_qpn()
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) in alloc_qpn()
149 qpn = (qpn | qpt->mask) + 2; in alloc_qpn()
150 offset = qpn & RVT_BITS_PER_PAGE_MASK; in alloc_qpn()
151 map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; in alloc_qpn()
161 qpt->last = qpn; in alloc_qpn()
162 ret = qpn; in alloc_qpn()
[all …]
/drivers/staging/rdma/ipath/
ipath_qp.c
106 u32 i, offset, max_scan, qpn; in alloc_qpn() local
131 qpn = qpt->last + 1; in alloc_qpn()
132 if (qpn >= QPN_MAX) in alloc_qpn()
133 qpn = 2; in alloc_qpn()
134 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
135 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
147 qpt->last = qpn; in alloc_qpn()
148 ret = qpn; in alloc_qpn()
152 qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
161 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); in alloc_qpn()
[all …]
/drivers/infiniband/core/
cm_msgs.h
115 static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) in cm_req_set_local_qpn() argument
117 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_req_set_local_qpn()
523 static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) in cm_rep_set_local_qpn() argument
525 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_rep_set_local_qpn()
643 static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) in cm_dreq_set_remote_qpn() argument
645 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_dreq_set_remote_qpn()
692 static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) in cm_lap_set_remote_qpn() argument
694 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_lap_set_remote_qpn()
829 __be32 qpn) in cm_sidr_rep_set_qpn() argument
831 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_sidr_rep_set_qpn()
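
The cm_*_set_*_qpn() helpers above all pack the 24-bit QPN into the upper 24 bits of a 32-bit big-endian word, keeping the low 8 bits for neighbouring fields. A small sketch of that packing, taking the QPN in host order for simplicity (the real helpers take a __be32), with illustrative names rather than the ib_cm wire-format definitions:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

static uint32_t pack_qpn(uint32_t word_be, uint32_t qpn)
{
	uint32_t host = ntohl(word_be);

	host = (qpn << 8) | (host & 0xff);      /* QPN in the top 24 bits, keep the low byte */
	return htonl(host);
}

static uint32_t unpack_qpn(uint32_t word_be)
{
	return ntohl(word_be) >> 8;
}

int main(void)
{
	uint32_t word = htonl(0x000000aa);      /* pre-existing flags in the low byte */

	word = pack_qpn(word, 0x123456);
	printf("qpn=0x%x low=0x%02x\n", unpack_qpn(word), ntohl(word) & 0xff);
	return 0;
}
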
user_mad.c
231 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
532 be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
651 if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent()
654 ureq.qpn); in ib_umad_reg_agent()
689 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent()
753 if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent2()
756 ureq.qpn); in ib_umad_reg_agent2()
805 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent2()
agent.c
83 int port_num, int qpn, size_t resp_mad_len, bool opa) in agent_send_response() argument
101 agent = port_priv->agent[qpn]; in agent_send_response()
agent.h
49 int port_num, int qpn, size_t resp_mad_len, bool opa);
/drivers/infiniband/hw/mlx5/
odp.c
158 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, in mlx5_ib_page_fault_resume()
163 qp->mqp.qpn); in mlx5_ib_page_fault_resume()
404 wqe_index, qp->mqp.qpn); in mlx5_ib_mr_initiator_pfault_handler()
414 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
421 if (qp->mqp.qpn != ctrl_qpn) { in mlx5_ib_mr_initiator_pfault_handler()
423 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
552 -ret, wqe_index, qp->mqp.qpn); in mlx5_ib_mr_wqe_pfault_handler()
589 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); in mlx5_ib_mr_wqe_pfault_handler()
cq.c
423 uint32_t qpn; in mlx5_poll_one() local
455 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; in mlx5_poll_one()
456 if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { in mlx5_poll_one()
461 mqp = __mlx5_qp_lookup(dev->mdev, qpn); in mlx5_poll_one()
464 cq->mcq.cqn, qpn); in mlx5_poll_one()
919 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
925 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
/drivers/infiniband/hw/mlx4/
qp.c
131 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
132 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
143 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
144 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
150 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
151 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
175 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
324 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
[all …]
/drivers/staging/rdma/amso1100/
c2_qp.c
390 qp->qpn = ret; in c2_alloc_qpn()
397 static void c2_free_qpn(struct c2_dev *c2dev, int qpn) in c2_free_qpn() argument
400 idr_remove(&c2dev->qp_table.idr, qpn); in c2_free_qpn()
404 struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn) in c2_find_qpn() argument
410 qp = idr_find(&c2dev->qp_table.idr, qpn); in c2_find_qpn()
432 qp->ibqp.qp_num = qp->qpn; in c2_alloc_qp()
570 c2_free_qpn(c2dev, qp->qpn); in c2_alloc_qp()
613 c2_free_qpn(c2dev, qp->qpn); in c2_free_qp()
