/drivers/infiniband/hw/ipath/ |
D | ipath_rc.c |
      62  static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)   in ipath_init_restart() argument
      66  qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,   in ipath_init_restart()
      67  ib_mtu_enum_to_int(qp->path_mtu));   in ipath_init_restart()
      68  dev = to_idev(qp->ibqp.device);   in ipath_init_restart()
      70  if (list_empty(&qp->timerwait))   in ipath_init_restart()
      71  list_add_tail(&qp->timerwait,   in ipath_init_restart()
      86  static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,   in ipath_make_rc_ack() argument
      96  if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))   in ipath_make_rc_ack()
     102  switch (qp->s_ack_state) {   in ipath_make_rc_ack()
     111  if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)   in ipath_make_rc_ack()
          [all …]
|
D | ipath_qp.c |
     207  static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,   in ipath_alloc_qpn() argument
     216  qp->ibqp.qp_num = ret;   in ipath_alloc_qpn()
     222  qp->next = qpt->table[ret];   in ipath_alloc_qpn()
     223  qpt->table[ret] = qp;   in ipath_alloc_qpn()
     224  atomic_inc(&qp->refcount);   in ipath_alloc_qpn()
     241  static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)   in ipath_free_qp() argument
     249  qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];   in ipath_free_qp()
     251  if (q == qp) {   in ipath_free_qp()
     252  *qpp = qp->next;   in ipath_free_qp()
     253  qp->next = NULL;   in ipath_free_qp()
          [all …]
|
D | ipath_uc.c |
      46  int ipath_make_uc_req(struct ipath_qp *qp)   in ipath_make_uc_req() argument
      54  u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);   in ipath_make_uc_req()
      57  spin_lock_irqsave(&qp->s_lock, flags);   in ipath_make_uc_req()
      59  if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {   in ipath_make_uc_req()
      60  if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))   in ipath_make_uc_req()
      63  if (qp->s_last == qp->s_head)   in ipath_make_uc_req()
      66  if (atomic_read(&qp->s_dma_busy)) {   in ipath_make_uc_req()
      67  qp->s_flags |= IPATH_S_WAIT_DMA;   in ipath_make_uc_req()
      70  wqe = get_swqe_ptr(qp, qp->s_last);   in ipath_make_uc_req()
      71  ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in ipath_make_uc_req()
          [all …]
|
D | ipath_ruc.c |
      86  void ipath_insert_rnr_queue(struct ipath_qp *qp)   in ipath_insert_rnr_queue() argument
      88  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);   in ipath_insert_rnr_queue()
      93  list_add(&qp->timerwait, &dev->rnrwait);   in ipath_insert_rnr_queue()
      99  while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {   in ipath_insert_rnr_queue()
     100  qp->s_rnr_timeout -= nqp->s_rnr_timeout;   in ipath_insert_rnr_queue()
     110  nqp->s_rnr_timeout -= qp->s_rnr_timeout;   in ipath_insert_rnr_queue()
     111  list_add(&qp->timerwait, l);   in ipath_insert_rnr_queue()
     122  int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,   in ipath_init_sge() argument
     133  if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,   in ipath_init_sge()
     148  wc.qp = &qp->ibqp;   in ipath_init_sge()
          [all …]
|
D | ipath_ud.c |
      52  struct ipath_qp *qp;   in ipath_ud_loopback() local
      67  qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);   in ipath_ud_loopback()
      68  if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {   in ipath_ud_loopback()
      78  if (unlikely(qp->ibqp.qp_num &&   in ipath_ud_loopback()
      80  sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {   in ipath_ud_loopback()
     106  if (qp->ibqp.srq) {   in ipath_ud_loopback()
     107  srq = to_isrq(qp->ibqp.srq);   in ipath_ud_loopback()
     113  rq = &qp->r_rq;   in ipath_ud_loopback()
     133  rsge.sg_list = qp->r_ud_sg_list;   in ipath_ud_loopback()
     134  if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {   in ipath_ud_loopback()
          [all …]
|
D | ipath_verbs.c |
     334  static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)   in ipath_post_one_send() argument
     343  struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;   in ipath_post_one_send()
     345  spin_lock_irqsave(&qp->s_lock, flags);   in ipath_post_one_send()
     347  if (qp->ibqp.qp_type != IB_QPT_SMI &&   in ipath_post_one_send()
     354  if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))   in ipath_post_one_send()
     358  if (wr->num_sge > qp->s_max_sge)   in ipath_post_one_send()
     366  if (qp->ibqp.qp_type == IB_QPT_UC) {   in ipath_post_one_send()
     369  } else if (qp->ibqp.qp_type == IB_QPT_UD) {   in ipath_post_one_send()
     375  if (qp->ibqp.pd != wr->wr.ud.ah->pd)   in ipath_post_one_send()
     384  else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)   in ipath_post_one_send()
          [all …]
|
D | ipath_verbs_mcast.c |
      51  static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)   in ipath_mcast_qp_alloc() argument
      59  mqp->qp = qp;   in ipath_mcast_qp_alloc()
      60  atomic_inc(&qp->refcount);   in ipath_mcast_qp_alloc()
      68  struct ipath_qp *qp = mqp->qp;   in ipath_mcast_qp_free() local
      71  if (atomic_dec_and_test(&qp->refcount))   in ipath_mcast_qp_free()
      72  wake_up(&qp->wait);   in ipath_mcast_qp_free()
     191  if (p->qp == mqp->qp) {   in ipath_mcast_add()
     236  struct ipath_qp *qp = to_iqp(ibqp);   in ipath_multicast_attach() local
     251  mqp = ipath_mcast_qp_alloc(qp);   in ipath_multicast_attach()
     285  struct ipath_qp *qp = to_iqp(ibqp);   in ipath_multicast_detach() local
          [all …]
|
D | ipath_verbs.h |
     158  struct ipath_qp *qp;   member
     480  static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,   in get_swqe_ptr() argument
     483  return (struct ipath_swqe *)((char *)qp->s_wq +   in get_swqe_ptr()
     485  qp->s_max_sge *   in get_swqe_ptr()
     646  struct ipath_qp *qp;   member
     693  static inline void ipath_schedule_send(struct ipath_qp *qp)   in ipath_schedule_send() argument
     695  if (qp->s_flags & IPATH_S_ANY_WAIT)   in ipath_schedule_send()
     696  qp->s_flags &= ~IPATH_S_ANY_WAIT;   in ipath_schedule_send()
     697  if (!(qp->s_flags & IPATH_S_BUSY))   in ipath_schedule_send()
     698  tasklet_hi_schedule(&qp->s_task);   in ipath_schedule_send()
          [all …]
|
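The ipath_verbs.h hits above show the driver's inline send-scheduling helper almost line for line. A minimal reconstruction of that pattern follows; it assumes the driver-internal struct ipath_qp layout and the IPATH_S_* flag definitions from the same header and is not a verbatim copy of the file.

    /* Sketch of the scheduling helper visible in the ipath_verbs.h hits:
     * clear any "waiting" flags, then kick the send tasklet unless a send
     * is already marked busy.  Reconstructed from the listed lines. */
    static inline void ipath_schedule_send(struct ipath_qp *qp)
    {
            if (qp->s_flags & IPATH_S_ANY_WAIT)
                    qp->s_flags &= ~IPATH_S_ANY_WAIT;
            if (!(qp->s_flags & IPATH_S_BUSY))
                    tasklet_hi_schedule(&qp->s_task);
    }

The IPATH_S_BUSY test presumably keeps an already-running send tasklet from being scheduled a second time while it drains the send queue.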
/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
     195  static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)   in is_sqp() argument
     197  return qp->qpn >= dev->qp_table.sqp_start &&   in is_sqp()
     198  qp->qpn <= dev->qp_table.sqp_start + 3;   in is_sqp()
     201  static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)   in is_qp0() argument
     203  return qp->qpn >= dev->qp_table.sqp_start &&   in is_qp0()
     204  qp->qpn <= dev->qp_table.sqp_start + 1;   in is_qp0()
     207  static void *get_recv_wqe(struct mthca_qp *qp, int n)   in get_recv_wqe() argument
     209  if (qp->is_direct)   in get_recv_wqe()
     210  return qp->queue.direct.buf + (n << qp->rq.wqe_shift);   in get_recv_wqe()
     212  return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +   in get_recv_wqe()
          [all …]
|
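The is_sqp()/is_qp0() hits from mthca_qp.c are complete enough to reconstruct, and the same range-check idiom reappears in the mlx4 hits further down, there against dev->dev->caps.sqp_start. A sketch, assuming the mthca-internal types:

    /* Special-QP classification from the mthca_qp.c hits: the four QP
     * numbers starting at sqp_start are reserved for QP0/QP1 and their
     * proxy QPs, so "special" and "QP0" reduce to range checks on qpn. */
    static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
    {
            return qp->qpn >= dev->qp_table.sqp_start &&
                   qp->qpn <= dev->qp_table.sqp_start + 3;
    }

    static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
    {
            return qp->qpn >= dev->qp_table.sqp_start &&
                   qp->qpn <= dev->qp_table.sqp_start + 1;
    }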
D | mthca_provider.c |
     542  struct mthca_qp *qp;   in mthca_create_qp() local
     555  qp = kmalloc(sizeof *qp, GFP_KERNEL);   in mthca_create_qp()
     556  if (!qp)   in mthca_create_qp()
     563  kfree(qp);   in mthca_create_qp()
     571  kfree(qp);   in mthca_create_qp()
     583  kfree(qp);   in mthca_create_qp()
     587  qp->mr.ibmr.lkey = ucmd.lkey;   in mthca_create_qp()
     588  qp->sq.db_index = ucmd.sq_db_index;   in mthca_create_qp()
     589  qp->rq.db_index = ucmd.rq_db_index;   in mthca_create_qp()
     596  &init_attr->cap, qp);   in mthca_create_qp()
          [all …]
|
/drivers/infiniband/hw/mlx4/ |
D | qp.c |
      61  struct mlx4_ib_qp qp;   member
      89  return container_of(mqp, struct mlx4_ib_sqp, qp);   in to_msqp()
      92  static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)   in is_sqp() argument
      94  return qp->mqp.qpn >= dev->dev->caps.sqp_start &&   in is_sqp()
      95  qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;   in is_sqp()
      98  static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)   in is_qp0() argument
     100  return qp->mqp.qpn >= dev->dev->caps.sqp_start &&   in is_qp0()
     101  qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;   in is_qp0()
     104  static void *get_wqe(struct mlx4_ib_qp *qp, int offset)   in get_wqe() argument
     106  return mlx4_buf_offset(&qp->buf, offset);   in get_wqe()
          [all …]
|
/drivers/infiniband/hw/amso1100/ |
D | c2_qp.c |
     119  void c2_set_qp_state(struct c2_qp *qp, int c2_state)   in c2_set_qp_state() argument
     125  qp,   in c2_set_qp_state()
     126  to_ib_state_str(qp->state),   in c2_set_qp_state()
     128  qp->state = new_state;   in c2_set_qp_state()
     133  int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,   in c2_qp_modify() argument
     145  qp,   in c2_qp_modify()
     146  to_ib_state_str(qp->state),   in c2_qp_modify()
     156  wr.qp_handle = qp->adapter_handle;   in c2_qp_modify()
     172  spin_lock_irqsave(&qp->lock, flags);   in c2_qp_modify()
     173  if (qp->cm_id && qp->state == IB_QPS_RTS) {   in c2_qp_modify()
          [all …]
|
D | c2_ae.c |
     184  struct c2_qp *qp = (struct c2_qp *)resource_user_context;   in c2_ae_event() local
     185  struct iw_cm_id *cm_id = qp->cm_id;   in c2_ae_event()
     190  qp);   in c2_ae_event()
     203  c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));   in c2_ae_event()
     218  spin_lock_irqsave(&qp->lock, flags);   in c2_ae_event()
     219  if (qp->cm_id) {   in c2_ae_event()
     220  qp->cm_id->rem_ref(qp->cm_id);   in c2_ae_event()
     221  qp->cm_id = NULL;   in c2_ae_event()
     223  spin_unlock_irqrestore(&qp->lock, flags);   in c2_ae_event()
     233  ib_event.element.qp = &qp->ibqp;   in c2_ae_event()
          [all …]
|
D | c2_cm.c |
      43  struct c2_qp *qp;   in c2_llp_connect() local
      51  qp = to_c2qp(ibqp);   in c2_llp_connect()
      54  cm_id->provider_data = qp;   in c2_llp_connect()
      56  qp->cm_id = cm_id;   in c2_llp_connect()
      68  err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);   in c2_llp_connect()
      90  wr->qp_handle = qp->adapter_handle;   in c2_llp_connect()
     123  qp->cm_id = NULL;   in c2_llp_connect()
     284  struct c2_qp *qp;   in c2_llp_accept() local
     294  qp = to_c2qp(ibqp);   in c2_llp_accept()
     297  err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);   in c2_llp_accept()
          [all …]
|
D | c2_provider.c |
     211  struct c2_qp *qp;   in c2_add_ref() local
     213  qp = to_c2qp(ibqp);   in c2_add_ref()
     214  atomic_inc(&qp->refcount);   in c2_add_ref()
     219  struct c2_qp *qp;   in c2_rem_ref() local
     221  qp = to_c2qp(ibqp);   in c2_rem_ref()
     222  if (atomic_dec_and_test(&qp->refcount))   in c2_rem_ref()
     223  wake_up(&qp->wait);   in c2_rem_ref()
     229  struct c2_qp *qp;   in c2_get_qp() local
     231  qp = c2_find_qpn(c2dev, qpn);   in c2_get_qp()
     233  __func__, qp, qpn, device,   in c2_get_qp()
          [all …]
|
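The c2_provider.c hits outline the amso1100 QP reference-counting pair. A sketch follows; the function signatures and the omitted sanity checks are assumptions beyond the lines shown.

    /* Refcount pattern from the c2_provider.c hits: every user of a QP
     * bumps an atomic counter, and the final drop wakes whoever sleeps on
     * qp->wait (typically a destroy path waiting for the QP to go idle).
     * Signatures are assumed; sanity checks are omitted. */
    static void c2_add_ref(struct ib_qp *ibqp)
    {
            struct c2_qp *qp = to_c2qp(ibqp);

            atomic_inc(&qp->refcount);
    }

    static void c2_rem_ref(struct ib_qp *ibqp)
    {
            struct c2_qp *qp = to_c2qp(ibqp);

            if (atomic_dec_and_test(&qp->refcount))
                    wake_up(&qp->wait);
    }

The ipath_verbs_mcast.c hits above use the identical atomic_dec_and_test()/wake_up() pairing in ipath_mcast_qp_free().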
/drivers/net/mlx4/ |
D | qp.c |
      47  struct mlx4_qp *qp;   in mlx4_qp_event() local
      51  qp = __mlx4_qp_lookup(dev, qpn);   in mlx4_qp_event()
      52  if (qp)   in mlx4_qp_event()
      53  atomic_inc(&qp->refcount);   in mlx4_qp_event()
      57  if (!qp) {   in mlx4_qp_event()
      62  qp->event(qp, event_type);   in mlx4_qp_event()
      64  if (atomic_dec_and_test(&qp->refcount))   in mlx4_qp_event()
      65  complete(&qp->free);   in mlx4_qp_event()
      71  int sqd_event, struct mlx4_qp *qp)   in mlx4_qp_modify() argument
     121  return mlx4_cmd(dev, 0, qp->qpn, 2,   in mlx4_qp_modify()
          [all …]
|
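The drivers/net/mlx4/qp.c hits sketch how an asynchronous QP event is dispatched. A reconstruction follows; the qp_table lock, the mlx4_priv() accessor and the warning text are assumptions beyond the listed lines.

    /* Event-dispatch pattern from the mlx4/qp.c hits: look the QP up, take
     * a reference so it cannot be freed while the handler runs, call the
     * per-QP callback, then drop the reference and complete qp->free for a
     * waiting destroyer.  Locking details are assumed. */
    void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
    {
            struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
            struct mlx4_qp *qp;

            spin_lock(&qp_table->lock);

            qp = __mlx4_qp_lookup(dev, qpn);
            if (qp)
                    atomic_inc(&qp->refcount);

            spin_unlock(&qp_table->lock);

            if (!qp) {
                    mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
                    return;
            }

            qp->event(qp, event_type);

            if (atomic_dec_and_test(&qp->refcount))
                    complete(&qp->free);
    }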
D | mcg.c |
      50  __be32 qp[MLX4_QP_PER_MGM];   member
     150  int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],   in mlx4_multicast_attach() argument
     200  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {   in mlx4_multicast_attach()
     201  mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);   in mlx4_multicast_attach()
     207  mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |   in mlx4_multicast_attach()
     210  mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);   in mlx4_multicast_attach()
     247  int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])   in mlx4_multicast_detach() argument
     277  if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)   in mlx4_multicast_detach()
     281  mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);   in mlx4_multicast_detach()
     288  mgm->qp[loc] = mgm->qp[i - 1];   in mlx4_multicast_detach()
          [all …]
|
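The mcg.c hits show multicast membership being tested and removed against the MGM's big-endian QPN array. The helper below is hypothetical (mgm_find_qp() does not exist in the file); it only restates the masked-compare idiom from lines 200 and 277.

    /* Hypothetical helper illustrating the membership test from the mcg.c
     * hits: each mgm->qp[] entry stores a big-endian QPN plus flag bits,
     * so lookups compare the byte-swapped, masked value against qp->qpn.
     * Detach then removes a member by overwriting it with the last entry. */
    static int mgm_find_qp(struct mlx4_mgm *mgm, struct mlx4_qp *qp,
                           int members_count)
    {
            int i;

            for (i = 0; i < members_count; ++i)
                    if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
                            return i;       /* already a member */

            return -1;                      /* not found */
    }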
/drivers/infiniband/hw/ehca/ |
D | ehca_uverbs.c |
     196  static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,   in ehca_mmap_qp() argument
     203  ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);   in ehca_mmap_qp()
     204  ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);   in ehca_mmap_qp()
     206  ehca_err(qp->ib_qp.device,   in ehca_mmap_qp()
     208  ret, qp->ib_qp.qp_num);   in ehca_mmap_qp()
     214  ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);   in ehca_mmap_qp()
     215  ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,   in ehca_mmap_qp()
     216  &qp->mm_count_rqueue);   in ehca_mmap_qp()
     218  ehca_err(qp->ib_qp.device,   in ehca_mmap_qp()
     220  ret, qp->ib_qp.qp_num);   in ehca_mmap_qp()
          [all …]
|
D | ehca_reqs.c |
     155  static inline int ehca_write_swqe(struct ehca_qp *qp,   in ehca_write_swqe() argument
     165  struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];   in ehca_write_swqe()
     168  (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {   in ehca_write_swqe()
     171  send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);   in ehca_write_swqe()
     206  qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)   in ehca_write_swqe()
     221  switch (qp->qp_type) {   in ehca_write_swqe()
     228  remote_qkey = qp->qkey;   in ehca_write_swqe()
     233  ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);   in ehca_write_swqe()
     237  ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);   in ehca_write_swqe()
     255  if (qp->qp_type == IB_QPT_SMI ||   in ehca_write_swqe()
          [all …]
|
D | ehca_irq.c |
      95  struct ehca_qp *qp = (struct ehca_qp *)data;   in print_error_data() local
     103  qp->ib_qp.qp_num, resource);   in print_error_data()
     176  static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,   in dispatch_qp_event() argument
     182  if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)   in dispatch_qp_event()
     188  if (qp->ext_type == EQPT_SRQ) {   in dispatch_qp_event()
     189  if (!qp->ib_srq.event_handler)   in dispatch_qp_event()
     192  event.element.srq = &qp->ib_srq;   in dispatch_qp_event()
     193  qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);   in dispatch_qp_event()
     195  if (!qp->ib_qp.event_handler)   in dispatch_qp_event()
     198  event.element.qp = &qp->ib_qp;   in dispatch_qp_event()
          [all …]
|
/drivers/infiniband/core/ |
D | iwcm.c |
     220  static int iwcm_modify_qp_err(struct ib_qp *qp)   in iwcm_modify_qp_err() argument
     224  if (!qp)   in iwcm_modify_qp_err()
     228  return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);   in iwcm_modify_qp_err()
     235  static int iwcm_modify_qp_sqd(struct ib_qp *qp)   in iwcm_modify_qp_sqd() argument
     239  BUG_ON(qp == NULL);   in iwcm_modify_qp_sqd()
     241  return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);   in iwcm_modify_qp_sqd()
     261  struct ib_qp *qp = NULL;   in iw_cm_disconnect() local
     274  if (cm_id_priv->qp)   in iw_cm_disconnect()
     275  qp = cm_id_priv->qp;   in iw_cm_disconnect()
     300  if (qp) {   in iw_cm_disconnect()
          [all …]
|
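The iwcm.c hits show the iWARP connection manager forcing a QP into the error state with a one-attribute ib_modify_qp() call. A sketch, with the qp_attr assignment inferred from the surrounding lines:

    /* Teardown pattern from the iwcm.c hits: moving the QP to IB_QPS_ERR
     * flushes outstanding work requests; only IB_QP_STATE is passed, so
     * no other attribute is touched.  The qp_attr assignment is inferred. */
    static int iwcm_modify_qp_err(struct ib_qp *qp)
    {
            struct ib_qp_attr qp_attr;

            if (!qp)
                    return -EINVAL;

            qp_attr.qp_state = IB_QPS_ERR;
            return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    }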
D | verbs.c |
     286  struct ib_qp *qp;   in ib_create_qp() local
     288  qp = pd->device->create_qp(pd, qp_init_attr, NULL);   in ib_create_qp()
     290  if (!IS_ERR(qp)) {   in ib_create_qp()
     291  qp->device = pd->device;   in ib_create_qp()
     292  qp->pd = pd;   in ib_create_qp()
     293  qp->send_cq = qp_init_attr->send_cq;   in ib_create_qp()
     294  qp->recv_cq = qp_init_attr->recv_cq;   in ib_create_qp()
     295  qp->srq = qp_init_attr->srq;   in ib_create_qp()
     296  qp->uobject = NULL;   in ib_create_qp()
     297  qp->event_handler = qp_init_attr->event_handler;   in ib_create_qp()
          [all …]
|
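The verbs.c hits cover the front half of ib_create_qp(): the device driver's create_qp() method does the allocation, and the core then fills in the generic ib_qp fields. A sketch of that visible portion; whatever the truncated listing hides is simply marked as elided.

    /* Core QP-creation pattern from the verbs.c hits: delegate allocation
     * to the device driver, then initialise the generic fields from the
     * caller's init attributes before returning the QP. */
    struct ib_qp *ib_create_qp(struct ib_pd *pd,
                               struct ib_qp_init_attr *qp_init_attr)
    {
            struct ib_qp *qp;

            qp = pd->device->create_qp(pd, qp_init_attr, NULL);

            if (!IS_ERR(qp)) {
                    qp->device        = pd->device;
                    qp->pd            = pd;
                    qp->send_cq       = qp_init_attr->send_cq;
                    qp->recv_cq       = qp_init_attr->recv_cq;
                    qp->srq           = qp_init_attr->srq;
                    qp->uobject       = NULL;
                    qp->event_handler = qp_init_attr->event_handler;
                    /* remaining initialisation elided in the listing */
            }

            return qp;
    }

The ipoib_verbs.c hits below show a typical consumer: ipoib_transport_dev_init() calls ib_create_qp() and then encodes the returned qp_num into the net device's hardware address.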
/drivers/net/ehea/ |
D | ehea_qmr.c |
     382  int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,   in ehea_qp_alloc_register() argument
     403  qp->fw_handle, rpage, 1);   in ehea_qp_alloc_register()
     427  struct ehea_qp *qp;   in ehea_create_qp() local
     432  qp = kzalloc(sizeof(*qp), GFP_KERNEL);   in ehea_create_qp()
     433  if (!qp) {   in ehea_create_qp()
     438  qp->adapter = adapter;   in ehea_create_qp()
     441  &qp->fw_handle, &qp->epas);   in ehea_create_qp()
     452  ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,   in ehea_create_qp()
     461  ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,   in ehea_create_qp()
     472  ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,   in ehea_create_qp()
          [all …]
|
/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c |
      58  ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);   in ipoib_mcast_attach()
      66  ret = ib_attach_mcast(priv->qp, mgid, mlid);   in ipoib_mcast_attach()
      94  ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);   in ipoib_init_qp()
     103  ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);   in ipoib_init_qp()
     113  ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);   in ipoib_init_qp()
     123  if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))   in ipoib_init_qp()
     196  priv->qp = ib_create_qp(priv->pd, &init_attr);   in ipoib_transport_dev_init()
     197  if (IS_ERR(priv->qp)) {   in ipoib_transport_dev_init()
     202  priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;   in ipoib_transport_dev_init()
     203  priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;   in ipoib_transport_dev_init()
          [all …]
|
D | ipoib_cm.c |
     127  ret = ib_post_recv(rx->qp, wr, &bad_wr);   in ipoib_cm_post_receive_nonsrq()
     224  if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))   in ipoib_cm_start_rx_drain()
     271  struct ib_cm_id *cm_id, struct ib_qp *qp,   in ipoib_cm_modify_rx_qp() argument
     284  ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);   in ipoib_cm_modify_rx_qp()
     296  ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);   in ipoib_cm_modify_rx_qp()
     316  ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);   in ipoib_cm_modify_rx_qp()
     419  struct ib_qp *qp, struct ib_cm_req_event_param *req,   in ipoib_cm_send_rep() argument
     426  data.qpn = cpu_to_be32(priv->qp->qp_num);   in ipoib_cm_send_rep()
     434  rep.qp_num = qp->qp_num;   in ipoib_cm_send_rep()
     458  p->qp = ipoib_cm_create_rx_qp(dev, p);   in ipoib_cm_req_handler()
          [all …]
|