/drivers/infiniband/hw/qib/

D | qib_rc.c |
      62  static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,    in qib_make_rc_ack() argument
      72  if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))    in qib_make_rc_ack()
      78  switch (qp->s_ack_state) {    in qib_make_rc_ack()
      81  e = &qp->s_ack_queue[qp->s_tail_ack_queue];    in qib_make_rc_ack()
      93  if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)    in qib_make_rc_ack()
      94  qp->s_tail_ack_queue = 0;    in qib_make_rc_ack()
      99  if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {    in qib_make_rc_ack()
     100  if (qp->s_flags & RVT_S_ACK_PENDING)    in qib_make_rc_ack()
     105  e = &qp->s_ack_queue[qp->s_tail_ack_queue];    in qib_make_rc_ack()
     115  qp->s_tail_ack_queue = qp->r_head_ack_queue;    in qib_make_rc_ack()
          [all …]

D | qib_uc.c |
      48  int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)    in qib_make_uc_req() argument
      50  struct qib_qp_priv *priv = qp->priv;    in qib_make_uc_req()
      56  u32 pmtu = qp->pmtu;    in qib_make_uc_req()
      59  if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {    in qib_make_uc_req()
      60  if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))    in qib_make_uc_req()
      63  if (qp->s_last == READ_ONCE(qp->s_head))    in qib_make_uc_req()
      67  qp->s_flags |= RVT_S_WAIT_DMA;    in qib_make_uc_req()
      70  wqe = rvt_get_swqe_ptr(qp, qp->s_last);    in qib_make_uc_req()
      71  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);    in qib_make_uc_req()
      76  if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)    in qib_make_uc_req()
          [all …]

D | qib_ruc.c |
      44  void qib_migrate_qp(struct rvt_qp *qp)    in qib_migrate_qp() argument
      48  qp->s_mig_state = IB_MIG_MIGRATED;    in qib_migrate_qp()
      49  qp->remote_ah_attr = qp->alt_ah_attr;    in qib_migrate_qp()
      50  qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);    in qib_migrate_qp()
      51  qp->s_pkey_index = qp->s_alt_pkey_index;    in qib_migrate_qp()
      53  ev.device = qp->ibqp.device;    in qib_migrate_qp()
      54  ev.element.qp = &qp->ibqp;    in qib_migrate_qp()
      56  qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);    in qib_migrate_qp()
      83  int has_grh, struct rvt_qp *qp, u32 bth0)    in qib_ruc_check_hdr() argument
      88  if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {    in qib_ruc_check_hdr()
          [all …]

D | qib_ud.c |
      57  struct rvt_qp *qp;    in qib_ud_loopback() local
      67  qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));    in qib_ud_loopback()
      68  if (!qp) {    in qib_ud_loopback()
      75  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?    in qib_ud_loopback()
      76  IB_QPT_UD : qp->ibqp.qp_type;    in qib_ud_loopback()
      79  !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {    in qib_ud_loopback()
      87  if (qp->ibqp.qp_num > 1) {    in qib_ud_loopback()
      93  pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);    in qib_ud_loopback()
      99  sqp->ibqp.qp_num, qp->ibqp.qp_num,    in qib_ud_loopback()
     111  if (qp->ibqp.qp_num) {    in qib_ud_loopback()
          [all …]

/drivers/infiniband/sw/rxe/

D | rxe_qp.c |
      98  static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)    in alloc_rd_atomic_resources() argument
     100  qp->resp.res_head = 0;    in alloc_rd_atomic_resources()
     101  qp->resp.res_tail = 0;    in alloc_rd_atomic_resources()
     102  qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);    in alloc_rd_atomic_resources()
     104  if (!qp->resp.resources)    in alloc_rd_atomic_resources()
     110  static void free_rd_atomic_resources(struct rxe_qp *qp)    in free_rd_atomic_resources() argument
     112  if (qp->resp.resources) {    in free_rd_atomic_resources()
     115  for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {    in free_rd_atomic_resources()
     116  struct resp_res *res = &qp->resp.resources[i];    in free_rd_atomic_resources()
     118  free_rd_atomic_resource(qp, res);    in free_rd_atomic_resources()
          [all …]

D | rxe_comp.c |
     114  struct rxe_qp *qp = from_timer(qp, t, retrans_timer);    in retransmit_timer() local
     116  if (qp->valid) {    in retransmit_timer()
     117  qp->comp.timeout = 1;    in retransmit_timer()
     118  rxe_run_task(&qp->comp.task, 1);    in retransmit_timer()
     122  void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)    in rxe_comp_queue_pkt() argument
     126  skb_queue_tail(&qp->resp_pkts, skb);    in rxe_comp_queue_pkt()
     128  must_sched = skb_queue_len(&qp->resp_pkts) > 1;    in rxe_comp_queue_pkt()
     132  rxe_run_task(&qp->comp.task, must_sched);    in rxe_comp_queue_pkt()
     135  static inline enum comp_state get_wqe(struct rxe_qp *qp,    in get_wqe() argument
     144  wqe = queue_head(qp->sq.queue);    in get_wqe()
          [all …]

D | rxe_resp.c |
      80  void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)    in rxe_resp_queue_pkt() argument
      85  skb_queue_tail(&qp->req_pkts, skb);    in rxe_resp_queue_pkt()
      88  (skb_queue_len(&qp->req_pkts) > 1);    in rxe_resp_queue_pkt()
      90  rxe_run_task(&qp->resp.task, must_sched);    in rxe_resp_queue_pkt()
      93  static inline enum resp_states get_req(struct rxe_qp *qp,    in get_req() argument
      98  if (qp->resp.state == QP_STATE_ERROR) {    in get_req()
      99  while ((skb = skb_dequeue(&qp->req_pkts))) {    in get_req()
     100  rxe_drop_ref(qp);    in get_req()
     108  skb = skb_peek(&qp->req_pkts);    in get_req()
     114  return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;    in get_req()
          [all …]

D | rxe_req.c |
      14  static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
      17  static inline void retry_first_write_send(struct rxe_qp *qp,    in retry_first_write_send() argument
      24  int to_send = (wqe->dma.resid > qp->mtu) ?    in retry_first_write_send()
      25  qp->mtu : wqe->dma.resid;    in retry_first_write_send()
      27  qp->req.opcode = next_opcode(qp, wqe,    in retry_first_write_send()
      37  wqe->iova += qp->mtu;    in retry_first_write_send()
      41  static void req_retry(struct rxe_qp *qp)    in req_retry() argument
      49  qp->req.wqe_index = consumer_index(qp->sq.queue);    in req_retry()
      50  qp->req.psn = qp->comp.psn;    in req_retry()
      51  qp->req.opcode = -1;    in req_retry()
          [all …]

/drivers/infiniband/hw/hfi1/

D | rc.c |
      58  struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,    in find_prev_entry() argument
      60  __must_hold(&qp->s_lock)    in find_prev_entry()
      66  for (i = qp->r_head_ack_queue; ; i = p) {    in find_prev_entry()
      67  if (i == qp->s_tail_ack_queue)    in find_prev_entry()
      72  p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));    in find_prev_entry()
      73  if (p == qp->r_head_ack_queue) {    in find_prev_entry()
      77  e = &qp->s_ack_queue[p];    in find_prev_entry()
      83  if (p == qp->s_tail_ack_queue &&    in find_prev_entry()
     109  static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,    in make_rc_ack() argument
     117  u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);    in make_rc_ack()
          [all …]

D | qp.c |
      66  static void flush_tx_list(struct rvt_qp *qp);
      75  static void qp_pio_drain(struct rvt_qp *qp);
     164  static void flush_tx_list(struct rvt_qp *qp)    in flush_tx_list() argument
     166  struct hfi1_qp_priv *priv = qp->priv;    in flush_tx_list()
     172  static void flush_iowait(struct rvt_qp *qp)    in flush_iowait() argument
     174  struct hfi1_qp_priv *priv = qp->priv;    in flush_iowait()
     184  rvt_put_qp(qp);    in flush_iowait()
     202  int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,    in hfi1_check_modify_qp() argument
     205  struct ib_qp *ibqp = &qp->ibqp;    in hfi1_check_modify_qp()
     215  if (!qp_to_sdma_engine(qp, sc) &&    in hfi1_check_modify_qp()
          [all …]

D | uc.c |
      63  int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)    in hfi1_make_uc_req() argument
      65  struct hfi1_qp_priv *priv = qp->priv;    in hfi1_make_uc_req()
      71  u32 pmtu = qp->pmtu;    in hfi1_make_uc_req()
      74  ps->s_txreq = get_txreq(ps->dev, qp);    in hfi1_make_uc_req()
      78  if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {    in hfi1_make_uc_req()
      79  if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))    in hfi1_make_uc_req()
      82  if (qp->s_last == READ_ONCE(qp->s_head))    in hfi1_make_uc_req()
      86  qp->s_flags |= RVT_S_WAIT_DMA;    in hfi1_make_uc_req()
      89  clear_ahg(qp);    in hfi1_make_uc_req()
      90  wqe = rvt_get_swqe_ptr(qp, qp->s_last);    in hfi1_make_uc_req()
          [all …]

D | tid_rdma.c |
     114  static void hfi1_init_trdma_req(struct rvt_qp *qp,
     116  static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
     118  static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
     119  static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
     120  static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
     121  static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
     123  static int make_tid_rdma_ack(struct rvt_qp *qp,
     126  static void hfi1_do_tid_send(struct rvt_qp *qp);
     130  struct rvt_qp *qp, u32 psn, int diff, bool fecn);
     143  static void tid_rdma_schedule_ack(struct rvt_qp *qp)    in tid_rdma_schedule_ack() argument
          [all …]

D | trace_tid.h |
     195  TP_PROTO(struct rvt_qp *qp),
     196  TP_ARGS(qp),
     198  DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
     205  struct hfi1_qp_priv *priv = qp->priv;
     207  DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
     208  __entry->qpn = qp->ibqp.qp_num;
     225  TP_PROTO(struct rvt_qp *qp),
     226  TP_ARGS(qp)
     231  TP_PROTO(struct rvt_qp *qp),
     232  TP_ARGS(qp)
          [all …]

D | ruc.c |
      73  struct rvt_qp *qp = packet->qp;    in hfi1_ruc_check_hdr() local
      74  u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];    in hfi1_ruc_check_hdr()
      81  if (qp->s_mig_state == IB_MIG_ARMED && migrated) {    in hfi1_ruc_check_hdr()
      83  if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &    in hfi1_ruc_check_hdr()
      90  if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &    in hfi1_ruc_check_hdr()
      93  grh = rdma_ah_read_grh(&qp->alt_ah_attr);    in hfi1_ruc_check_hdr()
     106  hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,    in hfi1_ruc_check_hdr()
     111  if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||    in hfi1_ruc_check_hdr()
     113  rdma_ah_get_port_num(&qp->alt_ah_attr))    in hfi1_ruc_check_hdr()
     115  spin_lock_irqsave(&qp->s_lock, flags);    in hfi1_ruc_check_hdr()
          [all …]

D | ud.c |
      78  struct rvt_qp *qp;    in ud_loopback() local
      89  qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,    in ud_loopback()
      91  if (!qp) {    in ud_loopback()
      99  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?    in ud_loopback()
     100  IB_QPT_UD : qp->ibqp.qp_type;    in ud_loopback()
     103  !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {    in ud_loopback()
     111  if (qp->ibqp.qp_num > 1) {    in ud_loopback()
     120  qp->s_pkey_index,    in ud_loopback()
     124  sqp->ibqp.qp_num, qp->ibqp.qp_num,    in ud_loopback()
     135  if (qp->ibqp.qp_num) {    in ud_loopback()
          [all …]

/drivers/infiniband/sw/rdmavt/

D | qp.c |
      64  static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
     464  static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)    in rvt_free_qp_cb() argument
     467  struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);    in rvt_free_qp_cb()
     470  rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);    in rvt_free_qp_cb()
     619  static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)    in rvt_clear_mr_refs() argument
     622  struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);    in rvt_clear_mr_refs()
     624  if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))    in rvt_clear_mr_refs()
     625  rvt_put_ss(&qp->s_rdma_read_sge);    in rvt_clear_mr_refs()
     627  rvt_put_ss(&qp->r_sge);    in rvt_clear_mr_refs()
     630  while (qp->s_last != qp->s_head) {    in rvt_clear_mr_refs()
          [all …]

/drivers/ntb/

D | ntb_transport.c |
     120  struct ntb_transport_qp *qp;    member
     148  void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
     160  void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
     272  #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)    argument
     279  static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
     468  struct ntb_transport_qp *qp;    in debugfs_read() local
     472  qp = filp->private_data;    in debugfs_read()
     474  if (!qp || !qp->link_is_up)    in debugfs_read()
     487  "rx_bytes - \t%llu\n", qp->rx_bytes);    in debugfs_read()
     489  "rx_pkts - \t%llu\n", qp->rx_pkts);    in debugfs_read()
          [all …]

/drivers/infiniband/sw/siw/

D | siw_qp.c |
      95  struct siw_qp *qp;    in siw_qp_llp_data_ready() local
     102  qp = sk_to_qp(sk);    in siw_qp_llp_data_ready()
     104  if (likely(!qp->rx_stream.rx_suspend &&    in siw_qp_llp_data_ready()
     105  down_read_trylock(&qp->state_lock))) {    in siw_qp_llp_data_ready()
     106  read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };    in siw_qp_llp_data_ready()
     108  if (likely(qp->attrs.state == SIW_QP_STATE_RTS))    in siw_qp_llp_data_ready()
     117  up_read(&qp->state_lock);    in siw_qp_llp_data_ready()
     119  siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",    in siw_qp_llp_data_ready()
     120  qp->rx_stream.rx_suspend);    in siw_qp_llp_data_ready()
     126  void siw_qp_llp_close(struct siw_qp *qp)    in siw_qp_llp_close() argument
          [all …]

/drivers/infiniband/hw/mthca/

D | mthca_qp.c |
     196  static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)    in is_sqp() argument
     198  return qp->qpn >= dev->qp_table.sqp_start &&    in is_sqp()
     199  qp->qpn <= dev->qp_table.sqp_start + 3;    in is_sqp()
     202  static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)    in is_qp0() argument
     204  return qp->qpn >= dev->qp_table.sqp_start &&    in is_qp0()
     205  qp->qpn <= dev->qp_table.sqp_start + 1;    in is_qp0()
     208  static void *get_recv_wqe(struct mthca_qp *qp, int n)    in get_recv_wqe() argument
     210  if (qp->is_direct)    in get_recv_wqe()
     211  return qp->queue.direct.buf + (n << qp->rq.wqe_shift);    in get_recv_wqe()
     213  return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +    in get_recv_wqe()
          [all …]

/drivers/net/ethernet/qlogic/qed/

D | qed_roce.c |
      96  static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,    in qed_rdma_copy_gids() argument
     101  if (qp->roce_mode == ROCE_V2_IPV4) {    in qed_rdma_copy_gids()
     107  src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);    in qed_rdma_copy_gids()
     108  dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);    in qed_rdma_copy_gids()
     111  for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {    in qed_rdma_copy_gids()
     112  src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);    in qed_rdma_copy_gids()
     113  dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);    in qed_rdma_copy_gids()
     206  static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)    in qed_roce_get_qp_tc() argument
     210  if (qp->vlan_id) {    in qed_roce_get_qp_tc()
     211  pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;    in qed_roce_get_qp_tc()
          [all …]

/drivers/infiniband/hw/mlx4/

D | qp.c |
     105  static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)    in is_tunnel_qp() argument
     110  return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&    in is_tunnel_qp()
     111  qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +    in is_tunnel_qp()
     115  static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)    in is_sqp() argument
     122  qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&    in is_sqp()
     123  qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);    in is_sqp()
     129  if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||    in is_sqp()
     130  qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {    in is_sqp()
     139  return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);    in is_sqp()
     143  static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)    in is_qp0() argument
          [all …]

/drivers/infiniband/hw/vmw_pvrdma/

D | pvrdma_qp.c |
      56  struct pvrdma_qp *qp);
      58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,    in get_cqs() argument
      61  *send_cq = to_vcq(qp->ibqp.send_cq);    in get_cqs()
      62  *recv_cq = to_vcq(qp->ibqp.recv_cq);    in get_cqs()
     101  static void pvrdma_reset_qp(struct pvrdma_qp *qp)    in pvrdma_reset_qp() argument
     107  get_cqs(qp, &scq, &rcq);    in pvrdma_reset_qp()
     110  _pvrdma_flush_cqe(qp, scq);    in pvrdma_reset_qp()
     112  _pvrdma_flush_cqe(qp, rcq);    in pvrdma_reset_qp()
     120  if (qp->rq.ring) {    in pvrdma_reset_qp()
     121  atomic_set(&qp->rq.ring->cons_head, 0);    in pvrdma_reset_qp()
          [all …]

/drivers/infiniband/hw/bnxt_re/

D | qplib_fp.c |
      57  static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
      59  static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)    in bnxt_qplib_cancel_phantom_processing() argument
      61  qp->sq.condition = false;    in bnxt_qplib_cancel_phantom_processing()
      62  qp->sq.send_phantom = false;    in bnxt_qplib_cancel_phantom_processing()
      63  qp->sq.single = false;    in bnxt_qplib_cancel_phantom_processing()
      67  static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)    in __bnxt_qplib_add_flush_qp() argument
      71  scq = qp->scq;    in __bnxt_qplib_add_flush_qp()
      72  rcq = qp->rcq;    in __bnxt_qplib_add_flush_qp()
      74  if (!qp->sq.flushed) {    in __bnxt_qplib_add_flush_qp()
      76  "FP: Adding to SQ Flush list = %p\n", qp);    in __bnxt_qplib_add_flush_qp()
          [all …]

/drivers/infiniband/hw/i40iw/

D | i40iw_uk.c |
      47  static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)    in i40iw_nop_1() argument
      54  if (!qp->sq_ring.head)    in i40iw_nop_1()
      57  wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);    in i40iw_nop_1()
      58  wqe = qp->sq_base[wqe_idx].elem;    in i40iw_nop_1()
      60  qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;    in i40iw_nop_1()
      62  peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;    in i40iw_nop_1()
      63  wqe_0 = qp->sq_base[peek_head].elem;    in i40iw_nop_1()
      65  wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);    in i40iw_nop_1()
      67  wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);    in i40iw_nop_1()
      75  LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;    in i40iw_nop_1()
          [all …]

/drivers/infiniband/hw/qedr/

D | verbs.c |
    1277  struct qedr_qp *qp)    in qedr_copy_rq_uresp() argument
    1289  uresp->rq_icid = qp->icid;    in qedr_copy_rq_uresp()
    1290  if (qp->urq.db_mmap_entry)    in qedr_copy_rq_uresp()
    1292  rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);    in qedr_copy_rq_uresp()
    1297  struct qedr_qp *qp)    in qedr_copy_sq_uresp() argument
    1303  uresp->sq_icid = qp->icid;    in qedr_copy_sq_uresp()
    1305  uresp->sq_icid = qp->icid + 1;    in qedr_copy_sq_uresp()
    1307  if (qp->usq.db_mmap_entry)    in qedr_copy_sq_uresp()
    1309  rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);    in qedr_copy_sq_uresp()
    1313  struct qedr_qp *qp, struct ib_udata *udata,    in qedr_copy_qp_uresp() argument
          [all …]