
Searched full:qp (Results 1 – 25 of 990) sorted by relevance

/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
rxe_qp.c
82 pr_warn("SMI QP exists for port %d\n", port_num); in rxe_qp_chk_init()
87 pr_warn("GSI QP exists for port %d\n", port_num); in rxe_qp_chk_init()
98 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) in alloc_rd_atomic_resources() argument
100 qp->resp.res_head = 0; in alloc_rd_atomic_resources()
101 qp->resp.res_tail = 0; in alloc_rd_atomic_resources()
102 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); in alloc_rd_atomic_resources()
104 if (!qp->resp.resources) in alloc_rd_atomic_resources()
110 static void free_rd_atomic_resources(struct rxe_qp *qp) in free_rd_atomic_resources() argument
112 if (qp->resp.resources) { in free_rd_atomic_resources()
115 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in free_rd_atomic_resources()
[all …]
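
A minimal userspace sketch of the rd-atomic resource pool pattern visible in rxe_qp.c above. This is an illustration, not the kernel code: struct resp_res is stubbed, and calloc/free stand in for kcalloc/kfree.

#include <stdlib.h>

struct resp_res { int in_use; };                /* stub of the kernel struct */

struct resp_pool {
        unsigned int res_head, res_tail;        /* ring indices, as in rxe */
        struct resp_res *resources;
};

static int alloc_rd_atomic_resources(struct resp_pool *p, unsigned int n)
{
        p->res_head = 0;
        p->res_tail = 0;
        p->resources = calloc(n, sizeof(struct resp_res));
        if (!p->resources)
                return -1;                      /* the kernel returns -ENOMEM */
        return 0;
}

static void free_rd_atomic_resources(struct resp_pool *p)
{
        free(p->resources);                     /* free(NULL) is a no-op, like kfree */
        p->resources = NULL;
}
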
rxe_req.c
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
17 static inline void retry_first_write_send(struct rxe_qp *qp, in retry_first_write_send() argument
24 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
25 qp->mtu : wqe->dma.resid; in retry_first_write_send()
27 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
37 wqe->iova += qp->mtu; in retry_first_write_send()
41 static void req_retry(struct rxe_qp *qp) in req_retry() argument
49 qp->req.wqe_index = consumer_index(qp->sq.queue); in req_retry()
50 qp->req.psn = qp->comp.psn; in req_retry()
51 qp->req.opcode = -1; in req_retry()
[all …]
rxe_resp.c
80 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_resp_queue_pkt() argument
85 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
88 (skb_queue_len(&qp->req_pkts) > 1); in rxe_resp_queue_pkt()
90 rxe_run_task(&qp->resp.task, must_sched); in rxe_resp_queue_pkt()
93 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
98 if (qp->resp.state == QP_STATE_ERROR) { in get_req()
99 while ((skb = skb_dequeue(&qp->req_pkts))) { in get_req()
100 rxe_drop_ref(qp); in get_req()
108 skb = skb_peek(&qp->req_pkts); in get_req()
114 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
[all …]
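
The 5.10 get_req() above drains every queued request packet when the QP is in the error state, dropping one QP reference per packet. A hedged userspace analogue, with a hypothetical minimal queue type standing in for the sk_buff list:

#include <stdlib.h>

struct pkt { struct pkt *next; };
struct pkt_queue { struct pkt *head; };

static struct pkt *pkt_dequeue(struct pkt_queue *q)
{
        struct pkt *p = q->head;
        if (p)
                q->head = p->next;
        return p;
}

/* Error state: discard every pending request packet. */
static void drain_on_error(struct pkt_queue *q)
{
        struct pkt *p;
        while ((p = pkt_dequeue(q)))
                free(p);        /* stands in for rxe_drop_ref(qp) + skb free */
}
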
rxe_comp.c
114 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
116 if (qp->valid) { in retransmit_timer()
117 qp->comp.timeout = 1; in retransmit_timer()
118 rxe_run_task(&qp->comp.task, 1); in retransmit_timer()
122 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_comp_queue_pkt() argument
126 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
128 must_sched = skb_queue_len(&qp->resp_pkts) > 1; in rxe_comp_queue_pkt()
132 rxe_run_task(&qp->comp.task, must_sched); in rxe_comp_queue_pkt()
135 static inline enum comp_state get_wqe(struct rxe_qp *qp, in get_wqe() argument
144 wqe = queue_head(qp->sq.queue); in get_wqe()
[all …]
/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/
rxe_qp.c
92 rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num); in rxe_qp_chk_init()
103 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) in alloc_rd_atomic_resources() argument
105 qp->resp.res_head = 0; in alloc_rd_atomic_resources()
106 qp->resp.res_tail = 0; in alloc_rd_atomic_resources()
107 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); in alloc_rd_atomic_resources()
109 if (!qp->resp.resources) in alloc_rd_atomic_resources()
115 static void free_rd_atomic_resources(struct rxe_qp *qp) in free_rd_atomic_resources() argument
117 if (qp->resp.resources) { in free_rd_atomic_resources()
120 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in free_rd_atomic_resources()
121 struct resp_res *res = &qp->resp.resources[i]; in free_rd_atomic_resources()
[all …]
rxe_req.c
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
17 static inline void retry_first_write_send(struct rxe_qp *qp, in retry_first_write_send() argument
23 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
24 qp->mtu : wqe->dma.resid; in retry_first_write_send()
26 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
38 static void req_retry(struct rxe_qp *qp) in req_retry() argument
45 struct rxe_queue *q = qp->sq.queue; in req_retry()
52 qp->req.wqe_index = cons; in req_retry()
53 qp->req.psn = qp->comp.psn; in req_retry()
54 qp->req.opcode = -1; in req_retry()
[all …]
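
In both trees, req_retry() rewinds the requester to the oldest unacknowledged work request: the WQE index returns to the consumer index, the PSN returns to the last PSN the completer acknowledged, and the opcode is cleared so the next packet is recomputed. A simplified restatement (field and parameter names are stand-ins for the rxe_qp members shown above):

struct req_state { unsigned int wqe_index; unsigned int psn; int opcode; };

static void req_retry_sketch(struct req_state *req,
                             unsigned int consumer_index,
                             unsigned int acked_psn)
{
        req->wqe_index = consumer_index;  /* back to the oldest unacked WQE */
        req->psn = acked_psn;             /* restart from qp->comp.psn */
        req->opcode = -1;                 /* force next_opcode() to recompute */
}
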
rxe_resp.c
50 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_resp_queue_pkt() argument
55 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
58 (skb_queue_len(&qp->req_pkts) > 1); in rxe_resp_queue_pkt()
61 rxe_sched_task(&qp->resp.task); in rxe_resp_queue_pkt()
63 rxe_run_task(&qp->resp.task); in rxe_resp_queue_pkt()
66 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
71 skb = skb_peek(&qp->req_pkts); in get_req()
77 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
80 static enum resp_states check_psn(struct rxe_qp *qp, in check_psn() argument
83 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
[all …]
rxe_comp.c
117 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
120 rxe_dbg_qp(qp, "retransmit timer fired\n"); in retransmit_timer()
122 spin_lock_irqsave(&qp->state_lock, flags); in retransmit_timer()
123 if (qp->valid) { in retransmit_timer()
124 qp->comp.timeout = 1; in retransmit_timer()
125 rxe_sched_task(&qp->comp.task); in retransmit_timer()
127 spin_unlock_irqrestore(&qp->state_lock, flags); in retransmit_timer()
130 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_comp_queue_pkt() argument
134 must_sched = skb_queue_len(&qp->resp_pkts) > 0; in rxe_comp_queue_pkt()
138 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
[all …]
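
Note the behavioral shift between the two trees: 5.10 passes a must_sched flag into rxe_run_task(), while 6.6 calls rxe_sched_task() or rxe_run_task() explicitly, and for completions defers as soon as any packet is already queued (> 0 rather than > 1). A minimal sketch of the heuristic, with hypothetical task helpers:

#include <stdbool.h>

struct task { void (*func)(void *arg); void *arg; };

static void run_inline(struct task *t)     { t->func(t->arg); }
static void sched_deferred(struct task *t) { (void)t; /* hand off to a worker */ }

static void comp_queue_pkt_sketch(struct task *comp_task, unsigned int backlog)
{
        bool must_sched = backlog > 0;        /* > 1 in the 5.10 variant */

        if (must_sched)
                sched_deferred(comp_task);    /* rxe_sched_task() in 6.6 */
        else
                run_inline(comp_task);        /* rxe_run_task() */
}
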
/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
qib_rc.c
53 * @dev: the device for this QP
54 * @qp: a pointer to the QP
59 * Note that we are in the responder's side of the QP context.
60 * Note the QP s_lock must be held.
62 static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, in qib_make_rc_ack() argument
72 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in qib_make_rc_ack()
78 switch (qp->s_ack_state) { in qib_make_rc_ack()
81 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qib_make_rc_ack()
93 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) in qib_make_rc_ack()
94 qp->s_tail_ack_queue = 0; in qib_make_rc_ack()
[all …]
qib_uc.c
42 * @qp: a pointer to the QP
48 int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) in qib_make_uc_req() argument
50 struct qib_qp_priv *priv = qp->priv; in qib_make_uc_req()
56 u32 pmtu = qp->pmtu; in qib_make_uc_req()
59 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in qib_make_uc_req()
60 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in qib_make_uc_req()
63 if (qp->s_last == READ_ONCE(qp->s_head)) in qib_make_uc_req()
67 qp->s_flags |= RVT_S_WAIT_DMA; in qib_make_uc_req()
70 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
71 rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
[all …]
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/
qib_rc.c
53 * @dev: the device for this QP
54 * @qp: a pointer to the QP
59 * Note that we are in the responder's side of the QP context.
60 * Note the QP s_lock must be held.
62 static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, in qib_make_rc_ack() argument
72 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) in qib_make_rc_ack()
78 switch (qp->s_ack_state) { in qib_make_rc_ack()
81 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qib_make_rc_ack()
93 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) in qib_make_rc_ack()
94 qp->s_tail_ack_queue = 0; in qib_make_rc_ack()
[all …]
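
Lines 93-94 of qib_rc.c implement a ring of QIB_MAX_RDMA_ATOMIC + 1 ack-queue slots: the tail index wraps to zero once it passes the last slot. The same step in isolation (the constant's value here is illustrative):

#define QIB_MAX_RDMA_ATOMIC 16                /* illustrative value */

static unsigned int advance_tail(unsigned int tail)
{
        if (++tail > QIB_MAX_RDMA_ATOMIC)     /* past the last slot... */
                tail = 0;                     /* ...wrap to the start */
        return tail;
}
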
qib_uc.c
42 * @qp: a pointer to the QP
49 int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) in qib_make_uc_req() argument
51 struct qib_qp_priv *priv = qp->priv; in qib_make_uc_req()
57 u32 pmtu = qp->pmtu; in qib_make_uc_req()
60 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in qib_make_uc_req()
61 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in qib_make_uc_req()
64 if (qp->s_last == READ_ONCE(qp->s_head)) in qib_make_uc_req()
68 qp->s_flags |= RVT_S_WAIT_DMA; in qib_make_uc_req()
71 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
72 rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
[all …]
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
qp.c
16 #include "qp.h"
22 MODULE_PARM_DESC(qp_table_size, "QP table size");
24 static void flush_tx_list(struct rvt_qp *qp);
33 static void qp_pio_drain(struct rvt_qp *qp);
122 static void flush_tx_list(struct rvt_qp *qp) in flush_tx_list() argument
124 struct hfi1_qp_priv *priv = qp->priv; in flush_tx_list()
130 static void flush_iowait(struct rvt_qp *qp) in flush_iowait() argument
132 struct hfi1_qp_priv *priv = qp->priv; in flush_iowait()
142 rvt_put_qp(qp); in flush_iowait()
160 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, in hfi1_check_modify_qp() argument
[all …]
rc.c
11 #include "qp.h"
16 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev, in find_prev_entry() argument
18 __must_hold(&qp->s_lock) in find_prev_entry()
24 for (i = qp->r_head_ack_queue; ; i = p) { in find_prev_entry()
25 if (i == qp->s_tail_ack_queue) in find_prev_entry()
30 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); in find_prev_entry()
31 if (p == qp->r_head_ack_queue) { in find_prev_entry()
35 e = &qp->s_ack_queue[p]; in find_prev_entry()
41 if (p == qp->s_tail_ack_queue && in find_prev_entry()
58 * @dev: the device for this QP
[all …]
uc.c
8 #include "qp.h"
15 * @qp: a pointer to the QP
22 int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) in hfi1_make_uc_req() argument
24 struct hfi1_qp_priv *priv = qp->priv; in hfi1_make_uc_req()
30 u32 pmtu = qp->pmtu; in hfi1_make_uc_req()
33 ps->s_txreq = get_txreq(ps->dev, qp); in hfi1_make_uc_req()
37 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in hfi1_make_uc_req()
38 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in hfi1_make_uc_req()
41 if (qp->s_last == READ_ONCE(qp->s_head)) in hfi1_make_uc_req()
45 qp->s_flags |= RVT_S_WAIT_DMA; in hfi1_make_uc_req()
[all …]
trace_tid.h
28 #define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
195 TP_PROTO(struct rvt_qp *qp),
196 TP_ARGS(qp),
198 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
205 struct hfi1_qp_priv *priv = qp->priv;
207 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
208 __entry->qpn = qp->ibqp.qp_num;
225 TP_PROTO(struct rvt_qp *qp),
226 TP_ARGS(qp)
231 TP_PROTO(struct rvt_qp *qp),
[all …]
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
rc.c
53 #include "qp.h"
58 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev, in find_prev_entry() argument
60 __must_hold(&qp->s_lock) in find_prev_entry()
66 for (i = qp->r_head_ack_queue; ; i = p) { in find_prev_entry()
67 if (i == qp->s_tail_ack_queue) in find_prev_entry()
72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device)); in find_prev_entry()
73 if (p == qp->r_head_ack_queue) { in find_prev_entry()
77 e = &qp->s_ack_queue[p]; in find_prev_entry()
83 if (p == qp->s_tail_ack_queue && in find_prev_entry()
100 * @dev: the device for this QP
[all …]
qp.c
58 #include "qp.h"
64 MODULE_PARM_DESC(qp_table_size, "QP table size");
66 static void flush_tx_list(struct rvt_qp *qp);
75 static void qp_pio_drain(struct rvt_qp *qp);
164 static void flush_tx_list(struct rvt_qp *qp) in flush_tx_list() argument
166 struct hfi1_qp_priv *priv = qp->priv; in flush_tx_list()
172 static void flush_iowait(struct rvt_qp *qp) in flush_iowait() argument
174 struct hfi1_qp_priv *priv = qp->priv; in flush_iowait()
184 rvt_put_qp(qp); in flush_iowait()
202 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, in hfi1_check_modify_qp() argument
[all …]
uc.c
50 #include "qp.h"
57 * @qp: a pointer to the QP
63 int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) in hfi1_make_uc_req() argument
65 struct hfi1_qp_priv *priv = qp->priv; in hfi1_make_uc_req()
71 u32 pmtu = qp->pmtu; in hfi1_make_uc_req()
74 ps->s_txreq = get_txreq(ps->dev, qp); in hfi1_make_uc_req()
78 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { in hfi1_make_uc_req()
79 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) in hfi1_make_uc_req()
82 if (qp->s_last == READ_ONCE(qp->s_head)) in hfi1_make_uc_req()
86 qp->s_flags |= RVT_S_WAIT_DMA; in hfi1_make_uc_req()
[all …]
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/
qp.c
57 #include "qp.h"
64 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
325 * init_qpn_table - initialize the QP number table for a device
381 * free_qpn_table - free the QP number table for a device
393 * rvt_driver_qp_init - Init driver qp resources
407 * If driver is not doing any QP allocation then make sure it is in rvt_driver_qp_init()
408 * providing the necessary QP functions. in rvt_driver_qp_init()
457 * rvt_free_qp_cb - callback function to reset a qp
458 * @qp: the qp to reset
461 * This function resets the qp and removes it from the
[all …]
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/
qp.c
15 #include "qp.h"
22 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
283 * init_qpn_table - initialize the QP number table for a device
340 * free_qpn_table - free the QP number table for a device
352 * rvt_driver_qp_init - Init driver qp resources
366 * If driver is not doing any QP allocation then make sure it is in rvt_driver_qp_init()
367 * providing the necessary QP functions. in rvt_driver_qp_init()
416 * rvt_free_qp_cb - callback function to reset a qp
417 * @qp: the qp to reset
420 * This function resets the qp and removes it from the
[all …]
/kernel/linux/linux-5.10/drivers/ntb/
ntb_transport.c
120 struct ntb_transport_qp *qp; member
142 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
468 struct ntb_transport_qp *qp; in debugfs_read() local
472 qp = filp->private_data; in debugfs_read()
474 if (!qp || !qp->link_is_up) in debugfs_read()
485 "\nNTB QP stats:\n\n"); in debugfs_read()
[all …]
/kernel/linux/linux-6.6/drivers/ntb/
ntb_transport.c
120 struct ntb_transport_qp *qp; member
142 u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
148 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
160 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
272 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
279 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
466 struct ntb_transport_qp *qp; in debugfs_read() local
470 qp = filp->private_data; in debugfs_read()
472 if (!qp || !qp->link_is_up) in debugfs_read()
483 "\nNTB QP stats:\n\n"); in debugfs_read()
[all …]
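
The QP_TO_MW() macro shows how the NTB transport spreads its at most 64 queue pairs across the device's memory windows: a plain modulo. Restated as a function for clarity:

static inline unsigned int qp_to_mw(unsigned int qp_num, unsigned int mw_count)
{
        return qp_num % mw_count;     /* QP n shares memory window n mod mw_count */
}
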
/kernel/linux/linux-6.6/drivers/net/ethernet/qlogic/qed/
qed_roce.c
74 /* when destroying a RoCE QP the control is returned to the user after in qed_roce_stop()
76 * We delay for a short while if an async destroy QP is still expected. in qed_roce_stop()
96 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, in qed_rdma_copy_gids() argument
101 if (qp->roce_mode == ROCE_V2_IPV4) { in qed_rdma_copy_gids()
107 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); in qed_rdma_copy_gids()
108 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); in qed_rdma_copy_gids()
111 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { in qed_rdma_copy_gids()
112 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); in qed_rdma_copy_gids()
113 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); in qed_rdma_copy_gids()
164 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); in qed_roce_alloc_cid()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/qlogic/qed/
qed_roce.c
74 /* when destroying a RoCE QP the control is returned to the user after in qed_roce_stop()
76 * We delay for a short while if an async destroy QP is still expected. in qed_roce_stop()
96 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, in qed_rdma_copy_gids() argument
101 if (qp->roce_mode == ROCE_V2_IPV4) { in qed_rdma_copy_gids()
107 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); in qed_rdma_copy_gids()
108 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); in qed_rdma_copy_gids()
111 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { in qed_rdma_copy_gids()
112 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); in qed_rdma_copy_gids()
113 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); in qed_rdma_copy_gids()
164 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); in qed_roce_alloc_cid()
[all …]
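
qed_rdma_copy_gids() special-cases RoCE v2 over IPv4: only the last GID dword carries the IPv4 address, so the first three dwords are zeroed; otherwise all four dwords of the 128-bit GID are copied. A simplified sketch (types reduced to plain uint32_t arrays; the kernel additionally converts each dword with cpu_to_le32()):

#include <stdint.h>
#include <string.h>

struct gid { uint32_t dwords[4]; };   /* 128-bit GID as four dwords */

static void copy_gids_sketch(const struct gid *sgid, const struct gid *dgid,
                             uint32_t *src, uint32_t *dst, int roce_v2_ipv4)
{
        if (roce_v2_ipv4) {
                /* RoCE v2/IPv4: zero the upper dwords, address in dword 3 */
                memset(src, 0, 3 * sizeof(*src));
                memset(dst, 0, 3 * sizeof(*dst));
                src[3] = sgid->dwords[3];
                dst[3] = dgid->dwords[3];
        } else {
                for (int i = 0; i < 4; i++) {
                        src[i] = sgid->dwords[i];
                        dst[i] = dgid->dwords[i];
                }
        }
}
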
