/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/

trace_tx.h:
     95  __field(u64, wr_id)
    116  __entry->wr_id = wqe->wr.wr_id;
    137  __entry->wr_id,
    163  __field(u64, wr_id)
    175  __entry->wr_id = wqe->wr.wr_id;
    191  __entry->wr_id,

trace_cq.h:
    116  __field(u64, wr_id)
    127  __entry->wr_id = wc->wr_id;
    140  __entry->wr_id,
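The two trace headers above capture the same 64-bit cookie at both ends of a work request's life: trace_tx.h copies it out of the WQE when the send is posted, trace_cq.h copies it out of the work completion. A minimal userspace sketch of that correlation pattern follows; every struct and function name in it is an illustrative stand-in, not rdmavt's.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the WQE and WC types the tracepoints read from. */
    struct fake_wr  { uint64_t wr_id; };
    struct fake_wqe { struct fake_wr wr; };
    struct fake_wc  { uint64_t wr_id; };

    /* Mirrors the __field(u64, wr_id) slot in the trace record. */
    struct trace_entry { uint64_t wr_id; };

    static void trace_post(const struct fake_wqe *wqe)
    {
        struct trace_entry e = { .wr_id = wqe->wr.wr_id };  /* cf. trace_tx.h:116 */
        printf("post:     wr_id=0x%" PRIx64 "\n", e.wr_id);
    }

    static void trace_complete(const struct fake_wc *wc)
    {
        struct trace_entry e = { .wr_id = wc->wr_id };      /* cf. trace_cq.h:127 */
        printf("complete: wr_id=0x%" PRIx64 "\n", e.wr_id);
    }

    int main(void)
    {
        struct fake_wqe wqe = { .wr = { .wr_id = 0xabcd } };
        struct fake_wc  wc  = { .wr_id = 0xabcd };

        trace_post(&wqe);       /* both events print the same cookie, */
        trace_complete(&wc);    /* which is what makes them correlatable */
        return 0;
    }
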
/kernel/linux/linux-5.10/drivers/infiniband/ulp/ipoib/

ipoib_ib.c:
    106  priv->rx_wr.wr_id = id | IPOIB_OP_RECV;  in ipoib_ib_post_receive()
    176  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;  in ipoib_ib_handle_rx_wc() local
    183  wr_id, wc->status);  in ipoib_ib_handle_rx_wc()
    185  if (unlikely(wr_id >= ipoib_recvq_size)) {  in ipoib_ib_handle_rx_wc()
    187  wr_id, ipoib_recvq_size);  in ipoib_ib_handle_rx_wc()
    191  skb = priv->rx_ring[wr_id].skb;  in ipoib_ib_handle_rx_wc()
    197  wc->status, wr_id, wc->vendor_err);  in ipoib_ib_handle_rx_wc()
    198  ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);  in ipoib_ib_handle_rx_wc()
    200  priv->rx_ring[wr_id].skb = NULL;  in ipoib_ib_handle_rx_wc()
    204  memcpy(mapping, priv->rx_ring[wr_id].mapping,  in ipoib_ib_handle_rx_wc()
    [all …]

ipoib_cm.c:
     99  priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;  in ipoib_cm_post_receive_srq()
    124  wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;  in ipoib_cm_post_receive_nonsrq()
    226  ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;  in ipoib_cm_start_rx_drain()
    564  unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);  in ipoib_cm_handle_rx_wc() local
    574  wr_id, wc->status);  in ipoib_cm_handle_rx_wc()
    576  if (unlikely(wr_id >= ipoib_recvq_size)) {  in ipoib_cm_handle_rx_wc()
    577  if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {  in ipoib_cm_handle_rx_wc()
    585  wr_id, ipoib_recvq_size);  in ipoib_cm_handle_rx_wc()
    594  skb = rx_ring[wr_id].skb;  in ipoib_cm_handle_rx_wc()
    599  wc->status, wr_id, wc->vendor_err);  in ipoib_cm_handle_rx_wc()
    [all …]
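IPoIB does not store a pointer in wr_id. It packs the receive-ring slot index together with operation flag bits (IPOIB_OP_RECV, optionally IPOIB_OP_CM) at post time, then masks the flags back off and bounds-checks the index before touching rx_ring in the completion handler; IPOIB_CM_RX_DRAIN_WRID is a reserved value recognized by that same check. The sketch below compiles and round-trips one slot; the flag value and queue size are illustrative, not the driver's.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OP_RECV    ((uint64_t)1 << 31)   /* stand-in for IPOIB_OP_RECV */
    #define RECVQ_SIZE 256u                  /* stand-in for ipoib_recvq_size */

    /* Post side: OR the ring index with the operation flag. */
    static uint64_t encode_rx_wr_id(unsigned int id)
    {
        return id | OP_RECV;                 /* cf. ipoib_ib_post_receive() */
    }

    /* Completion side: strip the flag, then refuse out-of-range slots. */
    static int handle_rx_wc(uint64_t wc_wr_id)
    {
        unsigned int wr_id = (unsigned int)(wc_wr_id & ~OP_RECV);

        if (wr_id >= RECVQ_SIZE) {           /* cf. the unlikely() check */
            fprintf(stderr, "recv completion on bogus wr_id %u\n", wr_id);
            return -1;
        }
        printf("rx ring slot %u completed\n", wr_id);
        return 0;
    }

    int main(void)
    {
        assert(handle_rx_wc(encode_rx_wr_id(42)) == 0);
        return 0;
    }
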
/kernel/linux/linux-5.10/net/smc/

smc_wr.h:
     90  u64 wr_id, temp_wr_id;  in smc_wr_rx_post() local
     93  wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */  in smc_wr_rx_post()
     94  temp_wr_id = wr_id;  in smc_wr_rx_post()
     96  link->wr_rx_ibs[index].wr_id = wr_id;  in smc_wr_rx_post()

smc_wr.c:
     41  u64 wr_id; /* work request id sent */  member
     70  static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)  in smc_wr_tx_find_pending_index() argument
     75  if (link->wr_tx_pends[i].wr_id == wr_id)  in smc_wr_tx_find_pending_index()
     98  pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);  in smc_wr_tx_process_cqe()
    190  u64 wr_id;  in smc_wr_tx_get_free_slot() local
    214  wr_id = smc_wr_tx_get_next_wr_id(link);  in smc_wr_tx_get_free_slot()
    216  wr_pend->wr_id = wr_id;  in smc_wr_tx_get_free_slot()
    221  wr_ib->wr_id = wr_id;  in smc_wr_tx_get_free_slot()
    307  link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;  in smc_wr_reg_send()
    375  temp_wr_id = wc->wr_id;  in smc_wr_rx_demultiplex()
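smc_wr.h stamps each posted receive with a monotonically increasing wr_id (++link->wr_rx_id, safe only because it runs in tasklet context), while smc_wr.c keeps a pending array on the send side and resolves wc->wr_id back to its slot by linear search. A self-contained sketch of the send-side half; the array size, struct layout, and in_use flag are assumptions, not SMC's.

    #include <stdint.h>
    #include <stdio.h>

    #define WR_TX_CNT 16                 /* stand-in for the link's send-slot count */

    struct pend { uint64_t wr_id; int in_use; };

    static struct pend wr_tx_pends[WR_TX_CNT];
    static uint64_t wr_tx_id;            /* bumped once per post, like link->wr_tx_id */

    /* cf. smc_wr_tx_find_pending_index(): map a completion back to its slot. */
    static int tx_find_pending_index(uint64_t wr_id)
    {
        for (int i = 0; i < WR_TX_CNT; i++)
            if (wr_tx_pends[i].in_use && wr_tx_pends[i].wr_id == wr_id)
                return i;
        return -1;
    }

    /* cf. smc_wr_tx_get_free_slot(): record the fresh id in both the
     * pending entry and the (elided here) ib_send_wr before posting. */
    static uint64_t tx_post(int slot)
    {
        uint64_t wr_id = ++wr_tx_id;

        wr_tx_pends[slot].wr_id = wr_id;
        wr_tx_pends[slot].in_use = 1;
        return wr_id;
    }

    int main(void)
    {
        uint64_t id = tx_post(3);

        printf("completion for wr_id %llu lands in slot %d\n",
               (unsigned long long)id, tx_find_pending_index(id));
        return 0;
    }
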
/kernel/linux/linux-5.10/net/rds/

ib_ring.c:
    156  u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)  in rds_ib_ring_completed() argument
    160  if (oldest <= (unsigned long long)wr_id)  in rds_ib_ring_completed()
    161  ret = (unsigned long long)wr_id - oldest + 1;  in rds_ib_ring_completed()
    163  ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;  in rds_ib_ring_completed()
    166  wr_id, oldest);  in rds_ib_ring_completed()
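Here wr_id is not a cookie at all but a ring position: rds_ib_ring_completed() computes how many entries a completion retires given the newest completed index and the oldest outstanding one, wrapping at w_nr. The arithmetic in isolation, with two worked cases:

    #include <stdint.h>
    #include <stdio.h>

    /* cf. rds_ib_ring_completed() in net/rds/ib_ring.c */
    static uint32_t ring_completed(uint32_t w_nr, uint32_t wr_id, uint32_t oldest)
    {
        if (oldest <= wr_id)
            return wr_id - oldest + 1;         /* no wrap: contiguous run */
        return w_nr - oldest + wr_id + 1;      /* wrapped past the end of the ring */
    }

    int main(void)
    {
        printf("%u\n", ring_completed(256, 10, 5));    /* 6: entries 5..10          */
        printf("%u\n", ring_completed(256, 2, 250));   /* 9: entries 250..255, 0..2 */
        return 0;
    }
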
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/

gsi.c:
     74  u64 wr_id;  in handle_single_completion() local
     79  wr_id = wr->wc.wr_id;  in handle_single_completion()
     81  wr->wc.wr_id = wr_id;  in handle_single_completion()
    407  gsi_wr->wc.wr_id = wr->wr.wr_id;  in mlx5_ib_add_outstanding_wr()
    423  { .wr_id = wr->wr.wr_id },  in mlx5_ib_gsi_silent_drop()
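The GSI layer forwards a send to an internal QP but must hand the caller back its own cookie: mlx5_ib_add_outstanding_wr() stashes wr->wr.wr_id in a ring of pre-built work completions, and handle_single_completion() later surfaces that stored entry. A simplified stand-in for that bookkeeping (the real ring has locking and wraparound accounting this sketch omits):

    #include <stdint.h>
    #include <stdio.h>

    struct wc { uint64_t wr_id; int status; };

    #define OUTSTANDING 8

    static struct wc outstanding[OUTSTANDING];
    static unsigned int head;

    /* cf. mlx5_ib_add_outstanding_wr(): remember the caller's cookie. */
    static unsigned int add_outstanding(uint64_t caller_wr_id)
    {
        unsigned int idx = head++ % OUTSTANDING;

        outstanding[idx].wr_id = caller_wr_id;
        return idx;
    }

    /* cf. handle_single_completion(): surface the stored wc, cookie intact. */
    static struct wc complete(unsigned int idx, int status)
    {
        outstanding[idx].status = status;
        return outstanding[idx];
    }

    int main(void)
    {
        struct wc w = complete(add_outstanding(0x1234), 0);

        printf("caller sees wr_id=0x%llx status=%d\n",
               (unsigned long long)w.wr_id, w.status);
        return 0;
    }
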
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/

i40iw_uk.c:
    138  u64 wr_id  in i40iw_qp_get_next_send_wqe() argument
    193  qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;  in i40iw_qp_get_next_send_wqe()
    274  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);  in i40iw_rdma_write()
    331  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);  in i40iw_rdma_read()
    385  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);  in i40iw_send()
    444  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);  in i40iw_inline_rdma_write()
    520  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);  in i40iw_inline_send()
    582  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);  in i40iw_stag_local_invalidate()
    624  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);  in i40iw_mw_bind()
    673  qp->rq_wrid_array[wqe_idx] = info->wr_id;  in i40iw_post_receive()
    [all …]

i40iw_user.h:
    238  u64 wr_id;  member
    258  u64 wr_id;  member
    264  u64 wr_id;  member
    410  u64 wr_id
    421  enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
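i40iw threads info->wr_id through every verb into i40iw_qp_get_next_send_wqe(), which parks it in a per-WQE tracking array (sq_wrtrk_array); receives go into rq_wrid_array the same way. A compilable sketch of that slot bookkeeping, with sizes and field names simplified:

    #include <stdint.h>
    #include <stdio.h>

    #define SQ_SIZE 32
    #define RQ_SIZE 32

    struct wrtrk { uint64_t wrid; };

    static struct wrtrk sq_wrtrk_array[SQ_SIZE];
    static uint64_t     rq_wrid_array[RQ_SIZE];
    static uint32_t     sq_tail, rq_tail;

    /* cf. i40iw_qp_get_next_send_wqe(): reserve a slot, stash the wrid. */
    static uint32_t get_next_send_wqe(uint64_t wr_id)
    {
        uint32_t wqe_idx = sq_tail++ % SQ_SIZE;

        sq_wrtrk_array[wqe_idx].wrid = wr_id;
        return wqe_idx;
    }

    /* cf. i40iw_post_receive(): same idea for the receive queue. */
    static uint32_t post_receive(uint64_t wr_id)
    {
        uint32_t wqe_idx = rq_tail++ % RQ_SIZE;

        rq_wrid_array[wqe_idx] = wr_id;
        return wqe_idx;
    }

    int main(void)
    {
        uint32_t s = get_next_send_wqe(0xaa);
        uint32_t r = post_receive(0xbb);

        printf("sq[%u]=0x%llx rq[%u]=0x%llx\n",
               s, (unsigned long long)sq_wrtrk_array[s].wrid,
               r, (unsigned long long)rq_wrid_array[r]);
        return 0;
    }
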
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/rdma/

vmw_pvrdma-abi.h:
    170  __aligned_u64 wr_id;  member
    175  __aligned_u64 wr_id;  member
    225  __aligned_u64 wr_id;  member

rdma_user_rxe.h:
     54  __aligned_u64 wr_id;  member
    127  __aligned_u64 wr_id;  member

mlx5_user_ioctl_verbs.h:
     39  __aligned_u64 wr_id;  member
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/rdma/

vmw_pvrdma-abi.h:
    153  __aligned_u64 wr_id;  member
    158  __aligned_u64 wr_id;  member
    208  __aligned_u64 wr_id;  member

rdma_user_rxe.h:
     38  __aligned_u64 wr_id;  member
    112  __aligned_u64 wr_id;  member
/kernel/linux/linux-5.10/include/uapi/rdma/

vmw_pvrdma-abi.h:
    230  __aligned_u64 wr_id;  /* wr id */  member
    238  __aligned_u64 wr_id;  /* wr id */  member
    291  __aligned_u64 wr_id;  member

rdma_user_rxe.h:
     76  __aligned_u64 wr_id;  member
    155  __aligned_u64 wr_id;  member

mlx5_user_ioctl_verbs.h:
     58  __aligned_u64 wr_id;  member
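In every one of these user ABI headers, wr_id is declared __aligned_u64: a 64-bit, alignment-stable opaque cookie that crosses the user/kernel boundary untouched. That width is what lets userspace store a pointer in it, which is the idiom most verbs consumers use. A minimal round-trip sketch of that idiom (plain C, no RDMA stack required):

    #include <stdint.h>
    #include <stdio.h>

    struct request_ctx { int tag; };         /* whatever the app tracks per WR */

    int main(void)
    {
        struct request_ctx req = { .tag = 7 };

        /* post side: the cookie is just the context pointer, widened */
        uint64_t wr_id = (uintptr_t)&req;

        /* completion side: narrow it back and resume where we left off */
        struct request_ctx *done = (struct request_ctx *)(uintptr_t)wr_id;

        printf("completed request tag=%d\n", done->tag);
        return 0;
    }
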
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/

cq.c:
    469  srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;  in post_pending_srq_wrs()
    476  (unsigned long long)pwr->wr_id);  in post_pending_srq_wrs()
    494  u64 wr_id;  in reap_srq_cqe() local
    497  wr_id = srq->sw_rq[rel_idx].wr_id;  in reap_srq_cqe()
    503  (unsigned long long)srq->sw_rq[rel_idx].wr_id);  in reap_srq_cqe()
    511  srq->sw_rq[srq->cidx].wr_id);  in reap_srq_cqe()
    522  (unsigned long long)srq->sw_rq[rel_idx].wr_id);  in reap_srq_cqe()
    525  return wr_id;  in reap_srq_cqe()
    717  *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;  in poll_cq()
    724  *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;  in poll_cq()
    [all …]
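cxgb4 keeps a software ring of cookies next to the hardware SRQ: post_pending_srq_wrs() writes wr_id at the producer index, reap_srq_cqe() reads it back when the completion arrives, and poll_cq() likewise returns the sw_sq/sw_rq entry at the consumer index as the cookie. A simplified, strictly in-order sketch (the real reap path also copes with out-of-order rel_idx values):

    #include <stdint.h>
    #include <stdio.h>

    #define RQ_SIZE 8

    struct sw_rqe { uint64_t wr_id; };

    struct sw_rq {
        struct sw_rqe sw_rq[RQ_SIZE];
        uint32_t pidx, cidx, in_use;
    };

    /* cf. post_pending_srq_wrs(): cookie goes in at the producer index. */
    static void post_rqe(struct sw_rq *srq, uint64_t wr_id)
    {
        srq->sw_rq[srq->pidx].wr_id = wr_id;
        srq->pidx = (srq->pidx + 1) % RQ_SIZE;
        srq->in_use++;
    }

    /* cf. reap_srq_cqe(): cookie comes back out at the consumer index. */
    static uint64_t reap_rqe(struct sw_rq *srq)
    {
        uint64_t wr_id = srq->sw_rq[srq->cidx].wr_id;

        srq->cidx = (srq->cidx + 1) % RQ_SIZE;
        srq->in_use--;
        return wr_id;
    }

    int main(void)
    {
        struct sw_rq srq = { 0 };

        post_rqe(&srq, 0x111);
        post_rqe(&srq, 0x222);
        printf("reaped wr_id=0x%llx\n", (unsigned long long)reap_rqe(&srq));
        return 0;
    }
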
/kernel/linux/linux-5.10/drivers/infiniband/hw/qedr/

qedr_roce_cm.c:
    586  qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;  in qedr_gsi_post_send()
    590  wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);  in qedr_gsi_post_send()
    656  qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;  in qedr_gsi_post_recv()
    687  wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;  in qedr_gsi_poll_cq()
    715  wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;  in qedr_gsi_poll_cq()
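The qedr GSI QP stores the caller's wr_id in shadow arrays at the producer index on post (wqe_wr_id for sends, rqe_wr_id for receives) and copies it into the polled work completion at the consumer index. A trimmed-down model of the receive half; the types and ring handling are simplified assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define RQ_SIZE 8

    struct wc { uint64_t wr_id; };

    static uint64_t rqe_wr_id[RQ_SIZE];
    static uint32_t rq_prod, rq_cons;

    /* cf. qedr_gsi_post_recv(): stash the cookie at the producer index. */
    static void gsi_post_recv(uint64_t wr_id)
    {
        rqe_wr_id[rq_prod % RQ_SIZE] = wr_id;
        rq_prod++;
    }

    /* cf. qedr_gsi_poll_cq(): hand the cookie back at the consumer index. */
    static int gsi_poll_cq(struct wc *wc, int max)
    {
        int i = 0;

        while (i < max && rq_cons != rq_prod) {
            wc[i].wr_id = rqe_wr_id[rq_cons % RQ_SIZE];
            rq_cons++;
            i++;
        }
        return i;
    }

    int main(void)
    {
        struct wc wc[2];
        int n;

        gsi_post_recv(0x10);
        gsi_post_recv(0x20);
        n = gsi_poll_cq(wc, 2);
        printf("polled %d, first wr_id=0x%llx\n",
               n, (unsigned long long)wc[0].wr_id);
        return 0;
    }
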
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/

qplib_fp.c:
     761  srqe->wr_id[0] = cpu_to_le32((u32)next);  in bnxt_qplib_post_srq_recv()
     762  srq->swq[next].wr_id = wqe->wr_id;  in bnxt_qplib_post_srq_recv()
    1741  swq->wr_id = wqe->wr_id;  in bnxt_qplib_post_send()
    1982  swq->wr_id = wqe->wr_id;  in bnxt_qplib_post_recv()
    2010  base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);  in bnxt_qplib_post_recv()
    2138  if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {  in __flush_sq()
    2146  cqe->wr_id = sq->swq[last].wr_id;  in __flush_sq()
    2196  cqe->wr_id = rq->swq[last].wr_id;  in __flush_rq()
    2291  sq->swq[peek_sq_cons_idx].wr_id ==  in do_wa9060()
    2365  cqe->wr_id = swq->wr_id;  in bnxt_qplib_cq_process_req()
    [all …]
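bnxt_re records wqe->wr_id in its shadow queue (swq) at post time and copies it into the synthesized cqe when flushing a dead QP; slots holding the reserved BNXT_QPLIB_FENCE_WRID sentinel mark driver-internal fence WQEs. A sketch of that flush filtering, under the assumption that fence slots simply produce no user-visible completion; the sentinel value here is illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define FENCE_WRID 0xFFFFFFFFFFFFFFFFULL  /* stand-in for BNXT_QPLIB_FENCE_WRID */
    #define SQ_SIZE 4

    struct swq { uint64_t wr_id; };
    struct cqe { uint64_t wr_id; };

    /* cf. __flush_sq(): emit flush completions, skipping internal fences. */
    static int flush_sq(const struct swq *sq, int n, struct cqe *out)
    {
        int emitted = 0;

        for (int i = 0; i < n; i++) {
            if (sq[i].wr_id == FENCE_WRID)    /* internal WQE: no user cqe */
                continue;
            out[emitted++].wr_id = sq[i].wr_id;
        }
        return emitted;
    }

    int main(void)
    {
        struct swq sq[SQ_SIZE] = {
            { .wr_id = 1 }, { .wr_id = FENCE_WRID },
            { .wr_id = 2 }, { .wr_id = 3 },
        };
        struct cqe cqes[SQ_SIZE];

        printf("flushed %d user completions\n", flush_sq(sq, SQ_SIZE, cqes));
        return 0;
    }
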
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/

mad.c:
     662  wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);  in mlx4_ib_send_to_slave()
    1331  recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |  in mlx4_ib_post_pv_qp_buf()
    1439  wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);  in mlx4_ib_send_to_wire()
    1478  struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];  in mlx4_ib_multiplex_mad()
    1479  int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);  in mlx4_ib_multiplex_mad()
    1743  tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];  in mlx4_ib_tunnel_comp_worker()
    1749  wc.wr_id &  in mlx4_ib_tunnel_comp_worker()
    1753  "buf:%lld\n", wc.wr_id);  in mlx4_ib_tunnel_comp_worker()
    1756  rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &  in mlx4_ib_tunnel_comp_worker()
    1758  tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah  in mlx4_ib_tunnel_comp_worker()
    [all …]
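mlx4's proxy/tunnel QPs overload wr_id as a bitfield: a buffer index in the low bits (masked with MLX4_NUM_TUNNEL_BUFS - 1), a proxy QP number inserted by MLX4_TUN_SET_WRID_QPN(), and a receive flag, so one completion worker can demultiplex everything with shifts and masks. The bit layout below is illustrative only; the real positions live in mlx4_ib.h.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TUNNEL_BUFS 256ULL            /* power of two, so -1 is a mask */
    #define WRID_RECV_FLAG  ((uint64_t)1 << 63)

    /* Illustrative stand-ins for MLX4_TUN_SET_WRID_QPN / MLX4_TUN_WRID_QPN. */
    #define SET_WRID_QPN(qpt) ((uint64_t)(qpt) << 32)
    #define WRID_QPN(wr_id)   ((unsigned int)(((wr_id) >> 32) & 0x3))

    int main(void)
    {
        /* post side: buffer index | qp-number field | recv flag */
        uint64_t wr_id = 17 | SET_WRID_QPN(2) | WRID_RECV_FLAG;

        /* completion side: take it apart again with masks */
        unsigned int qpt = WRID_QPN(wr_id);
        unsigned int ix  = (unsigned int)(wr_id & (NUM_TUNNEL_BUFS - 1));
        int is_recv      = !!(wr_id & WRID_RECV_FLAG);

        printf("qp=%u buf=%u recv=%d\n", qpt, ix, is_recv);
        return 0;
    }
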
/kernel/linux/linux-5.10/drivers/net/ethernet/cisco/enic/

vnic_rq.h:
     76  uint64_t wr_id;  member
    130  buf->wr_id = wrid;  in vnic_rq_post()

vnic_wq.h:
     61  uint64_t wr_id; /* Cookie */  member
    146  buf->wr_id = wrid;  in vnic_wq_post()
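Outside RDMA proper, the enic NIC uses the same trick under the same name: vnic_rq_post()/vnic_wq_post() attach an opaque wr_id cookie to each buffer so the service routine can hand it back to the upper layer untouched. A tiny stand-in for that buffer bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed stand-in for vnic_rq_buf / vnic_wq_buf. */
    struct vnic_buf {
        void    *data;
        uint64_t wr_id;    /* cookie, exactly as the header comments it */
    };

    /* cf. vnic_rq_post()/vnic_wq_post(): buf->wr_id = wrid; */
    static void buf_post(struct vnic_buf *buf, void *data, uint64_t wrid)
    {
        buf->data  = data;
        buf->wr_id = wrid;
    }

    int main(void)
    {
        char frame[64];
        struct vnic_buf buf;

        buf_post(&buf, frame, 0xabcd);
        printf("serviced buffer cookie=0x%llx\n", (unsigned long long)buf.wr_id);
        return 0;
    }
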
/kernel/linux/linux-5.10/drivers/net/ethernet/ibm/ehea/

ehea_qmr.h:
     87  u64 wr_id;  member
    126  u64 wr_id; /* work request ID */  member
    149  u64 wr_id; /* work request ID from WQE */  member