/net/9p/

trans_rdma.c, in p9_rdma_show_options():

    155  struct p9_trans_rdma *rdma = clnt->trans;
    157  if (rdma->port != P9_PORT)
    158          seq_printf(m, ",port=%u", rdma->port);
    159  if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
    160          seq_printf(m, ",sq=%u", rdma->sq_depth);
    161  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
    162          seq_printf(m, ",rq=%u", rdma->rq_depth);
    163  if (rdma->timeout != P9_RDMA_TIMEOUT)
    164          seq_printf(m, ",timeout=%lu", rdma->timeout);
    165  if (rdma->privport)
    [all …]

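The pattern here is the standard show_options idiom: emit a mount option only when it differs from its compiled-in default (P9_PORT, P9_RDMA_SQ_DEPTH, and so on), so the /proc/mounts line stays minimal. A small sketch of the same idiom, using hypothetical myfs_* names and defaults rather than the real 9p code:

    #include <linux/seq_file.h>

    #define MYFS_DEF_PORT   5640    /* illustrative defaults */
    #define MYFS_DEF_DEPTH  32

    struct myfs_opts {
            unsigned int port;
            unsigned int depth;
            bool privport;
    };

    /* Print only the options the user changed from their defaults. */
    static int myfs_show_options(struct seq_file *m, struct myfs_opts *opts)
    {
            if (opts->port != MYFS_DEF_PORT)
                    seq_printf(m, ",port=%u", opts->port);
            if (opts->depth != MYFS_DEF_DEPTH)
                    seq_printf(m, ",depth=%u", opts->depth);
            if (opts->privport)
                    seq_puts(m, ",privport");
            return 0;
    }
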
/net/sunrpc/xprtrdma/

svc_rdma_sendto.c:

  in svc_rdma_send_cid_init():
    116  static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
    119          cid->ci_queue_id = rdma->sc_sq_cq->res.id;
    120          cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);

  in svc_rdma_send_ctxt_alloc():
    124  svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
    133          size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
    137          buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
    140          addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
    141                                   rdma->sc_max_req_size, DMA_TO_DEVICE);
    142          if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
    145          svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
    [all …]

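svc_rdma_send_ctxt_alloc() follows the usual allocate-then-map sequence: kmalloc() a buffer, ib_dma_map_single() it for the device, and check the result with ib_dma_mapping_error() before publishing the context, freeing the buffer if the mapping fails. A minimal sketch of that sequence, with a helper name alloc_mapped_buf() of my own:

    #include <rdma/ib_verbs.h>
    #include <linux/slab.h>

    /* Allocate a kernel buffer and DMA-map it for sends; returns NULL
     * and leaks nothing on failure. Illustrative, not the svcrdma code. */
    static void *alloc_mapped_buf(struct ib_device *dev, size_t len, u64 *dma)
    {
            void *buf = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return NULL;
            *dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (ib_dma_mapping_error(dev, *dma)) {
                    kfree(buf);             /* unwind: mapping failed */
                    return NULL;
            }
            return buf;
    }
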
svc_rdma_recvfrom.c:

  in svc_rdma_recv_cid_init():
    118  static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
    121          cid->ci_queue_id = rdma->sc_rq_cq->res.id;
    122          cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);

  in svc_rdma_recv_ctxt_alloc():
    126  svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
    135          buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
    138          addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
    139                                   rdma->sc_max_req_size, DMA_FROM_DEVICE);
    140          if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
    143          svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
    155          ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
    [all …]

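The receive side mirrors the send side but maps with DMA_FROM_DEVICE, then describes the buffer to the device through a single scatter/gather element (line 155 sets its length). A sketch of filling that ib_sge, assuming, as in the quoted code, that the DMA address comes from the mapping and the lkey from the protection domain:

    #include <rdma/ib_verbs.h>

    /* Describe one mapped receive buffer to the device. In the real code
     * the length is sc_max_req_size and the lkey is pd->local_dma_lkey. */
    static void init_recv_sge(struct ib_sge *sge, u64 dma_addr,
                              u32 len, u32 lkey)
    {
            sge->addr   = dma_addr;
            sge->length = len;
            sge->lkey   = lkey;
    }
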
svc_rdma_rw.c:

  in svc_rdma_get_rw_ctxt():
     54  svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
     59          spin_lock(&rdma->sc_rw_ctxt_lock);
     60          node = llist_del_first(&rdma->sc_rw_ctxts);
     61          spin_unlock(&rdma->sc_rw_ctxt_lock);
     83          trace_svcrdma_no_rwctx_err(rdma, sges);

  in __svc_rdma_put_rw_ctxt() / svc_rdma_put_rw_ctxt():
     87  static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
     95  static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
     98          __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts);

  in svc_rdma_destroy_rw_ctxts():
    106  void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
    111          while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
    [all …]

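svc_rdma_rw.c keeps spare R/W contexts on an llist, and the locking is asymmetric: llist_del_first() tolerates only one concurrent deleter, so gets serialize on sc_rw_ctxt_lock, while puts use the lock-free llist_add(). A self-contained sketch of that cache discipline, with illustrative my_ctxt names:

    #include <linux/llist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_ctxt {
            struct llist_node node;
            /* ... payload ... */
    };

    /* Get: llist_del_first() permits only a single deleter at a time,
     * so removal is serialized by the lock. */
    static struct my_ctxt *ctxt_get(struct llist_head *cache, spinlock_t *lock)
    {
            struct llist_node *n;

            spin_lock(lock);
            n = llist_del_first(cache);
            spin_unlock(lock);
            if (n)
                    return llist_entry(n, struct my_ctxt, node);
            return kzalloc(sizeof(struct my_ctxt), GFP_KERNEL); /* cache miss */
    }

    /* Put: llist_add() is safe against concurrent producers, no lock. */
    static void ctxt_put(struct llist_head *cache, struct my_ctxt *ctxt)
    {
            llist_add(&ctxt->node, cache);
    }
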
svc_rdma_transport.c:

  in svc_rdma_cma_handler():
    274  struct svcxprt_rdma *rdma = cma_id->context;
    275  struct svc_xprt *xprt = &rdma->sc_xprt;
    279  clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);

  in svc_rdma_detach():
    538  struct svcxprt_rdma *rdma =
    541          rdma_disconnect(rdma->sc_cm_id);

  in __svc_rdma_free():
    546  struct svcxprt_rdma *rdma =
    550          if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
    551                  ib_drain_qp(rdma->sc_qp);
    553          svc_rdma_flush_recv_queues(rdma);
    555          svc_rdma_destroy_rw_ctxts(rdma);
    [all …]

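__svc_rdma_free() illustrates the required teardown order: drain the QP first so every posted work request either completes or flushes, and only then release the receive buffers and context caches that in-flight completions might still reference. A condensed, commented reading of the quoted lines (error handling elided):

    /* Teardown ordering only; not the full __svc_rdma_free(). */
    static void svc_rdma_free_sketch(struct svcxprt_rdma *rdma)
    {
            /* 1. Quiesce the hardware: waits for all WRs to flush. */
            if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                    ib_drain_qp(rdma->sc_qp);

            /* 2. Receive buffers can now be reclaimed safely. */
            svc_rdma_flush_recv_queues(rdma);

            /* 3. Free the cached R/W contexts last. */
            svc_rdma_destroy_rw_ctxts(rdma);
    }
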
svc_rdma_backchannel.c:

  in svc_rdma_bc_sendto():
     75  static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
     82          rctxt = svc_rdma_recv_ctxt_get(rdma);
     86          ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqst->rq_snd_buf);
     87          svc_rdma_recv_ctxt_put(rdma, rctxt);
     96          ret = svc_rdma_send(rdma, sctxt);
    101          svc_rdma_send_ctxt_put(rdma, sctxt);

  in rpcrdma_bc_send_request():
    145  rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
    153          ctxt = svc_rdma_send_ctxt_get(rdma);
    169          rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
    175          svc_rdma_send_ctxt_put(rdma, ctxt);
    [all …]

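rpcrdma_bc_send_request() shows the ownership rule for send contexts: once svc_rdma_send() succeeds the hardware owns the context and the Send completion releases it, so the caller puts it back only on the error path. A condensed sketch, with return codes simplified:

    static int bc_send_sketch(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
    {
            struct svc_rdma_send_ctxt *ctxt;
            int rc;

            ctxt = svc_rdma_send_ctxt_get(rdma);
            if (!ctxt)
                    return -ENOMEM;

            rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
            if (rc)
                    /* Send never reached the device: we still own ctxt. */
                    svc_rdma_send_ctxt_put(rdma, ctxt);
            return rc;
    }
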
/net/rds/

send.c:

  in rds_send_xmit():
    281          (rm->rdma.op_active &&
    310          if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
    311                  rm->m_final_op = &rm->rdma;
    316                  ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
    353          ops_present = (rm->atomic.op_active || rm->rdma.op_active);
    356          if (rm->rdma.op_active && !rm->rdma.op_silent)

  in rds_rdma_send_complete():
    509          ro = &rm->rdma;

  in __rds_send_complete():
    580          ro = &rm->rdma;

  in rds_send_remove_from_sock():
    646          struct rm_rdma_op *ro = &rm->rdma;
    659          rm->rdma.op_notifier = NULL;
    [all …]

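rds_send_xmit() walks each message through its component operations, and the cp_xmit_*_sent flags guarantee that each op is handed to the transport at most once even though the function can be re-entered after a partial send. A condensed, commented reading of lines 310 to 316 (control flow simplified, surrounding loop omitted):

    if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
            rm->m_final_op = &rm->rdma;
            /* Hand the RDMA op to the transport (e.g. rds_ib_xmit_rdma). */
            ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
            if (ret)
                    break;                  /* transport busy: retry later */
            cp->cp_xmit_rdma_sent = 1;      /* never hand this op off twice */
    }
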
ib_send.c:

  in rds_ib_send_unmap_rdma():
    114          rds_ib_send_complete(container_of(op, struct rds_message, rdma),

  in rds_ib_send_unmap_op():
    167          rm = container_of(send->s_op, struct rds_message, rdma);

  in rds_ib_xmit():
    579          if (rm->rdma.op_active) {
    582                  ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
    615          if (rm->rdma.op_active && rm->rdma.op_fence)

  in rds_ib_xmit_rdma():
    975          rds_message_addref(container_of(op, struct rds_message, rdma));

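These completion and unmap paths recover the enclosing message from the embedded op with container_of(), which works because rm_rdma_op is stored inline in struct rds_message rather than referenced by pointer. The idiom in isolation:

    #include <linux/kernel.h>

    /* Map an embedded rm_rdma_op back to its parent rds_message;
     * 'rdma' is the member name inside struct rds_message. */
    static struct rds_message *op_to_message(struct rm_rdma_op *op)
    {
            return container_of(op, struct rds_message, rdma);
    }
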
message.c, in rds_message_purge():

    162          if (rm->rdma.op_active)
    163                  rds_rdma_free_op(&rm->rdma);
    164          if (rm->rdma.op_rdma_mr)
    165                  kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);

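rds_message_purge() drops the MR through kref_put(), so __rds_put_mr_final() runs exactly once, when the last reference disappears, no matter which path released it. The bare kref pattern, with illustrative my_obj names:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_obj {
            struct kref refcount;
            /* ... payload ... */
    };

    static void my_obj_release(struct kref *kref)
    {
            struct my_obj *obj = container_of(kref, struct my_obj, refcount);

            kfree(obj);     /* runs exactly once, on the final put */
    }

    static struct my_obj *my_obj_alloc(void)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (obj)
                    kref_init(&obj->refcount);      /* refcount starts at 1 */
            return obj;
    }

    static void my_obj_put(struct my_obj *obj)
    {
            kref_put(&obj->refcount, my_obj_release);
    }
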
Makefile:

      5  loop.o page.o rdma.o

rdma.c:

  in rds_cmsg_rdma_args():
    613          struct rm_rdma_op *op = &rm->rdma;
    623          || rm->rdma.op_active)

  in rds_cmsg_rdma_dest():
    840          rm->rdma.op_rdma_mr = mr;

  in rds_cmsg_rdma_map():
    859          &rm->rdma.op_rdma_mr, rm->m_conn_path);

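These three functions are the handlers behind the RDS_CMSG_RDMA_* ancillary messages a sender attaches to sendmsg(); send.c dispatches on cmsg_type, and the duplicate-op check at line 623 rejects a second RDMA op on one message. An abbreviated view of that dispatch (handler signatures shortened; some kernels pass extra arguments):

    switch (cmsg->cmsg_type) {
    case RDS_CMSG_RDMA_ARGS:        /* describe an RDMA op inline */
            ret = rds_cmsg_rdma_args(rs, rm, cmsg);
            break;
    case RDS_CMSG_RDMA_DEST:        /* reference an existing MR by cookie */
            ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
            break;
    case RDS_CMSG_RDMA_MAP:         /* register and attach a new MR */
            ret = rds_cmsg_rdma_map(rs, rm, cmsg);
            break;
    }
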
recv.c, in rds_recv_incoming_exthdrs():

    171          struct rds_ext_header_rdma rdma;    /* member of the local 'buffer' union */
    183          rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);

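The extension header travels in wire (big-endian) byte order, so the rkey must pass through be32_to_cpu() before it is usable as a host-order key in rds_rdma_unuse(). In isolation, assuming the struct from net/rds/rds.h:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* h_rdma_rkey is __be32 on the wire; convert before host-side use. */
    static u32 exthdr_rkey(const struct rds_ext_header_rdma *hdr)
    {
            return be32_to_cpu(hdr->h_rdma_rkey);
    }
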
rds.h:

    475          } rdma;    /* member of struct rds_message */
