Lines Matching full:mr (net/sunrpc/xprtrdma/frwr_ops.c)
49 struct rpcrdma_mr *mr) in frwr_cid_init() argument
51 struct rpc_rdma_cid *cid = &mr->mr_cid; in frwr_cid_init()
54 cid->ci_completion_id = mr->mr_ibmr->res.id; in frwr_cid_init()
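Only the second assignment in frwr_cid_init() matches the search term. A minimal sketch of the whole helper, assuming the queue half of the completion ID is taken from the send CQ's restrack entry (ep->re_attr.send_cq does not appear in these fragments):

static void frwr_cid_init(struct rpcrdma_ep *ep, struct rpcrdma_mr *mr)
{
	struct rpc_rdma_cid *cid = &mr->mr_cid;

	/* Assumption: the queue ID comes from the send CQ; only the
	 * completion-ID line is actually part of this listing.
	 */
	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = mr->mr_ibmr->res.id;
}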
57 static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) in frwr_mr_unmap() argument
59 if (mr->mr_device) { in frwr_mr_unmap()
60 trace_xprtrdma_mr_unmap(mr); in frwr_mr_unmap()
61 ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents, in frwr_mr_unmap()
62 mr->mr_dir); in frwr_mr_unmap()
63 mr->mr_device = NULL; in frwr_mr_unmap()
68 * frwr_mr_release - Destroy one MR
69 * @mr: MR allocated by frwr_mr_init
72 void frwr_mr_release(struct rpcrdma_mr *mr) in frwr_mr_release() argument
76 frwr_mr_unmap(mr->mr_xprt, mr); in frwr_mr_release()
78 rc = ib_dereg_mr(mr->mr_ibmr); in frwr_mr_release()
80 trace_xprtrdma_frwr_dereg(mr, rc); in frwr_mr_release()
81 kfree(mr->mr_sg); in frwr_mr_release()
82 kfree(mr); in frwr_mr_release()
85 static void frwr_mr_put(struct rpcrdma_mr *mr) in frwr_mr_put() argument
87 frwr_mr_unmap(mr->mr_xprt, mr); in frwr_mr_put()
89 /* The MR is returned to the req's MR free list instead in frwr_mr_put()
90 * of to the xprt's MR free list. No spinlock is needed. in frwr_mr_put()
92 rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); in frwr_mr_put()
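The comment above depends on rpcrdma_mr_push() taking no lock on this path. A minimal sketch of that helper, assuming it is the plain list_add() wrapper declared in xprt_rdma.h (the header itself is not part of this listing):

static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	/* rl_free_mrs belongs to a single rpcrdma_req, and only the RPC
	 * task that owns that request walks it, so no spinlock is needed.
	 */
	list_add(&mr->mr_list, list);
}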
107 struct rpcrdma_mr *mr; in frwr_reset() local
109 while ((mr = rpcrdma_mr_pop(&req->rl_registered))) in frwr_reset()
110 frwr_mr_put(mr); in frwr_reset()
114 * frwr_mr_init - Initialize one MR
116 * @mr: generic MR to prepare for FRWR
121 int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) in frwr_mr_init() argument
137 mr->mr_xprt = r_xprt; in frwr_mr_init()
138 mr->mr_ibmr = frmr; in frwr_mr_init()
139 mr->mr_device = NULL; in frwr_mr_init()
140 INIT_LIST_HEAD(&mr->mr_list); in frwr_mr_init()
141 init_completion(&mr->mr_linv_done); in frwr_mr_init()
142 frwr_cid_init(ep, mr); in frwr_mr_init()
145 mr->mr_sg = sg; in frwr_mr_init()
150 trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr)); in frwr_mr_init()
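The allocation half of frwr_mr_init() falls between the matched lines. A hedged sketch of what it amounts to; the names ep->re_pd, ep->re_mrtype and ep->re_max_fr_depth are assumptions based on the rest of this file, and the real code may use NUMA-aware allocation flags and a different ordering:

	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;

	/* One FRWR MR plus a scatterlist deep enough for the largest
	 * chunk this connection will register.
	 */
	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		return PTR_ERR(frmr);	/* traced via trace_xprtrdma_frwr_alloc() above */

	sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ib_dereg_mr(frmr);
		return -ENOMEM;
	}
	sg_init_table(sg, depth);
	/* ...followed by the assignments shown above (mr->mr_xprt,
	 * mr->mr_ibmr, mr->mr_sg = sg) and frwr_cid_init(ep, mr).
	 */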
276 * @mr: MR to fill in
282 * On success, @mr is filled in.
287 struct rpcrdma_mr *mr) in frwr_map() argument
298 sg_set_page(&mr->mr_sg[i], seg->mr_page, in frwr_map()
309 mr->mr_dir = rpcrdma_data_dir(writing); in frwr_map()
310 mr->mr_nents = i; in frwr_map()
312 dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents, in frwr_map()
313 mr->mr_dir); in frwr_map()
316 mr->mr_device = ep->re_id->device; in frwr_map()
318 ibmr = mr->mr_ibmr; in frwr_map()
319 n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE); in frwr_map()
328 reg_wr = &mr->mr_regwr; in frwr_map()
329 reg_wr->mr = ibmr; in frwr_map()
335 mr->mr_handle = ibmr->rkey; in frwr_map()
336 mr->mr_length = ibmr->length; in frwr_map()
337 mr->mr_offset = ibmr->iova; in frwr_map()
338 trace_xprtrdma_mr_map(mr); in frwr_map()
343 trace_xprtrdma_frwr_sgerr(mr, i); in frwr_map()
347 trace_xprtrdma_frwr_maperr(mr, n); in frwr_map()
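Between the matched lines, frwr_map() also refreshes the MR's rkey and sets the access rights on the REG_MR work request; only reg_wr->mr is visible above. A hedged sketch of that elided step (the exact key masking is an assumption):

	u8 key;

	/* Bump the 8-bit key portion of the rkey so a stale handle from a
	 * previous registration of this MR cannot match the new one, then
	 * grant remote access appropriate to the chunk direction.
	 */
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;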
356 * Each flushed MR gets destroyed after the QP has drained.
361 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_fastreg() local
364 trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid); in frwr_wc_fastreg()
387 struct rpcrdma_mr *mr; in frwr_send() local
393 list_for_each_entry(mr, &req->rl_registered, mr_list) { in frwr_send()
394 trace_xprtrdma_mr_fastreg(mr); in frwr_send()
396 mr->mr_cqe.done = frwr_wc_fastreg; in frwr_send()
397 mr->mr_regwr.wr.next = post_wr; in frwr_send()
398 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe; in frwr_send()
399 mr->mr_regwr.wr.num_sge = 0; in frwr_send()
400 mr->mr_regwr.wr.opcode = IB_WR_REG_MR; in frwr_send()
401 mr->mr_regwr.wr.send_flags = 0; in frwr_send()
402 post_wr = &mr->mr_regwr.wr; in frwr_send()
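The listing stops before the chain built here is consumed. For context, post_wr starts out pointing at the request's Send WR, so each IB_WR_REG_MR WR ends up linked ahead of it; a minimal sketch of the tail, assuming the Send WR lives in req->rl_wr as elsewhere in xprtrdma:

	/* post_wr was set to &req->rl_wr (the Send WR) before the loop
	 * above; each REG_MR WR was then linked in front of it, so posting
	 * the head of the chain posts the registrations followed by the
	 * Send in a single verb call.
	 */
	return ib_post_send(ep->re_id->qp, post_wr, NULL);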
423 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
430 struct rpcrdma_mr *mr; in frwr_reminv() local
432 list_for_each_entry(mr, mrs, mr_list) in frwr_reminv()
433 if (mr->mr_handle == rep->rr_inv_rkey) { in frwr_reminv()
434 list_del_init(&mr->mr_list); in frwr_reminv()
435 trace_xprtrdma_mr_reminv(mr); in frwr_reminv()
436 frwr_mr_put(mr); in frwr_reminv()
437 break; /* only one invalidated MR per RPC */ in frwr_reminv()
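For context, rep->rr_inv_rkey is filled in on the Receive completion path, which is outside this listing. A hedged sketch of where it would come from, using the standard ib_wc fields for Send With Invalidate:

	/* In the Receive completion handler (an assumption; not shown in
	 * this listing): record the rkey the server invalidated, or 0 if
	 * it did not use Send With Invalidate for this reply.
	 */
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
		rep->rr_inv_rkey = wc->ex.invalidate_rkey;
	else
		rep->rr_inv_rkey = 0;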
441 static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr) in frwr_mr_done() argument
444 frwr_mr_put(mr); in frwr_mr_done()
456 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_localinv() local
459 trace_xprtrdma_wc_li(wc, &mr->mr_cid); in frwr_wc_localinv()
460 frwr_mr_done(wc, mr); in frwr_wc_localinv()
470 * Awaken anyone waiting for an MR to finish being fenced.
475 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_localinv_wake() local
478 trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid); in frwr_wc_localinv_wake()
479 frwr_mr_done(wc, mr); in frwr_wc_localinv_wake()
480 complete(&mr->mr_linv_done); in frwr_wc_localinv_wake()
501 struct rpcrdma_mr *mr; in frwr_unmap_sync() local
510 mr = rpcrdma_mr_pop(&req->rl_registered); in frwr_unmap_sync()
512 trace_xprtrdma_mr_localinv(mr); in frwr_unmap_sync()
515 last = &mr->mr_invwr; in frwr_unmap_sync()
517 last->wr_cqe = &mr->mr_cqe; in frwr_unmap_sync()
522 last->ex.invalidate_rkey = mr->mr_handle; in frwr_unmap_sync()
528 } while ((mr = rpcrdma_mr_pop(&req->rl_registered))); in frwr_unmap_sync()
530 mr = container_of(last, struct rpcrdma_mr, mr_invwr); in frwr_unmap_sync()
537 reinit_completion(&mr->mr_linv_done); in frwr_unmap_sync()
551 wait_for_completion(&mr->mr_linv_done); in frwr_unmap_sync()
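The matched lines show only part of each LOCAL_INV work request. A sketch of the elided pieces, assuming the usual pattern here: every WR is IB_WR_LOCAL_INV with frwr_wc_localinv as its handler, the final WR's handler is switched to frwr_wc_localinv_wake, and the caller sleeps on the last MR's mr_linv_done after posting the chain (first, the head of the WR chain, is built in the pop loop but does not match the search term):

	/* Per-WR setup inside the pop loop */
	last->next = NULL;
	last->sg_list = NULL;
	last->num_sge = 0;
	last->opcode = IB_WR_LOCAL_INV;
	last->send_flags = IB_SEND_SIGNALED;
	mr->mr_cqe.done = frwr_wc_localinv;

	/* After the loop: only the final WR wakes the waiter, because
	 * strong send queue ordering guarantees all earlier LOCAL_INV WRs
	 * have completed by the time it does.
	 */
	last->wr_cqe->done = frwr_wc_localinv_wake;

	rc = ib_post_send(ep->re_id->qp, first, NULL);
	if (!rc)
		wait_for_completion(&mr->mr_linv_done);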
572 struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe); in frwr_wc_localinv_done() local
576 trace_xprtrdma_wc_li_done(wc, &mr->mr_cid); in frwr_wc_localinv_done()
578 /* Ensure that @rep is generated before the MR is released */ in frwr_wc_localinv_done()
579 rep = mr->mr_req->rl_reply; in frwr_wc_localinv_done()
588 frwr_mr_put(mr); in frwr_wc_localinv_done()
606 struct rpcrdma_mr *mr; in frwr_unmap_async() local
613 mr = rpcrdma_mr_pop(&req->rl_registered); in frwr_unmap_async()
615 trace_xprtrdma_mr_localinv(mr); in frwr_unmap_async()
618 last = &mr->mr_invwr; in frwr_unmap_async()
620 last->wr_cqe = &mr->mr_cqe; in frwr_unmap_async()
625 last->ex.invalidate_rkey = mr->mr_handle; in frwr_unmap_async()
631 } while ((mr = rpcrdma_mr_pop(&req->rl_registered))); in frwr_unmap_async()
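frwr_unmap_async() builds the same LOCAL_INV chain but must not sleep. A sketch of its differing tail, assuming the last WR's handler is switched to frwr_wc_localinv_done (shown above) so reply processing resumes from the completion handler rather than from a waiter:

	/* The final WR is the only one that needs a special handler: when
	 * it completes, frwr_wc_localinv_done() above picks up
	 * mr->mr_req->rl_reply and releases the MRs.  Error handling for a
	 * failed post is elided from this sketch.
	 */
	last->wr_cqe->done = frwr_wc_localinv_done;
	last->send_flags = IB_SEND_SIGNALED;

	rc = ib_post_send(ep->re_id->qp, first, NULL);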
664 * frwr_wp_create - Create an MR for padding Write chunks
673 struct rpcrdma_mr *mr; in frwr_wp_create() local
675 mr = rpcrdma_mr_get(r_xprt); in frwr_wp_create()
676 if (!mr) in frwr_wp_create()
678 mr->mr_req = NULL; in frwr_wp_create()
679 ep->re_write_pad_mr = mr; in frwr_wp_create()
684 if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr))) in frwr_wp_create()
686 trace_xprtrdma_mr_fastreg(mr); in frwr_wp_create()
688 mr->mr_cqe.done = frwr_wc_fastreg; in frwr_wp_create()
689 mr->mr_regwr.wr.next = NULL; in frwr_wp_create()
690 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe; in frwr_wp_create()
691 mr->mr_regwr.wr.num_sge = 0; in frwr_wp_create()
692 mr->mr_regwr.wr.opcode = IB_WR_REG_MR; in frwr_wp_create()
693 mr->mr_regwr.wr.send_flags = 0; in frwr_wp_create()
695 return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL); in frwr_wp_create()
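The rpcrdma_mr_seg handed to frwr_map() above is initialized between the matched lines. A hedged sketch: the pad MR is assumed to cover a single XDR_UNIT of zeroes in a buffer hung off the endpoint (ep->re_write_pad is a guessed name; only the mr_page/mr_offset/mr_len field names follow struct rpcrdma_mr_seg usage elsewhere in this file):

	/* Register one XDR word of zeroes that the server may target with
	 * roundup padding at the end of an unaligned Write chunk.
	 */
	seg.mr_offset = offset_in_page(ep->re_write_pad);
	seg.mr_page = virt_to_page(ep->re_write_pad);
	seg.mr_len = XDR_UNIT;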