
Lines matching refs:rdma — server-side RPC-over-RDMA read/write chunk code in the Linux kernel (net/sunrpc/xprtrdma/svc_rdma_rw.c). Each entry below gives the file's line number, the matching source fragment, the enclosing function, and whether the match is a function argument or a local variable.

53 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)  in svc_rdma_get_rw_ctxt()  argument
57 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
59 ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
62 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
64 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
82 trace_svcrdma_no_rwctx_err(rdma, sges); in svc_rdma_get_rw_ctxt()
86 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
91 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_put_rw_ctxt()
92 list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
93 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_put_rw_ctxt()
101 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) in svc_rdma_destroy_rw_ctxts() argument
105 while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) { in svc_rdma_destroy_rw_ctxts()
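The matches at lines 53-105 show the file keeping reusable R/W contexts on a spinlock-protected free list hung off the transport (sc_rw_ctxt_lock / sc_rw_ctxts). Below is a minimal sketch of that free-list pattern, not the kernel's actual code: the struct layouts and allocation policy are assumptions, and the real context also carries an sg_table sized for the requested number of SGEs. The later sketches assume the kernel headers and svc_rdma/rw.h structures named in the matches, which are not reproduced here.

/* Sketch only: a per-transport free list of reusable contexts, as the
 * get/put/destroy matches above suggest. Struct layouts are assumptions.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_xprt {
    spinlock_t ctxt_lock;          /* stands in for sc_rw_ctxt_lock */
    struct list_head free_ctxts;   /* stands in for sc_rw_ctxts */
};

struct my_ctxt {
    struct list_head list;         /* stands in for rw_list */
};

static struct my_ctxt *my_get_ctxt(struct my_xprt *xprt)
{
    struct my_ctxt *ctxt;

    spin_lock(&xprt->ctxt_lock);
    ctxt = list_first_entry_or_null(&xprt->free_ctxts, struct my_ctxt, list);
    if (ctxt)
        list_del(&ctxt->list);
    spin_unlock(&xprt->ctxt_lock);

    if (!ctxt)                     /* free list empty: allocate a fresh one */
        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
    return ctxt;
}

static void my_put_ctxt(struct my_xprt *xprt, struct my_ctxt *ctxt)
{
    spin_lock(&xprt->ctxt_lock);
    list_add(&ctxt->list, &xprt->free_ctxts);
    spin_unlock(&xprt->ctxt_lock);
}

static void my_destroy_ctxts(struct my_xprt *xprt)
{
    struct my_ctxt *ctxt;

    while ((ctxt = list_first_entry_or_null(&xprt->free_ctxts,
                                            struct my_ctxt, list)) != NULL) {
        list_del(&ctxt->list);
        kfree(ctxt);
    }
}

Recycling contexts this way avoids a fresh allocation and scatterlist setup on every RDMA Read or Write chunk.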
122 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, in svc_rdma_rw_ctx_init() argument
129 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, in svc_rdma_rw_ctx_init()
133 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_rw_ctx_init()
134 trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret); in svc_rdma_rw_ctx_init()
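Line 129 hands the context's scatterlist to rdma_rw_ctx_init(), which DMA-maps it and works out how many work requests the transfer needs; on failure the context goes straight back to the free list (line 133). A hedged sketch of that wrapper follows; the argument list and any field names beyond what the matches show are assumptions.

/* Sketch: wrap rdma_rw_ctx_init() and recycle the context on error.
 * Everything not visible in the matches above is an assumption.
 */
static int my_rw_ctx_init(struct svcxprt_rdma *rdma,
                          struct svc_rdma_rw_ctxt *ctxt,
                          u64 offset, u32 handle,
                          enum dma_data_direction direction)
{
    int ret;

    ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
                           ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                           0, offset, handle, direction);
    if (ret < 0) {
        svc_rdma_put_rw_ctxt(rdma, ctxt);   /* DMA mapping failed */
        return -EIO;
    }
    return ret;   /* number of WRs this context will post */
}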
155 static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_cid_init() argument
158 cid->ci_queue_id = rdma->sc_sq_cq->res.id; in svc_rdma_cc_cid_init()
159 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_cc_cid_init()
162 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_init() argument
165 svc_rdma_cc_cid_init(rdma, &cc->cc_cid); in svc_rdma_cc_init()
166 cc->cc_rdma = rdma; in svc_rdma_cc_init()
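Lines 155-166 show each chunk context being stamped with a completion ID built from the send CQ's resource ID plus a per-transport atomic counter; the svcrdma tracepoints use this pair to correlate posted chains with their completions. A small sketch (the cid struct itself is an assumption):

/* Sketch: build a (CQ id, monotonic counter) completion ID for tracing. */
struct my_cid {
    u32 ci_queue_id;
    u32 ci_completion_id;
};

static void my_cid_init(struct svcxprt_rdma *rdma, struct my_cid *cid)
{
    cid->ci_queue_id = rdma->sc_sq_cq->res.id;
    cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}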
175 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_cc_release() local
181 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_cc_release()
182 rdma->sc_port_num, ctxt->rw_sg_table.sgl, in svc_rdma_cc_release()
184 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_cc_release()
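Lines 175-184 are the release path: every R/W context attached to the chunk context is unmapped with rdma_rw_ctx_destroy() and returned to the free list. Roughly, under the same assumptions as the sketches above (the cc_rwctxts list name and the direction argument are guesses):

/* Sketch: tear down each rw context and recycle it. */
static void my_cc_release(struct svc_rdma_chunk_ctxt *cc,
                          enum dma_data_direction dir)
{
    struct svcxprt_rdma *rdma = cc->cc_rdma;
    struct svc_rdma_rw_ctxt *ctxt;

    while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
        list_del(&ctxt->rw_list);
        rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                            rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                            ctxt->rw_nents, dir);
        svc_rdma_put_rw_ctxt(rdma, ctxt);
    }
}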
208 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk) in svc_rdma_write_info_alloc() argument
220 svc_rdma_cc_init(rdma, &info->wi_cc); in svc_rdma_write_info_alloc()
243 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_write_done() local
249 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_write_done()
250 wake_up(&rdma->sc_send_wait); in svc_rdma_write_done()
253 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); in svc_rdma_write_done()
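Lines 243-253 are the Write completion handler's bookkeeping: the send-queue slots the chunk consumed are credited back to sc_sq_avail, anyone sleeping on sc_send_wait is woken, and a flushed or failed completion closes the transport. Condensed into a sketch (the wr_cqe-to-context lookup that precedes this is omitted):

/* Sketch: return SQ credits on completion; close the transport on error. */
static void my_write_done(struct svc_rdma_chunk_ctxt *cc, struct ib_wc *wc)
{
    struct svcxprt_rdma *rdma = cc->cc_rdma;

    atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
    wake_up(&rdma->sc_send_wait);

    if (wc->status != IB_WC_SUCCESS)
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
}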
271 svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma) in svc_rdma_read_info_alloc() argument
279 svc_rdma_cc_init(rdma, &info->ri_cc); in svc_rdma_read_info_alloc()
301 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_wc_read_done() local
307 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_wc_read_done()
308 wake_up(&rdma->sc_send_wait); in svc_rdma_wc_read_done()
311 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_read_done()
312 svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt); in svc_rdma_wc_read_done()
314 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
316 &rdma->sc_read_complete_q); in svc_rdma_wc_read_done()
318 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags); in svc_rdma_wc_read_done()
319 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
321 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_read_done()
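Lines 301-321 show the Read completion path: SQ credits are returned first; an error drops the receive context and marks the transport for close; a successful completion moves the receive context onto sc_read_complete_q under sc_rq_dto_lock, sets XPT_DATA before dropping the lock, and enqueues the transport so a server thread can resume the request. A condensed sketch (the rc_list member and the read_info layout are assumptions):

/* Sketch: hand a completed Read chain back to the receive path. */
static void my_read_done(struct svc_rdma_read_info *info, struct ib_wc *wc)
{
    struct svcxprt_rdma *rdma = info->ri_cc.cc_rdma;

    atomic_add(info->ri_cc.cc_sqecount, &rdma->sc_sq_avail);
    wake_up(&rdma->sc_send_wait);

    if (wc->status != IB_WC_SUCCESS) {
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
    } else {
        spin_lock(&rdma->sc_rq_dto_lock);
        list_add_tail(&info->ri_readctxt->rc_list,
                      &rdma->sc_read_complete_q);
        /* XPT_DATA is set before the dto lock is released, as above */
        set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
        spin_unlock(&rdma->sc_rq_dto_lock);
    }

    /* wake a server thread to process either the data or the close */
    svc_xprt_enqueue(&rdma->sc_xprt);
}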
336 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_post_chunk_ctxt() local
337 struct svc_xprt *xprt = &rdma->sc_xprt; in svc_rdma_post_chunk_ctxt()
344 if (cc->cc_sqecount > rdma->sc_sq_depth) in svc_rdma_post_chunk_ctxt()
353 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_post_chunk_ctxt()
354 rdma->sc_port_num, cqe, first_wr); in svc_rdma_post_chunk_ctxt()
360 &rdma->sc_sq_avail) > 0) { in svc_rdma_post_chunk_ctxt()
362 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); in svc_rdma_post_chunk_ctxt()
368 trace_svcrdma_sq_full(rdma); in svc_rdma_post_chunk_ctxt()
369 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
370 wait_event(rdma->sc_send_wait, in svc_rdma_post_chunk_ctxt()
371 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); in svc_rdma_post_chunk_ctxt()
372 trace_svcrdma_sq_retry(rdma); in svc_rdma_post_chunk_ctxt()
375 trace_svcrdma_sq_post_err(rdma, ret); in svc_rdma_post_chunk_ctxt()
382 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
383 wake_up(&rdma->sc_send_wait); in svc_rdma_post_chunk_ctxt()
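Lines 336-383 are the heart of the file: the R/W contexts are turned into one chain of work requests with rdma_rw_ctx_wrs(), send-queue slots are reserved by decrementing sc_sq_avail, ib_post_send() posts the whole chain, and when the SQ is full the caller sleeps on sc_send_wait until completions (see the handlers above) return enough credits. A hedged sketch of that flow-control loop; the cc_rwctxts and cc_cqe names are assumptions and the error path is condensed:

/* Sketch: reserve SQ slots, post the chained WRs, wait when the SQ is full. */
static int my_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
    struct svcxprt_rdma *rdma = cc->cc_rdma;
    struct ib_send_wr *first_wr = NULL;
    const struct ib_send_wr *bad_wr;
    struct ib_cqe *cqe = &cc->cc_cqe;
    struct svc_rdma_rw_ctxt *ctxt;
    int ret;

    if (cc->cc_sqecount > rdma->sc_sq_depth)
        return -EINVAL;                 /* chain can never fit on this SQ */

    /* Chain every context's WRs together; only the final WR is signaled */
    list_for_each_entry(ctxt, &cc->cc_rwctxts, rw_list) {
        first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                   rdma->sc_port_num, cqe, first_wr);
        cqe = NULL;
    }

    do {
        if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) {
            ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
            if (ret)
                break;
            return 0;
        }

        /* SQ full: give the credits back and wait for completions */
        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wait_event(rdma->sc_send_wait,
                   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
    } while (1);

    /* post failed: return the credits, wake waiters, close the transport */
    atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
    wake_up(&rdma->sc_send_wait);
    set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
    return -ENOTCONN;
}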
445 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_build_writes() local
463 ctxt = svc_rdma_get_rw_ctxt(rdma, in svc_rdma_build_writes()
469 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle, in svc_rdma_build_writes()
491 trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no, in svc_rdma_build_writes()
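Lines 445-491 are from the write-builder loop: for each segment of a Write chunk it grabs an R/W context sized for the remaining SGEs, DMA-maps it against the segment's handle and offset, and records how many send-queue slots the resulting WRs will need; a chunk too small for the payload is reported via trace_svcrdma_small_wrch_err(). One iteration might look roughly like this (the context and segment bookkeeping is assumed):

/* Sketch of one write-builder iteration: map a segment and account for it. */
static int my_build_one_write(struct svcxprt_rdma *rdma,
                              struct svc_rdma_chunk_ctxt *cc,
                              u64 offset, u32 handle, unsigned int nsges)
{
    struct svc_rdma_rw_ctxt *ctxt;
    int ret;

    ctxt = svc_rdma_get_rw_ctxt(rdma, nsges);
    if (!ctxt)
        return -ENOMEM;

    /* ... fill ctxt->rw_sg_table.sgl from the payload pages here ... */

    ret = my_rw_ctx_init(rdma, ctxt, offset, handle, DMA_TO_DEVICE);
    if (ret < 0)
        return -EIO;        /* my_rw_ctx_init already recycled ctxt */

    list_add(&ctxt->rw_list, &cc->cc_rwctxts);  /* cc_rwctxts is assumed */
    cc->cc_sqecount += ret; /* SQ slots this context's WRs will consume */
    return 0;
}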
539 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, in svc_rdma_send_write_chunk() argument
549 info = svc_rdma_write_info_alloc(rdma, wr_ch); in svc_rdma_send_write_chunk()
582 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_reply_chunk() argument
589 info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk); in svc_rdma_send_reply_chunk()
843 int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, in svc_rdma_recv_read_chunk() argument
861 info = svc_rdma_read_info_alloc(rdma); in svc_rdma_recv_read_chunk()
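Lines 539-861 are the exported entry points: svc_rdma_send_write_chunk() and svc_rdma_send_reply_chunk() allocate a write info for the chunk (lines 549 and 589) and post Write WRs for it, while svc_rdma_recv_read_chunk() allocates a read info (line 861) to pull Read chunk payloads from the client. Their common shape, sketched with hypothetical my_* helpers standing in for the building and cleanup steps:

/* Sketch of the entry-point shape: allocate per-chunk state, build, post.
 * my_build_writes() and my_free_info() are hypothetical stand-ins.
 */
static int my_send_chunk(struct svcxprt_rdma *rdma, __be32 *chunk,
                         const struct xdr_buf *xdr)
{
    struct svc_rdma_write_info *info;
    int ret;

    info = svc_rdma_write_info_alloc(rdma, chunk);
    if (!info)
        return -ENOMEM;

    ret = my_build_writes(info, xdr);       /* hypothetical */
    if (ret < 0)
        goto out_err;

    ret = my_post_chunk_ctxt(&info->wi_cc); /* see the sketch above */
    if (ret < 0)
        goto out_err;
    return 0;                               /* info is freed on completion */

out_err:
    my_free_info(info);                     /* hypothetical */
    return ret;
}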