Lines Matching no-memory-wc

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2014-2017 Oracle. All rights reserved.
4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
9 * COPYING in the main directory of this source tree, or the BSD-type
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * o buffer memory
57 #include <asm-generic/barrier.h>
93 return -ENOMEM; in rpcrdma_alloc_wq()
120 ib_event_msg(event->event), event->device->name, context); in rpcrdma_qp_async_error_upcall()
122 if (ep->rep_connected == 1) { in rpcrdma_qp_async_error_upcall()
123 ep->rep_connected = -EIO; in rpcrdma_qp_async_error_upcall()
125 wake_up_all(&ep->rep_connect_wait); in rpcrdma_qp_async_error_upcall()
130 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
132 * @wc: completed WR
136 rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_send() argument
138 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
143 trace_xprtrdma_wc_send(sc, wc); in rpcrdma_wc_send()
144 if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) in rpcrdma_wc_send()
146 ib_wc_status_msg(wc->status), in rpcrdma_wc_send()
147 wc->status, wc->vendor_err); in rpcrdma_wc_send()
153 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
155 * @wc: completed WR
159 rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_receive() argument
161 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
166 trace_xprtrdma_wc_receive(wc); in rpcrdma_wc_receive()
167 if (wc->status != IB_WC_SUCCESS) in rpcrdma_wc_receive()
170 /* status == SUCCESS means all fields in wc are trustworthy */ in rpcrdma_wc_receive()
171 rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len); in rpcrdma_wc_receive()
172 rep->rr_wc_flags = wc->wc_flags; in rpcrdma_wc_receive()
173 rep->rr_inv_rkey = wc->ex.invalidate_rkey; in rpcrdma_wc_receive()
175 ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf), in rpcrdma_wc_receive()
176 rdmab_addr(rep->rr_rdmabuf), in rpcrdma_wc_receive()
177 wc->byte_len, DMA_FROM_DEVICE); in rpcrdma_wc_receive()
184 if (wc->status != IB_WC_WR_FLUSH_ERR) in rpcrdma_wc_receive()
186 ib_wc_status_msg(wc->status), in rpcrdma_wc_receive()
187 wc->status, wc->vendor_err); in rpcrdma_wc_receive()
188 rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0); in rpcrdma_wc_receive()
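
The Send and Receive completion handlers above follow the ib_cqe convention: each work request carries a pointer to a struct ib_cqe embedded in the caller's own context object, and the handler recovers that object with container_of() before checking wc->status. A minimal hedged sketch of the pattern; "struct my_ctx" and my_done() are illustrative names, not from this file:

/* Sketch of the wr_cqe/container_of convention used by the handlers above. */
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_ctx {
	struct ib_cqe	mc_cqe;		/* embedded completion entry */
	/* ... caller-private state ... */
};

static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* wc->wr_cqe points at the embedded ib_cqe; recover the container */
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, mc_cqe);

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("completion failed: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
		return;
	}
	/* only on IB_WC_SUCCESS are the other wc fields trustworthy */
	(void)ctx;
}

/* Posting side: ctx->mc_cqe.done = my_done; wr.wr_cqe = &ctx->mc_cqe; */
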
196 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; in rpcrdma_update_connect_private()
197 const struct rpcrdma_connect_private *pmsg = param->private_data; in rpcrdma_update_connect_private()
200 /* Default settings for RPC-over-RDMA Version One */ in rpcrdma_update_connect_private()
201 r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize; in rpcrdma_update_connect_private()
206 pmsg->cp_magic == rpcrdma_cmp_magic && in rpcrdma_update_connect_private()
207 pmsg->cp_version == RPCRDMA_CMP_VERSION) { in rpcrdma_update_connect_private()
208 r_xprt->rx_ia.ri_implicit_roundup = true; in rpcrdma_update_connect_private()
209 rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); in rpcrdma_update_connect_private()
210 wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); in rpcrdma_update_connect_private()
213 if (rsize < cdata->inline_rsize) in rpcrdma_update_connect_private()
214 cdata->inline_rsize = rsize; in rpcrdma_update_connect_private()
215 if (wsize < cdata->inline_wsize) in rpcrdma_update_connect_private()
216 cdata->inline_wsize = wsize; in rpcrdma_update_connect_private()
218 __func__, cdata->inline_wsize, cdata->inline_rsize); in rpcrdma_update_connect_private()
225 struct rpcrdma_xprt *xprt = id->context; in rpcrdma_conn_upcall()
226 struct rpcrdma_ia *ia = &xprt->rx_ia; in rpcrdma_conn_upcall()
227 struct rpcrdma_ep *ep = &xprt->rx_ep; in rpcrdma_conn_upcall()
231 switch (event->event) { in rpcrdma_conn_upcall()
234 ia->ri_async_rc = 0; in rpcrdma_conn_upcall()
235 complete(&ia->ri_done); in rpcrdma_conn_upcall()
238 ia->ri_async_rc = -EPROTO; in rpcrdma_conn_upcall()
239 complete(&ia->ri_done); in rpcrdma_conn_upcall()
242 ia->ri_async_rc = -ENETUNREACH; in rpcrdma_conn_upcall()
243 complete(&ia->ri_done); in rpcrdma_conn_upcall()
248 ia->ri_device->name, in rpcrdma_conn_upcall()
251 init_completion(&ia->ri_remove_done); in rpcrdma_conn_upcall()
252 set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags); in rpcrdma_conn_upcall()
253 ep->rep_connected = -ENODEV; in rpcrdma_conn_upcall()
254 xprt_force_disconnect(&xprt->rx_xprt); in rpcrdma_conn_upcall()
255 wait_for_completion(&ia->ri_remove_done); in rpcrdma_conn_upcall()
257 ia->ri_id = NULL; in rpcrdma_conn_upcall()
258 ia->ri_device = NULL; in rpcrdma_conn_upcall()
262 ++xprt->rx_xprt.connect_cookie; in rpcrdma_conn_upcall()
264 rpcrdma_update_connect_private(xprt, &event->param.conn); in rpcrdma_conn_upcall()
267 connstate = -ENOTCONN; in rpcrdma_conn_upcall()
270 connstate = -ENETUNREACH; in rpcrdma_conn_upcall()
275 rdma_reject_msg(id, event->status)); in rpcrdma_conn_upcall()
276 connstate = -ECONNREFUSED; in rpcrdma_conn_upcall()
277 if (event->status == IB_CM_REJ_STALE_CONN) in rpcrdma_conn_upcall()
278 connstate = -EAGAIN; in rpcrdma_conn_upcall()
281 ++xprt->rx_xprt.connect_cookie; in rpcrdma_conn_upcall()
282 connstate = -ECONNABORTED; in rpcrdma_conn_upcall()
284 ep->rep_connected = connstate; in rpcrdma_conn_upcall()
286 wake_up_all(&ep->rep_connect_wait); in rpcrdma_conn_upcall()
292 ia->ri_device->name, ia->ri_ops->ro_displayname, in rpcrdma_conn_upcall()
293 ep, rdma_event_msg(event->event)); in rpcrdma_conn_upcall()
309 init_completion(&ia->ri_done); in rpcrdma_create_id()
311 id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_conn_upcall, in rpcrdma_create_id()
320 ia->ri_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
322 (struct sockaddr *)&xprt->rx_xprt.addr, in rpcrdma_create_id()
329 rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout); in rpcrdma_create_id()
335 rc = ia->ri_async_rc; in rpcrdma_create_id()
339 ia->ri_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
346 rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout); in rpcrdma_create_id()
351 rc = ia->ri_async_rc; in rpcrdma_create_id()
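
rpcrdma_create_id() performs the standard RDMA CM two-step resolution: rdma_resolve_addr() then rdma_resolve_route(), each completed asynchronously by the connection upcall signalling ri_done with the result in ri_async_rc. A condensed sketch of that handshake under assumed names; "struct setup", resolve_peer() and RESOLVE_TIMEOUT_MS are illustrative stand-ins, not the file's definitions:

/* Condensed sketch of the address/route resolution done above. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <rdma/rdma_cm.h>

#define RESOLVE_TIMEOUT_MS	5000	/* illustrative value */

struct setup {
	struct completion	done;		/* completed by the CM handler */
	int			async_rc;	/* result posted by the handler */
};

static int resolve_peer(struct rdma_cm_id *id, struct sockaddr *peer,
			struct setup *s, unsigned long wtimeout)
{
	long rc;
	int err;

	s->async_rc = -ETIMEDOUT;
	err = rdma_resolve_addr(id, NULL, peer, RESOLVE_TIMEOUT_MS);
	if (err)
		return err;
	/* handler completes s->done on ADDR_RESOLVED or ADDR_ERROR */
	rc = wait_for_completion_interruptible_timeout(&s->done, wtimeout);
	if (rc < 0)
		return rc;
	if (s->async_rc)
		return s->async_rc;	/* still -ETIMEDOUT on timeout */

	s->async_rc = -ETIMEDOUT;
	err = rdma_resolve_route(id, RESOLVE_TIMEOUT_MS);
	if (err)
		return err;
	/* handler completes s->done on ROUTE_RESOLVED or ROUTE_ERROR */
	rc = wait_for_completion_interruptible_timeout(&s->done, wtimeout);
	if (rc < 0)
		return rc;
	return s->async_rc;
}
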
367 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
376 struct rpcrdma_ia *ia = &xprt->rx_ia; in rpcrdma_ia_open()
379 ia->ri_id = rpcrdma_create_id(xprt, ia); in rpcrdma_ia_open()
380 if (IS_ERR(ia->ri_id)) { in rpcrdma_ia_open()
381 rc = PTR_ERR(ia->ri_id); in rpcrdma_ia_open()
384 ia->ri_device = ia->ri_id->device; in rpcrdma_ia_open()
386 ia->ri_pd = ib_alloc_pd(ia->ri_device, 0); in rpcrdma_ia_open()
387 if (IS_ERR(ia->ri_pd)) { in rpcrdma_ia_open()
388 rc = PTR_ERR(ia->ri_pd); in rpcrdma_ia_open()
396 ia->ri_ops = &rpcrdma_frwr_memreg_ops; in rpcrdma_ia_open()
402 ia->ri_ops = &rpcrdma_fmr_memreg_ops; in rpcrdma_ia_open()
408 ia->ri_device->name, xprt_rdma_memreg_strategy); in rpcrdma_ia_open()
409 rc = -EINVAL; in rpcrdma_ia_open()
421 * rpcrdma_ia_remove - Handle device driver unload
432 struct rpcrdma_ep *ep = &r_xprt->rx_ep; in rpcrdma_ia_remove()
433 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_ia_remove()
437 cancel_delayed_work_sync(&buf->rb_refresh_worker); in rpcrdma_ia_remove()
440 * - Don't cancel the connect worker. in rpcrdma_ia_remove()
441 * - Don't call rpcrdma_ep_disconnect, which waits in rpcrdma_ia_remove()
443 * - rdma_disconnect is unneeded, the underlying in rpcrdma_ia_remove()
446 if (ia->ri_id->qp) { in rpcrdma_ia_remove()
447 ib_drain_qp(ia->ri_id->qp); in rpcrdma_ia_remove()
448 rdma_destroy_qp(ia->ri_id); in rpcrdma_ia_remove()
449 ia->ri_id->qp = NULL; in rpcrdma_ia_remove()
451 ib_free_cq(ep->rep_attr.recv_cq); in rpcrdma_ia_remove()
452 ep->rep_attr.recv_cq = NULL; in rpcrdma_ia_remove()
453 ib_free_cq(ep->rep_attr.send_cq); in rpcrdma_ia_remove()
454 ep->rep_attr.send_cq = NULL; in rpcrdma_ia_remove()
459 list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list) in rpcrdma_ia_remove()
460 rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf); in rpcrdma_ia_remove()
461 list_for_each_entry(req, &buf->rb_allreqs, rl_all) { in rpcrdma_ia_remove()
462 rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf); in rpcrdma_ia_remove()
463 rpcrdma_dma_unmap_regbuf(req->rl_sendbuf); in rpcrdma_ia_remove()
464 rpcrdma_dma_unmap_regbuf(req->rl_recvbuf); in rpcrdma_ia_remove()
467 ib_dealloc_pd(ia->ri_pd); in rpcrdma_ia_remove()
468 ia->ri_pd = NULL; in rpcrdma_ia_remove()
471 complete(&ia->ri_remove_done); in rpcrdma_ia_remove()
477 * rpcrdma_ia_close - Clean up/close an IA.
484 if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) { in rpcrdma_ia_close()
485 if (ia->ri_id->qp) in rpcrdma_ia_close()
486 rdma_destroy_qp(ia->ri_id); in rpcrdma_ia_close()
487 rdma_destroy_id(ia->ri_id); in rpcrdma_ia_close()
489 ia->ri_id = NULL; in rpcrdma_ia_close()
490 ia->ri_device = NULL; in rpcrdma_ia_close()
493 if (ia->ri_pd && !IS_ERR(ia->ri_pd)) in rpcrdma_ia_close()
494 ib_dealloc_pd(ia->ri_pd); in rpcrdma_ia_close()
495 ia->ri_pd = NULL; in rpcrdma_ia_close()
505 struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; in rpcrdma_ep_create()
510 max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge, in rpcrdma_ep_create()
514 return -ENOMEM; in rpcrdma_ep_create()
516 ia->ri_max_send_sges = max_sge; in rpcrdma_ep_create()
518 rc = ia->ri_ops->ro_open(ia, ep, cdata); in rpcrdma_ep_create()
522 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; in rpcrdma_ep_create()
523 ep->rep_attr.qp_context = ep; in rpcrdma_ep_create()
524 ep->rep_attr.srq = NULL; in rpcrdma_ep_create()
525 ep->rep_attr.cap.max_send_sge = max_sge; in rpcrdma_ep_create()
526 ep->rep_attr.cap.max_recv_sge = 1; in rpcrdma_ep_create()
527 ep->rep_attr.cap.max_inline_data = 0; in rpcrdma_ep_create()
528 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; in rpcrdma_ep_create()
529 ep->rep_attr.qp_type = IB_QPT_RC; in rpcrdma_ep_create()
530 ep->rep_attr.port_num = ~0; in rpcrdma_ep_create()
535 ep->rep_attr.cap.max_send_wr, in rpcrdma_ep_create()
536 ep->rep_attr.cap.max_recv_wr, in rpcrdma_ep_create()
537 ep->rep_attr.cap.max_send_sge, in rpcrdma_ep_create()
538 ep->rep_attr.cap.max_recv_sge); in rpcrdma_ep_create()
541 ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH, in rpcrdma_ep_create()
542 cdata->max_requests >> 2); in rpcrdma_ep_create()
543 ep->rep_send_count = ep->rep_send_batch; in rpcrdma_ep_create()
544 init_waitqueue_head(&ep->rep_connect_wait); in rpcrdma_ep_create()
545 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); in rpcrdma_ep_create()
547 sendcq = ib_alloc_cq(ia->ri_device, NULL, in rpcrdma_ep_create()
548 ep->rep_attr.cap.max_send_wr + 1, in rpcrdma_ep_create()
549 ia->ri_device->num_comp_vectors > 1 ? 1 : 0, in rpcrdma_ep_create()
558 recvcq = ib_alloc_cq(ia->ri_device, NULL, in rpcrdma_ep_create()
559 ep->rep_attr.cap.max_recv_wr + 1, in rpcrdma_ep_create()
568 ep->rep_attr.send_cq = sendcq; in rpcrdma_ep_create()
569 ep->rep_attr.recv_cq = recvcq; in rpcrdma_ep_create()
572 memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma)); in rpcrdma_ep_create()
574 /* Prepare RDMA-CM private message */ in rpcrdma_ep_create()
575 pmsg->cp_magic = rpcrdma_cmp_magic; in rpcrdma_ep_create()
576 pmsg->cp_version = RPCRDMA_CMP_VERSION; in rpcrdma_ep_create()
577 pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok; in rpcrdma_ep_create()
578 pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize); in rpcrdma_ep_create()
579 pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize); in rpcrdma_ep_create()
580 ep->rep_remote_cma.private_data = pmsg; in rpcrdma_ep_create()
581 ep->rep_remote_cma.private_data_len = sizeof(*pmsg); in rpcrdma_ep_create()
584 ep->rep_remote_cma.initiator_depth = 0; in rpcrdma_ep_create()
585 ep->rep_remote_cma.responder_resources = in rpcrdma_ep_create()
586 min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom); in rpcrdma_ep_create()
589 * GID changes quickly. RPC layer handles re-establishing in rpcrdma_ep_create()
592 ep->rep_remote_cma.retry_count = 6; in rpcrdma_ep_create()
594 /* RPC-over-RDMA handles its own flow control. In addition, in rpcrdma_ep_create()
595 * make all RNR NAKs visible so we know that RPC-over-RDMA in rpcrdma_ep_create()
596 * flow control is working correctly (no NAKs should be seen). in rpcrdma_ep_create()
598 ep->rep_remote_cma.flow_control = 0; in rpcrdma_ep_create()
599 ep->rep_remote_cma.rnr_retry_count = 0; in rpcrdma_ep_create()
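
The rep_remote_cma settings assembled above are eventually handed to rdma_connect() as a struct rdma_conn_param. A hedged sketch of how such a parameter block is filled in, mirroring the values in the excerpt; "connect_sketch" and "pmsg" are illustrative names for the private message built above:

/* Sketch only: how rep_remote_cma-style settings feed rdma_connect(). */
#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>

static int connect_sketch(struct rdma_cm_id *id, struct ib_device *device,
			  const void *pmsg, u8 pmsg_len)
{
	struct rdma_conn_param param;

	memset(&param, 0, sizeof(param));
	param.private_data = pmsg;		/* version/inline-size hints */
	param.private_data_len = pmsg_len;
	param.initiator_depth = 0;
	param.responder_resources =
		min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);
	param.retry_count = 6;		/* ride out brief path migrations */
	param.flow_control = 0;		/* RPC-over-RDMA does its own */
	param.rnr_retry_count = 0;	/* surface RNR NAKs immediately */

	return rdma_connect(id, &param);
}
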
614 * allocated) or re-create it.
619 cancel_delayed_work_sync(&ep->rep_connect_worker); in rpcrdma_ep_destroy()
621 if (ia->ri_id && ia->ri_id->qp) { in rpcrdma_ep_destroy()
623 rdma_destroy_qp(ia->ri_id); in rpcrdma_ep_destroy()
624 ia->ri_id->qp = NULL; in rpcrdma_ep_destroy()
627 if (ep->rep_attr.recv_cq) in rpcrdma_ep_destroy()
628 ib_free_cq(ep->rep_attr.recv_cq); in rpcrdma_ep_destroy()
629 if (ep->rep_attr.send_cq) in rpcrdma_ep_destroy()
630 ib_free_cq(ep->rep_attr.send_cq); in rpcrdma_ep_destroy()
633 /* Re-establish a connection after a device removal event.
645 rc = -EHOSTUNREACH; in rpcrdma_ep_recreate_xprt()
649 rc = -ENOMEM; in rpcrdma_ep_recreate_xprt()
650 err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data); in rpcrdma_ep_recreate_xprt()
656 rc = -ENETUNREACH; in rpcrdma_ep_recreate_xprt()
657 err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr); in rpcrdma_ep_recreate_xprt()
685 rc = -EHOSTUNREACH; in rpcrdma_ep_reconnect()
695 * This is a sanity check only. There should be no way these in rpcrdma_ep_reconnect()
699 rc = -ENETUNREACH; in rpcrdma_ep_reconnect()
700 if (ia->ri_device != id->device) { in rpcrdma_ep_reconnect()
705 err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr); in rpcrdma_ep_reconnect()
714 old = ia->ri_id; in rpcrdma_ep_reconnect()
715 ia->ri_id = id; in rpcrdma_ep_reconnect()
735 switch (ep->rep_connected) { in rpcrdma_ep_connect()
738 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr); in rpcrdma_ep_connect()
742 rc = -ENETUNREACH; in rpcrdma_ep_connect()
746 case -ENODEV: in rpcrdma_ep_connect()
757 ep->rep_connected = 0; in rpcrdma_ep_connect()
760 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma); in rpcrdma_ep_connect()
767 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); in rpcrdma_ep_connect()
768 if (ep->rep_connected <= 0) { in rpcrdma_ep_connect()
769 if (ep->rep_connected == -EAGAIN) in rpcrdma_ep_connect()
771 rc = ep->rep_connected; in rpcrdma_ep_connect()
779 ep->rep_connected = rc; in rpcrdma_ep_connect()
799 rc = rdma_disconnect(ia->ri_id); in rpcrdma_ep_disconnect()
802 wait_event_interruptible(ep->rep_connect_wait, in rpcrdma_ep_disconnect()
803 ep->rep_connected != 1); in rpcrdma_ep_disconnect()
805 ep->rep_connected = rc; in rpcrdma_ep_disconnect()
809 ib_drain_qp(ia->ri_id->qp); in rpcrdma_ep_disconnect()
812 /* Fixed-size circular FIFO queue. This implementation is wait-free and
813 * lock-free.
818 * ->send_request call at a time.
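
The send-context queue described here is in essence a single-producer/single-consumer ring: the head index is advanced only by the (externally serialized) sender, the tail only by the completion path, with a release store when the tail is published and a READ_ONCE-style read on the other side, so the get/put paths themselves take no lock. A standalone userspace analogue using C11 atomics in place of the kernel's READ_ONCE()/smp_store_release(); names and the ring size are made up for illustration:

/* Userspace analogue of the wait-free send-context ring. */
#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 8			/* arbitrary example capacity */

struct ring {
	void		*slot[RING_SIZE];
	unsigned long	head;		/* written only by the producer */
	_Atomic unsigned long tail;	/* written only by the consumer */
};

static unsigned long ring_next(unsigned long i)
{
	return (i + 1 < RING_SIZE) ? i + 1 : 0;
}

/* Sender: claim the next free context; NULL if all are in flight. */
static void **ring_get(struct ring *r)
{
	unsigned long next = ring_next(r->head);

	/* acquire pairs with the consumer's release store of tail */
	if (next == atomic_load_explicit(&r->tail, memory_order_acquire))
		return NULL;
	r->head = next;
	return &r->slot[next];
}

/* Completion path: retire contexts up to and including the one whose
 * send just completed; earlier unsignaled sends completed with it.
 */
static void ring_put(struct ring *r, void **slot)
{
	unsigned long next = atomic_load_explicit(&r->tail,
						  memory_order_relaxed);
	do {
		next = ring_next(next);
	} while (&r->slot[next] != slot);

	/* release pairs with the producer's acquire load of tail */
	atomic_store_explicit(&r->tail, next, memory_order_release);
}
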
833 for (i = 0; i <= buf->rb_sc_last; i++) in rpcrdma_sendctxs_destroy()
834 kfree(buf->rb_sc_ctxs[i]); in rpcrdma_sendctxs_destroy()
835 kfree(buf->rb_sc_ctxs); in rpcrdma_sendctxs_destroy()
843 ia->ri_max_send_sges * sizeof(struct ib_sge), in rpcrdma_sendctx_create()
848 sc->sc_wr.wr_cqe = &sc->sc_cqe; in rpcrdma_sendctx_create()
849 sc->sc_wr.sg_list = sc->sc_sges; in rpcrdma_sendctx_create()
850 sc->sc_wr.opcode = IB_WR_SEND; in rpcrdma_sendctx_create()
851 sc->sc_cqe.done = rpcrdma_wc_send; in rpcrdma_sendctx_create()
857 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_create()
863 * the ->send_request call to fail temporarily before too many in rpcrdma_sendctxs_create()
866 i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; in rpcrdma_sendctxs_create()
868 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); in rpcrdma_sendctxs_create()
869 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_create()
870 return -ENOMEM; in rpcrdma_sendctxs_create()
872 buf->rb_sc_last = i - 1; in rpcrdma_sendctxs_create()
873 for (i = 0; i <= buf->rb_sc_last; i++) { in rpcrdma_sendctxs_create()
874 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); in rpcrdma_sendctxs_create()
876 return -ENOMEM; in rpcrdma_sendctxs_create()
878 sc->sc_xprt = r_xprt; in rpcrdma_sendctxs_create()
879 buf->rb_sc_ctxs[i] = sc; in rpcrdma_sendctxs_create()
881 buf->rb_flags = 0; in rpcrdma_sendctxs_create()
893 return likely(item < buf->rb_sc_last) ? item + 1 : 0; in rpcrdma_sendctx_next()
897 * rpcrdma_sendctx_get_locked - Acquire a send context
906 * and provides an effective memory barrier that flushes the new value
915 next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); in rpcrdma_sendctx_get_locked()
917 if (next_head == READ_ONCE(buf->rb_sc_tail)) in rpcrdma_sendctx_get_locked()
921 sc = buf->rb_sc_ctxs[next_head]; in rpcrdma_sendctx_get_locked()
923 /* Releasing the lock in the caller acts as a memory in rpcrdma_sendctx_get_locked()
926 buf->rb_sc_head = next_head; in rpcrdma_sendctx_get_locked()
935 set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags); in rpcrdma_sendctx_get_locked()
937 r_xprt->rx_stats.empty_sendctx_q++; in rpcrdma_sendctx_get_locked()
942 * rpcrdma_sendctx_put_locked - Release a send context
953 struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf; in rpcrdma_sendctx_put_locked()
959 next_tail = buf->rb_sc_tail; in rpcrdma_sendctx_put_locked()
964 rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]); in rpcrdma_sendctx_put_locked()
966 } while (buf->rb_sc_ctxs[next_tail] != sc); in rpcrdma_sendctx_put_locked()
969 smp_store_release(&buf->rb_sc_tail, next_tail); in rpcrdma_sendctx_put_locked()
971 if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) { in rpcrdma_sendctx_put_locked()
973 xprt_write_space(&sc->sc_xprt->rx_xprt); in rpcrdma_sendctx_put_locked()
984 spin_lock(&buf->rb_recovery_lock); in rpcrdma_mr_recovery_worker()
985 while (!list_empty(&buf->rb_stale_mrs)) { in rpcrdma_mr_recovery_worker()
986 mr = rpcrdma_mr_pop(&buf->rb_stale_mrs); in rpcrdma_mr_recovery_worker()
987 spin_unlock(&buf->rb_recovery_lock); in rpcrdma_mr_recovery_worker()
990 mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr); in rpcrdma_mr_recovery_worker()
992 spin_lock(&buf->rb_recovery_lock); in rpcrdma_mr_recovery_worker()
994 spin_unlock(&buf->rb_recovery_lock); in rpcrdma_mr_recovery_worker()
1000 struct rpcrdma_xprt *r_xprt = mr->mr_xprt; in rpcrdma_mr_defer_recovery()
1001 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mr_defer_recovery()
1003 spin_lock(&buf->rb_recovery_lock); in rpcrdma_mr_defer_recovery()
1004 rpcrdma_mr_push(mr, &buf->rb_stale_mrs); in rpcrdma_mr_defer_recovery()
1005 spin_unlock(&buf->rb_recovery_lock); in rpcrdma_mr_defer_recovery()
1007 schedule_delayed_work(&buf->rb_recovery_worker, 0); in rpcrdma_mr_defer_recovery()
1013 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_create()
1014 struct rpcrdma_ia *ia = &r_xprt->rx_ia; in rpcrdma_mrs_create()
1027 rc = ia->ri_ops->ro_init_mr(ia, mr); in rpcrdma_mrs_create()
1033 mr->mr_xprt = r_xprt; in rpcrdma_mrs_create()
1035 list_add(&mr->mr_list, &free); in rpcrdma_mrs_create()
1036 list_add(&mr->mr_all, &all); in rpcrdma_mrs_create()
1039 spin_lock(&buf->rb_mrlock); in rpcrdma_mrs_create()
1040 list_splice(&free, &buf->rb_mrs); in rpcrdma_mrs_create()
1041 list_splice(&all, &buf->rb_all); in rpcrdma_mrs_create()
1042 r_xprt->rx_stats.mrs_allocated += count; in rpcrdma_mrs_create()
1043 spin_unlock(&buf->rb_mrlock); in rpcrdma_mrs_create()
1046 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_mrs_create()
1063 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; in rpcrdma_create_req()
1069 return ERR_PTR(-ENOMEM); in rpcrdma_create_req()
1075 return ERR_PTR(-ENOMEM); in rpcrdma_create_req()
1077 req->rl_rdmabuf = rb; in rpcrdma_create_req()
1078 xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb)); in rpcrdma_create_req()
1079 req->rl_buffer = buffer; in rpcrdma_create_req()
1080 INIT_LIST_HEAD(&req->rl_registered); in rpcrdma_create_req()
1082 spin_lock(&buffer->rb_reqslock); in rpcrdma_create_req()
1083 list_add(&req->rl_all, &buffer->rb_allreqs); in rpcrdma_create_req()
1084 spin_unlock(&buffer->rb_reqslock); in rpcrdma_create_req()
1091 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; in rpcrdma_create_rep()
1092 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_create_rep()
1096 rc = -ENOMEM; in rpcrdma_create_rep()
1101 rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize, in rpcrdma_create_rep()
1103 if (IS_ERR(rep->rr_rdmabuf)) { in rpcrdma_create_rep()
1104 rc = PTR_ERR(rep->rr_rdmabuf); in rpcrdma_create_rep()
1107 xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base, in rpcrdma_create_rep()
1108 rdmab_length(rep->rr_rdmabuf)); in rpcrdma_create_rep()
1110 rep->rr_cqe.done = rpcrdma_wc_receive; in rpcrdma_create_rep()
1111 rep->rr_rxprt = r_xprt; in rpcrdma_create_rep()
1112 INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion); in rpcrdma_create_rep()
1113 rep->rr_recv_wr.next = NULL; in rpcrdma_create_rep()
1114 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; in rpcrdma_create_rep()
1115 rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; in rpcrdma_create_rep()
1116 rep->rr_recv_wr.num_sge = 1; in rpcrdma_create_rep()
1117 rep->rr_temp = temp; in rpcrdma_create_rep()
1119 spin_lock(&buf->rb_lock); in rpcrdma_create_rep()
1120 list_add(&rep->rr_list, &buf->rb_recv_bufs); in rpcrdma_create_rep()
1121 spin_unlock(&buf->rb_lock); in rpcrdma_create_rep()
1135 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_buffer_create()
1138 buf->rb_max_requests = r_xprt->rx_data.max_requests; in rpcrdma_buffer_create()
1139 buf->rb_bc_srv_max_requests = 0; in rpcrdma_buffer_create()
1140 spin_lock_init(&buf->rb_mrlock); in rpcrdma_buffer_create()
1141 spin_lock_init(&buf->rb_lock); in rpcrdma_buffer_create()
1142 spin_lock_init(&buf->rb_recovery_lock); in rpcrdma_buffer_create()
1143 INIT_LIST_HEAD(&buf->rb_mrs); in rpcrdma_buffer_create()
1144 INIT_LIST_HEAD(&buf->rb_all); in rpcrdma_buffer_create()
1145 INIT_LIST_HEAD(&buf->rb_stale_mrs); in rpcrdma_buffer_create()
1146 INIT_DELAYED_WORK(&buf->rb_refresh_worker, in rpcrdma_buffer_create()
1148 INIT_DELAYED_WORK(&buf->rb_recovery_worker, in rpcrdma_buffer_create()
1153 INIT_LIST_HEAD(&buf->rb_send_bufs); in rpcrdma_buffer_create()
1154 INIT_LIST_HEAD(&buf->rb_allreqs); in rpcrdma_buffer_create()
1155 spin_lock_init(&buf->rb_reqslock); in rpcrdma_buffer_create()
1156 for (i = 0; i < buf->rb_max_requests; i++) { in rpcrdma_buffer_create()
1166 list_add(&req->rl_list, &buf->rb_send_bufs); in rpcrdma_buffer_create()
1169 buf->rb_credits = 1; in rpcrdma_buffer_create()
1170 buf->rb_posted_receives = 0; in rpcrdma_buffer_create()
1171 INIT_LIST_HEAD(&buf->rb_recv_bufs); in rpcrdma_buffer_create()
1186 rpcrdma_free_regbuf(rep->rr_rdmabuf); in rpcrdma_destroy_rep()
1193 rpcrdma_free_regbuf(req->rl_recvbuf); in rpcrdma_destroy_req()
1194 rpcrdma_free_regbuf(req->rl_sendbuf); in rpcrdma_destroy_req()
1195 rpcrdma_free_regbuf(req->rl_rdmabuf); in rpcrdma_destroy_req()
1209 spin_lock(&buf->rb_mrlock); in rpcrdma_mrs_destroy()
1210 while (!list_empty(&buf->rb_all)) { in rpcrdma_mrs_destroy()
1211 mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all); in rpcrdma_mrs_destroy()
1212 list_del(&mr->mr_all); in rpcrdma_mrs_destroy()
1214 spin_unlock(&buf->rb_mrlock); in rpcrdma_mrs_destroy()
1217 if (!list_empty(&mr->mr_list)) in rpcrdma_mrs_destroy()
1218 list_del(&mr->mr_list); in rpcrdma_mrs_destroy()
1220 ia->ri_ops->ro_release_mr(mr); in rpcrdma_mrs_destroy()
1222 spin_lock(&buf->rb_mrlock); in rpcrdma_mrs_destroy()
1224 spin_unlock(&buf->rb_mrlock); in rpcrdma_mrs_destroy()
1225 r_xprt->rx_stats.mrs_allocated = 0; in rpcrdma_mrs_destroy()
1233 cancel_delayed_work_sync(&buf->rb_recovery_worker); in rpcrdma_buffer_destroy()
1234 cancel_delayed_work_sync(&buf->rb_refresh_worker); in rpcrdma_buffer_destroy()
1238 while (!list_empty(&buf->rb_recv_bufs)) { in rpcrdma_buffer_destroy()
1241 rep = list_first_entry(&buf->rb_recv_bufs, in rpcrdma_buffer_destroy()
1243 list_del(&rep->rr_list); in rpcrdma_buffer_destroy()
1247 spin_lock(&buf->rb_reqslock); in rpcrdma_buffer_destroy()
1248 while (!list_empty(&buf->rb_allreqs)) { in rpcrdma_buffer_destroy()
1251 req = list_first_entry(&buf->rb_allreqs, in rpcrdma_buffer_destroy()
1253 list_del(&req->rl_all); in rpcrdma_buffer_destroy()
1255 spin_unlock(&buf->rb_reqslock); in rpcrdma_buffer_destroy()
1257 spin_lock(&buf->rb_reqslock); in rpcrdma_buffer_destroy()
1259 spin_unlock(&buf->rb_reqslock); in rpcrdma_buffer_destroy()
1265 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
1268 * Returns an initialized rpcrdma_mr or NULL if no free
1274 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mr_get()
1277 spin_lock(&buf->rb_mrlock); in rpcrdma_mr_get()
1278 if (!list_empty(&buf->rb_mrs)) in rpcrdma_mr_get()
1279 mr = rpcrdma_mr_pop(&buf->rb_mrs); in rpcrdma_mr_get()
1280 spin_unlock(&buf->rb_mrlock); in rpcrdma_mr_get()
1288 if (r_xprt->rx_ep.rep_connected != -ENODEV) in rpcrdma_mr_get()
1289 schedule_delayed_work(&buf->rb_refresh_worker, 0); in rpcrdma_mr_get()
1300 spin_lock(&buf->rb_mrlock); in __rpcrdma_mr_put()
1301 rpcrdma_mr_push(mr, &buf->rb_mrs); in __rpcrdma_mr_put()
1302 spin_unlock(&buf->rb_mrlock); in __rpcrdma_mr_put()
1306 * rpcrdma_mr_put - Release an rpcrdma_mr object
1313 __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr); in rpcrdma_mr_put()
1317 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
1324 struct rpcrdma_xprt *r_xprt = mr->mr_xprt; in rpcrdma_mr_unmap_and_put()
1327 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, in rpcrdma_mr_unmap_and_put()
1328 mr->mr_sg, mr->mr_nents, mr->mr_dir); in rpcrdma_mr_unmap_and_put()
1329 __rpcrdma_mr_put(&r_xprt->rx_buf, mr); in rpcrdma_mr_unmap_and_put()
1333 * rpcrdma_buffer_get - Get a request buffer
1343 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_get()
1344 req = list_first_entry_or_null(&buffers->rb_send_bufs, in rpcrdma_buffer_get()
1347 list_del_init(&req->rl_list); in rpcrdma_buffer_get()
1348 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_get()
1353 * rpcrdma_buffer_put - Put request/reply buffers back into pool
1360 struct rpcrdma_buffer *buffers = req->rl_buffer; in rpcrdma_buffer_put()
1361 struct rpcrdma_rep *rep = req->rl_reply; in rpcrdma_buffer_put()
1363 req->rl_reply = NULL; in rpcrdma_buffer_put()
1365 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_put()
1366 list_add(&req->rl_list, &buffers->rb_send_bufs); in rpcrdma_buffer_put()
1368 if (!rep->rr_temp) { in rpcrdma_buffer_put()
1369 list_add(&rep->rr_list, &buffers->rb_recv_bufs); in rpcrdma_buffer_put()
1373 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_put()
1385 struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; in rpcrdma_recv_buffer_put()
1387 if (!rep->rr_temp) { in rpcrdma_recv_buffer_put()
1388 spin_lock(&buffers->rb_lock); in rpcrdma_recv_buffer_put()
1389 list_add(&rep->rr_list, &buffers->rb_recv_bufs); in rpcrdma_recv_buffer_put()
1390 spin_unlock(&buffers->rb_lock); in rpcrdma_recv_buffer_put()
1397 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
1403 * can be persistently DMA-mapped for I/O.
1417 return ERR_PTR(-ENOMEM); in rpcrdma_alloc_regbuf()
1419 rb->rg_device = NULL; in rpcrdma_alloc_regbuf()
1420 rb->rg_direction = direction; in rpcrdma_alloc_regbuf()
1421 rb->rg_iov.length = size; in rpcrdma_alloc_regbuf()
1427 * __rpcrdma_map_regbuf - DMA-map a regbuf
1434 struct ib_device *device = ia->ri_device; in __rpcrdma_dma_map_regbuf()
1436 if (rb->rg_direction == DMA_NONE) in __rpcrdma_dma_map_regbuf()
1439 rb->rg_iov.addr = ib_dma_map_single(device, in __rpcrdma_dma_map_regbuf()
1440 (void *)rb->rg_base, in __rpcrdma_dma_map_regbuf()
1442 rb->rg_direction); in __rpcrdma_dma_map_regbuf()
1446 rb->rg_device = device; in __rpcrdma_dma_map_regbuf()
1447 rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey; in __rpcrdma_dma_map_regbuf()
1460 ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), in rpcrdma_dma_unmap_regbuf()
1461 rdmab_length(rb), rb->rg_direction); in rpcrdma_dma_unmap_regbuf()
1462 rb->rg_device = NULL; in rpcrdma_dma_unmap_regbuf()
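
The regbuf helpers wrap the kernel's ib_dma_* API in a persistent-mapping pattern: map a buffer once, reuse the mapping for many I/Os, unmap only at teardown. A minimal hedged sketch of that pattern; "struct pbuf" and its helpers are illustrative stand-ins, not the file's rpcrdma_regbuf:

/* Sketch of persistent DMA mapping for a send/receive buffer. */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

struct pbuf {
	struct ib_device	*device;
	struct ib_sge		sge;
	enum dma_data_direction	dir;
	void			*base;
	size_t			len;
};

static bool pbuf_map(struct ib_device *device, struct ib_pd *pd,
		     struct pbuf *pb)
{
	if (pb->dir == DMA_NONE)
		return false;

	pb->sge.addr = ib_dma_map_single(device, pb->base, pb->len, pb->dir);
	if (ib_dma_mapping_error(device, pb->sge.addr))
		return false;

	pb->sge.length = pb->len;
	pb->sge.lkey = pd->local_dma_lkey;	/* kernel DMA lkey */
	pb->device = device;
	return true;
}

static void pbuf_unmap(struct pbuf *pb)
{
	if (!pb->device)
		return;
	ib_dma_unmap_single(pb->device, pb->sge.addr, pb->sge.length, pb->dir);
	pb->device = NULL;
}
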
1466 * rpcrdma_free_regbuf - deregister and free registered buffer
1486 struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr; in rpcrdma_ep_post()
1489 if (!ep->rep_send_count || in rpcrdma_ep_post()
1490 test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) { in rpcrdma_ep_post()
1491 send_wr->send_flags |= IB_SEND_SIGNALED; in rpcrdma_ep_post()
1492 ep->rep_send_count = ep->rep_send_batch; in rpcrdma_ep_post()
1494 send_wr->send_flags &= ~IB_SEND_SIGNALED; in rpcrdma_ep_post()
1495 --ep->rep_send_count; in rpcrdma_ep_post()
1498 rc = ia->ri_ops->ro_send(ia, req); in rpcrdma_ep_post()
1501 return -ENOTCONN; in rpcrdma_ep_post()
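
The excerpt above shows send-completion batching: most Sends are posted unsignaled, and only every rep_send_batch-th Send (or one that must release TX resources) requests a completion, which then retires the whole run of earlier Sends. A hedged sketch of that decision with illustrative names:

/* Sketch of batched send signaling: request a completion only every
 * "batch" sends, so one Send WC retires a run of unsignaled WRs.
 */
#include <rdma/ib_verbs.h>

static int post_send_batched(struct ib_qp *qp, struct ib_send_wr *wr,
			     unsigned int *countdown, unsigned int batch,
			     bool must_signal)
{
	struct ib_send_wr *bad_wr;

	if (!*countdown || must_signal) {
		wr->send_flags |= IB_SEND_SIGNALED;
		*countdown = batch;		/* restart the run */
	} else {
		wr->send_flags &= ~IB_SEND_SIGNALED;
		--*countdown;
	}

	return ib_post_send(qp, wr, &bad_wr);
}

This leans on the verbs guarantee that Send WRs on a queue pair complete in order, so the completion for a signaled WR implies the earlier unsignaled ones have finished as well.
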
1506 * rpcrdma_post_recvs - Maybe post some Receive buffers
1514 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_post_recvs()
1518 needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1); in rpcrdma_post_recvs()
1519 if (buf->rb_posted_receives > needed) in rpcrdma_post_recvs()
1521 needed -= buf->rb_posted_receives; in rpcrdma_post_recvs()
1529 spin_lock(&buf->rb_lock); in rpcrdma_post_recvs()
1530 rep = list_first_entry_or_null(&buf->rb_recv_bufs, in rpcrdma_post_recvs()
1533 list_del(&rep->rr_list); in rpcrdma_post_recvs()
1534 spin_unlock(&buf->rb_lock); in rpcrdma_post_recvs()
1541 rb = rep->rr_rdmabuf; in rpcrdma_post_recvs()
1543 if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) { in rpcrdma_post_recvs()
1549 trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe); in rpcrdma_post_recvs()
1550 rep->rr_recv_wr.next = wr; in rpcrdma_post_recvs()
1551 wr = &rep->rr_recv_wr; in rpcrdma_post_recvs()
1553 --needed; in rpcrdma_post_recvs()
1558 rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, in rpcrdma_post_recvs()
1565 wr = wr->next; in rpcrdma_post_recvs()
1567 --count; in rpcrdma_post_recvs()
1570 buf->rb_posted_receives += count; in rpcrdma_post_recvs()
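
rpcrdma_post_recvs() links the Receive WRs together through rr_recv_wr.next and posts the whole chain with a single ib_post_recv() call; on failure the bad_wr out-parameter identifies the first WR that was not posted, which is how the loop above counts the ones that were. A minimal sketch of chaining and posting Receive WRs; "struct rbuf" and post_recv_chain() are illustrative names:

/* Sketch: chain several Receive WRs and post them with one verb call. */
#include <rdma/ib_verbs.h>

struct rbuf {
	struct ib_cqe		cqe;	/* .done = receive completion handler */
	struct ib_sge		sge;	/* DMA-mapped receive buffer */
	struct ib_recv_wr	wr;
};

static int post_recv_chain(struct ib_qp *qp, struct rbuf **bufs, int n)
{
	struct ib_recv_wr *chain = NULL, *bad_wr;
	int i;

	for (i = 0; i < n; i++) {
		struct rbuf *rb = bufs[i];

		rb->wr.wr_cqe = &rb->cqe;
		rb->wr.sg_list = &rb->sge;
		rb->wr.num_sge = 1;
		rb->wr.next = chain;	/* prepend to the chain */
		chain = &rb->wr;
	}

	/* on error, bad_wr points at the first WR that was not posted */
	return ib_post_recv(qp, chain, &bad_wr);
}
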