// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
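
/* Transport header sketch (RPC-over-RDMA version 1)
 *
 * For orientation, svc_rdma_sendto() below builds a Reply transport
 * header of four fixed XDR words followed by three chunk lists:
 *
 *	+-----+------+---------+------+-----------+------------+-------------+
 *	| XID | vers | credits | proc | Read list | Write list | Reply chunk |
 *	+-----+------+---------+------+-----------+------------+-------------+
 *
 * proc is rdma_msg when the RPC message follows the header inline,
 * rdma_nomsg when the entire message is conveyed via the Reply chunk,
 * or rdma_error when the server must reject the Call (see
 * svc_rdma_send_error_msg() below).
 */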

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

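	/* The ctxt and its SGE array are allocated together: one
	 * ib_sge per Send SGE the device supports. sc_sges[0] always
	 * carries the transport header, whose buffer is DMA-mapped
	 * once here and stays mapped for the ctxt's lifetime.
	 */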
	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

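	/* Fast path: take a recycled ctxt from the free list. A new
	 * ctxt is allocated only when the list is empty, so steady-state
	 * operation does no allocation or header DMA mapping here.
	 */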
	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

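	/* The ctxt, and the pages it owns, are released even when the
	 * Send flushed with an error: a flushed WR still consumed its
	 * Send Queue entry and its resources.
	 */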
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

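	/* sc_sq_avail counts free Send Queue entries. atomic_dec_return()
	 * optimistically reserves one; a negative result means the SQ
	 * was already full, so the reservation is backed out and this
	 * thread sleeps until a Send completion releases an entry.
	 */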
	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
					     struct svc_rdma_send_ctxt *sctxt,
					     unsigned int *remaining)
{
	__be32 *p;
	const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
	u32 handle, length;
	u64 offset;

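	/* On the wire, an RDMA segment is three XDR words: a 32-bit
	 * handle (rkey), a 32-bit length, and a 64-bit offset. Hence
	 * rpcrdma_segment_maxsz words are reserved here.
	 */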
	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	xdr_decode_rdma_segment(src, &handle, &length, &offset);

	if (*remaining < length) {
		/* segment only partly filled */
		length = *remaining;
		*remaining = 0;
	} else {
		/* entire segment was consumed */
		*remaining -= length;
	}
	xdr_encode_rdma_segment(p, handle, length, offset);

	trace_svcrdma_encode_wseg(handle, length, offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	ssize_t len, ret;

	len = 0;
	trace_svcrdma_encode_write_chunk(remaining);

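	/* A Write chunk is encoded as a list discriminator ("item
	 * present"), a segment count, then each segment in turn,
	 * mirroring the chunk the client sent in the Call header.
	 */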
	src++;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	nsegs = be32_to_cpup(src++);
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	for (i = nsegs; i; i--) {
		ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
		if (ret < 0)
			return -EMSGSIZE;
		src += rpcrdma_segment_maxsz;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
			   struct svc_rdma_send_ctxt *sctxt,
			   unsigned int length)
{
	ssize_t len, ret;

	ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
	if (ret < 0)
		return ret;
	len = ret;

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
					   length);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	return -EIO;
}

/* ib_dma_map_page() is used here because the unmap path,
 * svc_rdma_send_ctxt_put(), uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *	%true if pull-up must be used
 *	%false otherwise
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    struct xdr_buf *xdr)
{
	int elements;

	/* For small messages, copying bytes is cheaper than DMA mapping.
	 */
	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
		return true;

	/* Check whether the xdr_buf has more elements than can
	 * fit in a single RDMA Send.
	 */
	/* xdr->pages */
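	/* When the client provided a Write chunk, the page list is
	 * conveyed by RDMA Write rather than inline in the Send, so
	 * those pages consume no SGEs here.
	 */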
	if (!rctxt || !rctxt->rc_write_list) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success, or a negative errno on failure.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (rctxt && rctxt->rc_write_list) {
		u32 xdrpad;

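		/* The page-list payload went out via RDMA Write, and
		 * its XDR padding travels with it. Skip the pad bytes
		 * at the front of the tail so they are not also sent
		 * inline.
		 */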
		xdrpad = xdr_pad_size(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	sctxt->sc_sges[0].length += xdr->len;
	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   struct xdr_buf *xdr)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (rctxt && rctxt->rc_reply_chunk)
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	++sctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, sctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (rctxt && rctxt->rc_write_list) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_pad_size(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

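	/* When the client supports Remote Invalidation, Send With
	 * Invalidate invalidates one client-provided rkey as part of
	 * the Send, saving the client a separate local invalidation.
	 */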
	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, sctxt);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

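		/* ERR_VERS carries the lowest and highest RPC-over-RDMA
		 * versions the server implements; this server speaks
		 * only version 1, so both fields are rpcrdma_version.
		 */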
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *wr_lst = rctxt->rc_write_list;
	__be32 *rp_ch = rctxt->rc_reply_chunk;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err0;
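
	/* Fixed transport header fields: the Call's XID, the protocol
	 * version, the server's credit grant, and the procedure. The
	 * Reply is rdma_nomsg when the whole RPC message moves via the
	 * Reply chunk, otherwise rdma_msg.
	 */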
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rp_ch ? rdma_nomsg : rdma_msg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err0;
	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_err(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}